org_text
stringlengths 830
329k
| texts
sequence | scores
sequence | num_lines
int64 1
8.05k
| avg_score
float64 0
0.27
| check
bool 1
class |
---|---|---|---|---|---|
#-------------------------------------------------------------------------------
# Name: navitia2OSM.py
#
# Author: @nlehuby - noemie.lehuby(at)gmail.com
#
# Created: 04/04/2014
# Licence: WTFPL
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import requests
import json
import xmltodict
import logging
from logging.handlers import RotatingFileHandler
def Navitia_stop_points_nearby(longitude, latitude, distance) :
    """Look up every navitia stop_point within [distance] meters of (longitude, latitude).

    Returns 0 when navitia reports no result at all, otherwise a list of
    the unique stop names (utf-8 encoded, title-cased).  Callers rely on
    the `== 0` sentinel, so the empty case intentionally is NOT `[]`.
    """
    url = ("http://api.navitia.io/v1/coverage/paris/coords/"
           + str(longitude) + ';' + str(latitude)
           + "/places_nearby?type[]=stop_point&distance=" + str(distance))
    reponse = requests.get(url, headers={'Authorization': 'my-api-key'})
    payload = json.loads(reponse.content)

    total = payload['pagination']['total_result']
    if total == 0:
        log.info( "Pas de résultats")
        return 0

    log.info( "il y a " + str(total) + " résultats :")

    noms = set()
    for place in payload['places_nearby'] :
        nom = str(place['stop_point']['name'].encode('utf-8').title())
        log.info( "--> " + nom + " : à " + str(place['distance']) + " mètres.")
        noms.add(nom)
    return list(noms)
#test de fonction :
#Navitia_stop_points_nearby(2.2959928,48.8631204,100)
def send_to_JOSM(id_arret_bus, nom_arret_bus) :
    """Write a JOSM-loadable <id>.osm file adding the name *nom_arret_bus* to node *id_arret_bus*.

    Fetches the current node XML from the OSM API, marks the node as
    modified, appends the name (plus a source:name) tag, and writes the
    result to "<node id>.osm" in the current directory.
    """
    # fetch the node from the OSM API
    appel = requests.get('http://api.openstreetmap.org/api/0.6/node/' + str(id_arret_bus))
    obj = xmltodict.parse(appel.content)

    # file-level attributes: strip the server boilerplate, brand the file
    del obj['osm']['@attribution']
    del obj['osm']['@license']
    del obj['osm']['@copyright']
    obj['osm']['@generator'] = "powered by navitia.io python script"

    # node-specific attributes: JOSM uploads only nodes flagged 'modify'
    obj['osm']["node"]['@action'] = 'modify'

    # xmltodict yields a dict for a single <tag> child and a list for
    # several; normalize to a list so appending always works.  (The
    # original used a bare try/except for this dispatch, which also
    # swallowed unrelated errors.)
    if not isinstance(obj['osm']["node"].get('tag'), list):
        # only one tag on the node (necessarily highway=bus_stop here),
        # so rebuild the tag list ourselves
        obj['osm']["node"]['tag'] = [{'@k': 'highway', '@v': 'bus_stop'}]

    obj['osm']["node"]['tag'].append({'@k': 'name', '@v': nom_arret_bus})
    obj['osm']["node"]['tag'].append({'@k': 'source:name', '@v': 'opendata RATP - navitia.io'})

    # rebuild the .osm file; `with` guarantees the handle is closed even
    # if unparse/write raises (the original leaked it on error)
    #log.debug( xmltodict.unparse(obj, pretty=True))
    with open(str(id_arret_bus) + '.osm', 'w') as fichier:
        fichier.write(xmltodict.unparse(obj).encode('utf-8'))
#test de fonction :
#send_to_JOSM(2522119878,"ici, c'est Boissy")
if __name__ == '__main__':
    # Logging setup: everything INFO+ goes to a rotating log file,
    # WARNING+ is echoed on the console.
    ## carefully lifted from http://sametmax.com/ecrire-des-logs-en-python/ ;)
    log = logging.getLogger()
    log.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')
    file_handler = RotatingFileHandler('NAViTiA2OSM.log', 'a', 1000000, 1)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    log.addHandler(file_handler)
    steam_handler = logging.StreamHandler()
    steam_handler.setLevel(logging.WARNING)
    log.addHandler(steam_handler)
    ##

    # Overpass query returning every OSM bus stop without a name tag.
    # The commented-out variants are narrower test areas.
    #requete = "http://api.openstreetmap.fr/oapi/interpreter?data=[out:json];node%28area:3600007444%29[%22highway%22=%22bus_stop%22][%22shelter%22=%22yes%22][name!~%27.%27];out%20body;" #avec abri, à Paris > 216
    #requete = 'http://api.openstreetmap.fr/oapi/interpreter?data=[out:json][timeout:25];area(3600105794)->.area;node["highway"="bus_stop"]["name"!~"."](area.area);out meta qt;' # à Bonneuil sur Marne
    #requete = 'http://api.openstreetmap.fr/oapi/interpreter?data=[out:json][timeout:25];area(3600107966)->.area;node["highway"="bus_stop"]["name"!~"."](area.area);out meta qt;' # à Boissy-Saint-Léger
    requete = 'http://api.openstreetmap.fr/oapi/interpreter?data=[out:json][timeout:125];area(3600007444)->.area;node["highway"="bus_stop"]["name"!~"."](area.area);out meta qt;' # à Paris > 267

    appel = requests.get(requete)
    data_OSM = json.loads(appel.content)

    # Classify each unnamed OSM stop by whether navitia knows a stop
    # within 10 meters of it.
    sans_nom = []   # OSM ids with no navitia stop nearby
    avec_nom = []   # [OSM id, [candidate names]] pairs

    for bus_stop in data_OSM["elements"] :
        log.info(bus_stop["id"])
        lat = bus_stop["lat"]
        lon = bus_stop["lon"]

        #print Navitia_stop_points_nearby(lon,lat,100)
        test = Navitia_stop_points_nearby(lon,lat,10)
        if test == 0 :
            sans_nom.append(bus_stop["id"])
        else :
            avec_nom.append([bus_stop["id"], test])

    log.warning( "---")
    log.warning( "il y a " + str(len(data_OSM["elements"])) + " arrêts de bus OSM sans nom.")
    log.warning( "il y a " + str(len(avec_nom)) + " arrêts de bus situés à moins de 10 mètres d'un arrêt présent navitia.io")
    log.warning( "il y a " + str(len(sans_nom)) + " arrêts de bus pour lequels aucun arrêt n'a été trouvé dans navitia.io")

    # Uncomment to actually emit JOSM files for the unambiguous matches:
## for arret in avec_nom :
##     if len(arret[1]) == 1 :
##         log.warning( str(arret[0]) + ' : ' + arret[1][0])
##         send_to_JOSM(arret[0],arret[1][0])
| [
"#-------------------------------------------------------------------------------\n",
"# Name: navitia2OSM.py\n",
"#\n",
"# Author: @nlehuby - noemie.lehuby(at)gmail.com\n",
"#\n",
"# Created: 04/04/2014\n",
"# Licence: WTFPL\n",
"#-------------------------------------------------------------------------------\n",
"#!/usr/bin/env python\n",
"\n",
"import requests\n",
"import json\n",
"import xmltodict\n",
"import logging\n",
"from logging.handlers import RotatingFileHandler\n",
"\n",
"\n",
"def Navitia_stop_points_nearby(longitude, latitude, distance) :\n",
" \"\"\" recherche tous les stop_points à moins de [distance] mètres du point (longitude, latitude).\n",
" \"\"\"\n",
" appel_nav = requests.get(\"http://api.navitia.io/v1/coverage/paris/coords/\"+ str(longitude) +';'+ str(latitude) +\"/places_nearby?type[]=stop_point&distance=\" + str(distance), headers={'Authorization': 'my-api-key'})\n",
" data_nav = json.loads(appel_nav.content)\n",
"\n",
" if data_nav['pagination']['total_result'] == 0:\n",
" log.info( \"Pas de résultats\")\n",
" return 0\n",
"\n",
" log.info( \"il y a \" + str(data_nav['pagination']['total_result']) + \" résultats :\")\n",
"\n",
" resultats = set()\n",
" for resultat in data_nav['places_nearby'] :\n",
" log.info( \"--> \" + str(resultat['stop_point']['name'].encode('utf-8').title()) + \" : à \" + str(resultat['distance']) + \" mètres.\")\n",
" resultats.add(str(resultat['stop_point']['name'].encode('utf-8').title()))\n",
" return list(resultats)\n",
"\n",
"#test de fonction :\n",
"#Navitia_stop_points_nearby(2.2959928,48.8631204,100)\n",
"\n",
"def send_to_JOSM(id_arret_bus, nom_arret_bus) :\n",
" \"\"\" crée un fichier JOSM pour l'ajout du nom nom_arret_bus au node id_arret_bus.\n",
" \"\"\"\n",
" #appel de l'API OSM\n",
" appel = requests.get('http://api.openstreetmap.org/api/0.6/node/' + str(id_arret_bus))\n",
" obj = xmltodict.parse(appel.content)\n",
"\n",
" # tags généraux du fichier\n",
" del obj['osm']['@attribution']\n",
" del obj['osm']['@license']\n",
" del obj['osm']['@copyright']\n",
"\n",
" obj['osm']['@generator'] = \"powered by navitia.io python script\"\n",
"\n",
" # tags spécifiques du noeud\n",
" obj['osm'][\"node\"]['@action'] = 'modify'\n",
"\n",
" try:\n",
" obj['osm'][\"node\"]['tag'][0]\n",
" except :\n",
" #il n'y a qu'un seul tag, donc il faut construire la liste soi-même\n",
" del obj['osm'][\"node\"]['tag']\n",
" obj['osm'][\"node\"]['tag'] = []\n",
"\n",
" tag = dict()\n",
" tag['@k'] = 'highway'\n",
" tag['@v'] = 'bus_stop'\n",
" obj['osm'][\"node\"]['tag'].append(tag)\n",
"\n",
"\n",
" tag = dict()\n",
" tag['@k'] = 'name'\n",
" tag['@v'] = nom_arret_bus\n",
" obj['osm'][\"node\"]['tag'].append(tag)\n",
"\n",
" tag = dict()\n",
" tag['@k'] = 'source:name'\n",
" tag['@v'] = 'opendata RATP - navitia.io'\n",
" obj['osm'][\"node\"]['tag'].append(tag)\n",
"\n",
" #reconstitution du fichier .osm\n",
" #log.debug( xmltodict.unparse(obj, pretty=True))\n",
" fichier = open(str(id_arret_bus) + '.osm', 'w')\n",
" fichier.write(xmltodict.unparse(obj).encode('utf-8'))\n",
" fichier.close()\n",
"\n",
"#test de fonction :\n",
"#send_to_JOSM(2522119878,\"ici, c'est Boissy\")\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" # Gestion des logs\n",
" ##soigneusement pompé sur http://sametmax.com/ecrire-des-logs-en-python/ ;)\n",
" log = logging.getLogger()\n",
" log.setLevel(logging.DEBUG)\n",
" formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s')\n",
" file_handler = RotatingFileHandler('NAViTiA2OSM.log', 'a', 1000000, 1)\n",
" file_handler.setLevel(logging.INFO)\n",
" file_handler.setFormatter(formatter)\n",
" log.addHandler(file_handler)\n",
" steam_handler = logging.StreamHandler()\n",
" steam_handler.setLevel(logging.WARNING)\n",
" log.addHandler(steam_handler)\n",
" ##\n",
"\n",
" #requete = \"http://api.openstreetmap.fr/oapi/interpreter?data=[out:json];node%28area:3600007444%29[%22highway%22=%22bus_stop%22][%22shelter%22=%22yes%22][name!~%27.%27];out%20body;\" #avec abri, à Paris > 216\n",
" #requete = 'http://api.openstreetmap.fr/oapi/interpreter?data=[out:json][timeout:25];area(3600105794)->.area;node[\"highway\"=\"bus_stop\"][\"name\"!~\".\"](area.area);out meta qt;' # à Bonneuil sur Marne\n",
" #requete = 'http://api.openstreetmap.fr/oapi/interpreter?data=[out:json][timeout:25];area(3600107966)->.area;node[\"highway\"=\"bus_stop\"][\"name\"!~\".\"](area.area);out meta qt;' # à Boissy-Saint-Léger\n",
" requete = 'http://api.openstreetmap.fr/oapi/interpreter?data=[out:json][timeout:125];area(3600007444)->.area;node[\"highway\"=\"bus_stop\"][\"name\"!~\".\"](area.area);out meta qt;' # à Paris > 267\n",
"\n",
"\n",
" appel = requests.get(requete)\n",
" data_OSM = json.loads(appel.content)\n",
"\n",
" sans_nom = []\n",
" avec_nom = []\n",
"\n",
" for bus_stop in data_OSM[\"elements\"] :\n",
" log.info(bus_stop[\"id\"])\n",
" lat = bus_stop[\"lat\"]\n",
" lon = bus_stop[\"lon\"]\n",
"\n",
" #print Navitia_stop_points_nearby(lon,lat,100)\n",
" test = Navitia_stop_points_nearby(lon,lat,10)\n",
" if test == 0 :\n",
" sans_nom.append(bus_stop[\"id\"])\n",
" else :\n",
" avec_nom.append([bus_stop[\"id\"], test])\n",
"\n",
" log.warning( \"---\")\n",
" log.warning( \"il y a \" + str(len(data_OSM[\"elements\"])) + \" arrêts de bus OSM sans nom.\")\n",
" log.warning( \"il y a \" + str(len(avec_nom)) + \" arrêts de bus situés à moins de 10 mètres d'un arrêt présent navitia.io\")\n",
" log.warning( \"il y a \" + str(len(sans_nom)) + \" arrêts de bus pour lequels aucun arrêt n'a été trouvé dans navitia.io\")\n",
"\n",
"## for arret in avec_nom :\n",
"## if len(arret[1]) == 1 :\n",
"## log.warning( str(arret[0]) + ' : ' + arret[1][0])\n",
"## send_to_JOSM(arret[0],arret[1][0])\n",
"\n"
] | [
0.024691358024691357,
0,
0,
0,
0,
0,
0,
0.024691358024691357,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0.015625,
0.01,
0,
0.0228310502283105,
0,
0,
0,
0.02631578947368421,
0,
0,
0.022727272727272728,
0,
0,
0.020833333333333332,
0.014388489208633094,
0.012048192771084338,
0,
0,
0.05,
0.018518518518518517,
0,
0.041666666666666664,
0.011764705882352941,
0,
0.041666666666666664,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0.15384615384615385,
0.02666666666666667,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0.018867924528301886,
0,
0,
0,
0,
0.05,
0.021739130434782608,
0,
0,
0,
0,
0.0125,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009433962264150943,
0.009950248756218905,
0.009950248756218905,
0.010309278350515464,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0.01818181818181818,
0.037037037037037035,
0.043478260869565216,
0,
0.06666666666666667,
0,
0,
0.041666666666666664,
0.02127659574468085,
0.015873015873015872,
0.016129032258064516,
0,
0.03333333333333333,
0.029411764705882353,
0.015625,
0.02040816326530612,
1
] | 137 | 0.016613 | false |
#-*- coding: utf-8 -*-
######################################################################
# Copyright (c) 2017 by WLBlazers Corporation
#
# failover.py
#
#
######################################################################
# Modifications Section:
######################################################################
## Date File Changes
######################################################################
## 07/13/2019 Baseline version 1.0.0
##
######################################################################
import os
import string
import sys, getopt
import traceback
import mysql_handle as mysql
import common
import logging
import logging.config
logging.config.fileConfig('./logging.conf')
logger = logging.getLogger('WLBlazers')
###############################################################################
# function switch2master
###############################################################################
def switch2master(mysql_conn, db_type, group_id, s_conn, sta_id):
    """Promote the standby (slave) database to master.

    Parameters:
      mysql_conn -- connection to the repository DB used for op-progress logging
      db_type    -- database type label (e.g. "mysql") for the op log
      group_id   -- disaster-recovery group id (db_cfg_mysql_dr.id)
      s_conn     -- connection to the slave being promoted
      sta_id     -- id of the standby database, used only in log messages

    Returns 0 on success, -1 when the target is not a slave or its
    replication status cannot be read.
    """
    result = -1

    logger.info("FAILOVER database to master in progress...")
    # determine the current replication role of the target
    role = mysql.IsSlave(s_conn)
    common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '获取数据库角色成功', 0, 2)
    logger.info("The current database role is: %s (0:MASTER; 1:SLAVE)" %(role))

    if role == 1:
        common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '验证从库数据库角色成功', 0, 2)

        # read the slave's replication status
        slave_info = mysql.GetSingleRow(s_conn, 'show slave status;')
        if slave_info:
            # Positional columns of SHOW SLAVE STATUS.
            # NOTE(review): these indices assume a specific server
            # version's column layout -- confirm against the deployed
            # MySQL release.
            current_binlog_file = slave_info[9]
            current_binlog_pos = slave_info[21]
            master_binlog_file = slave_info[5]
            master_binlog_pos = slave_info[6]

            logger.debug("current_binlog_file: %s" %(current_binlog_file))
            logger.debug("current_binlog_pos: %s" %(current_binlog_pos))
            logger.debug("master_binlog_file: %s" %(master_binlog_file))
            logger.debug("master_binlog_pos: %s" %(master_binlog_pos))

            # safe to switch: stop replication and wipe the slave config
            logger.info("Now we are going to switch database %s to master." %(sta_id))
            common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '正在将从库切换成主库...', 0, 0)
            # local renamed from `str` to `sql`: the original shadowed
            # the `str` builtin (used right above for %-formatting)
            sql = '''stop slave io_thread; '''
            mysql.ExecuteSQL(s_conn, sql)
            logger.debug("Stop slave io_thread.")

            sql = '''stop slave; '''
            mysql.ExecuteSQL(s_conn, sql)
            logger.debug("Stop slave.")

            sql = '''reset slave all; '''
            mysql.ExecuteSQL(s_conn, sql)
            logger.debug("Reset slave all.")

            logger.info("FAILOVER slave to master successfully.")
            common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '从库已经成功切换成主库', 0, 2)
            result = 0
        else:
            logger.info("Check slave status failed.")
            common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '从库切换主库失败', 0, 2)
            result = -1

    else:
        common.update_db_op_reason(mysql_conn, db_type, group_id, 'FAILOVER', '验证数据库角色失败,当前数据库不是从库,不能切换到主库')
        common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '验证数据库角色失败,当前数据库不是从库,不能切换到主库', 0, 2)
        logger.error("You can not FAILOVER a master database to master!")
        result = -1

    return result
###############################################################################
# function update_switch_flag
###############################################################################
def update_switch_flag(mysql_conn, group_id, db_type="mysql"):
    """Toggle the is_switch flag of DR group *group_id* in db_cfg_mysql_dr.

    Parameters:
      mysql_conn -- connection to the repository database
      group_id   -- disaster-recovery group id (db_cfg_mysql_dr.id)
      db_type    -- database type label for the op log; previously read
                    from a module-global only defined under __main__
                    (NameError when imported).  Kept as a defaulted
                    keyword so existing two-argument calls are unchanged.
    """
    logger.info("Update switch flag in db_cfg_mysql_dr for group %s in progress..." %(group_id))
    # read the current flag (local renamed from `str`, which shadowed the builtin)
    sql = 'select is_switch from db_cfg_mysql_dr where id= %s' %(group_id)
    is_switch = mysql.GetSingleValue(mysql_conn, sql)
    logger.info("The current switch flag is: %s" %(is_switch))

    # flip: 0 -> 1, anything else -> 0
    if is_switch == 0:
        sql = """update db_cfg_mysql_dr set is_switch = 1 where id = %s"""%(group_id)
    else:
        sql = """update db_cfg_mysql_dr set is_switch = 0 where id = %s"""%(group_id)

    is_succ = mysql.ExecuteSQL(mysql_conn, sql)

    if is_succ == 1:
        common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '容灾组更新状态成功', 100, 2)
        logger.info("Update switch flag in db_cfg_mysql_dr for group %s successfully." %(group_id))
    else:
        logger.info("Update switch flag in db_cfg_mysql_dr for group %s failed." %(group_id))
###############################################################################
# main function
###############################################################################
if __name__=="__main__":
    # Parse argv: -p <primary id> -s <standby id> -g <group id>
    pri_id = ''
    sta_id = ''
    try:
        opts, args = getopt.getopt(sys.argv[1:],"p:s:g:")
    except getopt.GetoptError:
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-p':
            pri_id = arg
        elif opt == '-s':
            sta_id = arg
        elif opt == '-g':
            group_id = arg

    ###########################################################################
    # connect to the repository database holding the DR configuration
    mysql_conn = ''
    try:
        mysql_conn = mysql.ConnectMysql()
    # `except Exception as e` replaces the Python-2-only `except Exception,e`
    except Exception as e:
        logger.error(e)
        sys.exit(2)

    # read the standby's connection settings from db_cfg_mysql
    s_host = ""
    s_port = ""
    s_username = ""
    s_password = ""

    s_str = """select host, port, username, password from db_cfg_mysql where id=%s; """ %(sta_id)
    res2 = mysql.GetSingleRow(mysql_conn, s_str)
    if res2:
        s_host = res2[0]
        s_port = res2[1]
        s_username = res2[2]
        s_password = res2[3]
    #print s_host,s_port,s_username,s_password

    # host:port labels used only for logging (no credentials)
    p_str = """select concat(host, ':', port) from db_cfg_mysql where id=%s; """ %(pri_id)
    p_nopass_str = mysql.GetSingleValue(mysql_conn, p_str)
    s_str = """select concat(host, ':', port) from db_cfg_mysql where id=%s; """ %(sta_id)
    s_nopass_str = mysql.GetSingleValue(mysql_conn, s_str)

    logger.info("The master database is: " + p_nopass_str + ", the id is: " + str(pri_id))
    logger.info("The slave database is: " + s_nopass_str + ", the id is: " + str(sta_id))

    db_type = "mysql"
    try:
        common.db_op_lock(mysql_conn, db_type, group_id, 'FAILOVER')            # take the operation lock
        common.init_db_op_instance(mysql_conn, db_type, group_id, 'FAILOVER')   # initialise the switch op instance

        # connect to the standby database
        s_conn = mysql.ConnectMysql_T(s_host,s_port,s_username,s_password)

        if s_conn is None:
            common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '连接从库失败,请根据相应日志查看原因', 0, 3)
            logger.error("Connect to standby database error, exit!!!")

            common.update_db_op_reason(mysql_conn, db_type, group_id, 'FAILOVER', '连接从库失败')
            common.update_db_op_result(mysql_conn, db_type, group_id, 'FAILOVER', '-1')
            sys.exit(2)

        # perform the actual failover
        try:
            common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '准备执行主从切换', 0, 2)

            res_2m = switch2master(mysql_conn, db_type, group_id, s_conn, sta_id)
            if res_2m == 0:
                update_switch_flag(mysql_conn, group_id)
                common.gen_alert_mysql(sta_id, 1)   # generate alert
                common.update_db_op_result(mysql_conn, db_type, group_id, 'FAILOVER', '0')
            else:
                common.update_db_op_result(mysql_conn, db_type, group_id, 'FAILOVER', res_2m)

        except Exception as e:
            logger.error(traceback.format_exc())

    except Exception as e:
        logger.error(traceback.format_exc())
    finally:
        # always release the operation lock, even on sys.exit / error
        common.db_op_unlock(mysql_conn, db_type, group_id, 'FAILOVER')
| [
"#-*- coding: utf-8 -*-\n",
"\n",
"######################################################################\n",
"# Copyright (c) 2017 by WLBlazers Corporation\n",
"#\n",
"# failover.py\n",
"# \n",
"# \n",
"######################################################################\n",
"# Modifications Section:\n",
"######################################################################\n",
"## Date File Changes\n",
"######################################################################\n",
"## 07/13/2019 Baseline version 1.0.0\n",
"##\n",
"######################################################################\n",
"\n",
"import os\n",
"import string\n",
"import sys, getopt\n",
"import traceback\n",
"\n",
"import mysql_handle as mysql\n",
"import common\n",
"\n",
"import logging\n",
"import logging.config\n",
"\n",
"logging.config.fileConfig('./logging.conf')\n",
"logger = logging.getLogger('WLBlazers')\n",
"\n",
"\t\n",
"###############################################################################\n",
"# function switch2master\n",
"###############################################################################\n",
"def switch2master(mysql_conn, db_type, group_id, s_conn, sta_id):\n",
" result=-1\n",
" \n",
" logger.info(\"FAILOVER database to master in progress...\")\n",
" # get database role\n",
" role=mysql.IsSlave(s_conn)\n",
" common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '获取数据库角色成功', 0, 2)\n",
" logger.info(\"The current database role is: %s (0:MASTER; 1:SLAVE)\" %(role))\n",
"\t\n",
" # get database version\n",
"\t\n",
" \n",
" if role==1:\n",
" common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '验证从库数据库角色成功', 0, 2)\n",
" \n",
" # check slave status\n",
" slave_info=mysql.GetSingleRow(s_conn, 'show slave status;')\n",
" if slave_info:\n",
" current_binlog_file=slave_info[9]\n",
" current_binlog_pos=slave_info[21]\n",
" master_binlog_file=slave_info[5]\n",
" master_binlog_pos=slave_info[6]\n",
" \n",
" logger.debug(\"current_binlog_file: %s\" %(current_binlog_file))\n",
" logger.debug(\"current_binlog_pos: %s\" %(current_binlog_pos))\n",
" logger.debug(\"master_binlog_file: %s\" %(master_binlog_file))\n",
" logger.debug(\"master_binlog_pos: %s\" %(master_binlog_pos))\n",
" \n",
" # can switch now\n",
" logger.info(\"Now we are going to switch database %s to master.\" %(sta_id))\n",
" common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '正在将从库切换成主库...', 0, 0)\n",
" str='''stop slave io_thread; '''\n",
" res=mysql.ExecuteSQL(s_conn, str)\n",
" logger.debug(\"Stop slave io_thread.\")\n",
" \n",
" str='''stop slave; '''\n",
" res=mysql.ExecuteSQL(s_conn, str)\n",
" logger.debug(\"Stop slave.\")\n",
" \n",
" str='''reset slave all; '''\n",
" res=mysql.ExecuteSQL(s_conn, str)\n",
" logger.debug(\"Reset slave all.\")\n",
" \n",
" logger.info(\"FAILOVER slave to master successfully.\")\n",
" common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '从库已经成功切换成主库', 0, 2)\n",
" result=0\n",
" else:\n",
" logger.info(\"Check slave status failed.\")\n",
" common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '从库切换主库失败', 0, 2)\n",
" result=-1\n",
" \n",
" else:\n",
" common.update_db_op_reason(mysql_conn, db_type, group_id, 'FAILOVER', '验证数据库角色失败,当前数据库不是从库,不能切换到主库')\n",
" common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '验证数据库角色失败,当前数据库不是从库,不能切换到主库', 0, 2)\n",
" logger.error(\"You can not FAILOVER a master database to master!\")\n",
" result=-1\n",
" \n",
" return result\n",
"\n",
"\n",
"\n",
"###############################################################################\n",
"# function update_switch_flag\n",
"###############################################################################\n",
"def update_switch_flag(mysql_conn, group_id):\n",
" logger.info(\"Update switch flag in db_cfg_mysql_dr for group %s in progress...\" %(group_id))\n",
" # get current switch flag\n",
" str='select is_switch from db_cfg_mysql_dr where id= %s' %(group_id)\n",
" is_switch=mysql.GetSingleValue(mysql_conn, str)\n",
" logger.info(\"The current switch flag is: %s\" %(is_switch))\n",
"\t\n",
" if is_switch==0:\n",
" str=\"\"\"update db_cfg_mysql_dr set is_switch = 1 where id = %s\"\"\"%(group_id)\n",
" else:\n",
" str=\"\"\"update db_cfg_mysql_dr set is_switch = 0 where id = %s\"\"\"%(group_id)\n",
"\n",
"\t\t\n",
" is_succ = mysql.ExecuteSQL(mysql_conn, str)\n",
"\n",
" if is_succ==1:\n",
" common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '容灾组更新状态成功', 100, 2)\n",
" logger.info(\"Update switch flag in db_cfg_mysql_dr for group %s successfully.\" %(group_id))\n",
" else:\n",
" logger.info(\"Update switch flag in db_cfg_mysql_dr for group %s failed.\" %(group_id))\n",
"\t\n",
"\n",
"\n",
" \n",
"###############################################################################\n",
"# main function\n",
"###############################################################################\n",
"if __name__==\"__main__\":\n",
" # parse argv\n",
" pri_id = ''\n",
" sta_id = ''\n",
" try:\n",
" opts, args = getopt.getopt(sys.argv[1:],\"p:s:g:\")\n",
" except getopt.GetoptError:\n",
" sys.exit(2)\n",
"\t\t\n",
" for opt, arg in opts:\n",
" if opt == '-p':\n",
" pri_id = arg\n",
" elif opt == '-s':\n",
" sta_id = arg\n",
" elif opt == '-g':\n",
" group_id = arg\n",
" \n",
"\t\n",
"\t###########################################################################\n",
"\t# connect to mysql\n",
" mysql_conn = ''\n",
" try:\n",
" mysql_conn = mysql.ConnectMysql()\n",
" except Exception as e:\n",
" logger.error(e)\n",
" sys.exit(2)\n",
"\t\t\n",
" \n",
" # get infomation from mysql\n",
" s_host = \"\"\n",
" s_port = \"\"\n",
" s_username = \"\"\n",
" s_password = \"\"\n",
" \n",
" s_str = \"\"\"select host, port, username, password from db_cfg_mysql where id=%s; \"\"\" %(sta_id)\n",
" res2 = mysql.GetSingleRow(mysql_conn, s_str)\n",
" if res2:\n",
" s_host = res2[0]\n",
" s_port = res2[1]\n",
" s_username = res2[2]\n",
" s_password = res2[3]\n",
" #print s_host,s_port,s_username,s_password\n",
"\n",
"\t\n",
" p_str = \"\"\"select concat(host, ':', port) from db_cfg_mysql where id=%s; \"\"\" %(pri_id)\n",
" p_nopass_str = mysql.GetSingleValue(mysql_conn, p_str)\n",
" s_str = \"\"\"select concat(host, ':', port) from db_cfg_mysql where id=%s; \"\"\" %(sta_id)\n",
" s_nopass_str = mysql.GetSingleValue(mysql_conn, s_str)\n",
"\t\n",
" logger.info(\"The master database is: \" + p_nopass_str + \", the id is: \" + str(pri_id))\n",
" logger.info(\"The slave database is: \" + s_nopass_str + \", the id is: \" + str(sta_id))\n",
"\n",
"\n",
"\n",
" db_type = \"mysql\"\n",
" try:\n",
" common.db_op_lock(mysql_conn, db_type, group_id, 'FAILOVER')\t\t\t# 加锁\n",
" common.init_db_op_instance(mysql_conn, db_type, group_id, 'FAILOVER')\t\t\t\t\t#初始化切换实例\n",
"\t\n",
" # connect to mysql\n",
" s_conn = mysql.ConnectMysql_T(s_host,s_port,s_username,s_password)\n",
"\n",
" if s_conn is None:\n",
" common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '连接从库失败,请根据相应日志查看原因', 0, 3)\n",
" logger.error(\"Connect to standby database error, exit!!!\")\n",
" \n",
" common.update_db_op_reason(mysql_conn, db_type, group_id, 'FAILOVER', '连接从库失败')\n",
" common.update_db_op_result(mysql_conn, db_type, group_id, 'FAILOVER', '-1')\n",
" sys.exit(2)\n",
" \n",
"\n",
" \n",
" \n",
" # 正式开始切换 \n",
" try:\n",
" common.log_db_op_process(mysql_conn, db_type, group_id, 'FAILOVER', '准备执行主从切换', 0, 2)\n",
" \n",
" res_2m=switch2master(mysql_conn, db_type, group_id, s_conn, sta_id)\n",
" if res_2m ==0:\n",
" update_switch_flag(mysql_conn, group_id)\n",
" common.gen_alert_mysql(sta_id, 1) # generate alert\n",
" common.update_db_op_result(mysql_conn, db_type, group_id, 'FAILOVER', '0')\n",
" else:\n",
" common.update_db_op_result(mysql_conn, db_type, group_id, 'FAILOVER', res_2m)\n",
" \n",
" except Exception,e:\n",
" logger.error(traceback.format_exc())\n",
" pass\n",
"\n",
" except Exception,e:\n",
" logger.error(traceback.format_exc())\n",
" pass\n",
" finally:\n",
" common.db_op_unlock(mysql_conn, db_type, group_id, 'FAILOVER')\n",
"\t"
] | [
0.043478260869565216,
0,
0,
0,
0,
0,
0.3333333333333333,
0.3333333333333333,
0,
0,
0,
0.023255813953488372,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0.14285714285714285,
0.2,
0,
0,
0.03225806451612903,
0.01098901098901099,
0.0125,
1.5,
0.037037037037037035,
1.5,
0.4,
0.125,
0.010309278350515464,
0.1111111111111111,
0,
0.014705882352941176,
0,
0.021739130434782608,
0.021739130434782608,
0.022222222222222223,
0.022727272727272728,
0.07692307692307693,
0.013333333333333334,
0.0136986301369863,
0.0136986301369863,
0.014084507042253521,
0.07692307692307693,
0,
0.022988505747126436,
0.009708737864077669,
0.022222222222222223,
0.021739130434782608,
0,
0.1111111111111111,
0.02857142857142857,
0.021739130434782608,
0,
0.1111111111111111,
0.025,
0.021739130434782608,
0,
0.058823529411764705,
0,
0.009900990099009901,
0.047619047619047616,
0,
0,
0.01020408163265306,
0.045454545454545456,
0.1111111111111111,
0,
0.009174311926605505,
0.008849557522123894,
0,
0.05555555555555555,
0.1111111111111111,
0,
0,
0,
0,
0.0125,
0,
0,
0.021739130434782608,
0.020618556701030927,
0,
0.0273972602739726,
0.019230769230769232,
0.015873015873015872,
1.5,
0.09523809523809523,
0.03571428571428571,
0,
0.03571428571428571,
0,
1,
0.041666666666666664,
0,
0.05263157894736842,
0.010309278350515464,
0.02,
0,
0.02127659574468085,
1.5,
0,
0,
0.4,
0.0125,
0,
0,
0.08,
0,
0,
0,
0,
0.017241379310344827,
0,
0,
1,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0.2,
1.5,
0.025974025974025976,
0.05,
0.05,
0,
0,
0,
0,
0,
1,
0.4,
0.03125,
0,
0,
0,
0,
0.2,
0.02040816326530612,
0,
0,
0,
0,
0,
0,
0.02127659574468085,
0,
1.5,
0.04395604395604396,
0,
0.02197802197802198,
0,
1.5,
0.02197802197802198,
0.011111111111111112,
0,
0,
0,
0.045454545454545456,
0,
0,
0.02197802197802198,
1.5,
0.037037037037037035,
0.04,
0,
0,
0.009259259259259259,
0,
0.07692307692307693,
0.010869565217391304,
0.011363636363636364,
0,
0.1111111111111111,
0,
0.2,
0.25,
0.10526315789473684,
0,
0.01020408163265306,
0.07692307692307693,
0.0125,
0.037037037037037035,
0,
0,
0.01098901098901099,
0,
0.010638297872340425,
0.058823529411764705,
0.03571428571428571,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
4
] | 221 | 0.119232 | false |
################################################################################
### Copyright © 2012-2013 BlackDragonHunt
###
### This file is part of the Super Duper Script Editor.
###
### The Super Duper Script Editor is free software: you can redistribute it
### and/or modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation, either version 3 of the License,
### or (at your option) any later version.
###
### The Super Duper Script Editor is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with the Super Duper Script Editor.
### If not, see <http://www.gnu.org/licenses/>.
################################################################################
from scene_info import SceneInfo
from collections import defaultdict
# Kind of imitating the ScriptFile class.
class ScriptJump(object):
  """A placeholder that imitates ScriptFile for a jump to another scene.

  Carries no editable text of its own: item reads always yield the
  empty string and item writes are silently discarded.
  """

  def __init__(self, scene_info):
    self.scene_info = scene_info

    # destination coordinates of the jump
    self.ch, self.scene, self.room = (
      scene_info.goto_ch, scene_info.goto_scene, scene_info.goto_room)

    # mimic ScriptFile's text-holding attributes with empty values
    self.text     = u""
    self.notags   = defaultdict(lambda: u"")
    self.comments = u""
    self.filename = u"→ " + self.target()

  def target(self):
    """Return the target .lin filename, e.g. u"e01_002_003.lin"."""
    return u"e{0:02d}_{1:03d}_{2:03d}.lin".format(self.ch, self.scene, self.room)

  def __getitem__(self, lang):
    # a jump has no translatable text in any language
    return u""

  def __setitem__(self, lang, text):
    # nothing to store; ignore the write
    pass
### EOF ### | [
"################################################################################\n",
"### Copyright © 2012-2013 BlackDragonHunt\n",
"### \n",
"### This file is part of the Super Duper Script Editor.\n",
"### \n",
"### The Super Duper Script Editor is free software: you can redistribute it\n",
"### and/or modify it under the terms of the GNU General Public License as\n",
"### published by the Free Software Foundation, either version 3 of the License,\n",
"### or (at your option) any later version.\n",
"### \n",
"### The Super Duper Script Editor is distributed in the hope that it will be\n",
"### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"### GNU General Public License for more details.\n",
"### \n",
"### You should have received a copy of the GNU General Public License\n",
"### along with the Super Duper Script Editor.\n",
"### If not, see <http://www.gnu.org/licenses/>.\n",
"################################################################################\n",
"\n",
"from scene_info import SceneInfo\n",
"from collections import defaultdict\n",
"\n",
"# Kind of imitating the ScriptFile class.\n",
"class ScriptJump(object):\n",
" \n",
" def __init__(self, scene_info):\n",
" self.scene_info = scene_info\n",
" \n",
" self.ch = scene_info.goto_ch\n",
" self.scene = scene_info.goto_scene\n",
" self.room = scene_info.goto_room\n",
" \n",
" self.text = u\"\"\n",
" self.notags = defaultdict(lambda: u\"\")\n",
" self.comments = u\"\"\n",
" self.filename = u\"→ \" + self.target()\n",
" \n",
" def target(self):\n",
" return u\"e%02d_%03d_%03d.lin\" % (self.ch, self.scene, self.room)\n",
" \n",
" def __getitem__(self, lang):\n",
" return u\"\"\n",
" \n",
" def __setitem__(self, lang, text):\n",
" pass\n",
"\n",
"### EOF ###"
] | [
0.012345679012345678,
0.023809523809523808,
0.2,
0.017857142857142856,
0.2,
0.013157894736842105,
0.013513513513513514,
0.0125,
0.023255813953488372,
0.2,
0.012987012987012988,
0.013333333333333334,
0.015151515151515152,
0.02040816326530612,
0.2,
0.014285714285714285,
0.021739130434782608,
0.020833333333333332,
0.012345679012345678,
0,
0,
0,
0,
0,
0.038461538461538464,
0.3333333333333333,
0.029411764705882353,
0,
0.2,
0.02702702702702703,
0.025,
0.02564102564102564,
0.2,
0.041666666666666664,
0.022222222222222223,
0,
0,
0.3333333333333333,
0.05,
0,
0.14285714285714285,
0.03225806451612903,
0,
0.3333333333333333,
0.02702702702702703,
0,
0,
0.18181818181818182
] | 48 | 0.064394 | false |
from __future__ import division
from PyQt4 import QtCore
from numpy import *
from scipy import optimize
from scipy import constants as scipycsts
import pylab
from filter import savitzky_golay
# Define some global variables
DEBUG=0 # Debug mode can take values (0,1,2). 0 plots no intermediary results. 1 plots intermediary plots when the canvas is free. 2 plots all intermediary plots
METHOD='hybrid' # Calculation method: ('maxmin','modesum','hybrid','fit')
CONVOLVE=False # Whether to convolve the fit function with the response function when METHOD=='fit'
MINSMOOTH=None # use 5 sample smoothing in calculating the minimum. Set to None to disable
MIN_MODE_INTENSITY=20/65536 # Minimum difference in counts between max and min value of a mode to be recognized as legit
#DEFAULT_PARAM={"R1":.997,"R2":.322,"L":375e-6,"n":3.619}
#DEFAULT_PARAM={"R1":.94,"R2":.3,"L":600e-6,"n":3.619}
DEFAULT_PARAM={"R1":.94,"R2":.3,"L":375e-6,"n":3.619}
MIN_MODE_SPACING=0.4e-9*(375e-6/DEFAULT_PARAM["L"]) # Minimum spacing in nm between each mode used for peakClean algorithm
class HakkiPaoli(QtCore.QObject):
    # Qt signals: progress fraction (0..1) for the GUI, and a dict of
    # intermediate plot data for a figure canvas.
    updateProgress=QtCore.pyqtSignal(float)
    plotDataReady=QtCore.pyqtSignal(dict)
    """ Class which holds the code for Hakki-Paoli gain calculation """
    # NOTE(review): the string above is not the class docstring (it is not
    # the first statement in the class body), so help() will not show it.
    def __init__(self,x,y,param=DEFAULT_PARAM,parent=None):
        """Store the spectrum (x: wavelength, y: intensity) and cavity parameters.

        param is a dict with keys "R1", "R2" (mirror reflectivities),
        "L" (cavity length) and "n" (effective index), as in DEFAULT_PARAM.
        """
        super(HakkiPaoli, self).__init__()
        self.x=x
        self.y=y
        self.param=param # holds some parameters defining the cavity
        self.rendering=False # flag which says whether the canvas is ready to draw again
        self.parent=parent

    def restoreThreadAffinity(self):
        """ move back to parent thread if multithreading was invoked by the parent """
        self.moveToThread(self.parent.thread())

    def gainCalculation(self):
        """ Tries to calculate the gain from the spectrum using the Hakki-Paoli technique. R1 and R2 are the reflectivities of the mirrors """
        # Detect all of the peaks in y
        maxIdxRaw=peakDetect(self.y)
        maxIdx=peakClean(self.x,self.y,maxIdxRaw)
        # calculate cavity length using FSR[nm]=lambda0^2/2/n/L (note: this is not accurate since we need to use the unknown wavlength dependent effective index)
        #L=mean((x[maxIdx[1:]]-diff(x[maxIdx])/2)**2/2/n/diff(x[maxIdx]))
        # Loop through each mode and calculate Pmax, Pmin, and the lambda we use for that mode idx
        modeGain=array([])
        modeWavelength=array([])
        for modeIdx in range(len(maxIdx)-2):
            try:
                self.updateProgress.emit(modeIdx/(len(maxIdx)-2))
                # Slice off the section in x and y corresponding to current mode (ignore first two modes)
                modeSpacing=(maxIdx[modeIdx+2]-maxIdx[modeIdx])/2 # average mode spacing based on next two peaks
                startIdx=maxIdx[modeIdx+1]-round(modeSpacing/2)
                # NOTE(review): the duplicated "stopIdx=" below is a harmless
                # no-op, but looks like a typo.
                stopIdx=stopIdx=maxIdx[modeIdx+1]+round(modeSpacing/2)
                xCurrMode=self.x[startIdx:stopIdx]
                yCurrMode=self.y[startIdx:stopIdx]
                # Set Pmax as power at central peak of mode
                Pmax=self.y[maxIdx[modeIdx+1]]
                # Set Pmin as minimum power over the whole space. This is less accurate, but more robust than making assumptions about where the min should be and averaging
                if MINSMOOTH!=None:
                    ysmooth=savitzky_golay(yCurrMode,MINSMOOTH,1,0) # linear polynomial with MINSMOOTH points
                    # Clamp at zero so noise below the baseline cannot give a negative Pmin.
                    Pmin=max(min(ysmooth),0)
                    """if DEBUG==2 or (DEBUG and not self.rendering):
                        xAxis={"data":(xCurrMode*1e9,xCurrMode*1e9),"label":"wavelength [nm]"}
                        yAxis={"data":(yCurrMode,ysmooth),"lineProp":("xk","ko"),"label":"Mode intensity [a.u.]"}
                        plotDictionary={"x":xAxis,"y":yAxis}
                        self.emit(QtCore.SIGNAL("plotDataReady"),plotDictionary)
                        self.rendering=True"""
                else:
                    Pmin=min(yCurrMode)
                # NOTE(review): Pmax/Pmin computed above are not passed on; the
                # per-mode methods below recompute them from yCurrMode.
                # Set the gain using Hakki-Paoli or Hakki-Paoli-Cassidy method or nonlinear curve fit from Wang, Cassidy paper
                if METHOD == "maxmin":
                    # Standard Hakki-Paoli:
                    currModeWavelength,currModeGain=self.maxMinGain(xCurrMode,yCurrMode)
                elif METHOD == "modesum":
                    # Cassidy modification:
                    currModeWavelength,currModeGain=self.modeSumGain(xCurrMode,yCurrMode)
                elif METHOD == "hybrid":
                    # Cassidy modification close to threshold, Standard Hakki-Paoli gain everywhere else
                    currModeWavelength,currModeGain=self.maxMinGain(xCurrMode,yCurrMode)
                    # If gain close to threshold then use Cassidy modification
                    mirrorLoss=1/2/self.param["L"]*log(1/self.param["R1"]/self.param["R2"])
                    if currModeGain > 0.3*mirrorLoss:
                        currModeWavelength,currModeGain=self.modeSumGain(xCurrMode,yCurrMode)
                elif METHOD == "fit":
                    # Nonlinear curve-fit method:
                    currModeWavelength,currModeGain=self.modeFitGain(xCurrMode,yCurrMode)
                # Add calculated gain for current mode to main array if it wasn't skipped
                if currModeGain!=None:
                    # add the gain for current mode to main array
                    modeGain=append(modeGain,currModeGain)
                    # add wavelength for current mode as the wavelength at central peak
                    modeWavelength=append(modeWavelength,currModeWavelength)
                else:
                    # Gain could not be computed for this mode: record NaN so the
                    # wavelength axis stays aligned with the gain array.
                    modeGain=append(modeGain,NaN)
                    # add wavelength for current mode as the wavelength at central peak
                    modeWavelength=append(modeWavelength,currModeWavelength)
            except (RuntimeError,ValueError) as e:
                # don't add the mode to list if there was a runtime error calculating the gain
                print(e.args[0])
        # TODO: I need to implement convolution to improve the accuracy, and averaging to improve minima calculation
        return (modeWavelength,modeGain)

    def maxMinGain(self,modeLambda,modeI):
        """ Return the standard Hakki-Paoli (max/min) Gain """
        Pmax=modeI.max()
        Pmin=modeI.min()
        if (Pmax-Pmin)>0:
            avgModeGain=-(1/self.param["L"])*log(sqrt(self.param["R1"]*self.param["R2"])*(sqrt(Pmax)+sqrt(Pmin))/(sqrt(Pmax)-sqrt(Pmin)))
        else:
            # Flat mode (no contrast): gain is undefined; caller records NaN.
            avgModeGain=None
        # Report the wavelength at the mode's intensity maximum.
        avgModeLambda=modeLambda[modeI==Pmax][0]
        return (avgModeLambda,avgModeGain)

    def modeSumGain(self,modeLambda,modeI):
        """ Return the modified Cassidy version of Hakki-Paoli Gain (mode sum) """
        Pmax=modeI.max()
        Pmin=modeI.min()
        if Pmin>0:
            avgModeGain = -(1/self.param["L"])*log(sqrt(self.param["R1"]*self.param["R2"])*((sum(modeI)/Pmin/size(modeI) + 1)/
                                                                                            (sum(modeI)/Pmin/size(modeI) - 1)))
        else:
            # Pmin of zero would divide by zero; gain undefined for this mode.
            avgModeGain=None
        avgModeLambda=modeLambda[modeI==Pmax][0]
        return (avgModeLambda,avgModeGain)

    def modeFitGain(self,modeLambda,modeI):
        """ Return the mode gain by fitting the mode to ideal FP resonator:
        Wang, H., & Cassidy, D. T. (2005). Gain measurements of Fabry-Perot semiconductor lasers using a nonlinear least-squares fitting method.
        Quantum Electronics, IEEE Journal of, 41(4), 532-540."""
        # NOTE(review): this method references names that are not defined in
        # its scope (x, startIdx, stopIdx, xCurrMode, yCurrMode) and returns
        # avgModeLambda, which is never assigned (avgModeWavelength is).
        # As written it raises NameError when METHOD == "fit" -- needs fixing
        # before that path can be used.
        Pmax=modeI.max()
        Pmin=modeI.min()
        # Use standard Hakki-Paoli calculation as starting point for nonlinear fit
        PRG0=(sqrt(Pmax)-sqrt(Pmin))/(sqrt(Pmax)+sqrt(Pmin))
        # Define the starting point for optimization [PRG,lambda0,n,C,beta,gamma]
        lambda0=modeLambda[modeI==Pmax][0]
        #p0=[G0*sqrt(R1*R2),x[maxIdx[modeIdx+1]],n,Pmax*(1+G0*sqrt(R1*R2))**2,0,0]
        p0=[PRG0,lambda0,self.param["n"],Pmax*(1-PRG0)**2]
        # Do nonlinear curve fit to modeFitFunc and return the fit parameters
        modeFitFunc=self.makeModeFitFunc(self.param["L"],x,startIdx,stopIdx)
        p=optimize.curve_fit(modeFitFunc,xCurrMode,yCurrMode,p0)[0]
        avgModeGain = (1/self.param["L"])*log(p[0]/sqrt(self.param["R1"]*self.param["R2"]))
        avgModeWavelength=p[1]
        QtCore.QCoreApplication.processEvents()
        if DEBUG==2 or (DEBUG and not self.rendering):
            xAxis={"data":(modeLambda*1e9,modeLambda*1e9,array([p[1],p[1]])*1e9),"label":"Wavelength [nm]"}
            yAxis={"data":(modeI/max(modeI),modeFitFunc(modeLambda,*p)/max(modeI),array([min(modeI),max(modeI)])/max(modeI)),"lineProp":("xk","bo-",":^"),"label":"Mode intensity [a.u.]"}
            plotDictionary={"x":xAxis,"y":yAxis}
            self.rendering=True
            self.plotDataReady.emit(plotDictionary)
        return (avgModeLambda,avgModeGain)


    def makeModeFitFunc(self,L,xAll=None,startIdx=None,stopIdx=None):
        """ scipy.optimize.curve_fit doesn't let us pass additional arguments, so we use Currying via this intermediary function to give xAll which represents the whole spectrum across all modes."""
        def modeFitFunc(xm,*param):
            """ Does least-squres fit of fabryPerotFunc for a single Fabry-Perot mode """
            # Calculate the Fabry-Perot spectrum for ALL modes using the input parameters
            return self.fabryPerotFunc(xm,L,*param)
        def modeFitFuncConv(xm,*param):
            """ Does least-squres fit of fabryPerotFunc convolved with responseFunc to the data for a single Fabry-Perot mode """
            # Calculate the Fabry-Perot spectrum for ALL modes using the input parameters
            fpSpectrum=self.fabryPerotFunc(xAll,L,*param)
            # Convolve fbSpectrum with resonseFunc
            yhat=convolve(fpSpectrum,self.responseFunc(xm,param[1]),'same') # 'same' does the same as taking the region [x0Idx:(x0Idx+len(x))] with x0Idx=(abs(x-x0)==min(abs(x-x0))).nonzero()[0]
            # trim off the current mode from the data
            ymhat=yhat[startIdx:stopIdx]
            return ymhat
        # Return a different function depending on whether or not convolution was specified
        if CONVOLVE:
            return modeFitFuncConv
        else:
            return modeFitFunc

    def fabryPerotFunc(self,x,L,*param):
        """ Function which defines what the Fabry-Perot mode function should look like. This function is copied directly from the paper:
        ' Gain Measurements of Fabry-Perot Semiconductor Lasers Using a Nonlinear Least-Sqares Fitting Method in IEEE JQE vol. 41, 532 by Wang and Cassidy"""
        PRG0=param[0] # product of RG at lambda0
        x0=param[1] # wavelength at center of the mode
        n=param[2] # effective mode index
        C0=param[3] # value of fitting parameter related to the Einstein B coefficient at lambda0
        #beta=param[4] # linear slope of change in gain over the mode
        #gamma=param[5] # linear slope of change in fitting parameter C over the mode
        PRG=PRG0#+beta*(x-x0)
        C=C0#+gamma*(x-x0)
        denominator=(1-PRG)**2+4*PRG*sin(2*pi*n*L*(1/x-1/x0))**2
        I=C/denominator # calculated intensity
        return I

    def responseFunc(self,x,x0,sigma=25e-12):
        """ Gaussian response function for the spectrometer which can be used as a convolution kernel """
        # Normalized so the kernel sums to 1 (preserves total power under convolution).
        m=exp(-(x-x0)**2/sigma**2)
        return m/sum(m)

    @QtCore.pyqtSlot()
    def readyToDraw(self):
        """ Slot which allows the figure canvas to say when it's ready to draw again """
        self.rendering=False
# Some helper methods which can be imported from the module
def peakDetect(y):
    """Return the indices of every local maximum of y, with no filtering.

    A sample counts as a peak when the first derivative either is exactly
    zero and then turns negative (plateau edge), or changes sign between
    two samples while decreasing (peak between samples).
    """
    d1 = diff(y)
    left, right = d1[:-1], d1[1:]
    # Case 1: derivative exactly zero, followed by a negative step.
    flat_then_down = (left == 0) & (right < 0)
    # Case 2: derivative flips sign between samples while decreasing.
    sign_flip_down = (left * right < 0) & ((right - left) < 0)
    return (flat_then_down | sign_flip_down).nonzero()[0] + 1
def peakClean(x,y,maxIdx,xth=MIN_MODE_SPACING,yth=None):
    """Filter peak indices produced by peakDetect.

    Whenever two adjacent peaks lie closer together (in x units) than xth,
    the one with the smaller y value is discarded.  If yth is given, a peak
    is additionally kept only when the normalized peak-to-peak span of y
    between it and the next peak exceeds yth.

    Parameters:
        x, y   -- the data arrays the peak indices refer to
        maxIdx -- candidate peak indices (numpy array)
        xth    -- minimum allowed x spacing between adjacent peaks
        yth    -- optional minimum normalized peak-to-peak y difference

    Returns the cleaned collection of peak indices.
    """
    # Collapse pairs of peaks closer than xth, keeping the taller one.
    # i only advances when the current pair is far enough apart, so newly
    # adjacent pairs created by a deletion are re-checked.
    i = 1
    while i < len(maxIdx):
        dx = x[maxIdx[i]] - x[maxIdx[i-1]]
        if dx < xth:
            if y[maxIdx[i]] > y[maxIdx[i-1]]:
                maxIdx = delete(maxIdx, i-1)
            else:
                maxIdx = delete(maxIdx, i)
        else:
            i += 1
    # If specified, also require the normalized peak-to-peak difference of y
    # between consecutive peaks to exceed yth.
    # NOTE(review): this pass iterates over pairs and never appends the final
    # peak index -- confirm that always dropping the last mode is intended.
    if yth is not None:
        maxIdxClean = []
        for i in range(len(maxIdx) - 1):
            dy = (max(y[maxIdx[i]:maxIdx[i+1]]) - min(y[maxIdx[i]:maxIdx[i+1]])) / max(y)
            if dy > yth:
                maxIdxClean.append(maxIdx[i])
        maxIdx = maxIdxClean
return maxIdx | [
"from __future__ import division\n",
"from PyQt4 import QtCore\n",
"from numpy import *\n",
"from scipy import optimize\n",
"from scipy import constants as scipycsts\n",
"import pylab\n",
"from filter import savitzky_golay\n",
"\n",
"# Define some global variables\n",
"DEBUG=0 # Debug mode can take values (0,1,2). 0 plots no intermediary results. 1 plots intermediary plots when the canvas is free. 2 plots all intermediary plots\n",
"METHOD='hybrid' # Calculation method: ('maxmin','modesum','hybrid','fit')\n",
"CONVOLVE=False # Whether to convolve the fit function with the response function when METHOD=='fit'\n",
"MINSMOOTH=None # use 5 sample smoothing in calculating the minimum. Set to None to disable\n",
"MIN_MODE_INTENSITY=20/65536 # Minimum difference in counts between max and min value of a mode to be recognized as legit\n",
"#DEFAULT_PARAM={\"R1\":.997,\"R2\":.322,\"L\":375e-6,\"n\":3.619}\n",
"#DEFAULT_PARAM={\"R1\":.94,\"R2\":.3,\"L\":600e-6,\"n\":3.619}\n",
"DEFAULT_PARAM={\"R1\":.94,\"R2\":.3,\"L\":375e-6,\"n\":3.619}\n",
"MIN_MODE_SPACING=0.4e-9*(375e-6/DEFAULT_PARAM[\"L\"]) # Minimum spacing in nm between each mode used for peakClean algorithm\n",
"\n",
"class HakkiPaoli(QtCore.QObject):\n",
" updateProgress=QtCore.pyqtSignal(float)\n",
" plotDataReady=QtCore.pyqtSignal(dict)\n",
" \"\"\" Class which holds the code for Hakki-Paoli gain calculation \"\"\"\n",
" def __init__(self,x,y,param=DEFAULT_PARAM,parent=None):\n",
" super(HakkiPaoli, self).__init__()\n",
" self.x=x\n",
" self.y=y\n",
" self.param=param # holds some parameters defining the cavity\n",
" self.rendering=False # flag which says whether the canvas is ready to draw again\n",
" self.parent=parent\n",
"\n",
" def restoreThreadAffinity(self):\n",
" \"\"\" move back to parent thread if multithreading was invoked by the parent \"\"\"\n",
" self.moveToThread(self.parent.thread())\n",
"\n",
" def gainCalculation(self):\n",
" \"\"\" Tries to calculate the gain from the spectrum using the Hakki-Paoli technique. R1 and R2 are the reflectivities of the mirrors \"\"\"\n",
" # Detect all of the peaks in y\n",
" maxIdxRaw=peakDetect(self.y)\n",
" maxIdx=peakClean(self.x,self.y,maxIdxRaw)\n",
" # calculate cavity length using FSR[nm]=lambda0^2/2/n/L (note: this is not accurate since we need to use the unknown wavlength dependent effective index)\n",
" #L=mean((x[maxIdx[1:]]-diff(x[maxIdx])/2)**2/2/n/diff(x[maxIdx]))\n",
" # Loop through each mode and calculate Pmax, Pmin, and the lambda we use for that mode idx\n",
" modeGain=array([])\n",
" modeWavelength=array([])\n",
" for modeIdx in range(len(maxIdx)-2):\n",
" try:\n",
" self.updateProgress.emit(modeIdx/(len(maxIdx)-2))\n",
" # Slice off the section in x and y corresponding to current mode (ignore first two modes)\n",
" modeSpacing=(maxIdx[modeIdx+2]-maxIdx[modeIdx])/2 # average mode spacing based on next two peaks\n",
" startIdx=maxIdx[modeIdx+1]-round(modeSpacing/2)\n",
" stopIdx=stopIdx=maxIdx[modeIdx+1]+round(modeSpacing/2)\n",
" xCurrMode=self.x[startIdx:stopIdx]\n",
" yCurrMode=self.y[startIdx:stopIdx]\n",
" # Set Pmax as power at central peak of mode\n",
" Pmax=self.y[maxIdx[modeIdx+1]]\n",
" # Set Pmin as minimum power over the whole space. This is less accurate, but more robust than making assumptions about where the min should be and averaging\n",
" if MINSMOOTH!=None:\n",
" ysmooth=savitzky_golay(yCurrMode,MINSMOOTH,1,0) # linear polynomial with MINSMOOTH points\n",
" Pmin=max(min(ysmooth),0)\n",
" \"\"\"if DEBUG==2 or (DEBUG and not self.rendering):\n",
" xAxis={\"data\":(xCurrMode*1e9,xCurrMode*1e9),\"label\":\"wavelength [nm]\"}\n",
" yAxis={\"data\":(yCurrMode,ysmooth),\"lineProp\":(\"xk\",\"ko\"),\"label\":\"Mode intensity [a.u.]\"}\n",
" plotDictionary={\"x\":xAxis,\"y\":yAxis}\n",
" self.emit(QtCore.SIGNAL(\"plotDataReady\"),plotDictionary)\n",
" self.rendering=True\"\"\"\n",
" else:\n",
" Pmin=min(yCurrMode)\n",
" # Set the gain using Hakki-Paoli or Hakki-Paoli-Cassidy method or nonlinear curve fit from Wang, Cassidy paper\n",
" if METHOD == \"maxmin\":\n",
" # Standard Hakki-Paoli: \n",
" currModeWavelength,currModeGain=self.maxMinGain(xCurrMode,yCurrMode)\n",
" elif METHOD == \"modesum\":\n",
" # Cassidy modification:\n",
" currModeWavelength,currModeGain=self.modeSumGain(xCurrMode,yCurrMode)\n",
" elif METHOD == \"hybrid\":\n",
" # Cassidy modification close to threshold, Standard Hakki-Paoli gain everywhere else\n",
" currModeWavelength,currModeGain=self.maxMinGain(xCurrMode,yCurrMode)\n",
" # If gain close to threshold then use Cassidy modification\n",
" mirrorLoss=1/2/self.param[\"L\"]*log(1/self.param[\"R1\"]/self.param[\"R2\"])\n",
" if currModeGain > 0.3*mirrorLoss:\n",
" currModeWavelength,currModeGain=self.modeSumGain(xCurrMode,yCurrMode)\n",
" elif METHOD == \"fit\":\n",
" # Nonlinear curve-fit method:\n",
" currModeWavelength,currModeGain=self.modeFitGain(xCurrMode,yCurrMode)\n",
" # Add calculated gain for current mode to main array if it wasn't skipped\n",
" if currModeGain!=None:\n",
" # add the gain for current mode to main array\n",
" modeGain=append(modeGain,currModeGain)\n",
" # add wavelength for current mode as the wavelength at central peak\n",
" modeWavelength=append(modeWavelength,currModeWavelength)\n",
" else:\n",
" # add the gain for current mode to main array\n",
" modeGain=append(modeGain,NaN)\n",
" # add wavelength for current mode as the wavelength at central peak\n",
" modeWavelength=append(modeWavelength,currModeWavelength)\n",
" except (RuntimeError,ValueError) as e:\n",
" # don't add the mode to list if there was a runtime error calculating the gain\n",
" print(e.args[0])\n",
" # TODO: I need to implement convolution to improve the accuracy, and averaging to improve minima calculation\n",
" return (modeWavelength,modeGain)\n",
"\n",
" def maxMinGain(self,modeLambda,modeI):\n",
" \"\"\" Return the standard Hakki-Paoli (max/min) Gain \"\"\"\n",
" Pmax=modeI.max()\n",
" Pmin=modeI.min()\n",
" if (Pmax-Pmin)>0:\n",
" avgModeGain=-(1/self.param[\"L\"])*log(sqrt(self.param[\"R1\"]*self.param[\"R2\"])*(sqrt(Pmax)+sqrt(Pmin))/(sqrt(Pmax)-sqrt(Pmin)))\n",
" else:\n",
" avgModeGain=None\n",
" avgModeLambda=modeLambda[modeI==Pmax][0]\n",
" return (avgModeLambda,avgModeGain)\n",
"\n",
" def modeSumGain(self,modeLambda,modeI):\n",
" \"\"\" Return the modified Cassidy version of Hakki-Paoli Gain (mode sum) \"\"\"\n",
" Pmax=modeI.max()\n",
" Pmin=modeI.min()\n",
" if Pmin>0:\n",
" avgModeGain = -(1/self.param[\"L\"])*log(sqrt(self.param[\"R1\"]*self.param[\"R2\"])*((sum(modeI)/Pmin/size(modeI) + 1)/\n",
" (sum(modeI)/Pmin/size(modeI) - 1)))\n",
" else:\n",
" avgModeGain=None\n",
" avgModeLambda=modeLambda[modeI==Pmax][0]\n",
" return (avgModeLambda,avgModeGain)\n",
"\n",
" def modeFitGain(self,modeLambda,modeI):\n",
" \"\"\" Return the mode gain by fitting the mode to ideal FP resonator:\n",
" Wang, H., & Cassidy, D. T. (2005). Gain measurements of Fabry-Perot semiconductor lasers using a nonlinear least-squares fitting method.\n",
" Quantum Electronics, IEEE Journal of, 41(4), 532-540.\"\"\"\n",
" Pmax=modeI.max()\n",
" Pmin=modeI.min()\n",
" # Use standard Hakki-Paoli calculation as starting point for nonlinear fit\n",
" PRG0=(sqrt(Pmax)-sqrt(Pmin))/(sqrt(Pmax)+sqrt(Pmin))\n",
" # Define the starting point for optimization [PRG,lambda0,n,C,beta,gamma]\n",
" lambda0=modeLambda[modeI==Pmax][0]\n",
" #p0=[G0*sqrt(R1*R2),x[maxIdx[modeIdx+1]],n,Pmax*(1+G0*sqrt(R1*R2))**2,0,0]\n",
" p0=[PRG0,lambda0,self.param[\"n\"],Pmax*(1-PRG0)**2]\n",
" # Do nonlinear curve fit to modeFitFunc and return the fit parameters\n",
" modeFitFunc=self.makeModeFitFunc(self.param[\"L\"],x,startIdx,stopIdx)\n",
" p=optimize.curve_fit(modeFitFunc,xCurrMode,yCurrMode,p0)[0]\n",
" avgModeGain = (1/self.param[\"L\"])*log(p[0]/sqrt(self.param[\"R1\"]*self.param[\"R2\"]))\n",
" avgModeWavelength=p[1]\n",
" QtCore.QCoreApplication.processEvents()\n",
" if DEBUG==2 or (DEBUG and not self.rendering):\n",
" xAxis={\"data\":(modeLambda*1e9,modeLambda*1e9,array([p[1],p[1]])*1e9),\"label\":\"Wavelength [nm]\"}\n",
" yAxis={\"data\":(modeI/max(modeI),modeFitFunc(modeLambda,*p)/max(modeI),array([min(modeI),max(modeI)])/max(modeI)),\"lineProp\":(\"xk\",\"bo-\",\":^\"),\"label\":\"Mode intensity [a.u.]\"}\n",
" plotDictionary={\"x\":xAxis,\"y\":yAxis}\n",
" self.rendering=True\n",
" self.plotDataReady.emit(plotDictionary)\n",
" return (avgModeLambda,avgModeGain)\n",
"\n",
"\n",
" def makeModeFitFunc(self,L,xAll=None,startIdx=None,stopIdx=None):\n",
" \"\"\" scipy.optimize.curve_fit doesn't let us pass additional arguments, so we use Currying via this intermediary function to give xAll which represents the whole spectrum across all modes.\"\"\"\n",
" def modeFitFunc(xm,*param):\n",
" \"\"\" Does least-squres fit of fabryPerotFunc for a single Fabry-Perot mode \"\"\"\n",
" # Calculate the Fabry-Perot spectrum for ALL modes using the input parameters\n",
" return self.fabryPerotFunc(xm,L,*param)\n",
" def modeFitFuncConv(xm,*param):\n",
" \"\"\" Does least-squres fit of fabryPerotFunc convolved with responseFunc to the data for a single Fabry-Perot mode \"\"\"\n",
" # Calculate the Fabry-Perot spectrum for ALL modes using the input parameters\n",
" fpSpectrum=self.fabryPerotFunc(xAll,L,*param)\n",
" # Convolve fbSpectrum with resonseFunc\n",
" yhat=convolve(fpSpectrum,self.responseFunc(xm,param[1]),'same') # 'same' does the same as taking the region [x0Idx:(x0Idx+len(x))] with x0Idx=(abs(x-x0)==min(abs(x-x0))).nonzero()[0]\n",
" # trim off the current mode from the data\n",
" ymhat=yhat[startIdx:stopIdx]\n",
" return ymhat\n",
" # Return a different function depending on whether or not convolution was specified\n",
" if CONVOLVE:\n",
" return modeFitFuncConv\n",
" else:\n",
" return modeFitFunc\n",
"\n",
" def fabryPerotFunc(self,x,L,*param):\n",
" \"\"\" Function which defines what the Fabry-Perot mode function should look like. This function is copied directly from the paper:\n",
" ' Gain Measurements of Fabry-Perot Semiconductor Lasers Using a Nonlinear Least-Sqares Fitting Method in IEEE JQE vol. 41, 532 by Wang and Cassidy\"\"\"\n",
" PRG0=param[0] # product of RG at lambda0\n",
" x0=param[1] # wavelength at center of the mode\n",
" n=param[2] # effective mode index\n",
" C0=param[3] # value of fitting parameter related to the Einstein B coefficient at lambda0\n",
" #beta=param[4] # linear slope of change in gain over the mode\n",
" #gamma=param[5] # linear slope of change in fitting parameter C over the mode\n",
" PRG=PRG0#+beta*(x-x0)\n",
" C=C0#+gamma*(x-x0) \n",
" denominator=(1-PRG)**2+4*PRG*sin(2*pi*n*L*(1/x-1/x0))**2\n",
" I=C/denominator # calculated intensity\n",
" return I\n",
"\n",
" def responseFunc(self,x,x0,sigma=25e-12):\n",
" \"\"\" Gaussian response function for the spectrometer which can be used as a convolution kernel \"\"\"\n",
" m=exp(-(x-x0)**2/sigma**2)\n",
" return m/sum(m)\n",
"\n",
" @QtCore.pyqtSlot()\n",
" def readyToDraw(self):\n",
" \"\"\" Slot which allows the figure canvas to say when it's ready to draw again \"\"\"\n",
" self.rendering=False\n",
"\n",
"# Some helper methods which can be imported from the module\n",
"def peakDetect(y):\n",
" \"\"\" Given a vector y, return the indices of all the peaks without applying any filtering or special criteria \"\"\"\n",
" yd=diff(y) # calculate 1st derivative\n",
" # define maxima as points where there is a zero crossing of first derivative and first deriviative is decreasing\n",
" ydZero=(yd==0)[0:-1] # first check for the case where the first derivative is exactly zero\n",
" ydNegativeSoon=yd[1:]<0 # check if next sample of first derivative is negative\n",
" ydZeroCross=yd[0:-1]*yd[1:]<0 # check if first derivative crosses through zero (i.e. between samples)\n",
" ydDecreasing=(yd[1:]-yd[0:-1])<0 # check if first derivative is decreasing\n",
" maxIdx=((ydZero&ydNegativeSoon)|(ydZeroCross&ydDecreasing)).nonzero()[0]+1\n",
" return maxIdx\n",
"\n",
"def peakClean(x,y,maxIdx,xth=MIN_MODE_SPACING,yth=None):\n",
" \"\"\" runs through each of the indices for peaks in y data, and if any two points closer than xth, remove the point with smaller y value \"\"\"\n",
" i=1\n",
" while i<(len(maxIdx)):\n",
" dx=x[maxIdx[i]]-x[maxIdx[i-1]]\n",
" if dx<xth:\n",
" if y[maxIdx[i]] > y[maxIdx[i-1]]:\n",
" maxIdx=delete(maxIdx,i-1)\n",
" else:\n",
" maxIdx=delete(maxIdx,i)\n",
" else:\n",
" i=i+1\n",
" # if specified, also check that the peak to peak difference of y is bigger than yth\n",
" if yth!=None:\n",
" maxIdxClean=[]\n",
" for i in range(len(maxIdx)-1) :\n",
" dy=(max(y[maxIdx[i]:maxIdx[i+1]])-min(y[maxIdx[i]:maxIdx[i+1]]))/max(y)\n",
" if dy > yth:\n",
" maxIdxClean.append(maxIdx[i])\n",
" maxIdx=maxIdxClean\n",
"\n",
" return maxIdx"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012269938650306749,
0.02702702702702703,
0.03,
0.03296703296703297,
0.016129032258064516,
0.017241379310344827,
0.01818181818181818,
0.14814814814814814,
0.015267175572519083,
0,
0.029411764705882353,
0.022727272727272728,
0.023809523809523808,
0,
0.06666666666666667,
0,
0.058823529411764705,
0.058823529411764705,
0.028985507246376812,
0.033707865168539325,
0.037037037037037035,
0,
0,
0.011494252873563218,
0,
0,
0,
0.006993006993006993,
0,
0.02702702702702703,
0.06,
0.006172839506172839,
0.013513513513513514,
0.010101010101010102,
0.037037037037037035,
0.030303030303030304,
0,
0,
0,
0.009433962264150943,
0.02654867256637168,
0.015625,
0.028169014084507043,
0.0196078431372549,
0.0196078431372549,
0,
0.02127659574468085,
0.005780346820809248,
0.05555555555555555,
0.05454545454545454,
0.044444444444444446,
0,
0.010526315789473684,
0.008771929824561403,
0,
0,
0,
0,
0.025,
0.007874015748031496,
0,
0.015625,
0.0449438202247191,
0,
0,
0.044444444444444446,
0,
0.009523809523809525,
0.0449438202247191,
0,
0.021739130434782608,
0,
0.0425531914893617,
0,
0,
0.044444444444444446,
0.011111111111111112,
0.05128205128205128,
0,
0.03389830508474576,
0.011363636363636364,
0.025974025974025976,
0,
0,
0.04,
0.011363636363636364,
0.025974025974025976,
0.0196078431372549,
0.010526315789473684,
0,
0.008547008547008548,
0.024390243902439025,
0,
0.046511627906976744,
0,
0.04,
0.04,
0.038461538461538464,
0.014492753623188406,
0,
0.034482758620689655,
0.04081632653061224,
0.023255813953488372,
0,
0.045454545454545456,
0.012048192771084338,
0.04,
0.04,
0.05263157894736842,
0.015748031496062992,
0.016,
0,
0.034482758620689655,
0.04081632653061224,
0.023255813953488372,
0,
0.045454545454545456,
0,
0.006896551724137931,
0,
0.04,
0.04,
0.012048192771084338,
0.01639344262295082,
0.012195121951219513,
0.046511627906976744,
0.024096385542168676,
0.06779661016949153,
0,
0.05194805194805195,
0.058823529411764705,
0.010869565217391304,
0.03225806451612903,
0,
0.01818181818181818,
0.07407407407407407,
0.06951871657754011,
0.08163265306122448,
0.03125,
0,
0.023255813953488372,
0,
0,
0.07142857142857142,
0.005025125628140704,
0.027777777777777776,
0.011111111111111112,
0.011111111111111112,
0.038461538461538464,
0.05,
0.007692307692307693,
0.011111111111111112,
0.05172413793103448,
0,
0.024875621890547265,
0,
0.024390243902439025,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0.07317073170731707,
0.0072992700729927005,
0.006369426751592357,
0.01818181818181818,
0.015873015873015872,
0.0196078431372549,
0.018867924528301886,
0.013157894736842105,
0.02197802197802198,
0.1,
0.13333333333333333,
0.015384615384615385,
0.0392156862745098,
0,
0,
0.06521739130434782,
0.009433962264150943,
0.02857142857142857,
0,
0,
0,
0,
0.011235955056179775,
0.034482758620689655,
0,
0,
0.05263157894736842,
0.008547008547008548,
0.047619047619047616,
0.008547008547008548,
0.03,
0.03571428571428571,
0.027777777777777776,
0.025,
0.05063291139240506,
0,
0,
0.08771929824561403,
0.006993006993006993,
0.125,
0.037037037037037035,
0.02564102564102564,
0.05263157894736842,
0,
0.047619047619047616,
0,
0.05,
0,
0.05555555555555555,
0.011363636363636364,
0.1111111111111111,
0.043478260869565216,
0.025,
0.023809523809523808,
0,
0,
0.037037037037037035,
0,
0.058823529411764705
] | 232 | 0.022409 | false |
# twisted imports
from twisted.words.protocols import irc
from twisted.internet import reactor, protocol
from twisted.python import log
# system imports
import time, sys
from chat import chat
from comment import comment
from threading import Thread
import pygame
class TwitchMonitor(irc.IRCClient):
	"""Read-only IRC client that monitors Twitch chat and forwards messages."""

	# NOTE(review): "justinfan..." appears to be an anonymous read-only
	# Twitch login -- confirm against Twitch IRC documentation.
	nickname = "justinfan31415926535"

	def connectionMade(self):
		irc.IRCClient.connectionMade(self)

	def connectionLost(self, reason):
		irc.IRCClient.connectionLost(self, reason)

	# callbacks for events

	def signedOn(self):
		"""Called when client has successfully signed on to server."""
		# Join the channel configured on the factory.
		self.join(self.factory.channel)

	def privmsg(self, user, channel, msg):
		"""Called when the client receives a message; stores it via the shared chat object."""
		# user is only needed by the commented-out debug print below.
		user = user.split('!', 1)[0]
		#print("<%s> %s" % (user, msg))
		# self.twitch is attached to each protocol instance by MonitorFactory.
		self.twitch.addComment(msg)

	def action(self, user, channel, msg):
		"""Called when the client sees someone do an action (/me); stored like a normal message."""
		user = user.split('!', 1)[0]
		#print("* %s %s" % (user, msg))
		self.twitch.addComment(msg)
class MonitorFactory(protocol.ClientFactory):
	"""A factory for TwitchMonitor.

	A new protocol instance will be created each time we connect to the server.
	"""

	def __init__(self, channel, twitchChat):
		# channel: IRC channel name to join once signed on.
		# twitchChat: shared chat object that collects incoming messages.
		self.channel = channel
		self.twitch = twitchChat

	def buildProtocol(self, addr):
		# Hand each new connection the shared chat object so every
		# protocol instance appends to the same message store.
		p = TwitchMonitor()
		p.factory = self
		p.twitch = self.twitch
		return p

	def clientConnectionLost(self, connector, reason):
		"""If we get disconnected, reconnect to server."""
		connector.connect()

	def clientConnectionFailed(self, connector, reason):
		# Initial connection could not be made: report and stop the reactor.
		print "connection failed:", reason
		reactor.stop()
def runReactor(reactor):
	"""Thread target: drive the twisted event loop until it stops."""
	# Blocks until reactor.stop() is called elsewhere.
	reactor.run()
if __name__ == '__main__':
	channel = raw_input("Enter channel name: ");
	# Set up pygame and the shared chat object that renders messages.
	pygame.init()
	twitchFont = pygame.font.SysFont("helvetica", 24, bold=True)
	twitchChat = chat(twitchFont)
	# create factory protocol and application
	f = MonitorFactory(channel, twitchChat)
	# connect factory to this host and port
	reactor.connectTCP("irc.twitch.tv", 6667, f)
	# Run the twisted reactor in a daemon thread so pygame keeps the main
	# thread; daemon=True lets the process exit when the pygame loop ends.
	reactorThread = Thread(target=runReactor, args=(reactor,))
	reactorThread.daemon = True
	reactorThread.start()
	#reactor.run()
	# Set the height and width of the screen
	SIZE = [1184, 500]
	screen = pygame.display.set_mode(SIZE)
	#screen2 = pygame.display.set_mode((400, 300))
	pygame.display.set_caption("Twitch Chat")
	clock = pygame.time.Clock()
	# Loop until the user clicks the close button.
	GREEN = [00, 100, 0]
	done = False
	while not done:
		for event in pygame.event.get(): # User did something
			if event.type == pygame.QUIT: # If user clicked close
				done = True # Flag that we are done so we exit this loop
		# Set the screen background
		screen.fill(GREEN)
		# Draw each chat comment at its current position.
		for comment in twitchChat.comments:
			screen.blit(comment.getComment(), (comment.x, comment.y))
		# Advance the chat animation/state for this frame.
		twitchChat.run()
		# Go ahead and update the screen with what we've drawn.
		pygame.display.flip()
		# Cap the loop at 60 frames per second.
		clock.tick(60)
	# Be IDLE friendly. If you forget this line, the program will 'hang'
	# on exit.
pygame.quit() | [
"# twisted imports\n",
"from twisted.words.protocols import irc\n",
"from twisted.internet import reactor, protocol\n",
"from twisted.python import log\n",
"\n",
"# system imports\n",
"import time, sys\n",
"from chat import chat\n",
"from comment import comment\n",
"from threading import Thread\n",
"import pygame\n",
"\n",
"class TwitchMonitor(irc.IRCClient):\n",
"\t\"\"\"Client to monitor Twitch chat\"\"\"\n",
"\t\n",
"\tnickname = \"justinfan31415926535\"\n",
"\t\n",
"\tdef connectionMade(self):\n",
"\t\tirc.IRCClient.connectionMade(self)\n",
"\n",
"\tdef connectionLost(self, reason):\n",
"\t\tirc.IRCClient.connectionLost(self, reason)\n",
"\n",
"\t# callbacks for events\n",
"\n",
"\tdef signedOn(self):\n",
"\t\t\"\"\"Called when client has succesfully signed on to server.\"\"\"\n",
"\t\tself.join(self.factory.channel)\n",
"\n",
"\tdef privmsg(self, user, channel, msg):\n",
"\t\t\"\"\"This will get called when the client receives a message.\"\"\"\n",
"\t\tuser = user.split('!', 1)[0]\n",
"\t\t#print(\"<%s> %s\" % (user, msg))\n",
"\t\tself.twitch.addComment(msg)\n",
"\n",
"\tdef action(self, user, channel, msg):\n",
"\t\t\"\"\"This will get called when the client sees someone do an action.\"\"\"\n",
"\t\tuser = user.split('!', 1)[0]\n",
"\t\t#print(\"* %s %s\" % (user, msg))\n",
"\t\tself.twitch.addComment(msg)\n",
"\n",
"\n",
"\n",
"class MonitorFactory(protocol.ClientFactory):\n",
"\t\"\"\"A factory for TwitchMonitor.\n",
"\n",
"\tA new protocol instance will be created each time we connect to the server.\n",
"\t\"\"\"\n",
"\n",
"\tdef __init__(self, channel, twitchChat):\n",
"\t\tself.channel = channel\n",
"\t\tself.twitch = twitchChat\n",
"\n",
"\tdef buildProtocol(self, addr):\n",
"\t\tp = TwitchMonitor()\n",
"\t\tp.factory = self\n",
"\t\tp.twitch = self.twitch\n",
"\t\treturn p\n",
"\n",
"\tdef clientConnectionLost(self, connector, reason):\n",
"\t\t\"\"\"If we get disconnected, reconnect to server.\"\"\"\n",
"\t\tconnector.connect()\n",
"\n",
"\tdef clientConnectionFailed(self, connector, reason):\n",
"\t\tprint \"connection failed:\", reason\n",
"\t\treactor.stop()\n",
"\n",
"\n",
"def runReactor(reactor):\n",
"\treactor.run()\n",
"\treturn\n",
"\n",
"if __name__ == '__main__':\n",
"\tchannel = raw_input(\"Enter channel name: \");\n",
"\tpygame.init()\n",
"\ttwitchFont = pygame.font.SysFont(\"helvetica\", 24, bold=True)\n",
"\ttwitchChat = chat(twitchFont)\n",
"\t# create factory protocol and application\n",
"\tf = MonitorFactory(channel, twitchChat)\n",
"\n",
"\t# connect factory to this host and port\n",
"\treactor.connectTCP(\"irc.twitch.tv\", 6667, f)\n",
"\n",
"\t# run bot\n",
"\treactorThread = Thread(target=runReactor, args=(reactor,))\n",
"\treactorThread.daemon = True\n",
"\treactorThread.start()\n",
"\t#reactor.run()\n",
"\t\t\n",
"\t# Set the height and width of the screen\n",
"\tSIZE = [1184, 500]\n",
"\t \n",
"\tscreen = pygame.display.set_mode(SIZE)\n",
"\t#screen2 = pygame.display.set_mode((400, 300))\n",
"\tpygame.display.set_caption(\"Twitch Chat\")\n",
"\tclock = pygame.time.Clock()\n",
"\t \n",
"\t# Loop until the user clicks the close button.\n",
"\tGREEN = [00, 100, 0]\n",
"\tdone = False\n",
"\twhile not done:\n",
"\t\tfor event in pygame.event.get(): # User did something\n",
"\t\t\tif event.type == pygame.QUIT: # If user clicked close\n",
"\t\t\t\tdone = True # Flag that we are done so we exit this loop\n",
"\t\t\n",
"\t\t# Set the screen background\n",
"\t\tscreen.fill(GREEN)\n",
"\t\t\n",
"\t\t# Draw words\n",
"\t\tfor comment in twitchChat.comments:\n",
"\t\t\tscreen.blit(comment.getComment(), (comment.x, comment.y))\n",
"\t\t\n",
"\t\ttwitchChat.run()\n",
"\t\t\n",
"\t\t# Go ahead and update the screen with what we've drawn.\n",
"\t\tpygame.display.flip()\n",
"\t\tclock.tick(60)\n",
"\t \n",
"\t# Be IDLE friendly. If you forget this line, the program will 'hang'\n",
"\t# on exit.\n",
"\tpygame.quit()"
] | [
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0.027777777777777776,
0.02702702702702703,
1,
0.02857142857142857,
1,
0.037037037037037035,
0.02702702702702703,
0,
0.02857142857142857,
0.022222222222222223,
0,
0.041666666666666664,
0,
0.047619047619047616,
0.015625,
0.029411764705882353,
0,
0.025,
0.015384615384615385,
0.03225806451612903,
0.058823529411764705,
0.03333333333333333,
0,
0.02564102564102564,
0.013888888888888888,
0.03225806451612903,
0.058823529411764705,
0.03333333333333333,
0,
0,
0,
0.021739130434782608,
0.030303030303030304,
0,
0.012987012987012988,
0.2,
0,
0.023809523809523808,
0.04,
0.037037037037037035,
0,
0.03125,
0.045454545454545456,
0.05263157894736842,
0.04,
0.09090909090909091,
0,
0.019230769230769232,
0.018867924528301886,
0.045454545454545456,
0,
0.018518518518518517,
0.02702702702702703,
0.058823529411764705,
0,
0,
0,
0.06666666666666667,
0.125,
0,
0.037037037037037035,
0.043478260869565216,
0.06666666666666667,
0.016129032258064516,
0.03225806451612903,
0.023255813953488372,
0.024390243902439025,
0,
0.024390243902439025,
0.021739130434782608,
0,
0.09090909090909091,
0.016666666666666666,
0.034482758620689655,
0.043478260869565216,
0.125,
0.6666666666666666,
0.023809523809523808,
0.05,
1,
0.025,
0.041666666666666664,
0.023255813953488372,
0.034482758620689655,
1,
0.020833333333333332,
0.045454545454545456,
0.07142857142857142,
0.058823529411764705,
0.017241379310344827,
0.017241379310344827,
0.015873015873015872,
0.6666666666666666,
0.03333333333333333,
0.047619047619047616,
0.6666666666666666,
0.06666666666666667,
0.02631578947368421,
0.01639344262295082,
0.6666666666666666,
0.05263157894736842,
0.6666666666666666,
0.017241379310344827,
0.041666666666666664,
0.058823529411764705,
1,
0.014285714285714285,
0.08333333333333333,
0.14285714285714285
] | 121 | 0.096738 | false |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from collections import defaultdict
from difflib import SequenceMatcher
from nltk.tokenize import word_tokenize
import networkx as nx
import os
import sys
import time
from HGDrawGraph import draw_graph
import numpy
import warnings
# noinspection PyCompatibility
reload(sys)
sys.setdefaultencoding('utf8')
warnings.simplefilter(action='ignore', category=RuntimeWarning)
def text_length(text, tokenizer):
"""Return the length of a given string.
:param text: location of a .txt file
:type text: string
:param tokenizer: the tokenizer that should be used (nltk or simple)
:type tokenizer: string
:return: number of tokens
:rtype: int
"""
text = open(text, 'r')
if tokenizer == 'nltk':
return len(word_tokenize(text.read()))
elif tokenizer == 'simple':
tokens = 0
for line in text:
for word in line.split():
tokens += 1
return tokens
def lines_in_file(file):
"""Return the number of lines in a given text file.
:param file: location of a .txt file
:type file: string
:return: number of lines the the file
:rtype: int
"""
with open(file) as f:
for i, l in enumerate(f):
pass
return i + 1
def levenshtein(a, b):
"""Calculate the levenhstein distance between two strings.
:param a: first word
:type a: string
:param b: second word
:type b: string
:return: levenshtein distance between a and b
:rtype: int
"""
if len(a) < len(b):
return levenshtein(b, a)
if len(b) == 0:
return len(a)
previous_row = range(len(b) + 1)
for i, c1 in enumerate(a):
current_row = [i + 1]
for j, c2 in enumerate(b):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
def write_finding(search_term, text, concordance, match_algorithm, confidence, project_name):
"""Write the findings to a csv file.
:param search_term: the word to look for
:type search_term: string
:param text: location of a .txt file
:type text: string
:param concordance: a concordance string
:type concordance: string
:param match_algorithm: the matching algorithm
:type match_algorithm: string
:param confidence: the confidence
:type confidence: float
:param project_name: the project's name
:type project_name: string
:return: none
"""
if not os.path.isfile('output/' + project_name + '/findings.csv'):
with open('output/' + project_name + '/findings.csv', 'a') as findings_file:
findings_file.write('search_term;search_file;match_algorithm;confidence;concordance;project_name\n')
with open('output/' + project_name + '/findings.csv', 'a') as findings_file:
findings_file.write(
search_term + ';' + text.name + ';' + match_algorithm + ';' + str(confidence) + ';' + concordance.replace(
';', ',') + ';' + project_name + '\n')
def search_word_count(search_term, text, match_threshold, match_algorithm, project_name, ignore_case):
"""Return a dictionary wwith the number of findings and a confidence using the simple tokenizer.
:param search_term: the word to look for
:type search_term: string
:param text: a file object pointing to a .txt file
:type text: file
:param match_threshold: the matching threshold
:type match_algorithm: float
:param match_algorithm: the matching algorithm
:type match_algorithm: string
:param project_name: the project's name
:type project_name: string
:param ignore_case: a boolean indicating wether case should be ignored or not
:type ignore_case: int
:return: dictionary containing the instances and the average_confidence
:rtype: dict
"""
instances = 0
confidence = []
for line in text:
for word in line.split():
if (ignore_case == 1):
compare_word = word.lower()
compare_search_term = search_term.lower()
else:
compare_word = word
compare_search_term = search_term
if match_algorithm == 'gestalt':
if SequenceMatcher(None, compare_search_term, compare_word).ratio() >= match_threshold:
confidence.append(SequenceMatcher(None, search_term, word).ratio())
instances += 1
write_finding(search_term, text, line.rstrip(), match_algorithm,
SequenceMatcher(None, search_term, word).ratio(),
project_name)
else:
if levenshtein(compare_search_term, compare_word) <= match_threshold:
confidence.append(levenshtein(search_term, word))
instances += 1
write_finding(search_term, text, line.rstrip(), match_algorithm, levenshtein(search_term, word),
project_name)
if round(numpy.mean(numpy.fromiter(iter(confidence), dtype=float)), 2) > 0:
average_confidence = round(numpy.mean(numpy.fromiter(iter(confidence), dtype=float)), 2)
else:
average_confidence = 0
return {'instances': instances, 'average_confidence': average_confidence}
def search_word_count_nltk(search_term, text, match_threshold, match_algorithm, project_name, ignore_case):
"""Return a dictionary wwith the number of findings and a confidence using the nltk tokenizer.
:param search_term: the word to look for
:type search_term: string
:param text: a file object pointing to a .txt file
:type text: file
:param match_threshold: the matching threshold
:type match_algorithm: float
:param match_algorithm: the matching algorithm
:type match_algorithm: string
:param project_name: the project's name
:type project_name: string
:param ignore_case: a boolean indicating wether case should be ignored or not
:type ignore_case: int
:return: dictionary containing the instances and the average_confidence
:rtype: dict
"""
instances = 0
confidence = []
current_word = 0
words = word_tokenize(text.read())
for word in words:
if (ignore_case == 1):
compare_word = word.lower()
compare_search_term = search_term.lower()
else:
compare_word = word
compare_search_term = search_term
# Calculate Concordance
concordance = ''
for x in range(-7, 7):
try:
concordance = concordance + words[current_word + x] + ' '
except:
pass
current_word += 1
if match_algorithm == 'gestalt':
if SequenceMatcher(None, compare_search_term, compare_word).ratio() >= match_threshold:
confidence.append(SequenceMatcher(None, search_term, word).ratio())
instances += 1
write_finding(search_term, text, concordance.rstrip(), match_algorithm,
SequenceMatcher(None, search_term, word).ratio(),
project_name)
else:
if levenshtein(compare_search_term, compare_word) <= match_threshold:
confidence.append(levenshtein(search_term, word))
instances += 1
write_finding(search_term, text, concordance.rstrip(), match_algorithm, levenshtein(search_term, word),
project_name)
if round(numpy.mean(numpy.fromiter(iter(confidence), dtype=float)), 2) > 0:
average_confidence = round(numpy.mean(numpy.fromiter(iter(confidence), dtype=float)), 2)
else:
average_confidence = 0
return {'instances': instances, 'average_confidence': average_confidence}
def generate(file_dir, project_name, search_terms_file, tokenizer, match_algorithm, match_threshold, show_preview, ignore_case):
"""Initate the search process.
:param file_dir: the location of a directory with .txt files
:type file_dir: string
:param project_name: the project's name
:type project_name: string
:param search_terms_file: location to a .txt file containing search terms
:type search_terms_file: string
:param tokenizer: the tokenizer that should be used (nltk or simple)
:type tokenizer: string
:param match_algorithm: the matching algorithm
:type match_algorithm: string
:param match_threshold: the matching threshold
:type match_threshold: float
:param show_preview: a boolean indicating if a preview should be shown to the user
:type show_preview: int
:param ignore_case: a boolean indicating wether case should be ignored or not
:type ignore_case: int
:note: Goes through the search terms and files one by one, calculating the occurrences
:return: none
"""
global max_count, search_term
max_count = 0
def search_in_files(search_term, tokenizer, match_algorithm, project_name, ignore_case):
"""Search a specific search term given a tokenizer and a matching algorithm.
:param search_term: the word to look for
:type search_term: string
:param tokenizer: the tokenizer that should be used (nltk or simple)
:type tokenizer: string
:param match_algorithm: the matching algorithm
:type match_algorithm: string
:param project_name: the project's name
:type project_name: string
:param ignore_case: a boolean indicating wether case should be ignored or not
:type ignore_case: int
:return: none
"""
global max_count
for text in text_files: # Loop through files
f = open(file_dir + text, 'r')
if tokenizer == 'simple':
search_result = search_word_count(search_term, f, match_threshold, match_algorithm, project_name, ignore_case)
else:
search_result = search_word_count_nltk(search_term, f, match_threshold, match_algorithm, project_name, ignore_case)
search_results[search_term][text] = search_result["instances"]
f.close()
# Adding nodes and edges to NetworkX object g
if search_result['instances'] > 0:
g.add_node(text.replace('.txt', ''), node_type='text', tokens=text_length(file_dir + text, tokenizer))
g.add_node(search_term, node_type='search_term', tokens=0)
g.add_edge(text.replace('.txt', ''), search_term, weight=search_result['instances'],
average_confidence=float(search_result['average_confidence']),
match_algorithm=match_algorithm)
# Incrementing max_count if necessary
if search_result['instances'] > max_count:
max_count = search_result['instances']
file_dir = os.path.join(file_dir, '') # add trailing slash to the file_dir
if os.path.isdir(file_dir):
print('Running in: "' + file_dir + '" with search terms from "' + search_terms_file + '"')
else:
print('Not a valid folder!')
exit()
# Generate the graph object for NetworkX
g = nx.DiGraph()
if not os.path.exists('output/' + project_name):
os.makedirs('output/' + project_name)
text_files = os.listdir(file_dir)
search_terms = open(search_terms_file, 'r')
search_results = defaultdict(lambda: defaultdict(dict)) # Create Array search_results[search_term][search_text]
csv_file = open('output/' + project_name + '/adjacencyMatrix.csv', 'w')
no_search_terms = lines_in_file(search_terms_file)
no_text_files = len(text_files)
running_times = []
search_term_counter = 0
print('Matching Algorithm: ' + match_algorithm)
print('Matching Threshold: ' + str(match_threshold))
print('Tokenizer: ' + str(tokenizer))
print('Ignore Case: ' + str(ignore_case))
print('Show Preview: ' + str(show_preview))
print('Project: ' + str(no_search_terms) + ' search terms and ' + str(no_text_files) + ' text files')
# Loop through the individual search terms
start_time_total = time.time()
print('Processing first search term, please wait for an estimated running time...')
for search_term in search_terms:
start_time_term = time.time()
search_term_counter += 1
search_term = search_term.rstrip()
search_in_files(search_term, tokenizer, match_algorithm, project_name, ignore_case)
running_times.append(time.time() - start_time_term)
estimated_time_left = round(numpy.mean(
numpy.fromiter(iter(running_times), dtype=float) * (no_search_terms - search_term_counter) / 60), 1)
print('Search Term ' + str(search_term_counter) + '/' + str(no_search_terms) + ' [Estimated time left: ' + str(
estimated_time_left) + ' min]', end='\r')
search_terms.close()
print('\n\nTotal run-time: ' + str(round((time.time() - start_time_total) / 60, 3)) + ' minutes')
# CSV Adjacency-Matrix Output
csv_heading = 'Author'
for text in search_results[search_term]: # This must be a valid search_term.
csv_heading = csv_heading + ';' + text.replace('.txt', '')
csv_file.write(csv_heading + '\n')
for search_term in search_results:
csv_line = search_term
for text in search_results[search_term]:
csv_line = csv_line + ';' + str(search_results[search_term][text])
csv_file.write(csv_line + '\n')
csv_file.close()
# NetworkX Output and Network Preview
nx.write_gexf(g, 'output/' + project_name + '/network.gexf')
nx.write_graphml(g, 'output/' + project_name + '/network.graphml')
draw_graph(max_count, project_name, g, show_preview)
| [
"#!/usr/bin/python\r\n",
"# -*- coding: utf-8 -*-\r\n",
"from __future__ import print_function\r\n",
"from collections import defaultdict\r\n",
"from difflib import SequenceMatcher\r\n",
"from nltk.tokenize import word_tokenize\r\n",
"import networkx as nx\r\n",
"import os\r\n",
"import sys\r\n",
"import time\r\n",
"from HGDrawGraph import draw_graph\r\n",
"import numpy\r\n",
"import warnings\r\n",
"\r\n",
"# noinspection PyCompatibility\r\n",
"reload(sys)\r\n",
"sys.setdefaultencoding('utf8')\r\n",
"warnings.simplefilter(action='ignore', category=RuntimeWarning)\r\n",
"\r\n",
"\r\n",
"def text_length(text, tokenizer):\r\n",
" \"\"\"Return the length of a given string.\r\n",
"\r\n",
" :param text: location of a .txt file\r\n",
" :type text: string\r\n",
" :param tokenizer: the tokenizer that should be used (nltk or simple)\r\n",
" :type tokenizer: string\r\n",
" :return: number of tokens\r\n",
" :rtype: int\r\n",
" \"\"\"\r\n",
" text = open(text, 'r')\r\n",
" if tokenizer == 'nltk':\r\n",
" return len(word_tokenize(text.read()))\r\n",
" elif tokenizer == 'simple':\r\n",
" tokens = 0\r\n",
" for line in text:\r\n",
" for word in line.split():\r\n",
" tokens += 1\r\n",
" return tokens\r\n",
"\r\n",
"\r\n",
"def lines_in_file(file):\r\n",
" \"\"\"Return the number of lines in a given text file.\r\n",
"\r\n",
" :param file: location of a .txt file\r\n",
" :type file: string\r\n",
" :return: number of lines the the file\r\n",
" :rtype: int\r\n",
" \"\"\"\r\n",
" with open(file) as f:\r\n",
" for i, l in enumerate(f):\r\n",
" pass\r\n",
" return i + 1\r\n",
"\r\n",
"\r\n",
"def levenshtein(a, b):\r\n",
" \"\"\"Calculate the levenhstein distance between two strings.\r\n",
"\r\n",
" :param a: first word\r\n",
" :type a: string\r\n",
" :param b: second word\r\n",
" :type b: string\r\n",
" :return: levenshtein distance between a and b\r\n",
" :rtype: int\r\n",
" \"\"\"\r\n",
" if len(a) < len(b):\r\n",
" return levenshtein(b, a)\r\n",
"\r\n",
" if len(b) == 0:\r\n",
" return len(a)\r\n",
"\r\n",
" previous_row = range(len(b) + 1)\r\n",
" for i, c1 in enumerate(a):\r\n",
" current_row = [i + 1]\r\n",
" for j, c2 in enumerate(b):\r\n",
" insertions = previous_row[j + 1] + 1\r\n",
" deletions = current_row[j] + 1\r\n",
" substitutions = previous_row[j] + (c1 != c2)\r\n",
" current_row.append(min(insertions, deletions, substitutions))\r\n",
" previous_row = current_row\r\n",
"\r\n",
" return previous_row[-1]\r\n",
"\r\n",
"\r\n",
"def write_finding(search_term, text, concordance, match_algorithm, confidence, project_name):\r\n",
" \"\"\"Write the findings to a csv file.\r\n",
"\r\n",
" :param search_term: the word to look for\r\n",
" :type search_term: string\r\n",
" :param text: location of a .txt file\r\n",
" :type text: string\r\n",
" :param concordance: a concordance string\r\n",
" :type concordance: string\r\n",
" :param match_algorithm: the matching algorithm\r\n",
" :type match_algorithm: string\r\n",
" :param confidence: the confidence\r\n",
" :type confidence: float\r\n",
" :param project_name: the project's name\r\n",
" :type project_name: string\r\n",
" :return: none\r\n",
" \"\"\"\r\n",
" if not os.path.isfile('output/' + project_name + '/findings.csv'):\r\n",
" with open('output/' + project_name + '/findings.csv', 'a') as findings_file:\r\n",
" findings_file.write('search_term;search_file;match_algorithm;confidence;concordance;project_name\\n')\r\n",
"\r\n",
" with open('output/' + project_name + '/findings.csv', 'a') as findings_file:\r\n",
" findings_file.write(\r\n",
" search_term + ';' + text.name + ';' + match_algorithm + ';' + str(confidence) + ';' + concordance.replace(\r\n",
" ';', ',') + ';' + project_name + '\\n')\r\n",
"\r\n",
"\r\n",
"def search_word_count(search_term, text, match_threshold, match_algorithm, project_name, ignore_case):\r\n",
" \"\"\"Return a dictionary wwith the number of findings and a confidence using the simple tokenizer.\r\n",
"\r\n",
" :param search_term: the word to look for\r\n",
" :type search_term: string\r\n",
" :param text: a file object pointing to a .txt file\r\n",
" :type text: file\r\n",
" :param match_threshold: the matching threshold\r\n",
" :type match_algorithm: float\r\n",
" :param match_algorithm: the matching algorithm\r\n",
" :type match_algorithm: string\r\n",
" :param project_name: the project's name\r\n",
" :type project_name: string\r\n",
" :param ignore_case: a boolean indicating wether case should be ignored or not\r\n",
" :type ignore_case: int\r\n",
" :return: dictionary containing the instances and the average_confidence\r\n",
" :rtype: dict\r\n",
" \"\"\"\r\n",
" instances = 0\r\n",
" confidence = []\r\n",
" for line in text:\r\n",
" for word in line.split():\r\n",
"\r\n",
" if (ignore_case == 1):\r\n",
" compare_word = word.lower()\r\n",
" compare_search_term = search_term.lower()\r\n",
" else:\r\n",
" compare_word = word\r\n",
" compare_search_term = search_term\r\n",
"\r\n",
" if match_algorithm == 'gestalt':\r\n",
" if SequenceMatcher(None, compare_search_term, compare_word).ratio() >= match_threshold:\r\n",
" confidence.append(SequenceMatcher(None, search_term, word).ratio())\r\n",
" instances += 1\r\n",
" write_finding(search_term, text, line.rstrip(), match_algorithm,\r\n",
" SequenceMatcher(None, search_term, word).ratio(),\r\n",
" project_name)\r\n",
" else:\r\n",
" if levenshtein(compare_search_term, compare_word) <= match_threshold:\r\n",
" confidence.append(levenshtein(search_term, word))\r\n",
" instances += 1\r\n",
" write_finding(search_term, text, line.rstrip(), match_algorithm, levenshtein(search_term, word),\r\n",
" project_name)\r\n",
"\r\n",
" if round(numpy.mean(numpy.fromiter(iter(confidence), dtype=float)), 2) > 0:\r\n",
" average_confidence = round(numpy.mean(numpy.fromiter(iter(confidence), dtype=float)), 2)\r\n",
" else:\r\n",
" average_confidence = 0\r\n",
"\r\n",
" return {'instances': instances, 'average_confidence': average_confidence}\r\n",
"\r\n",
"\r\n",
"def search_word_count_nltk(search_term, text, match_threshold, match_algorithm, project_name, ignore_case):\r\n",
" \"\"\"Return a dictionary wwith the number of findings and a confidence using the nltk tokenizer.\r\n",
"\r\n",
" :param search_term: the word to look for\r\n",
" :type search_term: string\r\n",
" :param text: a file object pointing to a .txt file\r\n",
" :type text: file\r\n",
" :param match_threshold: the matching threshold\r\n",
" :type match_algorithm: float\r\n",
" :param match_algorithm: the matching algorithm\r\n",
" :type match_algorithm: string\r\n",
" :param project_name: the project's name\r\n",
" :type project_name: string\r\n",
" :param ignore_case: a boolean indicating wether case should be ignored or not\r\n",
" :type ignore_case: int\r\n",
" :return: dictionary containing the instances and the average_confidence\r\n",
" :rtype: dict\r\n",
" \"\"\"\r\n",
" instances = 0\r\n",
" confidence = []\r\n",
" current_word = 0\r\n",
" words = word_tokenize(text.read())\r\n",
"\r\n",
" for word in words:\r\n",
"\r\n",
" if (ignore_case == 1):\r\n",
" compare_word = word.lower()\r\n",
" compare_search_term = search_term.lower()\r\n",
" else:\r\n",
" compare_word = word\r\n",
" compare_search_term = search_term\r\n",
"\r\n",
" # Calculate Concordance\r\n",
" concordance = ''\r\n",
" for x in range(-7, 7):\r\n",
" try:\r\n",
" concordance = concordance + words[current_word + x] + ' '\r\n",
" except:\r\n",
" pass\r\n",
" current_word += 1\r\n",
"\r\n",
" if match_algorithm == 'gestalt':\r\n",
" if SequenceMatcher(None, compare_search_term, compare_word).ratio() >= match_threshold:\r\n",
" confidence.append(SequenceMatcher(None, search_term, word).ratio())\r\n",
" instances += 1\r\n",
" write_finding(search_term, text, concordance.rstrip(), match_algorithm,\r\n",
" SequenceMatcher(None, search_term, word).ratio(),\r\n",
" project_name)\r\n",
" else:\r\n",
" if levenshtein(compare_search_term, compare_word) <= match_threshold:\r\n",
" confidence.append(levenshtein(search_term, word))\r\n",
" instances += 1\r\n",
" write_finding(search_term, text, concordance.rstrip(), match_algorithm, levenshtein(search_term, word),\r\n",
" project_name)\r\n",
"\r\n",
" if round(numpy.mean(numpy.fromiter(iter(confidence), dtype=float)), 2) > 0:\r\n",
" average_confidence = round(numpy.mean(numpy.fromiter(iter(confidence), dtype=float)), 2)\r\n",
" else:\r\n",
" average_confidence = 0\r\n",
"\r\n",
" return {'instances': instances, 'average_confidence': average_confidence}\r\n",
"\r\n",
"\r\n",
"def generate(file_dir, project_name, search_terms_file, tokenizer, match_algorithm, match_threshold, show_preview, ignore_case):\r\n",
" \"\"\"Initate the search process.\r\n",
"\r\n",
" :param file_dir: the location of a directory with .txt files\r\n",
" :type file_dir: string\r\n",
" :param project_name: the project's name\r\n",
" :type project_name: string\r\n",
" :param search_terms_file: location to a .txt file containing search terms\r\n",
" :type search_terms_file: string\r\n",
" :param tokenizer: the tokenizer that should be used (nltk or simple)\r\n",
" :type tokenizer: string\r\n",
" :param match_algorithm: the matching algorithm\r\n",
" :type match_algorithm: string\r\n",
" :param match_threshold: the matching threshold\r\n",
" :type match_threshold: float\r\n",
" :param show_preview: a boolean indicating if a preview should be shown to the user\r\n",
" :type show_preview: int\r\n",
" :param ignore_case: a boolean indicating wether case should be ignored or not\r\n",
" :type ignore_case: int\r\n",
" :note: Goes through the search terms and files one by one, calculating the occurrences\r\n",
" :return: none\r\n",
" \"\"\"\r\n",
" global max_count, search_term\r\n",
" max_count = 0\r\n",
"\r\n",
" def search_in_files(search_term, tokenizer, match_algorithm, project_name, ignore_case):\r\n",
" \"\"\"Search a specific search term given a tokenizer and a matching algorithm.\r\n",
"\r\n",
" :param search_term: the word to look for\r\n",
" :type search_term: string\r\n",
" :param tokenizer: the tokenizer that should be used (nltk or simple)\r\n",
" :type tokenizer: string\r\n",
" :param match_algorithm: the matching algorithm\r\n",
" :type match_algorithm: string\r\n",
" :param project_name: the project's name\r\n",
" :type project_name: string\r\n",
" :param ignore_case: a boolean indicating wether case should be ignored or not\r\n",
" :type ignore_case: int\r\n",
" :return: none\r\n",
" \"\"\"\r\n",
" global max_count\r\n",
" for text in text_files: # Loop through files\r\n",
" f = open(file_dir + text, 'r')\r\n",
" if tokenizer == 'simple':\r\n",
" search_result = search_word_count(search_term, f, match_threshold, match_algorithm, project_name, ignore_case)\r\n",
" else:\r\n",
" search_result = search_word_count_nltk(search_term, f, match_threshold, match_algorithm, project_name, ignore_case)\r\n",
" search_results[search_term][text] = search_result[\"instances\"]\r\n",
"\r\n",
" f.close()\r\n",
"\r\n",
" # Adding nodes and edges to NetworkX object g\r\n",
" if search_result['instances'] > 0:\r\n",
" g.add_node(text.replace('.txt', ''), node_type='text', tokens=text_length(file_dir + text, tokenizer))\r\n",
" g.add_node(search_term, node_type='search_term', tokens=0)\r\n",
" g.add_edge(text.replace('.txt', ''), search_term, weight=search_result['instances'],\r\n",
" average_confidence=float(search_result['average_confidence']),\r\n",
" match_algorithm=match_algorithm)\r\n",
"\r\n",
" # Incrementing max_count if necessary\r\n",
" if search_result['instances'] > max_count:\r\n",
" max_count = search_result['instances']\r\n",
"\r\n",
" file_dir = os.path.join(file_dir, '') # add trailing slash to the file_dir\r\n",
" if os.path.isdir(file_dir):\r\n",
" print('Running in: \"' + file_dir + '\" with search terms from \"' + search_terms_file + '\"')\r\n",
" else:\r\n",
" print('Not a valid folder!')\r\n",
" exit()\r\n",
"\r\n",
" # Generate the graph object for NetworkX\r\n",
" g = nx.DiGraph()\r\n",
"\r\n",
" if not os.path.exists('output/' + project_name):\r\n",
" os.makedirs('output/' + project_name)\r\n",
"\r\n",
" text_files = os.listdir(file_dir)\r\n",
" search_terms = open(search_terms_file, 'r')\r\n",
" search_results = defaultdict(lambda: defaultdict(dict)) # Create Array search_results[search_term][search_text]\r\n",
" csv_file = open('output/' + project_name + '/adjacencyMatrix.csv', 'w')\r\n",
" no_search_terms = lines_in_file(search_terms_file)\r\n",
" no_text_files = len(text_files)\r\n",
" running_times = []\r\n",
" search_term_counter = 0\r\n",
"\r\n",
" print('Matching Algorithm: ' + match_algorithm)\r\n",
" print('Matching Threshold: ' + str(match_threshold))\r\n",
" print('Tokenizer: ' + str(tokenizer))\r\n",
" print('Ignore Case: ' + str(ignore_case))\r\n",
" print('Show Preview: ' + str(show_preview))\r\n",
" print('Project: ' + str(no_search_terms) + ' search terms and ' + str(no_text_files) + ' text files')\r\n",
"\r\n",
" # Loop through the individual search terms\r\n",
" start_time_total = time.time()\r\n",
" print('Processing first search term, please wait for an estimated running time...')\r\n",
"\r\n",
" for search_term in search_terms:\r\n",
" start_time_term = time.time()\r\n",
" search_term_counter += 1\r\n",
" search_term = search_term.rstrip()\r\n",
" search_in_files(search_term, tokenizer, match_algorithm, project_name, ignore_case)\r\n",
" running_times.append(time.time() - start_time_term)\r\n",
" estimated_time_left = round(numpy.mean(\r\n",
" numpy.fromiter(iter(running_times), dtype=float) * (no_search_terms - search_term_counter) / 60), 1)\r\n",
" print('Search Term ' + str(search_term_counter) + '/' + str(no_search_terms) + ' [Estimated time left: ' + str(\r\n",
" estimated_time_left) + ' min]', end='\\r')\r\n",
"\r\n",
" search_terms.close()\r\n",
" print('\\n\\nTotal run-time: ' + str(round((time.time() - start_time_total) / 60, 3)) + ' minutes')\r\n",
"\r\n",
" # CSV Adjacency-Matrix Output\r\n",
" csv_heading = 'Author'\r\n",
" for text in search_results[search_term]: # This must be a valid search_term.\r\n",
" csv_heading = csv_heading + ';' + text.replace('.txt', '')\r\n",
" csv_file.write(csv_heading + '\\n')\r\n",
"\r\n",
" for search_term in search_results:\r\n",
" csv_line = search_term\r\n",
" for text in search_results[search_term]:\r\n",
" csv_line = csv_line + ';' + str(search_results[search_term][text])\r\n",
" csv_file.write(csv_line + '\\n')\r\n",
" csv_file.close()\r\n",
"\r\n",
" # NetworkX Output and Network Preview\r\n",
" nx.write_gexf(g, 'output/' + project_name + '/network.gexf')\r\n",
" nx.write_graphml(g, 'output/' + project_name + '/network.graphml')\r\n",
" draw_graph(max_count, project_name, g, show_preview)\r\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0.008771929824561403,
0,
0.012195121951219513,
0,
0.008333333333333333,
0,
0,
0,
0.009615384615384616,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009523809523809525,
0.011235955056179775,
0,
0.011627906976744186,
0.011764705882352941,
0,
0,
0.011494252873563218,
0,
0,
0.00847457627118644,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0.009174311926605505,
0.01,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0.009900990099009901,
0.011764705882352941,
0,
0.011235955056179775,
0,
0,
0,
0.012048192771084338,
0,
0,
0.008264462809917356,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0.007692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0.011904761904761904,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0.010638297872340425,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0.0078125,
0,
0.007518796992481203,
0,
0,
0,
0,
0,
0,
0.008333333333333333,
0,
0.00980392156862745,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0.01,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00847457627118644,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009345794392523364,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0.008771929824561403,
0.008264462809917356,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 353 | 0.001438 | false |
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import json
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath
# Resolve the directory containing this test file so paths work from any CWD.
cwd = dirname(realpath(__file__))
# Logging verbosity, overridable via the PythonLogLevel env var (30 = WARNING).
log_level = int(os.environ.get('PythonLogLevel', 30))
# Make the generated "BodyByte" client package importable for the import below.
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "BodyByte"))
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError
from auto_rest_swagger_bat_byte_service import AutoRestSwaggerBATByteService
class ByteTests(unittest.TestCase):
    """Acceptance tests for byte-array round-trips against the local test server."""

    def test_byte(self):
        """Round-trip non-ASCII bytes and verify null/empty/invalid payload handling."""
        svc = AutoRestSwaggerBATByteService(base_url="http://localhost:3000")
        # Bytes in the 0xF6-0xFF range are not valid ASCII, exercising the raw-byte path.
        payload = bytearray([0x0FF, 0x0FE, 0x0FD, 0x0FC, 0x0FB, 0x0FA, 0x0F9, 0x0F8, 0x0F7, 0x0F6])
        svc.byte.put_non_ascii(payload)
        self.assertEqual(payload, svc.byte.get_non_ascii())
        self.assertIsNone(svc.byte.get_null())
        self.assertEqual(bytearray(), svc.byte.get_empty())
        # A malformed byte payload must surface as a deserialization failure.
        with self.assertRaises(DeserializationError):
            svc.byte.get_invalid()
# Allow running this test module directly: `python <this file>`.
if __name__ == '__main__':
    unittest.main()
| [
"# --------------------------------------------------------------------------\n",
"#\n",
"# Copyright (c) Microsoft Corporation. All rights reserved.\n",
"#\n",
"# The MIT License (MIT)\n",
"#\n",
"# Permission is hereby granted, free of charge, to any person obtaining a copy\n",
"# of this software and associated documentation files (the \"\"Software\"\"), to\n",
"# deal in the Software without restriction, including without limitation the\n",
"# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n",
"# sell copies of the Software, and to permit persons to whom the Software is\n",
"# furnished to do so, subject to the following conditions:\n",
"#\n",
"# The above copyright notice and this permission notice shall be included in\n",
"# all copies or substantial portions of the Software.\n",
"#\n",
"# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n",
"# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n",
"# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n",
"# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n",
"# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n",
"# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n",
"# IN THE SOFTWARE.\n",
"#\n",
"# --------------------------------------------------------------------------\n",
"\n",
"import unittest\n",
"import subprocess\n",
"import sys\n",
"import isodate\n",
"import tempfile\n",
"import json\n",
"from datetime import date, datetime, timedelta\n",
"import os\n",
"from os.path import dirname, pardir, join, realpath\n",
"\n",
"cwd = dirname(realpath(__file__))\n",
"log_level = int(os.environ.get('PythonLogLevel', 30))\n",
"\n",
"tests = realpath(join(cwd, pardir, \"Expected\", \"AcceptanceTests\"))\n",
"sys.path.append(join(tests, \"BodyByte\"))\n",
"\n",
"from msrest.serialization import Deserializer\n",
"from msrest.exceptions import DeserializationError\n",
"\n",
"from auto_rest_swagger_bat_byte_service import AutoRestSwaggerBATByteService\n",
"\n",
"\n",
"class ByteTests(unittest.TestCase):\n",
"\n",
" def test_byte(self):\n",
" client = AutoRestSwaggerBATByteService(base_url=\"http://localhost:3000\")\n",
"\n",
" test_bytes = bytearray([0x0FF, 0x0FE, 0x0FD, 0x0FC, 0x0FB, 0x0FA, 0x0F9, 0x0F8, 0x0F7, 0x0F6])\n",
" client.byte.put_non_ascii(test_bytes)\n",
" self.assertEqual(test_bytes, client.byte.get_non_ascii())\n",
"\n",
" self.assertIsNone(client.byte.get_null())\n",
" self.assertEqual(bytearray(), client.byte.get_empty())\n",
"\n",
" with self.assertRaises(DeserializationError):\n",
" client.byte.get_invalid()\n",
"\n",
"if __name__ == '__main__':\n",
" unittest.main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0.0196078431372549,
0,
0.012987012987012988,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0
] | 65 | 0.001745 | false |
"""
Module for dependency source downloads
"""
import os, glob, shutil, zipfile, tarfile, time
from os.path import join, abspath, exists
from pylib.logwrapper import LogWrapper
# Source Base Class
class DepSource(object):
    """Base class for a dependency source: download an archive and extract it."""

    # Directory that holds downloaded/manually-placed archive files.
    ArchiveDir = "."
    # Root directory under which all sources are extracted.
    RootExtractDir = "."

    def __init__(self):
        self.url = ""
        self.destsubdir = ""
        self.md5hash = ""
        self.version = ""
        self.arch_filename = ""
        self.arch_filepath = ""
        self.subdirmove = 0
        self.log = LogWrapper.getlogger()
        self.Extracted = False

    def download(self):
        """Fetch the archive. Base implementation is a no-op that reports success."""
        return True

    def extract(self):
        """Extract the archive into RootExtractDir/destsubdir.

        Returns True on success, False when there is nothing to extract or
        the destination already exists. Raises ValueError for unsupported
        archive extensions.
        """
        if self.arch_filepath is None:  # was `== None`; identity test is correct here
            return False

        # Create the extraction directory
        self.log.info("Extracting: " + self.arch_filename + " To: " + self.destsubdir)
        extractdir = abspath(join(DepSource.RootExtractDir, self.destsubdir))
        if exists(extractdir):
            # Skip if destination subdir already exists.
            # `warn` is a deprecated alias of `warning`.
            self.log.warning("Skipping: " + self.destsubdir + " Directory already exists")
            return False
        os.makedirs(extractdir)

        # Choose the extraction method from the file extension.
        extension = os.path.splitext(self.arch_filename)[1]
        if extension == ".zip":
            with zipfile.ZipFile(self.arch_filepath, "r") as z:
                z.extractall(extractdir)
        elif extension in (".gz", ".xz", ".bz2"):
            # tarfile understands 'r:gz' / 'r:xz' / 'r:bz2' compression modes,
            # so the three tar branches collapse into one.
            with tarfile.open(self.arch_filepath, 'r:' + extension[1:]) as tfile:
                tfile.extractall(extractdir)
        else:
            raise ValueError("File Extension: " + extension + " Not supported")
        return True

    # Delete the downloaded archive file
    def remove_archivefile(self):
        """Delete the archive file; return True only if it was actually removed."""
        if not self.arch_filepath:  # covers both "" and None
            return False
        elif exists(self.arch_filepath):
            os.remove(self.arch_filepath)
            return True
        else:
            return False

    def movetoparent_multiple(self):
        """Hoist the single inner directory `subdirmove` times."""
        for _ in range(0, self.subdirmove):
            self.log.info("Moving source directory to parent: " + self.destsubdir)
            # Done one level at a time to avoid issues with directory locking.
            self.movetoparent()

    # Some sources include a subdirectory with a name / version inside.
    # This just moves things around so that all the source sits at the top
    # of the directory within deps.
    def movetoparent(self):
        """Move sub directory to parent."""
        parentdir = abspath(join(DepSource.RootExtractDir, self.destsubdir))
        olddir = parentdir + "_old"
        # move the deps/subdir to deps/subdir_old
        if exists(olddir):
            shutil.rmtree(olddir)
        os.rename(parentdir, olddir)
        # Relocate the inner directory to /deps/subdir
        innerdir = join(olddir, os.listdir(olddir)[0])
        shutil.move(innerdir, parentdir)
        shutil.rmtree(olddir)

    @staticmethod
    def _apply_optional_fields(element, newsource):
        """Copy the optional Md5Hash/Version/SubDirMove children onto newsource."""
        # ElementTree elements must be compared with `is not None`, not truthiness.
        if element.find('Md5Hash') is not None:
            newsource.md5hash = element.find('Md5Hash').text
        if element.find('Version') is not None:
            newsource.version = element.find('Version').text
        if element.find('SubDirMove') is not None:
            newsource.subdirmove = int(element.find('SubDirMove').text)

    @staticmethod
    def parsexml(root):
        """Build source objects from FileExtract/HttpSource/GitHubZipSource children of root."""
        ret = []
        # Imported here to avoid a circular import at module load time.
        from pylib.depend.dephttp import HttpSource, GitHubZipSource

        for source in root.findall('FileExtract'):
            newsource = FileExtract(
                source.find('SrcFile').text,
                source.find('DestSubDir').text)
            DepSource._apply_optional_fields(source, newsource)
            ret.append(newsource)

        for source in root.findall('HttpSource'):
            newsource = HttpSource(
                source.find('Url').text,
                source.find('DestSubDir').text)
            DepSource._apply_optional_fields(source, newsource)
            ret.append(newsource)

        for source in root.findall('GitHubZipSource'):
            newsource = GitHubZipSource(
                source.find('Url').text,
                source.find('DestSubDir').text,
                source.find('CommitId').text)
            DepSource._apply_optional_fields(source, newsource)
            ret.append(newsource)
        return ret
# File Extract Source for file that has to be manually downloaded into the Archive directory
class FileExtract(DepSource):
    """Dependency source backed by an archive already present on disk."""

    def __init__(self, arcfile, destsubdir):
        super().__init__()
        self.url = arcfile
        self.destsubdir = destsubdir
        # Relative paths are resolved against the shared Archive directory.
        if not os.path.isabs(self.url):
            self.url = join(DepSource.ArchiveDir, self.url)
        self.url = abspath(self.url)
        # The path may contain wildcards; take the first match, if any.
        matches = glob.glob(self.url)
        self.arch_filepath = matches[0] if matches else None
        if self.arch_filepath:
            self.arch_filename = os.path.basename(self.arch_filepath)
| [
"\"\"\"\n",
"Module for dependency source downloads\n",
"\"\"\"\n",
"\n",
"import os, glob, shutil, zipfile, tarfile, time\n",
"from os.path import join, abspath, exists\n",
"from pylib.logwrapper import LogWrapper\n",
"\n",
"# Source Base Class\n",
"class DepSource(object):\n",
"\n",
" ArchiveDir = \".\"\n",
" RootExtractDir = \".\"\n",
"\n",
" def __init__(self):\n",
" self.url = \"\"\n",
" self.destsubdir = \"\"\n",
" self.md5hash = \"\"\n",
" self.version = \"\"\n",
" self.arch_filename = \"\"\n",
" self.arch_filepath = \"\"\n",
" self.subdirmove = 0\n",
" self.log = LogWrapper.getlogger()\n",
" self.Extracted = False\n",
"\n",
" def download(self):\n",
" return True\n",
"\n",
" def extract(self):\n",
" if self.arch_filepath == None:\n",
" return False\n",
"\n",
" # Create the extraction directory\n",
" self.log.info(\"Extracting: \" + self.arch_filename + \" To: \" + self.destsubdir)\n",
" extractdir = abspath(join(DepSource.RootExtractDir, self.destsubdir))\n",
" if exists(extractdir):\n",
" #shutil.rmtree(extractdir)\n",
" # Skip if destination subdir already exists\n",
" self.log.warn(\"Skipping: \" + self.destsubdir + \" Directory already exists\")\n",
" return False\n",
" os.makedirs(extractdir)\n",
"\n",
" # Get the file extension\n",
" extension = os.path.splitext(self.arch_filename)[1]\n",
"\n",
" # Extract the file\n",
" if extension == \".zip\":\n",
" with zipfile.ZipFile(self.arch_filepath, \"r\") as z:\n",
" z.extractall(extractdir)\n",
" elif extension == \".gz\":\n",
" with tarfile.open(self.arch_filepath, 'r:gz') as tfile:\n",
" tfile.extractall(extractdir)\n",
" elif extension == \".xz\":\n",
" with tarfile.open(self.arch_filepath, 'r:xz') as tfile:\n",
" tfile.extractall(extractdir)\n",
" elif extension == \".bz2\":\n",
" with tarfile.open(self.arch_filepath, 'r:bz2') as tfile:\n",
" tfile.extractall(extractdir)\n",
" else:\n",
" raise ValueError(\"File Extension: \" + extension + \" Not supported\")\n",
"\n",
" return True\n",
"\n",
" # Delete the downloaded archive file\n",
" def remove_archivefile(self):\n",
" if self.arch_filepath == \"\" or self.arch_filepath == None:\n",
" return False\n",
" elif exists(self.arch_filepath):\n",
" os.remove(self.arch_filepath)\n",
" return True\n",
" else:\n",
" return False\n",
"\n",
" def movetoparent_multiple(self):\n",
" # Move subdir to parent if required\n",
" for x in range(0, self.subdirmove):\n",
" self.log.info(\"Moving source directory to parent: \" + self.destsubdir)\n",
" # Avoid issues with directory locking\n",
" self.movetoparent()\n",
"\n",
" # Some sources include a subdirectory with a name / version inside\n",
" # This just moves things around so that all the source sits at the top of the directory within deps\n",
" def movetoparent(self):\n",
" \"\"\"Move sub directory to parent\"\"\"\n",
" parentdir = abspath(join(DepSource.RootExtractDir, self.destsubdir))\n",
" olddir = parentdir + \"_old\"\n",
"\n",
" # move the deps/subdir to deps/subdir_old\n",
" if exists(olddir):\n",
" shutil.rmtree(olddir)\n",
" os.rename(parentdir, olddir)\n",
" \n",
" # Relocate the inner directory to /deps/subdir\n",
" innerdir = os.listdir(olddir)[0]\n",
" innerdir = join(olddir, innerdir)\n",
" shutil.move(innerdir, parentdir)\n",
" shutil.rmtree(olddir)\n",
" return\n",
"\n",
" @staticmethod\n",
" def parsexml(root):\n",
" ret = []\n",
"\n",
" from pylib.depend.dephttp import HttpSource, GitHubZipSource\n",
"\n",
" for source in root.findall('FileExtract'):\n",
" newsource = FileExtract(\n",
" source.find('SrcFile').text,\n",
" source.find('DestSubDir').text)\n",
" if source.find('Md5Hash') != None:\n",
" newsource.md5hash = source.find('Md5Hash').text\n",
" if source.find('Version') != None:\n",
" newsource.version = source.find('Version').text\n",
" if source.find('SubDirMove') != None:\n",
" newsource.subdirmove = int(source.find('SubDirMove').text)\n",
" ret.append(newsource)\n",
"\n",
" for source in root.findall('HttpSource'):\n",
" newsource = HttpSource(\n",
" source.find('Url').text,\n",
" source.find('DestSubDir').text)\n",
" if source.find('Md5Hash') != None:\n",
" newsource.md5hash = source.find('Md5Hash').text\n",
" if source.find('Version') != None:\n",
" newsource.version = source.find('Version').text\n",
" if source.find('SubDirMove') != None:\n",
" newsource.subdirmove = int(source.find('SubDirMove').text)\n",
" ret.append(newsource)\n",
"\n",
" for source in root.findall('GitHubZipSource'):\n",
" newsource = GitHubZipSource(\n",
" source.find('Url').text,\n",
" source.find('DestSubDir').text,\n",
" source.find('CommitId').text)\n",
" if source.find('Md5Hash') != None:\n",
" newsource.md5hash = source.find('Md5Hash').text\n",
" if source.find('Version') != None:\n",
" newsource.version = source.find('Version').text\n",
" if source.find('SubDirMove') != None:\n",
" newsource.subdirmove = int(source.find('SubDirMove').text)\n",
" ret.append(newsource)\n",
"\n",
" return ret\n",
"\n",
"# File Extract Source for file that has to be manually downloaded into the Archive directory\n",
"class FileExtract(DepSource):\n",
"\n",
" def __init__(self, arcfile, destsubdir):\n",
" super().__init__()\n",
" self.url = arcfile\n",
" self.destsubdir = destsubdir\n",
"\n",
" #If path is not absolute, assume it's relative to the Archive directory\n",
" if not os.path.isabs(self.url):\n",
" self.url = join(DepSource.ArchiveDir, self.url)\n",
" self.url = abspath(self.url)\n",
"\n",
" #Assume glob is needed\n",
" globlist = glob.glob(self.url)\n",
" self.arch_filepath = None if not globlist else globlist[0]\n",
" if self.arch_filepath:\n",
" self.arch_filename = os.path.basename(self.arch_filepath)\n",
" return\n"
] | [
0,
0,
0,
0,
0.020833333333333332,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0,
0,
0,
0.011494252873563218,
0,
0,
0.02564102564102564,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014925373134328358,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0.009615384615384616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085,
0,
0.02127659574468085,
0,
0.02,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085,
0,
0.02127659574468085,
0,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085,
0,
0.02127659574468085,
0,
0.02,
0,
0,
0,
0,
0,
0.010752688172043012,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0.0125,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0
] | 163 | 0.003431 | false |
from sigma.core.permission import check_man_roles
from sigma.core.rolecheck import matching_role, user_matching_role
import discord
async def giverole(cmd, message, args):
    """Grant a role (matched by name) to the mentioned user. Admin-only command."""
    # Permission gate: only members who can manage roles may run this.
    if not check_man_roles(message.author, message.channel):
        response = discord.Embed(type='rich', color=0xDB0000,
                                 title='⛔ Insufficient Permissions. Server Admin Only.')
        await message.channel.send(None, embed=response)
        return
    # Need at least a mention plus one word of role name.
    if len(args) < 2:
        response = discord.Embed(type='rich', color=0xDB0000, title='❗ Error')
        response.add_field(name='Missing Arguments', value=cmd.help())
        await message.channel.send(None, embed=response)
        return
    if not message.mentions:
        response = discord.Embed(type='rich', color=0xDB0000, title='❗ Error')
        response.add_field(name='Missing Target User', value=cmd.help())
        await message.channel.send(None, embed=response)
        return
    role_qry = ' '.join(args[1:])
    target_role = matching_role(message.guild, role_qry)
    target_user = message.mentions[0]
    already_has_role = user_matching_role(target_user, role_qry)
    if not target_role:
        # No role with that name exists on this guild.
        response = discord.Embed(type='rich', color=0xFF9900, title='❗ Error')
        response.add_field(name='Role Not Found',
                           value='I was unable to find **' + role_qry + '** on this server.')
        await message.channel.send(None, embed=response)
        return
    if already_has_role:
        response = discord.Embed(type='rich', color=0xFF9900, title='❗ Error')
        response.add_field(name='User Has Role',
                           value='The user **' + target_user.name + '** already has this role.')
        await message.channel.send(None, embed=response)
        return
    await target_user.add_roles(target_role)
    response = discord.Embed(type='rich', color=0x66cc66,
                             title='✅ Role ' + role_qry + ' given to **' + target_user.name + '**.')
    await message.channel.send(None, embed=response)
| [
"from sigma.core.permission import check_man_roles\n",
"from sigma.core.rolecheck import matching_role, user_matching_role\n",
"import discord\n",
"\n",
"\n",
"async def giverole(cmd, message, args):\n",
" if not check_man_roles(message.author, message.channel):\n",
" out_content = discord.Embed(type='rich', color=0xDB0000,\n",
" title='⛔ Insufficient Permissions. Server Admin Only.')\n",
" await message.channel.send(None, embed=out_content)\n",
" return\n",
" if len(args) < 2:\n",
" out_content = discord.Embed(type='rich', color=0xDB0000, title='❗ Error')\n",
" out_content.add_field(name='Missing Arguments', value=cmd.help())\n",
" await message.channel.send(None, embed=out_content)\n",
" return\n",
" if not message.mentions:\n",
" out_content = discord.Embed(type='rich', color=0xDB0000, title='❗ Error')\n",
" out_content.add_field(name='Missing Target User', value=cmd.help())\n",
" await message.channel.send(None, embed=out_content)\n",
" return\n",
" role_qry = ' '.join(args[1:])\n",
" target_role = matching_role(message.guild, role_qry)\n",
" target_user = message.mentions[0]\n",
" user_contained_role = user_matching_role(target_user, role_qry)\n",
" if not target_role:\n",
" out_content = discord.Embed(type='rich', color=0xFF9900, title='❗ Error')\n",
" out_content.add_field(name='Role Not Found', value='I was unable to find **' + role_qry + '** on this server.')\n",
" await message.channel.send(None, embed=out_content)\n",
" else:\n",
" if not user_contained_role:\n",
" await target_user.add_roles(target_role)\n",
" out_content = discord.Embed(type='rich', color=0x66cc66,\n",
" title='✅ Role ' + role_qry + ' given to **' + target_user.name + '**.')\n",
" await message.channel.send(None, embed=out_content)\n",
" else:\n",
" out_content = discord.Embed(type='rich', color=0xFF9900, title='❗ Error')\n",
" out_content.add_field(name='User Has Role',\n",
" value='The user **' + target_user.name + '** already has this role.')\n",
" await message.channel.send(None, embed=out_content)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.008333333333333333,
0,
0,
0,
0,
0,
0.008928571428571428,
0,
0,
0.011627906976744186,
0,
0.009615384615384616,
0
] | 40 | 0.002149 | false |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""User friendly / multi platform builder script"""
import argparse
import datetime
import glob
import logging
import os
import platform
import shutil
import sys
import time
from distutils.dir_util import copy_tree
from enum import Enum
from subprocess import check_call
from util import *
# Known locations of the Visual Studio developer-environment batch files
# (x86-hosted, x64-targeting toolchains) used as --vcvars defaults.
KNOWN_VCVARS = {
    'VS 2015': r'C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\x86_amd64\vcvarsx86_amd64.bat',
    'VS 2017': r'C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsx86_amd64.bat'
}
class BuildFlavour(Enum):
    """Supported Windows build configurations (CPU/GPU crossed with MKL/MKL-DNN)."""
    WIN_CPU = 'WIN_CPU'
    WIN_CPU_MKLDNN = 'WIN_CPU_MKLDNN'
    WIN_CPU_MKLDNN_MKL = 'WIN_CPU_MKLDNN_MKL'
    WIN_CPU_MKL = 'WIN_CPU_MKL'
    WIN_GPU = 'WIN_GPU'
    WIN_GPU_MKLDNN = 'WIN_GPU_MKLDNN'
# CMake argument strings for each build flavour; keys mirror BuildFlavour values.
CMAKE_FLAGS = {
    # CPU-only build with OpenBLAS.
    'WIN_CPU': (
        '-DUSE_CUDA=OFF '
        '-DUSE_CUDNN=OFF '
        '-DUSE_NVRTC=OFF '
        '-DUSE_OPENCV=ON '
        '-DUSE_OPENMP=ON '
        '-DUSE_BLAS=open '
        '-DUSE_LAPACK=ON '
        '-DUSE_DIST_KVSTORE=OFF '
        '-DBUILD_CPP_EXAMPLES=ON '
        '-DUSE_MKL_IF_AVAILABLE=OFF '
        '-DCMAKE_BUILD_TYPE=Release')
    # CPU build with MKL-DNN ops on top of OpenBLAS.
    , 'WIN_CPU_MKLDNN': (
        '-DUSE_CUDA=OFF '
        '-DUSE_CUDNN=OFF '
        '-DUSE_NVRTC=OFF '
        '-DUSE_OPENCV=ON '
        '-DUSE_OPENMP=ON '
        '-DUSE_BLAS=open '
        '-DUSE_LAPACK=ON '
        '-DUSE_DIST_KVSTORE=OFF '
        '-DUSE_MKL_IF_AVAILABLE=ON '
        '-DUSE_MKLDNN=ON '
        '-DCMAKE_BUILD_TYPE=Release')
    # CPU build with MKL-DNN ops and MKL as the BLAS backend.
    , 'WIN_CPU_MKLDNN_MKL': (
        '-DUSE_CUDA=OFF '
        '-DUSE_CUDNN=OFF '
        '-DUSE_NVRTC=OFF '
        '-DUSE_OPENCV=ON '
        '-DUSE_OPENMP=ON '
        '-DUSE_BLAS=mkl '
        '-DUSE_LAPACK=ON '
        '-DUSE_DIST_KVSTORE=OFF '
        '-DUSE_MKL_IF_AVAILABLE=ON '
        '-DUSE_MKLDNN=ON '
        '-DCMAKE_BUILD_TYPE=Release')
    # CPU build with MKL BLAS but without MKL-DNN ops.
    , 'WIN_CPU_MKL': (
        '-DUSE_CUDA=OFF '
        '-DUSE_CUDNN=OFF '
        '-DUSE_NVRTC=OFF '
        '-DUSE_OPENCV=ON '
        '-DUSE_OPENMP=ON '
        '-DUSE_BLAS=mkl '
        '-DUSE_LAPACK=ON '
        '-DUSE_DIST_KVSTORE=OFF '
        '-DUSE_MKL_IF_AVAILABLE=ON '
        '-DUSE_MKLDNN=OFF '
        '-DCMAKE_BUILD_TYPE=Release')
    # CUDA build; compute capability pinned to 5.2.
    , 'WIN_GPU': (
        '-DUSE_CUDA=ON '
        '-DUSE_CUDNN=ON '
        '-DUSE_NVRTC=ON '
        '-DUSE_OPENCV=ON '
        '-DUSE_OPENMP=ON '
        '-DUSE_BLAS=open '
        '-DUSE_LAPACK=ON '
        '-DUSE_DIST_KVSTORE=OFF '
        '-DCUDA_ARCH_NAME=Manual '
        '-DCUDA_ARCH_BIN=52 '
        '-DCUDA_ARCH_PTX=52 '
        '-DCMAKE_CXX_FLAGS="/FS /MD /O2 /Ob2" '
        '-DUSE_MKL_IF_AVAILABLE=OFF '
        '-DCMAKE_BUILD_TYPE=Release')
    # CUDA build with MKL-DNN CPU ops enabled.
    , 'WIN_GPU_MKLDNN': (
        '-DUSE_CUDA=ON '
        '-DUSE_CUDNN=ON '
        '-DUSE_NVRTC=ON '
        '-DUSE_OPENCV=ON '
        '-DUSE_OPENMP=ON '
        '-DUSE_BLAS=open '
        '-DUSE_LAPACK=ON '
        '-DUSE_DIST_KVSTORE=OFF '
        '-DCUDA_ARCH_NAME=Manual '
        '-DCUDA_ARCH_BIN=52 '
        '-DCUDA_ARCH_PTX=52 '
        '-DUSE_MKLDNN=ON '
        '-DCMAKE_CXX_FLAGS="/FS /MD /O2 /Ob2" '
        '-DCMAKE_BUILD_TYPE=Release')
}
def windows_build(args):
    """Configure with CMake under the vcvars environment, build with jom, then package."""
    logging.info("Using vcvars environment:\n{}".format(args.vcvars))
    build_dir = args.output
    os.makedirs(build_dir, exist_ok=True)
    mxnet_root = get_mxnet_root()
    logging.info("Found MXNet root: {}".format(mxnet_root))
    with remember_cwd():
        os.chdir(build_dir)
        # Run cmake inside the vcvars shell so the MSVC toolchain is on PATH.
        generate_cmd = "\"{}\" && cmake -G \"NMake Makefiles JOM\" {} {}".format(args.vcvars,
                                                                                CMAKE_FLAGS[args.flavour],
                                                                                mxnet_root)
        logging.info("Generating project with CMake:\n{}".format(generate_cmd))
        check_call(generate_cmd, shell=True)
        build_cmd = "\"{}\" && jom".format(args.vcvars)
        logging.info("Building with jom:\n{}".format(build_cmd))
        start = int(time.time())
        check_call(build_cmd, shell=True)
        logging.info("Build flavour: {} complete in directory: \"{}\"".format(args.flavour, os.path.abspath(build_dir)))
        logging.info("Build took {}".format(datetime.timedelta(seconds=int(time.time() - start))))
    windows_package(args)
def windows_package(args):
    """Collect built libs/dlls, python bindings and headers into a 7z package."""
    pkgfile = 'windows_package.7z'
    pkgdir = os.path.abspath('windows_package')
    logging.info("Packaging libraries and headers in package: %s", pkgfile)
    lib_dest = os.path.abspath(os.path.join(pkgdir, 'lib'))
    with remember_cwd():
        os.chdir(args.output)
        logging.info("Looking for static libraries and dlls in: \"%s", os.getcwd())
        # Recursively gather every import library and dll under the build dir.
        lib_files = list(glob.iglob('**/*.lib', recursive=True))
        dll_files = list(glob.iglob('**/*.dll', recursive=True))
        os.makedirs(lib_dest, exist_ok=True)
        for lib_file in lib_files:
            logging.info("packing lib: %s", lib_file)
            shutil.copy(lib_file, lib_dest)
        for dll_file in dll_files:
            logging.info("packing dll: %s", dll_file)
            shutil.copy(dll_file, lib_dest)
        os.chdir(get_mxnet_root())
        logging.info('packing python bindings')
        copy_tree('python', os.path.join(pkgdir, 'python'))
        logging.info('packing headers')
        copy_tree('include', os.path.join(pkgdir, 'include'))
        logging.info("Compressing package: %s", pkgfile)
        check_call(['7z', 'a', pkgfile, pkgdir])
def nix_build(args):
    """Minimal CPU-only Debug configure-and-build with CMake + Ninja (Linux/macOS)."""
    build_dir = args.output
    os.makedirs(build_dir, exist_ok=True)
    with remember_cwd():
        os.chdir(build_dir)
        logging.info("Generating project with CMake")
        # CPU-only debug configuration; Ninja performs the actual build.
        check_call("cmake \
    -DUSE_CUDA=OFF \
    -DUSE_OPENCV=OFF \
    -DUSE_OPENMP=OFF \
    -DCMAKE_BUILD_TYPE=Debug \
    -GNinja ..", shell=True)
        check_call("ninja", shell=True)
def main():
    """Entry point: parse arguments and dispatch the platform-specific build.

    Returns a process exit status (0 on success, 1 for an unsupported
    platform) suitable for ``sys.exit``.
    """
    logging.getLogger().setLevel(logging.INFO)
    logging.basicConfig(format='%(asctime)-15s %(message)s')
    logging.info("MXNet Windows build helper")

    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output",
                        help="output directory",
                        default='build',
                        type=str)

    parser.add_argument("--vcvars",
                        help="vcvars batch file location, typically inside vs studio install dir",
                        default=KNOWN_VCVARS['VS 2015'],
                        type=str)

    parser.add_argument("--arch",
                        help="architecture",
                        default='x64',
                        type=str)

    parser.add_argument("-f", "--flavour",
                        help="build flavour",
                        default='WIN_CPU',
                        choices=[x.name for x in BuildFlavour],
                        type=str)

    args = parser.parse_args()
    logging.info("Build flavour: %s", args.flavour)

    system = platform.system()
    if system == 'Windows':
        logging.info("Detected Windows platform")
        # Provide default dependency locations unless the caller overrides
        # them in the environment.
        if 'OpenBLAS_HOME' not in os.environ:
            os.environ["OpenBLAS_HOME"] = "C:\\Program Files\\OpenBLAS-v0.2.19"
        if 'OpenCV_DIR' not in os.environ:
            os.environ["OpenCV_DIR"] = "C:\\Program Files\\OpenCV-v3.4.1\\build"
        if 'CUDA_PATH' not in os.environ:
            os.environ["CUDA_PATH"] = "C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.2"
        if 'MKL_ROOT' not in os.environ:
            os.environ["MKL_ROOT"] = "C:\\Program Files (x86)\\IntelSWTools\\compilers_and_libraries\\windows\\mkl"
        windows_build(args)

    elif system == 'Linux' or system == 'Darwin':
        nix_build(args)

    else:
        logging.error("Don't know how to build for {} yet".format(platform.system()))
        # Bug fix: an unsupported platform previously returned 0 (success);
        # report failure to the shell instead.
        return 1

    return 0
# Propagate main()'s return value as the process exit status.
if __name__ == '__main__':
    sys.exit(main())
| [
"#!/usr/bin/env python\n",
"# -*- coding: utf-8 -*-\n",
"\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n",
"# or more contributor license agreements. See the NOTICE file\n",
"# distributed with this work for additional information\n",
"# regarding copyright ownership. The ASF licenses this file\n",
"# to you under the Apache License, Version 2.0 (the\n",
"# \"License\"); you may not use this file except in compliance\n",
"# with the License. You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing,\n",
"# software distributed under the License is distributed on an\n",
"# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n",
"# KIND, either express or implied. See the License for the\n",
"# specific language governing permissions and limitations\n",
"# under the License.\n",
"\n",
"\"\"\"User friendly / multi platform builder script\"\"\"\n",
"\n",
"import argparse\n",
"import datetime\n",
"import glob\n",
"import logging\n",
"import os\n",
"import platform\n",
"import shutil\n",
"import sys\n",
"import time\n",
"from distutils.dir_util import copy_tree\n",
"from enum import Enum\n",
"from subprocess import check_call\n",
"\n",
"from util import *\n",
"\n",
"KNOWN_VCVARS = {\n",
" 'VS 2015': r'C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\bin\\x86_amd64\\vcvarsx86_amd64.bat',\n",
" 'VS 2017': r'C:\\Program Files (x86)\\Microsoft Visual Studio\\2017\\Community\\VC\\Auxiliary\\Build\\vcvarsx86_amd64.bat'\n",
"}\n",
"\n",
"\n",
"class BuildFlavour(Enum):\n",
" WIN_CPU = 'WIN_CPU'\n",
" WIN_CPU_MKLDNN = 'WIN_CPU_MKLDNN'\n",
" WIN_CPU_MKLDNN_MKL = 'WIN_CPU_MKLDNN_MKL'\n",
" WIN_CPU_MKL = 'WIN_CPU_MKL'\n",
" WIN_GPU = 'WIN_GPU'\n",
" WIN_GPU_MKLDNN = 'WIN_GPU_MKLDNN'\n",
"\n",
"\n",
"CMAKE_FLAGS = {\n",
" 'WIN_CPU': (\n",
" '-DUSE_CUDA=OFF '\n",
" '-DUSE_CUDNN=OFF '\n",
" '-DUSE_NVRTC=OFF '\n",
" '-DUSE_OPENCV=ON '\n",
" '-DUSE_OPENMP=ON '\n",
" '-DUSE_BLAS=open '\n",
" '-DUSE_LAPACK=ON '\n",
" '-DUSE_DIST_KVSTORE=OFF '\n",
" '-DBUILD_CPP_EXAMPLES=ON '\n",
" '-DUSE_MKL_IF_AVAILABLE=OFF '\n",
" '-DCMAKE_BUILD_TYPE=Release')\n",
"\n",
" , 'WIN_CPU_MKLDNN': (\n",
" '-DUSE_CUDA=OFF '\n",
" '-DUSE_CUDNN=OFF '\n",
" '-DUSE_NVRTC=OFF '\n",
" '-DUSE_OPENCV=ON '\n",
" '-DUSE_OPENMP=ON '\n",
" '-DUSE_BLAS=open '\n",
" '-DUSE_LAPACK=ON '\n",
" '-DUSE_DIST_KVSTORE=OFF '\n",
" '-DUSE_MKL_IF_AVAILABLE=ON '\n",
" '-DUSE_MKLDNN=ON '\n",
" '-DCMAKE_BUILD_TYPE=Release')\n",
"\n",
" , 'WIN_CPU_MKLDNN_MKL': (\n",
" '-DUSE_CUDA=OFF '\n",
" '-DUSE_CUDNN=OFF '\n",
" '-DUSE_NVRTC=OFF '\n",
" '-DUSE_OPENCV=ON '\n",
" '-DUSE_OPENMP=ON '\n",
" '-DUSE_BLAS=mkl '\n",
" '-DUSE_LAPACK=ON '\n",
" '-DUSE_DIST_KVSTORE=OFF '\n",
" '-DUSE_MKL_IF_AVAILABLE=ON '\n",
" '-DUSE_MKLDNN=ON '\n",
" '-DCMAKE_BUILD_TYPE=Release')\n",
"\n",
" , 'WIN_CPU_MKL': (\n",
" '-DUSE_CUDA=OFF '\n",
" '-DUSE_CUDNN=OFF '\n",
" '-DUSE_NVRTC=OFF '\n",
" '-DUSE_OPENCV=ON '\n",
" '-DUSE_OPENMP=ON '\n",
" '-DUSE_BLAS=mkl '\n",
" '-DUSE_LAPACK=ON '\n",
" '-DUSE_DIST_KVSTORE=OFF '\n",
" '-DUSE_MKL_IF_AVAILABLE=ON '\n",
" '-DUSE_MKLDNN=OFF '\n",
" '-DCMAKE_BUILD_TYPE=Release')\n",
"\n",
" , 'WIN_GPU': (\n",
" '-DUSE_CUDA=ON '\n",
" '-DUSE_CUDNN=ON '\n",
" '-DUSE_NVRTC=ON '\n",
" '-DUSE_OPENCV=ON '\n",
" '-DUSE_OPENMP=ON '\n",
" '-DUSE_BLAS=open '\n",
" '-DUSE_LAPACK=ON '\n",
" '-DUSE_DIST_KVSTORE=OFF '\n",
" '-DCUDA_ARCH_NAME=Manual '\n",
" '-DCUDA_ARCH_BIN=52 '\n",
" '-DCUDA_ARCH_PTX=52 '\n",
" '-DCMAKE_CXX_FLAGS=\"/FS /MD /O2 /Ob2\" '\n",
" '-DUSE_MKL_IF_AVAILABLE=OFF '\n",
" '-DCMAKE_BUILD_TYPE=Release')\n",
"\n",
" , 'WIN_GPU_MKLDNN': (\n",
" '-DUSE_CUDA=ON '\n",
" '-DUSE_CUDNN=ON '\n",
" '-DUSE_NVRTC=ON '\n",
" '-DUSE_OPENCV=ON '\n",
" '-DUSE_OPENMP=ON '\n",
" '-DUSE_BLAS=open '\n",
" '-DUSE_LAPACK=ON '\n",
" '-DUSE_DIST_KVSTORE=OFF '\n",
" '-DCUDA_ARCH_NAME=Manual '\n",
" '-DCUDA_ARCH_BIN=52 '\n",
" '-DCUDA_ARCH_PTX=52 '\n",
" '-DUSE_MKLDNN=ON '\n",
" '-DCMAKE_CXX_FLAGS=\"/FS /MD /O2 /Ob2\" '\n",
" '-DCMAKE_BUILD_TYPE=Release')\n",
"\n",
"}\n",
"\n",
"\n",
"def windows_build(args):\n",
" logging.info(\"Using vcvars environment:\\n{}\".format(args.vcvars))\n",
"\n",
" path = args.output\n",
" os.makedirs(path, exist_ok=True)\n",
"\n",
" mxnet_root = get_mxnet_root()\n",
" logging.info(\"Found MXNet root: {}\".format(mxnet_root))\n",
"\n",
" with remember_cwd():\n",
" os.chdir(path)\n",
" cmd = \"\\\"{}\\\" && cmake -G \\\"NMake Makefiles JOM\\\" {} {}\".format(args.vcvars,\n",
" CMAKE_FLAGS[args.flavour],\n",
" mxnet_root)\n",
" logging.info(\"Generating project with CMake:\\n{}\".format(cmd))\n",
" check_call(cmd, shell=True)\n",
"\n",
" cmd = \"\\\"{}\\\" && jom\".format(args.vcvars)\n",
" logging.info(\"Building with jom:\\n{}\".format(cmd))\n",
"\n",
" t0 = int(time.time())\n",
" check_call(cmd, shell=True)\n",
"\n",
" logging.info(\"Build flavour: {} complete in directory: \\\"{}\\\"\".format(args.flavour, os.path.abspath(path)))\n",
" logging.info(\"Build took {}\".format(datetime.timedelta(seconds=int(time.time() - t0))))\n",
" windows_package(args)\n",
"\n",
"\n",
"def windows_package(args):\n",
" pkgfile = 'windows_package.7z'\n",
" pkgdir = os.path.abspath('windows_package')\n",
" logging.info(\"Packaging libraries and headers in package: %s\", pkgfile)\n",
" j = os.path.join\n",
" pkgdir_lib = os.path.abspath(j(pkgdir, 'lib'))\n",
" with remember_cwd():\n",
" os.chdir(args.output)\n",
" logging.info(\"Looking for static libraries and dlls in: \\\"%s\", os.getcwd())\n",
" libs = list(glob.iglob('**/*.lib', recursive=True))\n",
" dlls = list(glob.iglob('**/*.dll', recursive=True))\n",
" os.makedirs(pkgdir_lib, exist_ok=True)\n",
" for lib in libs:\n",
" logging.info(\"packing lib: %s\", lib)\n",
" shutil.copy(lib, pkgdir_lib)\n",
" for dll in dlls:\n",
" logging.info(\"packing dll: %s\", dll)\n",
" shutil.copy(dll, pkgdir_lib)\n",
" os.chdir(get_mxnet_root())\n",
" logging.info('packing python bindings')\n",
" copy_tree('python', j(pkgdir, 'python'))\n",
" logging.info('packing headers')\n",
" copy_tree('include', j(pkgdir, 'include'))\n",
" logging.info(\"Compressing package: %s\", pkgfile)\n",
" check_call(['7z', 'a', pkgfile, pkgdir])\n",
"\n",
"\n",
"def nix_build(args):\n",
" path = args.output\n",
" os.makedirs(path, exist_ok=True)\n",
" with remember_cwd():\n",
" os.chdir(path)\n",
" logging.info(\"Generating project with CMake\")\n",
" check_call(\"cmake \\\n",
" -DUSE_CUDA=OFF \\\n",
" -DUSE_OPENCV=OFF \\\n",
" -DUSE_OPENMP=OFF \\\n",
" -DCMAKE_BUILD_TYPE=Debug \\\n",
" -GNinja ..\", shell=True)\n",
" check_call(\"ninja\", shell=True)\n",
"\n",
"\n",
"def main():\n",
" logging.getLogger().setLevel(logging.INFO)\n",
" logging.basicConfig(format='%(asctime)-15s %(message)s')\n",
" logging.info(\"MXNet Windows build helper\")\n",
"\n",
" parser = argparse.ArgumentParser()\n",
" parser.add_argument(\"-o\", \"--output\",\n",
" help=\"output directory\",\n",
" default='build',\n",
" type=str)\n",
"\n",
" parser.add_argument(\"--vcvars\",\n",
" help=\"vcvars batch file location, typically inside vs studio install dir\",\n",
" default=KNOWN_VCVARS['VS 2015'],\n",
" type=str)\n",
"\n",
" parser.add_argument(\"--arch\",\n",
" help=\"architecture\",\n",
" default='x64',\n",
" type=str)\n",
"\n",
" parser.add_argument(\"-f\", \"--flavour\",\n",
" help=\"build flavour\",\n",
" default='WIN_CPU',\n",
" choices=[x.name for x in BuildFlavour],\n",
" type=str)\n",
"\n",
" args = parser.parse_args()\n",
" logging.info(\"Build flavour: %s\", args.flavour)\n",
"\n",
" system = platform.system()\n",
" if system == 'Windows':\n",
" logging.info(\"Detected Windows platform\")\n",
" if 'OpenBLAS_HOME' not in os.environ:\n",
" os.environ[\"OpenBLAS_HOME\"] = \"C:\\\\Program Files\\\\OpenBLAS-v0.2.19\"\n",
" if 'OpenCV_DIR' not in os.environ:\n",
" os.environ[\"OpenCV_DIR\"] = \"C:\\\\Program Files\\\\OpenCV-v3.4.1\\\\build\"\n",
" if 'CUDA_PATH' not in os.environ:\n",
" os.environ[\"CUDA_PATH\"] = \"C:\\\\Program Files\\\\NVIDIA GPU Computing Toolkit\\\\CUDA\\\\v9.2\"\n",
" if 'MKL_ROOT' not in os.environ:\n",
" os.environ[\"MKL_ROOT\"] = \"C:\\\\Program Files (x86)\\\\IntelSWTools\\\\compilers_and_libraries\\\\windows\\\\mkl\"\n",
" windows_build(args)\n",
"\n",
" elif system == 'Linux' or system == 'Darwin':\n",
" nix_build(args)\n",
"\n",
" else:\n",
" logging.error(\"Don't know how to build for {} yet\".format(platform.system()))\n",
"\n",
" return 0\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" sys.exit(main())\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0.008403361344537815,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0.010101010101010102,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008620689655172414,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0.04,
0.05555555555555555,
0,
0,
0.024096385542168676,
0.024390243902439025,
0.05555555555555555,
0,
0,
0.034482758620689655,
0.043478260869565216,
0.05555555555555555,
0,
0,
0.03333333333333333,
0.037037037037037035,
0.020833333333333332,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0.01,
0,
0.008620689655172414,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
1
] | 265 | 0.006457 | false |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Reflection;
using PrettyGood.Util;
using System.Xml;
using System.Globalization;
namespace PrettyGood.LastFm
{
    /// <summary>Minimal client for the Last.fm web-service API (v2.0).</summary>
    public class LastFm
    {
        /// <summary>Last.fm API key, sent with every request as "api_key".</summary>
        private readonly string api;
        // Encoding handed (by ref) to Web.FetchString in call(); starts as UTF-32.
        // NOTE(review): presumably FetchString may update it from the response —
        // confirm against Web.FetchString's contract.
        private Encoding encoding = Encoding.UTF32;

        /// <summary>
        /// Creates the client. Every ApiItem member (artist, album) is located
        /// via reflection and given a back-reference to this instance.
        /// </summary>
        public LastFm(string api)
        {
            this.api = api;
            // Field initializers run before the constructor body, so the
            // artist/album fields declared below already exist here.
            foreach (ApiItem item in Reflect.MembersOf<ApiItem, LastFm>(this))
            {
                item.app = this;
            }
        }

        /// <summary>Base class for endpoint groups; forwards requests to the owning client.</summary>
        public class ApiItem
        {
            internal LastFm app;
            internal XmlElement call(Arguments a)
            {
                return app.call(a);
            }
        }

        /// <summary>
        /// All cultures, de-duplicated by two-letter ISO language code
        /// (the first culture seen for each code wins).
        /// </summary>
        public static IEnumerable<System.Globalization.CultureInfo> Languages
        {
            get
            {
                Dictionary<string, CultureInfo> infos = new Dictionary<string, CultureInfo>();
                foreach( CultureInfo ci in CultureInfo.GetCultures(CultureTypes.AllCultures) )
                {
                    if (false == string.IsNullOrEmpty(ci.TwoLetterISOLanguageName) && false == infos.ContainsKey(ci.TwoLetterISOLanguageName) )
                    {
                        infos.Add(ci.TwoLetterISOLanguageName, ci);
                        yield return ci;
                    }
                }
            }
        }

        /// <summary>artist.* API methods.</summary>
        public class Artist : ApiItem
        {
            /// <summary>Calls artist.getInfo for the given name / MusicBrainz id.</summary>
            public ArtistInfo getInfo(string artist, string mbid, System.Globalization.CultureInfo lang)
            {
                XmlElement el = call(new Arguments().rarg("method", "artist.getInfo").arg("artist", artist).arg("mbid", mbid).lang(lang));
                return new ArtistInfo(el);
            }

            /// <summary>Calls artist.search and yields each artist in the result set.</summary>
            public IEnumerable<FoundArtist> search(string artist, string mbid, System.Globalization.CultureInfo lang)
            {
                XmlElement el = call(new Arguments().rarg("method", "artist.search").arg("artist", artist).arg("mbid", mbid).lang(lang));
                XmlElement matches = Xml.GetFirstChild( Xml.GetFirstChild(el, "results") , "artistmatches");
                foreach (XmlElement ael in Xml.ElementsNamed(matches, "artist"))
                {
                    yield return new FoundArtist(ael);
                }
            }

            /// <summary>Convenience overload: fetch full info for a search result.</summary>
            public ArtistInfo getInfo(FoundArtist artist, System.Globalization.CultureInfo lang)
            {
                return getInfo(artist.name, artist.mbid, lang);
            }
        }

        /// <summary>album.* API methods.</summary>
        public class Album : ApiItem
        {
            /// <summary>Calls album.search and yields each album in the result set.</summary>
            public IEnumerable<FoundAlbum> search(string album)
            {
                XmlElement el = call(new Arguments().rarg("method", "album.search").arg("album", album));
                XmlElement matches = Xml.GetFirstChild(Xml.GetFirstChild(el, "results"), "albummatches");
                foreach (XmlElement ael in Xml.ElementsNamed(matches, "album"))
                {
                    yield return new FoundAlbum(ael);
                }
            }
        }

        /// <summary>Builds the request URL: API endpoint plus URL-escaped query arguments.</summary>
        internal static string buildUrl(Arguments a)
        {
            string args = new StringListCombiner("&")
                .combineFromEnumerable(
                    Util.CSharp.Convert(
                        a.Args, k => k.Key + "=" + Web.Escape(k.Value)
                        )
                    );
            return "http://ws.audioscrobbler.com/2.0/?" + args;
        }

        /// <summary>artist.* endpoint group (wired to this client in the constructor).</summary>
        public Artist artist = new Artist();
        /// <summary>album.* endpoint group (wired to this client in the constructor).</summary>
        public Album album = new Album();

        /// <summary>
        /// Performs one API request: adds the api_key, fetches the URL, parses the
        /// response XML and returns the "lfm" root element.
        /// </summary>
        internal XmlElement call(Arguments a)
        {
            string url = buildUrl(a.rarg("api_key", api));
            string source = Web.FetchString(url, ref encoding);
            XmlElement root = Xml.Open(Xml.FromSource(source), "lfm");
            ThrowIfError(root);
            return root;
        }

        /// <summary>Throws Error when the response's status attribute is not "ok".</summary>
        private static void ThrowIfError(XmlElement root)
        {
            string status = Xml.GetAttributeString(root, "status");
            if (status != "ok") throw new Error(root);
        }
    }
}
| [
"using System;\n",
"using System.Collections.Generic;\n",
"using System.Linq;\n",
"using System.Text;\n",
"using System.Reflection;\n",
"using PrettyGood.Util;\n",
"using System.Xml;\n",
"using System.Globalization;\n",
"\n",
"namespace PrettyGood.LastFm\n",
"{\n",
" public class LastFm\n",
" {\n",
" private readonly string api;\n",
" private Encoding encoding = Encoding.UTF32;\n",
"\n",
" public LastFm(string api)\n",
" {\n",
" this.api = api;\n",
" foreach (ApiItem item in Reflect.MembersOf<ApiItem, LastFm>(this))\n",
" {\n",
" item.app = this;\n",
" }\n",
" }\n",
"\n",
" public class ApiItem\n",
" {\n",
" internal LastFm app;\n",
" internal XmlElement call(Arguments a)\n",
" {\n",
" return app.call(a);\n",
" }\n",
" }\n",
"\n",
" public static IEnumerable<System.Globalization.CultureInfo> Languages\n",
" {\n",
" get\n",
" {\n",
" Dictionary<string, CultureInfo> infos = new Dictionary<string, CultureInfo>();\n",
" foreach( CultureInfo ci in CultureInfo.GetCultures(CultureTypes.AllCultures) )\n",
" {\n",
" if (false == string.IsNullOrEmpty(ci.TwoLetterISOLanguageName) && false == infos.ContainsKey(ci.TwoLetterISOLanguageName) )\n",
" {\n",
" infos.Add(ci.TwoLetterISOLanguageName, ci);\n",
" yield return ci;\n",
" }\n",
" }\n",
" }\n",
" }\n",
"\n",
" public class Artist : ApiItem\n",
" {\n",
" public ArtistInfo getInfo(string artist, string mbid, System.Globalization.CultureInfo lang)\n",
" {\n",
" XmlElement el = call(new Arguments().rarg(\"method\", \"artist.getInfo\").arg(\"artist\", artist).arg(\"mbid\", mbid).lang(lang));\n",
" return new ArtistInfo(el);\n",
" }\n",
"\n",
" public IEnumerable<FoundArtist> search(string artist, string mbid, System.Globalization.CultureInfo lang)\n",
" {\n",
" XmlElement el = call(new Arguments().rarg(\"method\", \"artist.search\").arg(\"artist\", artist).arg(\"mbid\", mbid).lang(lang));\n",
" XmlElement matches = Xml.GetFirstChild( Xml.GetFirstChild(el, \"results\") , \"artistmatches\");\n",
" foreach (XmlElement ael in Xml.ElementsNamed(matches, \"artist\"))\n",
" {\n",
" yield return new FoundArtist(ael);\n",
" }\n",
" }\n",
"\n",
" public ArtistInfo getInfo(FoundArtist artist, System.Globalization.CultureInfo lang)\n",
" {\n",
" return getInfo(artist.name, artist.mbid, lang);\n",
" }\n",
" }\n",
"\n",
" public class Album : ApiItem\n",
" {\n",
" public IEnumerable<FoundAlbum> search(string album)\n",
" {\n",
" XmlElement el = call(new Arguments().rarg(\"method\", \"album.search\").arg(\"album\", album));\n",
" XmlElement matches = Xml.GetFirstChild(Xml.GetFirstChild(el, \"results\"), \"albummatches\");\n",
" foreach (XmlElement ael in Xml.ElementsNamed(matches, \"album\"))\n",
" {\n",
" yield return new FoundAlbum(ael);\n",
" }\n",
" }\n",
" }\n",
"\n",
" internal static string buildUrl(Arguments a)\n",
" {\n",
" string args = new StringListCombiner(\"&\")\n",
" .combineFromEnumerable(\n",
" Util.CSharp.Convert(\n",
" a.Args, k => k.Key + \"=\" + Web.Escape(k.Value)\n",
" )\n",
" );\n",
" return \"http://ws.audioscrobbler.com/2.0/?\" + args;\n",
" }\n",
"\n",
" public Artist artist = new Artist();\n",
" public Album album = new Album();\n",
"\n",
" internal XmlElement call(Arguments a)\n",
" {\n",
" string url = buildUrl(a.rarg(\"api_key\", api));\n",
" string source = Web.FetchString(url, ref encoding);\n",
" XmlElement root = Xml.Open(Xml.FromSource(source), \"lfm\");\n",
" ThrowIfError(root);\n",
" return root;\n",
" }\n",
"\n",
" private static void ThrowIfError(XmlElement root)\n",
" {\n",
" string status = Xml.GetAttributeString(root, \"status\");\n",
" if (status != \"ok\") throw new Error(root);\n",
" }\n",
" }\n",
"}\n"
] | [
0.07142857142857142,
0.029411764705882353,
0.05263157894736842,
0.05263157894736842,
0.04,
0.043478260869565216,
0.05555555555555555,
0.03571428571428571,
0,
0,
0,
0,
0,
0.02702702702702703,
0.019230769230769232,
0,
0,
0,
0.03571428571428571,
0.0379746835443038,
0,
0.06060606060606061,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0.05555555555555555,
0,
0,
0,
0.02564102564102564,
0,
0,
0,
0.06315789473684211,
0.031578947368421054,
0,
0.020833333333333332,
0,
0.014705882352941176,
0.04878048780487805,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0.009523809523809525,
0,
0.014388489208633094,
0.046511627906976744,
0,
0,
0.025423728813559324,
0,
0.014492753623188406,
0.03669724770642202,
0.024691358024691357,
0,
0.03636363636363636,
0,
0,
0,
0.010309278350515464,
0,
0.03125,
0,
0,
0,
0.02702702702702703,
0,
0.03125,
0,
0.018867924528301886,
0.018867924528301886,
0.0125,
0,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0.02666666666666667,
0,
0.043478260869565216,
0.03125,
0,
0,
0.022222222222222223,
0.023809523809523808,
0,
0,
0,
0.01694915254237288,
0.015625,
0.014084507042253521,
0.03125,
0.08,
0,
0,
0,
0,
0.014705882352941176,
0.03636363636363636,
0,
0,
0
] | 117 | 0.014144 | false |
#!/usr/bin/python
from __future__ import print_function
import os
import sys
import errno
import argparse
import tempfile
import subprocess
import shlex
import shutil
import stat
import json
cmdLog = None  # global command-log file handle; opened (binary, line-buffered) in main()
# Derived from http://stackoverflow.com/a/4417735
def execute(command):
    """Run *command*, mirroring its combined stdout/stderr into cmdLog.

    Prints 'SUCCESS' on a zero exit status and returns the captured output
    lines (trailing CR/LF stripped); raises ValueError on a non-zero exit.

    NOTE(review): written for Python 2 — p.stdout yields bytes here, and
    line.rstrip('\r\n') / cmdLog.write(command + '\n') mix text with the
    binary log file; running under Python 3 would need encode/decode fixes.
    """
    cmdLog.write(command + '\n')
    # stderr is merged into stdout so the log shows output in order.
    p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    lines_iterator = iter(p.stdout.readline, b"")
    savedOutput = []
    for line in lines_iterator:
        savedOutput.append(line.rstrip('\r\n'))
        cmdLog.write(line) # yield line
    cmdLog.write('\n')

    if p.wait() == 0:
        print('SUCCESS')
    else:
        raise ValueError('Command returned with non-zero value.')

    return savedOutput
# Derived from http://stackoverflow.com/a/22331852
def copytree(src, dst, symlinks = False, ignore = None):
    """Recursively copy *src* into *dst*, merging into existing directories.

    Unlike shutil.copytree, *dst* may already exist.  *ignore* follows the
    shutil.ignore_patterns callback convention; with symlinks=True, links
    are recreated (and their mode copied where os.lchmod exists) instead of
    being followed.
    """
    if not os.path.exists(dst):
        os.makedirs(dst)
        shutil.copystat(src, dst)
    entries = os.listdir(src)
    if ignore:
        excluded = ignore(src, entries)
        entries = [name for name in entries if name not in excluded]
    for name in entries:
        src_path = os.path.join(src, name)
        dst_path = os.path.join(dst, name)
        if symlinks and os.path.islink(src_path):
            # Recreate the link rather than copying its target.
            if os.path.lexists(dst_path):
                os.remove(dst_path)
            os.symlink(os.readlink(src_path), dst_path)
            try:
                mode = stat.S_IMODE(os.lstat(src_path).st_mode)
                os.lchmod(dst_path, mode)
            except:
                pass # lchmod not available
        elif os.path.isdir(src_path):
            copytree(src_path, dst_path, symlinks, ignore)
        else:
            shutil.copy2(src_path, dst_path)
# # Derived from http://stackoverflow.com/a/1889686
def remove_readonly(func, path, excinfo):
    """shutil.rmtree onerror handler: clear the read-only bit on *path*
    and retry the failed operation *func* (e.g. os.remove)."""
    os.chmod(path, stat.S_IWRITE)
    func(path)
# Wrap os.makedirs to not throw exception when directory already exists
def makedirs(path):
    """Create *path* (including parents); an already-existing directory is
    not an error, any other OSError propagates."""
    try:
        os.makedirs(path)
    except OSError as e:
        if not (e.errno == errno.EEXIST and os.path.isdir(path)):
            raise
class getsource:
    """Build step that fetches a test's source code (e.g. a git clone).

    Required keys in *actiondict*: 'cmd' (shell command that fetches the
    source) and 'output' (the directory that command creates).
    """

    def __init__(self, globalargs, testname, actiondict):
        # Initialize attributes
        self.globalargs = globalargs
        self.testname = testname
        # Bug fix: default the required settings so that a missing key
        # raises the intended ValueError below, not an AttributeError.
        self.cmd = None
        self.output = None
        for k, v in actiondict.items():
            setattr(self, k, v)

        # Check for important values (messages now have a space after the name)
        if (not self.cmd):
            raise ValueError(testname + ' test does not specify a getsource command.')
        if (not self.output):
            raise ValueError(testname + ' test does not specify an output directory.')

    def execute(self, clean):
        """Fetch the source into self.output and chdir into it.

        With clean=True an existing checkout is removed first (clearing
        read-only files via remove_readonly).
        """
        os.chdir(self.globalargs.srcdir)

        if (os.path.isdir(self.output) and clean):
            print('Removing ' + self.testname + ' source directory... ', end='')
            shutil.rmtree(self.output, onerror=remove_readonly)
            print('SUCCESS')

        if (not os.path.isdir(self.output)):
            print('Acquiring ' + self.testname + ' source code... ', end='')
            execute(self.cmd)

        print('Entering the ' + self.testname + ' repository directory... ', end='')
        os.chdir(self.output)
        print('SUCCESS')
class vsimport:
    """Build step that runs vsimporter.exe to generate Visual Studio projects.

    Optional keys in *actiondict*: 'dir' (subdirectory of the source tree to
    run in) and 'args' (extra vsimporter command-line arguments).
    """

    def __init__(self, globalargs, testname, actiondict):
        # Initialize attributes
        self.globalargs = globalargs
        self.testname = testname
        self.args = ''
        # Bug fix: default 'dir' as the msbuild step already does —
        # previously a description without a 'dir' key crashed with
        # AttributeError on the check below.
        self.dir = ''
        for k, v in actiondict.items():
            setattr(self, k, v)

        if (self.dir):
            self.dir = os.path.join(globalargs.srcdir, self.dir)

    def execute(self, clean):
        """Generate VS projects, then drop a test-signing .pfx next to each
        generated .vcxproj that has a Package.appxmanifest."""
        if (self.dir):
            print('Entering the ' + self.testname + ' import directory... ', end='')
            os.chdir(self.dir)
            print('SUCCESS')

        print('Generating ' + self.testname + ' VS projects... ', end='')
        vsimporter = os.path.join(sys.path[0], '..\\..\\bin\\vsimporter.exe')
        output = execute(vsimporter + ' ' + self.args)

        print('Copying test signing certificates...', end='')
        for line in output:
            # Only lines of the form "Generated <path>.vcxproj" matter.
            if not (line.startswith('Generated ') and line.endswith('.vcxproj')):
                continue

            projectDir = os.path.dirname(line[10:])
            projectName = os.path.splitext(os.path.basename(line))[0]
            if os.path.isfile(os.path.join(projectDir, 'Package.appxmanifest')):
                shutil.copyfile(os.path.join(sys.path[0], 'TemporaryKey.pfx'), os.path.join(projectDir, projectName + '_TemporaryKey.pfx'))
        print('SUCCESS')
class msbuild:
    """Build step that compiles the generated projects with MSBuild and
    collects the produced AppX packages.

    Optional keys in *actiondict*: 'dir' (subdirectory of the source tree
    to build in) and 'args' (extra MSBuild command-line arguments).
    """

    def __init__(self, globalargs, testname, actiondict):
        self.globalargs = globalargs
        self.testname = testname
        self.args = ''
        self.dir = ''
        for key, value in actiondict.items():
            setattr(self, key, value)
        if self.dir:
            self.dir = os.path.join(globalargs.srcdir, self.dir)

    def execute(self, clean):
        """Run MSBuild, then copy each directory under AppPackages that
        contains a .appx file into the global AppX output directory."""
        if self.dir:
            print('Entering the ' + self.testname + ' build directory... ', end='')
            os.chdir(self.dir)
            print('SUCCESS')

        print('Building ' + self.testname + ' projects... ', end='')
        execute('MSBuild.exe ' + self.args)

        print('Copying ' + self.testname + ' AppX packages... ', end='')
        for pkg_root, subdirs, filenames in os.walk('AppPackages'):
            if any(name.endswith('.appx') for name in filenames):
                copytree(pkg_root, os.path.join(self.globalargs.appxdir, os.path.basename(pkg_root)))
                # A package directory was copied whole; don't descend into it.
                subdirs[:] = []
        print('SUCCESS')
def main(argv):
    """Parse arguments, run every test described in the JSON test file,
    and print a pass/fail summary.

    NOTE(review): *argv* is accepted but never used — argparse reads
    sys.argv directly.
    """
    # Get PATH from the environment
    envPath = os.environ.get("PATH", os.defpath)  # NOTE(review): unused

    # Set up argument parsing
    parser = argparse.ArgumentParser(description = 'Run WinObjC tests.')
    parser.add_argument('--testfile', default = os.path.join(sys.path[0], 'AutoAppBuild-tests.json'), type = argparse.FileType('r'), help = 'Test descriptions file.')
    parser.add_argument('--clean', default = False, action='store_true', help = 'Clean git repositories before buidling.')
    parser.add_argument('--srcdir', default = "src", help = 'Directory where tests will be cloned and built.')
    parser.add_argument('--appxdir', default = "appx", help = 'Destination directory for AppX packages.')
    args = parser.parse_args()

    # Create build directory
    args.srcdir = os.path.abspath(args.srcdir)
    makedirs(args.srcdir)

    # Create AppX directory
    args.appxdir = os.path.abspath(args.appxdir)
    makedirs(args.appxdir)

    # Open log file (binary, line-buffered) shared via the cmdLog global
    global cmdLog
    cmdLog = open(os.path.join(args.srcdir, 'CmdLog.txt'), 'wb', 1)

    # Read the JSON test descriptions
    tests = json.load(args.testfile)

    # Print info
    print('Test file:', args.testfile.name)
    print('Build directory: ', args.srcdir)
    print('AppX directory: ', args.appxdir)

    # Iterate over tests
    successCount = 0
    totalCount = 0
    for test in tests:
        # Deserialize build steps into objects: each step's 'action' names a
        # class in this module (getsource / vsimport / msbuild)
        actionObjects = []
        try:
            for step in test['buildSteps']:
                actionObj = globals()[step['action']](args, test['name'], step)
                actionObjects.append(actionObj)
            totalCount += 1
        except Exception as e:
            print('Failed to parse test description: ' + str(e))
            continue

        # Execute build steps; the first failing step aborts this test
        print()
        try:
            for action in actionObjects:
                action.execute(args.clean)
            successCount += 1
        except Exception as e:
            print('FAILURE')
            cmdLog.write(str(e))

    # Print results
    print()
    print('Results: ' + str(successCount) + '/' + str(totalCount))
# Script entry point.
if __name__ == "__main__":
    main(sys.argv)
"#!/usr/bin/python\n",
"\n",
"from __future__ import print_function\n",
"import os\n",
"import sys\n",
"import errno\n",
"import argparse\n",
"import tempfile\n",
"import subprocess\n",
"import shlex\n",
"import shutil\n",
"import stat\n",
"import json\n",
"\n",
"cmdLog = None\n",
"\n",
"# Derived from http://stackoverflow.com/a/4417735\n",
"def execute(command):\n",
" cmdLog.write(command + '\\n')\n",
" p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n",
" lines_iterator = iter(p.stdout.readline, b\"\")\n",
" savedOutput = []\n",
" for line in lines_iterator:\n",
" savedOutput.append(line.rstrip('\\r\\n'))\n",
" cmdLog.write(line) # yield line\n",
" cmdLog.write('\\n')\n",
"\n",
" if p.wait() == 0:\n",
" print('SUCCESS')\n",
" else:\n",
" raise ValueError('Command returned with non-zero value.')\n",
"\n",
" return savedOutput\n",
"\n",
"# Derived from http://stackoverflow.com/a/22331852\n",
"def copytree(src, dst, symlinks = False, ignore = None):\n",
" if not os.path.exists(dst):\n",
" os.makedirs(dst)\n",
" shutil.copystat(src, dst)\n",
" lst = os.listdir(src)\n",
" if ignore:\n",
" excl = ignore(src, lst)\n",
" lst = [x for x in lst if x not in excl]\n",
" for item in lst:\n",
" s = os.path.join(src, item)\n",
" d = os.path.join(dst, item)\n",
" if symlinks and os.path.islink(s):\n",
" if os.path.lexists(d):\n",
" os.remove(d)\n",
" os.symlink(os.readlink(s), d)\n",
" try:\n",
" st = os.lstat(s)\n",
" mode = stat.S_IMODE(st.st_mode)\n",
" os.lchmod(d, mode)\n",
" except:\n",
" pass # lchmod not available\n",
" elif os.path.isdir(s):\n",
" copytree(s, d, symlinks, ignore)\n",
" else:\n",
" shutil.copy2(s, d)\n",
"\n",
"# # Derived from http://stackoverflow.com/a/1889686\n",
"def remove_readonly(func, path, excinfo):\n",
" os.chmod(path, stat.S_IWRITE)\n",
" func(path)\n",
"\n",
"# Wrap os.makedirs to not throw exception when directory already exists\n",
"def makedirs(path):\n",
" try:\n",
" os.makedirs(path)\n",
" except OSError as e:\n",
" if e.errno == errno.EEXIST and os.path.isdir(path):\n",
" pass\n",
" else:\n",
" raise\n",
"\n",
"class getsource:\n",
" def __init__(self, globalargs, testname, actiondict):\n",
" # Initialize attributes\n",
" self.globalargs = globalargs\n",
" self.testname = testname\n",
" for k, v in actiondict.items():\n",
" setattr(self, k, v)\n",
"\n",
" # Check for important values\n",
" if (not self.cmd):\n",
" raise ValueError(testname + 'test does not specify a getsource command.')\n",
" if (not self.output):\n",
" raise ValueError(testname + 'test does not specify an output directory.')\n",
"\n",
" def execute(self, clean):\n",
" os.chdir(self.globalargs.srcdir)\n",
"\n",
" if (os.path.isdir(self.output) and clean):\n",
" print('Removing ' + self.testname + ' source directory... ', end='')\n",
" shutil.rmtree(self.output, onerror=remove_readonly)\n",
" print('SUCCESS')\n",
"\n",
" if (not os.path.isdir(self.output)):\n",
" print('Acquiring ' + self.testname + ' source code... ', end='')\n",
" execute(self.cmd)\n",
"\n",
" print('Entering the ' + self.testname + ' repository directory... ', end='')\n",
" os.chdir(self.output)\n",
" print('SUCCESS')\n",
"\n",
"class vsimport:\n",
" def __init__(self, globalargs, testname, actiondict):\n",
" # Initialize attributes\n",
" self.globalargs = globalargs\n",
" self.testname = testname\n",
" self.args = ''\n",
" for k, v in actiondict.items():\n",
" setattr(self, k, v)\n",
"\n",
" if (self.dir):\n",
" self.dir = os.path.join(globalargs.srcdir, self.dir)\n",
"\n",
" def execute(self, clean):\n",
" if (self.dir):\n",
" print('Entering the ' + self.testname + ' import directory... ', end='')\n",
" os.chdir(self.dir)\n",
" print('SUCCESS')\n",
"\n",
" print('Generating ' + self.testname + ' VS projects... ', end='')\n",
" vsimporter = os.path.join(sys.path[0], '..\\\\..\\\\bin\\\\vsimporter.exe')\n",
" output = execute(vsimporter + ' ' + self.args)\n",
"\n",
" print('Copying test signing certificates...', end='')\n",
" for line in output:\n",
" if not (line.startswith('Generated ') and line.endswith('.vcxproj')):\n",
" continue\n",
"\n",
" projectDir = os.path.dirname(line[10:])\n",
" projectName = os.path.splitext(os.path.basename(line))[0]\n",
" if os.path.isfile(os.path.join(projectDir, 'Package.appxmanifest')):\n",
" shutil.copyfile(os.path.join(sys.path[0], 'TemporaryKey.pfx'), os.path.join(projectDir, projectName + '_TemporaryKey.pfx'))\n",
" print('SUCCESS')\n",
"\n",
"class msbuild:\n",
" def __init__(self, globalargs, testname, actiondict):\n",
" # Initialize attributes\n",
" self.globalargs = globalargs\n",
" self.testname = testname\n",
" self.args = ''\n",
" self.dir = ''\n",
" for k, v in actiondict.items():\n",
" setattr(self, k, v)\n",
"\n",
" if (self.dir):\n",
" self.dir = os.path.join(globalargs.srcdir, self.dir)\n",
"\n",
" def execute(self, clean):\n",
" if (self.dir):\n",
" print('Entering the ' + self.testname + ' build directory... ', end='')\n",
" os.chdir(self.dir)\n",
" print('SUCCESS')\n",
"\n",
" print('Building ' + self.testname + ' projects... ', end='')\n",
" execute('MSBuild.exe ' + self.args)\n",
"\n",
" print('Copying ' + self.testname + ' AppX packages... ', end='')\n",
" for root, subFolders, files in os.walk('AppPackages'):\n",
" for file in files:\n",
" if file.endswith('.appx'):\n",
" copytree(root, os.path.join(self.globalargs.appxdir, os.path.basename(root)))\n",
" subFolders[:] = []\n",
" break;\n",
" print('SUCCESS')\n",
"\n",
"def main(argv):\n",
" # Get PATH from the environment\n",
" envPath = os.environ.get(\"PATH\", os.defpath)\n",
"\n",
" # Set up argument parsing\n",
" parser = argparse.ArgumentParser(description = 'Run WinObjC tests.')\n",
" parser.add_argument('--testfile', default = os.path.join(sys.path[0], 'AutoAppBuild-tests.json'), type = argparse.FileType('r'), help = 'Test descriptions file.')\n",
" parser.add_argument('--clean', default = False, action='store_true', help = 'Clean git repositories before buidling.')\n",
" parser.add_argument('--srcdir', default = \"src\", help = 'Directory where tests will be cloned and built.')\n",
" parser.add_argument('--appxdir', default = \"appx\", help = 'Destination directory for AppX packages.')\n",
" args = parser.parse_args()\n",
"\n",
" # Create build directory\n",
" args.srcdir = os.path.abspath(args.srcdir)\n",
" makedirs(args.srcdir)\n",
"\n",
" # Create AppX directory\n",
" args.appxdir = os.path.abspath(args.appxdir)\n",
" makedirs(args.appxdir)\n",
"\n",
" # Open log file\n",
" global cmdLog\n",
" cmdLog = open(os.path.join(args.srcdir, 'CmdLog.txt'), 'wb', 1)\n",
"\n",
" # Read the JSON test descriptions\n",
" tests = json.load(args.testfile)\n",
"\n",
" # Print info\n",
" print('Test file:', args.testfile.name)\n",
" print('Build directory: ', args.srcdir)\n",
" print('AppX directory: ', args.appxdir)\n",
"\n",
" # Iterate over tests\n",
" successCount = 0\n",
" totalCount = 0\n",
" for test in tests:\n",
" # Deserialize build steps into objects\n",
" actionObjects = []\n",
" try:\n",
" for step in test['buildSteps']:\n",
" actionObj = globals()[step['action']](args, test['name'], step)\n",
" actionObjects.append(actionObj)\n",
" totalCount += 1\n",
" except Exception as e:\n",
" print('Failed to parse test description: ' + str(e))\n",
" continue\n",
"\n",
" # Execute build steps\n",
" print()\n",
" try:\n",
" for action in actionObjects:\n",
" action.execute(args.clean)\n",
" successCount += 1\n",
" except Exception as e:\n",
" print('FAILURE')\n",
" cmdLog.write(str(e))\n",
"\n",
" # Print results\n",
" print()\n",
" print('Results: ' + str(successCount) + '/' + str(totalCount))\n",
"\n",
"if __name__ == \"__main__\":\n",
" main(sys.argv)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0.011904761904761904,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08771929824561403,
0.03333333333333333,
0,
0,
0.041666666666666664,
0.07692307692307693,
0,
0,
0.05263157894736842,
0,
0,
0,
0.034482758620689655,
0,
0.027777777777777776,
0.09090909090909091,
0,
0,
0,
0.14285714285714285,
0.027777777777777776,
0,
0.02564102564102564,
0,
0.04,
0,
0,
0.023809523809523808,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.012345679012345678,
0.007142857142857143,
0,
0,
0.06666666666666667,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0.037037037037037035,
0,
0,
0.0625,
0,
0,
0,
0,
0.0273972602739726,
0.041916167664670656,
0.04065040650406504,
0.04504504504504504,
0.04716981132075472,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0.05555555555555555
] | 233 | 0.00658 | false |
import clr, sys
clr.AddReference('ZyGames.Framework.Common');
clr.AddReference('ZyGames.Framework');
clr.AddReference('ZyGames.Framework.Game');
clr.AddReference('ZyGames.Tianjiexing.Model');
clr.AddReference('ZyGames.Tianjiexing.BLL');
clr.AddReference('ZyGames.Tianjiexing.Lang');
from action import *
from ZyGames.Framework.Common.Log import *
from System.Collections.Generic import *
from ZyGames.Tianjiexing.Model import *
from ZyGames.Tianjiexing.BLL import *
from ZyGames.Tianjiexing.Lang import *
from ZyGames.Framework.Game.Cache import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Framework.Common import *
from ZyGames.Framework.Cache.Generic import *
from ZyGames.Tianjiexing.Model.Config import *
class UrlParam(HttpParam):
def __init__(self):
HttpParam.__init__(self)
class ActionResult(DataResult):
def __init__(self):
DataResult.__init__(self)
self.ItemNum = 0
self.BringUpInfoList = List[BringUpInfo]()
def getUrlElement(httpGet,parent):
urlParam = UrlParam();
return urlParam;
def takeAction(urlParam,parent):
#for key, value in urlParam.items():
# TraceLog.ReleaseWrite('{0}={1}',key,value);
#TraceLog.ReleaseWrite('1004 param BackpackType:{0}', urlParam.BackpackType);
actionResult =ActionResult();
itemId = ConfigEnvSet.GetInt('User.DrugItemID');
userId =parent.Current.UserId;
cacheSetUserItem = GameDataCacheSet[UserItemPackage]();
cacheSetBringUp = ConfigCacheSet[BringUpInfo]();
actionResult.BringUpInfoList = cacheSetBringUp.FindAll();
userItem = cacheSetUserItem.FindKey(userId.ToString());
if(userItem and userItem.ItemPackage):
itemList = userItem.ItemPackage.FindAll(lambda s:s.ItemID == itemId);
for info in itemList:
actionResult.ItemNum = MathUtils.Addition(actionResult.ItemNum,info.Num);
#ctionResult = ['he'];
#处理结果存储在字典中
return actionResult;
def buildPacket(writer, urlParam, actionResult):
#输出
writer.PushIntoStack(len(actionResult.BringUpInfoList));
for info in actionResult.BringUpInfoList:
ds = DataStruct();
ds.PushIntoStack(info.BringUpType);
ds.PushIntoStack(info.UseUpType);
ds.PushIntoStack(info.UseUpNum);
writer.PushIntoStack(ds);
writer.PushIntoStack(actionResult.ItemNum);
return True; | [
"import clr, sys\r\n",
"clr.AddReference('ZyGames.Framework.Common');\r\n",
"clr.AddReference('ZyGames.Framework');\r\n",
"clr.AddReference('ZyGames.Framework.Game');\r\n",
"clr.AddReference('ZyGames.Tianjiexing.Model');\r\n",
"clr.AddReference('ZyGames.Tianjiexing.BLL');\r\n",
"clr.AddReference('ZyGames.Tianjiexing.Lang');\r\n",
"\r\n",
"from action import *\r\n",
"from ZyGames.Framework.Common.Log import *\r\n",
"from System.Collections.Generic import *\r\n",
"from ZyGames.Tianjiexing.Model import *\r\n",
"from ZyGames.Tianjiexing.BLL import *\r\n",
"from ZyGames.Tianjiexing.Lang import *\r\n",
"from ZyGames.Framework.Game.Cache import *\r\n",
"from ZyGames.Framework.Game.Service import *\r\n",
"from ZyGames.Framework.Common import *\r\n",
"from ZyGames.Framework.Cache.Generic import *\r\n",
"from ZyGames.Tianjiexing.Model.Config import *\r\n",
"\r\n",
"\r\n",
"class UrlParam(HttpParam):\r\n",
" def __init__(self):\r\n",
" HttpParam.__init__(self)\r\n",
"\r\n",
"class ActionResult(DataResult):\r\n",
" def __init__(self):\r\n",
" DataResult.__init__(self)\r\n",
" self.ItemNum = 0\r\n",
" self.BringUpInfoList = List[BringUpInfo]()\r\n",
"\r\n",
"def getUrlElement(httpGet,parent):\r\n",
" urlParam = UrlParam();\r\n",
" return urlParam;\r\n",
"\r\n",
"def takeAction(urlParam,parent):\r\n",
" #for key, value in urlParam.items():\r\n",
" # TraceLog.ReleaseWrite('{0}={1}',key,value);\r\n",
" #TraceLog.ReleaseWrite('1004 param BackpackType:{0}', urlParam.BackpackType);\r\n",
" actionResult =ActionResult();\r\n",
" itemId = ConfigEnvSet.GetInt('User.DrugItemID');\r\n",
" userId =parent.Current.UserId;\r\n",
" cacheSetUserItem = GameDataCacheSet[UserItemPackage]();\r\n",
" cacheSetBringUp = ConfigCacheSet[BringUpInfo]();\r\n",
" actionResult.BringUpInfoList = cacheSetBringUp.FindAll();\r\n",
" userItem = cacheSetUserItem.FindKey(userId.ToString());\r\n",
" if(userItem and userItem.ItemPackage):\r\n",
" itemList = userItem.ItemPackage.FindAll(lambda s:s.ItemID == itemId);\r\n",
" for info in itemList:\r\n",
" actionResult.ItemNum = MathUtils.Addition(actionResult.ItemNum,info.Num);\r\n",
" #ctionResult = ['he'];\r\n",
" #处理结果存储在字典中\r\n",
" return actionResult;\r\n",
"\r\n",
"def buildPacket(writer, urlParam, actionResult):\r\n",
" #输出\r\n",
" writer.PushIntoStack(len(actionResult.BringUpInfoList));\r\n",
" for info in actionResult.BringUpInfoList:\r\n",
" ds = DataStruct();\r\n",
" ds.PushIntoStack(info.BringUpType);\r\n",
" ds.PushIntoStack(info.UseUpType);\r\n",
" ds.PushIntoStack(info.UseUpNum);\r\n",
" writer.PushIntoStack(ds);\r\n",
" writer.PushIntoStack(actionResult.ItemNum);\r\n",
" return True;"
] | [
0.058823529411764705,
0.02127659574468085,
0.025,
0.022222222222222223,
0.020833333333333332,
0.021739130434782608,
0.02127659574468085,
0,
0.045454545454545456,
0.022727272727272728,
0.023809523809523808,
0.024390243902439025,
0.02564102564102564,
0.025,
0.022727272727272728,
0.021739130434782608,
0.025,
0.02127659574468085,
0.020833333333333332,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0.05555555555555555,
0.03571428571428571,
0.045454545454545456,
0,
0.058823529411764705,
0.023809523809523808,
0,
0.024096385542168676,
0.05714285714285714,
0.018518518518518517,
0.05555555555555555,
0.03225806451612903,
0.03636363636363636,
0.015873015873015872,
0.01639344262295082,
0,
0.02531645569620253,
0,
0.034482758620689655,
0.03571428571428571,
0.058823529411764705,
0.038461538461538464,
0,
0.02,
0.1111111111111111,
0.016129032258064516,
0,
0.03571428571428571,
0.022222222222222223,
0.023255813953488372,
0.023809523809523808,
0.02857142857142857,
0.02040816326530612,
0.125
] | 65 | 0.024533 | false |
# -*- coding: utf-8 -*-
###############################################################################################
#
# MediaPortal for Dreambox OS
#
# Coded by MediaPortal Team (c) 2013-2017
#
# This plugin is open source but it is NOT free software.
#
# This plugin may only be distributed to and executed on hardware which
# is licensed by Dream Property GmbH. This includes commercial distribution.
# In other words:
# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way
# to hardware which is NOT licensed by Dream Property GmbH.
# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way
# on hardware which is NOT licensed by Dream Property GmbH.
#
# This applies to the source code as a whole as well as to parts of it, unless
# explicitely stated otherwise.
#
# If you want to use or modify the code or parts of it,
# you have to keep OUR license and inform us about the modifications, but it may NOT be
# commercially distributed other than under the conditions noted above.
#
# As an exception regarding execution on hardware, you are permitted to execute this plugin on VU+ hardware
# which is licensed by satco europe GmbH, if the VTi image is used on that hardware.
#
# As an exception regarding modifcations, you are NOT permitted to remove
# any copy protections implemented in this plugin or change them for means of disabling
# or working around the copy protections, unless the change has been explicitly permitted
# by the original authors. Also decompiling and modification of the closed source
# parts is NOT permitted.
#
# Advertising with this plugin is NOT allowed.
# For other uses, permission from the authors is necessary.
#
###############################################################################################
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.youtubeplayer import YoutubePlayer
try:
from Plugins.Extensions.MediaPortal.resources import cfscrape
except:
cfscrapeModule = False
else:
cfscrapeModule = True
try:
import requests
except:
requestsModule = False
else:
requestsModule = True
import urlparse
import thread
hf_cookies = CookieJar()
hf_ck = {}
hf_agent = ''
BASE_URL = 'http://hdfilme.tv'
def hf_grabpage(pageurl):
if requestsModule:
try:
s = requests.session()
url = urlparse.urlparse(pageurl)
headers = {'User-Agent': hf_agent}
page = s.get(url.geturl(), cookies=hf_cookies, headers=headers)
return page.content
except:
pass
class hdfilmeMain(MPScreen):
def __init__(self, session):
MPScreen.__init__(self, session, skin='MP_Plugin')
self["actions"] = ActionMap(["MP_Actions"], {
"0" : self.closeAll,
"ok" : self.keyOK,
"cancel" : self.keyCancel,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft
}, -1)
self['title'] = Label("HDFilme")
self.streamList = []
self.suchString = ''
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.keyLocked = False
self.onFirstExecBegin.append(self.layoutFinished)
def layoutFinished(self):
self.keyLocked = True
thread.start_new_thread(self.get_tokens,("GetTokens",))
self['name'].setText(_("Please wait..."))
def get_tokens(self, threadName):
if requestsModule and cfscrapeModule:
printl("Calling thread: %s" % threadName,self,'A')
global hf_ck
global hf_agent
if hf_ck == {} or hf_agent == '':
hf_ck, hf_agent = cfscrape.get_tokens(BASE_URL)
requests.cookies.cookiejar_from_dict(hf_ck, cookiejar=hf_cookies)
else:
s = requests.session()
url = urlparse.urlparse(BASE_URL)
headers = {'user-agent': hf_agent}
page = s.get(url.geturl(), cookies=hf_cookies, headers=headers)
if page.status_code == 503 and page.headers.get("Server") == "cloudflare-nginx":
hf_ck, hf_agent = cfscrape.get_tokens(BASE_URL)
requests.cookies.cookiejar_from_dict(hf_ck, cookiejar=hf_cookies)
self.keyLocked = False
reactor.callFromThread(self.getPage)
else:
reactor.callFromThread(self.hf_error)
def hf_error(self):
message = self.session.open(MessageBoxExt, _("Mandatory depends python-requests and/or python-pyexecjs and nodejs are missing!"), MessageBoxExt.TYPE_ERROR)
self.keyCancel()
def getPage(self):
data = hf_grabpage('%s/movie-movies' % BASE_URL)
self.loadPage(data)
def loadPage(self, data):
self.keyLocked = True
parse = re.search('>Genre</option>(.*?)</select>', data, re.S)
if parse:
cats = re.findall('<option value="(\d+)"\s+>\s+(.*?)\s\s', parse.group(1), re.S)
if cats:
for tagid, name in cats:
self.streamList.append(("%s" % name, "%s/movie-movies?cat=%s&country=&order_f=last_update&order_d=desc&page=" % (BASE_URL, str(tagid))))
self.streamList.sort(key=lambda t : t[0].lower())
self.streamList.insert(0, ("Serien","%s/movie-series?page=" % BASE_URL))
self.streamList.insert(0, ("Kinofilme","%s/movie-movies?page=" % BASE_URL))
self.streamList.insert(0, ("--- Search ---", "search"))
self.ml.setList(map(self._defaultlistcenter, self.streamList))
self.keyLocked = False
self.showInfos()
def keyOK(self):
exist = self['liste'].getCurrent()
if self.keyLocked or exist == None:
return
genre = self['liste'].getCurrent()[0][0]
url = self['liste'].getCurrent()[0][1]
if genre == "--- Search ---":
self.suchen(auto_text_init=True)
else:
self.session.open(hdfilmeParsing, genre, url)
def SuchenCallback(self, callback = None, entry = None):
if callback is not None and len(callback):
self.suchString = callback.strip()
url = '%s/movie-search?key=%s&page_film=' % (BASE_URL, urllib.quote_plus(self.suchString))
genre = self['liste'].getCurrent()[0][0]
self.session.open(hdfilmeParsing, genre, url)
class hdfilmeParsing(MPScreen, ThumbsHelper):
def __init__(self, session, genre, url):
self.genre = genre
self.url = url
MPScreen.__init__(self, session, skin='MP_PluginDescr')
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"0" : self.closeAll,
"5" : self.keyShowThumb,
"ok" : self.keyOK,
"cancel": self.keyCancel,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"nextBouquet" : self.keyPageUp,
"prevBouquet" : self.keyPageDown
}, -1)
self['title'] = Label("HDFilme")
self['Page'] = Label(_("Page:"))
self['ContentTitle'] = Label(genre)
self.streamList = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.page = 1
self.lastpage = 1
self.keyLocked = True
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.streamList = []
url = self.url+str(self.page)
data = hf_grabpage(url)
self.parseData(data)
def parseData(self, data):
self.getLastPage(data, '', '</i>\s*Seite.*?/\s*(\d+)')
movies = re.findall('data-popover="movie-data.*?">\s*<a href="(.*?)">\s*<img.*?src="(.*?)".*?alt="(.*?)"', data, re.I)
if movies:
for url,bild,title in movies:
self.streamList.append((decodeHtml(title),url,bild))
if len(self.streamList) == 0:
self.streamList.append((_('No movies found!'), None, None))
else:
self.keyLocked = False
self.ml.setList(map(self._defaultlistleft, self.streamList))
self.ml.moveToIndex(0)
self.th_ThumbsQuery(self.streamList, 0, 1, 2, None, None, self.page, self.lastpage, agent=hf_agent, cookies=hf_ck)
self.showInfos()
def showInfos(self):
exist = self['liste'].getCurrent()
if self.keyLocked or exist == None:
return
title = self['liste'].getCurrent()[0][0]
self.coverurl = self['liste'].getCurrent()[0][2]
CoverHelper(self['coverArt']).getCover(self.coverurl, agent=hf_agent, cookieJar=hf_cookies)
self['name'].setText(title)
def keyOK(self):
exist = self['liste'].getCurrent()
if self.keyLocked or exist == None:
return
title = self['liste'].getCurrent()[0][0]
url = self['liste'].getCurrent()[0][1]
cover = self['liste'].getCurrent()[0][2]
self.session.open(hdfilmeStreams, title, url, cover)
class hdfilmeStreams(MPScreen):
new_video_formats = (
{
'1080' : 4, #MP4 1080p
'720' : 3, #MP4 720p
'480' : 2, #MP4 480p
'360' : 1, #MP4 360p
},
{
'1080' : 4, #MP4 1080p
'720' : 3, #MP4 720p
'480' : 1, #MP4 480p
'360' : 2, #MP4 360p
},
{
'1080' : 1, #MP4 1080p
'720' : 2, #MP4 720p
'480' : 3, #MP4 480p
'360' : 4, #MP4 360p
}
)
def __init__(self, session, title, url, cover):
self.movietitle = title
self.url = url
self.cover = cover
MPScreen.__init__(self, session, skin='MP_PluginDescr')
self["actions"] = ActionMap(["MP_Actions"], {
"0" : self.closeAll,
"ok" : self.keyOK,
"cancel": self.keyCancel,
"green" : self.keyTrailer,
}, -1)
self['title'] = Label("HDFilme")
self['leftContentTitle'] = Label(_("Stream Selection"))
self['ContentTitle'] = Label(_("Stream Selection"))
self['name'] = Label(self.movietitle)
self.trailer = None
self.streamList = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.keyLocked = True
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
data = hf_grabpage(self.url)
self.parseData(data)
def parseData(self, data):
m = re.search('<a class="btn.*?href="(.*?)">Trailer\s{0,1}[</a>|<i]', data)
if m:
self.trailer = m.group(1)
self['F2'].setText('Trailer')
servers = re.findall('<a.+?href="#(.*?)"\srole="tab" data-toggle="tab"><b>(.*?)</b></a>', data, re.S)
if servers:
for tab, server in servers:
m = re.search('<div\srole="tabpanel"\sclass="tab-pane.*?"\sid="%s">(.*?)</div>' % tab, data, re.S)
if m:
streams = re.findall('_episode="(\d+)" _link(?:=""|) _sub(?:=""|)\s+href="(.*?)"', m.group(1), re.S)
if streams:
folge = 'Folge ' if len(streams) > 1 and len(servers) == 1 else server.strip()
for (epi_num, link) in streams:
if not folge[0] == 'F': epi_num = ''
self.streamList.append((folge+epi_num, link.replace('&','&'), epi_num))
if not len(self.streamList):
streams = re.findall('_episode=".*?" _link(?:=""|) _sub(?:=""|)\s+href="(.*?)">', data, re.S)
if streams:
for link in streams:
epi_num = re.findall('episode=(\d+)(?:\&|)', link)
if epi_num:
epi_num = epi_num[0]
if re.search('staffel ', self.movietitle, re.I):
folge = 'Folge '
_epi_num = epi_num.strip(' \t\n\r')
else:
folge = 'Stream '
_epi_num = ''
self.streamList.append((folge+epi_num, link.replace('&','&'), _epi_num))
if len(self.streamList) == 0:
self.streamList.append((_('No supported streams found!'), None, None))
else:
self.keyLocked = False
self.ml.setList(map(self._defaultlisthoster, self.streamList))
CoverHelper(self['coverArt']).getCover(self.cover, agent=hf_agent, cookieJar=hf_cookies)
def keyOK(self):
exist = self['liste'].getCurrent()
if self.keyLocked or exist == None:
return
link = self['liste'].getCurrent()[0][1]
data = hf_grabpage(link)
self.getStreamUrl(data)
def makeTitle(self):
episode = self['liste'].getCurrent()[0][2]
if episode:
title = "%s - Folge %s" % (self.movietitle, episode)
else:
title = self.movietitle
return title
def getStreamUrl(self, data):
parse = re.findall('initPlayer\(\s+\"(\d+)\",\s+\"(\d+)\",', data, re.S)
if parse:
url = BASE_URL + "/movie/getlink/"+str(parse[0][0])+"/"+str(parse[0][1])
data = hf_grabpage(url)
self.extractStreams(data)
def extractStreams(self, data, videoPrio=2):
try:
import base64
data = base64.b64decode(data)
except:
self.stream_not_found()
try:
d = json.loads(data)
links = {}
if d['playinfo']:
for stream in d['playinfo']:
key = str(stream.get('label'))
if key:
key = key.strip('p')
if self.new_video_formats[videoPrio].has_key(key):
links[self.new_video_formats[videoPrio][key]] = stream.get('file')
else:
print 'no format prio:', key
try:
video_url = links[sorted(links.iterkeys())[0]]
except (KeyError,IndexError):
self.stream_not_found()
else:
self.play(str(video_url))
else:
self.stream_not_found()
except:
try:
d = json.loads(data)
links = {}
if d['playinfo']:
stream = d['playinfo']
self.play(str(stream))
else:
self.stream_not_found()
except:
self.stream_not_found()
def play(self, url):
title = self.makeTitle()
self.session.open(SimplePlayer, [(title, url, self.cover)], showPlaylist=False, ltype='hdfilme', cover=True)
def stream_not_found(self):
self.session.open(MessageBoxExt, _("Sorry, can't extract a stream url."), MessageBoxExt.TYPE_INFO, timeout=5)
def keyTrailer(self):
if self.trailer:
data = hf_grabpage(self.trailer)
self.playTrailer(data)
def playTrailer(self, data):
from Plugins.Extensions.MediaPortal.resources.youtubeplayer import YoutubePlayer
m = re.search('//www.youtube\.com/(embed|v|p)/(.*?)(\?|" |&)', data)
if m:
trailerId = m.group(2)
title = self.movietitle
self.session.open(
YoutubePlayer,
[(title+' - Trailer', trailerId, self.cover)],
playAll = False,
showPlaylist=False,
showCover=True
)
else:
self.stream_not_found() | [
"# -*- coding: utf-8 -*-\n",
"###############################################################################################\n",
"#\n",
"# MediaPortal for Dreambox OS\n",
"#\n",
"# Coded by MediaPortal Team (c) 2013-2017\n",
"#\n",
"# This plugin is open source but it is NOT free software.\n",
"#\n",
"# This plugin may only be distributed to and executed on hardware which\n",
"# is licensed by Dream Property GmbH. This includes commercial distribution.\n",
"# In other words:\n",
"# It's NOT allowed to distribute any parts of this plugin or its source code in ANY way\n",
"# to hardware which is NOT licensed by Dream Property GmbH.\n",
"# It's NOT allowed to execute this plugin and its source code or even parts of it in ANY way\n",
"# on hardware which is NOT licensed by Dream Property GmbH.\n",
"#\n",
"# This applies to the source code as a whole as well as to parts of it, unless\n",
"# explicitely stated otherwise.\n",
"#\n",
"# If you want to use or modify the code or parts of it,\n",
"# you have to keep OUR license and inform us about the modifications, but it may NOT be\n",
"# commercially distributed other than under the conditions noted above.\n",
"#\n",
"# As an exception regarding execution on hardware, you are permitted to execute this plugin on VU+ hardware\n",
"# which is licensed by satco europe GmbH, if the VTi image is used on that hardware.\n",
"#\n",
"# As an exception regarding modifcations, you are NOT permitted to remove\n",
"# any copy protections implemented in this plugin or change them for means of disabling\n",
"# or working around the copy protections, unless the change has been explicitly permitted\n",
"# by the original authors. Also decompiling and modification of the closed source\n",
"# parts is NOT permitted.\n",
"#\n",
"# Advertising with this plugin is NOT allowed.\n",
"# For other uses, permission from the authors is necessary.\n",
"#\n",
"###############################################################################################\n",
"\n",
"from Plugins.Extensions.MediaPortal.plugin import _\n",
"from Plugins.Extensions.MediaPortal.resources.imports import *\n",
"from Plugins.Extensions.MediaPortal.resources.youtubeplayer import YoutubePlayer\n",
"\n",
"try:\n",
"\tfrom Plugins.Extensions.MediaPortal.resources import cfscrape\n",
"except:\n",
"\tcfscrapeModule = False\n",
"else:\n",
"\tcfscrapeModule = True\n",
"\n",
"try:\n",
"\timport requests\n",
"except:\n",
"\trequestsModule = False\n",
"else:\n",
"\trequestsModule = True\n",
"\n",
"import urlparse\n",
"import thread\n",
"\n",
"hf_cookies = CookieJar()\n",
"hf_ck = {}\n",
"hf_agent = ''\n",
"BASE_URL = 'http://hdfilme.tv'\n",
"\n",
"def hf_grabpage(pageurl):\n",
"\tif requestsModule:\n",
"\t\ttry:\n",
"\t\t\ts = requests.session()\n",
"\t\t\turl = urlparse.urlparse(pageurl)\n",
"\t\t\theaders = {'User-Agent': hf_agent}\n",
"\t\t\tpage = s.get(url.geturl(), cookies=hf_cookies, headers=headers)\n",
"\t\t\treturn page.content\n",
"\t\texcept:\n",
"\t\t\tpass\n",
"\n",
"class hdfilmeMain(MPScreen):\n",
"\n",
"\tdef __init__(self, session):\n",
"\t\tMPScreen.__init__(self, session, skin='MP_Plugin')\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"0\" : self.closeAll,\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"cancel\" : self.keyCancel,\n",
"\t\t\t\"up\" : self.keyUp,\n",
"\t\t\t\"down\" : self.keyDown,\n",
"\t\t\t\"right\" : self.keyRight,\n",
"\t\t\t\"left\" : self.keyLeft\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(\"HDFilme\")\n",
"\n",
"\t\tself.streamList = []\n",
"\t\tself.suchString = ''\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tself.keyLocked = False\n",
"\t\tself.onFirstExecBegin.append(self.layoutFinished)\n",
"\n",
"\tdef layoutFinished(self):\n",
"\t\tself.keyLocked = True\n",
"\t\tthread.start_new_thread(self.get_tokens,(\"GetTokens\",))\n",
"\t\tself['name'].setText(_(\"Please wait...\"))\n",
"\n",
"\tdef get_tokens(self, threadName):\n",
"\t\tif requestsModule and cfscrapeModule:\n",
"\t\t\tprintl(\"Calling thread: %s\" % threadName,self,'A')\n",
"\t\t\tglobal hf_ck\n",
"\t\t\tglobal hf_agent\n",
"\t\t\tif hf_ck == {} or hf_agent == '':\n",
"\t\t\t\thf_ck, hf_agent = cfscrape.get_tokens(BASE_URL)\n",
"\t\t\t\trequests.cookies.cookiejar_from_dict(hf_ck, cookiejar=hf_cookies)\n",
"\t\t\telse:\n",
"\t\t\t\ts = requests.session()\n",
"\t\t\t\turl = urlparse.urlparse(BASE_URL)\n",
"\t\t\t\theaders = {'user-agent': hf_agent}\n",
"\t\t\t\tpage = s.get(url.geturl(), cookies=hf_cookies, headers=headers)\n",
"\t\t\t\tif page.status_code == 503 and page.headers.get(\"Server\") == \"cloudflare-nginx\":\n",
"\t\t\t\t\thf_ck, hf_agent = cfscrape.get_tokens(BASE_URL)\n",
"\t\t\t\t\trequests.cookies.cookiejar_from_dict(hf_ck, cookiejar=hf_cookies)\n",
"\t\t\tself.keyLocked = False\n",
"\t\t\treactor.callFromThread(self.getPage)\n",
"\t\telse:\n",
"\t\t\treactor.callFromThread(self.hf_error)\n",
"\n",
"\tdef hf_error(self):\n",
"\t\tmessage = self.session.open(MessageBoxExt, _(\"Mandatory depends python-requests and/or python-pyexecjs and nodejs are missing!\"), MessageBoxExt.TYPE_ERROR)\n",
"\t\tself.keyCancel()\n",
"\n",
"\tdef getPage(self):\n",
"\t\tdata = hf_grabpage('%s/movie-movies' % BASE_URL)\n",
"\t\tself.loadPage(data)\n",
"\n",
"\tdef loadPage(self, data):\n",
"\t\tself.keyLocked = True\n",
"\t\tparse = re.search('>Genre</option>(.*?)</select>', data, re.S)\n",
"\t\tif parse:\n",
"\t\t\tcats = re.findall('<option value=\"(\\d+)\"\\s+>\\s+(.*?)\\s\\s', parse.group(1), re.S)\n",
"\t\t\tif cats:\n",
"\t\t\t\tfor tagid, name in cats:\n",
"\t\t\t\t\tself.streamList.append((\"%s\" % name, \"%s/movie-movies?cat=%s&country=&order_f=last_update&order_d=desc&page=\" % (BASE_URL, str(tagid))))\n",
"\t\tself.streamList.sort(key=lambda t : t[0].lower())\n",
"\t\tself.streamList.insert(0, (\"Serien\",\"%s/movie-series?page=\" % BASE_URL))\n",
"\t\tself.streamList.insert(0, (\"Kinofilme\",\"%s/movie-movies?page=\" % BASE_URL))\n",
"\t\tself.streamList.insert(0, (\"--- Search ---\", \"search\"))\n",
"\t\tself.ml.setList(map(self._defaultlistcenter, self.streamList))\n",
"\t\tself.keyLocked = False\n",
"\t\tself.showInfos()\n",
"\n",
"\tdef keyOK(self):\n",
"\t\texist = self['liste'].getCurrent()\n",
"\t\tif self.keyLocked or exist == None:\n",
"\t\t\treturn\n",
"\t\tgenre = self['liste'].getCurrent()[0][0]\n",
"\t\turl = self['liste'].getCurrent()[0][1]\n",
"\t\tif genre == \"--- Search ---\":\n",
"\t\t\tself.suchen(auto_text_init=True)\n",
"\t\telse:\n",
"\t\t\tself.session.open(hdfilmeParsing, genre, url)\n",
"\n",
"\tdef SuchenCallback(self, callback = None, entry = None):\n",
"\t\tif callback is not None and len(callback):\n",
"\t\t\tself.suchString = callback.strip()\n",
"\t\t\turl = '%s/movie-search?key=%s&page_film=' % (BASE_URL, urllib.quote_plus(self.suchString))\n",
"\t\t\tgenre = self['liste'].getCurrent()[0][0]\n",
"\t\t\tself.session.open(hdfilmeParsing, genre, url)\n",
"\n",
"class hdfilmeParsing(MPScreen, ThumbsHelper):\n",
"\n",
"\tdef __init__(self, session, genre, url):\n",
"\t\tself.genre = genre\n",
"\t\tself.url = url\n",
"\t\tMPScreen.__init__(self, session, skin='MP_PluginDescr')\n",
"\t\tThumbsHelper.__init__(self)\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"0\" : self.closeAll,\n",
"\t\t\t\"5\" : self.keyShowThumb,\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"cancel\": self.keyCancel,\n",
"\t\t\t\"up\" : self.keyUp,\n",
"\t\t\t\"down\" : self.keyDown,\n",
"\t\t\t\"right\" : self.keyRight,\n",
"\t\t\t\"left\" : self.keyLeft,\n",
"\t\t\t\"nextBouquet\" : self.keyPageUp,\n",
"\t\t\t\"prevBouquet\" : self.keyPageDown\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(\"HDFilme\")\n",
"\t\tself['Page'] = Label(_(\"Page:\"))\n",
"\t\tself['ContentTitle'] = Label(genre)\n",
"\n",
"\t\tself.streamList = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tself.page = 1\n",
"\t\tself.lastpage = 1\n",
"\t\tself.keyLocked = True\n",
"\t\tself.onLayoutFinish.append(self.loadPage)\n",
"\n",
"\tdef loadPage(self):\n",
"\t\tself.streamList = []\n",
"\t\turl = self.url+str(self.page)\n",
"\t\tdata = hf_grabpage(url)\n",
"\t\tself.parseData(data)\n",
"\n",
"\tdef parseData(self, data):\n",
"\t\tself.getLastPage(data, '', '</i>\\s*Seite.*?/\\s*(\\d+)')\n",
"\t\tmovies = re.findall('data-popover=\"movie-data.*?\">\\s*<a href=\"(.*?)\">\\s*<img.*?src=\"(.*?)\".*?alt=\"(.*?)\"', data, re.I)\n",
"\t\tif movies:\n",
"\t\t\tfor url,bild,title in movies:\n",
"\t\t\t\tself.streamList.append((decodeHtml(title),url,bild))\n",
"\t\tif len(self.streamList) == 0:\n",
"\t\t\tself.streamList.append((_('No movies found!'), None, None))\n",
"\t\telse:\n",
"\t\t\tself.keyLocked = False\n",
"\t\tself.ml.setList(map(self._defaultlistleft, self.streamList))\n",
"\t\tself.ml.moveToIndex(0)\n",
"\t\tself.th_ThumbsQuery(self.streamList, 0, 1, 2, None, None, self.page, self.lastpage, agent=hf_agent, cookies=hf_ck)\n",
"\t\tself.showInfos()\n",
"\n",
"\tdef showInfos(self):\n",
"\t\texist = self['liste'].getCurrent()\n",
"\t\tif self.keyLocked or exist == None:\n",
"\t\t\treturn\n",
"\t\ttitle = self['liste'].getCurrent()[0][0]\n",
"\t\tself.coverurl = self['liste'].getCurrent()[0][2]\n",
"\t\tCoverHelper(self['coverArt']).getCover(self.coverurl, agent=hf_agent, cookieJar=hf_cookies)\n",
"\t\tself['name'].setText(title)\n",
"\n",
"\tdef keyOK(self):\n",
"\t\texist = self['liste'].getCurrent()\n",
"\t\tif self.keyLocked or exist == None:\n",
"\t\t\treturn\n",
"\t\ttitle = self['liste'].getCurrent()[0][0]\n",
"\t\turl = self['liste'].getCurrent()[0][1]\n",
"\t\tcover = self['liste'].getCurrent()[0][2]\n",
"\t\tself.session.open(hdfilmeStreams, title, url, cover)\n",
"\n",
"class hdfilmeStreams(MPScreen):\n",
"\tnew_video_formats = (\n",
"\t\t\t{\n",
"\t\t\t\t'1080' : 4, #MP4 1080p\n",
"\t\t\t\t'720' : 3, #MP4 720p\n",
"\t\t\t\t'480' : 2, #MP4 480p\n",
"\t\t\t\t'360' : 1, #MP4 360p\n",
"\t\t\t},\n",
"\t\t\t{\n",
"\t\t\t\t'1080' : 4, #MP4 1080p\n",
"\t\t\t\t'720' : 3, #MP4 720p\n",
"\t\t\t\t'480' : 1, #MP4 480p\n",
"\t\t\t\t'360' : 2, #MP4 360p\n",
"\t\t\t},\n",
"\t\t\t{\n",
"\t\t\t\t'1080' : 1, #MP4 1080p\n",
"\t\t\t\t'720' : 2, #MP4 720p\n",
"\t\t\t\t'480' : 3, #MP4 480p\n",
"\t\t\t\t'360' : 4, #MP4 360p\n",
"\t\t\t}\n",
"\t\t)\n",
"\n",
"\tdef __init__(self, session, title, url, cover):\n",
"\t\tself.movietitle = title\n",
"\t\tself.url = url\n",
"\t\tself.cover = cover\n",
"\t\tMPScreen.__init__(self, session, skin='MP_PluginDescr')\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"0\" : self.closeAll,\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"cancel\": self.keyCancel,\n",
"\t\t\t\"green\" : self.keyTrailer,\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(\"HDFilme\")\n",
"\t\tself['leftContentTitle'] = Label(_(\"Stream Selection\"))\n",
"\t\tself['ContentTitle'] = Label(_(\"Stream Selection\"))\n",
"\t\tself['name'] = Label(self.movietitle)\n",
"\n",
"\t\tself.trailer = None\n",
"\t\tself.streamList = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\t\tself.keyLocked = True\n",
"\t\tself.onLayoutFinish.append(self.loadPage)\n",
"\n",
"\tdef loadPage(self):\n",
"\t\tdata = hf_grabpage(self.url)\n",
"\t\tself.parseData(data)\n",
"\n",
"\tdef parseData(self, data):\n",
"\t\tm = re.search('<a class=\"btn.*?href=\"(.*?)\">Trailer\\s{0,1}[</a>|<i]', data)\n",
"\t\tif m:\n",
"\t\t\tself.trailer = m.group(1)\n",
"\t\t\tself['F2'].setText('Trailer')\n",
"\n",
"\t\tservers = re.findall('<a.+?href=\"#(.*?)\"\\srole=\"tab\" data-toggle=\"tab\"><b>(.*?)</b></a>', data, re.S)\n",
"\t\tif servers:\n",
"\t\t\tfor tab, server in servers:\n",
"\t\t\t\tm = re.search('<div\\srole=\"tabpanel\"\\sclass=\"tab-pane.*?\"\\sid=\"%s\">(.*?)</div>' % tab, data, re.S)\n",
"\t\t\t\tif m:\n",
"\t\t\t\t\tstreams = re.findall('_episode=\"(\\d+)\" _link(?:=\"\"|) _sub(?:=\"\"|)\\s+href=\"(.*?)\"', m.group(1), re.S)\n",
"\t\t\t\t\tif streams:\n",
"\t\t\t\t\t\tfolge = 'Folge ' if len(streams) > 1 and len(servers) == 1 else server.strip()\n",
"\t\t\t\t\t\tfor (epi_num, link) in streams:\n",
"\t\t\t\t\t\t\tif not folge[0] == 'F': epi_num = ''\n",
"\t\t\t\t\t\t\tself.streamList.append((folge+epi_num, link.replace('&','&'), epi_num))\n",
"\t\tif not len(self.streamList):\n",
"\t\t\tstreams = re.findall('_episode=\".*?\" _link(?:=\"\"|) _sub(?:=\"\"|)\\s+href=\"(.*?)\">', data, re.S)\n",
"\t\t\tif streams:\n",
"\t\t\t\tfor link in streams:\n",
"\t\t\t\t\tepi_num = re.findall('episode=(\\d+)(?:\\&|)', link)\n",
"\t\t\t\t\tif epi_num:\n",
"\t\t\t\t\t\tepi_num = epi_num[0]\n",
"\t\t\t\t\t\tif re.search('staffel ', self.movietitle, re.I):\n",
"\t\t\t\t\t\t\tfolge = 'Folge '\n",
"\t\t\t\t\t\t\t_epi_num = epi_num.strip(' \\t\\n\\r')\n",
"\t\t\t\t\t\telse:\n",
"\t\t\t\t\t\t\tfolge = 'Stream '\n",
"\t\t\t\t\t\t\t_epi_num = ''\n",
"\t\t\t\t\t\tself.streamList.append((folge+epi_num, link.replace('&','&'), _epi_num))\n",
"\n",
"\t\tif len(self.streamList) == 0:\n",
"\t\t\tself.streamList.append((_('No supported streams found!'), None, None))\n",
"\t\telse:\n",
"\t\t\tself.keyLocked = False\n",
"\t\tself.ml.setList(map(self._defaultlisthoster, self.streamList))\n",
"\t\tCoverHelper(self['coverArt']).getCover(self.cover, agent=hf_agent, cookieJar=hf_cookies)\n",
"\n",
"\tdef keyOK(self):\n",
"\t\texist = self['liste'].getCurrent()\n",
"\t\tif self.keyLocked or exist == None:\n",
"\t\t\treturn\n",
"\t\tlink = self['liste'].getCurrent()[0][1]\n",
"\t\tdata = hf_grabpage(link)\n",
"\t\tself.getStreamUrl(data)\n",
"\n",
"\tdef makeTitle(self):\n",
"\t\tepisode = self['liste'].getCurrent()[0][2]\n",
"\t\tif episode:\n",
"\t\t\ttitle = \"%s - Folge %s\" % (self.movietitle, episode)\n",
"\t\telse:\n",
"\t\t\ttitle = self.movietitle\n",
"\t\treturn title\n",
"\n",
"\tdef getStreamUrl(self, data):\n",
"\t\tparse = re.findall('initPlayer\\(\\s+\\\"(\\d+)\\\",\\s+\\\"(\\d+)\\\",', data, re.S)\n",
"\t\tif parse:\n",
"\t\t\turl = BASE_URL + \"/movie/getlink/\"+str(parse[0][0])+\"/\"+str(parse[0][1])\n",
"\t\t\tdata = hf_grabpage(url)\n",
"\t\t\tself.extractStreams(data)\n",
"\n",
"\tdef extractStreams(self, data, videoPrio=2):\n",
"\t\ttry:\n",
"\t\t\timport base64\n",
"\t\t\tdata = base64.b64decode(data)\n",
"\t\texcept:\n",
"\t\t\tself.stream_not_found()\n",
"\t\ttry:\n",
"\t\t\td = json.loads(data)\n",
"\t\t\tlinks = {}\n",
"\t\t\tif d['playinfo']:\n",
"\t\t\t\tfor stream in d['playinfo']:\n",
"\t\t\t\t\tkey = str(stream.get('label'))\n",
"\t\t\t\t\tif key:\n",
"\t\t\t\t\t\tkey = key.strip('p')\n",
"\t\t\t\t\t\tif self.new_video_formats[videoPrio].has_key(key):\n",
"\t\t\t\t\t\t\tlinks[self.new_video_formats[videoPrio][key]] = stream.get('file')\n",
"\t\t\t\t\t\telse:\n",
"\t\t\t\t\t\t\tprint 'no format prio:', key\n",
"\t\t\t\ttry:\n",
"\t\t\t\t\tvideo_url = links[sorted(links.iterkeys())[0]]\n",
"\t\t\t\texcept (KeyError,IndexError):\n",
"\t\t\t\t\tself.stream_not_found()\n",
"\t\t\t\telse:\n",
"\t\t\t\t\tself.play(str(video_url))\n",
"\t\t\telse:\n",
"\t\t\t\tself.stream_not_found()\n",
"\t\texcept:\n",
"\t\t\ttry:\n",
"\t\t\t\td = json.loads(data)\n",
"\t\t\t\tlinks = {}\n",
"\t\t\t\tif d['playinfo']:\n",
"\t\t\t\t\tstream = d['playinfo']\n",
"\t\t\t\t\tself.play(str(stream))\n",
"\t\t\t\telse:\n",
"\t\t\t\t\tself.stream_not_found()\n",
"\t\t\texcept:\n",
"\t\t\t\tself.stream_not_found()\n",
"\n",
"\tdef play(self, url):\n",
"\t\ttitle = self.makeTitle()\n",
"\t\tself.session.open(SimplePlayer, [(title, url, self.cover)], showPlaylist=False, ltype='hdfilme', cover=True)\n",
"\n",
"\tdef stream_not_found(self):\n",
"\t\tself.session.open(MessageBoxExt, _(\"Sorry, can't extract a stream url.\"), MessageBoxExt.TYPE_INFO, timeout=5)\n",
"\n",
"\tdef keyTrailer(self):\n",
"\t\tif self.trailer:\n",
"\t\t\tdata = hf_grabpage(self.trailer)\n",
"\t\t\tself.playTrailer(data)\n",
"\n",
"\tdef playTrailer(self, data):\n",
"\t\tfrom Plugins.Extensions.MediaPortal.resources.youtubeplayer import YoutubePlayer\n",
"\t\tm = re.search('//www.youtube\\.com/(embed|v|p)/(.*?)(\\?|\" |&)', data)\n",
"\t\tif m:\n",
"\t\t\ttrailerId = m.group(2)\n",
"\t\t\ttitle = self.movietitle\n",
"\t\t\tself.session.open(\n",
"\t\t\t\tYoutubePlayer,\n",
"\t\t\t\t[(title+' - Trailer', trailerId, self.cover)],\n",
"\t\t\t\tplayAll = False,\n",
"\t\t\t\tshowPlaylist=False,\n",
"\t\t\t\tshowCover=True\n",
"\t\t\t\t)\n",
"\t\telse:\n",
"\t\t\tself.stream_not_found()"
] | [
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0.009174311926605505,
0.011627906976744186,
0,
0,
0.011235955056179775,
0.01098901098901099,
0.012048192771084338,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0.012345679012345678,
0,
0,
0.015873015873015872,
0.125,
0.041666666666666664,
0,
0.043478260869565216,
0,
0,
0.058823529411764705,
0.125,
0.041666666666666664,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0.05,
0.14285714285714285,
0.038461538461538464,
0.027777777777777776,
0.02631578947368421,
0.014925373134328358,
0.043478260869565216,
0.2,
0.125,
0,
0.034482758620689655,
0,
0.03333333333333333,
0.018867924528301886,
0,
0.020833333333333332,
0.08333333333333333,
0.09090909090909091,
0.06666666666666667,
0.09090909090909091,
0.07692307692307693,
0.07142857142857142,
0.08,
0.1111111111111111,
0,
0.02857142857142857,
0,
0.043478260869565216,
0.043478260869565216,
0.023809523809523808,
0.038461538461538464,
0,
0.04,
0.019230769230769232,
0,
0.037037037037037035,
0.041666666666666664,
0.034482758620689655,
0.022727272727272728,
0,
0.02857142857142857,
0.025,
0.05555555555555555,
0.0625,
0.05263157894736842,
0.02702702702702703,
0.019230769230769232,
0.014285714285714285,
0.1111111111111111,
0.037037037037037035,
0.02631578947368421,
0.02564102564102564,
0.014705882352941176,
0.023529411764705882,
0.018867924528301886,
0.014084507042253521,
0.038461538461538464,
0.025,
0.125,
0.024390243902439025,
0,
0.047619047619047616,
0.012658227848101266,
0.05263157894736842,
0,
0.05,
0.0196078431372549,
0.045454545454545456,
0,
0.037037037037037035,
0.041666666666666664,
0.015384615384615385,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.034482758620689655,
0.014084507042253521,
0.038461538461538464,
0.02666666666666667,
0.02564102564102564,
0.017241379310344827,
0.015384615384615385,
0.04,
0.05263157894736842,
0,
0.05555555555555555,
0.02702702702702703,
0.05263157894736842,
0.1,
0.023255813953488372,
0.024390243902439025,
0.03125,
0.027777777777777776,
0.125,
0.02040816326530612,
0,
0.08620689655172414,
0.022222222222222223,
0.02631578947368421,
0.02127659574468085,
0.022727272727272728,
0.02040816326530612,
0,
0.021739130434782608,
0,
0.023809523809523808,
0.047619047619047616,
0.058823529411764705,
0.017241379310344827,
0.03333333333333333,
0,
0.020833333333333332,
0.08333333333333333,
0.07142857142857142,
0.09090909090909091,
0.034482758620689655,
0.09090909090909091,
0.07692307692307693,
0.07142857142857142,
0.07692307692307693,
0.05714285714285714,
0.05555555555555555,
0.1111111111111111,
0,
0.02857142857142857,
0.02857142857142857,
0.02631578947368421,
0,
0.043478260869565216,
0.023809523809523808,
0.038461538461538464,
0,
0.0625,
0.05,
0.041666666666666664,
0.022727272727272728,
0,
0.047619047619047616,
0.043478260869565216,
0.03125,
0.038461538461538464,
0.043478260869565216,
0,
0.03571428571428571,
0.07017543859649122,
0.03305785123966942,
0.07692307692307693,
0.09090909090909091,
0.05263157894736842,
0.03125,
0.015873015873015872,
0.125,
0.038461538461538464,
0.015873015873015872,
0.04,
0.017094017094017096,
0.05263157894736842,
0,
0.045454545454545456,
0.02702702702702703,
0.05263157894736842,
0.1,
0.023255813953488372,
0.0196078431372549,
0.02127659574468085,
0.03333333333333333,
0,
0.05555555555555555,
0.02702702702702703,
0.05263157894736842,
0.1,
0.023255813953488372,
0.024390243902439025,
0.023255813953488372,
0.01818181818181818,
0,
0.03125,
0.043478260869565216,
0.2,
0.14814814814814814,
0.16,
0.16,
0.16,
0.16666666666666666,
0.2,
0.14814814814814814,
0.16,
0.16,
0.16,
0.16666666666666666,
0.2,
0.14814814814814814,
0.16,
0.16,
0.16,
0.2,
0.25,
0,
0.02040816326530612,
0.038461538461538464,
0.058823529411764705,
0.047619047619047616,
0.017241379310344827,
0,
0.020833333333333332,
0.08333333333333333,
0.09090909090909091,
0.034482758620689655,
0.06666666666666667,
0.1111111111111111,
0,
0.02857142857142857,
0.017241379310344827,
0.018518518518518517,
0.025,
0,
0.045454545454545456,
0.043478260869565216,
0.023809523809523808,
0.038461538461538464,
0.041666666666666664,
0.022727272727272728,
0,
0.047619047619047616,
0.03225806451612903,
0.043478260869565216,
0,
0.03571428571428571,
0.02564102564102564,
0.125,
0.034482758620689655,
0.030303030303030304,
0,
0.028846153846153848,
0.07142857142857142,
0.03225806451612903,
0.04854368932038835,
0.1,
0.03773584905660377,
0.058823529411764705,
0.023529411764705882,
0.02631578947368421,
0.045454545454545456,
0.03614457831325301,
0.03225806451612903,
0.030927835051546393,
0.06666666666666667,
0.04,
0.05084745762711865,
0.058823529411764705,
0.037037037037037035,
0.01818181818181818,
0.041666666666666664,
0.023255813953488372,
0.08333333333333333,
0.04,
0.047619047619047616,
0.03614457831325301,
0,
0.03125,
0.013513513513513514,
0.125,
0.038461538461538464,
0.015384615384615385,
0.02197802197802198,
0,
0.05555555555555555,
0.02702702702702703,
0.05263157894736842,
0.1,
0.023809523809523808,
0.037037037037037035,
0.038461538461538464,
0,
0.045454545454545456,
0.022222222222222223,
0.07142857142857142,
0.017857142857142856,
0.125,
0.037037037037037035,
0.06666666666666667,
0,
0.03225806451612903,
0.08,
0.08333333333333333,
0.013157894736842105,
0.037037037037037035,
0.034482758620689655,
0,
0.021739130434782608,
0.14285714285714285,
0.058823529411764705,
0.030303030303030304,
0.2,
0.037037037037037035,
0.14285714285714285,
0.041666666666666664,
0.07142857142857142,
0.047619047619047616,
0.030303030303030304,
0.027777777777777776,
0.07692307692307693,
0.037037037037037035,
0.03508771929824561,
0.013513513513513514,
0.08333333333333333,
0.027777777777777776,
0.1111111111111111,
0.019230769230769232,
0.058823529411764705,
0.034482758620689655,
0.1,
0.03225806451612903,
0.1111111111111111,
0.03571428571428571,
0.2,
0.125,
0.04,
0.06666666666666667,
0.045454545454545456,
0.03571428571428571,
0.03571428571428571,
0.1,
0.034482758620689655,
0.18181818181818182,
0.03571428571428571,
0,
0.045454545454545456,
0.037037037037037035,
0.018018018018018018,
0,
0.034482758620689655,
0.017857142857142856,
0,
0.043478260869565216,
0.05263157894736842,
0.027777777777777776,
0.038461538461538464,
0,
0.03333333333333333,
0.024096385542168676,
0.04054054054054054,
0.125,
0.038461538461538464,
0.037037037037037035,
0.045454545454545456,
0.05263157894736842,
0.0196078431372549,
0.14285714285714285,
0.041666666666666664,
0.05263157894736842,
0.16666666666666666,
0.125,
0.07692307692307693
] | 419 | 0.044306 | false |
################################################################################
### Copyright © 2012-2013 BlackDragonHunt
###
### This file is part of the Super Duper Script Editor.
###
### The Super Duper Script Editor is free software: you can redistribute it
### and/or modify it under the terms of the GNU General Public License as
### published by the Free Software Foundation, either version 3 of the License,
### or (at your option) any later version.
###
### The Super Duper Script Editor is distributed in the hope that it will be
### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
###
### You should have received a copy of the GNU General Public License
### along with the Super Duper Script Editor.
### If not, see <http://www.gnu.org/licenses/>.
################################################################################
from bitstring import ConstBitStream
import os
import re
from .eboot import get_toc
from .pak_ex import get_pak_files, get_txt_files, get_script_pak_files, get_lin_files, EXT_MODE
from .invalidarchiveexception import InvalidArchiveException
EXTRACT_EXT = [".pak", ".lin"]
SKIP_EXTRACT_FILE_RE = re.compile(ur"hs_mtb_s\d\d|bg_\d\d\d|bg_lc01")
FILE_NORECURSIVE_RE = re.compile(ur"effect_lensflare00")
SPECIAL_FILE_EXTRACT = [
(re.compile(ur"^e?\d\d_.*?.pak$"), get_txt_files),
(re.compile(ur"^(event|mtb_s\d\d|voice).pak$"), get_txt_files),
(re.compile(ur"^script_pak_.*?\.pak$"), get_script_pak_files),
(re.compile(ur"\.lin$"), get_lin_files),
(re.compile(ur"\.pak$"), get_pak_files),
]
############################################################
### FUNCTIONS
############################################################
##################################################
###
##################################################
def dump_to_file(data, filename):
dirname = os.path.dirname(filename)
try:
os.makedirs(dirname)
except: pass
with open(filename, "wb") as out_file:
data.tofile(out_file)
##################################################
###
##################################################
def extract_umdimage(filename, eboot, umdimage, out_dir = None):
if out_dir == None:
out_dir = filename + "-out"
data = ConstBitStream(filename = filename)
eboot_data = ConstBitStream(filename = eboot)
toc = get_toc(eboot_data, umdimage)
for filename, file_data in get_pak_files(data, recursive = True, toc = toc):
out_path = os.path.join(out_dir, filename)
final_dir = os.path.dirname(out_path)
try:
os.makedirs(final_dir)
except: pass
name, ext = os.path.splitext(filename)
ext = ext.lower()
print filename
if ext in EXTRACT_EXT and SKIP_EXTRACT_FILE_RE.search(filename) == None:
try:
extract_fn = get_pak_files
file_ext = None
ext_mode = EXT_MODE.auto
recursive = True
if FILE_NORECURSIVE_RE.search(filename):
recursive = False
for special in SPECIAL_FILE_EXTRACT:
if special[0].search(filename):
extract_fn = special[1]
break
# Hacky stupid thing to handle how I handled lin files in the old QuickBMS script.
if extract_fn == get_lin_files or extract_fn == get_script_pak_files:
lin_name = os.path.basename(out_path)
lin_name = os.path.splitext(lin_name)[0]
for sub_filename, sub_data in extract_fn(file_data, recursive, file_ext, ext_mode, lin_name = lin_name):
dump_to_file(sub_data, os.path.join(out_path, sub_filename))
else:
for sub_filename, sub_data in extract_fn(file_data, recursive, file_ext, ext_mode):
dump_to_file(sub_data, os.path.join(out_path, sub_filename))
except InvalidArchiveException:
dump_to_file(file_data, out_path)
else:
dump_to_file(file_data, out_path)
##################################################
###
##################################################
# if __name__ == "__main__":
# extract_umdimage("full-umdimage.dat", eboot = "full-EBOOT.BIN", type = UMDIMAGE_TYPE.full, out_dir = "full-umdimage", toc_filename = "!full-toc.txt")
# extract_umdimage("full-umdimage2.dat", eboot = "full-EBOOT.BIN", type = UMDIMAGE_TYPE.full2, out_dir = "full-umdimage2", toc_filename = "!full-toc2.txt")
# extract_umdimage("best-umdimage.dat", eboot = "best-EBOOT.BIN", type = UMDIMAGE_TYPE.best, out_dir = "best-umdimage", toc_filename = "!best-toc.txt")
# extract_umdimage("best-umdimage2.dat", eboot = "best-EBOOT.BIN", type = UMDIMAGE_TYPE.best2, out_dir = "best-umdimage2", toc_filename = "!best-toc2.txt")
### EOF ### | [
"################################################################################\n",
"### Copyright © 2012-2013 BlackDragonHunt\n",
"### \n",
"### This file is part of the Super Duper Script Editor.\n",
"### \n",
"### The Super Duper Script Editor is free software: you can redistribute it\n",
"### and/or modify it under the terms of the GNU General Public License as\n",
"### published by the Free Software Foundation, either version 3 of the License,\n",
"### or (at your option) any later version.\n",
"### \n",
"### The Super Duper Script Editor is distributed in the hope that it will be\n",
"### useful, but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"### GNU General Public License for more details.\n",
"### \n",
"### You should have received a copy of the GNU General Public License\n",
"### along with the Super Duper Script Editor.\n",
"### If not, see <http://www.gnu.org/licenses/>.\n",
"################################################################################\n",
"\n",
"from bitstring import ConstBitStream\n",
"\n",
"import os\n",
"import re\n",
"\n",
"from .eboot import get_toc\n",
"from .pak_ex import get_pak_files, get_txt_files, get_script_pak_files, get_lin_files, EXT_MODE\n",
"from .invalidarchiveexception import InvalidArchiveException\n",
"\n",
"EXTRACT_EXT = [\".pak\", \".lin\"]\n",
"SKIP_EXTRACT_FILE_RE = re.compile(ur\"hs_mtb_s\\d\\d|bg_\\d\\d\\d|bg_lc01\")\n",
"FILE_NORECURSIVE_RE = re.compile(ur\"effect_lensflare00\")\n",
"\n",
"SPECIAL_FILE_EXTRACT = [\n",
" (re.compile(ur\"^e?\\d\\d_.*?.pak$\"), get_txt_files),\n",
" (re.compile(ur\"^(event|mtb_s\\d\\d|voice).pak$\"), get_txt_files),\n",
" (re.compile(ur\"^script_pak_.*?\\.pak$\"), get_script_pak_files),\n",
" (re.compile(ur\"\\.lin$\"), get_lin_files),\n",
" (re.compile(ur\"\\.pak$\"), get_pak_files),\n",
"]\n",
"\n",
"############################################################\n",
"### FUNCTIONS\n",
"############################################################\n",
"\n",
"##################################################\n",
"### \n",
"##################################################\n",
"def dump_to_file(data, filename):\n",
" dirname = os.path.dirname(filename)\n",
" try:\n",
" os.makedirs(dirname)\n",
" except: pass\n",
" \n",
" with open(filename, \"wb\") as out_file:\n",
" data.tofile(out_file)\n",
"\n",
"##################################################\n",
"### \n",
"##################################################\n",
"def extract_umdimage(filename, eboot, umdimage, out_dir = None):\n",
" if out_dir == None:\n",
" out_dir = filename + \"-out\"\n",
" \n",
" data = ConstBitStream(filename = filename)\n",
" eboot_data = ConstBitStream(filename = eboot)\n",
" \n",
" toc = get_toc(eboot_data, umdimage)\n",
" \n",
" for filename, file_data in get_pak_files(data, recursive = True, toc = toc):\n",
" out_path = os.path.join(out_dir, filename)\n",
" \n",
" final_dir = os.path.dirname(out_path)\n",
" try:\n",
" os.makedirs(final_dir)\n",
" except: pass\n",
" \n",
" name, ext = os.path.splitext(filename)\n",
" ext = ext.lower()\n",
" \n",
" print filename\n",
" \n",
" if ext in EXTRACT_EXT and SKIP_EXTRACT_FILE_RE.search(filename) == None:\n",
" try:\n",
" extract_fn = get_pak_files\n",
" file_ext = None\n",
" ext_mode = EXT_MODE.auto\n",
" recursive = True\n",
" \n",
" if FILE_NORECURSIVE_RE.search(filename):\n",
" recursive = False\n",
" \n",
" for special in SPECIAL_FILE_EXTRACT:\n",
" if special[0].search(filename):\n",
" extract_fn = special[1]\n",
" break\n",
" \n",
" # Hacky stupid thing to handle how I handled lin files in the old QuickBMS script.\n",
" if extract_fn == get_lin_files or extract_fn == get_script_pak_files:\n",
" lin_name = os.path.basename(out_path)\n",
" lin_name = os.path.splitext(lin_name)[0]\n",
" for sub_filename, sub_data in extract_fn(file_data, recursive, file_ext, ext_mode, lin_name = lin_name):\n",
" dump_to_file(sub_data, os.path.join(out_path, sub_filename))\n",
" else:\n",
" for sub_filename, sub_data in extract_fn(file_data, recursive, file_ext, ext_mode):\n",
" dump_to_file(sub_data, os.path.join(out_path, sub_filename))\n",
" \n",
" except InvalidArchiveException:\n",
" dump_to_file(file_data, out_path)\n",
" \n",
" else:\n",
" dump_to_file(file_data, out_path)\n",
" \n",
"##################################################\n",
"### \n",
"##################################################\n",
"# if __name__ == \"__main__\":\n",
" # extract_umdimage(\"full-umdimage.dat\", eboot = \"full-EBOOT.BIN\", type = UMDIMAGE_TYPE.full, out_dir = \"full-umdimage\", toc_filename = \"!full-toc.txt\")\n",
" # extract_umdimage(\"full-umdimage2.dat\", eboot = \"full-EBOOT.BIN\", type = UMDIMAGE_TYPE.full2, out_dir = \"full-umdimage2\", toc_filename = \"!full-toc2.txt\")\n",
" # extract_umdimage(\"best-umdimage.dat\", eboot = \"best-EBOOT.BIN\", type = UMDIMAGE_TYPE.best, out_dir = \"best-umdimage\", toc_filename = \"!best-toc.txt\")\n",
" # extract_umdimage(\"best-umdimage2.dat\", eboot = \"best-EBOOT.BIN\", type = UMDIMAGE_TYPE.best2, out_dir = \"best-umdimage2\", toc_filename = \"!best-toc2.txt\")\n",
"\n",
"### EOF ###"
] | [
0.012345679012345678,
0.023809523809523808,
0.2,
0.017857142857142856,
0.2,
0.013157894736842105,
0.013513513513513514,
0.0125,
0.023255813953488372,
0.2,
0.012987012987012988,
0.013333333333333334,
0.015151515151515152,
0.02040816326530612,
0.2,
0.014285714285714285,
0.021739130434782608,
0.020833333333333332,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0.07142857142857142,
0.017241379310344827,
0,
0,
0.030303030303030304,
0.030303030303030304,
0.0136986301369863,
0.015151515151515152,
0.015151515151515152,
0,
0,
0,
0.07142857142857142,
0,
0,
0,
0.2,
0,
0.029411764705882353,
0.02631578947368421,
0.14285714285714285,
0,
0.2,
0.3333333333333333,
0.024390243902439025,
0,
0,
0,
0.2,
0,
0.046153846153846156,
0.09090909090909091,
0,
0.3333333333333333,
0.07692307692307693,
0.08163265306122448,
0.3333333333333333,
0.02631578947368421,
0.3333333333333333,
0.06329113924050633,
0,
0.2,
0,
0,
0.034482758620689655,
0.11764705882352941,
0.2,
0,
0,
0.2,
0,
0.2,
0.012987012987012988,
0.09090909090909091,
0,
0,
0,
0,
0.1111111111111111,
0,
0.03571428571428571,
0.1111111111111111,
0,
0.023809523809523808,
0,
0,
0.1111111111111111,
0.01098901098901099,
0,
0.020833333333333332,
0.0196078431372549,
0.034782608695652174,
0,
0,
0.02127659574468085,
0,
0.1111111111111111,
0.02631578947368421,
0,
0.2,
0,
0.025,
0.3333333333333333,
0,
0.2,
0,
0,
0.012738853503184714,
0.012658227848101266,
0.012738853503184714,
0.012658227848101266,
0,
0.18181818181818182
] | 123 | 0.053243 | false |
import sys
from OpenGL import Vector3
from WhateverEngine.Engine import Input
from WhateverEngine.Engine import Renderer
from WhateverEngine.Engine import GameObject
from WhateverEngine.Engine import EngineFunctions
pos=Vector3()
rot=Vector3()
deltaTime=0.0
#non-standard stuff
hasSpawned = False
go = GameObject()
def Start():
global pos
global rot
pos += Vector3(0.0, 0.0, -150.0)
rot = Vector3(0.0, 90.0, 0.0)
def Update():
global rot
global pos
global deltaTime
rot = Vector3(0.0, 0.1 * deltaTime, 0.0)
#Instantiate a new object if the player presses 'k'
if(Input.GetKeyboardKeyUp['k'] is True):
global go
global hasSpawned
if(hasSpawned is False):
go.AddGameComponent(Renderer("data\\rifle.obj"))
EngineFunctions.Instantiate(go)
hasSpawned = True
else:
EngineFunctions.Destroy(go)
#Rotation controls.
#if (Input.GetKeyboardKey['w'] is True):
# rot = Vector3(-1.0 * deltaTime, 0.0, 0.0)
#if(Input.GetKeyboardKey['s'] is True):
# rot = Vector3(1.0 * deltaTime, 0.0, 0.0)
#if (Input.GetKeyboardKey['q'] is True):
# rot = Vector3(0.0, 0.0, 1.0 * deltaTime)
#if(Input.GetKeyboardKey['e'] is True):
# rot = Vector3(0.0, 0.0, -1.0 * deltaTime)
#if(Input.GetKeyboardKey['a'] is True):
# rot = Vector3(0.0, 1.0 * deltaTime, 0.0)
#if(Input.GetKeyboardKey['d'] is True):
# rot = Vector3(0.0, -1.0 * deltaTime, 0.0)
| [
"import sys\n",
"from OpenGL import Vector3\n",
"from WhateverEngine.Engine import Input\n",
"from WhateverEngine.Engine import Renderer\n",
"from WhateverEngine.Engine import GameObject\n",
"from WhateverEngine.Engine import EngineFunctions\n",
"\n",
"pos=Vector3()\n",
"rot=Vector3()\n",
"deltaTime=0.0\n",
"\n",
"#non-standard stuff\n",
"hasSpawned = False\n",
"go = GameObject()\n",
"\n",
"def Start():\n",
" global pos\n",
" global rot\n",
" pos += Vector3(0.0, 0.0, -150.0)\n",
" rot = Vector3(0.0, 90.0, 0.0)\n",
"\n",
"def Update():\n",
" global rot\n",
" global pos\n",
" global deltaTime\n",
" rot = Vector3(0.0, 0.1 * deltaTime, 0.0)\n",
"\n",
" #Instantiate a new object if the player presses 'k'\n",
" if(Input.GetKeyboardKeyUp['k'] is True):\n",
" global go\n",
" global hasSpawned\n",
" if(hasSpawned is False):\n",
" go.AddGameComponent(Renderer(\"data\\\\rifle.obj\"))\n",
" EngineFunctions.Instantiate(go)\n",
" hasSpawned = True \n",
" else:\n",
" EngineFunctions.Destroy(go)\n",
"\n",
" #Rotation controls.\n",
" \n",
" #if (Input.GetKeyboardKey['w'] is True):\n",
" # rot = Vector3(-1.0 * deltaTime, 0.0, 0.0)\n",
" #if(Input.GetKeyboardKey['s'] is True):\n",
" # rot = Vector3(1.0 * deltaTime, 0.0, 0.0)\n",
"\n",
" #if (Input.GetKeyboardKey['q'] is True):\n",
" # rot = Vector3(0.0, 0.0, 1.0 * deltaTime)\n",
" #if(Input.GetKeyboardKey['e'] is True):\n",
" # rot = Vector3(0.0, 0.0, -1.0 * deltaTime)\n",
"\n",
" #if(Input.GetKeyboardKey['a'] is True):\n",
" # rot = Vector3(0.0, 1.0 * deltaTime, 0.0)\n",
" #if(Input.GetKeyboardKey['d'] is True):\n",
" # rot = Vector3(0.0, -1.0 * deltaTime, 0.0)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142,
0.07142857142857142,
0.07142857142857142,
0,
0.05,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0.07142857142857142,
0,
0,
0,
0,
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0.041666666666666664,
0.2,
0.022222222222222223,
0,
0.022727272727272728,
0,
0,
0.022222222222222223,
0,
0.022727272727272728,
0,
0,
0.022727272727272728,
0,
0.022727272727272728,
0
] | 54 | 0.015441 | false |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""This file contains the classes Geocache (for a single geocache) and
Waypoint (for a waypoint, consisting only of name and coordinates."""
import os
import time
import datetime
import xml.etree.ElementTree as ElementTree
import ownfunctions
SIZE_LIST = ["other", "micro", "small", "regular", "large"]
TYPE_LIST = ["Traditional Cache", "Multi-cache", "EarthCache", "Letterbox Hybrid", "Event Cache", "Wherigo Cache",
"Mystery Cache", "Geocaching HQ", "Unknown Type"]
# stringcollection for German language
STR_D = "Schwierigkeit"
STR_T = "Gelaende"
STR_SIZE = "Groesse"
STR_TYPE = "Typ"
STR_COORDS = "Koordinaten"
STR_OWNER = "Owner"
STR_ATTR = "Attribute"
STR_ACT = "Cache ist aktiv"
STR_DATE = "Stand"
STR_LINK = "Link"
STR_HINT = "Hinweis"
STR_WAYPOINTS = "Wegpunkte"
class Geocache(object):
"""
An object of this class contains all relevant information of the corresponding gpx-file
Attributes:
-----------
filename_path: string
filename (including path)
source: string
'downloader' if gpx-file is created by geocaching.com gpx-downloader
'geocaching.com' if gpx-file is created by geocaching.com itself
gccode: string
gc-code - used for overloaded operators == and != and is printed by "print"
name: string
name of the geocache
difficulty: float
difficulty value of the cache
terrain: float
terrain value of the cache
size: int
size of the cache as number (other = 0, then increasing by size)
size_string: string
size of the cache as word
type: string
type of the cache (member of TYPE_LIST)
used in short description as well as for searching and sorting
longtype: string
type of the cache (any description possible)
used in long description
description: string
description of the cache
hint: string
hint for the cache
owner: string
owner of the cache
url: string
weblink to the cache on geocaching.com
coordinates: list
coordinates in decimal degree, first element of the list: latitude, second element: longitude
coordinates_string: string
coordinates as degree and minutes
attributes: list
attributes of the cache
logs: list
last logs before download
every element of list: list [date, logtype, name of logger (, logtext)]
last element only if gpx-file is created directly from geocaching.com
available: bool
availability at the time of download
downloaddate: datetime.date
date when the gpx-file was downloaded from geocaching.com
downloaddate_string: string
date when the gpx-file was downloaded from geocaching.com as string
waypoints: list
list of waypoints that belong to cache (empty if no waypoints)
Methods:
---------
__init__(filename_path): Create a Geocache-object out of the gpx-file (complete name with path)
add_waypoint(self, waypoint): adds a waypoint to the cache
shortinfo(space=0): string
one-line information about the cache and the waypoints
set space to 12 if cache is shown with distance
longinfo():
detailed information about the cache
"""
def __init__(self, filename_path):
"""constructor: reads all attributes from gpx-file that has to be given by 'filename_path'"""
if type(filename_path) != str:
raise TypeError("Bad input.")
self.filename_path = filename_path
self.gccode = os.path.splitext(os.path.basename(self.filename_path))[0].upper() # gc-code
downloaddate = time.ctime(os.path.getmtime(self.filename_path)) # read downloaddate (= change of gpx-file)
downloaddate = ownfunctions.remove_spaces(downloaddate).split(" ")
self.downloaddate_string = "{:02} {} {}".format(int(downloaddate[2]), downloaddate[1], downloaddate[-1])
month = ownfunctions.get_month_number(downloaddate[1])
self.downloaddate = datetime.date(int(downloaddate[-1]), month, int(downloaddate[2]))
self.name = "" # initialize attributes for geocache
self.difficulty = 0
self.terrain = 0
self.size_string = ""
self.size = ""
self.longtype = ""
self.type = ""
self.description = ""
self.hint = ""
self.owner = ""
self.url = ""
self.coordinates = []
self.coordinates_string = ""
self.attributes = []
self.logs = []
self.available = False
geocache_tree = ElementTree.parse(self.filename_path) # read .gpx-Datei and find source
source = geocache_tree.find(".//{http://www.topografix.com/GPX/1/0}name").text
if source == "Cache Listing Generated from Geocaching.com": # get attributes from gpx-file
self.source = "geocaching.com"
self._read_from_geocachingcom_gpxfile(geocache_tree)
else:
self.source = "downloader"
self._read_from_gpx_downloader(geocache_tree)
self.distance = 0 # initialize attributes for waypoints
self.waypoints = []
    def _read_from_gpx_downloader(self, geocache_tree):
        """reads attributes from a gpx-file that is created by the firefox plugin 'Geocaching.com GPX Downloader'
        (part of __init__)

        geocache_tree: ElementTree of the parsed gpx-file
        fills name, difficulty, terrain, size, type, description, hint, owner,
        url, coordinates, attributes, logs and availability of the cache
        this flavour uses the groundspeak namespace version 1/0"""
        name = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0}name").text  # read name
        self.name = ownfunctions.replace_signs(name)
        difficulty = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0}difficulty").text  # read difficulty
        self.difficulty = float(difficulty)
        terrain = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0}terrain").text  # read terrain
        self.terrain = float(terrain)
        self.size_string = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0}container").text  # read size
        # map unknown size descriptions to "other" (= index 0 in SIZE_LIST)
        if self.size_string not in SIZE_LIST:
            self.size_string = "other"
        self.size = SIZE_LIST.index(self.size_string)
        self.longtype = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0}type").text  # read type
        # geocaching.com calls mystery caches 'Unknown Cache'
        if self.longtype == "Unknown Cache":
            self.longtype = "Mystery Cache"
        self.type = self._read_type(self.longtype)
        self.description = self._read_description(geocache_tree)  # read description
        hint = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0}encoded_hints").text  # read hint
        self.hint = ownfunctions.replace_signs(hint)
        owner = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0}placed_by").text  # read owner
        self.owner = ownfunctions.replace_signs(owner)
        self.url = geocache_tree.find(".//{http://www.topografix.com/GPX/1/0}url").text  # read url
        wpt = geocache_tree.find(".//{http://www.topografix.com/GPX/1/0}wpt")  # read coordinates
        self.coordinates = [float(wpt.get("lat")), float(wpt.get("lon"))]  # list of floats [lat, lon]
        coord_str = ownfunctions.coords_decimal_to_minutes(self.coordinates)
        self.coordinates_string = coord_str  # string 'X XX°XX.XXX, X XXX°XX.XXX'
        # in this gpx-flavour all attributes are stored in one comma-separated 'text' element
        attributes = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0}text").text  # read attributes
        attributes = attributes.split(",")
        for a in attributes:
            self.attributes.append(ownfunctions.remove_spaces(a))
        self.logs = self._read_logs(geocache_tree)  # read logs
        cache = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0}cache")  # read if available or not
        # 'available' is an XML attribute with the string values "True"/"False";
        # any other value leaves self.available at its initialized default
        available = cache.get("available")
        if available == "True":
            self.available = True
        elif available == "False":
            self.available = False
    def _read_from_geocachingcom_gpxfile(self, geocache_tree):
        """reads attributes from a gpx-file that is created by geocaching.com (part of __init__)

        geocache_tree: ElementTree of the parsed gpx-file
        fills the same attributes as _read_from_gpx_downloader but uses the
        groundspeak namespace version 1/0/1 and differs in a few details
        (lowercased container text, url at index 1, attributes as elements)"""
        name = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0/1}name").text  # read name
        self.name = ownfunctions.replace_signs(name)
        difficulty = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0/1}difficulty").text  # read difficulty
        self.difficulty = float(difficulty)
        terrain = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0/1}terrain").text  # read terrain
        self.terrain = float(terrain)
        # geocaching.com capitalizes the size ('Micro', ...); lowercase it to match SIZE_LIST
        self.size_string = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0/1}container").text.lower()  # size
        if self.size_string not in SIZE_LIST:
            self.size_string = "other"
        self.size = SIZE_LIST.index(self.size_string)
        self.longtype = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0/1}type").text  # read type
        # geocaching.com calls mystery caches 'Unknown Cache'
        if self.longtype == "Unknown Cache":
            self.longtype = "Mystery Cache"
        self.type = self._read_type(self.longtype)
        self.description = self._read_description(geocache_tree)  # read description
        hint = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0/1}encoded_hints").text  # read hint
        self.hint = ownfunctions.replace_signs(hint)
        owner = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0/1}placed_by").text  # read owner
        self.owner = ownfunctions.replace_signs(owner)
        url_raw = geocache_tree.findall(".//{http://www.topografix.com/GPX/1/0}url")  # read url
        self.url = url_raw[1].text  # because index 0 is only http://www.geocaching.com
        wpt = geocache_tree.find(".//{http://www.topografix.com/GPX/1/0}wpt")  # read coordinates
        self.coordinates = [float(wpt.get("lat")), float(wpt.get("lon"))]  # list of floats [lat, lon]
        coord_str = ownfunctions.coords_decimal_to_minutes(self.coordinates)
        self.coordinates_string = coord_str  # string 'X XX°XX.XXX, X XXX°XX.XXX'
        # in this gpx-flavour each attribute is its own 'attribute' element
        attributes = geocache_tree.findall(".//{http://www.groundspeak.com/cache/1/0/1}attribute")  # read attributes
        for a in attributes:
            self.attributes.append(ownfunctions.remove_spaces(a.text))
        self.logs = self._read_logs(geocache_tree)  # read logs
        cache = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0/1}cache")  # read if available or not
        # 'available' is an XML attribute with the string values "True"/"False";
        # any other value leaves self.available at its initialized default
        available = cache.get("available")
        if available == "True":
            self.available = True
        elif available == "False":
            self.available = False
    def _read_logs(self, geocache_tree):
        """reads logs from xml-file, part of __init__

        returns a list of logs, every log being a list
        [date, logtype, finder name] with the logtext appended as 4th element
        if the gpx-file comes directly from geocaching.com
        the two gpx-flavours need different index offsets because the
        gpx-downloader stores the cache attributes as an extra pseudo-log
        entry at the start of the log elements"""
        log_dates_raw = []
        if self.source == "downloader":
            log_dates_raw = geocache_tree.findall(".//{http://www.groundspeak.com/cache/1/0}date")
        elif self.source == "geocaching.com":
            log_dates_raw = geocache_tree.findall(".//{http://www.groundspeak.com/cache/1/0/1}date")
        log_dates = []
        for i, ld in enumerate(log_dates_raw):
            # ld.text[:10]: keep only the date part, presumably of an ISO timestamp -- TODO confirm
            if i == 0 and self.source == "geocaching.com":
                log_dates.append(ld.text[:10])
            elif i > 0:  # in gpx-file from the gpx-downloader the attributes are also saved as logs
                log_dates.append(ld.text[:10])  # but not taken into account here
        log_types_raw = []
        if self.source == "downloader":
            log_types_raw = geocache_tree.findall(".//{http://www.groundspeak.com/cache/1/0}type")
        elif self.source == "geocaching.com":
            log_types_raw = geocache_tree.findall(".//{http://www.groundspeak.com/cache/1/0/1}type")
        log_types = []
        for i, lt in enumerate(log_types_raw):
            if i == 1 and self.source == "geocaching.com":  # index 0 corresponds to cachetyp (Tradi, Multi,...),
                log_types.append(lt.text)  # # index 1 to the attributes if gpx from gpx-downloader
            elif i > 1:
                log_types.append(lt.text)
        finder_raw = []
        if self.source == "downloader":
            finder_raw = geocache_tree.findall(".//{http://www.groundspeak.com/cache/1/0}finder")
        elif self.source == "geocaching.com":
            finder_raw = geocache_tree.findall(".//{http://www.groundspeak.com/cache/1/0/1}finder")
        finder = []
        for i, fd in enumerate(finder_raw):
            # geocaching.com files: every finder element is a real log entry
            if i == 0 and self.source == "geocaching.com":
                next_fd = ownfunctions.replace_signs(fd.text)
                finder.append(next_fd)
            if i > 0:  # in gpx-file from the gpx-downloader index 0 corresponding to attributes
                next_fd = ownfunctions.replace_signs(fd.text)
                finder.append(next_fd)
        log_texts = []
        if self.source == "geocaching.com":  # logtext is only saved in gpx-files from geocaching.com
            text_raw = geocache_tree.findall(".//{http://www.groundspeak.com/cache/1/0/1}text")
            for i, tx in enumerate(text_raw):
                next_tx = ownfunctions.replace_signs(tx.text)
                log_texts.append(next_tx)
        logs = []
        log_number = len(log_dates)
        # only combine the fields when all three lists line up; otherwise warn and return []
        if len(log_dates) == len(log_types) == len(finder):
            for i in range(log_number):
                new_log = [log_dates[i], log_types[i], finder[i]]
                if self.source == "geocaching.com":
                    # NOTE(review): raises IndexError if log_texts is shorter than
                    # log_dates -- confirm the gpx format guarantees equal counts
                    new_log.append(log_texts[i])
                logs.append(new_log)
        else:
            print("\nWARNING! Error in gpx-file. Reading logs correctly not possible.")
        return logs
def _read_description(self, geocache_tree):
"""reads description from xml-file, part of __init__
source: is the xml-file created by geocaching.com gpx-downloader or by geocaching.com itself?"""
description_short = ""
if self.source == "downloader":
description_short = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0}short_description").text
elif self.source == "geocaching.com":
description_short = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0/1}short_description").text
if description_short:
description_short = ownfunctions.replace_signs(description_short)
else:
description_short = ""
description_long = ""
if self.source == "downloader":
description_long = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0}long_description").text
elif self.source == "geocaching.com":
description_long = geocache_tree.find(".//{http://www.groundspeak.com/cache/1/0/1}long_description").text
if description_long:
description_long = ownfunctions.replace_signs(description_long)
else:
description_long = ""
return description_short + "\n\n" + description_long
@staticmethod
def _read_type(lt):
"""converts cachetypes from xml-file to those from TYPE_LIST, part of __init__"""
if lt in TYPE_LIST:
cachetype = lt
elif lt == "Cache In Trash Out Event" or lt == "Mega-Event Cache" or lt == "Giga-Event Cache":
cachetype = "Event Cache"
else:
cachetype = "Unknown Type"
return cachetype
    def __str__(self):
        """string representation: the gc-code of the cache"""
        return self.gccode
    def __eq__(self, other):
        """equality check: compares the gc-code with *other*
        (so a Geocache compares equal to its plain gc-code string)
        NOTE(review): __eq__ without __hash__ makes instances unhashable in
        Python 3 -- confirm this is intended"""
        return self.gccode == other
    def __ne__(self, other):
        """inequality check: counterpart of __eq__, compares the gc-code with *other*"""
        return self.gccode != other
def add_waypoint(self, waypoint):
"""adds a waypoint to the cache"""
if type(waypoint) == Waypoint:
waypoint.find_shown_name_and_distance(self)
self.waypoints.append(waypoint)
else:
raise TypeError("Waypoint can't be added because it is not of waypoint type")
def shortinfo(self, space=0):
"""returns one-line information about the cache
space = number of spaces before waypoint lines
(space = 0 if cache is shown without distance, space = 12 if it's shown with distance)"""
a = self.gccode.ljust(7)
b = self.coordinates_string
c = self.type.ljust(17)
d = self.difficulty
e = self.terrain
f = self.size_string.ljust(7)
g = str(self.available).ljust(5)
h = self.downloaddate_string
i = self.name
result = "{} | {} | {} | D {} | T {} | {} | {} | {} | {}".format(a, b, c, d, e, f, g, h, i)
for w in self.waypoints:
result += "\n" + space*" " + w.info()
return result
def longinfo(self):
"""returns detailed information about the cache"""
z1 = "\n{} : {}".format(self.gccode, self.name)
z2 = "\n"
for i in range(len(z1)):
z2 += "-"
d = self.difficulty
t = self.terrain
sizestr = self.size_string
lt = self.longtype
z3 = "\n{}: {}, {}: {}, {}: {}, {}: {}".format(STR_D, d, STR_T, t, STR_SIZE, sizestr, STR_TYPE, lt)
z4 = "\n{}: {}".format(STR_COORDS, self.coordinates_string)
if self.waypoints:
z4 += ", {}: ".format(STR_WAYPOINTS)
for w in self.waypoints:
z4 += "{} ({}), ".format(w.shown_name, w.coordinates_string)
z4 = z4[:-2]
z5 = "\n{}: {}".format(STR_OWNER, self.owner)
z6 = "\n{}: ".format(STR_ATTR)
for a in self.attributes:
z6 = z6 + str(a) + ", "
z6 = z6[:-2]
z7 = "\n{}: {}, {}: {}".format(STR_ACT, self.available, STR_DATE, self.downloaddate_string)
z8 = "\n{}: {}".format(STR_LINK, self.url)
z9 = "\n\n{}".format(self.description)
z10 = "\n{}: {}".format(STR_HINT, self.hint)
z11 = "\n\n"
for l in self.logs:
z11 += "{}: {} by {}\n".format(l[0], l[1], l[2])
if len(l) > 3:
z11 += "{}\n\n".format(l[3])
return z1 + z2 + z3 + z4 + z5 + z6 + z7 + z8 + z9 + z10 + z11
class Waypoint(object):
    """
    Represents a single waypoint, i.e. a named pair of coordinates.

    Attributes:
    -----------
    name: string
        uppercased name of the waypoint (only characters from ALLOWED_SIGNS)

    shown_name: string
        name as displayed: without the trailing '(GCXXXX)' suffix if the
        waypoint has been attached to a geocache, otherwise equal to name

    coordinates: list
        [latitude, longitude] in decimal degree

    coordinates_string: string
        coordinates formatted as degree and minutes

    distance: float
        distance to the coordinates of the owning geocache, None as long as
        the waypoint does not belong to a cache

    Methods:
    ---------
    __init__(name, coordinates): build the waypoint from a name and [lat, lon]

    find_shown_name_and_distance(geocache): called when the waypoint is
        attached to a geocache; derives shown_name and distance

    info(): one-line description of the waypoint
    """

    ALLOWED_SIGNS = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T",
                     "U", "V", "W", "X", "Y", "Z", u"Ä", u"Ö", u"Ü", u"ß", "!", "#", "$", '"', "?", "*", "/", "(", ")",
                     "-", "+", "&", "'", ";", ":", ",", ".", "=", "@", "%", "<", ">", "0", "1", "2", "3", "4", "5", "6",
                     "7", "8", "9", " "]

    def __init__(self, name, coordinates):
        """builds the waypoint from a name and coordinates as list [lat, lon]

        raises TypeError if the name is not a string or contains a character
        that the GARMIN device does not accept"""
        if type(name) != str:
            raise TypeError("waypoint name is of wrong type")
        self.name = name.upper()
        # reject the first character that GARMIN cannot handle
        invalid = next((ch for ch in self.name if ch not in self.ALLOWED_SIGNS), None)
        if invalid is not None:
            raise TypeError("GARMIN does not allow '{}' in a waypoint name.".format(invalid))
        self.shown_name = self.name  # for waypoints not belonging to a geocache
        ownfunctions.validate_coordinates(coordinates)  # throws an error if coordinates not valid
        self.coordinates = coordinates
        # string 'X XX°XX.XXX, X XXX°XX.XXX'
        self.coordinates_string = ownfunctions.coords_decimal_to_minutes(self.coordinates)
        self.distance = None  # filled in by find_shown_name_and_distance

    def find_shown_name_and_distance(self, geocache):
        """calculates the shown name and the distance to the 'main coordinates'
        if the waypoint belongs to a geocache

        the waypoint name has to end in '(<gccode of the cache>)', otherwise a
        TypeError is raised"""
        parts = self.name.split()
        tail = parts[-1]
        has_gccode_suffix = tail.startswith("(GC") and tail.endswith(")")
        if not has_gccode_suffix or tail[1:-1] != geocache.gccode:
            raise TypeError("This waypoint does not belong to the geocache.")
        self.shown_name = " ".join(parts[:-1])
        self.distance = ownfunctions.calculate_distance(self.coordinates, geocache.coordinates)

    def info(self):
        """returns a one-line description of the waypoint"""
        text = " | {} | {}".format(self.coordinates_string, self.shown_name)
        if self.distance:  # only shown once the waypoint belongs to a cache
            text += " ({}km)".format(round(self.distance, 1))
        return text
| [
"#!/usr/bin/python\n",
"# -*- coding: utf-8 -*-\n",
"\n",
"\"\"\"This file contains the classes Geocache (for a single geocache) and\n",
"Waypoint (for a waypoint, consisting only of name and coordinates.\"\"\"\n",
"\n",
"import os\n",
"import time\n",
"import datetime\n",
"import xml.etree.ElementTree as ElementTree\n",
"\n",
"import ownfunctions \n",
"\n",
"SIZE_LIST = [\"other\", \"micro\", \"small\", \"regular\", \"large\"]\n",
"TYPE_LIST = [\"Traditional Cache\", \"Multi-cache\", \"EarthCache\", \"Letterbox Hybrid\", \"Event Cache\", \"Wherigo Cache\",\n",
" \"Mystery Cache\", \"Geocaching HQ\", \"Unknown Type\"]\n",
"\n",
"\n",
"# stringcollection for German language\n",
"STR_D = \"Schwierigkeit\"\n",
"STR_T = \"Gelaende\"\n",
"STR_SIZE = \"Groesse\"\n",
"STR_TYPE = \"Typ\"\n",
"STR_COORDS = \"Koordinaten\"\n",
"STR_OWNER = \"Owner\"\n",
"STR_ATTR = \"Attribute\"\n",
"STR_ACT = \"Cache ist aktiv\"\n",
"STR_DATE = \"Stand\"\n",
"STR_LINK = \"Link\"\n",
"STR_HINT = \"Hinweis\"\n",
"STR_WAYPOINTS = \"Wegpunkte\"\n",
"\n",
"\n",
"class Geocache(object):\n",
"\n",
" \"\"\"\n",
" An object of this class contains all relevant information of the corresponding gpx-file\n",
" \n",
" \n",
" Attributes:\n",
" -----------\n",
" filename_path: string\n",
" filename (including path)\n",
"\n",
" source: string\n",
" 'downloader' if gpx-file is created by geocaching.com gpx-downloader\n",
" 'geocaching.com' if gpx-file is created by geocaching.com itself\n",
" \n",
" gccode: string\n",
" gc-code - used for overloaded operators == and != and is printed by \"print\"\n",
" \n",
" name: string\n",
" name of the geocache\n",
" \n",
" difficulty: float\n",
" difficulty value of the cache\n",
" \n",
" terrain: float\n",
" terrain value of the cache\n",
" \n",
" size: int\n",
" size of the cache as number (other = 0, then increasing by size)\n",
" \n",
" size_string: string\n",
" size of the cache as word\n",
" \n",
" type: string\n",
" type of the cache (member of TYPE_LIST)\n",
" used in short description as well as for searching and sorting\n",
" \n",
" longtype: string\n",
" type of the cache (any description possible)\n",
" used in long description\n",
" \n",
" description: string\n",
" description of the cache\n",
" \n",
" hint: string\n",
" hint for the cache\n",
" \n",
" owner: string\n",
" owner of the cache\n",
" \n",
" url: string\n",
" weblink to the cache on geocaching.com \n",
" \n",
" coordinates: list \n",
" coordinates in decimal degree, first element of the list: latitude, second element: longitude\n",
" \n",
" coordinates_string: string\n",
" coordinates as degree and minutes\n",
" \n",
" attributes: list\n",
" attributes of the cache\n",
" \n",
" logs: list\n",
" last logs before download\n",
" every element of list: list [date, logtype, name of logger (, logtext)]\n",
" last element only if gpx-file is created directly from geocaching.com\n",
" \n",
" available: bool \n",
" availability at the time of download \n",
" \n",
" downloaddate: datetime.date\n",
" date when the gpx-file was downloaded from geocaching.com\n",
" \n",
" downloaddate_string: string\n",
" date when the gpx-file was downloaded from geocaching.com as string\n",
"\n",
" waypoints: list\n",
" list of waypoints that belong to cache (empty if no waypoints)\n",
" \n",
" \n",
" Methods:\n",
" ---------\n",
" __init__(filename_path): Create a Geocache-object out of the gpx-file (complete name with path)\n",
"\n",
" add_waypoint(self, waypoint): adds a waypoint to the cache\n",
" \n",
" shortinfo(space=0): string\n",
" one-line information about the cache and the waypoints\n",
" set space to 12 if cache is shown with distance\n",
" \n",
" longinfo(): \n",
" detailed information about the cache\n",
" \"\"\"\n",
" \n",
" def __init__(self, filename_path):\n",
" \"\"\"constructor: reads all attributes from gpx-file that has to be given by 'filename_path'\"\"\"\n",
" \n",
" if type(filename_path) != str:\n",
" raise TypeError(\"Bad input.\")\n",
"\n",
" self.filename_path = filename_path\n",
" self.gccode = os.path.splitext(os.path.basename(self.filename_path))[0].upper() # gc-code\n",
"\n",
" downloaddate = time.ctime(os.path.getmtime(self.filename_path)) # read downloaddate (= change of gpx-file)\n",
" downloaddate = ownfunctions.remove_spaces(downloaddate).split(\" \")\n",
" self.downloaddate_string = \"{:02} {} {}\".format(int(downloaddate[2]), downloaddate[1], downloaddate[-1])\n",
" month = ownfunctions.get_month_number(downloaddate[1])\n",
" self.downloaddate = datetime.date(int(downloaddate[-1]), month, int(downloaddate[2]))\n",
"\n",
" self.name = \"\" # initialize attributes for geocache\n",
" self.difficulty = 0\n",
" self.terrain = 0\n",
" self.size_string = \"\"\n",
" self.size = \"\"\n",
" self.longtype = \"\"\n",
" self.type = \"\"\n",
" self.description = \"\"\n",
" self.hint = \"\"\n",
" self.owner = \"\"\n",
" self.url = \"\"\n",
" self.coordinates = []\n",
" self.coordinates_string = \"\"\n",
" self.attributes = []\n",
" self.logs = []\n",
" self.available = False\n",
"\n",
" geocache_tree = ElementTree.parse(self.filename_path) # read .gpx-Datei and find source\n",
" source = geocache_tree.find(\".//{http://www.topografix.com/GPX/1/0}name\").text\n",
"\n",
" if source == \"Cache Listing Generated from Geocaching.com\": # get attributes from gpx-file\n",
" self.source = \"geocaching.com\"\n",
" self._read_from_geocachingcom_gpxfile(geocache_tree)\n",
" else:\n",
" self.source = \"downloader\"\n",
" self._read_from_gpx_downloader(geocache_tree)\n",
" \n",
" self.distance = 0 # initialize attributes for waypoints\n",
" self.waypoints = []\n",
"\n",
" def _read_from_gpx_downloader(self, geocache_tree):\n",
" \"\"\"reads attributes from a gpx-file that is created by the firefox plugin 'Geocaching.com GPX Downloader'\n",
" (part of __init__)\"\"\"\n",
"\n",
" name = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}name\").text # read name\n",
" self.name = ownfunctions.replace_signs(name)\n",
"\n",
" difficulty = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}difficulty\").text # read difficulty\n",
" self.difficulty = float(difficulty)\n",
"\n",
" terrain = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}terrain\").text # read terrain\n",
" self.terrain = float(terrain)\n",
"\n",
" self.size_string = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}container\").text # read size\n",
" if self.size_string not in SIZE_LIST:\n",
" self.size_string = \"other\"\n",
" self.size = SIZE_LIST.index(self.size_string)\n",
"\n",
" self.longtype = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}type\").text # read type\n",
" if self.longtype == \"Unknown Cache\":\n",
" self.longtype = \"Mystery Cache\"\n",
" self.type = self._read_type(self.longtype)\n",
"\n",
" self.description = self._read_description(geocache_tree) # read description\n",
"\n",
" hint = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}encoded_hints\").text # read hint\n",
" self.hint = ownfunctions.replace_signs(hint)\n",
"\n",
" owner = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}placed_by\").text # read owner\n",
" self.owner = ownfunctions.replace_signs(owner)\n",
"\n",
" self.url = geocache_tree.find(\".//{http://www.topografix.com/GPX/1/0}url\").text # read url\n",
"\n",
" wpt = geocache_tree.find(\".//{http://www.topografix.com/GPX/1/0}wpt\") # read coordinates\n",
" self.coordinates = [float(wpt.get(\"lat\")), float(wpt.get(\"lon\"))] # list of floats [lat, lon]\n",
" coord_str = ownfunctions.coords_decimal_to_minutes(self.coordinates)\n",
" self.coordinates_string = coord_str # string 'X XX°XX.XXX, X XXX°XX.XXX'\n",
"\n",
" attributes = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}text\").text # read attributes\n",
" attributes = attributes.split(\",\")\n",
" for a in attributes:\n",
" self.attributes.append(ownfunctions.remove_spaces(a))\n",
"\n",
" self.logs = self._read_logs(geocache_tree) # read logs\n",
"\n",
" cache = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}cache\") # read if available or not\n",
" available = cache.get(\"available\")\n",
" if available == \"True\":\n",
" self.available = True\n",
" elif available == \"False\":\n",
" self.available = False\n",
"\n",
" def _read_from_geocachingcom_gpxfile(self, geocache_tree):\n",
" \"\"\"reads attributes from a gpx-file that is created by geocaching.com (part of __init__)\"\"\"\n",
"\n",
" name = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}name\").text # read name\n",
" self.name = ownfunctions.replace_signs(name)\n",
"\n",
" difficulty = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}difficulty\").text # read difficulty\n",
" self.difficulty = float(difficulty)\n",
"\n",
" terrain = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}terrain\").text # read terrain\n",
" self.terrain = float(terrain)\n",
"\n",
" self.size_string = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}container\").text.lower() # size\n",
" if self.size_string not in SIZE_LIST:\n",
" self.size_string = \"other\"\n",
" self.size = SIZE_LIST.index(self.size_string)\n",
"\n",
" self.longtype = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}type\").text # read type\n",
" if self.longtype == \"Unknown Cache\":\n",
" self.longtype = \"Mystery Cache\"\n",
" self.type = self._read_type(self.longtype)\n",
"\n",
" self.description = self._read_description(geocache_tree) # read description\n",
"\n",
" hint = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}encoded_hints\").text # read hint\n",
" self.hint = ownfunctions.replace_signs(hint)\n",
"\n",
" owner = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}placed_by\").text # read owner\n",
" self.owner = ownfunctions.replace_signs(owner)\n",
"\n",
" url_raw = geocache_tree.findall(\".//{http://www.topografix.com/GPX/1/0}url\") # read url\n",
" self.url = url_raw[1].text # because index 0 is only http://www.geocaching.com\n",
"\n",
" wpt = geocache_tree.find(\".//{http://www.topografix.com/GPX/1/0}wpt\") # read coordinates\n",
" self.coordinates = [float(wpt.get(\"lat\")), float(wpt.get(\"lon\"))] # list of floats [lat, lon]\n",
" coord_str = ownfunctions.coords_decimal_to_minutes(self.coordinates)\n",
" self.coordinates_string = coord_str # string 'X XX°XX.XXX, X XXX°XX.XXX'\n",
"\n",
" attributes = geocache_tree.findall(\".//{http://www.groundspeak.com/cache/1/0/1}attribute\") # read attributes\n",
" for a in attributes:\n",
" self.attributes.append(ownfunctions.remove_spaces(a.text))\n",
"\n",
" self.logs = self._read_logs(geocache_tree) # read logs\n",
"\n",
" cache = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}cache\") # read if available or not\n",
" available = cache.get(\"available\")\n",
" if available == \"True\":\n",
" self.available = True\n",
" elif available == \"False\":\n",
" self.available = False\n",
"\n",
" def _read_logs(self, geocache_tree):\n",
" \"\"\"reads logs from xml-file, part of __init__\"\"\"\n",
"\n",
" log_dates_raw = []\n",
" if self.source == \"downloader\":\n",
" log_dates_raw = geocache_tree.findall(\".//{http://www.groundspeak.com/cache/1/0}date\")\n",
" elif self.source == \"geocaching.com\":\n",
" log_dates_raw = geocache_tree.findall(\".//{http://www.groundspeak.com/cache/1/0/1}date\")\n",
" log_dates = []\n",
" for i, ld in enumerate(log_dates_raw):\n",
" if i == 0 and self.source == \"geocaching.com\":\n",
" log_dates.append(ld.text[:10])\n",
" elif i > 0: # in gpx-file from the gpx-downloader the attributes are also saved as logs\n",
" log_dates.append(ld.text[:10]) # but not taken into account here\n",
"\n",
" log_types_raw = []\n",
" if self.source == \"downloader\":\n",
" log_types_raw = geocache_tree.findall(\".//{http://www.groundspeak.com/cache/1/0}type\")\n",
" elif self.source == \"geocaching.com\":\n",
" log_types_raw = geocache_tree.findall(\".//{http://www.groundspeak.com/cache/1/0/1}type\")\n",
" log_types = []\n",
" for i, lt in enumerate(log_types_raw):\n",
" if i == 1 and self.source == \"geocaching.com\": # index 0 corresponds to cachetyp (Tradi, Multi,...),\n",
" log_types.append(lt.text) # # index 1 to the attributes if gpx from gpx-downloader\n",
" elif i > 1:\n",
" log_types.append(lt.text)\n",
"\n",
" finder_raw = []\n",
" if self.source == \"downloader\":\n",
" finder_raw = geocache_tree.findall(\".//{http://www.groundspeak.com/cache/1/0}finder\")\n",
" elif self.source == \"geocaching.com\":\n",
" finder_raw = geocache_tree.findall(\".//{http://www.groundspeak.com/cache/1/0/1}finder\")\n",
" finder = []\n",
" for i, fd in enumerate(finder_raw):\n",
" if i == 0 and self.source == \"geocaching.com\":\n",
" next_fd = ownfunctions.replace_signs(fd.text)\n",
" finder.append(next_fd)\n",
" if i > 0: # in gpx-file from the gpx-downloader index 0 corresponding to attributes\n",
" next_fd = ownfunctions.replace_signs(fd.text)\n",
" finder.append(next_fd)\n",
"\n",
" log_texts = []\n",
" if self.source == \"geocaching.com\": # logtext is only saved in gpx-files from geocaching.com\n",
" text_raw = geocache_tree.findall(\".//{http://www.groundspeak.com/cache/1/0/1}text\")\n",
" for i, tx in enumerate(text_raw):\n",
" next_tx = ownfunctions.replace_signs(tx.text)\n",
" log_texts.append(next_tx)\n",
"\n",
" logs = []\n",
" log_number = len(log_dates)\n",
" if len(log_dates) == len(log_types) == len(finder):\n",
" for i in range(log_number):\n",
" new_log = [log_dates[i], log_types[i], finder[i]]\n",
" if self.source == \"geocaching.com\":\n",
" new_log.append(log_texts[i])\n",
" logs.append(new_log)\n",
" else:\n",
" print(\"\\nWARNING! Error in gpx-file. Reading logs correctly not possible.\")\n",
"\n",
" return logs\n",
"\n",
" def _read_description(self, geocache_tree):\n",
" \"\"\"reads description from xml-file, part of __init__\n",
" source: is the xml-file created by geocaching.com gpx-downloader or by geocaching.com itself?\"\"\"\n",
"\n",
" description_short = \"\"\n",
" if self.source == \"downloader\":\n",
" description_short = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}short_description\").text\n",
" elif self.source == \"geocaching.com\":\n",
" description_short = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}short_description\").text\n",
" if description_short:\n",
" description_short = ownfunctions.replace_signs(description_short)\n",
" else:\n",
" description_short = \"\"\n",
"\n",
" description_long = \"\"\n",
" if self.source == \"downloader\":\n",
" description_long = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0}long_description\").text\n",
" elif self.source == \"geocaching.com\":\n",
" description_long = geocache_tree.find(\".//{http://www.groundspeak.com/cache/1/0/1}long_description\").text\n",
" if description_long:\n",
" description_long = ownfunctions.replace_signs(description_long)\n",
" else:\n",
" description_long = \"\"\n",
"\n",
" return description_short + \"\\n\\n\" + description_long\n",
"\n",
" @staticmethod\n",
" def _read_type(lt):\n",
" \"\"\"converts cachetypes from xml-file to those from TYPE_LIST, part of __init__\"\"\"\n",
" if lt in TYPE_LIST:\n",
" cachetype = lt\n",
" elif lt == \"Cache In Trash Out Event\" or lt == \"Mega-Event Cache\" or lt == \"Giga-Event Cache\":\n",
" cachetype = \"Event Cache\"\n",
" else:\n",
" cachetype = \"Unknown Type\"\n",
" return cachetype\n",
" \n",
" def __str__(self):\n",
" return self.gccode\n",
" \n",
" def __eq__(self, other):\n",
" return self.gccode == other\n",
" \n",
" def __ne__(self, other):\n",
" return self.gccode != other\n",
"\n",
" def add_waypoint(self, waypoint):\n",
" \"\"\"adds a waypoint to the cache\"\"\"\n",
"\n",
" if type(waypoint) == Waypoint:\n",
" waypoint.find_shown_name_and_distance(self)\n",
" self.waypoints.append(waypoint)\n",
" else:\n",
" raise TypeError(\"Waypoint can't be added because it is not of waypoint type\")\n",
" \n",
" def shortinfo(self, space=0):\n",
" \"\"\"returns one-line information about the cache\n",
" space = number of spaces before waypoint lines\n",
" (space = 0 if cache is shown without distance, space = 12 if it's shown with distance)\"\"\"\n",
"\n",
" a = self.gccode.ljust(7)\n",
" b = self.coordinates_string\n",
" c = self.type.ljust(17)\n",
" d = self.difficulty\n",
" e = self.terrain\n",
" f = self.size_string.ljust(7)\n",
" g = str(self.available).ljust(5)\n",
" h = self.downloaddate_string\n",
" i = self.name\n",
" result = \"{} | {} | {} | D {} | T {} | {} | {} | {} | {}\".format(a, b, c, d, e, f, g, h, i)\n",
" for w in self.waypoints:\n",
" result += \"\\n\" + space*\" \" + w.info()\n",
" return result\n",
"\n",
" def longinfo(self): \n",
" \"\"\"returns detailed information about the cache\"\"\" \n",
" \n",
" z1 = \"\\n{} : {}\".format(self.gccode, self.name)\n",
" z2 = \"\\n\"\n",
" for i in range(len(z1)):\n",
" z2 += \"-\"\n",
" d = self.difficulty\n",
" t = self.terrain\n",
" sizestr = self.size_string\n",
" lt = self.longtype\n",
" z3 = \"\\n{}: {}, {}: {}, {}: {}, {}: {}\".format(STR_D, d, STR_T, t, STR_SIZE, sizestr, STR_TYPE, lt)\n",
" z4 = \"\\n{}: {}\".format(STR_COORDS, self.coordinates_string)\n",
" if self.waypoints:\n",
" z4 += \", {}: \".format(STR_WAYPOINTS)\n",
" for w in self.waypoints:\n",
" z4 += \"{} ({}), \".format(w.shown_name, w.coordinates_string)\n",
" z4 = z4[:-2]\n",
" z5 = \"\\n{}: {}\".format(STR_OWNER, self.owner)\n",
" z6 = \"\\n{}: \".format(STR_ATTR)\n",
" for a in self.attributes:\n",
" z6 = z6 + str(a) + \", \"\n",
" z6 = z6[:-2]\n",
" z7 = \"\\n{}: {}, {}: {}\".format(STR_ACT, self.available, STR_DATE, self.downloaddate_string)\n",
" z8 = \"\\n{}: {}\".format(STR_LINK, self.url)\n",
" z9 = \"\\n\\n{}\".format(self.description)\n",
" z10 = \"\\n{}: {}\".format(STR_HINT, self.hint)\n",
" z11 = \"\\n\\n\"\n",
" for l in self.logs:\n",
" z11 += \"{}: {} by {}\\n\".format(l[0], l[1], l[2])\n",
" if len(l) > 3:\n",
" z11 += \"{}\\n\\n\".format(l[3])\n",
" return z1 + z2 + z3 + z4 + z5 + z6 + z7 + z8 + z9 + z10 + z11\n",
"\n",
"\n",
"class Waypoint(object):\n",
" \"\"\"\n",
" An object of this class contains all information about a waypoint\n",
"\n",
"\n",
" Attributes:\n",
" -----------\n",
" name: string\n",
" name of the waypoint\n",
"\n",
" shown_name: string\n",
" name of the waypoint that is shown\n",
" i.e. without the gccode of the cache if waypoint belongs to a geocache\n",
"\n",
" coordinates: list\n",
" coordinates in decimal degree, first element of the list: latitude, second element: longitude\n",
"\n",
" coordinates_string: string\n",
" coordinates as degree and minutes\n",
"\n",
" distance: float\n",
" distance of the coordinates of the cache if waypoint belongs to a cache\n",
" else None\n",
"\n",
"\n",
" Methods:\n",
" ---------\n",
" __init__(name, coordinates): creates the object out of name and coordinates as list [lat, lon]\n",
"\n",
" find_shown_name_and_distance(geocache): is performed if waypoint belongs to a geocache,\n",
" calculates shown_name and distance\n",
"\n",
" info(): returns information about the waypoint\n",
" \"\"\"\n",
"\n",
" ALLOWED_SIGNS = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\", \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\",\n",
" \"U\", \"V\", \"W\", \"X\", \"Y\", \"Z\", u\"Ä\", u\"Ö\", u\"Ü\", u\"ß\", \"!\", \"#\", \"$\", '\"', \"?\", \"*\", \"/\", \"(\", \")\",\n",
" \"-\", \"+\", \"&\", \"'\", \";\", \":\", \",\", \".\", \"=\", \"@\", \"%\", \"<\", \">\", \"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\",\n",
" \"7\", \"8\", \"9\", \" \"]\n",
"\n",
" def __init__(self, name, coordinates):\n",
" \"\"\"creates the object out of name and coordinates as list [lat, lon]\"\"\"\n",
"\n",
" if type(name) != str:\n",
" raise TypeError(\"waypoint name is of wrong type\")\n",
" self.name = name.upper()\n",
" for c in self.name:\n",
" if c not in self.ALLOWED_SIGNS:\n",
" raise TypeError(\"GARMIN does not allow '{}' in a waypoint name.\".format(c))\n",
" self.shown_name = self.name # for waypoints not belonging to a geocache\n",
"\n",
" ownfunctions.validate_coordinates(coordinates) # throws an error if coordinates not valid\n",
" self.coordinates = coordinates\n",
" coord_str = ownfunctions.coords_decimal_to_minutes(self.coordinates)\n",
" self.coordinates_string = coord_str # string 'X XX°XX.XXX, X XXX°XX.XXX'\n",
" self.distance = None # initialize for later use\n",
"\n",
" def find_shown_name_and_distance(self, geocache):\n",
" \"\"\"calculates the shown name and the distance to the 'main coordinates'\n",
" if waypoint belongs to a geocache\"\"\"\n",
"\n",
" namelist = self.name.split()\n",
" if not (namelist[-1].startswith(\"(GC\") and namelist[-1].endswith(\")\")) or namelist[-1][1:-1] != geocache.gccode:\n",
" raise TypeError(\"This waypoint does not belong to the geocache.\")\n",
" self.shown_name = \" \".join(namelist[:-1])\n",
" self.distance = ownfunctions.calculate_distance(self.coordinates, geocache.coordinates)\n",
"\n",
" def info(self):\n",
" \"\"\"returns information about the waypoint\"\"\"\n",
" result = \" | {} | {}\".format(self.coordinates_string, self.shown_name)\n",
" if self.distance:\n",
" result += \" ({}km)\".format(round(self.distance, 1))\n",
" return result\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0.008695652173913044,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0.2,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0.011904761904761904,
0.1111111111111111,
0,
0,
0.2,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.2,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0.020833333333333332,
0.1111111111111111,
0.043478260869565216,
0.00980392156862745,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0.009900990099009901,
0.1111111111111111,
0.047619047619047616,
0.021739130434782608,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0.2,
0.2,
0,
0,
0.01,
0,
0,
0.2,
0,
0,
0,
0.1111111111111111,
0.058823529411764705,
0,
0,
0.2,
0,
0.00980392156862745,
0.2,
0,
0,
0,
0,
0.010101010101010102,
0,
0.008620689655172414,
0,
0.008849557522123894,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0.011494252873563218,
0,
0.01,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0.008771929824561403,
0,
0,
0.009900990099009901,
0,
0,
0.008403361344537815,
0,
0,
0.00909090909090909,
0,
0,
0.00847457627118644,
0,
0,
0,
0,
0.00909090909090909,
0,
0,
0,
0,
0.011764705882352941,
0,
0.00909090909090909,
0,
0,
0.009259259259259259,
0,
0,
0.01,
0,
0.01020408163265306,
0.009708737864077669,
0,
0.012195121951219513,
0,
0.008849557522123894,
0,
0,
0,
0,
0,
0,
0.008849557522123894,
0,
0,
0,
0,
0,
0,
0,
0.01,
0,
0.009708737864077669,
0,
0,
0.008264462809917356,
0,
0,
0.008928571428571428,
0,
0,
0.008130081300813009,
0,
0,
0,
0,
0.008928571428571428,
0,
0,
0,
0,
0.011764705882352941,
0,
0.008928571428571428,
0,
0,
0.00909090909090909,
0,
0,
0.010309278350515464,
0.011363636363636364,
0,
0.01020408163265306,
0.009708737864077669,
0,
0.012195121951219513,
0,
0.00847457627118644,
0,
0,
0,
0,
0,
0.008695652173913044,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0.009900990099009901,
0,
0,
0,
0,
0.009900990099009901,
0.009900990099009901,
0,
0,
0,
0.010101010101010102,
0,
0.009900990099009901,
0,
0,
0.008771929824561403,
0.008547008547008548,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0.01,
0,
0,
0,
0,
0,
0.010309278350515464,
0,
0,
0,
0,
0.00980392156862745,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0.009523809523809525,
0,
0,
0,
0.00847457627118644,
0,
0.008333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.008620689655172414,
0,
0.00847457627118644,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0.07692307692307693,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01,
0,
0,
0,
0,
0.04,
0.016666666666666666,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0.008264462809917356,
0.008333333333333333,
0.008264462809917356,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0.012345679012345678,
0,
0.010101010101010102,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0.008264462809917356,
0,
0,
0.010416666666666666,
0,
0,
0,
0.011627906976744186,
0,
0,
0
] | 518 | 0.010787 | false |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v2020.2.0b1),
on Wed Aug 5 12:32:22 2020
If you publish work using this script the most relevant publication is:
Peirce J, Gray JR, Simpson S, MacAskill M, Höchenberger R, Sogo H, Kastman E, Lindeløv JK. (2019)
PsychoPy2: Experiments in behavior made easy Behav Res 51: 195.
https://doi.org/10.3758/s13428-018-01193-y
"""
from __future__ import absolute_import, division
from psychopy import locale_setup
from psychopy import prefs
from psychopy import sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
# Store info about the experiment session
psychopyVersion = '2020.2.0b1'
expName = 'untitled.py'
expInfo = {'participant': '', 'session': '001'}
dlg = gui.DlgFromDict(dictionary=expInfo, sort_keys=False, title=expName)
if dlg.OK == False:
core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
originPath='untitled.py',
savePickle=True, saveWideText=True,
dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
frameTolerance = 0.001 # how close to onset before 'same' frame
# Start Code - component code to be run before the window creation
# Setup the Window
win = visual.Window(
size=(1024, 768), fullscr=True, screen=0,
winType='pyglet', allowGUI=False, allowStencil=False,
monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
# Initialize components for Routine "trial"
trialClock = core.Clock()
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "trial"-------
continueRoutine = True
# update component parameters for each repeat
# keep track of which components have finished
trialComponents = []
for thisComponent in trialComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
trialClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "trial"-------
while continueRoutine:
# get current time
t = trialClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=trialClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "trial"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# the Routine "trial" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# Flip one final time so any remaining win.callOnFlip()
# and win.timeOnFlip() tasks get executed before quitting
win.flip()
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(filename+'.csv', delim='auto')
thisExp.saveAsPickle(filename)
logging.flush()
# make sure everything is closed down
thisExp.abort() # or data files will save again on exit
win.close()
core.quit()
| [
"#!/usr/bin/env python\n",
"# -*- coding: utf-8 -*-\n",
"\"\"\"\n",
"This experiment was created using PsychoPy3 Experiment Builder (v2020.2.0b1),\n",
" on Wed Aug 5 12:32:22 2020\n",
"If you publish work using this script the most relevant publication is:\n",
"\n",
" Peirce J, Gray JR, Simpson S, MacAskill M, Höchenberger R, Sogo H, Kastman E, Lindeløv JK. (2019) \n",
" PsychoPy2: Experiments in behavior made easy Behav Res 51: 195. \n",
" https://doi.org/10.3758/s13428-018-01193-y\n",
"\n",
"\"\"\"\n",
"\n",
"from __future__ import absolute_import, division\n",
"\n",
"from psychopy import locale_setup\n",
"from psychopy import prefs\n",
"from psychopy import sound, gui, visual, core, data, event, logging, clock\n",
"from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,\n",
" STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)\n",
"\n",
"import numpy as np # whole numpy lib is available, prepend 'np.'\n",
"from numpy import (sin, cos, tan, log, log10, pi, average,\n",
" sqrt, std, deg2rad, rad2deg, linspace, asarray)\n",
"from numpy.random import random, randint, normal, shuffle\n",
"import os # handy system and path functions\n",
"import sys # to get file system encoding\n",
"\n",
"from psychopy.hardware import keyboard\n",
"\n",
"\n",
"\n",
"# Ensure that relative paths start from the same directory as this script\n",
"_thisDir = os.path.dirname(os.path.abspath(__file__))\n",
"os.chdir(_thisDir)\n",
"\n",
"# Store info about the experiment session\n",
"psychopyVersion = '2020.2.0b1'\n",
"expName = 'untitled.py'\n",
"expInfo = {'participant': '', 'session': '001'}\n",
"dlg = gui.DlgFromDict(dictionary=expInfo, sort_keys=False, title=expName)\n",
"if dlg.OK == False:\n",
" core.quit() # user pressed cancel\n",
"expInfo['date'] = data.getDateStr() # add a simple timestamp\n",
"expInfo['expName'] = expName\n",
"expInfo['psychopyVersion'] = psychopyVersion\n",
"\n",
"# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc\n",
"filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])\n",
"\n",
"# An ExperimentHandler isn't essential but helps with data saving\n",
"thisExp = data.ExperimentHandler(name=expName, version='',\n",
" extraInfo=expInfo, runtimeInfo=None,\n",
" originPath='untitled.py',\n",
" savePickle=True, saveWideText=True,\n",
" dataFileName=filename)\n",
"# save a log file for detail verbose info\n",
"logFile = logging.LogFile(filename+'.log', level=logging.EXP)\n",
"logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file\n",
"\n",
"endExpNow = False # flag for 'escape' or other condition => quit the exp\n",
"frameTolerance = 0.001 # how close to onset before 'same' frame\n",
"\n",
"# Start Code - component code to be run before the window creation\n",
"\n",
"# Setup the Window\n",
"win = visual.Window(\n",
" size=(1024, 768), fullscr=True, screen=0, \n",
" winType='pyglet', allowGUI=False, allowStencil=False,\n",
" monitor='testMonitor', color=[0,0,0], colorSpace='rgb',\n",
" blendMode='avg', useFBO=True, \n",
" units='height')\n",
"# store frame rate of monitor if we can measure it\n",
"expInfo['frameRate'] = win.getActualFrameRate()\n",
"if expInfo['frameRate'] != None:\n",
" frameDur = 1.0 / round(expInfo['frameRate'])\n",
"else:\n",
" frameDur = 1.0 / 60.0 # could not measure, so guess\n",
"\n",
"# create a default keyboard (e.g. to check for escape)\n",
"defaultKeyboard = keyboard.Keyboard()\n",
"\n",
"# Initialize components for Routine \"trial\"\n",
"trialClock = core.Clock()\n",
"\n",
"# Create some handy timers\n",
"globalClock = core.Clock() # to track the time since experiment started\n",
"routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine \n",
"\n",
"# ------Prepare to start Routine \"trial\"-------\n",
"continueRoutine = True\n",
"# update component parameters for each repeat\n",
"# keep track of which components have finished\n",
"trialComponents = []\n",
"for thisComponent in trialComponents:\n",
" thisComponent.tStart = None\n",
" thisComponent.tStop = None\n",
" thisComponent.tStartRefresh = None\n",
" thisComponent.tStopRefresh = None\n",
" if hasattr(thisComponent, 'status'):\n",
" thisComponent.status = NOT_STARTED\n",
"# reset timers\n",
"t = 0\n",
"_timeToFirstFrame = win.getFutureFlipTime(clock=\"now\")\n",
"trialClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip\n",
"frameN = -1\n",
"\n",
"# -------Run Routine \"trial\"-------\n",
"while continueRoutine:\n",
" # get current time\n",
" t = trialClock.getTime()\n",
" tThisFlip = win.getFutureFlipTime(clock=trialClock)\n",
" tThisFlipGlobal = win.getFutureFlipTime(clock=None)\n",
" frameN = frameN + 1 # number of completed frames (so 0 is the first frame)\n",
" # update/draw components on each frame\n",
" \n",
" # check for quit (typically the Esc key)\n",
" if endExpNow or defaultKeyboard.getKeys(keyList=[\"escape\"]):\n",
" core.quit()\n",
" \n",
" # check if all components have finished\n",
" if not continueRoutine: # a component has requested a forced-end of Routine\n",
" break\n",
" continueRoutine = False # will revert to True if at least one component still running\n",
" for thisComponent in trialComponents:\n",
" if hasattr(thisComponent, \"status\") and thisComponent.status != FINISHED:\n",
" continueRoutine = True\n",
" break # at least one component has not yet finished\n",
" \n",
" # refresh the screen\n",
" if continueRoutine: # don't flip if this routine is over or we'll get a blank screen\n",
" win.flip()\n",
"\n",
"# -------Ending Routine \"trial\"-------\n",
"for thisComponent in trialComponents:\n",
" if hasattr(thisComponent, \"setAutoDraw\"):\n",
" thisComponent.setAutoDraw(False)\n",
"# the Routine \"trial\" was not non-slip safe, so reset the non-slip timer\n",
"routineTimer.reset()\n",
"\n",
"# Flip one final time so any remaining win.callOnFlip() \n",
"# and win.timeOnFlip() tasks get executed before quitting\n",
"win.flip()\n",
"\n",
"# these shouldn't be strictly necessary (should auto-save)\n",
"thisExp.saveAsWideText(filename+'.csv', delim='auto')\n",
"thisExp.saveAsPickle(filename)\n",
"logging.flush()\n",
"# make sure everything is closed down\n",
"thisExp.abort() # or data files will save again on exit\n",
"win.close()\n",
"core.quit()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0.019417475728155338,
0.0136986301369863,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013513513513513514,
0,
0,
0,
0,
0,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0.012345679012345678,
0.009900990099009901,
0,
0,
0,
0.024390243902439025,
0.03333333333333333,
0.025,
0.037037037037037035,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085,
0,
0.03333333333333333,
0.02857142857142857,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0.2,
0,
0.012345679012345678,
0,
0.01098901098901099,
0,
0.012195121951219513,
0,
0,
0.2,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017543859649122806,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 152 | 0.006908 | false |
from PyQt5 import QtGui, QtCore, QtWidgets
from nupic_studio.ui import ICON
class SimulationLegendWindow(QtWidgets.QDialog):
def __init__(self):
"""
Initializes a new instance of this class.
"""
QtWidgets.QWidget.__init__(self)
self.initUI()
def initUI(self):
# button_inactive_element
self.button_inactive_element = QtWidgets.QPushButton()
self.button_inactive_element.setStyleSheet("background-color: rgb(190, 190, 190)")
self.button_inactive_element.setEnabled(False)
# label_inactive_element
self.label_inactive_element = QtWidgets.QLabel()
self.label_inactive_element.setText("Inactive")
# button_falsely_predicted_element
self.button_falsely_predicted_element = QtWidgets.QPushButton()
self.button_falsely_predicted_element.setStyleSheet("background-color: rgb(255, 0, 0)")
self.button_falsely_predicted_element.setEnabled(False)
# label_falsely_predicted_element
self.label_falsely_predicted_element = QtWidgets.QLabel()
self.label_falsely_predicted_element.setText("Falsely Predicted")
# button_predicted_element
self.button_predicted_element = QtWidgets.QPushButton()
self.button_predicted_element.setStyleSheet("background-color: rgb(255, 215, 80)")
self.button_predicted_element.setEnabled(False)
# label_predicted_element
self.label_predicted_element = QtWidgets.QLabel()
self.label_predicted_element.setText("Predicted")
# button_active_element
self.button_active_element = QtWidgets.QPushButton()
self.button_active_element.setStyleSheet("background-color: rgb(50, 205, 50)")
self.button_active_element.setEnabled(False)
# label_active_element
self.label_active_element = QtWidgets.QLabel()
self.label_active_element.setText("Active/Connected")
# button_learning_element
self.button_learning_element = QtWidgets.QPushButton()
self.button_learning_element.setStyleSheet("background-color: rgb(125, 255, 0)")
self.button_learning_element.setEnabled(False)
# label_learning_element
self.label_learning_element = QtWidgets.QLabel()
self.label_learning_element.setText("Learning")
# button_selected_element
self.button_selected_element = QtWidgets.QPushButton()
self.button_selected_element.setStyleSheet("background-color: rgb(0, 0, 255)")
self.button_selected_element.setEnabled(False)
# label_selected_element
self.label_selected_element = QtWidgets.QLabel()
self.label_selected_element.setText("Selected")
# layout
layout = QtWidgets.QGridLayout()
layout.addWidget(self.button_inactive_element, 0, 0)
layout.addWidget(self.label_inactive_element, 0, 1)
layout.addWidget(self.button_falsely_predicted_element, 1, 0)
layout.addWidget(self.label_falsely_predicted_element, 1, 1)
layout.addWidget(self.button_predicted_element, 2, 0)
layout.addWidget(self.label_predicted_element, 2, 1)
layout.addWidget(self.button_active_element, 3, 0)
layout.addWidget(self.label_active_element, 3, 1)
layout.addWidget(self.button_learning_element, 4, 0)
layout.addWidget(self.label_learning_element, 4, 1)
layout.addWidget(self.button_selected_element, 5, 0)
layout.addWidget(self.label_selected_element, 5, 1)
layout.setRowStretch(1, 100)
# self
self.setLayout(layout)
self.setWindowTitle("Simulation Legend")
self.setWindowIcon(ICON)
self.setMinimumWidth(100)
self.setMinimumHeight(150)
| [
"from PyQt5 import QtGui, QtCore, QtWidgets\n",
"from nupic_studio.ui import ICON\n",
"\n",
"\n",
"class SimulationLegendWindow(QtWidgets.QDialog):\n",
"\n",
" def __init__(self):\n",
" \"\"\"\n",
" Initializes a new instance of this class.\n",
" \"\"\"\n",
" QtWidgets.QWidget.__init__(self)\n",
" self.initUI()\n",
"\n",
" def initUI(self):\n",
"\n",
" # button_inactive_element\n",
" self.button_inactive_element = QtWidgets.QPushButton()\n",
" self.button_inactive_element.setStyleSheet(\"background-color: rgb(190, 190, 190)\")\n",
" self.button_inactive_element.setEnabled(False)\n",
"\n",
" # label_inactive_element\n",
" self.label_inactive_element = QtWidgets.QLabel()\n",
" self.label_inactive_element.setText(\"Inactive\")\n",
"\n",
" # button_falsely_predicted_element\n",
" self.button_falsely_predicted_element = QtWidgets.QPushButton()\n",
" self.button_falsely_predicted_element.setStyleSheet(\"background-color: rgb(255, 0, 0)\")\n",
" self.button_falsely_predicted_element.setEnabled(False)\n",
"\n",
" # label_falsely_predicted_element\n",
" self.label_falsely_predicted_element = QtWidgets.QLabel()\n",
" self.label_falsely_predicted_element.setText(\"Falsely Predicted\")\n",
"\n",
" # button_predicted_element\n",
" self.button_predicted_element = QtWidgets.QPushButton()\n",
" self.button_predicted_element.setStyleSheet(\"background-color: rgb(255, 215, 80)\")\n",
" self.button_predicted_element.setEnabled(False)\n",
"\n",
" # label_predicted_element\n",
" self.label_predicted_element = QtWidgets.QLabel()\n",
" self.label_predicted_element.setText(\"Predicted\")\n",
"\n",
" # button_active_element\n",
" self.button_active_element = QtWidgets.QPushButton()\n",
" self.button_active_element.setStyleSheet(\"background-color: rgb(50, 205, 50)\")\n",
" self.button_active_element.setEnabled(False)\n",
"\n",
" # label_active_element\n",
" self.label_active_element = QtWidgets.QLabel()\n",
" self.label_active_element.setText(\"Active/Connected\")\n",
"\n",
" # button_learning_element\n",
" self.button_learning_element = QtWidgets.QPushButton()\n",
" self.button_learning_element.setStyleSheet(\"background-color: rgb(125, 255, 0)\")\n",
" self.button_learning_element.setEnabled(False)\n",
"\n",
" # label_learning_element\n",
" self.label_learning_element = QtWidgets.QLabel()\n",
" self.label_learning_element.setText(\"Learning\")\n",
"\n",
" # button_selected_element\n",
" self.button_selected_element = QtWidgets.QPushButton()\n",
" self.button_selected_element.setStyleSheet(\"background-color: rgb(0, 0, 255)\")\n",
" self.button_selected_element.setEnabled(False)\n",
"\n",
" # label_selected_element\n",
" self.label_selected_element = QtWidgets.QLabel()\n",
" self.label_selected_element.setText(\"Selected\")\n",
"\n",
" # layout\n",
" layout = QtWidgets.QGridLayout()\n",
" layout.addWidget(self.button_inactive_element, 0, 0)\n",
" layout.addWidget(self.label_inactive_element, 0, 1)\n",
" layout.addWidget(self.button_falsely_predicted_element, 1, 0)\n",
" layout.addWidget(self.label_falsely_predicted_element, 1, 1)\n",
" layout.addWidget(self.button_predicted_element, 2, 0)\n",
" layout.addWidget(self.label_predicted_element, 2, 1)\n",
" layout.addWidget(self.button_active_element, 3, 0)\n",
" layout.addWidget(self.label_active_element, 3, 1)\n",
" layout.addWidget(self.button_learning_element, 4, 0)\n",
" layout.addWidget(self.label_learning_element, 4, 1)\n",
" layout.addWidget(self.button_selected_element, 5, 0)\n",
" layout.addWidget(self.label_selected_element, 5, 1)\n",
" layout.setRowStretch(1, 100)\n",
"\n",
" # self\n",
" self.setLayout(layout)\n",
" self.setWindowTitle(\"Simulation Legend\")\n",
" self.setWindowIcon(ICON)\n",
" self.setMinimumWidth(100)\n",
" self.setMinimumHeight(150)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 91 | 0.000732 | false |
import tiebaTitle as TT
import urllib
import os
import sys
import time
import threading
import pickle
import datetime
import MailService
#该脚本用来抓取我们贴吧帖子的标题
begURL = 'http://tieba.baidu.com/f?'
#主程序逻辑
TT.setupfiles()
os.system('cls')
print('>>>>>This script can be used to get data from Tieba\n>>>>>by Kanch kanchisme@gmail.com')
isize = os.path.getsize('result.txt')
if isize > 10:
f = open('result_add','rb')
xs = pickle.load(f)
f.close()
print('\t>>>Dataset size:'+str(isize)+' bytes,with'+str(xs['sum'])+'pieces of data,created on'+str(xs['time']))
opt = input('\r\n>>>>>Enter the name of Tieba you\'d like to retrive?If NO,[LiYi] defaulty(Y/N):____\b\b')
if opt == 'Y':
tieba_name = input('>>>>>Input the name of Tieba where data to retrive:______________________\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b')
print('>>>>>Target Tieba [' + tieba_name + ']')
else:
tieba_name = '李毅'
print('>>>>>no specific TIeba settled,retrive data from [LiYi] defaulty')
KWD = urllib.parse.urlencode({'kw':tieba_name})
begURL = begURL + KWD + '&ie=utf-8&pn='
TT.max_page = input('>>>>>How many pages you\'d like to retrive:______\b\b\b\b\b')
GTC = input('>>>>>how many threads fit you the best:_____\b\b\b')
TT.GV_THEAD_COUNT = int(GTC)
# --- Mode selection --------------------------------------------------------
gmode = input('>>>>>Mode Selection:1.Default 2.Deep (in Deep mode,script will get author and post data besides post title)\nEnter mode::_______\b\b\b\b')
TT.GV_MODE = int(gmode)

mstr = "============================================================\r\nRESULT\r\n============================================================="
createdtime = datetime.datetime.now()  # job creation timestamp, reported in the summary mail
# ==========================================================================
# Main program logic: worker threads download pages while this (main)
# thread consumes and processes the downloaded data.
# ==========================================================================
time1 = time.time()
MAX_PAGE = int(TT.max_page)
# Partition the page range [0, MAX_PAGE) evenly across the worker threads.
threads = []
delta = MAX_PAGE / TT.GV_THEAD_COUNT
beg = 0
end = delta
for worker_id in range(TT.GV_THEAD_COUNT):
    worker = threading.Thread(target=TT.downloadPage,
                              args=(int(end), worker_id + 1, begURL, int(beg)))
    threads.append(worker)
    beg += delta
    end += delta
# Start the downloaders as daemon threads so they never block interpreter exit.
for worker in threads:
    worker.daemon = True  # modern spelling of the deprecated setDaemon(True)
    worker.start()
# Consume downloaded pages; returns the item count and the report body.
# ('total' instead of 'sum' to avoid shadowing the builtin.)
total, mstr = TT.pocessDataList(TT.GV_THEAD_COUNT, begURL)
# --- All data processed: persist results to disk ---------------------------
now = datetime.datetime.now()  # job completion timestamp
last_data_source = {'sum': total, 'time': now}
TT.savetofile(mstr, 'result.txt')
# 'with' guarantees the file is closed even if pickling fails.
with open('result_add', 'wb') as f:
    pickle.dump(last_data_source, f, 2)  # protocol 2, as before
time2 = time.time()
tc = time2 - time1
print('>>>>>Script pocess finished!Total time cost:', str(tc), 'seconds\n>>>>>with[', total, ']pieces of data in all\n>>>>>result have been save to', 'result.txt')
# --- Build and send the summary mail ---------------------------------------
Title = "Download Success! Finised on " + str(now) + '.'
line1 = "Tieba job created on " + str(createdtime) + " now has been finised!\r\n=========================\r\nSummary\r\n\r\n"
line2 = "\r\nJob Created on: \t" + str(createdtime) + '\r\nJob finished on: \t' + str(now) + "\r\nPieces of data retrived: " + str(total) + "\r\nTotal time cost: \t" + str(tc) + " seconds"
line3 = "\r\n\r\n\r\n This mail is send by Kanch's PythonBot @ 216.45.55.153\r\n=========================\r\n"
Content = line1 + line2 + line3
#print(Title,'\r\n',Content)
MailService.SendMail('1075900121@qq.com',Title,Content) | [
"import tiebaTitle as TT\n",
"import urllib\n",
"import os\n",
"import sys\n",
"import time\n",
"import threading\n",
"import pickle\n",
"import datetime\n",
"import MailService\n",
"\n",
"#该脚本用来抓取我们贴吧帖子的标题\n",
"begURL = 'http://tieba.baidu.com/f?'\n",
"#主程序逻辑\n",
"TT.setupfiles()\n",
"os.system('cls')\n",
"print('>>>>>This script can be used to get data from Tieba\\n>>>>>by Kanch kanchisme@gmail.com')\n",
"isize = os.path.getsize('result.txt')\n",
"if isize > 10:\n",
" f = open('result_add','rb')\n",
" xs = pickle.load(f)\n",
" f.close()\n",
" print('\\t>>>Dataset size:'+str(isize)+' bytes,with'+str(xs['sum'])+'pieces of data,created on'+str(xs['time']))\n",
"opt = input('\\r\\n>>>>>Enter the name of Tieba you\\'d like to retrive?If NO,[LiYi] defaulty(Y/N):____\\b\\b')\n",
"if opt == 'Y':\n",
" tieba_name = input('>>>>>Input the name of Tieba where data to retrive:______________________\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b')\n",
" print('>>>>>Target Tieba [' + tieba_name + ']')\n",
"else:\n",
" tieba_name = '李毅'\n",
" print('>>>>>no specific TIeba settled,retrive data from [LiYi] defaulty')\n",
"KWD = urllib.parse.urlencode({'kw':tieba_name})\n",
"begURL = begURL + KWD + '&ie=utf-8&pn='\n",
"TT.max_page = input('>>>>>How many pages you\\'d like to retrive:______\\b\\b\\b\\b\\b')\n",
"\n",
"GTC = input('>>>>>how many threads fit you the best:_____\\b\\b\\b')\n",
"TT.GV_THEAD_COUNT = int(GTC)\n",
"\n",
"gmode = input('>>>>>Mode Selection:1.Default 2.Deep (in Deep mode,script will get author and post data besides post title)\\nEnter mode::_______\\b\\b\\b\\b')\n",
"TT.GV_MODE = int(gmode)\n",
"\n",
"mstr = \"============================================================\\r\\nRESULT\\r\\n=============================================================\"\n",
"createdtime = datetime.datetime.now()\n",
"createdtime.strftime('%Y-%m-%d %H:%M:%S') \n",
"#======================================================================================\n",
"#=================================主程序逻辑=======================================\n",
"#我们用一个线程下载网页,一个线程处理下载后的数据。\n",
"#======================================================================================\n",
"time1 = time.time()\n",
"#下面是多线程方案\n",
"MAX_PAGE = int(TT.max_page)\n",
"#创建线程\n",
"t = [] \n",
"x = 0\n",
"deltaX = MAX_PAGE / TT.GV_THEAD_COUNT\n",
"BEG = 0\n",
"END = deltaX\n",
"while x < TT.GV_THEAD_COUNT:\n",
" tn = threading.Thread(target=TT.downloadPage,args=(int(END),x+1,begURL,int(BEG),))\n",
" t.append(tn)\n",
" x += 1\n",
" BEG += deltaX\n",
" END += deltaX\n",
"\n",
"\n",
"#启动线程\n",
"for item in t:\n",
" item.setDaemon(True)\n",
" item.start()\n",
"#循环处理数据\n",
"sum,mstr = TT.pocessDataList(TT.GV_THEAD_COUNT,begURL)\n",
"#===================================全部处理完毕,储存至文件======================================\n",
"now = datetime.datetime.now()\n",
"now.strftime('%Y-%m-%d %H:%M:%S') \n",
"last_data_source = {'sum':sum,'time':now}\n",
"\n",
"TT.savetofile(mstr,'result.txt')\n",
"f = open('result_add','wb')\n",
"pickle.dump(last_data_source, f,2)\n",
"f.close()\n",
"time2 = time.time()\n",
"tc = time2 - time1\n",
"print('>>>>>Script pocess finished!Total time cost:',str(tc),'seconds\\n>>>>>with[',sum,']pieces of data in all\\n>>>>>result have been save to','result.txt')\n",
"\n",
"\n",
"Title = \"Download Success! Finised on \" + str(now) + '.'\n",
"line1 = \"Tieba job created on \" + str(createdtime) + \" now has been finised!\\r\\n=========================\\r\\nSummary\\r\\n\\r\\n\"\n",
"line2 = \"\\r\\nJob Created on: \\t\"+str(createdtime)+'\\r\\nJob finished on: \\t'+str(now) +\"\\r\\nPieces of data retrived: \" + str(sum) +\"\\r\\nTotal time cost: \\t\" + str(tc) + \" seconds\"\n",
"line3 = \"\\r\\n\\r\\n\\r\\n This mail is send by Kanch's PythonBot @ 216.45.55.153\\r\\n=========================\\r\\n\"\n",
"Content = line1 + line2 + line3\n",
"#print(Title,'\\r\\n',Content)\n",
"MailService.SendMail('1075900121@qq.com',Title,Content)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0.14285714285714285,
0,
0,
0.010416666666666666,
0,
0,
0.03125,
0,
0,
0.008620689655172414,
0.009345794392523364,
0,
0.007042253521126761,
0,
0,
0,
0,
0.020833333333333332,
0,
0.012048192771084338,
0,
0,
0,
0,
0.006493506493506494,
0,
0,
0.006896551724137931,
0,
0.022727272727272728,
0.022727272727272728,
0.012658227848101266,
0.037037037037037035,
0.022727272727272728,
0,
0.1,
0,
0.16666666666666666,
0.1,
0,
0,
0,
0,
0,
0.05747126436781609,
0,
0,
0,
0,
0,
0,
0.16666666666666666,
0,
0,
0,
0.125,
0.03636363636363636,
0.022988505747126436,
0,
0.027777777777777776,
0.07142857142857142,
0,
0.030303030303030304,
0.03571428571428571,
0.02857142857142857,
0,
0,
0,
0.03821656050955414,
0,
0,
0,
0.007936507936507936,
0.016574585635359115,
0.009009009009009009,
0,
0.034482758620689655,
0.05454545454545454
] | 90 | 0.017322 | false |
#! /usr/bin/python
from sgui_utils import *
from sgui_event import EventHook
class Widget(object):
	"""Base class for terminal UI widgets.

	Stores geometry, colors, border style, focus state and event hooks,
	and renders its own background/border through ``Utils.render``.
	"""

	def __init__(self):
		# Fields
		self._context = None				# context of a widget (ideally layout or sgui class)
		self._text = ''						# text which may be printed on a widget (unnecessary to use)
		self._width = 0
		self._height = 0
		self._x = 0
		self._y = 0
		self._align = Align.LEFT			# align widget in container
		self._contentAlign = Align.TOP_LEFT	# align content in widget (do we really need it?)
		self._margin = Margin()
		self._visible = True				# is widget visible or not
		self._focused = False				# is widget focused or not
		self._border = Border.NOBORDER		# border style
		self._backColor = Back.BLUE			# background color
		self._foreColor = Fore.WHITE		# foreground color
		self._backAltColor = Back.CYAN		# alternate background color (when focused)
		self._foreAltColor = Fore.BLACK		# alternate foreground color (when focused)
		# Events: callers subscribe handlers via EventHook.
		self.onClick = EventHook()
		self.onFocus = EventHook()
		self.onFocusLost = EventHook()	# NOTE(review): never fired in this class -- confirm intent
		self.onKeyPress = EventHook()
		self.onResize = EventHook()
		self.onRender = EventHook()		# NOTE(review): never fired in this class

	def setContext(self, context):
		self._context = context

	def getContext(self):
		return self._context

	def setText(self, text):
		self._text = text

	def getText(self):
		return self._text

	def setWidth(self, width):
		self._width = width

	def getWidth(self):
		return self._width

	def setHeight(self, height):
		self._height = height

	def getHeight(self):
		"""Return the outer height: a border adds one row above and below."""
		if self._border == Border.NOBORDER:
			return self._height
		else:
			return self._height + 2

	def setX(self, x):
		self._x = x

	def getX(self):
		return self._x

	def setY(self, y):
		self._y = y

	def getY(self):
		return self._y

	def setPosition(self, y, x):
		"""Move the widget; ignored unless both coordinates are positive."""
		if x > 0 and y > 0:
			self._y = y
			self._x = x

	def setSize(self, height, width):
		"""Resize the widget, re-render it and fire ``onResize``."""
		if width >= 0 and height >= 0:
			self._width = width
			self._height = height
			self.render()
			self.onResize.fire()

	def setAlign(self, align):
		self._align = align

	def getAlign(self):
		return self._align

	def setMargin(self, top, right, bottom, left):
		"""Set all four margins at once.

		NOTE(review): the guard rejects input only when ALL four values
		are negative; ``or`` was probably intended -- confirm.
		"""
		if top < 0 and right < 0 and bottom < 0 and left < 0: return None
		self._margin.top = top
		self._margin.right = right
		self._margin.bottom = bottom
		self._margin.left = left

	def setVisible(self, visible):
		self._visible = visible

	def getVisible(self):
		return self._visible

	def setFocused(self, focused):
		"""Toggle focus, swapping normal/alternate colors, then re-render."""
		# Condition is equivalent to ``self._focused != focused``.
		if (self._focused == False and focused == True) or (self._focused == True and focused == False):
			self._focused = focused
			# replace back- and foreground colors by alternative ones
			tempBackColor = self._backColor
			self._backColor = self._backAltColor
			self._backAltColor = tempBackColor
			tempForeColor = self._foreColor
			self._foreColor = self._foreAltColor
			self._foreAltColor = tempForeColor
			# NOTE(review): ``onFocus`` fires on focus loss as well, and
			# ``onFocusLost`` is never fired -- confirm intended behavior.
			self.onFocus.fire(self)
			self.render()

	def getFocused(self):
		return self._focused

	def setBorder(self, border):
		"""Set the border style and re-render.

		Disabled resize-on-border-change logic, kept for reference:
		if self._border == Border.NOBORDER and border != Border.NOBORDER:
			self._width += 2
			self._height += 2
		elif self._border != Border.NOBORDER and border == Border.NOBORDER:
			self._width -= 2
			self._height -= 2"""
		self._border = border
		self.render()

	def getBorder(self):
		return self._border

	def setBackColor(self, backColor):
		self._backColor = backColor

	def getBackColor(self):
		return self._backColor

	def setForeColor(self, foreColor):
		self._foreColor = foreColor

	def getForeColor(self):
		return self._foreColor

	def setBackAltColor(self, backAltColor):
		self._backAltColor = backAltColor

	def getBackAltColor(self):
		return self._backAltColor

	def setForeAltColor(self, foreAltColor):
		self._foreAltColor = foreAltColor

	def getForeAltColor(self):
		return self._foreAltColor

	def click(self):
		"""Fire the ``onClick`` event."""
		self.onClick.fire(self)

	def keyPress(self, code):
		"""Fire the ``onKeyPress`` event with the pressed key ``code``."""
		self.onKeyPress.fire(self, code)

	def render(self):
		"""Draw the widget background and, if set, its border."""
		renderCommands = []
		renderCommands.append(self._backColor + self._foreColor)
		if self._border == Border.NOBORDER:
			# Paint the widget rectangle row by row.
			for i in range(self._height):
				renderCommands.append(Utils.getMoveCursorCode(self._y + i, self._x) + " " * self._width + Utils.NewLine)
		else:
			# The border occupies one extra cell on each side of the widget.
			borderX = self._x - 1
			borderY = self._y - 1
			borderWidth = self._width + 2
			borderHeight = self._height + 2
			renderCommands.append(Utils.getMoveCursorCode(borderY, borderX) + self._border['LEFT_TOP'] + self._border['HORIZONTAL'] * (borderWidth - 2) + self._border['RIGHT_TOP'] + Utils.NewLine)
			for i in range(borderHeight - 2):
				renderCommands.append(Utils.getMoveCursorCode(borderY + i + 1, borderX) + self._border['VERTICAL'] + " " * (borderWidth - 2) + self._border['VERTICAL'] + Utils.NewLine)
			renderCommands.append(Utils.getMoveCursorCode(borderY + borderHeight - 1, borderX) + self._border['LEFT_BOTTOM'] + self._border['HORIZONTAL'] * (borderWidth - 2) + self._border['RIGHT_BOTTOM'])
Utils.render(renderCommands) | [
"#! /usr/bin/python\n",
"\n",
"from sgui_utils import *\n",
"from sgui_event import EventHook\n",
"\n",
"class Widget(object):\n",
"\n",
"\tdef __init__(self):\n",
"\t\t\n",
"\t\t# Fields\n",
"\t\tself._context = None\t\t\t\t# context of a widget (ideally layout or sgui class)\n",
"\t\tself._text = ''\t\t\t\t\t\t# text which may be printed on a widget (unnesessary to use)\n",
"\t\tself._width = 0\n",
"\t\tself._height = 0\n",
"\t\tself._x = 0\n",
"\t\tself._y = 0\n",
"\t\tself._align = Align.LEFT\t\t\t# align widget in container\n",
"\t\tself._contentAlign = Align.TOP_LEFT\t# align content in widget (do we really need it?)\n",
"\t\tself._margin = Margin()\n",
"\t\tself._visible = True\t\t\t\t# is widget visible or not\n",
"\t\tself._focused = False\t\t\t\t# is widget focused or not\n",
"\t\tself._border = Border.NOBORDER\t\t# border style\n",
"\t\tself._backColor = Back.BLUE\t\t\t# background color\n",
"\t\tself._foreColor = Fore.WHITE\t\t# foreground color\n",
"\t\tself._backAltColor = Back.CYAN\t\t# alternate background color (when focused)\n",
"\t\tself._foreAltColor = Fore.BLACK\t\t# alternate foreground color (when focused)\n",
"\n",
"\t\t# Events\n",
"\t\tself.onClick = EventHook()\n",
"\t\tself.onFocus = EventHook()\n",
"\t\tself.onFocusLost = EventHook()\n",
"\t\tself.onKeyPress = EventHook()\n",
"\t\tself.onResize = EventHook()\n",
"\t\tself.onRender = EventHook()\n",
"\t\n",
"\tdef setContext(self, context):\n",
"\t\tself._context = context\n",
"\t\t\n",
"\tdef getContext(self):\n",
"\t\treturn self._context\n",
"\t\t\n",
"\tdef setText(self, text):\n",
"\t\tself._text = text\n",
"\t\t\n",
"\tdef getText(self):\n",
"\t\treturn self._text\n",
"\t\t\n",
"\tdef setWidth(self, width):\n",
"\t\tself._width = width\n",
"\t\n",
"\tdef getWidth(self):\n",
"\t\treturn self._width\n",
"\t\t\n",
"\tdef setHeight(self, height):\n",
"\t\tself._height = height\n",
"\t\n",
"\tdef getHeight(self):\n",
"\t\tif self._border == Border.NOBORDER:\n",
"\t\t\treturn self._height\n",
"\t\telse:\n",
"\t\t\treturn self._height + 2\n",
"\t\t\n",
"\tdef setX(self, x):\n",
"\t\tself._x = x\n",
"\t\t\n",
"\tdef getX(self):\n",
"\t\treturn self._x\n",
"\n",
"\tdef setY(self, y):\n",
"\t\tself._y = y\n",
"\t\t\n",
"\tdef getY(self):\n",
"\t\treturn self._y\n",
"\n",
"\tdef setPosition(self, y, x):\n",
"\t\tif x > 0 and y > 0:\n",
"\t\t\tself._y = y\n",
"\t\t\tself._x = x\n",
"\t\t\t\n",
"\tdef setSize(self, height, width):\n",
"\t\tif width >= 0 and height >= 0:\n",
"\t\t\tself._width = width\n",
"\t\t\tself._height = height\n",
"\t\t\tself.render()\n",
"\t\t\tself.onResize.fire()\n",
"\n",
"\tdef setAlign(self, align):\n",
"\t\tself._align = align\n",
"\t\t\n",
"\tdef getAlign(self):\n",
"\t\treturn self._align\n",
"\t\t\n",
"\tdef setMargin(self, top, right, bottom, left):\n",
"\t\tif top < 0 and right < 0 and bottom < 0 and left < 0: return None\n",
"\t\tself._margin.top = top\n",
"\t\tself._margin.right = right\n",
"\t\tself._margin.bottom = bottom\n",
"\t\tself._margin.left = left\n",
"\t\t\n",
"\tdef setVisible(self, visible):\n",
"\t\tself._visible = visible\n",
"\t\t\n",
"\tdef getVisible(self):\n",
"\t\treturn self._visible\n",
"\n",
"\tdef setFocused(self, focused):\n",
"\t\tif (self._focused == False and focused == True) or (self._focused == True and focused == False):\n",
"\t\t\tself._focused = focused\n",
"\t\t\t# replace back- and foreground colors by alternative ones\n",
"\t\t\ttempBackColor = self._backColor\n",
"\t\t\tself._backColor = self._backAltColor\n",
"\t\t\tself._backAltColor = tempBackColor\n",
"\t\t\ttempForeColor = self._foreColor\n",
"\t\t\tself._foreColor = self._foreAltColor\n",
"\t\t\tself._foreAltColor = tempForeColor\n",
"\t\t\tself.onFocus.fire(self)\n",
"\t\t\tself.render()\n",
"\t\t\t\n",
"\tdef getFocused(self):\n",
"\t\treturn self._focused\n",
"\t\t\t\n",
"\tdef setBorder(self, border):\n",
"\t\t\"\"\"if self._border == Border.NOBORDER and border != Border.NOBORDER:\n",
"\t\t\tself._width += 2\n",
"\t\t\tself._height += 2\n",
"\t\telif self._border != Border.NOBORDER and border == Border.NOBORDER:\n",
"\t\t\tself._width -= 2\n",
"\t\t\tself._height -= 2\"\"\"\n",
"\t\tself._border = border\n",
"\t\tself.render()\n",
"\t\t\n",
"\tdef getBorder(self):\n",
"\t\treturn self._border\n",
"\t\t\n",
"\tdef setBackColor(self, backColor):\n",
"\t\tself._backColor = backColor\n",
"\t\t\n",
"\tdef getBackColor(self):\n",
"\t\treturn self._backColor\n",
"\t\t\n",
"\tdef setForeColor(self, foreColor):\n",
"\t\tself._foreColor = foreColor\n",
"\t\t\n",
"\tdef getForeColor(self):\n",
"\t\treturn self._foreColor\n",
"\t\t\n",
"\tdef setBackAltColor(self, backAltColor):\n",
"\t\tself._backAltColor = backAltColor\n",
"\t\t\n",
"\tdef getBackAltColor(self):\n",
"\t\treturn self._backAltColor\n",
"\t\t\n",
"\tdef setForeAltColor(self, foreAltColor):\n",
"\t\tself._foreAltColor = foreAltColor\n",
"\t\t\n",
"\tdef getForeAltColor(self):\n",
"\t\treturn self._foreAltColor\n",
"\t\t\n",
"\tdef click(self):\n",
"\t\tself.onClick.fire(self)\n",
"\t\t\n",
"\tdef keyPress(self, code):\n",
"\t\tself.onKeyPress.fire(self, code)\n",
"\n",
"\tdef render(self):\n",
"\t\trenderCommands = []\n",
"\t\trenderCommands.append(self._backColor + self._foreColor)\n",
"\t\tif self._border == Border.NOBORDER:\n",
"\t\t\tfor i in range(self._height):\n",
"\t\t\t\trenderCommands.append(Utils.getMoveCursorCode(self._y + i, self._x) + \" \" * self._width + Utils.NewLine)\n",
"\t\telse:\n",
"\t\t\tborderX = self._x - 1\n",
"\t\t\tborderY = self._y - 1\n",
"\t\t\tborderWidth = self._width + 2\n",
"\t\t\tborderHeight = self._height + 2\n",
"\t\t\trenderCommands.append(Utils.getMoveCursorCode(borderY, borderX) + self._border['LEFT_TOP'] + self._border['HORIZONTAL'] * (borderWidth - 2) + self._border['RIGHT_TOP'] + Utils.NewLine)\n",
"\t\t\tfor i in range(borderHeight - 2):\n",
"\t\t\t\trenderCommands.append(Utils.getMoveCursorCode(borderY + i + 1, borderX) + self._border['VERTICAL'] + \" \" * (borderWidth - 2) + self._border['VERTICAL'] + Utils.NewLine)\n",
"\t\t\trenderCommands.append(Utils.getMoveCursorCode(borderY + borderHeight - 1, borderX) + self._border['LEFT_BOTTOM'] + self._border['HORIZONTAL'] * (borderWidth - 2) + self._border['RIGHT_BOTTOM'])\n",
"\t\tUtils.render(renderCommands)"
] | [
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0.047619047619047616,
0.6666666666666666,
0.09090909090909091,
0.012658227848101266,
0.023809523809523808,
0.05555555555555555,
0.05263157894736842,
0.07142857142857142,
0.07142857142857142,
0.017543859649122806,
0.03409090909090909,
0.038461538461538464,
0.018867924528301886,
0.018518518518518517,
0.02040816326530612,
0.0196078431372549,
0.0196078431372549,
0.01282051282051282,
0.012658227848101266,
0,
0.09090909090909091,
0.034482758620689655,
0.034482758620689655,
0.030303030303030304,
0.03125,
0.03333333333333333,
0.03333333333333333,
1,
0.03125,
0.038461538461538464,
0.6666666666666666,
0.043478260869565216,
0.043478260869565216,
0.6666666666666666,
0.038461538461538464,
0.05,
0.6666666666666666,
0.05,
0.05,
0.6666666666666666,
0.03571428571428571,
0.045454545454545456,
1,
0.047619047619047616,
0.047619047619047616,
0.6666666666666666,
0.03333333333333333,
0.041666666666666664,
1,
0.045454545454545456,
0.02631578947368421,
0.043478260869565216,
0.125,
0.037037037037037035,
0.6666666666666666,
0.05,
0.07142857142857142,
0.6666666666666666,
0.058823529411764705,
0.058823529411764705,
0,
0.05,
0.07142857142857142,
0.6666666666666666,
0.058823529411764705,
0.058823529411764705,
0,
0.03333333333333333,
0.045454545454545456,
0.06666666666666667,
0.06666666666666667,
0.5,
0.02857142857142857,
0.030303030303030304,
0.043478260869565216,
0.04,
0.058823529411764705,
0.041666666666666664,
0,
0.03571428571428571,
0.045454545454545456,
0.6666666666666666,
0.047619047619047616,
0.047619047619047616,
0.6666666666666666,
0.020833333333333332,
0.029411764705882353,
0.04,
0.034482758620689655,
0.03225806451612903,
0.037037037037037035,
0.6666666666666666,
0.03125,
0.038461538461538464,
0.6666666666666666,
0.043478260869565216,
0.043478260869565216,
0,
0.03125,
0.06060606060606061,
0.037037037037037035,
0.01639344262295082,
0.02857142857142857,
0.025,
0.02631578947368421,
0.02857142857142857,
0.025,
0.02631578947368421,
0.037037037037037035,
0.058823529411764705,
0.5,
0.043478260869565216,
0.043478260869565216,
0.5,
0.03333333333333333,
0.014084507042253521,
0.05,
0.047619047619047616,
0.014285714285714285,
0.05,
0.041666666666666664,
0.041666666666666664,
0.0625,
0.6666666666666666,
0.045454545454545456,
0.045454545454545456,
0.6666666666666666,
0.027777777777777776,
0.03333333333333333,
0.6666666666666666,
0.04,
0.04,
0.6666666666666666,
0.027777777777777776,
0.03333333333333333,
0.6666666666666666,
0.04,
0.04,
0.6666666666666666,
0.023809523809523808,
0.027777777777777776,
0.6666666666666666,
0.03571428571428571,
0.03571428571428571,
0.6666666666666666,
0.023809523809523808,
0.027777777777777776,
0.6666666666666666,
0.03571428571428571,
0.03571428571428571,
0.6666666666666666,
0.05555555555555555,
0.038461538461538464,
0.6666666666666666,
0.037037037037037035,
0.02857142857142857,
0,
0.05263157894736842,
0.045454545454545456,
0.01694915254237288,
0.02631578947368421,
0.030303030303030304,
0.01834862385321101,
0.125,
0.04,
0.04,
0.030303030303030304,
0.02857142857142857,
0.010638297872340425,
0.02702702702702703,
0.011560693641618497,
0.01015228426395939,
0.06666666666666667
] | 180 | 0.144489 | false |
# Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Default operators defined on any (reasonable) space."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import super
from copy import copy
from odl.operator.operator import Operator
from odl.space import ProductSpace
from odl.set import LinearSpace, LinearSpaceElement, Field, RealNumbers
__all__ = ('ScalingOperator', 'ZeroOperator', 'IdentityOperator',
'LinCombOperator', 'MultiplyOperator', 'PowerOperator',
'InnerProductOperator', 'NormOperator', 'DistOperator',
'ConstantOperator', 'RealPart', 'ImagPart', 'ComplexEmbedding')
class ScalingOperator(Operator):

    """Operator of multiplication with a scalar.

    ``ScalingOperator(s)(x) == s * x``
    """

    def __init__(self, domain, scalar):
        """Initialize a new instance.

        Parameters
        ----------
        domain : `LinearSpace` or `Field`
            Set of elements on which this operator acts.
        scalar : ``domain.field`` element
            Fixed scaling factor of this operator.

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> vec = r3.element([1, 2, 3])
        >>> out = r3.element()
        >>> op = ScalingOperator(r3, 2.0)
        >>> op(vec, out)  # In-place, Returns out
        rn(3).element([2.0, 4.0, 6.0])
        >>> out
        rn(3).element([2.0, 4.0, 6.0])
        >>> op(vec)  # Out-of-place
        rn(3).element([2.0, 4.0, 6.0])
        """
        # Reject anything that is not a linear space or a field up front.
        if not isinstance(domain, (LinearSpace, Field)):
            raise TypeError('`space` {!r} not a `LinearSpace` or `Field` '
                            'instance'.format(domain))
        super().__init__(domain, domain, linear=True)
        self.__scalar = domain.field.element(scalar)

    @property
    def scalar(self):
        """Fixed scaling factor of this operator."""
        return self.__scalar

    def _call(self, x, out=None):
        """Scale ``x`` and write to ``out`` if given."""
        if out is None:
            # Out-of-place evaluation allocates the result implicitly.
            return self.scalar * x
        # In-place evaluation: out <- scalar * x.
        out.lincomb(self.scalar, x)
        return out

    @property
    def inverse(self):
        """Return the inverse operator.

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> vec = r3.element([1, 2, 3])
        >>> op = ScalingOperator(r3, 2.0)
        >>> inv = op.inverse
        >>> inv(op(vec)) == vec
        True
        >>> op(inv(vec)) == vec
        True
        """
        if self.scalar == 0.0:
            raise ZeroDivisionError('scaling operator not invertible for '
                                    'scalar==0')
        # Scaling by s is undone by scaling with 1/s.
        return ScalingOperator(self.domain, 1.0 / self.scalar)

    @property
    def adjoint(self):
        """Adjoint, given as scaling with the conjugate of the scalar.

        Returns
        -------
        adjoint : `ScalingOperator`
            ``self`` if `scalar` is real, else `scalar` is conjugated.
        """
        if complex(self.scalar).imag != 0.0:
            return ScalingOperator(self.domain, self.scalar.conjugate())
        # A real scalar makes the operator self-adjoint.
        return self

    def __repr__(self):
        """Return ``repr(self)``."""
        cls_name = self.__class__.__name__
        return '{}({!r}, {!r})'.format(cls_name, self.domain, self.scalar)

    def __str__(self):
        """Return ``str(self)``."""
        return '{} * I'.format(self.scalar)
class IdentityOperator(ScalingOperator):

    """Operator mapping each element to itself.

    ``IdentityOperator()(x) == x``
    """

    def __init__(self, space):
        """Initialize a new instance.

        Parameters
        ----------
        space : `LinearSpace`
            Space of elements which the operator is acting on.
        """
        # The identity is just scaling by one.
        super().__init__(space, 1)

    def __repr__(self):
        """Return ``repr(self)``."""
        cls_name = self.__class__.__name__
        return '{}({!r})'.format(cls_name, self.domain)

    def __str__(self):
        """Return ``str(self)``."""
        return "I"
class LinCombOperator(Operator):

    """Operator mapping two space elements to a linear combination::

        LinCombOperator(a, b)([x, y]) == a * x + b * y
    """

    def __init__(self, space, a, b):
        """Initialize a new instance.

        Parameters
        ----------
        space : `LinearSpace`
            Space of elements which the operator is acting on.
        a, b : ``space.field`` elements
            Scalars to multiply ``x[0]`` and ``x[1]`` with, respectively.

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> r3xr3 = odl.ProductSpace(r3, r3)
        >>> xy = r3xr3.element([[1, 2, 3], [1, 2, 3]])
        >>> z = r3.element()
        >>> op = LinCombOperator(r3, 1.0, 1.0)
        >>> op(xy, out=z)  # Returns z
        rn(3).element([2.0, 4.0, 6.0])
        >>> z
        rn(3).element([2.0, 4.0, 6.0])
        """
        # The operator takes a pair of elements, i.e. its domain is the
        # product of ``space`` with itself.
        super().__init__(ProductSpace(space, space), space, linear=True)
        self.a = a
        self.b = b

    def _call(self, x, out=None):
        """Linearly combine ``x`` and write to ``out`` if given."""
        result = self.range.element() if out is None else out
        result.lincomb(self.a, x[0], self.b, x[1])
        return result

    def __repr__(self):
        """Return ``repr(self)``."""
        cls_name = self.__class__.__name__
        return '{}({!r}, {!r}, {!r})'.format(cls_name, self.range,
                                             self.a, self.b)

    def __str__(self):
        """Return ``str(self)``."""
        return "{}*x + {}*y".format(self.a, self.b)
class MultiplyOperator(Operator):

    """Operator multiplying by a fixed space or field element.

    ``MultiplyOperator(y)(x) == x * y``

    Here, ``y`` is a `LinearSpaceElement` or `Field` element and
    ``x`` is a `LinearSpaceElement`.
    Hence, this operator can be defined either on a `LinearSpace` or on
    a `Field`. In the first case it is the pointwise multiplication,
    in the second the scalar multiplication.
    """

    def __init__(self, multiplicand, domain=None, range=None):
        """Initialize a new instance.

        Parameters
        ----------
        multiplicand : `LinearSpaceElement` or scalar
            Value to multiply by.
        domain : `LinearSpace` or `Field`, optional
            Set to which the operator can be applied.
            Default: ``multiplicand.space``.
        range : `LinearSpace` or `Field`, optional
            Set to which the operator maps. Default: ``multiplicand.space``.

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> x = r3.element([1, 2, 3])

        Multiply by vector:

        >>> op = MultiplyOperator(x)
        >>> op(x)
        rn(3).element([1.0, 4.0, 9.0])
        >>> out = r3.element()
        >>> op(x, out)
        rn(3).element([1.0, 4.0, 9.0])

        Multiply by scalar:

        >>> op2 = MultiplyOperator(x, domain=r3.field)
        >>> op2(3)
        rn(3).element([3.0, 6.0, 9.0])
        >>> out = r3.element()
        >>> op2(3, out)
        rn(3).element([3.0, 6.0, 9.0])
        """
        if domain is None:
            domain = multiplicand.space
        if range is None:
            range = multiplicand.space
        self.__multiplicand = multiplicand
        self.__domain_is_field = isinstance(domain, Field)
        self.__range_is_field = isinstance(range, Field)
        super().__init__(domain, range, linear=True)

    @property
    def multiplicand(self):
        """Value to multiply by."""
        return self.__multiplicand

    def _call(self, x, out=None):
        """Multiply ``x`` and write to ``out`` if given."""
        if out is None:
            return x * self.multiplicand
        elif not self.__range_is_field:
            if self.__domain_is_field:
                # Scalar input: out <- x * multiplicand via lincomb.
                out.lincomb(x, self.multiplicand)
            else:
                x.multiply(self.multiplicand, out=out)
        else:
            raise ValueError('can only use `out` with `LinearSpace` range')

    @property
    def adjoint(self):
        """Adjoint of this operator.

        Returns
        -------
        adjoint : `InnerProductOperator` or `MultiplyOperator`
            If the domain of this operator is the scalar field of a
            `LinearSpace` the adjoint is the inner product with ``y``,
            else it is the multiplication with ``y``.

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> x = r3.element([1, 2, 3])

        Multiply by a space element:

        >>> op = MultiplyOperator(x)
        >>> out = r3.element()
        >>> op.adjoint(x)
        rn(3).element([1.0, 4.0, 9.0])

        Multiply by a scalar:

        >>> op2 = MultiplyOperator(x, domain=r3.field)
        >>> op2.adjoint(x)
        14.0
        """
        if self.__domain_is_field:
            return InnerProductOperator(self.multiplicand)
        else:
            # TODO: complex case
            return MultiplyOperator(self.multiplicand)

    def __repr__(self):
        """Return ``repr(self)``."""
        return '{}({!r})'.format(self.__class__.__name__, self.multiplicand)

    def __str__(self):
        """Return ``str(self)``."""
        # BUG FIX: the original referenced the nonexistent attribute
        # ``self.y`` and raised AttributeError; the stored value is
        # ``self.multiplicand``.
        return "x * {}".format(self.multiplicand)
class PowerOperator(Operator):

    """Operator taking a fixed power of a space or field element.

    ``PowerOperator(p)(x) == x ** p``

    Here, ``x`` is a `LinearSpaceElement` or `Field` element and ``p`` is
    a number. Hence, this operator can be defined either on a
    `LinearSpace` or on a `Field`.
    """

    def __init__(self, domain, exponent):
        """Initialize a new instance.

        Parameters
        ----------
        domain : `LinearSpace` or `Field`
            Set of elements on which the operator can be applied.
        exponent : float
            Exponent parameter of the power function applied to an element.

        Examples
        --------
        Use with vectors

        >>> op = PowerOperator(odl.rn(3), exponent=2)
        >>> op([1, 2, 3])
        rn(3).element([1.0, 4.0, 9.0])

        or scalars

        >>> op = PowerOperator(odl.RealNumbers(), exponent=2)
        >>> op(2.0)
        4.0
        """
        self.__exponent = float(exponent)
        self.__domain_is_field = isinstance(domain, Field)
        # Raising to a power is linear exactly for exponent 1.
        super().__init__(domain, domain, linear=(exponent == 1))

    @property
    def exponent(self):
        """Power of the input element to take."""
        return self.__exponent

    def _call(self, x, out=None):
        """Take the power of ``x`` and write to ``out`` if given."""
        if out is None:
            return x ** self.exponent
        if self.__domain_is_field:
            raise ValueError('cannot use `out` with field')
        # In-place: copy the input, then raise it to the power.
        out.assign(x)
        out **= self.exponent

    def derivative(self, point):
        """Derivative of this operator.

        ``PowerOperator(p).derivative(y)(x) == p * y ** (p - 1) * x``

        Parameters
        ----------
        point : `domain` element
            The point in which to take the derivative

        Returns
        -------
        derivative : `Operator`
            The derivative in ``point``

        Examples
        --------
        Use on vector spaces:

        >>> op = PowerOperator(odl.rn(3), exponent=2)
        >>> dop = op.derivative(op.domain.element([1, 2, 3]))
        >>> dop([1, 1, 1])
        rn(3).element([2.0, 4.0, 6.0])

        Use with scalars:

        >>> op = PowerOperator(odl.RealNumbers(), exponent=2)
        >>> dop = op.derivative(2.0)
        >>> dop(2.0)
        8.0
        """
        # Chain rule: pointwise multiplication with p * y**(p - 1).
        pointwise_deriv = MultiplyOperator(point ** (self.exponent - 1),
                                           domain=self.domain,
                                           range=self.range)
        return self.exponent * pointwise_deriv

    def __repr__(self):
        """Return ``repr(self)``."""
        cls_name = self.__class__.__name__
        return '{}({!r}, {!r})'.format(cls_name, self.domain, self.exponent)

    def __str__(self):
        """Return ``str(self)``."""
        return "x ** {}".format(self.exponent)
class InnerProductOperator(Operator):

    """Operator taking the inner product with a fixed space element.

    ``InnerProductOperator(y)(x) <==> y.inner(x)``

    This is only applicable in inner product spaces.

    See Also
    --------
    DistOperator : Distance to a fixed space element.
    NormOperator : Vector space norm as operator.
    """

    def __init__(self, vector):
        """Initialize a new instance.

        Parameters
        ----------
        vector : `LinearSpaceElement`
            The element to take the inner product with.

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> x = r3.element([1, 2, 3])
        >>> op = InnerProductOperator(x)
        >>> op(r3.element([1, 2, 3]))
        14.0
        """
        self.__vector = vector
        # Maps from the vector's space into that space's scalar field.
        space = vector.space
        super().__init__(space, space.field, linear=True)

    @property
    def vector(self):
        """Element to take the inner product with."""
        return self.__vector

    def _call(self, x):
        """Return the inner product with ``x``."""
        # Argument order matters in complex spaces (conjugate asymmetry).
        return x.inner(self.vector)

    @property
    def adjoint(self):
        """Adjoint of this operator.

        Returns
        -------
        adjoint : `MultiplyOperator`
            The operator of multiplication with `vector`.

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> x = r3.element([1, 2, 3])
        >>> op = InnerProductOperator(x)
        >>> op.adjoint(2.0)
        rn(3).element([2.0, 4.0, 6.0])
        """
        return MultiplyOperator(self.vector, self.vector.space.field)

    @property
    def T(self):
        """Fixed vector of this operator.

        Returns
        -------
        vector : `LinearSpaceElement`
            The fixed space element used in this inner product operator.

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> x = r3.element([1, 2, 3])
        >>> x.T
        InnerProductOperator(rn(3).element([1.0, 2.0, 3.0]))
        >>> x.T.T
        rn(3).element([1.0, 2.0, 3.0])
        """
        return self.vector

    def __repr__(self):
        """Return ``repr(self)``."""
        cls_name = self.__class__.__name__
        return '{}({!r})'.format(cls_name, self.vector)

    def __str__(self):
        """Return ``str(self)``."""
        return '{}.T'.format(self.vector)
class NormOperator(Operator):

    """Vector space norm as an operator.

    ``NormOperator()(x) <==> x.norm()``

    This is only applicable in normed spaces.

    See Also
    --------
    InnerProductOperator : Inner product with a fixed space element.
    DistOperator : Distance to a fixed space element.
    """

    def __init__(self, space):
        """Initialize a new instance.

        Parameters
        ----------
        space : `LinearSpace`
            Space to take the norm in.

        Examples
        --------
        >>> r2 = odl.rn(2)
        >>> op = NormOperator(r2)
        >>> op([3, 4])
        5.0
        """
        # The norm is real-valued and nonlinear.
        super().__init__(space, RealNumbers(), linear=False)

    def _call(self, x):
        """Return the norm of ``x``."""
        return x.norm()

    def derivative(self, point):
        """Derivative of this operator in ``point``.

        ``NormOperator().derivative(y)(x) == (y / y.norm()).inner(x)``

        This is only applicable in inner product spaces.

        Parameters
        ----------
        point : `domain` `element-like`
            Point in which to take the derivative.

        Returns
        -------
        derivative : `InnerProductOperator`

        Raises
        ------
        ValueError
            If ``point.norm() == 0``, in which case the derivative is not well
            defined in the Frechet sense.

        Notes
        -----
        The derivative cannot be written in a general sense except in Hilbert
        spaces, in which case it is given by

        .. math::
            (D \|\cdot\|)(y)(x) = \langle y / \|y\|, x \\rangle

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> op = NormOperator(r3)
        >>> derivative = op.derivative([1, 0, 0])
        >>> derivative([1, 0, 0])
        1.0
        """
        pt = self.domain.element(point)
        pt_norm = pt.norm()
        if pt_norm == 0:
            raise ValueError('not differentiable in 0')
        # The gradient of the norm is the normalized point.
        return InnerProductOperator(pt / pt_norm)

    def __repr__(self):
        """Return ``repr(self)``."""
        cls_name = self.__class__.__name__
        return '{}({!r})'.format(cls_name, self.domain)

    def __str__(self):
        """Return ``str(self)``."""
        return '{}({})'.format(self.__class__.__name__, self.domain)
class DistOperator(Operator):

    """Operator taking the distance to a fixed space element.

    ``DistOperator(y)(x) == y.dist(x)``

    This is only applicable in metric spaces.

    See Also
    --------
    InnerProductOperator : Inner product with fixed space element.
    NormOperator : Vector space norm as an operator.
    """

    def __init__(self, vector):
        """Initialize a new instance.

        Parameters
        ----------
        vector : `LinearSpaceElement`
            Point to calculate the distance to.

        Examples
        --------
        >>> r2 = odl.rn(2)
        >>> x = r2.element([1, 1])
        >>> op = DistOperator(x)
        >>> op([4, 5])
        5.0
        """
        self.__vector = vector
        # The distance maps into the real numbers and is not linear.
        super().__init__(vector.space, RealNumbers(), linear=False)

    @property
    def vector(self):
        """Element to which to take the distance."""
        return self.__vector

    def _call(self, x):
        """Return the distance from ``self.vector`` to ``x``."""
        return self.vector.dist(x)

    def derivative(self, point):
        r"""The derivative operator.

        ``DistOperator(y).derivative(z)(x) ==
        ((y - z) / y.dist(z)).inner(x)``

        This is only applicable in inner product spaces.

        Parameters
        ----------
        point : `domain` `element-like`
            Point in which to take the derivative.

        Returns
        -------
        derivative : `InnerProductOperator`

        Raises
        ------
        ValueError
            If ``point == self.vector``, in which case the derivative is
            not well defined in the Frechet sense.

        Notes
        -----
        The derivative cannot be written in a general sense except in
        Hilbert spaces, in which case it is given by

        .. math::
            (D d(\cdot, y))(z)(x) = \langle (y-z) / d(y, z), x \rangle

        Examples
        --------
        >>> r2 = odl.rn(2)
        >>> x = r2.element([1, 1])
        >>> op = DistOperator(x)
        >>> derivative = op.derivative([2, 1])
        >>> derivative([1, 0])
        1.0
        """
        point = self.domain.element(point)
        diff = point - self.vector
        dist = self.vector.dist(point)
        if dist == 0:
            # Not differentiable where the distance vanishes.
            raise ValueError('not differentiable at the reference vector '
                             '{!r}'.format(self.vector))

        return InnerProductOperator(diff / dist)

    def __repr__(self):
        """Return ``repr(self)``."""
        return '{}({!r})'.format(self.__class__.__name__, self.vector)

    def __str__(self):
        """Return ``str(self)``."""
        return '{}({})'.format(self.__class__.__name__, self.vector)
class ConstantOperator(Operator):

    """Operator that always returns the same value.

    ``ConstantOperator(y)(x) == y``
    """

    def __init__(self, constant, domain=None, range=None):
        """Initialize a new instance.

        Parameters
        ----------
        constant : `LinearSpaceElement` or ``range`` `element-like`
            The constant space element to be returned. If ``range`` is not
            provided, ``constant`` must be a `LinearSpaceElement` since the
            operator range is then inferred from it.
        domain : `LinearSpace`, optional
            Domain of the operator. Default: ``constant.space``
        range : `LinearSpace`, optional
            Range of the operator. Default: ``constant.space``

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> x = r3.element([1, 2, 3])
        >>> op = ConstantOperator(x)
        >>> op(x, out=r3.element())
        rn(3).element([1.0, 2.0, 3.0])
        """
        if ((domain is None or range is None) and
                not isinstance(constant, LinearSpaceElement)):
            raise TypeError('If either domain or range is unspecified '
                            '`constant` must be LinearSpaceVector, got '
                            '{!r}.'.format(constant))

        if domain is None:
            domain = constant.space
        if range is None:
            range = constant.space

        self.__constant = range.element(constant)
        # A constant map is linear exactly when the constant is zero.
        linear = self.constant.norm() == 0
        super().__init__(domain, range, linear=linear)

    @property
    def constant(self):
        """Constant space element returned by this operator."""
        return self.__constant

    def _call(self, x, out=None):
        """Return the constant vector or assign it to ``out``."""
        if out is None:
            # Copy so callers cannot mutate the stored constant.
            return self.range.element(copy(self.constant))
        else:
            out.assign(self.constant)

    @property
    def adjoint(self):
        """Adjoint of this operator.

        Only defined if this operator is linear, which is the case exactly
        when ``constant`` is the zero element. The adjoint is then the
        `ZeroOperator` mapping from `range` to `domain`.

        Raises
        ------
        NotImplementedError
            If ``constant`` is nonzero, since a nonlinear operator has no
            adjoint.
        """
        if self.is_linear:
            return ZeroOperator(domain=self.range, range=self.domain)
        raise NotImplementedError('adjoint only defined for the linear '
                                  '(zero-constant) case')

    def derivative(self, point):
        """Derivative of this operator, always zero.

        Returns
        -------
        derivative : `ZeroOperator`

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> x = r3.element([1, 2, 3])
        >>> op = ConstantOperator(x)
        >>> deriv = op.derivative([1, 1, 1])
        >>> deriv([2, 2, 2])
        rn(3).element([0.0, 0.0, 0.0])
        """
        return ZeroOperator(domain=self.domain, range=self.range)

    def __repr__(self):
        """Return ``repr(self)``."""
        return '{}({!r})'.format(self.__class__.__name__, self.constant)

    def __str__(self):
        """Return ``str(self)``."""
        return "{}".format(self.constant)
class ZeroOperator(Operator):

    """Operator mapping each element to the zero element::

        ZeroOperator(space)(x) == space.zero()
    """

    def __init__(self, domain, range=None):
        """Initialize a new instance.

        Parameters
        ----------
        domain : `LinearSpace`
            Domain of the operator.
        range : `LinearSpace`, optional
            Range of the operator. Default: ``domain``

        Examples
        --------
        >>> op = odl.ZeroOperator(odl.rn(3))
        >>> op([1, 2, 3])
        rn(3).element([0.0, 0.0, 0.0])

        Also works with domain != range:

        >>> op = odl.ZeroOperator(odl.rn(3), odl.cn(4))
        >>> op([1, 2, 3])
        cn(4).element([0j, 0j, 0j, 0j])
        """
        range = domain if range is None else range
        super().__init__(domain, range, linear=True)

    def _call(self, x, out=None):
        """Return the zero vector or assign it to ``out``."""
        same_space = (self.domain == self.range)
        if out is None:
            # Scaling by 0 produces the zero element in the common space;
            # otherwise ask the range for its zero element directly.
            return 0 * x if same_space else self.range.zero()
        if same_space:
            out.lincomb(0, x)
        else:
            out.assign(self.range.zero())
        return out

    @property
    def adjoint(self):
        """Adjoint of the operator.

        If ``self.domain == self.range`` the zero operator is self-adjoint,
        otherwise it is the `ZeroOperator` from `range` to `domain`.
        """
        return ZeroOperator(domain=self.range, range=self.domain)

    def __repr__(self):
        """Return ``repr(self)``."""
        return '{}({!r})'.format(type(self).__name__, self.domain)

    def __str__(self):
        """Return ``str(self)``."""
        return '0'
class RealPart(Operator):

    """Operator that extracts the real part of a vector."""

    def __init__(self, space):
        """Initialize a new instance.

        Parameters
        ----------
        space : `FnBase`
            Space which real part should be taken, needs to implement
            ``space.real_space``.

        Examples
        --------
        Take the real part of complex vector:

        >>> c3 = odl.cn(3)
        >>> op = RealPart(c3)
        >>> op([1 + 2j, 2, 3 - 1j])
        rn(3).element([1.0, 2.0, 3.0])

        The operator is the identity on real spaces:

        >>> r3 = odl.rn(3)
        >>> op = RealPart(r3)
        >>> op([1, 2, 3])
        rn(3).element([1.0, 2.0, 3.0])

        The operator also works on other `FnBase` spaces such as
        `DiscreteLp` spaces:

        >>> r3 = odl.uniform_discr(0, 1, 3, dtype=complex)
        >>> op = RealPart(r3)
        >>> op([1, 2, 3])
        uniform_discr(0.0, 1.0, 3).element([1.0, 2.0, 3.0])
        """
        real_space = space.real_space
        # On a real space this operator is the identity, hence linear;
        # on a complex space it is treated as non-linear.
        linear = (space == real_space)
        Operator.__init__(self, space, real_space, linear=linear)

    def _call(self, x):
        """Return ``self(x)``."""
        return x.real

    @property
    def inverse(self):
        """Return the (pseudo-)inverse.

        Examples
        --------
        The inverse is its own inverse if its domain is real:

        >>> r3 = odl.rn(3)
        >>> op = RealPart(r3)
        >>> op.inverse(op([1, 2, 3]))
        rn(3).element([1.0, 2.0, 3.0])

        This is not a true inverse, only a pseudoinverse, the complex part
        will by necessity be lost.

        >>> c3 = odl.cn(3)
        >>> op = RealPart(c3)
        >>> op.inverse(op([1 + 2j, 2, 3 - 1j]))
        cn(3).element([(1+0j), (2+0j), (3+0j)])
        """
        if self.is_linear:
            return self
        else:
            # Embed back into the complex space with real scalar 1.
            return ComplexEmbedding(self.domain, scalar=1)

    @property
    def adjoint(self):
        r"""Return the (left) adjoint.

        Notes
        -----
        Due to technicalities of operators from a complex space into a
        real space, this does not satisfy the usual adjoint equation:

        .. math::
            \langle Ax, y \rangle = \langle x, A^*y \rangle

        Instead it is an adjoint in a weaker sense as follows:

        .. math::
            \langle AA^*x, y \rangle = \langle A^*x, A^*y \rangle

        Examples
        --------
        The adjoint satisfies the adjoint equation for real spaces:

        >>> r3 = odl.rn(3)
        >>> op = RealPart(r3)
        >>> x = op.domain.element([1, 2, 3])
        >>> y = op.range.element([3, 2, 1])
        >>> x.inner(op.adjoint(y)) == op(x).inner(y)
        True

        If the domain is complex, it only satisfies the weaker definition:

        >>> c3 = odl.cn(3)
        >>> op = RealPart(c3)
        >>> x = op.range.element([1, 2, 3])
        >>> y = op.range.element([3, 2, 1])
        >>> AtAxy = op(op.adjoint(x)).inner(y)
        >>> AtxAty = op.adjoint(x).inner(op.adjoint(y))
        >>> AtAxy == AtxAty
        True
        """
        if self.is_linear:
            return self
        else:
            # Embed back into the complex space with real scalar 1.
            return ComplexEmbedding(self.domain, scalar=1)
class ImagPart(Operator):

    """Operator that extracts the imaginary part of a vector."""

    def __init__(self, space):
        """Initialize a new instance.

        Parameters
        ----------
        space : `FnBase`
            Space which imaginary part should be taken, needs to implement
            ``space.real_space``.

        Examples
        --------
        Take the imaginary part of complex vector:

        >>> c3 = odl.cn(3)
        >>> op = ImagPart(c3)
        >>> op([1 + 2j, 2, 3 - 1j])
        rn(3).element([2.0, 0.0, -1.0])

        The operator is the zero operator on real spaces:

        >>> r3 = odl.rn(3)
        >>> op = ImagPart(r3)
        >>> op([1, 2, 3])
        rn(3).element([0.0, 0.0, 0.0])
        """
        real_space = space.real_space
        # On a real space the imaginary part is identically zero, which is
        # linear; on a complex space it is treated as non-linear.
        linear = (space == real_space)
        Operator.__init__(self, space, real_space, linear=linear)

    def _call(self, x):
        """Return ``self(x)``."""
        return x.imag

    @property
    def inverse(self):
        """Return the pseudoinverse.

        Examples
        --------
        The inverse is the zero operator if the domain is real:

        >>> r3 = odl.rn(3)
        >>> op = ImagPart(r3)
        >>> op.inverse(op([1, 2, 3]))
        rn(3).element([0.0, 0.0, 0.0])

        This is not a true inverse, only a pseudoinverse, the real part
        will by necessity be lost.

        >>> c3 = odl.cn(3)
        >>> op = ImagPart(c3)
        >>> op.inverse(op([1 + 2j, 2, 3 - 1j]))
        cn(3).element([2j, 0j, (-0-1j)])
        """
        if self.is_linear:
            return ZeroOperator(self.domain)
        else:
            # Embed back into the complex space as the imaginary part.
            return ComplexEmbedding(self.domain, scalar=1j)

    @property
    def adjoint(self):
        r"""Return the (left) adjoint.

        Notes
        -----
        Due to technicalities of operators from a complex space into a
        real space, this does not satisfy the usual adjoint equation:

        .. math::
            \langle Ax, y \rangle = \langle x, A^*y \rangle

        Instead it is an adjoint in a weaker sense as follows:

        .. math::
            \langle AA^*x, y \rangle = \langle A^*x, A^*y \rangle

        Examples
        --------
        The adjoint satisfies the adjoint equation for real spaces:

        >>> r3 = odl.rn(3)
        >>> op = ImagPart(r3)
        >>> x = op.domain.element([1, 2, 3])
        >>> y = op.range.element([3, 2, 1])
        >>> x.inner(op.adjoint(y)) == op(x).inner(y)
        True

        If the domain is complex, it only satisfies the weaker definition:

        >>> c3 = odl.cn(3)
        >>> op = ImagPart(c3)
        >>> x = op.range.element([1, 2, 3])
        >>> y = op.range.element([3, 2, 1])
        >>> AtAxy = op(op.adjoint(x)).inner(y)
        >>> AtxAty = op.adjoint(x).inner(op.adjoint(y))
        >>> AtAxy == AtxAty
        True
        """
        if self.is_linear:
            return ZeroOperator(self.domain)
        else:
            # Embed back into the complex space as the imaginary part.
            return ComplexEmbedding(self.domain, scalar=1j)
class ComplexEmbedding(Operator):

    """Operator that embeds a vector into a complex space."""

    def __init__(self, space, scalar=1):
        """Initialize a new instance.

        Parameters
        ----------
        space : `FnBase`
            Space that should be embedded into its complex counterpart,
            needs to implement ``space.complex_space``.
        scalar : ``space.complex_space.field`` element, optional
            Scalar which the incoming vectors should be multiplied by in
            order to get the complex vector.

        Examples
        --------
        Embed real vector into complex space:

        >>> r3 = odl.rn(3)
        >>> op = ComplexEmbedding(r3)
        >>> op([1, 2, 3])
        cn(3).element([(1+0j), (2+0j), (3+0j)])

        Embed real vector as imaginary part into complex space:

        >>> op = ComplexEmbedding(r3, scalar=1j)
        >>> op([1, 2, 3])
        cn(3).element([1j, 2j, 3j])

        On complex spaces the operator is the same as simple multiplication
        by scalar:

        >>> c3 = odl.cn(3)
        >>> op = ComplexEmbedding(c3, scalar=1 + 2j)
        >>> op([1 + 1j, 2 + 2j, 3 + 3j])
        cn(3).element([(-1+3j), (-2+6j), (-3+9j)])
        """
        complex_space = space.complex_space
        self.scalar = complex_space.field.element(scalar)
        Operator.__init__(self, space, complex_space, linear=True)

    def _call(self, x, out):
        """Return ``self(x)``."""
        if self.domain.is_rn:
            # Real domain, multiply separately
            out.real = self.scalar.real * x
            out.imag = self.scalar.imag * x
        else:
            # Complex domain
            out.lincomb(self.scalar, x)

    @property
    def inverse(self):
        """Return the (left) inverse.

        If the domain is a real space, this is not a true inverse,
        only a (left) inverse.

        Examples
        --------
        >>> r3 = odl.rn(3)
        >>> op = ComplexEmbedding(r3, scalar=1)
        >>> op.inverse(op([1, 2, 4]))
        rn(3).element([1.0, 2.0, 4.0])
        """
        if self.domain.is_rn:
            # Real domain
            # Optimizations for simple cases.
            if self.scalar.real == self.scalar:
                return (1 / self.scalar.real) * RealPart(self.range)
            elif 1j * self.scalar.imag == self.scalar:
                return (1 / self.scalar.imag) * ImagPart(self.range)
            else:
                # General case
                inv_scalar = (1 / self.scalar).conjugate()
                return ((inv_scalar.real) * RealPart(self.range) +
                        (inv_scalar.imag) * ImagPart(self.range))
        else:
            # Complex domain
            return ComplexEmbedding(self.range, self.scalar.conjugate())

    @property
    def adjoint(self):
        r"""Return the (right) adjoint.

        Notes
        -----
        Due to technicalities of operators from a real space into a
        complex space, this does not satisfy the usual adjoint equation:

        .. math::
            \langle Ax, y \rangle = \langle x, A^*y \rangle

        Instead it is an adjoint in a weaker sense as follows:

        .. math::
            \langle A^*Ax, y \rangle = \langle Ax, Ay \rangle

        Examples
        --------
        The adjoint satisfies the adjoint equation for complex spaces

        >>> c3 = odl.cn(3)
        >>> op = ComplexEmbedding(c3, scalar=1j)
        >>> x = c3.element([1 + 1j, 2 + 2j, 3 + 3j])
        >>> y = c3.element([3 + 1j, 2 + 2j, 3 + 1j])
        >>> Axy = op(x).inner(y)
        >>> xAty = x.inner(op.adjoint(y))
        >>> Axy == xAty
        True

        For real domains, it only satisfies the (right) adjoint equation

        >>> r3 = odl.rn(3)
        >>> op = ComplexEmbedding(r3, scalar=1j)
        >>> x = r3.element([1, 2, 3])
        >>> y = r3.element([3, 2, 3])
        >>> AtAxy = op.adjoint(op(x)).inner(y)
        >>> AxAy = op(x).inner(op(y))
        >>> AtAxy == AxAy
        True
        """
        if self.domain.is_rn:
            # Real domain
            # Optimizations for simple cases.
            if self.scalar.real == self.scalar:
                return self.scalar.real * RealPart(self.range)
            elif 1j * self.scalar.imag == self.scalar:
                return self.scalar.imag * ImagPart(self.range)
            else:
                # General case
                return (self.scalar.real * RealPart(self.range) +
                        self.scalar.imag * ImagPart(self.range))
        else:
            # Complex domain
            return ComplexEmbedding(self.range, self.scalar.conjugate())
if __name__ == '__main__':
    # Run the doctest examples in this module when executed as a script.
    # pylint: disable=wrong-import-position
    from odl.util.testutils import run_doctests
    run_doctests()
| [
"# Copyright 2014-2016 The ODL development group\n",
"#\n",
"# This file is part of ODL.\n",
"#\n",
"# ODL is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# ODL is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with ODL. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"\"\"\"Default operators defined on any (reasonable) space.\"\"\"\n",
"\n",
"# Imports for common Python 2/3 codebase\n",
"from __future__ import print_function, division, absolute_import\n",
"from future import standard_library\n",
"standard_library.install_aliases()\n",
"from builtins import super\n",
"\n",
"from copy import copy\n",
"\n",
"from odl.operator.operator import Operator\n",
"from odl.space import ProductSpace\n",
"from odl.set import LinearSpace, LinearSpaceElement, Field, RealNumbers\n",
"\n",
"\n",
"__all__ = ('ScalingOperator', 'ZeroOperator', 'IdentityOperator',\n",
" 'LinCombOperator', 'MultiplyOperator', 'PowerOperator',\n",
" 'InnerProductOperator', 'NormOperator', 'DistOperator',\n",
" 'ConstantOperator', 'RealPart', 'ImagPart', 'ComplexEmbedding')\n",
"\n",
"\n",
"class ScalingOperator(Operator):\n",
"\n",
" \"\"\"Operator of multiplication with a scalar.\n",
"\n",
" ``ScalingOperator(s)(x) == s * x``\n",
" \"\"\"\n",
"\n",
" def __init__(self, domain, scalar):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" domain : `LinearSpace` or `Field`\n",
" Set of elements on which this operator acts.\n",
" scalar : ``domain.field`` element\n",
" Fixed scaling factor of this operator.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> vec = r3.element([1, 2, 3])\n",
" >>> out = r3.element()\n",
" >>> op = ScalingOperator(r3, 2.0)\n",
" >>> op(vec, out) # In-place, Returns out\n",
" rn(3).element([2.0, 4.0, 6.0])\n",
" >>> out\n",
" rn(3).element([2.0, 4.0, 6.0])\n",
" >>> op(vec) # Out-of-place\n",
" rn(3).element([2.0, 4.0, 6.0])\n",
" \"\"\"\n",
" if not isinstance(domain, (LinearSpace, Field)):\n",
" raise TypeError('`space` {!r} not a `LinearSpace` or `Field` '\n",
" 'instance'.format(domain))\n",
"\n",
" super().__init__(domain, domain, linear=True)\n",
" self.__scalar = domain.field.element(scalar)\n",
"\n",
" @property\n",
" def scalar(self):\n",
" \"\"\"Fixed scaling factor of this operator.\"\"\"\n",
" return self.__scalar\n",
"\n",
" def _call(self, x, out=None):\n",
" \"\"\"Scale ``x`` and write to ``out`` if given.\"\"\"\n",
" if out is None:\n",
" out = self.scalar * x\n",
" else:\n",
" out.lincomb(self.scalar, x)\n",
" return out\n",
"\n",
" @property\n",
" def inverse(self):\n",
" \"\"\"Return the inverse operator.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> vec = r3.element([1, 2, 3])\n",
" >>> op = ScalingOperator(r3, 2.0)\n",
" >>> inv = op.inverse\n",
" >>> inv(op(vec)) == vec\n",
" True\n",
" >>> op(inv(vec)) == vec\n",
" True\n",
" \"\"\"\n",
" if self.scalar == 0.0:\n",
" raise ZeroDivisionError('scaling operator not invertible for '\n",
" 'scalar==0')\n",
" return ScalingOperator(self.domain, 1.0 / self.scalar)\n",
"\n",
" @property\n",
" def adjoint(self):\n",
" \"\"\"Adjoint, given as scaling with the conjugate of the scalar.\n",
"\n",
" Returns\n",
" -------\n",
" adjoint : `ScalingOperator`\n",
" ``self`` if `scalar` is real, else `scalar` is conjugated.\n",
" \"\"\"\n",
" if complex(self.scalar).imag == 0.0:\n",
" return self\n",
" else:\n",
" return ScalingOperator(self.domain, self.scalar.conjugate())\n",
"\n",
" def __repr__(self):\n",
" \"\"\"Return ``repr(self)``.\"\"\"\n",
" return '{}({!r}, {!r})'.format(self.__class__.__name__,\n",
" self.domain, self.scalar)\n",
"\n",
" def __str__(self):\n",
" \"\"\"Return ``str(self)``.\"\"\"\n",
" return '{} * I'.format(self.scalar)\n",
"\n",
"\n",
"class IdentityOperator(ScalingOperator):\n",
"\n",
" \"\"\"Operator mapping each element to itself.\n",
"\n",
" ``IdentityOperator()(x) == x``\n",
" \"\"\"\n",
"\n",
" def __init__(self, space):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" space : `LinearSpace`\n",
" Space of elements which the operator is acting on.\n",
" \"\"\"\n",
" super().__init__(space, 1)\n",
"\n",
" def __repr__(self):\n",
" \"\"\"Return ``repr(self)``.\"\"\"\n",
" return '{}({!r})'.format(self.__class__.__name__, self.domain)\n",
"\n",
" def __str__(self):\n",
" \"\"\"Return ``str(self)``.\"\"\"\n",
" return \"I\"\n",
"\n",
"\n",
"class LinCombOperator(Operator):\n",
"\n",
" \"\"\"Operator mapping two space elements to a linear combination::\n",
"\n",
" LinCombOperator(a, b)([x, y]) == a * x + b * y\n",
" \"\"\"\n",
"\n",
" def __init__(self, space, a, b):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" space : `LinearSpace`\n",
" Space of elements which the operator is acting on.\n",
" a, b : ``space.field`` elements\n",
" Scalars to multiply ``x[0]`` and ``x[1]`` with, respectively.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> r3xr3 = odl.ProductSpace(r3, r3)\n",
" >>> xy = r3xr3.element([[1, 2, 3], [1, 2, 3]])\n",
" >>> z = r3.element()\n",
" >>> op = LinCombOperator(r3, 1.0, 1.0)\n",
" >>> op(xy, out=z) # Returns z\n",
" rn(3).element([2.0, 4.0, 6.0])\n",
" >>> z\n",
" rn(3).element([2.0, 4.0, 6.0])\n",
" \"\"\"\n",
" domain = ProductSpace(space, space)\n",
" super().__init__(domain, space, linear=True)\n",
" self.a = a\n",
" self.b = b\n",
"\n",
" def _call(self, x, out=None):\n",
" \"\"\"Linearly combine ``x`` and write to ``out`` if given.\"\"\"\n",
" if out is None:\n",
" out = self.range.element()\n",
" out.lincomb(self.a, x[0], self.b, x[1])\n",
" return out\n",
"\n",
" def __repr__(self):\n",
" \"\"\"Return ``repr(self)``.\"\"\"\n",
" return '{}({!r}, {!r}, {!r})'.format(self.__class__.__name__,\n",
" self.range, self.a, self.b)\n",
"\n",
" def __str__(self):\n",
" \"\"\"Return ``str(self)``.\"\"\"\n",
" return \"{}*x + {}*y\".format(self.a, self.b)\n",
"\n",
"\n",
"class MultiplyOperator(Operator):\n",
"\n",
" \"\"\"Operator multiplying by a fixed space or field element.\n",
"\n",
" ``MultiplyOperator(y)(x) == x * y``\n",
"\n",
" Here, ``y`` is a `LinearSpaceElement` or `Field` element and\n",
" ``x`` is a `LinearSpaceElement`.\n",
" Hence, this operator can be defined either on a `LinearSpace` or on\n",
" a `Field`. In the first case it is the pointwise multiplication,\n",
" in the second the scalar multiplication.\n",
" \"\"\"\n",
"\n",
" def __init__(self, multiplicand, domain=None, range=None):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" multiplicand : `LinearSpaceElement` or scalar\n",
" Value to multiply by.\n",
" domain : `LinearSpace` or `Field`, optional\n",
" Set to which the operator can be applied.\n",
" Default: ``multiplicand.space``.\n",
" range : `LinearSpace` or `Field`, optional\n",
" Set to which the operator maps. Default: ``multiplicand.space``.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> x = r3.element([1, 2, 3])\n",
"\n",
" Multiply by vector:\n",
"\n",
" >>> op = MultiplyOperator(x)\n",
" >>> op(x)\n",
" rn(3).element([1.0, 4.0, 9.0])\n",
" >>> out = r3.element()\n",
" >>> op(x, out)\n",
" rn(3).element([1.0, 4.0, 9.0])\n",
"\n",
" Multiply by scalar:\n",
"\n",
" >>> op2 = MultiplyOperator(x, domain=r3.field)\n",
" >>> op2(3)\n",
" rn(3).element([3.0, 6.0, 9.0])\n",
" >>> out = r3.element()\n",
" >>> op2(3, out)\n",
" rn(3).element([3.0, 6.0, 9.0])\n",
" \"\"\"\n",
" if domain is None:\n",
" domain = multiplicand.space\n",
"\n",
" if range is None:\n",
" range = multiplicand.space\n",
"\n",
" self.__multiplicand = multiplicand\n",
" self.__domain_is_field = isinstance(domain, Field)\n",
" self.__range_is_field = isinstance(range, Field)\n",
" super().__init__(domain, range, linear=True)\n",
"\n",
" @property\n",
" def multiplicand(self):\n",
" \"\"\"Value to multiply by.\"\"\"\n",
" return self.__multiplicand\n",
"\n",
" def _call(self, x, out=None):\n",
" \"\"\"Multiply ``x`` and write to ``out`` if given.\"\"\"\n",
" if out is None:\n",
" return x * self.multiplicand\n",
" elif not self.__range_is_field:\n",
" if self.__domain_is_field:\n",
" out.lincomb(x, self.multiplicand)\n",
" else:\n",
" x.multiply(self.multiplicand, out=out)\n",
" else:\n",
" raise ValueError('can only use `out` with `LinearSpace` range')\n",
"\n",
" @property\n",
" def adjoint(self):\n",
" \"\"\"Adjoint of this operator.\n",
"\n",
" Returns\n",
" -------\n",
" adjoint : `InnerProductOperator` or `MultiplyOperator`\n",
" If the domain of this operator is the scalar field of a\n",
" `LinearSpace` the adjoint is the inner product with ``y``,\n",
" else it is the multiplication with ``y``.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> x = r3.element([1, 2, 3])\n",
"\n",
" Multiply by a space element:\n",
"\n",
" >>> op = MultiplyOperator(x)\n",
" >>> out = r3.element()\n",
" >>> op.adjoint(x)\n",
" rn(3).element([1.0, 4.0, 9.0])\n",
"\n",
" Multiply by a scalar:\n",
"\n",
" >>> op2 = MultiplyOperator(x, domain=r3.field)\n",
" >>> op2.adjoint(x)\n",
" 14.0\n",
" \"\"\"\n",
" if self.__domain_is_field:\n",
" return InnerProductOperator(self.multiplicand)\n",
" else:\n",
" # TODO: complex case\n",
" return MultiplyOperator(self.multiplicand)\n",
"\n",
" def __repr__(self):\n",
" \"\"\"Return ``repr(self)``.\"\"\"\n",
" return '{}({!r})'.format(self.__class__.__name__, self.multiplicand)\n",
"\n",
" def __str__(self):\n",
" \"\"\"Return ``str(self)``.\"\"\"\n",
" return \"x * {}\".format(self.y)\n",
"\n",
"\n",
"class PowerOperator(Operator):\n",
"\n",
" \"\"\"Operator taking a fixed power of a space or field element.\n",
"\n",
" ``PowerOperator(p)(x) == x ** p``\n",
"\n",
" Here, ``x`` is a `LinearSpaceElement` or `Field` element and ``p`` is\n",
" a number. Hence, this operator can be defined either on a\n",
" `LinearSpace` or on a `Field`.\n",
" \"\"\"\n",
"\n",
" def __init__(self, domain, exponent):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" domain : `LinearSpace` or `Field`\n",
" Set of elements on which the operator can be applied.\n",
" exponent : float\n",
" Exponent parameter of the power function applied to an element.\n",
"\n",
" Examples\n",
" --------\n",
" Use with vectors\n",
"\n",
" >>> op = PowerOperator(odl.rn(3), exponent=2)\n",
" >>> op([1, 2, 3])\n",
" rn(3).element([1.0, 4.0, 9.0])\n",
"\n",
" or scalars\n",
"\n",
" >>> op = PowerOperator(odl.RealNumbers(), exponent=2)\n",
" >>> op(2.0)\n",
" 4.0\n",
" \"\"\"\n",
"\n",
" self.__exponent = float(exponent)\n",
" self.__domain_is_field = isinstance(domain, Field)\n",
" super().__init__(domain, domain, linear=(exponent == 1))\n",
"\n",
" @property\n",
" def exponent(self):\n",
" \"\"\"Power of the input element to take.\"\"\"\n",
" return self.__exponent\n",
"\n",
" def _call(self, x, out=None):\n",
" \"\"\"Take the power of ``x`` and write to ``out`` if given.\"\"\"\n",
" if out is None:\n",
" return x ** self.exponent\n",
" elif self.__domain_is_field:\n",
" raise ValueError('cannot use `out` with field')\n",
" else:\n",
" out.assign(x)\n",
" out **= self.exponent\n",
"\n",
" def derivative(self, point):\n",
" \"\"\"Derivative of this operator.\n",
"\n",
" ``PowerOperator(p).derivative(y)(x) == p * y ** (p - 1) * x``\n",
"\n",
" Parameters\n",
" ----------\n",
" point : `domain` element\n",
" The point in which to take the derivative\n",
"\n",
" Returns\n",
" -------\n",
" derivative : `Operator`\n",
" The derivative in ``point``\n",
"\n",
" Examples\n",
" --------\n",
" Use on vector spaces:\n",
"\n",
" >>> op = PowerOperator(odl.rn(3), exponent=2)\n",
" >>> dop = op.derivative(op.domain.element([1, 2, 3]))\n",
" >>> dop([1, 1, 1])\n",
" rn(3).element([2.0, 4.0, 6.0])\n",
"\n",
" Use with scalars:\n",
"\n",
" >>> op = PowerOperator(odl.RealNumbers(), exponent=2)\n",
" >>> dop = op.derivative(2.0)\n",
" >>> dop(2.0)\n",
" 8.0\n",
" \"\"\"\n",
" return self.exponent * MultiplyOperator(point ** (self.exponent - 1),\n",
" domain=self.domain,\n",
" range=self.range)\n",
"\n",
" def __repr__(self):\n",
" \"\"\"Return ``repr(self)``.\"\"\"\n",
" return '{}({!r}, {!r})'.format(self.__class__.__name__,\n",
" self.domain, self.exponent)\n",
"\n",
" def __str__(self):\n",
" \"\"\"Return ``str(self)``.\"\"\"\n",
" return \"x ** {}\".format(self.exponent)\n",
"\n",
"\n",
"class InnerProductOperator(Operator):\n",
" \"\"\"Operator taking the inner product with a fixed space element.\n",
"\n",
" ``InnerProductOperator(y)(x) <==> y.inner(x)``\n",
"\n",
" This is only applicable in inner product spaces.\n",
"\n",
" See Also\n",
" --------\n",
" DistOperator : Distance to a fixed space element.\n",
" NormOperator : Vector space norm as operator.\n",
" \"\"\"\n",
"\n",
" def __init__(self, vector):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" vector : `LinearSpaceElement`\n",
" The element to take the inner product with.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> x = r3.element([1, 2, 3])\n",
" >>> op = InnerProductOperator(x)\n",
" >>> op(r3.element([1, 2, 3]))\n",
" 14.0\n",
" \"\"\"\n",
" self.__vector = vector\n",
" super().__init__(vector.space, vector.space.field, linear=True)\n",
"\n",
" @property\n",
" def vector(self):\n",
" \"\"\"Element to take the inner product with.\"\"\"\n",
" return self.__vector\n",
"\n",
" def _call(self, x):\n",
" \"\"\"Return the inner product with ``x``.\"\"\"\n",
" return x.inner(self.vector)\n",
"\n",
" @property\n",
" def adjoint(self):\n",
" \"\"\"Adjoint of this operator.\n",
"\n",
" Returns\n",
" -------\n",
" adjoint : `MultiplyOperator`\n",
" The operator of multiplication with `vector`.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> x = r3.element([1, 2, 3])\n",
" >>> op = InnerProductOperator(x)\n",
" >>> op.adjoint(2.0)\n",
" rn(3).element([2.0, 4.0, 6.0])\n",
" \"\"\"\n",
" return MultiplyOperator(self.vector, self.vector.space.field)\n",
"\n",
" @property\n",
" def T(self):\n",
" \"\"\"Fixed vector of this operator.\n",
"\n",
" Returns\n",
" -------\n",
" vector : `LinearSpaceElement`\n",
" The fixed space element used in this inner product operator.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> x = r3.element([1, 2, 3])\n",
" >>> x.T\n",
" InnerProductOperator(rn(3).element([1.0, 2.0, 3.0]))\n",
" >>> x.T.T\n",
" rn(3).element([1.0, 2.0, 3.0])\n",
" \"\"\"\n",
" return self.vector\n",
"\n",
" def __repr__(self):\n",
" \"\"\"Return ``repr(self)``.\"\"\"\n",
" return '{}({!r})'.format(self.__class__.__name__, self.vector)\n",
"\n",
" def __str__(self):\n",
" \"\"\"Return ``str(self)``.\"\"\"\n",
" return '{}.T'.format(self.vector)\n",
"\n",
"\n",
"class NormOperator(Operator):\n",
"\n",
" \"\"\"Vector space norm as an operator.\n",
"\n",
" ``NormOperator()(x) <==> x.norm()``\n",
"\n",
" This is only applicable in normed spaces.\n",
"\n",
" See Also\n",
" --------\n",
" InnerProductOperator : Inner product with a fixed space element.\n",
" DistOperator : Distance to a fixed space element.\n",
" \"\"\"\n",
"\n",
" def __init__(self, space):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" space : `LinearSpace`\n",
" Space to take the norm in.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r2 = odl.rn(2)\n",
" >>> op = NormOperator(r2)\n",
" >>> op([3, 4])\n",
" 5.0\n",
" \"\"\"\n",
" super().__init__(space, RealNumbers(), linear=False)\n",
"\n",
" def _call(self, x):\n",
" \"\"\"Return the norm of ``x``.\"\"\"\n",
" return x.norm()\n",
"\n",
" def derivative(self, point):\n",
" \"\"\"Derivative of this operator in ``point``.\n",
"\n",
" ``NormOperator().derivative(y)(x) == (y / y.norm()).inner(x)``\n",
"\n",
" This is only applicable in inner product spaces.\n",
"\n",
" Parameters\n",
" ----------\n",
" point : `domain` `element-like`\n",
" Point in which to take the derivative.\n",
"\n",
" Returns\n",
" -------\n",
" derivative : `InnerProductOperator`\n",
"\n",
" Raises\n",
" ------\n",
" ValueError\n",
" If ``point.norm() == 0``, in which case the derivative is not well\n",
" defined in the Frechet sense.\n",
"\n",
" Notes\n",
" -----\n",
" The derivative cannot be written in a general sense except in Hilbert\n",
" spaces, in which case it is given by\n",
"\n",
" .. math::\n",
" (D \\|\\cdot\\|)(y)(x) = \\langle y / \\|y\\|, x \\\\rangle\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> op = NormOperator(r3)\n",
" >>> derivative = op.derivative([1, 0, 0])\n",
" >>> derivative([1, 0, 0])\n",
" 1.0\n",
" \"\"\"\n",
" point = self.domain.element(point)\n",
" norm = point.norm()\n",
" if norm == 0:\n",
" raise ValueError('not differentiable in 0')\n",
"\n",
" return InnerProductOperator(point / norm)\n",
"\n",
" def __repr__(self):\n",
" \"\"\"Return ``repr(self)``.\"\"\"\n",
" return '{}({!r})'.format(self.__class__.__name__, self.domain)\n",
"\n",
" def __str__(self):\n",
" \"\"\"Return ``str(self)``.\"\"\"\n",
" return '{}({})'.format(self.__class__.__name__, self.domain)\n",
"\n",
"\n",
"class DistOperator(Operator):\n",
"\n",
" \"\"\"Operator taking the distance to a fixed space element.\n",
"\n",
" ``DistOperator(y)(x) == y.dist(x)``\n",
"\n",
" This is only applicable in metric spaces.\n",
"\n",
" See Also\n",
" --------\n",
" InnerProductOperator : Inner product with fixed space element.\n",
" NormOperator : Vector space norm as an operator.\n",
" \"\"\"\n",
"\n",
" def __init__(self, vector):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" vector : `LinearSpaceElement`\n",
" Point to calculate the distance to.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r2 = odl.rn(2)\n",
" >>> x = r2.element([1, 1])\n",
" >>> op = DistOperator(x)\n",
" >>> op([4, 5])\n",
" 5.0\n",
" \"\"\"\n",
" self.__vector = vector\n",
" super().__init__(vector.space, RealNumbers(), linear=False)\n",
"\n",
" @property\n",
" def vector(self):\n",
" \"\"\"Element to which to take the distance.\"\"\"\n",
" return self.__vector\n",
"\n",
" def _call(self, x):\n",
" \"\"\"Return the distance from ``self.vector`` to ``x``.\"\"\"\n",
" return self.vector.dist(x)\n",
"\n",
" def derivative(self, point):\n",
" \"\"\"The derivative operator.\n",
"\n",
" ``DistOperator(y).derivative(z)(x) ==\n",
" ((y - z) / y.dist(z)).inner(x)``\n",
"\n",
" This is only applicable in inner product spaces.\n",
"\n",
" Parameters\n",
" ----------\n",
" x : `domain` `element-like`\n",
" Point in which to take the derivative.\n",
"\n",
" Returns\n",
" -------\n",
" derivative : `InnerProductOperator`\n",
"\n",
" Raises\n",
" ------\n",
" ValueError\n",
" If ``point == self.vector``, in which case the derivative is not\n",
" well defined in the Frechet sense.\n",
"\n",
" Notes\n",
" -----\n",
" The derivative cannot be written in a general sense except in Hilbert\n",
" spaces, in which case it is given by\n",
"\n",
" .. math::\n",
" (D d(\\cdot, y))(z)(x) = \\\\langle (y-z) / d(y, z), x \\\\rangle\n",
"\n",
" Examples\n",
" --------\n",
" >>> r2 = odl.rn(2)\n",
" >>> x = r2.element([1, 1])\n",
" >>> op = DistOperator(x)\n",
" >>> derivative = op.derivative([2, 1])\n",
" >>> derivative([1, 0])\n",
" 1.0\n",
" \"\"\"\n",
" point = self.domain.element(point)\n",
" diff = point - self.vector\n",
" dist = self.vector.dist(point)\n",
" if dist == 0:\n",
" raise ValueError('not differentiable at the reference vector {!r}'\n",
" ''.format(self.vector))\n",
"\n",
" return InnerProductOperator(diff / dist)\n",
"\n",
" def __repr__(self):\n",
" \"\"\"Return ``repr(self)``.\"\"\"\n",
" return '{}({!r})'.format(self.__class__.__name__, self.vector)\n",
"\n",
" def __str__(self):\n",
" \"\"\"Return ``str(self)``.\"\"\"\n",
" return '{}({})'.format(self.__class__.__name__, self.vector)\n",
"\n",
"\n",
"class ConstantOperator(Operator):\n",
"\n",
" \"\"\"Operator that always returns the same value.\n",
"\n",
" ``ConstantOperator(y)(x) == y``\n",
" \"\"\"\n",
"\n",
" def __init__(self, constant, domain=None, range=None):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" constant : `LinearSpaceElement` or ``range`` `element-like`\n",
" The constant space element to be returned. If ``range`` is not\n",
" provided, ``constant`` must be a `LinearSpaceElement` since the\n",
" operator range is then inferred from it.\n",
" domain : `LinearSpace`, optional\n",
" Domain of the operator. Default: ``vector.space``\n",
" range : `LinearSpace`, optional\n",
" Range of the operator. Default: ``vector.space``\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> x = r3.element([1, 2, 3])\n",
" >>> op = ConstantOperator(x)\n",
" >>> op(x, out=r3.element())\n",
" rn(3).element([1.0, 2.0, 3.0])\n",
" \"\"\"\n",
"\n",
" if ((domain is None or range is None) and\n",
" not isinstance(constant, LinearSpaceElement)):\n",
" raise TypeError('If either domain or range is unspecified '\n",
" '`constant` must be LinearSpaceVector, got '\n",
" '{!r}.'.format(constant))\n",
"\n",
" if domain is None:\n",
" domain = constant.space\n",
" if range is None:\n",
" range = constant.space\n",
"\n",
" self.__constant = range.element(constant)\n",
" linear = self.constant.norm() == 0\n",
" super().__init__(domain, range, linear=linear)\n",
"\n",
" @property\n",
" def constant(self):\n",
" \"\"\"Constant space element returned by this operator.\"\"\"\n",
" return self.__constant\n",
"\n",
" def _call(self, x, out=None):\n",
" \"\"\"Return the constant vector or assign it to ``out``.\"\"\"\n",
" if out is None:\n",
" return self.range.element(copy(self.constant))\n",
" else:\n",
" out.assign(self.constant)\n",
"\n",
" @property\n",
" def adjoint(self):\n",
" \"\"\"Adjoint of the operator.\n",
"\n",
" Only defined if the operator is the constant operator.\n",
" \"\"\"\n",
"\n",
" def derivative(self, point):\n",
" \"\"\"Derivative of this operator, always zero.\n",
"\n",
" Returns\n",
" -------\n",
" derivative : `ZeroOperator`\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> x = r3.element([1, 2, 3])\n",
" >>> op = ConstantOperator(x)\n",
" >>> deriv = op.derivative([1, 1, 1])\n",
" >>> deriv([2, 2, 2])\n",
" rn(3).element([0.0, 0.0, 0.0])\n",
" \"\"\"\n",
" return ZeroOperator(domain=self.domain, range=self.range)\n",
"\n",
" def __repr__(self):\n",
" \"\"\"Return ``repr(self)``.\"\"\"\n",
" return '{}({!r})'.format(self.__class__.__name__, self.constant)\n",
"\n",
" def __str__(self):\n",
" \"\"\"Return ``str(self)``.\"\"\"\n",
" return \"{}\".format(self.constant)\n",
"\n",
"\n",
"class ZeroOperator(Operator):\n",
"\n",
" \"\"\"Operator mapping each element to the zero element::\n",
"\n",
" ZeroOperator(space)(x) == space.zero()\n",
" \"\"\"\n",
"\n",
" def __init__(self, domain, range=None):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" domain : `LinearSpace`\n",
" Domain of the operator.\n",
" range : `LinearSpace`, optional\n",
" Range of the operator. Default: ``domain``\n",
"\n",
" Examples\n",
" --------\n",
" >>> op = odl.ZeroOperator(odl.rn(3))\n",
" >>> op([1, 2, 3])\n",
" rn(3).element([0.0, 0.0, 0.0])\n",
"\n",
" Also works with domain != range:\n",
"\n",
" >>> op = odl.ZeroOperator(odl.rn(3), odl.cn(4))\n",
" >>> op([1, 2, 3])\n",
" cn(4).element([0j, 0j, 0j, 0j])\n",
" \"\"\"\n",
" if range is None:\n",
" range = domain\n",
"\n",
" super().__init__(domain, range, linear=True)\n",
"\n",
" def _call(self, x, out=None):\n",
" \"\"\"Return the zero vector or assign it to ``out``.\"\"\"\n",
" if self.domain == self.range:\n",
" if out is None:\n",
" out = 0 * x\n",
" else:\n",
" out.lincomb(0, x)\n",
" else:\n",
" result = self.range.zero()\n",
" if out is None:\n",
" out = result\n",
" else:\n",
" out.assign(result)\n",
" return out\n",
"\n",
" @property\n",
" def adjoint(self):\n",
" \"\"\"Adjoint of the operator.\n",
"\n",
" If ``self.domain == self.range`` the zero operator is self-adjoint,\n",
" otherwise it is the `ZeroOperator` from `range` to `domain`.\n",
" \"\"\"\n",
" return ZeroOperator(domain=self.range, range=self.domain)\n",
"\n",
" def __repr__(self):\n",
" \"\"\"Return ``repr(self)``.\"\"\"\n",
" return '{}({!r})'.format(self.__class__.__name__, self.domain)\n",
"\n",
" def __str__(self):\n",
" \"\"\"Return ``str(self)``.\"\"\"\n",
" return '0'\n",
"\n",
"\n",
"class RealPart(Operator):\n",
"\n",
" \"\"\"Operator that extracts the real part of a vector.\"\"\"\n",
"\n",
" def __init__(self, space):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" space : `FnBase`\n",
" Space which real part should be taken, needs to implement\n",
" ``space.real_space``.\n",
"\n",
" Examples\n",
" --------\n",
" Take the real part of complex vector:\n",
"\n",
" >>> c3 = odl.cn(3)\n",
" >>> op = RealPart(c3)\n",
" >>> op([1 + 2j, 2, 3 - 1j])\n",
" rn(3).element([1.0, 2.0, 3.0])\n",
"\n",
" The operator is the identity on real spaces:\n",
"\n",
" >>> r3 = odl.rn(3)\n",
" >>> op = RealPart(r3)\n",
" >>> op([1, 2, 3])\n",
" rn(3).element([1.0, 2.0, 3.0])\n",
"\n",
" The operator also works on other `FnBase` spaces such as\n",
" `DiscreteLp` spaces:\n",
"\n",
" >>> r3 = odl.uniform_discr(0, 1, 3, dtype=complex)\n",
" >>> op = RealPart(r3)\n",
" >>> op([1, 2, 3])\n",
" uniform_discr(0.0, 1.0, 3).element([1.0, 2.0, 3.0])\n",
" \"\"\"\n",
" real_space = space.real_space\n",
" linear = (space == real_space)\n",
" Operator.__init__(self, space, real_space, linear=linear)\n",
"\n",
" def _call(self, x):\n",
" \"\"\"Return ``self(x)``.\"\"\"\n",
" return x.real\n",
"\n",
" @property\n",
" def inverse(self):\n",
" \"\"\"Return the (pseudo-)inverse.\n",
"\n",
" Examples\n",
" --------\n",
" The inverse is its own inverse if its domain is real:\n",
"\n",
" >>> r3 = odl.rn(3)\n",
" >>> op = RealPart(r3)\n",
" >>> op.inverse(op([1, 2, 3]))\n",
" rn(3).element([1.0, 2.0, 3.0])\n",
"\n",
" This is not a true inverse, only a pseudoinverse, the complex part\n",
" will by necessity be lost.\n",
"\n",
" >>> c3 = odl.cn(3)\n",
" >>> op = RealPart(c3)\n",
" >>> op.inverse(op([1 + 2j, 2, 3 - 1j]))\n",
" cn(3).element([(1+0j), (2+0j), (3+0j)])\n",
" \"\"\"\n",
" if self.is_linear:\n",
" return self\n",
" else:\n",
" return ComplexEmbedding(self.domain, scalar=1)\n",
"\n",
" @property\n",
" def adjoint(self):\n",
" \"\"\"Return the (left) adjoint.\n",
"\n",
" Notes\n",
" -----\n",
" Due to technicalities of operators from a complex space into a real\n",
" space, this does not satisfy the usual adjoint equation:\n",
"\n",
" .. math::\n",
" \\langle Ax, y \\rangle = \\langle x, A^*y \\rangle\n",
"\n",
" Instead it is an adjoint in a weaker sense as follows:\n",
"\n",
" .. math::\n",
" \\langle AA^*x, y \\rangle = \\langle A^*x, A^*y \\rangle\n",
"\n",
" Examples\n",
" --------\n",
" The adjoint satisfies the adjoint equation for real spaces:\n",
"\n",
" >>> r3 = odl.rn(3)\n",
" >>> op = RealPart(r3)\n",
" >>> x = op.domain.element([1, 2, 3])\n",
" >>> y = op.range.element([3, 2, 1])\n",
" >>> x.inner(op.adjoint(y)) == op(x).inner(y)\n",
" True\n",
"\n",
" If the domain is complex, it only satisfies the weaker definition:\n",
"\n",
" >>> c3 = odl.cn(3)\n",
" >>> op = RealPart(c3)\n",
" >>> x = op.range.element([1, 2, 3])\n",
" >>> y = op.range.element([3, 2, 1])\n",
" >>> AtAxy = op(op.adjoint(x)).inner(y)\n",
" >>> AtxAty = op.adjoint(x).inner(op.adjoint(y))\n",
" >>> AtAxy == AtxAty\n",
" True\n",
" \"\"\"\n",
" if self.is_linear:\n",
" return self\n",
" else:\n",
" return ComplexEmbedding(self.domain, scalar=1)\n",
"\n",
"\n",
"class ImagPart(Operator):\n",
" def __init__(self, space):\n",
" \"\"\"Operator that extracts the imaginary part of a vector.\n",
"\n",
" Parameters\n",
" ----------\n",
" space : `FnBase`\n",
" Space which imaginary part should be taken, needs to implement\n",
" ``space.real_space``.\n",
"\n",
" Examples\n",
" --------\n",
" Take the imaginary part of complex vector:\n",
"\n",
" >>> c3 = odl.cn(3)\n",
" >>> op = ImagPart(c3)\n",
" >>> op([1 + 2j, 2, 3 - 1j])\n",
" rn(3).element([2.0, 0.0, -1.0])\n",
"\n",
" The operator is the zero operator on real spaces:\n",
"\n",
" >>> r3 = odl.rn(3)\n",
" >>> op = ImagPart(r3)\n",
" >>> op([1, 2, 3])\n",
" rn(3).element([0.0, 0.0, 0.0])\n",
" \"\"\"\n",
" real_space = space.real_space\n",
" linear = (space == real_space)\n",
" Operator.__init__(self, space, real_space, linear=linear)\n",
"\n",
" def _call(self, x):\n",
" \"\"\"Return ``self(x)``.\"\"\"\n",
" return x.imag\n",
"\n",
" @property\n",
" def inverse(self):\n",
" \"\"\"Return the pseudoinverse.\n",
"\n",
" Examples\n",
" --------\n",
" The inverse is the zero operator if the domain is real:\n",
"\n",
" >>> r3 = odl.rn(3)\n",
" >>> op = ImagPart(r3)\n",
" >>> op.inverse(op([1, 2, 3]))\n",
" rn(3).element([0.0, 0.0, 0.0])\n",
"\n",
" This is not a true inverse, only a pseudoinverse, the real part\n",
" will by necessity be lost.\n",
"\n",
" >>> c3 = odl.cn(3)\n",
" >>> op = ImagPart(c3)\n",
" >>> op.inverse(op([1 + 2j, 2, 3 - 1j]))\n",
" cn(3).element([2j, 0j, (-0-1j)])\n",
" \"\"\"\n",
" if self.is_linear:\n",
" return ZeroOperator(self.domain)\n",
" else:\n",
" return ComplexEmbedding(self.domain, scalar=1j)\n",
"\n",
" @property\n",
" def adjoint(self):\n",
" \"\"\"Return the (left) adjoint.\n",
"\n",
" Notes\n",
" -----\n",
" Due to technicalities of operators from a complex space into a real\n",
" space, this does not satisfy the usual adjoint equation:\n",
"\n",
" .. math::\n",
" \\langle Ax, y \\rangle = \\langle x, A^*y \\rangle\n",
"\n",
" Instead it is an adjoint in a weaker sense as follows:\n",
"\n",
" .. math::\n",
" \\langle AA^*x, y \\rangle = \\langle A^*x, A^*y \\rangle\n",
"\n",
" Examples\n",
" --------\n",
" The adjoint satisfies the adjoint equation for real spaces:\n",
"\n",
" >>> r3 = odl.rn(3)\n",
" >>> op = ImagPart(r3)\n",
" >>> x = op.domain.element([1, 2, 3])\n",
" >>> y = op.range.element([3, 2, 1])\n",
" >>> x.inner(op.adjoint(y)) == op(x).inner(y)\n",
" True\n",
"\n",
" If the domain is complex, it only satisfies the weaker definition:\n",
"\n",
" >>> c3 = odl.cn(3)\n",
" >>> op = ImagPart(c3)\n",
" >>> x = op.range.element([1, 2, 3])\n",
" >>> y = op.range.element([3, 2, 1])\n",
" >>> AtAxy = op(op.adjoint(x)).inner(y)\n",
" >>> AtxAty = op.adjoint(x).inner(op.adjoint(y))\n",
" >>> AtAxy == AtxAty\n",
" True\n",
" \"\"\"\n",
" if self.is_linear:\n",
" return ZeroOperator(self.domain)\n",
" else:\n",
" return ComplexEmbedding(self.domain, scalar=1j)\n",
"\n",
"\n",
"class ComplexEmbedding(Operator):\n",
"\n",
" \"\"\"Operator that embeds a vector into a complex space.\"\"\"\n",
"\n",
" def __init__(self, space, scalar=1):\n",
" \"\"\"Initialize a new instance.\n",
"\n",
" Parameters\n",
" ----------\n",
" space : `FnBase`\n",
" Space which real part should be taken, needs to implement\n",
" ``space.complex_space``.\n",
" scalar : ``space.complex_space.field`` element, optional\n",
" Scalar which the incomming vectors should be multiplied by in order\n",
" to get the complex vector.\n",
"\n",
" Examples\n",
" --------\n",
" Embed real vector into complex space:\n",
"\n",
" >>> r3 = odl.rn(3)\n",
" >>> op = ComplexEmbedding(r3)\n",
" >>> op([1, 2, 3])\n",
" cn(3).element([(1+0j), (2+0j), (3+0j)])\n",
"\n",
" Embed real vector as imaginary part into complex space:\n",
"\n",
" >>> op = ComplexEmbedding(r3, scalar=1j)\n",
" >>> op([1, 2, 3])\n",
" cn(3).element([1j, 2j, 3j])\n",
"\n",
" On complex spaces the operator is the same as simple multiplication by\n",
" scalar:\n",
"\n",
" >>> c3 = odl.cn(3)\n",
" >>> op = ComplexEmbedding(c3, scalar=1 + 2j)\n",
" >>> op([1 + 1j, 2 + 2j, 3 + 3j])\n",
" cn(3).element([(-1+3j), (-2+6j), (-3+9j)])\n",
" \"\"\"\n",
" complex_space = space.complex_space\n",
" self.scalar = complex_space.field.element(scalar)\n",
" Operator.__init__(self, space, complex_space, linear=True)\n",
"\n",
" def _call(self, x, out):\n",
" \"\"\"Return ``self(x)``.\"\"\"\n",
" if self.domain.is_rn:\n",
" # Real domain, multiply separately\n",
" out.real = self.scalar.real * x\n",
" out.imag = self.scalar.imag * x\n",
" else:\n",
" # Complex domain\n",
" out.lincomb(self.scalar, x)\n",
"\n",
" @property\n",
" def inverse(self):\n",
" \"\"\"Return the (left) inverse.\n",
"\n",
" If the domain is a real space, this is not a true inverse,\n",
" only a (left) inverse.\n",
"\n",
" Examples\n",
" --------\n",
" >>> r3 = odl.rn(3)\n",
" >>> op = ComplexEmbedding(r3, scalar=1)\n",
" >>> op.inverse(op([1, 2, 4]))\n",
" rn(3).element([1.0, 2.0, 4.0])\n",
" \"\"\"\n",
" if self.domain.is_rn:\n",
" # Real domain\n",
" # Optimizations for simple cases.\n",
" if self.scalar.real == self.scalar:\n",
" return (1 / self.scalar.real) * RealPart(self.range)\n",
" elif 1j * self.scalar.imag == self.scalar:\n",
" return (1 / self.scalar.imag) * ImagPart(self.range)\n",
" else:\n",
" # General case\n",
" inv_scalar = (1 / self.scalar).conjugate()\n",
" return ((inv_scalar.real) * RealPart(self.range) +\n",
" (inv_scalar.imag) * ImagPart(self.range))\n",
" else:\n",
" # Complex domain\n",
" return ComplexEmbedding(self.range, self.scalar.conjugate())\n",
"\n",
" @property\n",
" def adjoint(self):\n",
" \"\"\"Return the (right) adjoint.\n",
"\n",
" Notes\n",
" -----\n",
" Due to technicalities of operators from a real space into a complex\n",
" space, this does not satisfy the usual adjoint equation:\n",
"\n",
" .. math::\n",
" \\langle Ax, y \\rangle = \\langle x, A^*y \\rangle\n",
"\n",
" Instead it is an adjoint in a weaker sense as follows:\n",
"\n",
" .. math::\n",
" \\langle A^*Ax, y \\rangle = \\langle Ax, Ay \\rangle\n",
"\n",
" Examples\n",
" --------\n",
" The adjoint satisfies the adjoint equation for complex spaces\n",
"\n",
" >>> c3 = odl.cn(3)\n",
" >>> op = ComplexEmbedding(c3, scalar=1j)\n",
" >>> x = c3.element([1 + 1j, 2 + 2j, 3 + 3j])\n",
" >>> y = c3.element([3 + 1j, 2 + 2j, 3 + 1j])\n",
" >>> Axy = op(x).inner(y)\n",
" >>> xAty = x.inner(op.adjoint(y))\n",
" >>> Axy == xAty\n",
" True\n",
"\n",
" For real domains, it only satisfies the (right) adjoint equation\n",
"\n",
" >>> r3 = odl.rn(3)\n",
" >>> op = ComplexEmbedding(r3, scalar=1j)\n",
" >>> x = r3.element([1, 2, 3])\n",
" >>> y = r3.element([3, 2, 3])\n",
" >>> AtAxy = op.adjoint(op(x)).inner(y)\n",
" >>> AxAy = op(x).inner(op(y))\n",
" >>> AtAxy == AxAy\n",
" True\n",
" \"\"\"\n",
" if self.domain.is_rn:\n",
" # Real domain\n",
" # Optimizations for simple cases.\n",
" if self.scalar.real == self.scalar:\n",
" return self.scalar.real * RealPart(self.range)\n",
" elif 1j * self.scalar.imag == self.scalar:\n",
" return self.scalar.imag * ImagPart(self.range)\n",
" else:\n",
" # General case\n",
" return (self.scalar.real * RealPart(self.range) +\n",
" self.scalar.imag * ImagPart(self.range))\n",
" else:\n",
" # Complex domain\n",
" return ComplexEmbedding(self.range, self.scalar.conjugate())\n",
"\n",
"if __name__ == '__main__':\n",
" # pylint: disable=wrong-import-position\n",
" from odl.util.testutils import run_doctests\n",
" run_doctests()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
0.045454545454545456,
0,
0.023255813953488372,
0.02857142857142857,
0.013888888888888888,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.09375,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0136986301369863,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0
] | 1,230 | 0.000395 | false |
# This file is part of Tautulli.
#
# Tautulli is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tautulli is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tautulli. If not, see <http://www.gnu.org/licenses/>.
import json
from itertools import groupby
import plexpy
import common
import database
import datatables
import helpers
import logger
import pmsconnect
import session
class DataFactory(object):
"""
Retrieve and process data from the monitor database
"""
    def __init__(self):
        """Initialize the data factory (stateless; all data comes from the DB)."""
        pass
def get_datatables_history(self, kwargs=None, custom_where=None, grouping=None):
data_tables = datatables.DataTables()
if custom_where is None:
custon_where = []
if grouping is None:
grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES
if session.get_session_user_id():
session_user_id = str(session.get_session_user_id())
added = False
for c_where in custom_where:
if 'user_id' in c_where[0]:
# This currently only works if c_where[1] is not a list or tuple
if str(c_where[1]) == session_user_id:
added = True
break
else:
c_where[1] = (c_where[1], session_user_id)
added = True
if not added:
custom_where.append(['session_history.user_id', session.get_session_user_id()])
group_by = ['session_history.reference_id'] if grouping else ['session_history.id']
columns = [
'session_history.reference_id',
'session_history.id',
'MAX(started) AS date',
'MIN(started) AS started',
'MAX(stopped) AS stopped',
'SUM(CASE WHEN stopped > 0 THEN (stopped - started) ELSE 0 END) - \
SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS duration',
'SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS paused_counter',
'session_history.user_id',
'session_history.user',
'(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = "" \
THEN users.username ELSE users.friendly_name END) AS friendly_name',
'platform',
'player',
'ip_address',
'session_history.media_type',
'session_history_metadata.rating_key',
'session_history_metadata.parent_rating_key',
'session_history_metadata.grandparent_rating_key',
'session_history_metadata.full_title',
'session_history_metadata.title',
'session_history_metadata.parent_title',
'session_history_metadata.grandparent_title',
'session_history_metadata.original_title',
'session_history_metadata.year',
'session_history_metadata.media_index',
'session_history_metadata.parent_media_index',
'session_history_metadata.thumb',
'session_history_metadata.parent_thumb',
'session_history_metadata.grandparent_thumb',
'MAX((CASE WHEN (view_offset IS NULL OR view_offset = "") THEN 0.1 ELSE view_offset * 1.0 END) / \
(CASE WHEN (session_history_metadata.duration IS NULL OR session_history_metadata.duration = "") \
THEN 1.0 ELSE session_history_metadata.duration * 1.0 END) * 100) AS percent_complete',
'session_history_media_info.transcode_decision',
'COUNT(*) AS group_count',
'GROUP_CONCAT(session_history.id) AS group_ids',
'NULL AS state',
'NULL AS session_key'
]
if plexpy.CONFIG.HISTORY_TABLE_ACTIVITY:
table_name_union = 'sessions'
# Very hacky way to match the custom where parameters for the unioned table
custom_where_union = [[c[0].split('.')[-1], c[1]] for c in custom_where]
group_by_union = ['session_key']
columns_union = [
'NULL AS reference_id',
'NULL AS id',
'started AS date',
'started',
'stopped',
'SUM(CASE WHEN stopped > 0 THEN (stopped - started) ELSE (strftime("%s", "now") - started) END) - \
SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS duration',
'SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS paused_counter',
'user_id',
'user',
'(CASE WHEN friendly_name IS NULL OR TRIM(friendly_name) = "" \
THEN user ELSE friendly_name END) AS friendly_name',
'platform',
'player',
'ip_address',
'media_type',
'rating_key',
'parent_rating_key',
'grandparent_rating_key',
'full_title',
'title',
'parent_title',
'grandparent_title',
'original_title',
'year',
'media_index',
'parent_media_index',
'thumb',
'parent_thumb',
'grandparent_thumb',
'MAX((CASE WHEN (view_offset IS NULL OR view_offset = "") THEN 0.1 ELSE view_offset * 1.0 END) / \
(CASE WHEN (duration IS NULL OR duration = "") \
THEN 1.0 ELSE duration * 1.0 END) * 100) AS percent_complete',
'transcode_decision',
'NULL AS group_count',
'NULL AS group_ids',
'state',
'session_key'
]
else:
table_name_union = None
custom_where_union = group_by_union = columns_union = []
try:
query = data_tables.ssp_query(table_name='session_history',
table_name_union=table_name_union,
columns=columns,
columns_union=columns_union,
custom_where=custom_where,
custom_where_union=custom_where_union,
group_by=group_by,
group_by_union=group_by_union,
join_types=['LEFT OUTER JOIN',
'JOIN',
'JOIN'],
join_tables=['users',
'session_history_metadata',
'session_history_media_info'],
join_evals=[['session_history.user_id', 'users.user_id'],
['session_history.id', 'session_history_metadata.id'],
['session_history.id', 'session_history_media_info.id']],
kwargs=kwargs)
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_history: %s." % e)
return {'recordsFiltered': 0,
'recordsTotal': 0,
'draw': 0,
'data': 'null',
'error': 'Unable to execute database query.'}
history = query['result']
filter_duration = 0
total_duration = self.get_total_duration(custom_where=custom_where)
watched_percent = {'movie': plexpy.CONFIG.MOVIE_WATCHED_PERCENT,
'episode': plexpy.CONFIG.TV_WATCHED_PERCENT,
'track': plexpy.CONFIG.MUSIC_WATCHED_PERCENT,
'photo': 0,
'clip': plexpy.CONFIG.TV_WATCHED_PERCENT
}
rows = []
for item in history:
filter_duration += int(item['duration'])
if item['media_type'] == 'episode' and item['parent_thumb']:
thumb = item['parent_thumb']
elif item['media_type'] == 'episode':
thumb = item['grandparent_thumb']
else:
thumb = item['thumb']
if item['percent_complete'] >= watched_percent[item['media_type']]:
watched_status = 1
elif item['percent_complete'] >= watched_percent[item['media_type']]/2:
watched_status = 0.5
else:
watched_status = 0
# Rename Mystery platform names
platform = common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform'])
row = {'reference_id': item['reference_id'],
'id': item['id'],
'date': item['date'],
'started': item['started'],
'stopped': item['stopped'],
'duration': item['duration'],
'paused_counter': item['paused_counter'],
'user_id': item['user_id'],
'user': item['user'],
'friendly_name': item['friendly_name'],
'platform': platform,
'player': item['player'],
'ip_address': item['ip_address'],
'media_type': item['media_type'],
'rating_key': item['rating_key'],
'parent_rating_key': item['parent_rating_key'],
'grandparent_rating_key': item['grandparent_rating_key'],
'full_title': item['full_title'],
'title': item['parent_title'],
'parent_title': item['parent_title'],
'grandparent_title': item['grandparent_title'],
'original_title': item['original_title'],
'year': item['year'],
'media_index': item['media_index'],
'parent_media_index': item['parent_media_index'],
'thumb': thumb,
'transcode_decision': item['transcode_decision'],
'percent_complete': int(round(item['percent_complete'])),
'watched_status': watched_status,
'group_count': item['group_count'],
'group_ids': item['group_ids'],
'state': item['state'],
'session_key': item['session_key']
}
rows.append(row)
dict = {'recordsFiltered': query['filteredCount'],
'recordsTotal': query['totalCount'],
'data': session.friendly_name_to_username(rows),
'draw': query['draw'],
'filter_duration': helpers.human_duration(filter_duration, sig='dhm'),
'total_duration': helpers.human_duration(total_duration, sig='dhm')
}
return dict
def get_home_stats(self, grouping=None, time_range=30, stats_type='plays', stats_count=10, stats_cards=None):
    """Build the data behind the home page statistics cards.

    Parameters:
        grouping (bool or None): group history rows by reference_id instead of id;
            falls back to the GROUP_HISTORY_TABLES config setting when None.
        time_range (int): look-back window in days.
        stats_type (str): 'duration' sorts cards by total watch time, anything
            else sorts by play count.
        stats_count (int): maximum number of rows per card.
        stats_cards (list or None): card ids to build; falls back to the
            HOME_STATS_CARDS config setting when None.

    Returns:
        list: one dict per card with keys 'stat_id', 'stat_title', 'rows'
        (and 'stat_type' for the "top" cards), or None if any query fails.
    """
    monitor_db = database.MonitorDatabase()

    if grouping is None:
        grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES
    if stats_cards is None:
        stats_cards = plexpy.CONFIG.HOME_STATS_CARDS

    movie_watched_percent = plexpy.CONFIG.MOVIE_WATCHED_PERCENT
    tv_watched_percent = plexpy.CONFIG.TV_WATCHED_PERCENT
    # NOTE(review): music_watched_percent is read for parity with the other
    # media types but is not currently used by any card below.
    music_watched_percent = plexpy.CONFIG.MUSIC_WATCHED_PERCENT

    group_by = 'session_history.reference_id' if grouping else 'session_history.id'
    sort_type = 'total_duration' if stats_type == 'duration' else 'total_plays'

    home_stats = []

    for stat in stats_cards:
        if stat == 'top_movies':
            top_movies = []
            try:
                query = 'SELECT t.id, t.full_title, t.rating_key, t.thumb, t.section_id, ' \
                        't.art, t.media_type, t.content_rating, t.labels, t.started, ' \
                        'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \
                        'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
                        '   (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
                        '   AS d ' \
                        '   FROM session_history ' \
                        '   JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
                        '   WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
                        '   >= datetime("now", "-%s days", "localtime") ' \
                        '   AND session_history.media_type = "movie" ' \
                        '   GROUP BY %s) AS t ' \
                        'GROUP BY t.full_title ' \
                        'ORDER BY %s DESC, started DESC ' \
                        'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
                result = monitor_db.select(query)
            except Exception as e:
                logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_home_stats: top_movies: %s." % e)
                return None

            for item in result:
                row = {'title': item['full_title'],
                       'total_plays': item['total_plays'],
                       'total_duration': item['total_duration'],
                       'users_watched': '',
                       'rating_key': item['rating_key'],
                       'last_play': item['last_watch'],
                       'grandparent_thumb': '',
                       'thumb': item['thumb'],
                       'art': item['art'],
                       'section_id': item['section_id'],
                       'media_type': item['media_type'],
                       'content_rating': item['content_rating'],
                       'labels': item['labels'].split(';') if item['labels'] else (),
                       'user': '',
                       'friendly_name': '',
                       # Fixed: the 'platform' key was duplicated in the
                       # original dict literal (harmless but misleading).
                       'platform': '',
                       'row_id': item['id']
                       }
                top_movies.append(row)

            home_stats.append({'stat_id': stat,
                               'stat_type': sort_type,
                               'stat_title': 'Most Watched Movies',
                               'rows': session.mask_session_info(top_movies)})

        elif stat == 'popular_movies':
            popular_movies = []
            try:
                query = 'SELECT t.id, t.full_title, t.rating_key, t.thumb, t.section_id, ' \
                        't.art, t.media_type, t.content_rating, t.labels, t.started, ' \
                        'COUNT(DISTINCT t.user_id) AS users_watched, ' \
                        'MAX(t.started) AS last_watch, COUNT(t.id) as total_plays, SUM(t.d) AS total_duration ' \
                        'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
                        '   (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
                        '   AS d ' \
                        '   FROM session_history ' \
                        '   JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
                        '   WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
                        '   >= datetime("now", "-%s days", "localtime") ' \
                        '   AND session_history.media_type = "movie" ' \
                        '   GROUP BY %s) AS t ' \
                        'GROUP BY t.full_title ' \
                        'ORDER BY users_watched DESC, %s DESC, started DESC ' \
                        'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
                result = monitor_db.select(query)
            except Exception as e:
                logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_home_stats: popular_movies: %s." % e)
                return None

            for item in result:
                row = {'title': item['full_title'],
                       'users_watched': item['users_watched'],
                       'rating_key': item['rating_key'],
                       'last_play': item['last_watch'],
                       'total_plays': item['total_plays'],
                       'grandparent_thumb': '',
                       'thumb': item['thumb'],
                       'art': item['art'],
                       'section_id': item['section_id'],
                       'media_type': item['media_type'],
                       'content_rating': item['content_rating'],
                       'labels': item['labels'].split(';') if item['labels'] else (),
                       'user': '',
                       'friendly_name': '',
                       'platform': '',
                       'row_id': item['id']
                       }
                popular_movies.append(row)

            home_stats.append({'stat_id': stat,
                               'stat_title': 'Most Popular Movies',
                               'rows': session.mask_session_info(popular_movies)})

        elif stat == 'top_tv':
            top_tv = []
            try:
                query = 'SELECT t.id, t.grandparent_title, t.grandparent_rating_key, t.grandparent_thumb, t.section_id, ' \
                        't.art, t.media_type, t.content_rating, t.labels, t.started, ' \
                        'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \
                        'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
                        '   (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
                        '   AS d ' \
                        '   FROM session_history ' \
                        '   JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
                        '   WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
                        '   >= datetime("now", "-%s days", "localtime") ' \
                        '   AND session_history.media_type = "episode" ' \
                        '   GROUP BY %s) AS t ' \
                        'GROUP BY t.grandparent_title ' \
                        'ORDER BY %s DESC, started DESC ' \
                        'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
                result = monitor_db.select(query)
            except Exception as e:
                logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_home_stats: top_tv: %s." % e)
                return None

            for item in result:
                row = {'title': item['grandparent_title'],
                       'total_plays': item['total_plays'],
                       'total_duration': item['total_duration'],
                       'users_watched': '',
                       'rating_key': item['grandparent_rating_key'],
                       'last_play': item['last_watch'],
                       'grandparent_thumb': item['grandparent_thumb'],
                       'thumb': item['grandparent_thumb'],
                       'art': item['art'],
                       'section_id': item['section_id'],
                       'media_type': item['media_type'],
                       'content_rating': item['content_rating'],
                       'labels': item['labels'].split(';') if item['labels'] else (),
                       'user': '',
                       'friendly_name': '',
                       'platform': '',
                       'row_id': item['id']
                       }
                top_tv.append(row)

            home_stats.append({'stat_id': stat,
                               'stat_type': sort_type,
                               'stat_title': 'Most Watched TV Shows',
                               'rows': session.mask_session_info(top_tv)})

        elif stat == 'popular_tv':
            popular_tv = []
            try:
                query = 'SELECT t.id, t.grandparent_title, t.grandparent_rating_key, t.grandparent_thumb, t.section_id, ' \
                        't.art, t.media_type, t.content_rating, t.labels, t.started, ' \
                        'COUNT(DISTINCT t.user_id) AS users_watched, ' \
                        'MAX(t.started) AS last_watch, COUNT(t.id) as total_plays, SUM(t.d) AS total_duration ' \
                        'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
                        '   (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
                        '   AS d ' \
                        '   FROM session_history ' \
                        '   JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
                        '   WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
                        '   >= datetime("now", "-%s days", "localtime") ' \
                        '   AND session_history.media_type = "episode" ' \
                        '   GROUP BY %s) AS t ' \
                        'GROUP BY t.grandparent_title ' \
                        'ORDER BY users_watched DESC, %s DESC, started DESC ' \
                        'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
                result = monitor_db.select(query)
            except Exception as e:
                logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_home_stats: popular_tv: %s." % e)
                return None

            for item in result:
                row = {'title': item['grandparent_title'],
                       'users_watched': item['users_watched'],
                       'rating_key': item['grandparent_rating_key'],
                       'last_play': item['last_watch'],
                       'total_plays': item['total_plays'],
                       'grandparent_thumb': item['grandparent_thumb'],
                       'thumb': item['grandparent_thumb'],
                       'art': item['art'],
                       'section_id': item['section_id'],
                       'media_type': item['media_type'],
                       'content_rating': item['content_rating'],
                       'labels': item['labels'].split(';') if item['labels'] else (),
                       'user': '',
                       'friendly_name': '',
                       'platform': '',
                       'row_id': item['id']
                       }
                popular_tv.append(row)

            home_stats.append({'stat_id': stat,
                               'stat_title': 'Most Popular TV Shows',
                               'rows': session.mask_session_info(popular_tv)})

        elif stat == 'top_music':
            top_music = []
            try:
                query = 'SELECT t.id, t.grandparent_title, t.original_title, ' \
                        't.grandparent_rating_key, t.grandparent_thumb, t.section_id, ' \
                        't.art, t.media_type, t.content_rating, t.labels, t.started, ' \
                        'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \
                        'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
                        '   (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
                        '   AS d ' \
                        '   FROM session_history ' \
                        '   JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
                        '   WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
                        '   >= datetime("now", "-%s days", "localtime") ' \
                        '   AND session_history.media_type = "track" ' \
                        '   GROUP BY %s) AS t ' \
                        'GROUP BY t.original_title, t.grandparent_title ' \
                        'ORDER BY %s DESC, started DESC ' \
                        'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
                result = monitor_db.select(query)
            except Exception as e:
                logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_home_stats: top_music: %s." % e)
                return None

            for item in result:
                # Prefer the track's original artist over the album artist.
                row = {'title': item['original_title'] or item['grandparent_title'],
                       'total_plays': item['total_plays'],
                       'total_duration': item['total_duration'],
                       'users_watched': '',
                       'rating_key': item['grandparent_rating_key'],
                       'last_play': item['last_watch'],
                       'grandparent_thumb': item['grandparent_thumb'],
                       'thumb': item['grandparent_thumb'],
                       'art': item['art'],
                       'section_id': item['section_id'],
                       'media_type': item['media_type'],
                       'content_rating': item['content_rating'],
                       'labels': item['labels'].split(';') if item['labels'] else (),
                       'user': '',
                       'friendly_name': '',
                       'platform': '',
                       'row_id': item['id']
                       }
                top_music.append(row)

            home_stats.append({'stat_id': stat,
                               'stat_type': sort_type,
                               'stat_title': 'Most Played Artists',
                               'rows': session.mask_session_info(top_music)})

        elif stat == 'popular_music':
            popular_music = []
            try:
                query = 'SELECT t.id, t.grandparent_title, t.original_title, ' \
                        't.grandparent_rating_key, t.grandparent_thumb, t.section_id, ' \
                        't.art, t.media_type, t.content_rating, t.labels, t.started, ' \
                        'COUNT(DISTINCT t.user_id) AS users_watched, ' \
                        'MAX(t.started) AS last_watch, COUNT(t.id) as total_plays, SUM(t.d) AS total_duration ' \
                        'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
                        '   (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
                        '   AS d ' \
                        '   FROM session_history ' \
                        '   JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
                        '   WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
                        '   >= datetime("now", "-%s days", "localtime") ' \
                        '   AND session_history.media_type = "track" ' \
                        '   GROUP BY %s) AS t ' \
                        'GROUP BY t.original_title, t.grandparent_title ' \
                        'ORDER BY users_watched DESC, %s DESC, started DESC ' \
                        'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
                result = monitor_db.select(query)
            except Exception as e:
                logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_home_stats: popular_music: %s." % e)
                return None

            for item in result:
                row = {'title': item['original_title'] or item['grandparent_title'],
                       'users_watched': item['users_watched'],
                       'rating_key': item['grandparent_rating_key'],
                       'last_play': item['last_watch'],
                       'total_plays': item['total_plays'],
                       'grandparent_thumb': item['grandparent_thumb'],
                       'thumb': item['grandparent_thumb'],
                       'art': item['art'],
                       'section_id': item['section_id'],
                       'media_type': item['media_type'],
                       'content_rating': item['content_rating'],
                       'labels': item['labels'].split(';') if item['labels'] else (),
                       'user': '',
                       'friendly_name': '',
                       'platform': '',
                       'row_id': item['id']
                       }
                popular_music.append(row)

            home_stats.append({'stat_id': stat,
                               'stat_title': 'Most Popular Artists',
                               'rows': session.mask_session_info(popular_music)})

        elif stat == 'top_users':
            top_users = []
            try:
                query = 'SELECT t.user, t.user_id, t.user_thumb, t.custom_thumb, t.started, ' \
                        '(CASE WHEN t.friendly_name IS NULL THEN t.username ELSE t.friendly_name END) ' \
                        '   AS friendly_name, ' \
                        'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \
                        'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
                        '   (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
                        '   AS d, users.thumb AS user_thumb, users.custom_avatar_url AS custom_thumb ' \
                        '   FROM session_history ' \
                        '   JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
                        '   LEFT OUTER JOIN users ON session_history.user_id = users.user_id ' \
                        '   WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
                        '   >= datetime("now", "-%s days", "localtime") ' \
                        '   GROUP BY %s) AS t ' \
                        'GROUP BY t.user_id ' \
                        'ORDER BY %s DESC, started DESC ' \
                        'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
                result = monitor_db.select(query)
            except Exception as e:
                logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_home_stats: top_users: %s." % e)
                return None

            for item in result:
                # Custom avatar wins over the Plex thumb; fall back to default.
                if item['custom_thumb'] and item['custom_thumb'] != item['user_thumb']:
                    user_thumb = item['custom_thumb']
                elif item['user_thumb']:
                    user_thumb = item['user_thumb']
                else:
                    user_thumb = common.DEFAULT_USER_THUMB

                row = {'user': item['user'],
                       'user_id': item['user_id'],
                       'friendly_name': item['friendly_name'],
                       'total_plays': item['total_plays'],
                       'total_duration': item['total_duration'],
                       'last_play': item['last_watch'],
                       'user_thumb': user_thumb,
                       'grandparent_thumb': '',
                       'art': '',
                       'users_watched': '',
                       'rating_key': '',
                       'title': '',
                       'platform': '',
                       'row_id': ''
                       }
                top_users.append(row)

            home_stats.append({'stat_id': stat,
                               'stat_type': sort_type,
                               'stat_title': 'Most Active Users',
                               'rows': session.mask_session_info(top_users, mask_metadata=False)})

        elif stat == 'top_platforms':
            top_platform = []
            try:
                query = 'SELECT t.platform, t.started, ' \
                        'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \
                        'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \
                        '   (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \
                        '   AS d ' \
                        '   FROM session_history ' \
                        '   JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
                        '   WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
                        '   >= datetime("now", "-%s days", "localtime") ' \
                        '   GROUP BY %s) AS t ' \
                        'GROUP BY t.platform ' \
                        'ORDER BY %s DESC, started DESC ' \
                        'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)
                result = monitor_db.select(query)
            except Exception as e:
                logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_home_stats: top_platforms: %s." % e)
                return None

            for item in result:
                # Rename Mystery platform names
                platform = common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform'])
                platform_name = next((v for k, v in common.PLATFORM_NAMES.iteritems() if k in platform.lower()), 'default')

                row = {'total_plays': item['total_plays'],
                       'total_duration': item['total_duration'],
                       'last_play': item['last_watch'],
                       'platform': platform,
                       'platform_name': platform_name,
                       'title': '',
                       'thumb': '',
                       'grandparent_thumb': '',
                       'art': '',
                       'users_watched': '',
                       'rating_key': '',
                       'user': '',
                       'friendly_name': '',
                       'row_id': ''
                       }
                top_platform.append(row)

            home_stats.append({'stat_id': stat,
                               'stat_type': sort_type,
                               'stat_title': 'Most Active Platforms',
                               'rows': session.mask_session_info(top_platform, mask_metadata=False)})

        elif stat == 'last_watched':
            last_watched = []
            try:
                query = 'SELECT t.id, t.full_title, t.rating_key, t.thumb, t.grandparent_thumb, ' \
                        't.user, t.user_id, t.custom_avatar_url as user_thumb, t.player, t.section_id, ' \
                        't.art, t.media_type, t.content_rating, t.labels, ' \
                        '(CASE WHEN t.friendly_name IS NULL THEN t.username ELSE t.friendly_name END) ' \
                        '   AS friendly_name, ' \
                        'MAX(t.started) AS last_watch, ' \
                        '((CASE WHEN t.view_offset IS NULL THEN 0.1 ELSE t.view_offset * 1.0 END) / ' \
                        '   (CASE WHEN t.duration IS NULL THEN 1.0 ELSE t.duration * 1.0 END) * 100) ' \
                        '   AS percent_complete ' \
                        'FROM (SELECT * FROM session_history ' \
                        '   JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
                        '   LEFT OUTER JOIN users ON session_history.user_id = users.user_id ' \
                        '   WHERE datetime(session_history.stopped, "unixepoch", "localtime") ' \
                        '   >= datetime("now", "-%s days", "localtime") ' \
                        '   AND (session_history.media_type = "movie" ' \
                        '   OR session_history_metadata.media_type = "episode") ' \
                        '   GROUP BY %s) AS t ' \
                        'WHERE t.media_type == "movie" AND percent_complete >= %s ' \
                        '   OR t.media_type == "episode" AND percent_complete >= %s ' \
                        'GROUP BY t.id ' \
                        'ORDER BY last_watch DESC ' \
                        'LIMIT %s' % (time_range, group_by, movie_watched_percent, tv_watched_percent, stats_count)
                result = monitor_db.select(query)
            except Exception as e:
                logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_home_stats: last_watched: %s." % e)
                return None

            for item in result:
                if not item['grandparent_thumb'] or item['grandparent_thumb'] == '':
                    thumb = item['thumb']
                else:
                    thumb = item['grandparent_thumb']

                row = {'row_id': item['id'],
                       'user': item['user'],
                       'friendly_name': item['friendly_name'],
                       'user_id': item['user_id'],
                       'user_thumb': item['user_thumb'],
                       'title': item['full_title'],
                       'rating_key': item['rating_key'],
                       'thumb': thumb,
                       'grandparent_thumb': item['grandparent_thumb'],
                       'art': item['art'],
                       'section_id': item['section_id'],
                       'media_type': item['media_type'],
                       'content_rating': item['content_rating'],
                       'labels': item['labels'].split(';') if item['labels'] else (),
                       'last_watch': item['last_watch'],
                       'player': item['player']
                       }
                last_watched.append(row)

            home_stats.append({'stat_id': stat,
                               'stat_title': 'Recently Watched',
                               'rows': session.mask_session_info(last_watched)})

        elif stat == 'most_concurrent':

            def calc_most_concurrent(title, result):
                '''
                Function to calculate most concurrent streams
                Input: Stat title, SQLite query result
                Output: Dict {title, count, started, stopped}
                '''
                # Sweep-line over start/stop events.  The 'B'/'A' suffixes
                # make a stop event sort before a start event at the same
                # timestamp, so back-to-back sessions don't overlap.
                times = []
                for item in result:
                    times.append({'time': str(item['started']) + 'B', 'count': 1})
                    times.append({'time': str(item['stopped']) + 'A', 'count': -1})
                times = sorted(times, key=lambda k: k['time'])

                count = 0
                last_count = 0
                last_start = ''
                concurrent = {'title': title,
                              'count': 0,
                              'started': None,
                              'stopped': None
                              }

                for d in times:
                    if d['count'] == 1:
                        count += d['count']
                        if count >= last_count:
                            last_start = d['time']
                    else:
                        if count >= last_count:
                            last_count = count
                            concurrent['count'] = count
                            concurrent['started'] = last_start[:-1]
                            concurrent['stopped'] = d['time'][:-1]
                        count += d['count']

                return concurrent

            most_concurrent = []

            try:
                base_query = 'SELECT session_history.started, session_history.stopped ' \
                             'FROM session_history ' \
                             'JOIN session_history_media_info ON session_history.id = session_history_media_info.id ' \
                             'WHERE datetime(stopped, "unixepoch", "localtime") ' \
                             '>= datetime("now", "-%s days", "localtime") ' % time_range

                title = 'Concurrent Streams'
                query = base_query
                result = monitor_db.select(query)
                if result:
                    most_concurrent.append(calc_most_concurrent(title, result))

                title = 'Concurrent Transcodes'
                query = base_query \
                    + 'AND session_history_media_info.transcode_decision = "transcode" '
                result = monitor_db.select(query)
                if result:
                    most_concurrent.append(calc_most_concurrent(title, result))

                title = 'Concurrent Direct Streams'
                query = base_query \
                    + 'AND session_history_media_info.transcode_decision = "copy" '
                result = monitor_db.select(query)
                if result:
                    most_concurrent.append(calc_most_concurrent(title, result))

                title = 'Concurrent Direct Plays'
                query = base_query \
                    + 'AND session_history_media_info.transcode_decision = "direct play" '
                result = monitor_db.select(query)
                if result:
                    most_concurrent.append(calc_most_concurrent(title, result))
            except Exception as e:
                logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_home_stats: most_concurrent: %s." % e)
                return None

            home_stats.append({'stat_id': stat,
                               'stat_title': 'Most Concurrent Streams',
                               'rows': most_concurrent})

    return home_stats
def get_library_stats(self, library_cards=None):
    """Fetch summary counts for the library cards on the home page.

    Parameters:
        library_cards (list or None): section_id strings to include; the
            session's shared libraries override this when present.

    Returns:
        dict: {section_type: [library dict, ...]} grouped by section type,
        or None on query failure or during the first-run wizard.
    """
    # Avoid a mutable default argument (shared across calls).
    if library_cards is None:
        library_cards = []

    monitor_db = database.MonitorDatabase()

    if session.get_session_shared_libraries():
        library_cards = session.get_session_shared_libraries()

    if 'first_run_wizard' in library_cards:
        return None

    library_stats = []

    try:
        # library_cards holds numeric section ids supplied by the app itself.
        query = 'SELECT section_id, section_name, section_type, thumb AS library_thumb, ' \
                'custom_thumb_url AS custom_thumb, art, count, parent_count, child_count ' \
                'FROM library_sections ' \
                'WHERE section_id IN (%s) ' \
                'ORDER BY section_type, count DESC, parent_count DESC, child_count DESC ' % ','.join(library_cards)
        result = monitor_db.select(query)
    except Exception as e:
        logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_library_stats: %s." % e)
        return None

    for item in result:
        # Custom thumb wins over the library's own thumb; fall back to default.
        if item['custom_thumb'] and item['custom_thumb'] != item['library_thumb']:
            library_thumb = item['custom_thumb']
        elif item['library_thumb']:
            library_thumb = item['library_thumb']
        else:
            library_thumb = common.DEFAULT_COVER_THUMB

        library = {'section_id': item['section_id'],
                   'section_name': item['section_name'],
                   'section_type': item['section_type'],
                   'thumb': library_thumb,
                   'art': item['art'],
                   'count': item['count'],
                   'child_count': item['parent_count'],
                   'grandchild_count': item['child_count']
                   }
        library_stats.append(library)

    # groupby requires its input sorted by the grouping key; the SQL
    # ORDER BY section_type above guarantees that.
    library_stats = {k: list(v) for k, v in groupby(library_stats, key=lambda x: x['section_type'])}

    return library_stats
def get_stream_details(self, row_id=None, session_key=None):
    """Return the media/stream decision details for one play.

    Looks up either a historical play (``row_id`` -> session_history tables)
    or an active session (``session_key`` -> sessions table) and returns a
    flat dict of source and stream attributes.  Returns None when neither
    identifier is given; returns {} when the query matches no rows.
    """
    monitor_db = database.MonitorDatabase()

    user_cond = ''
    # Restrict to the logged-in user's own rows when the session is scoped.
    table = 'session_history' if row_id else 'sessions'
    if session.get_session_user_id():
        user_cond = 'AND %s.user_id = %s ' % (table, session.get_session_user_id())

    if row_id:
        # Historical play: join media info + metadata on the history row id.
        # NOTE(review): 'container' appears twice in the select list; the
        # second occurrence is redundant but harmless.
        query = 'SELECT bitrate, video_resolution, ' \
                'optimized_version, optimized_version_profile, optimized_version_title, ' \
                'synced_version, synced_version_profile, ' \
                'container, video_codec, video_bitrate, video_width, video_height, video_framerate, aspect_ratio, ' \
                'audio_codec, audio_bitrate, audio_channels, subtitle_codec, ' \
                'stream_bitrate, stream_video_resolution, quality_profile, stream_container_decision, stream_container, ' \
                'stream_video_decision, stream_video_codec, stream_video_bitrate, stream_video_width, stream_video_height, ' \
                'stream_video_framerate, ' \
                'stream_audio_decision, stream_audio_codec, stream_audio_bitrate, stream_audio_channels, ' \
                'subtitles, stream_subtitle_decision, stream_subtitle_codec, ' \
                'transcode_hw_decoding, transcode_hw_encoding, ' \
                'video_decision, audio_decision, transcode_decision, width, height, container, ' \
                'transcode_container, transcode_video_codec, transcode_audio_codec, transcode_audio_channels, ' \
                'transcode_width, transcode_height, ' \
                'session_history_metadata.media_type, title, grandparent_title, original_title ' \
                'FROM session_history_media_info ' \
                'JOIN session_history ON session_history_media_info.id = session_history.id ' \
                'JOIN session_history_metadata ON session_history_media_info.id = session_history_metadata.id ' \
                'WHERE session_history_media_info.id = ? %s' % user_cond
        result = monitor_db.select(query, args=[row_id])
    elif session_key:
        # Active session: same column set, read straight from sessions.
        query = 'SELECT bitrate, video_resolution, ' \
                'optimized_version, optimized_version_profile, optimized_version_title, ' \
                'synced_version, synced_version_profile, ' \
                'container, video_codec, video_bitrate, video_width, video_height, video_framerate, aspect_ratio, ' \
                'audio_codec, audio_bitrate, audio_channels, subtitle_codec, ' \
                'stream_bitrate, stream_video_resolution, quality_profile, stream_container_decision, stream_container, ' \
                'stream_video_decision, stream_video_codec, stream_video_bitrate, stream_video_width, stream_video_height, ' \
                'stream_video_framerate, ' \
                'stream_audio_decision, stream_audio_codec, stream_audio_bitrate, stream_audio_channels, ' \
                'subtitles, stream_subtitle_decision, stream_subtitle_codec, ' \
                'transcode_hw_decoding, transcode_hw_encoding, ' \
                'video_decision, audio_decision, transcode_decision, width, height, container, ' \
                'transcode_container, transcode_video_codec, transcode_audio_codec, transcode_audio_channels, ' \
                'transcode_width, transcode_height, ' \
                'media_type, title, grandparent_title, original_title ' \
                'FROM sessions ' \
                'WHERE session_key = ? %s' % user_cond
        result = monitor_db.select(query, args=[session_key])
    else:
        return None

    stream_output = {}

    for item in result:
        pre_tautulli = 0

        # For backwards compatibility. Pick one new Tautulli key to check and override with old values.
        if not item['stream_container']:
            # Row was written before the stream_* columns existed: synthesize
            # them from the legacy transcode_*/source columns.
            item['stream_video_resolution'] = item['video_resolution']
            item['stream_container'] = item['transcode_container'] or item['container']
            item['stream_video_decision'] = item['video_decision']
            item['stream_video_codec'] = item['transcode_video_codec'] or item['video_codec']
            item['stream_video_width'] = item['transcode_width'] or item['width']
            item['stream_video_height'] = item['transcode_height'] or item['height']
            item['stream_audio_decision'] = item['audio_decision']
            item['stream_audio_codec'] = item['transcode_audio_codec'] or item['audio_codec']
            item['stream_audio_channels'] = item['transcode_audio_channels'] or item['audio_channels']
            item['video_width'] = item['width']
            item['video_height'] = item['height']
            pre_tautulli = 1

        stream_output = {'bitrate': item['bitrate'],
                         'video_resolution': item['video_resolution'],
                         'optimized_version': item['optimized_version'],
                         'optimized_version_profile': item['optimized_version_profile'],
                         'optimized_version_title': item['optimized_version_title'],
                         'synced_version': item['synced_version'],
                         'synced_version_profile': item['synced_version_profile'],
                         'container': item['container'],
                         'video_codec': item['video_codec'],
                         'video_bitrate': item['video_bitrate'],
                         'video_width': item['video_width'],
                         'video_height': item['video_height'],
                         'video_framerate': item['video_framerate'],
                         'aspect_ratio': item['aspect_ratio'],
                         'audio_codec': item['audio_codec'],
                         'audio_bitrate': item['audio_bitrate'],
                         'audio_channels': item['audio_channels'],
                         'subtitle_codec': item['subtitle_codec'],
                         'stream_bitrate': item['stream_bitrate'],
                         'stream_video_resolution': item['stream_video_resolution'],
                         'quality_profile': item['quality_profile'],
                         'stream_container_decision': item['stream_container_decision'],
                         'stream_container': item['stream_container'],
                         'stream_video_decision': item['stream_video_decision'],
                         'stream_video_codec': item['stream_video_codec'],
                         'stream_video_bitrate': item['stream_video_bitrate'],
                         'stream_video_width': item['stream_video_width'],
                         'stream_video_height': item['stream_video_height'],
                         'stream_video_framerate': item['stream_video_framerate'],
                         'stream_audio_decision': item['stream_audio_decision'],
                         'stream_audio_codec': item['stream_audio_codec'],
                         'stream_audio_bitrate': item['stream_audio_bitrate'],
                         'stream_audio_channels': item['stream_audio_channels'],
                         'subtitles': item['subtitles'],
                         'stream_subtitle_decision': item['stream_subtitle_decision'],
                         'stream_subtitle_codec': item['stream_subtitle_codec'],
                         'transcode_hw_decoding': item['transcode_hw_decoding'],
                         'transcode_hw_encoding': item['transcode_hw_encoding'],
                         'video_decision': item['video_decision'],
                         'audio_decision': item['audio_decision'],
                         'media_type': item['media_type'],
                         'title': item['title'],
                         'grandparent_title': item['grandparent_title'],
                         'original_title': item['original_title'],
                         'current_session': 1 if session_key else 0,
                         'pre_tautulli': pre_tautulli
                         }

    # Normalize NULLs/falsy values to empty strings for template consumption.
    # (iteritems: this file targets Python 2.)
    stream_output = {k: v or '' for k, v in stream_output.iteritems()}
    return stream_output
def get_metadata_details(self, rating_key):
    """Return the newest recorded metadata for a Plex rating key.

    Queries the most recent session_history_metadata row for the key and
    reshapes it into a single metadata dict.  Returns [] when the key is
    falsy, nothing matches, or the section is filtered for this session.
    """
    monitor_db = database.MonitorDatabase()

    if not rating_key:
        result = []
    else:
        query = 'SELECT session_history_metadata.id, ' \
                'session_history_metadata.rating_key, session_history_metadata.parent_rating_key, ' \
                'session_history_metadata.grandparent_rating_key, session_history_metadata.title, ' \
                'session_history_metadata.parent_title, session_history_metadata.grandparent_title, ' \
                'session_history_metadata.original_title, session_history_metadata.full_title, ' \
                'library_sections.section_name, ' \
                'session_history_metadata.media_index, session_history_metadata.parent_media_index, ' \
                'session_history_metadata.section_id, session_history_metadata.thumb, ' \
                'session_history_metadata.parent_thumb, session_history_metadata.grandparent_thumb, ' \
                'session_history_metadata.art, session_history_metadata.media_type, session_history_metadata.year, ' \
                'session_history_metadata.originally_available_at, session_history_metadata.added_at, ' \
                'session_history_metadata.updated_at, session_history_metadata.last_viewed_at, ' \
                'session_history_metadata.content_rating, session_history_metadata.summary, ' \
                'session_history_metadata.tagline, session_history_metadata.rating, session_history_metadata.duration, ' \
                'session_history_metadata.guid, session_history_metadata.directors, session_history_metadata.writers, ' \
                'session_history_metadata.actors, session_history_metadata.genres, session_history_metadata.studio, ' \
                'session_history_metadata.labels, ' \
                'session_history_media_info.container, session_history_media_info.bitrate, ' \
                'session_history_media_info.video_codec, session_history_media_info.video_resolution, ' \
                'session_history_media_info.video_framerate, session_history_media_info.audio_codec, ' \
                'session_history_media_info.audio_channels ' \
                'FROM session_history_metadata ' \
                'JOIN library_sections ON session_history_metadata.section_id = library_sections.section_id ' \
                'JOIN session_history_media_info ON session_history_metadata.id = session_history_media_info.id ' \
                'WHERE session_history_metadata.rating_key = ? ' \
                'ORDER BY session_history_metadata.id DESC ' \
                'LIMIT 1'
        result = monitor_db.select(query=query, args=[rating_key])

    def _split_field(value):
        # Multi-value fields are stored as semicolon-delimited strings.
        return value.split(';') if value else []

    metadata_list = []

    for row in result:
        media_info = [{'container': row['container'],
                       'bitrate': row['bitrate'],
                       'video_codec': row['video_codec'],
                       'video_resolution': row['video_resolution'],
                       'video_framerate': row['video_framerate'],
                       'audio_codec': row['audio_codec'],
                       'audio_channels': row['audio_channels']
                       }]
        metadata_list.append({'media_type': row['media_type'],
                              'rating_key': row['rating_key'],
                              'parent_rating_key': row['parent_rating_key'],
                              'grandparent_rating_key': row['grandparent_rating_key'],
                              'grandparent_title': row['grandparent_title'],
                              'original_title': row['original_title'],
                              'parent_media_index': row['parent_media_index'],
                              'parent_title': row['parent_title'],
                              'media_index': row['media_index'],
                              'studio': row['studio'],
                              'title': row['title'],
                              'content_rating': row['content_rating'],
                              'summary': row['summary'],
                              'tagline': row['tagline'],
                              'rating': row['rating'],
                              'duration': row['duration'],
                              'year': row['year'],
                              'thumb': row['thumb'],
                              'parent_thumb': row['parent_thumb'],
                              'grandparent_thumb': row['grandparent_thumb'],
                              'art': row['art'],
                              'originally_available_at': row['originally_available_at'],
                              'added_at': row['added_at'],
                              'updated_at': row['updated_at'],
                              'last_viewed_at': row['last_viewed_at'],
                              'guid': row['guid'],
                              'directors': _split_field(row['directors']),
                              'writers': _split_field(row['writers']),
                              'actors': _split_field(row['actors']),
                              'genres': _split_field(row['genres']),
                              'labels': _split_field(row['labels']),
                              'library_name': row['section_name'],
                              'section_id': row['section_id'],
                              'media_info': media_info
                              })

    # Drop rows from sections the current session may not see.
    filtered = session.filter_session_info(metadata_list, filter_key='section_id')
    return filtered[0] if filtered else []
def get_total_duration(self, custom_where=None):
monitor_db = database.MonitorDatabase()
# Split up custom wheres
if custom_where:
where = 'WHERE ' + ' AND '.join([w[0] + ' = "' + w[1] + '"' for w in custom_where])
else:
where = ''
try:
query = 'SELECT SUM(CASE WHEN stopped > 0 THEN (stopped - started) ELSE 0 END) - ' \
'SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS total_duration ' \
'FROM session_history ' \
'JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \
'JOIN session_history_media_info ON session_history_media_info.id = session_history.id ' \
'%s ' % where
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_total_duration: %s." % e)
return None
total_duration = 0
for item in result:
total_duration = item['total_duration']
return total_duration
def get_session_ip(self, session_key=''):
monitor_db = database.MonitorDatabase()
ip_address = 'N/A'
user_cond = ''
if session.get_session_user_id():
user_cond = 'AND user_id = %s ' % session.get_session_user_id()
if session_key:
try:
query = 'SELECT ip_address FROM sessions WHERE session_key = %d %s' % (int(session_key), user_cond)
result = monitor_db.select(query)
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_session_ip: %s." % e)
return ip_address
else:
return ip_address
for item in result:
ip_address = item['ip_address']
return ip_address
def get_img_info(self, img=None, rating_key=None, width=None, height=None,
opacity=None, background=None, blur=None, fallback=None,
order_by='', service=None):
monitor_db = database.MonitorDatabase()
img_info = []
where_params = []
args = []
if img is not None:
where_params.append('img')
args.append(img)
if rating_key is not None:
where_params.append('rating_key')
args.append(rating_key)
if width is not None:
where_params.append('width')
args.append(width)
if height is not None:
where_params.append('height')
args.append(height)
if opacity is not None:
where_params.append('opacity')
args.append(opacity)
if background is not None:
where_params.append('background')
args.append(background)
if blur is not None:
where_params.append('blur')
args.append(blur)
if fallback is not None:
where_params.append('fallback')
args.append(fallback)
where = ''
if where_params:
where = 'WHERE ' + ' AND '.join([w + ' = ?' for w in where_params])
if order_by:
order_by = 'ORDER BY ' + order_by + ' DESC'
if service == 'imgur':
query = 'SELECT imgur_title AS img_title, imgur_url AS img_url FROM imgur_lookup ' \
'JOIN image_hash_lookup ON imgur_lookup.img_hash = image_hash_lookup.img_hash ' \
'%s %s' % (where, order_by)
elif service == 'cloudinary':
query = 'SELECT cloudinary_title AS img_title, cloudinary_url AS img_url FROM cloudinary_lookup ' \
'JOIN image_hash_lookup ON cloudinary_lookup.img_hash = image_hash_lookup.img_hash ' \
'%s %s' % (where, order_by)
else:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_img_info: "
"service not provided.")
return img_info
try:
img_info = monitor_db.select(query, args=args)
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_img_info: %s." % e)
return img_info
def set_img_info(self, img_hash=None, img_title=None, img_url=None, delete_hash=None, service=None):
monitor_db = database.MonitorDatabase()
keys = {'img_hash': img_hash}
if service == 'imgur':
table = 'imgur_lookup'
values = {'imgur_title': img_title,
'imgur_url': img_url,
'delete_hash': delete_hash}
elif service == 'cloudinary':
table = 'cloudinary_lookup'
values = {'cloudinary_title': img_title,
'cloudinary_url': img_url}
else:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for set_img_info: "
"service not provided.")
return
monitor_db.upsert(table, key_dict=keys, value_dict=values)
    def delete_img_info(self, rating_key=None, service='', delete_all=False):
        """Delete hosted image info from the remote service and the database.

        rating_key -- restrict deletion to images for this rating key.
        service    -- image host name; only honored when delete_all is True
                      (otherwise the currently configured service is used).
        delete_all -- when True, operate without a rating_key restriction.

        Returns the service string that was acted on, or False when a
        required rating_key is missing.
        """
        monitor_db = database.MonitorDatabase()

        # Unless deleting everything, always act on the currently configured
        # image service, overriding any service argument supplied.
        if not delete_all:
            service = helpers.get_img_service()

        if not rating_key and not delete_all:
            logger.error(u"Tautulli DataFactory :: Unable to delete hosted images: rating_key not provided.")
            return False

        where = ''
        args = []
        log_msg = ''
        if rating_key:
            where = 'WHERE rating_key = ?'
            args = [rating_key]
            log_msg = ' for rating_key %s' % rating_key

        if service.lower() == 'imgur':
            # Delete from Imgur
            query = 'SELECT imgur_title, delete_hash, fallback FROM imgur_lookup ' \
                    'JOIN image_hash_lookup ON imgur_lookup.img_hash = image_hash_lookup.img_hash %s' % where
            results = monitor_db.select(query, args=args)

            # Ask Imgur to remove each image (needs its delete_hash) before
            # purging the local lookup rows.
            for imgur_info in results:
                if imgur_info['delete_hash']:
                    helpers.delete_from_imgur(delete_hash=imgur_info['delete_hash'],
                                              img_title=imgur_info['imgur_title'],
                                              fallback=imgur_info['fallback'])

            logger.info(u"Tautulli DataFactory :: Deleting Imgur info%s from the database."
                        % log_msg)
            result = monitor_db.action('DELETE FROM imgur_lookup WHERE img_hash '
                                       'IN (SELECT img_hash FROM image_hash_lookup %s)' % where,
                                       args)

        elif service.lower() == 'cloudinary':
            # Delete from Cloudinary
            query = 'SELECT cloudinary_title, rating_key, fallback FROM cloudinary_lookup ' \
                    'JOIN image_hash_lookup ON cloudinary_lookup.img_hash = image_hash_lookup.img_hash %s ' \
                    'GROUP BY rating_key' % where
            results = monitor_db.select(query, args=args)

            # Cloudinary deletes by rating_key; the GROUP BY above yields one
            # row (and therefore one API call) per key.
            for cloudinary_info in results:
                helpers.delete_from_cloudinary(rating_key=cloudinary_info['rating_key'])

            logger.info(u"Tautulli DataFactory :: Deleting Cloudinary info%s from the database."
                        % log_msg)
            result = monitor_db.action('DELETE FROM cloudinary_lookup WHERE img_hash '
                                       'IN (SELECT img_hash FROM image_hash_lookup %s)' % where,
                                       args)

        else:
            logger.error(u"Tautulli DataFactory :: Unable to delete hosted images: invalid service '%s' provided."
                         % service)

        return service
def get_poster_info(self, rating_key='', metadata=None, service=None):
poster_key = ''
if str(rating_key).isdigit():
poster_key = rating_key
elif metadata:
if metadata['media_type'] in ('movie', 'show', 'artist', 'collection'):
poster_key = metadata['rating_key']
elif metadata['media_type'] in ('season', 'album'):
poster_key = metadata['rating_key']
elif metadata['media_type'] in ('episode', 'track'):
poster_key = metadata['parent_rating_key']
poster_info = {}
if poster_key:
service = service or helpers.get_img_service()
if service:
img_info = self.get_img_info(rating_key=poster_key,
order_by='height',
fallback='poster',
service=service)
if img_info:
poster_info = {'poster_title': img_info[0]['img_title'],
'poster_url': img_info[0]['img_url'],
'img_service': service.capitalize()}
return poster_info
def get_lookup_info(self, rating_key='', metadata=None):
monitor_db = database.MonitorDatabase()
lookup_key = ''
if str(rating_key).isdigit():
lookup_key = rating_key
elif metadata:
if metadata['media_type'] in ('movie', 'show', 'artist'):
lookup_key = metadata['rating_key']
elif metadata['media_type'] in ('season', 'album'):
lookup_key = metadata['parent_rating_key']
elif metadata['media_type'] in ('episode', 'track'):
lookup_key = metadata['grandparent_rating_key']
lookup_info = {'tvmaze_id': '',
'themoviedb_id': ''}
if lookup_key:
try:
query = 'SELECT tvmaze_id FROM tvmaze_lookup ' \
'WHERE rating_key = ?'
tvmaze_info = monitor_db.select_single(query, args=[lookup_key])
if tvmaze_info:
lookup_info['tvmaze_id'] = tvmaze_info['tvmaze_id']
query = 'SELECT themoviedb_id FROM themoviedb_lookup ' \
'WHERE rating_key = ?'
themoviedb_info = monitor_db.select_single(query, args=[lookup_key])
if themoviedb_info:
lookup_info['themoviedb_id'] = themoviedb_info['themoviedb_id']
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_lookup_info: %s." % e)
return lookup_info
def delete_lookup_info(self, rating_key='', title=''):
monitor_db = database.MonitorDatabase()
if rating_key:
logger.info(u"Tautulli DataFactory :: Deleting lookup info for '%s' (rating_key %s) from the database."
% (title, rating_key))
result_tvmaze = monitor_db.action('DELETE FROM tvmaze_lookup WHERE rating_key = ?', [rating_key])
result_themoviedb = monitor_db.action('DELETE FROM themoviedb_lookup WHERE rating_key = ?', [rating_key])
return True if (result_tvmaze or result_themoviedb) else False
def get_search_query(self, rating_key=''):
monitor_db = database.MonitorDatabase()
if rating_key:
query = 'SELECT rating_key, parent_rating_key, grandparent_rating_key, title, parent_title, grandparent_title, ' \
'media_index, parent_media_index, year, media_type ' \
'FROM session_history_metadata ' \
'WHERE rating_key = ? ' \
'OR parent_rating_key = ? ' \
'OR grandparent_rating_key = ? ' \
'LIMIT 1'
result = monitor_db.select(query=query, args=[rating_key, rating_key, rating_key])
else:
result = []
query = {}
query_string = None
media_type = None
for item in result:
title = item['title']
parent_title = item['parent_title']
grandparent_title = item['grandparent_title']
media_index = item['media_index']
parent_media_index = item['parent_media_index']
year = item['year']
if str(item['rating_key']) == rating_key:
query_string = item['title']
media_type = item['media_type']
elif str(item['parent_rating_key']) == rating_key:
if item['media_type'] == 'episode':
query_string = item['grandparent_title']
media_type = 'season'
elif item['media_type'] == 'track':
query_string = item['parent_title']
media_type = 'album'
elif str(item['grandparent_rating_key']) == rating_key:
if item['media_type'] == 'episode':
query_string = item['grandparent_title']
media_type = 'show'
elif item['media_type'] == 'track':
query_string = item['grandparent_title']
media_type = 'artist'
if query_string and media_type:
query = {'query_string': query_string,
'title': title,
'parent_title': parent_title,
'grandparent_title': grandparent_title,
'media_index': media_index,
'parent_media_index': parent_media_index,
'year': year,
'media_type': media_type,
'rating_key': rating_key
}
else:
return None
return query
    def get_rating_keys_list(self, rating_key='', media_type=''):
        """Build a nested tree of historical rating keys for an item:
        {grandparent_key_index: {'rating_key': ..., 'children': {parent: ...}}}.

        Used to re-match items (e.g. after a Plex re-scan).  Music items are
        matched by title, video items by index numbers.  Returns {} when the
        initial lookup fails; movies get a single flat entry keyed by 0.
        """
        monitor_db = database.MonitorDatabase()

        # Movies have no parent/grandparent hierarchy: single flat entry.
        if media_type == 'movie':
            key_list = {0: {'rating_key': int(rating_key)}}
            return key_list

        # Choose how tree nodes are keyed: by title (music) or index (video).
        if media_type == 'artist' or media_type == 'album' or media_type == 'track':
            match_type = 'title'
        else:
            match_type = 'index'

        # Get the grandparent rating key
        try:
            query = 'SELECT rating_key, parent_rating_key, grandparent_rating_key ' \
                    'FROM session_history_metadata ' \
                    'WHERE rating_key = ? ' \
                    'OR parent_rating_key = ? ' \
                    'OR grandparent_rating_key = ? ' \
                    'LIMIT 1'
            result = monitor_db.select(query=query, args=[rating_key, rating_key, rating_key])
            grandparent_rating_key = result[0]['grandparent_rating_key']
        except Exception as e:
            logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_rating_keys_list: %s." % e)
            return {}

        # Template query: {0} is the WHERE column, {1} the GROUP/ORDER column.
        query = 'SELECT rating_key, parent_rating_key, grandparent_rating_key, title, parent_title, grandparent_title, ' \
                'media_index, parent_media_index ' \
                'FROM session_history_metadata ' \
                'WHERE {0} = ? ' \
                'GROUP BY {1} ' \
                'ORDER BY {1} DESC '
        # NOTE(review): the nested loops below deliberately reuse the names
        # `result` and `item`.  Rebinding `result` mid-loop is safe (each
        # `for` holds its own iterator), and after an inner loop finishes,
        # `item` refers to the last row of that inner result set (or the
        # enclosing row when the inner set is empty) -- the parent_* /
        # grandparent_* fields are then read from that leftover row.
        # Renaming these variables would change behavior; left as-is.
        # get grandparent_rating_keys
        grandparents = {}
        result = monitor_db.select(query=query.format('grandparent_rating_key', 'grandparent_rating_key'),
                                   args=[grandparent_rating_key])
        for item in result:
            # get parent_rating_keys
            parents = {}
            result = monitor_db.select(query=query.format('grandparent_rating_key', 'parent_rating_key'),
                                       args=[item['grandparent_rating_key']])
            for item in result:
                # get rating_keys
                children = {}
                result = monitor_db.select(query=query.format('parent_rating_key', 'rating_key'),
                                           args=[item['parent_rating_key']])
                for item in result:
                    key = item['media_index'] if item['media_index'] else item['title']
                    children.update({key: {'rating_key': item['rating_key']}})
                key = item['parent_media_index'] if match_type == 'index' else item['parent_title']
                parents.update({key:
                                {'rating_key': item['parent_rating_key'],
                                 'children': children}
                                })
            key = 0 if match_type == 'index' else item['grandparent_title']
            grandparents.update({key:
                                 {'rating_key': item['grandparent_rating_key'],
                                  'children': parents}
                                 })
        key_list = grandparents
        return key_list
def delete_session_history_rows(self, row_id=None):
monitor_db = database.MonitorDatabase()
if row_id.isdigit():
logger.info(u"Tautulli DataFactory :: Deleting row id %s from the session history database." % row_id)
session_history_del = \
monitor_db.action('DELETE FROM session_history WHERE id = ?', [row_id])
session_history_media_info_del = \
monitor_db.action('DELETE FROM session_history_media_info WHERE id = ?', [row_id])
session_history_metadata_del = \
monitor_db.action('DELETE FROM session_history_metadata WHERE id = ?', [row_id])
return 'Deleted rows %s.' % row_id
else:
return 'Unable to delete rows. Input row not valid.'
def update_metadata(self, old_key_list='', new_key_list='', media_type=''):
pms_connect = pmsconnect.PmsConnect()
monitor_db = database.MonitorDatabase()
# function to map rating keys pairs
def get_pairs(old, new):
pairs = {}
for k, v in old.iteritems():
if k in new:
pairs.update({v['rating_key']: new[k]['rating_key']})
if 'children' in old[k]:
pairs.update(get_pairs(old[k]['children'], new[k]['children']))
return pairs
# map rating keys pairs
mapping = {}
if old_key_list and new_key_list:
mapping = get_pairs(old_key_list, new_key_list)
if mapping:
logger.info(u"Tautulli DataFactory :: Updating metadata in the database.")
for old_key, new_key in mapping.iteritems():
metadata = pms_connect.get_metadata_details(new_key)
if metadata:
if metadata['media_type'] == 'show' or metadata['media_type'] == 'artist':
# check grandparent_rating_key (2 tables)
monitor_db.action('UPDATE session_history SET grandparent_rating_key = ? WHERE grandparent_rating_key = ?',
[new_key, old_key])
monitor_db.action('UPDATE session_history_metadata SET grandparent_rating_key = ? WHERE grandparent_rating_key = ?',
[new_key, old_key])
elif metadata['media_type'] == 'season' or metadata['media_type'] == 'album':
# check parent_rating_key (2 tables)
monitor_db.action('UPDATE session_history SET parent_rating_key = ? WHERE parent_rating_key = ?',
[new_key, old_key])
monitor_db.action('UPDATE session_history_metadata SET parent_rating_key = ? WHERE parent_rating_key = ?',
[new_key, old_key])
else:
# check rating_key (2 tables)
monitor_db.action('UPDATE session_history SET rating_key = ? WHERE rating_key = ?',
[new_key, old_key])
monitor_db.action('UPDATE session_history_media_info SET rating_key = ? WHERE rating_key = ?',
[new_key, old_key])
# update session_history_metadata table
self.update_metadata_details(old_key, new_key, metadata)
return 'Updated metadata in database.'
else:
return 'Unable to update metadata in database. No changes were made.'
def update_metadata_details(self, old_rating_key='', new_rating_key='', metadata=None):
if metadata:
# Create full_title
if metadata['media_type'] == 'episode':
full_title = '%s - %s' % (metadata['grandparent_title'], metadata['title'])
elif metadata['media_type'] == 'track':
full_title = '%s - %s' % (metadata['title'],
metadata['original_title'] or metadata['grandparent_title'])
else:
full_title = metadata['title']
directors = ";".join(metadata['directors'])
writers = ";".join(metadata['writers'])
actors = ";".join(metadata['actors'])
genres = ";".join(metadata['genres'])
labels = ";".join(metadata['labels'])
#logger.info(u"Tautulli DataFactory :: Updating metadata in the database for rating key: %s." % new_rating_key)
monitor_db = database.MonitorDatabase()
# Update the session_history_metadata table
query = 'UPDATE session_history_metadata SET rating_key = ?, parent_rating_key = ?, ' \
'grandparent_rating_key = ?, title = ?, parent_title = ?, grandparent_title = ?, ' \
'original_title = ?, full_title = ?, ' \
'media_index = ?, parent_media_index = ?, section_id = ?, thumb = ?, parent_thumb = ?, ' \
'grandparent_thumb = ?, art = ?, media_type = ?, year = ?, originally_available_at = ?, ' \
'added_at = ?, updated_at = ?, last_viewed_at = ?, content_rating = ?, summary = ?, ' \
'tagline = ?, rating = ?, duration = ?, guid = ?, directors = ?, writers = ?, actors = ?, ' \
'genres = ?, studio = ?, labels = ? ' \
'WHERE rating_key = ?'
args = [metadata['rating_key'], metadata['parent_rating_key'], metadata['grandparent_rating_key'],
metadata['title'], metadata['parent_title'], metadata['grandparent_title'],
metadata['original_title'], full_title,
metadata['media_index'], metadata['parent_media_index'], metadata['section_id'], metadata['thumb'],
metadata['parent_thumb'], metadata['grandparent_thumb'], metadata['art'], metadata['media_type'],
metadata['year'], metadata['originally_available_at'], metadata['added_at'], metadata['updated_at'],
metadata['last_viewed_at'], metadata['content_rating'], metadata['summary'], metadata['tagline'],
metadata['rating'], metadata['duration'], metadata['guid'], directors, writers, actors, genres,
metadata['studio'], labels,
old_rating_key]
monitor_db.action(query=query, args=args)
def get_notification_log(self, kwargs=None):
data_tables = datatables.DataTables()
columns = ['notify_log.id',
'notify_log.timestamp',
'notify_log.session_key',
'notify_log.rating_key',
'notify_log.user_id',
'notify_log.user',
'notify_log.notifier_id',
'notify_log.agent_id',
'notify_log.agent_name',
'notify_log.notify_action',
'notify_log.subject_text',
'notify_log.body_text',
'notify_log.success'
]
try:
query = data_tables.ssp_query(table_name='notify_log',
columns=columns,
custom_where=[],
group_by=[],
join_types=[],
join_tables=[],
join_evals=[],
kwargs=kwargs)
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_notification_log: %s." % e)
return {'recordsFiltered': 0,
'recordsTotal': 0,
'draw': 0,
'data': 'null',
'error': 'Unable to execute database query.'}
notifications = query['result']
rows = []
for item in notifications:
if item['body_text']:
body_text = item['body_text'].replace('\r\n', '<br />').replace('\n', '<br />')
else:
body_text = ''
row = {'id': item['id'],
'timestamp': item['timestamp'],
'session_key': item['session_key'],
'rating_key': item['rating_key'],
'user_id': item['user_id'],
'user': item['user'],
'notifier_id': item['notifier_id'],
'agent_id': item['agent_id'],
'agent_name': item['agent_name'],
'notify_action': item['notify_action'],
'subject_text': item['subject_text'],
'body_text': body_text,
'success': item['success']
}
rows.append(row)
dict = {'recordsFiltered': query['filteredCount'],
'recordsTotal': query['totalCount'],
'data': rows,
'draw': query['draw']
}
return dict
def delete_notification_log(self):
monitor_db = database.MonitorDatabase()
try:
logger.info(u"Tautulli DataFactory :: Clearing notification logs from database.")
monitor_db.action('DELETE FROM notify_log')
monitor_db.action('VACUUM')
return True
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for delete_notification_log: %s." % e)
return False
def get_newsletter_log(self, kwargs=None):
data_tables = datatables.DataTables()
columns = ['newsletter_log.id',
'newsletter_log.timestamp',
'newsletter_log.newsletter_id',
'newsletter_log.agent_id',
'newsletter_log.agent_name',
'newsletter_log.notify_action',
'newsletter_log.subject_text',
'newsletter_log.body_text',
'newsletter_log.start_date',
'newsletter_log.end_date',
'newsletter_log.uuid',
'newsletter_log.success'
]
try:
query = data_tables.ssp_query(table_name='newsletter_log',
columns=columns,
custom_where=[],
group_by=[],
join_types=[],
join_tables=[],
join_evals=[],
kwargs=kwargs)
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_newsletter_log: %s." % e)
return {'recordsFiltered': 0,
'recordsTotal': 0,
'draw': 0,
'data': 'null',
'error': 'Unable to execute database query.'}
newsletters = query['result']
rows = []
for item in newsletters:
row = {'id': item['id'],
'timestamp': item['timestamp'],
'newsletter_id': item['newsletter_id'],
'agent_id': item['agent_id'],
'agent_name': item['agent_name'],
'notify_action': item['notify_action'],
'subject_text': item['subject_text'],
'body_text': item['body_text'],
'start_date': item['start_date'],
'end_date': item['end_date'],
'uuid': item['uuid'],
'success': item['success']
}
rows.append(row)
dict = {'recordsFiltered': query['filteredCount'],
'recordsTotal': query['totalCount'],
'data': rows,
'draw': query['draw']
}
return dict
def delete_newsletter_log(self):
monitor_db = database.MonitorDatabase()
try:
logger.info(u"Tautulli DataFactory :: Clearing newsletter logs from database.")
monitor_db.action('DELETE FROM newsletter_log')
monitor_db.action('VACUUM')
return True
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for delete_newsletter_log: %s." % e)
return False
def get_user_devices(self, user_id=''):
monitor_db = database.MonitorDatabase()
if user_id:
try:
query = 'SELECT machine_id FROM session_history WHERE user_id = ? GROUP BY machine_id'
result = monitor_db.select(query=query, args=[user_id])
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_user_devices: %s." % e)
return []
else:
return []
return [d['machine_id'] for d in result]
def get_recently_added_item(self, rating_key=''):
monitor_db = database.MonitorDatabase()
if rating_key:
try:
query = 'SELECT * FROM recently_added WHERE rating_key = ?'
result = monitor_db.select(query=query, args=[rating_key])
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for get_recently_added_item: %s." % e)
return []
else:
return []
return result
def set_recently_added_item(self, rating_key=''):
monitor_db = database.MonitorDatabase()
pms_connect = pmsconnect.PmsConnect()
metadata = pms_connect.get_metadata_details(rating_key)
keys = {'rating_key': metadata['rating_key']}
values = {'added_at': metadata['added_at'],
'section_id': metadata['section_id'],
'parent_rating_key': metadata['parent_rating_key'],
'grandparent_rating_key': metadata['grandparent_rating_key'],
'media_type': metadata['media_type'],
'media_info': json.dumps(metadata['media_info'])
}
try:
monitor_db.upsert(table_name='recently_added', key_dict=keys, value_dict=values)
except Exception as e:
logger.warn(u"Tautulli DataFactory :: Unable to execute database query for set_recently_added_item: %s." % e)
return False
return True
| [
"# This file is part of Tautulli.\n",
"#\n",
"# Tautulli is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# Tautulli is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with Tautulli. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"import json\n",
"from itertools import groupby\n",
"\n",
"import plexpy\n",
"import common\n",
"import database\n",
"import datatables\n",
"import helpers\n",
"import logger\n",
"import pmsconnect\n",
"import session\n",
"\n",
"\n",
"class DataFactory(object):\n",
" \"\"\"\n",
" Retrieve and process data from the monitor database\n",
" \"\"\"\n",
"\n",
" def __init__(self):\n",
" pass\n",
"\n",
" def get_datatables_history(self, kwargs=None, custom_where=None, grouping=None):\n",
" data_tables = datatables.DataTables()\n",
"\n",
" if custom_where is None:\n",
"            custom_where = []\n",
"\n",
" if grouping is None:\n",
" grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES\n",
"\n",
" if session.get_session_user_id():\n",
" session_user_id = str(session.get_session_user_id())\n",
" added = False\n",
"\n",
" for c_where in custom_where:\n",
" if 'user_id' in c_where[0]:\n",
" # This currently only works if c_where[1] is not a list or tuple\n",
" if str(c_where[1]) == session_user_id:\n",
" added = True\n",
" break\n",
" else:\n",
" c_where[1] = (c_where[1], session_user_id)\n",
" added = True\n",
"\n",
" if not added:\n",
" custom_where.append(['session_history.user_id', session.get_session_user_id()])\n",
"\n",
" group_by = ['session_history.reference_id'] if grouping else ['session_history.id']\n",
"\n",
" columns = [\n",
" 'session_history.reference_id',\n",
" 'session_history.id',\n",
" 'MAX(started) AS date',\n",
" 'MIN(started) AS started',\n",
" 'MAX(stopped) AS stopped',\n",
" 'SUM(CASE WHEN stopped > 0 THEN (stopped - started) ELSE 0 END) - \\\n",
" SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS duration',\n",
" 'SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS paused_counter',\n",
" 'session_history.user_id',\n",
" 'session_history.user',\n",
" '(CASE WHEN users.friendly_name IS NULL OR TRIM(users.friendly_name) = \"\" \\\n",
" THEN users.username ELSE users.friendly_name END) AS friendly_name',\n",
" 'platform',\n",
" 'player',\n",
" 'ip_address',\n",
" 'session_history.media_type',\n",
" 'session_history_metadata.rating_key',\n",
" 'session_history_metadata.parent_rating_key',\n",
" 'session_history_metadata.grandparent_rating_key',\n",
" 'session_history_metadata.full_title',\n",
" 'session_history_metadata.title',\n",
" 'session_history_metadata.parent_title',\n",
" 'session_history_metadata.grandparent_title',\n",
" 'session_history_metadata.original_title',\n",
" 'session_history_metadata.year',\n",
" 'session_history_metadata.media_index',\n",
" 'session_history_metadata.parent_media_index',\n",
" 'session_history_metadata.thumb',\n",
" 'session_history_metadata.parent_thumb',\n",
" 'session_history_metadata.grandparent_thumb',\n",
" 'MAX((CASE WHEN (view_offset IS NULL OR view_offset = \"\") THEN 0.1 ELSE view_offset * 1.0 END) / \\\n",
" (CASE WHEN (session_history_metadata.duration IS NULL OR session_history_metadata.duration = \"\") \\\n",
" THEN 1.0 ELSE session_history_metadata.duration * 1.0 END) * 100) AS percent_complete',\n",
" 'session_history_media_info.transcode_decision',\n",
" 'COUNT(*) AS group_count',\n",
" 'GROUP_CONCAT(session_history.id) AS group_ids',\n",
" 'NULL AS state',\n",
" 'NULL AS session_key'\n",
" ]\n",
"\n",
" if plexpy.CONFIG.HISTORY_TABLE_ACTIVITY:\n",
" table_name_union = 'sessions'\n",
" # Very hacky way to match the custom where parameters for the unioned table\n",
" custom_where_union = [[c[0].split('.')[-1], c[1]] for c in custom_where]\n",
" group_by_union = ['session_key']\n",
"\n",
" columns_union = [\n",
" 'NULL AS reference_id',\n",
" 'NULL AS id',\n",
" 'started AS date',\n",
" 'started',\n",
" 'stopped',\n",
" 'SUM(CASE WHEN stopped > 0 THEN (stopped - started) ELSE (strftime(\"%s\", \"now\") - started) END) - \\\n",
" SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS duration',\n",
" 'SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS paused_counter',\n",
" 'user_id',\n",
" 'user',\n",
" '(CASE WHEN friendly_name IS NULL OR TRIM(friendly_name) = \"\" \\\n",
" THEN user ELSE friendly_name END) AS friendly_name',\n",
" 'platform',\n",
" 'player',\n",
" 'ip_address',\n",
" 'media_type',\n",
" 'rating_key',\n",
" 'parent_rating_key',\n",
" 'grandparent_rating_key',\n",
" 'full_title',\n",
" 'title',\n",
" 'parent_title',\n",
" 'grandparent_title',\n",
" 'original_title',\n",
" 'year',\n",
" 'media_index',\n",
" 'parent_media_index',\n",
" 'thumb',\n",
" 'parent_thumb',\n",
" 'grandparent_thumb',\n",
" 'MAX((CASE WHEN (view_offset IS NULL OR view_offset = \"\") THEN 0.1 ELSE view_offset * 1.0 END) / \\\n",
" (CASE WHEN (duration IS NULL OR duration = \"\") \\\n",
" THEN 1.0 ELSE duration * 1.0 END) * 100) AS percent_complete',\n",
" 'transcode_decision',\n",
" 'NULL AS group_count',\n",
" 'NULL AS group_ids',\n",
" 'state',\n",
" 'session_key'\n",
" ]\n",
"\n",
" else:\n",
" table_name_union = None\n",
" custom_where_union = group_by_union = columns_union = []\n",
"\n",
" try:\n",
" query = data_tables.ssp_query(table_name='session_history',\n",
" table_name_union=table_name_union,\n",
" columns=columns,\n",
" columns_union=columns_union,\n",
" custom_where=custom_where,\n",
" custom_where_union=custom_where_union,\n",
" group_by=group_by,\n",
" group_by_union=group_by_union,\n",
" join_types=['LEFT OUTER JOIN',\n",
" 'JOIN',\n",
" 'JOIN'],\n",
" join_tables=['users',\n",
" 'session_history_metadata',\n",
" 'session_history_media_info'],\n",
" join_evals=[['session_history.user_id', 'users.user_id'],\n",
" ['session_history.id', 'session_history_metadata.id'],\n",
" ['session_history.id', 'session_history_media_info.id']],\n",
" kwargs=kwargs)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_history: %s.\" % e)\n",
" return {'recordsFiltered': 0,\n",
" 'recordsTotal': 0,\n",
" 'draw': 0,\n",
" 'data': 'null',\n",
" 'error': 'Unable to execute database query.'}\n",
"\n",
" history = query['result']\n",
"\n",
" filter_duration = 0\n",
" total_duration = self.get_total_duration(custom_where=custom_where)\n",
"\n",
" watched_percent = {'movie': plexpy.CONFIG.MOVIE_WATCHED_PERCENT,\n",
" 'episode': plexpy.CONFIG.TV_WATCHED_PERCENT,\n",
" 'track': plexpy.CONFIG.MUSIC_WATCHED_PERCENT,\n",
" 'photo': 0,\n",
" 'clip': plexpy.CONFIG.TV_WATCHED_PERCENT\n",
" }\n",
"\n",
" rows = []\n",
" for item in history:\n",
" filter_duration += int(item['duration'])\n",
"\n",
" if item['media_type'] == 'episode' and item['parent_thumb']:\n",
" thumb = item['parent_thumb']\n",
" elif item['media_type'] == 'episode':\n",
" thumb = item['grandparent_thumb']\n",
" else:\n",
" thumb = item['thumb']\n",
"\n",
" if item['percent_complete'] >= watched_percent[item['media_type']]:\n",
" watched_status = 1\n",
" elif item['percent_complete'] >= watched_percent[item['media_type']]/2:\n",
" watched_status = 0.5\n",
" else:\n",
" watched_status = 0\n",
"\n",
" # Rename Mystery platform names\n",
" platform = common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform'])\n",
"\n",
" row = {'reference_id': item['reference_id'],\n",
" 'id': item['id'],\n",
" 'date': item['date'],\n",
" 'started': item['started'],\n",
" 'stopped': item['stopped'],\n",
" 'duration': item['duration'],\n",
" 'paused_counter': item['paused_counter'],\n",
" 'user_id': item['user_id'],\n",
" 'user': item['user'],\n",
" 'friendly_name': item['friendly_name'],\n",
" 'platform': platform,\n",
" 'player': item['player'],\n",
" 'ip_address': item['ip_address'],\n",
" 'media_type': item['media_type'],\n",
" 'rating_key': item['rating_key'],\n",
" 'parent_rating_key': item['parent_rating_key'],\n",
" 'grandparent_rating_key': item['grandparent_rating_key'],\n",
" 'full_title': item['full_title'],\n",
" 'title': item['parent_title'],\n",
" 'parent_title': item['parent_title'],\n",
" 'grandparent_title': item['grandparent_title'],\n",
" 'original_title': item['original_title'],\n",
" 'year': item['year'],\n",
" 'media_index': item['media_index'],\n",
" 'parent_media_index': item['parent_media_index'],\n",
" 'thumb': thumb,\n",
" 'transcode_decision': item['transcode_decision'],\n",
" 'percent_complete': int(round(item['percent_complete'])),\n",
" 'watched_status': watched_status,\n",
" 'group_count': item['group_count'],\n",
" 'group_ids': item['group_ids'],\n",
" 'state': item['state'],\n",
" 'session_key': item['session_key']\n",
" }\n",
"\n",
" rows.append(row)\n",
"\n",
" dict = {'recordsFiltered': query['filteredCount'],\n",
" 'recordsTotal': query['totalCount'],\n",
" 'data': session.friendly_name_to_username(rows),\n",
" 'draw': query['draw'],\n",
" 'filter_duration': helpers.human_duration(filter_duration, sig='dhm'),\n",
" 'total_duration': helpers.human_duration(total_duration, sig='dhm')\n",
" }\n",
"\n",
" return dict\n",
"\n",
" def get_home_stats(self, grouping=None, time_range=30, stats_type='plays', stats_count=10, stats_cards=None):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" if grouping is None:\n",
" grouping = plexpy.CONFIG.GROUP_HISTORY_TABLES\n",
" if stats_cards is None:\n",
" stats_cards = plexpy.CONFIG.HOME_STATS_CARDS\n",
"\n",
" movie_watched_percent = plexpy.CONFIG.MOVIE_WATCHED_PERCENT\n",
" tv_watched_percent = plexpy.CONFIG.TV_WATCHED_PERCENT\n",
" music_watched_percent = plexpy.CONFIG.MUSIC_WATCHED_PERCENT\n",
"\n",
" group_by = 'session_history.reference_id' if grouping else 'session_history.id'\n",
" sort_type = 'total_duration' if stats_type == 'duration' else 'total_plays'\n",
"\n",
" home_stats = []\n",
"\n",
" for stat in stats_cards:\n",
" if stat == 'top_movies':\n",
" top_movies = []\n",
" try:\n",
" query = 'SELECT t.id, t.full_title, t.rating_key, t.thumb, t.section_id, ' \\\n",
" 't.art, t.media_type, t.content_rating, t.labels, t.started, ' \\\n",
" 'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \\\n",
" 'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \\\n",
" ' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \\\n",
" ' AS d ' \\\n",
" ' FROM session_history ' \\\n",
" ' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \\\n",
" ' WHERE datetime(session_history.stopped, \"unixepoch\", \"localtime\") ' \\\n",
" ' >= datetime(\"now\", \"-%s days\", \"localtime\") ' \\\n",
" ' AND session_history.media_type = \"movie\" ' \\\n",
" ' GROUP BY %s) AS t ' \\\n",
" 'GROUP BY t.full_title ' \\\n",
" 'ORDER BY %s DESC, started DESC ' \\\n",
" 'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_home_stats: top_movies: %s.\" % e)\n",
" return None\n",
"\n",
" for item in result:\n",
" row = {'title': item['full_title'],\n",
" 'total_plays': item['total_plays'],\n",
" 'total_duration': item['total_duration'],\n",
" 'users_watched': '',\n",
" 'rating_key': item['rating_key'],\n",
" 'last_play': item['last_watch'],\n",
" 'grandparent_thumb': '',\n",
" 'thumb': item['thumb'],\n",
" 'art': item['art'],\n",
" 'section_id': item['section_id'],\n",
" 'media_type': item['media_type'],\n",
" 'content_rating': item['content_rating'],\n",
" 'labels': item['labels'].split(';') if item['labels'] else (),\n",
" 'user': '',\n",
" 'friendly_name': '',\n",
"                           'platform': '',\n",
" 'row_id': item['id']\n",
" }\n",
" top_movies.append(row)\n",
"\n",
" home_stats.append({'stat_id': stat,\n",
" 'stat_type': sort_type,\n",
" 'stat_title': 'Most Watched Movies',\n",
" 'rows': session.mask_session_info(top_movies)})\n",
"\n",
" elif stat == 'popular_movies':\n",
" popular_movies = []\n",
" try:\n",
" query = 'SELECT t.id, t.full_title, t.rating_key, t.thumb, t.section_id, ' \\\n",
" 't.art, t.media_type, t.content_rating, t.labels, t.started, ' \\\n",
" 'COUNT(DISTINCT t.user_id) AS users_watched, ' \\\n",
" 'MAX(t.started) AS last_watch, COUNT(t.id) as total_plays, SUM(t.d) AS total_duration ' \\\n",
" 'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \\\n",
" ' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \\\n",
" ' AS d ' \\\n",
" ' FROM session_history ' \\\n",
" ' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \\\n",
" ' WHERE datetime(session_history.stopped, \"unixepoch\", \"localtime\") ' \\\n",
" ' >= datetime(\"now\", \"-%s days\", \"localtime\") ' \\\n",
" ' AND session_history.media_type = \"movie\" ' \\\n",
" ' GROUP BY %s) AS t ' \\\n",
" 'GROUP BY t.full_title ' \\\n",
" 'ORDER BY users_watched DESC, %s DESC, started DESC ' \\\n",
" 'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_home_stats: popular_movies: %s.\" % e)\n",
" return None\n",
"\n",
" for item in result:\n",
" row = {'title': item['full_title'],\n",
" 'users_watched': item['users_watched'],\n",
" 'rating_key': item['rating_key'],\n",
" 'last_play': item['last_watch'],\n",
" 'total_plays': item['total_plays'],\n",
" 'grandparent_thumb': '',\n",
" 'thumb': item['thumb'],\n",
" 'art': item['art'],\n",
" 'section_id': item['section_id'],\n",
" 'media_type': item['media_type'],\n",
" 'content_rating': item['content_rating'],\n",
" 'labels': item['labels'].split(';') if item['labels'] else (),\n",
" 'user': '',\n",
" 'friendly_name': '',\n",
" 'platform': '',\n",
" 'row_id': item['id']\n",
" }\n",
" popular_movies.append(row)\n",
"\n",
" home_stats.append({'stat_id': stat,\n",
" 'stat_title': 'Most Popular Movies',\n",
" 'rows': session.mask_session_info(popular_movies)})\n",
"\n",
" elif stat == 'top_tv':\n",
" top_tv = []\n",
" try:\n",
" query = 'SELECT t.id, t.grandparent_title, t.grandparent_rating_key, t.grandparent_thumb, t.section_id, ' \\\n",
" 't.art, t.media_type, t.content_rating, t.labels, t.started, ' \\\n",
" 'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \\\n",
" 'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \\\n",
" ' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \\\n",
" ' AS d ' \\\n",
" ' FROM session_history ' \\\n",
" ' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \\\n",
" ' WHERE datetime(session_history.stopped, \"unixepoch\", \"localtime\") ' \\\n",
" ' >= datetime(\"now\", \"-%s days\", \"localtime\") ' \\\n",
" ' AND session_history.media_type = \"episode\" ' \\\n",
" ' GROUP BY %s) AS t ' \\\n",
" 'GROUP BY t.grandparent_title ' \\\n",
" 'ORDER BY %s DESC, started DESC ' \\\n",
" 'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_home_stats: top_tv: %s.\" % e)\n",
" return None\n",
"\n",
" for item in result:\n",
" row = {'title': item['grandparent_title'],\n",
" 'total_plays': item['total_plays'],\n",
" 'total_duration': item['total_duration'],\n",
" 'users_watched': '',\n",
" 'rating_key': item['grandparent_rating_key'],\n",
" 'last_play': item['last_watch'],\n",
" 'grandparent_thumb': item['grandparent_thumb'],\n",
" 'thumb': item['grandparent_thumb'],\n",
" 'art': item['art'],\n",
" 'section_id': item['section_id'],\n",
" 'media_type': item['media_type'],\n",
" 'content_rating': item['content_rating'],\n",
" 'labels': item['labels'].split(';') if item['labels'] else (),\n",
" 'user': '',\n",
" 'friendly_name': '',\n",
" 'platform': '',\n",
" 'row_id': item['id']\n",
" }\n",
" top_tv.append(row)\n",
"\n",
" home_stats.append({'stat_id': stat,\n",
" 'stat_type': sort_type,\n",
" 'stat_title': 'Most Watched TV Shows',\n",
" 'rows': session.mask_session_info(top_tv)})\n",
"\n",
" elif stat == 'popular_tv':\n",
" popular_tv = []\n",
" try:\n",
" query = 'SELECT t.id, t.grandparent_title, t.grandparent_rating_key, t.grandparent_thumb, t.section_id, ' \\\n",
" 't.art, t.media_type, t.content_rating, t.labels, t.started, ' \\\n",
" 'COUNT(DISTINCT t.user_id) AS users_watched, ' \\\n",
" 'MAX(t.started) AS last_watch, COUNT(t.id) as total_plays, SUM(t.d) AS total_duration ' \\\n",
" 'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \\\n",
" ' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \\\n",
" ' AS d ' \\\n",
" ' FROM session_history ' \\\n",
" ' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \\\n",
" ' WHERE datetime(session_history.stopped, \"unixepoch\", \"localtime\") ' \\\n",
" ' >= datetime(\"now\", \"-%s days\", \"localtime\") ' \\\n",
" ' AND session_history.media_type = \"episode\" ' \\\n",
" ' GROUP BY %s) AS t ' \\\n",
" 'GROUP BY t.grandparent_title ' \\\n",
" 'ORDER BY users_watched DESC, %s DESC, started DESC ' \\\n",
" 'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_home_stats: popular_tv: %s.\" % e)\n",
" return None\n",
"\n",
" for item in result:\n",
" row = {'title': item['grandparent_title'],\n",
" 'users_watched': item['users_watched'],\n",
" 'rating_key': item['grandparent_rating_key'],\n",
" 'last_play': item['last_watch'],\n",
" 'total_plays': item['total_plays'],\n",
" 'grandparent_thumb': item['grandparent_thumb'],\n",
" 'thumb': item['grandparent_thumb'],\n",
" 'art': item['art'],\n",
" 'section_id': item['section_id'],\n",
" 'media_type': item['media_type'],\n",
" 'content_rating': item['content_rating'],\n",
" 'labels': item['labels'].split(';') if item['labels'] else (),\n",
" 'user': '',\n",
" 'friendly_name': '',\n",
" 'platform': '',\n",
" 'row_id': item['id']\n",
" }\n",
" popular_tv.append(row)\n",
"\n",
" home_stats.append({'stat_id': stat,\n",
" 'stat_title': 'Most Popular TV Shows',\n",
" 'rows': session.mask_session_info(popular_tv)})\n",
"\n",
" elif stat == 'top_music':\n",
" top_music = []\n",
" try:\n",
" query = 'SELECT t.id, t.grandparent_title, t.original_title, ' \\\n",
" 't.grandparent_rating_key, t.grandparent_thumb, t.section_id, ' \\\n",
" 't.art, t.media_type, t.content_rating, t.labels, t.started, ' \\\n",
" 'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \\\n",
" 'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \\\n",
" ' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \\\n",
" ' AS d ' \\\n",
" ' FROM session_history ' \\\n",
" ' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \\\n",
" ' WHERE datetime(session_history.stopped, \"unixepoch\", \"localtime\") ' \\\n",
" ' >= datetime(\"now\", \"-%s days\", \"localtime\") ' \\\n",
" ' AND session_history.media_type = \"track\" ' \\\n",
" ' GROUP BY %s) AS t ' \\\n",
" 'GROUP BY t.original_title, t.grandparent_title ' \\\n",
" 'ORDER BY %s DESC, started DESC ' \\\n",
" 'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_home_stats: top_music: %s.\" % e)\n",
" return None\n",
"\n",
" for item in result:\n",
" row = {'title': item['original_title'] or item['grandparent_title'],\n",
" 'total_plays': item['total_plays'],\n",
" 'total_duration': item['total_duration'],\n",
" 'users_watched': '',\n",
" 'rating_key': item['grandparent_rating_key'],\n",
" 'last_play': item['last_watch'],\n",
" 'grandparent_thumb': item['grandparent_thumb'],\n",
" 'thumb': item['grandparent_thumb'],\n",
" 'art': item['art'],\n",
" 'section_id': item['section_id'],\n",
" 'media_type': item['media_type'],\n",
" 'content_rating': item['content_rating'],\n",
" 'labels': item['labels'].split(';') if item['labels'] else (),\n",
" 'user': '',\n",
" 'friendly_name': '',\n",
" 'platform': '',\n",
" 'row_id': item['id']\n",
" }\n",
" top_music.append(row)\n",
"\n",
" home_stats.append({'stat_id': stat,\n",
" 'stat_type': sort_type,\n",
" 'stat_title': 'Most Played Artists',\n",
" 'rows': session.mask_session_info(top_music)})\n",
"\n",
" elif stat == 'popular_music':\n",
" popular_music = []\n",
" try:\n",
" query = 'SELECT t.id, t.grandparent_title, t.original_title, ' \\\n",
" 't.grandparent_rating_key, t.grandparent_thumb, t.section_id, ' \\\n",
" 't.art, t.media_type, t.content_rating, t.labels, t.started, ' \\\n",
" 'COUNT(DISTINCT t.user_id) AS users_watched, ' \\\n",
" 'MAX(t.started) AS last_watch, COUNT(t.id) as total_plays, SUM(t.d) AS total_duration ' \\\n",
" 'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \\\n",
" ' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \\\n",
" ' AS d ' \\\n",
" ' FROM session_history ' \\\n",
" ' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \\\n",
" ' WHERE datetime(session_history.stopped, \"unixepoch\", \"localtime\") ' \\\n",
" ' >= datetime(\"now\", \"-%s days\", \"localtime\") ' \\\n",
" ' AND session_history.media_type = \"track\" ' \\\n",
" ' GROUP BY %s) AS t ' \\\n",
" 'GROUP BY t.original_title, t.grandparent_title ' \\\n",
" 'ORDER BY users_watched DESC, %s DESC, started DESC ' \\\n",
" 'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_home_stats: popular_music: %s.\" % e)\n",
" return None\n",
"\n",
" for item in result:\n",
" row = {'title': item['original_title'] or item['grandparent_title'],\n",
" 'users_watched': item['users_watched'],\n",
" 'rating_key': item['grandparent_rating_key'],\n",
" 'last_play': item['last_watch'],\n",
" 'total_plays': item['total_plays'],\n",
" 'grandparent_thumb': item['grandparent_thumb'],\n",
" 'thumb': item['grandparent_thumb'],\n",
" 'art': item['art'],\n",
" 'section_id': item['section_id'],\n",
" 'media_type': item['media_type'],\n",
" 'content_rating': item['content_rating'],\n",
" 'labels': item['labels'].split(';') if item['labels'] else (),\n",
" 'user': '',\n",
" 'friendly_name': '',\n",
" 'platform': '',\n",
" 'row_id': item['id']\n",
" }\n",
" popular_music.append(row)\n",
"\n",
" home_stats.append({'stat_id': stat,\n",
" 'stat_title': 'Most Popular Artists',\n",
" 'rows': session.mask_session_info(popular_music)})\n",
"\n",
" elif stat == 'top_users':\n",
" top_users = []\n",
" try:\n",
" query = 'SELECT t.user, t.user_id, t.user_thumb, t.custom_thumb, t.started, ' \\\n",
" '(CASE WHEN t.friendly_name IS NULL THEN t.username ELSE t.friendly_name END) ' \\\n",
" ' AS friendly_name, ' \\\n",
" 'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \\\n",
" 'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \\\n",
" ' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \\\n",
" ' AS d, users.thumb AS user_thumb, users.custom_avatar_url AS custom_thumb ' \\\n",
" ' FROM session_history ' \\\n",
" ' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \\\n",
" ' LEFT OUTER JOIN users ON session_history.user_id = users.user_id ' \\\n",
" ' WHERE datetime(session_history.stopped, \"unixepoch\", \"localtime\") ' \\\n",
" ' >= datetime(\"now\", \"-%s days\", \"localtime\") ' \\\n",
" ' GROUP BY %s) AS t ' \\\n",
" 'GROUP BY t.user_id ' \\\n",
" 'ORDER BY %s DESC, started DESC ' \\\n",
" 'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_home_stats: top_users: %s.\" % e)\n",
" return None\n",
"\n",
" for item in result:\n",
" if item['custom_thumb'] and item['custom_thumb'] != item['user_thumb']:\n",
" user_thumb = item['custom_thumb']\n",
" elif item['user_thumb']:\n",
" user_thumb = item['user_thumb']\n",
" else:\n",
" user_thumb = common.DEFAULT_USER_THUMB\n",
"\n",
" row = {'user': item['user'],\n",
" 'user_id': item['user_id'],\n",
" 'friendly_name': item['friendly_name'],\n",
" 'total_plays': item['total_plays'],\n",
" 'total_duration': item['total_duration'],\n",
" 'last_play': item['last_watch'],\n",
" 'user_thumb': user_thumb,\n",
" 'grandparent_thumb': '',\n",
" 'art': '',\n",
" 'users_watched': '',\n",
" 'rating_key': '',\n",
" 'title': '',\n",
" 'platform': '',\n",
" 'row_id': ''\n",
" }\n",
" top_users.append(row)\n",
"\n",
" home_stats.append({'stat_id': stat,\n",
" 'stat_type': sort_type,\n",
" 'stat_title': 'Most Active Users',\n",
" 'rows': session.mask_session_info(top_users, mask_metadata=False)})\n",
"\n",
" elif stat == 'top_platforms':\n",
" top_platform = []\n",
"\n",
" try:\n",
" query = 'SELECT t.platform, t.started, ' \\\n",
" 'MAX(t.started) AS last_watch, COUNT(t.id) AS total_plays, SUM(t.d) AS total_duration ' \\\n",
" 'FROM (SELECT *, SUM(CASE WHEN stopped > 0 THEN (stopped - started) - ' \\\n",
" ' (CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) ELSE 0 END) ' \\\n",
" ' AS d ' \\\n",
" ' FROM session_history ' \\\n",
" ' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \\\n",
" ' WHERE datetime(session_history.stopped, \"unixepoch\", \"localtime\") ' \\\n",
" ' >= datetime(\"now\", \"-%s days\", \"localtime\") ' \\\n",
" ' GROUP BY %s) AS t ' \\\n",
" 'GROUP BY t.platform ' \\\n",
" 'ORDER BY %s DESC, started DESC ' \\\n",
" 'LIMIT %s ' % (time_range, group_by, sort_type, stats_count)\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_home_stats: top_platforms: %s.\" % e)\n",
" return None\n",
"\n",
" for item in result:\n",
" # Rename Mystery platform names\n",
" platform = common.PLATFORM_NAME_OVERRIDES.get(item['platform'], item['platform'])\n",
" platform_name = next((v for k, v in common.PLATFORM_NAMES.iteritems() if k in platform.lower()), 'default')\n",
"\n",
" row = {'total_plays': item['total_plays'],\n",
" 'total_duration': item['total_duration'],\n",
" 'last_play': item['last_watch'],\n",
" 'platform': platform,\n",
" 'platform_name': platform_name,\n",
" 'title': '',\n",
" 'thumb': '',\n",
" 'grandparent_thumb': '',\n",
" 'art': '',\n",
" 'users_watched': '',\n",
" 'rating_key': '',\n",
" 'user': '',\n",
" 'friendly_name': '',\n",
" 'row_id': ''\n",
" }\n",
" top_platform.append(row)\n",
"\n",
" home_stats.append({'stat_id': stat,\n",
" 'stat_type': sort_type,\n",
" 'stat_title': 'Most Active Platforms',\n",
" 'rows': session.mask_session_info(top_platform, mask_metadata=False)})\n",
"\n",
" elif stat == 'last_watched':\n",
" last_watched = []\n",
" try:\n",
" query = 'SELECT t.id, t.full_title, t.rating_key, t.thumb, t.grandparent_thumb, ' \\\n",
" 't.user, t.user_id, t.custom_avatar_url as user_thumb, t.player, t.section_id, ' \\\n",
" 't.art, t.media_type, t.content_rating, t.labels, ' \\\n",
" '(CASE WHEN t.friendly_name IS NULL THEN t.username ELSE t.friendly_name END) ' \\\n",
" ' AS friendly_name, ' \\\n",
" 'MAX(t.started) AS last_watch, ' \\\n",
" '((CASE WHEN t.view_offset IS NULL THEN 0.1 ELSE t.view_offset * 1.0 END) / ' \\\n",
" ' (CASE WHEN t.duration IS NULL THEN 1.0 ELSE t.duration * 1.0 END) * 100) ' \\\n",
" ' AS percent_complete ' \\\n",
" 'FROM (SELECT * FROM session_history ' \\\n",
" ' JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \\\n",
" ' LEFT OUTER JOIN users ON session_history.user_id = users.user_id ' \\\n",
" ' WHERE datetime(session_history.stopped, \"unixepoch\", \"localtime\") ' \\\n",
" ' >= datetime(\"now\", \"-%s days\", \"localtime\") ' \\\n",
" ' AND (session_history.media_type = \"movie\" ' \\\n",
" ' OR session_history_metadata.media_type = \"episode\") ' \\\n",
" ' GROUP BY %s) AS t ' \\\n",
" 'WHERE t.media_type == \"movie\" AND percent_complete >= %s ' \\\n",
" ' OR t.media_type == \"episode\" AND percent_complete >= %s ' \\\n",
" 'GROUP BY t.id ' \\\n",
" 'ORDER BY last_watch DESC ' \\\n",
" 'LIMIT %s' % (time_range, group_by, movie_watched_percent, tv_watched_percent, stats_count)\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_home_stats: last_watched: %s.\" % e)\n",
" return None\n",
"\n",
" for item in result:\n",
" if not item['grandparent_thumb'] or item['grandparent_thumb'] == '':\n",
" thumb = item['thumb']\n",
" else:\n",
" thumb = item['grandparent_thumb']\n",
"\n",
" row = {'row_id': item['id'],\n",
" 'user': item['user'],\n",
" 'friendly_name': item['friendly_name'],\n",
" 'user_id': item['user_id'],\n",
" 'user_thumb': item['user_thumb'],\n",
" 'title': item['full_title'],\n",
" 'rating_key': item['rating_key'],\n",
" 'thumb': thumb,\n",
" 'grandparent_thumb': item['grandparent_thumb'],\n",
" 'art': item['art'],\n",
" 'section_id': item['section_id'],\n",
" 'media_type': item['media_type'],\n",
" 'content_rating': item['content_rating'],\n",
" 'labels': item['labels'].split(';') if item['labels'] else (),\n",
" 'last_watch': item['last_watch'],\n",
" 'player': item['player']\n",
" }\n",
" last_watched.append(row)\n",
"\n",
" home_stats.append({'stat_id': stat,\n",
" 'stat_title': 'Recently Watched',\n",
" 'rows': session.mask_session_info(last_watched)})\n",
"\n",
" elif stat == 'most_concurrent':\n",
"\n",
" def calc_most_concurrent(title, result):\n",
" '''\n",
" Function to calculate most concurrent streams\n",
" Input: Stat title, SQLite query result\n",
" Output: Dict {title, count, started, stopped}\n",
" '''\n",
" times = []\n",
" for item in result:\n",
" times.append({'time': str(item['started']) + 'B', 'count': 1})\n",
" times.append({'time': str(item['stopped']) + 'A', 'count': -1})\n",
" times = sorted(times, key=lambda k: k['time']) \n",
"\n",
" count = 0\n",
" last_count = 0\n",
" last_start = ''\n",
" concurrent = {'title': title,\n",
" 'count': 0,\n",
" 'started': None,\n",
" 'stopped': None\n",
" }\n",
"\n",
" for d in times:\n",
" if d['count'] == 1:\n",
" count += d['count']\n",
" if count >= last_count:\n",
" last_start = d['time']\n",
" else:\n",
" if count >= last_count:\n",
" last_count = count\n",
" concurrent['count'] = count\n",
" concurrent['started'] = last_start[:-1]\n",
" concurrent['stopped'] = d['time'][:-1]\n",
" count += d['count']\n",
"\n",
" return concurrent\n",
"\n",
" most_concurrent = []\n",
"\n",
" try:\n",
" base_query = 'SELECT session_history.started, session_history.stopped ' \\\n",
" 'FROM session_history ' \\\n",
" 'JOIN session_history_media_info ON session_history.id = session_history_media_info.id ' \\\n",
" 'WHERE datetime(stopped, \"unixepoch\", \"localtime\") ' \\\n",
" '>= datetime(\"now\", \"-%s days\", \"localtime\") ' % time_range\n",
"\n",
" title = 'Concurrent Streams'\n",
" query = base_query\n",
" result = monitor_db.select(query)\n",
" if result:\n",
" most_concurrent.append(calc_most_concurrent(title, result))\n",
"\n",
" title = 'Concurrent Transcodes'\n",
" query = base_query \\\n",
" + 'AND session_history_media_info.transcode_decision = \"transcode\" '\n",
" result = monitor_db.select(query)\n",
" if result:\n",
" most_concurrent.append(calc_most_concurrent(title, result))\n",
"\n",
" title = 'Concurrent Direct Streams'\n",
" query = base_query \\\n",
" + 'AND session_history_media_info.transcode_decision = \"copy\" '\n",
" result = monitor_db.select(query)\n",
" if result:\n",
" most_concurrent.append(calc_most_concurrent(title, result))\n",
"\n",
" title = 'Concurrent Direct Plays'\n",
" query = base_query \\\n",
" + 'AND session_history_media_info.transcode_decision = \"direct play\" '\n",
" result = monitor_db.select(query)\n",
" if result:\n",
" most_concurrent.append(calc_most_concurrent(title, result))\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_home_stats: most_concurrent: %s.\" % e)\n",
" return None\n",
"\n",
" home_stats.append({'stat_id': stat,\n",
" 'stat_title': 'Most Concurrent Streams',\n",
" 'rows': most_concurrent})\n",
"\n",
" return home_stats\n",
"\n",
" def get_library_stats(self, library_cards=[]):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" if session.get_session_shared_libraries():\n",
" library_cards = session.get_session_shared_libraries()\n",
"\n",
" if 'first_run_wizard' in library_cards:\n",
" return None\n",
"\n",
" library_stats = []\n",
"\n",
" try:\n",
" query = 'SELECT section_id, section_name, section_type, thumb AS library_thumb, ' \\\n",
" 'custom_thumb_url AS custom_thumb, art, count, parent_count, child_count ' \\\n",
" 'FROM library_sections ' \\\n",
" 'WHERE section_id IN (%s) ' \\\n",
" 'ORDER BY section_type, count DESC, parent_count DESC, child_count DESC ' % ','.join(library_cards)\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_library_stats: %s.\" % e)\n",
" return None\n",
"\n",
" for item in result:\n",
" if item['custom_thumb'] and item['custom_thumb'] != item['library_thumb']:\n",
" library_thumb = item['custom_thumb']\n",
" elif item['library_thumb']:\n",
" library_thumb = item['library_thumb']\n",
" else:\n",
" library_thumb = common.DEFAULT_COVER_THUMB\n",
"\n",
" library = {'section_id': item['section_id'],\n",
" 'section_name': item['section_name'],\n",
" 'section_type': item['section_type'],\n",
" 'thumb': library_thumb,\n",
" 'art': item['art'],\n",
" 'count': item['count'],\n",
" 'child_count': item['parent_count'],\n",
" 'grandchild_count': item['child_count']\n",
" }\n",
" library_stats.append(library)\n",
"\n",
" library_stats = {k: list(v) for k, v in groupby(library_stats, key=lambda x: x['section_type'])}\n",
"\n",
" return library_stats\n",
"\n",
" def get_stream_details(self, row_id=None, session_key=None):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" user_cond = ''\n",
" table = 'session_history' if row_id else 'sessions'\n",
" if session.get_session_user_id():\n",
" user_cond = 'AND %s.user_id = %s ' % (table, session.get_session_user_id())\n",
"\n",
" if row_id:\n",
" query = 'SELECT bitrate, video_resolution, ' \\\n",
" 'optimized_version, optimized_version_profile, optimized_version_title, ' \\\n",
" 'synced_version, synced_version_profile, ' \\\n",
" 'container, video_codec, video_bitrate, video_width, video_height, video_framerate, aspect_ratio, ' \\\n",
" 'audio_codec, audio_bitrate, audio_channels, subtitle_codec, ' \\\n",
" 'stream_bitrate, stream_video_resolution, quality_profile, stream_container_decision, stream_container, ' \\\n",
" 'stream_video_decision, stream_video_codec, stream_video_bitrate, stream_video_width, stream_video_height, ' \\\n",
" 'stream_video_framerate, ' \\\n",
" 'stream_audio_decision, stream_audio_codec, stream_audio_bitrate, stream_audio_channels, ' \\\n",
" 'subtitles, stream_subtitle_decision, stream_subtitle_codec, ' \\\n",
" 'transcode_hw_decoding, transcode_hw_encoding, ' \\\n",
" 'video_decision, audio_decision, transcode_decision, width, height, container, ' \\\n",
" 'transcode_container, transcode_video_codec, transcode_audio_codec, transcode_audio_channels, ' \\\n",
" 'transcode_width, transcode_height, ' \\\n",
" 'session_history_metadata.media_type, title, grandparent_title, original_title ' \\\n",
" 'FROM session_history_media_info ' \\\n",
" 'JOIN session_history ON session_history_media_info.id = session_history.id ' \\\n",
" 'JOIN session_history_metadata ON session_history_media_info.id = session_history_metadata.id ' \\\n",
" 'WHERE session_history_media_info.id = ? %s' % user_cond\n",
" result = monitor_db.select(query, args=[row_id])\n",
" elif session_key:\n",
" query = 'SELECT bitrate, video_resolution, ' \\\n",
" 'optimized_version, optimized_version_profile, optimized_version_title, ' \\\n",
" 'synced_version, synced_version_profile, ' \\\n",
" 'container, video_codec, video_bitrate, video_width, video_height, video_framerate, aspect_ratio, ' \\\n",
" 'audio_codec, audio_bitrate, audio_channels, subtitle_codec, ' \\\n",
" 'stream_bitrate, stream_video_resolution, quality_profile, stream_container_decision, stream_container, ' \\\n",
" 'stream_video_decision, stream_video_codec, stream_video_bitrate, stream_video_width, stream_video_height, ' \\\n",
" 'stream_video_framerate, ' \\\n",
" 'stream_audio_decision, stream_audio_codec, stream_audio_bitrate, stream_audio_channels, ' \\\n",
" 'subtitles, stream_subtitle_decision, stream_subtitle_codec, ' \\\n",
" 'transcode_hw_decoding, transcode_hw_encoding, ' \\\n",
" 'video_decision, audio_decision, transcode_decision, width, height, container, ' \\\n",
" 'transcode_container, transcode_video_codec, transcode_audio_codec, transcode_audio_channels, ' \\\n",
" 'transcode_width, transcode_height, ' \\\n",
" 'media_type, title, grandparent_title, original_title ' \\\n",
" 'FROM sessions ' \\\n",
" 'WHERE session_key = ? %s' % user_cond\n",
" result = monitor_db.select(query, args=[session_key])\n",
" else:\n",
" return None\n",
"\n",
" stream_output = {}\n",
"\n",
" for item in result:\n",
" pre_tautulli = 0\n",
"\n",
" # For backwards compatibility. Pick one new Tautulli key to check and override with old values.\n",
" if not item['stream_container']:\n",
" item['stream_video_resolution'] = item['video_resolution']\n",
" item['stream_container'] = item['transcode_container'] or item['container']\n",
" item['stream_video_decision'] = item['video_decision']\n",
" item['stream_video_codec'] = item['transcode_video_codec'] or item['video_codec']\n",
" item['stream_video_width'] = item['transcode_width'] or item['width']\n",
" item['stream_video_height'] = item['transcode_height'] or item['height']\n",
" item['stream_audio_decision'] = item['audio_decision']\n",
" item['stream_audio_codec'] = item['transcode_audio_codec'] or item['audio_codec']\n",
" item['stream_audio_channels'] = item['transcode_audio_channels'] or item['audio_channels']\n",
" item['video_width'] = item['width']\n",
" item['video_height'] = item['height']\n",
" pre_tautulli = 1\n",
"\n",
" stream_output = {'bitrate': item['bitrate'],\n",
" 'video_resolution': item['video_resolution'],\n",
" 'optimized_version': item['optimized_version'],\n",
" 'optimized_version_profile': item['optimized_version_profile'],\n",
" 'optimized_version_title': item['optimized_version_title'],\n",
" 'synced_version': item['synced_version'],\n",
" 'synced_version_profile': item['synced_version_profile'],\n",
" 'container': item['container'],\n",
" 'video_codec': item['video_codec'],\n",
" 'video_bitrate': item['video_bitrate'],\n",
" 'video_width': item['video_width'],\n",
" 'video_height': item['video_height'],\n",
" 'video_framerate': item['video_framerate'],\n",
" 'aspect_ratio': item['aspect_ratio'],\n",
" 'audio_codec': item['audio_codec'],\n",
" 'audio_bitrate': item['audio_bitrate'],\n",
" 'audio_channels': item['audio_channels'],\n",
" 'subtitle_codec': item['subtitle_codec'],\n",
" 'stream_bitrate': item['stream_bitrate'],\n",
" 'stream_video_resolution': item['stream_video_resolution'],\n",
" 'quality_profile': item['quality_profile'],\n",
" 'stream_container_decision': item['stream_container_decision'],\n",
" 'stream_container': item['stream_container'],\n",
" 'stream_video_decision': item['stream_video_decision'],\n",
" 'stream_video_codec': item['stream_video_codec'],\n",
" 'stream_video_bitrate': item['stream_video_bitrate'],\n",
" 'stream_video_width': item['stream_video_width'],\n",
" 'stream_video_height': item['stream_video_height'],\n",
" 'stream_video_framerate': item['stream_video_framerate'],\n",
" 'stream_audio_decision': item['stream_audio_decision'],\n",
" 'stream_audio_codec': item['stream_audio_codec'],\n",
" 'stream_audio_bitrate': item['stream_audio_bitrate'],\n",
" 'stream_audio_channels': item['stream_audio_channels'],\n",
" 'subtitles': item['subtitles'],\n",
" 'stream_subtitle_decision': item['stream_subtitle_decision'],\n",
" 'stream_subtitle_codec': item['stream_subtitle_codec'],\n",
" 'transcode_hw_decoding': item['transcode_hw_decoding'],\n",
" 'transcode_hw_encoding': item['transcode_hw_encoding'],\n",
" 'video_decision': item['video_decision'],\n",
" 'audio_decision': item['audio_decision'],\n",
" 'media_type': item['media_type'],\n",
" 'title': item['title'],\n",
" 'grandparent_title': item['grandparent_title'],\n",
" 'original_title': item['original_title'],\n",
" 'current_session': 1 if session_key else 0,\n",
" 'pre_tautulli': pre_tautulli\n",
" }\n",
"\n",
" stream_output = {k: v or '' for k, v in stream_output.iteritems()}\n",
" return stream_output\n",
"\n",
" def get_metadata_details(self, rating_key):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" if rating_key:\n",
" query = 'SELECT session_history_metadata.id, ' \\\n",
" 'session_history_metadata.rating_key, session_history_metadata.parent_rating_key, ' \\\n",
" 'session_history_metadata.grandparent_rating_key, session_history_metadata.title, ' \\\n",
" 'session_history_metadata.parent_title, session_history_metadata.grandparent_title, ' \\\n",
" 'session_history_metadata.original_title, session_history_metadata.full_title, ' \\\n",
" 'library_sections.section_name, ' \\\n",
" 'session_history_metadata.media_index, session_history_metadata.parent_media_index, ' \\\n",
" 'session_history_metadata.section_id, session_history_metadata.thumb, ' \\\n",
" 'session_history_metadata.parent_thumb, session_history_metadata.grandparent_thumb, ' \\\n",
" 'session_history_metadata.art, session_history_metadata.media_type, session_history_metadata.year, ' \\\n",
" 'session_history_metadata.originally_available_at, session_history_metadata.added_at, ' \\\n",
" 'session_history_metadata.updated_at, session_history_metadata.last_viewed_at, ' \\\n",
" 'session_history_metadata.content_rating, session_history_metadata.summary, ' \\\n",
" 'session_history_metadata.tagline, session_history_metadata.rating, session_history_metadata.duration, ' \\\n",
" 'session_history_metadata.guid, session_history_metadata.directors, session_history_metadata.writers, ' \\\n",
" 'session_history_metadata.actors, session_history_metadata.genres, session_history_metadata.studio, ' \\\n",
" 'session_history_metadata.labels, ' \\\n",
" 'session_history_media_info.container, session_history_media_info.bitrate, ' \\\n",
" 'session_history_media_info.video_codec, session_history_media_info.video_resolution, ' \\\n",
" 'session_history_media_info.video_framerate, session_history_media_info.audio_codec, ' \\\n",
" 'session_history_media_info.audio_channels ' \\\n",
" 'FROM session_history_metadata ' \\\n",
" 'JOIN library_sections ON session_history_metadata.section_id = library_sections.section_id ' \\\n",
" 'JOIN session_history_media_info ON session_history_metadata.id = session_history_media_info.id ' \\\n",
" 'WHERE session_history_metadata.rating_key = ? ' \\\n",
" 'ORDER BY session_history_metadata.id DESC ' \\\n",
" 'LIMIT 1'\n",
" result = monitor_db.select(query=query, args=[rating_key])\n",
" else:\n",
" result = []\n",
"\n",
" metadata_list = []\n",
"\n",
" for item in result:\n",
" directors = item['directors'].split(';') if item['directors'] else []\n",
" writers = item['writers'].split(';') if item['writers'] else []\n",
" actors = item['actors'].split(';') if item['actors'] else []\n",
" genres = item['genres'].split(';') if item['genres'] else []\n",
" labels = item['labels'].split(';') if item['labels'] else []\n",
"\n",
" media_info = [{'container': item['container'],\n",
" 'bitrate': item['bitrate'],\n",
" 'video_codec': item['video_codec'],\n",
" 'video_resolution': item['video_resolution'],\n",
" 'video_framerate': item['video_framerate'],\n",
" 'audio_codec': item['audio_codec'],\n",
" 'audio_channels': item['audio_channels']\n",
" }]\n",
"\n",
" metadata = {'media_type': item['media_type'],\n",
" 'rating_key': item['rating_key'],\n",
" 'parent_rating_key': item['parent_rating_key'],\n",
" 'grandparent_rating_key': item['grandparent_rating_key'],\n",
" 'grandparent_title': item['grandparent_title'],\n",
" 'original_title': item['original_title'],\n",
" 'parent_media_index': item['parent_media_index'],\n",
" 'parent_title': item['parent_title'],\n",
" 'media_index': item['media_index'],\n",
" 'studio': item['studio'],\n",
" 'title': item['title'],\n",
" 'content_rating': item['content_rating'],\n",
" 'summary': item['summary'],\n",
" 'tagline': item['tagline'],\n",
" 'rating': item['rating'],\n",
" 'duration': item['duration'],\n",
" 'year': item['year'],\n",
" 'thumb': item['thumb'],\n",
" 'parent_thumb': item['parent_thumb'],\n",
" 'grandparent_thumb': item['grandparent_thumb'],\n",
" 'art': item['art'],\n",
" 'originally_available_at': item['originally_available_at'],\n",
" 'added_at': item['added_at'],\n",
" 'updated_at': item['updated_at'],\n",
" 'last_viewed_at': item['last_viewed_at'],\n",
" 'guid': item['guid'],\n",
" 'directors': directors,\n",
" 'writers': writers,\n",
" 'actors': actors,\n",
" 'genres': genres,\n",
" 'labels': labels,\n",
" 'library_name': item['section_name'],\n",
" 'section_id': item['section_id'],\n",
" 'media_info': media_info\n",
" }\n",
" metadata_list.append(metadata)\n",
"\n",
" filtered_metadata_list = session.filter_session_info(metadata_list, filter_key='section_id')\n",
" \n",
" if filtered_metadata_list:\n",
" return filtered_metadata_list[0]\n",
" else:\n",
" return []\n",
"\n",
" def get_total_duration(self, custom_where=None):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" # Split up custom wheres\n",
" if custom_where:\n",
" where = 'WHERE ' + ' AND '.join([w[0] + ' = \"' + w[1] + '\"' for w in custom_where])\n",
" else:\n",
" where = ''\n",
" \n",
" try:\n",
" query = 'SELECT SUM(CASE WHEN stopped > 0 THEN (stopped - started) ELSE 0 END) - ' \\\n",
" 'SUM(CASE WHEN paused_counter IS NULL THEN 0 ELSE paused_counter END) AS total_duration ' \\\n",
" 'FROM session_history ' \\\n",
" 'JOIN session_history_metadata ON session_history_metadata.id = session_history.id ' \\\n",
" 'JOIN session_history_media_info ON session_history_media_info.id = session_history.id ' \\\n",
" '%s ' % where\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_total_duration: %s.\" % e)\n",
" return None\n",
"\n",
" total_duration = 0\n",
" for item in result:\n",
" total_duration = item['total_duration']\n",
"\n",
" return total_duration\n",
"\n",
" def get_session_ip(self, session_key=''):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" ip_address = 'N/A'\n",
"\n",
" user_cond = ''\n",
" if session.get_session_user_id():\n",
" user_cond = 'AND user_id = %s ' % session.get_session_user_id()\n",
"\n",
" if session_key:\n",
" try:\n",
" query = 'SELECT ip_address FROM sessions WHERE session_key = %d %s' % (int(session_key), user_cond)\n",
" result = monitor_db.select(query)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_session_ip: %s.\" % e)\n",
" return ip_address\n",
" else:\n",
" return ip_address\n",
"\n",
" for item in result:\n",
" ip_address = item['ip_address']\n",
"\n",
" return ip_address\n",
"\n",
" def get_img_info(self, img=None, rating_key=None, width=None, height=None,\n",
" opacity=None, background=None, blur=None, fallback=None,\n",
" order_by='', service=None):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" img_info = []\n",
"\n",
" where_params = []\n",
" args = []\n",
"\n",
" if img is not None:\n",
" where_params.append('img')\n",
" args.append(img)\n",
" if rating_key is not None:\n",
" where_params.append('rating_key')\n",
" args.append(rating_key)\n",
" if width is not None:\n",
" where_params.append('width')\n",
" args.append(width)\n",
" if height is not None:\n",
" where_params.append('height')\n",
" args.append(height)\n",
" if opacity is not None:\n",
" where_params.append('opacity')\n",
" args.append(opacity)\n",
" if background is not None:\n",
" where_params.append('background')\n",
" args.append(background)\n",
" if blur is not None:\n",
" where_params.append('blur')\n",
" args.append(blur)\n",
" if fallback is not None:\n",
" where_params.append('fallback')\n",
" args.append(fallback)\n",
"\n",
" where = ''\n",
" if where_params:\n",
" where = 'WHERE ' + ' AND '.join([w + ' = ?' for w in where_params])\n",
"\n",
" if order_by:\n",
" order_by = 'ORDER BY ' + order_by + ' DESC'\n",
"\n",
" if service == 'imgur':\n",
" query = 'SELECT imgur_title AS img_title, imgur_url AS img_url FROM imgur_lookup ' \\\n",
" 'JOIN image_hash_lookup ON imgur_lookup.img_hash = image_hash_lookup.img_hash ' \\\n",
" '%s %s' % (where, order_by)\n",
" elif service == 'cloudinary':\n",
" query = 'SELECT cloudinary_title AS img_title, cloudinary_url AS img_url FROM cloudinary_lookup ' \\\n",
" 'JOIN image_hash_lookup ON cloudinary_lookup.img_hash = image_hash_lookup.img_hash ' \\\n",
" '%s %s' % (where, order_by)\n",
" else:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_img_info: \"\n",
" \"service not provided.\")\n",
" return img_info\n",
"\n",
" try:\n",
" img_info = monitor_db.select(query, args=args)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_img_info: %s.\" % e)\n",
"\n",
" return img_info\n",
"\n",
" def set_img_info(self, img_hash=None, img_title=None, img_url=None, delete_hash=None, service=None):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" keys = {'img_hash': img_hash}\n",
"\n",
" if service == 'imgur':\n",
" table = 'imgur_lookup'\n",
" values = {'imgur_title': img_title,\n",
" 'imgur_url': img_url,\n",
" 'delete_hash': delete_hash}\n",
" elif service == 'cloudinary':\n",
" table = 'cloudinary_lookup'\n",
" values = {'cloudinary_title': img_title,\n",
" 'cloudinary_url': img_url}\n",
" else:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for set_img_info: \"\n",
" \"service not provided.\")\n",
" return\n",
"\n",
" monitor_db.upsert(table, key_dict=keys, value_dict=values)\n",
"\n",
" def delete_img_info(self, rating_key=None, service='', delete_all=False):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" if not delete_all:\n",
" service = helpers.get_img_service()\n",
"\n",
" if not rating_key and not delete_all:\n",
" logger.error(u\"Tautulli DataFactory :: Unable to delete hosted images: rating_key not provided.\")\n",
" return False\n",
"\n",
" where = ''\n",
" args = []\n",
" log_msg = ''\n",
" if rating_key:\n",
" where = 'WHERE rating_key = ?'\n",
" args = [rating_key]\n",
" log_msg = ' for rating_key %s' % rating_key\n",
"\n",
" if service.lower() == 'imgur':\n",
" # Delete from Imgur\n",
" query = 'SELECT imgur_title, delete_hash, fallback FROM imgur_lookup ' \\\n",
" 'JOIN image_hash_lookup ON imgur_lookup.img_hash = image_hash_lookup.img_hash %s' % where\n",
" results = monitor_db.select(query, args=args)\n",
"\n",
" for imgur_info in results:\n",
" if imgur_info['delete_hash']:\n",
" helpers.delete_from_imgur(delete_hash=imgur_info['delete_hash'],\n",
" img_title=imgur_info['imgur_title'],\n",
" fallback=imgur_info['fallback'])\n",
"\n",
" logger.info(u\"Tautulli DataFactory :: Deleting Imgur info%s from the database.\"\n",
" % log_msg)\n",
" result = monitor_db.action('DELETE FROM imgur_lookup WHERE img_hash '\n",
" 'IN (SELECT img_hash FROM image_hash_lookup %s)' % where,\n",
" args)\n",
"\n",
" elif service.lower() == 'cloudinary':\n",
" # Delete from Cloudinary\n",
" query = 'SELECT cloudinary_title, rating_key, fallback FROM cloudinary_lookup ' \\\n",
" 'JOIN image_hash_lookup ON cloudinary_lookup.img_hash = image_hash_lookup.img_hash %s ' \\\n",
" 'GROUP BY rating_key' % where\n",
" results = monitor_db.select(query, args=args)\n",
"\n",
" for cloudinary_info in results:\n",
" helpers.delete_from_cloudinary(rating_key=cloudinary_info['rating_key'])\n",
"\n",
" logger.info(u\"Tautulli DataFactory :: Deleting Cloudinary info%s from the database.\"\n",
" % log_msg)\n",
" result = monitor_db.action('DELETE FROM cloudinary_lookup WHERE img_hash '\n",
" 'IN (SELECT img_hash FROM image_hash_lookup %s)' % where,\n",
" args)\n",
"\n",
" else:\n",
" logger.error(u\"Tautulli DataFactory :: Unable to delete hosted images: invalid service '%s' provided.\"\n",
" % service)\n",
"\n",
" return service\n",
"\n",
" def get_poster_info(self, rating_key='', metadata=None, service=None):\n",
" poster_key = ''\n",
" if str(rating_key).isdigit():\n",
" poster_key = rating_key\n",
" elif metadata:\n",
" if metadata['media_type'] in ('movie', 'show', 'artist', 'collection'):\n",
" poster_key = metadata['rating_key']\n",
" elif metadata['media_type'] in ('season', 'album'):\n",
" poster_key = metadata['rating_key']\n",
" elif metadata['media_type'] in ('episode', 'track'):\n",
" poster_key = metadata['parent_rating_key']\n",
"\n",
" poster_info = {}\n",
"\n",
" if poster_key:\n",
" service = service or helpers.get_img_service()\n",
"\n",
" if service:\n",
" img_info = self.get_img_info(rating_key=poster_key,\n",
" order_by='height',\n",
" fallback='poster',\n",
" service=service)\n",
" if img_info:\n",
" poster_info = {'poster_title': img_info[0]['img_title'],\n",
" 'poster_url': img_info[0]['img_url'],\n",
" 'img_service': service.capitalize()}\n",
"\n",
" return poster_info\n",
"\n",
" def get_lookup_info(self, rating_key='', metadata=None):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" lookup_key = ''\n",
" if str(rating_key).isdigit():\n",
" lookup_key = rating_key\n",
" elif metadata:\n",
" if metadata['media_type'] in ('movie', 'show', 'artist'):\n",
" lookup_key = metadata['rating_key']\n",
" elif metadata['media_type'] in ('season', 'album'):\n",
" lookup_key = metadata['parent_rating_key']\n",
" elif metadata['media_type'] in ('episode', 'track'):\n",
" lookup_key = metadata['grandparent_rating_key']\n",
"\n",
" lookup_info = {'tvmaze_id': '',\n",
" 'themoviedb_id': ''}\n",
"\n",
" if lookup_key:\n",
" try:\n",
" query = 'SELECT tvmaze_id FROM tvmaze_lookup ' \\\n",
" 'WHERE rating_key = ?'\n",
" tvmaze_info = monitor_db.select_single(query, args=[lookup_key])\n",
" if tvmaze_info:\n",
" lookup_info['tvmaze_id'] = tvmaze_info['tvmaze_id']\n",
"\n",
" query = 'SELECT themoviedb_id FROM themoviedb_lookup ' \\\n",
" 'WHERE rating_key = ?'\n",
" themoviedb_info = monitor_db.select_single(query, args=[lookup_key])\n",
" if themoviedb_info:\n",
" lookup_info['themoviedb_id'] = themoviedb_info['themoviedb_id']\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_lookup_info: %s.\" % e)\n",
"\n",
" return lookup_info\n",
"\n",
" def delete_lookup_info(self, rating_key='', title=''):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" if rating_key:\n",
" logger.info(u\"Tautulli DataFactory :: Deleting lookup info for '%s' (rating_key %s) from the database.\"\n",
" % (title, rating_key))\n",
" result_tvmaze = monitor_db.action('DELETE FROM tvmaze_lookup WHERE rating_key = ?', [rating_key])\n",
" result_themoviedb = monitor_db.action('DELETE FROM themoviedb_lookup WHERE rating_key = ?', [rating_key])\n",
" return True if (result_tvmaze or result_themoviedb) else False\n",
"\n",
" def get_search_query(self, rating_key=''):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" if rating_key:\n",
" query = 'SELECT rating_key, parent_rating_key, grandparent_rating_key, title, parent_title, grandparent_title, ' \\\n",
" 'media_index, parent_media_index, year, media_type ' \\\n",
" 'FROM session_history_metadata ' \\\n",
" 'WHERE rating_key = ? ' \\\n",
" 'OR parent_rating_key = ? ' \\\n",
" 'OR grandparent_rating_key = ? ' \\\n",
" 'LIMIT 1'\n",
" result = monitor_db.select(query=query, args=[rating_key, rating_key, rating_key])\n",
" else:\n",
" result = []\n",
"\n",
" query = {}\n",
" query_string = None\n",
" media_type = None\n",
"\n",
" for item in result:\n",
" title = item['title']\n",
" parent_title = item['parent_title']\n",
" grandparent_title = item['grandparent_title']\n",
" media_index = item['media_index']\n",
" parent_media_index = item['parent_media_index']\n",
" year = item['year']\n",
"\n",
" if str(item['rating_key']) == rating_key:\n",
" query_string = item['title']\n",
" media_type = item['media_type']\n",
"\n",
" elif str(item['parent_rating_key']) == rating_key:\n",
" if item['media_type'] == 'episode':\n",
" query_string = item['grandparent_title']\n",
" media_type = 'season'\n",
" elif item['media_type'] == 'track':\n",
" query_string = item['parent_title']\n",
" media_type = 'album'\n",
"\n",
" elif str(item['grandparent_rating_key']) == rating_key:\n",
" if item['media_type'] == 'episode':\n",
" query_string = item['grandparent_title']\n",
" media_type = 'show'\n",
" elif item['media_type'] == 'track':\n",
" query_string = item['grandparent_title']\n",
" media_type = 'artist'\n",
"\n",
" if query_string and media_type:\n",
" query = {'query_string': query_string,\n",
" 'title': title,\n",
" 'parent_title': parent_title,\n",
" 'grandparent_title': grandparent_title,\n",
" 'media_index': media_index,\n",
" 'parent_media_index': parent_media_index,\n",
" 'year': year,\n",
" 'media_type': media_type,\n",
" 'rating_key': rating_key\n",
" }\n",
" else:\n",
" return None\n",
"\n",
" return query\n",
"\n",
" def get_rating_keys_list(self, rating_key='', media_type=''):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" if media_type == 'movie':\n",
" key_list = {0: {'rating_key': int(rating_key)}}\n",
" return key_list\n",
"\n",
" if media_type == 'artist' or media_type == 'album' or media_type == 'track':\n",
" match_type = 'title'\n",
" else:\n",
" match_type = 'index'\n",
"\n",
" # Get the grandparent rating key\n",
" try:\n",
" query = 'SELECT rating_key, parent_rating_key, grandparent_rating_key ' \\\n",
" 'FROM session_history_metadata ' \\\n",
" 'WHERE rating_key = ? ' \\\n",
" 'OR parent_rating_key = ? ' \\\n",
" 'OR grandparent_rating_key = ? ' \\\n",
" 'LIMIT 1'\n",
" result = monitor_db.select(query=query, args=[rating_key, rating_key, rating_key])\n",
"\n",
" grandparent_rating_key = result[0]['grandparent_rating_key']\n",
"\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_rating_keys_list: %s.\" % e)\n",
" return {}\n",
"\n",
" query = 'SELECT rating_key, parent_rating_key, grandparent_rating_key, title, parent_title, grandparent_title, ' \\\n",
" 'media_index, parent_media_index ' \\\n",
" 'FROM session_history_metadata ' \\\n",
" 'WHERE {0} = ? ' \\\n",
" 'GROUP BY {1} ' \\\n",
" 'ORDER BY {1} DESC '\n",
"\n",
" # get grandparent_rating_keys\n",
" grandparents = {}\n",
" result = monitor_db.select(query=query.format('grandparent_rating_key', 'grandparent_rating_key'),\n",
" args=[grandparent_rating_key])\n",
" for item in result:\n",
" # get parent_rating_keys\n",
" parents = {}\n",
" result = monitor_db.select(query=query.format('grandparent_rating_key', 'parent_rating_key'),\n",
" args=[item['grandparent_rating_key']])\n",
" for item in result:\n",
" # get rating_keys\n",
" children = {}\n",
" result = monitor_db.select(query=query.format('parent_rating_key', 'rating_key'),\n",
" args=[item['parent_rating_key']])\n",
" for item in result:\n",
" key = item['media_index'] if item['media_index'] else item['title']\n",
" children.update({key: {'rating_key': item['rating_key']}})\n",
"\n",
" key = item['parent_media_index'] if match_type == 'index' else item['parent_title']\n",
" parents.update({key:\n",
" {'rating_key': item['parent_rating_key'],\n",
" 'children': children}\n",
" })\n",
"\n",
" key = 0 if match_type == 'index' else item['grandparent_title']\n",
" grandparents.update({key:\n",
" {'rating_key': item['grandparent_rating_key'],\n",
" 'children': parents}\n",
" })\n",
"\n",
" key_list = grandparents\n",
"\n",
" return key_list\n",
"\n",
" def delete_session_history_rows(self, row_id=None):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" if row_id.isdigit():\n",
" logger.info(u\"Tautulli DataFactory :: Deleting row id %s from the session history database.\" % row_id)\n",
" session_history_del = \\\n",
" monitor_db.action('DELETE FROM session_history WHERE id = ?', [row_id])\n",
" session_history_media_info_del = \\\n",
" monitor_db.action('DELETE FROM session_history_media_info WHERE id = ?', [row_id])\n",
" session_history_metadata_del = \\\n",
" monitor_db.action('DELETE FROM session_history_metadata WHERE id = ?', [row_id])\n",
"\n",
" return 'Deleted rows %s.' % row_id\n",
" else:\n",
" return 'Unable to delete rows. Input row not valid.'\n",
"\n",
" def update_metadata(self, old_key_list='', new_key_list='', media_type=''):\n",
" pms_connect = pmsconnect.PmsConnect()\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" # function to map rating keys pairs\n",
" def get_pairs(old, new):\n",
" pairs = {}\n",
" for k, v in old.iteritems():\n",
" if k in new:\n",
" pairs.update({v['rating_key']: new[k]['rating_key']})\n",
" if 'children' in old[k]:\n",
" pairs.update(get_pairs(old[k]['children'], new[k]['children']))\n",
"\n",
" return pairs\n",
"\n",
" # map rating keys pairs\n",
" mapping = {}\n",
" if old_key_list and new_key_list:\n",
" mapping = get_pairs(old_key_list, new_key_list)\n",
"\n",
" if mapping:\n",
" logger.info(u\"Tautulli DataFactory :: Updating metadata in the database.\")\n",
" for old_key, new_key in mapping.iteritems():\n",
" metadata = pms_connect.get_metadata_details(new_key)\n",
"\n",
" if metadata:\n",
" if metadata['media_type'] == 'show' or metadata['media_type'] == 'artist':\n",
" # check grandparent_rating_key (2 tables)\n",
" monitor_db.action('UPDATE session_history SET grandparent_rating_key = ? WHERE grandparent_rating_key = ?', \n",
" [new_key, old_key])\n",
" monitor_db.action('UPDATE session_history_metadata SET grandparent_rating_key = ? WHERE grandparent_rating_key = ?', \n",
" [new_key, old_key])\n",
" elif metadata['media_type'] == 'season' or metadata['media_type'] == 'album':\n",
" # check parent_rating_key (2 tables)\n",
" monitor_db.action('UPDATE session_history SET parent_rating_key = ? WHERE parent_rating_key = ?', \n",
" [new_key, old_key])\n",
" monitor_db.action('UPDATE session_history_metadata SET parent_rating_key = ? WHERE parent_rating_key = ?', \n",
" [new_key, old_key])\n",
" else:\n",
" # check rating_key (2 tables)\n",
" monitor_db.action('UPDATE session_history SET rating_key = ? WHERE rating_key = ?', \n",
" [new_key, old_key])\n",
" monitor_db.action('UPDATE session_history_media_info SET rating_key = ? WHERE rating_key = ?', \n",
" [new_key, old_key])\n",
"\n",
" # update session_history_metadata table\n",
" self.update_metadata_details(old_key, new_key, metadata)\n",
"\n",
" return 'Updated metadata in database.'\n",
" else:\n",
" return 'Unable to update metadata in database. No changes were made.'\n",
"\n",
" def update_metadata_details(self, old_rating_key='', new_rating_key='', metadata=None):\n",
"\n",
" if metadata:\n",
" # Create full_title\n",
" if metadata['media_type'] == 'episode':\n",
" full_title = '%s - %s' % (metadata['grandparent_title'], metadata['title'])\n",
" elif metadata['media_type'] == 'track':\n",
" full_title = '%s - %s' % (metadata['title'],\n",
" metadata['original_title'] or metadata['grandparent_title'])\n",
" else:\n",
" full_title = metadata['title']\n",
"\n",
" directors = \";\".join(metadata['directors'])\n",
" writers = \";\".join(metadata['writers'])\n",
" actors = \";\".join(metadata['actors'])\n",
" genres = \";\".join(metadata['genres'])\n",
" labels = \";\".join(metadata['labels'])\n",
"\n",
" #logger.info(u\"Tautulli DataFactory :: Updating metadata in the database for rating key: %s.\" % new_rating_key)\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" # Update the session_history_metadata table\n",
" query = 'UPDATE session_history_metadata SET rating_key = ?, parent_rating_key = ?, ' \\\n",
" 'grandparent_rating_key = ?, title = ?, parent_title = ?, grandparent_title = ?, ' \\\n",
" 'original_title = ?, full_title = ?, ' \\\n",
" 'media_index = ?, parent_media_index = ?, section_id = ?, thumb = ?, parent_thumb = ?, ' \\\n",
" 'grandparent_thumb = ?, art = ?, media_type = ?, year = ?, originally_available_at = ?, ' \\\n",
" 'added_at = ?, updated_at = ?, last_viewed_at = ?, content_rating = ?, summary = ?, ' \\\n",
" 'tagline = ?, rating = ?, duration = ?, guid = ?, directors = ?, writers = ?, actors = ?, ' \\\n",
" 'genres = ?, studio = ?, labels = ? ' \\\n",
" 'WHERE rating_key = ?'\n",
"\n",
" args = [metadata['rating_key'], metadata['parent_rating_key'], metadata['grandparent_rating_key'],\n",
" metadata['title'], metadata['parent_title'], metadata['grandparent_title'],\n",
" metadata['original_title'], full_title,\n",
" metadata['media_index'], metadata['parent_media_index'], metadata['section_id'], metadata['thumb'],\n",
" metadata['parent_thumb'], metadata['grandparent_thumb'], metadata['art'], metadata['media_type'],\n",
" metadata['year'], metadata['originally_available_at'], metadata['added_at'], metadata['updated_at'],\n",
" metadata['last_viewed_at'], metadata['content_rating'], metadata['summary'], metadata['tagline'], \n",
" metadata['rating'], metadata['duration'], metadata['guid'], directors, writers, actors, genres,\n",
" metadata['studio'], labels,\n",
" old_rating_key]\n",
"\n",
" monitor_db.action(query=query, args=args)\n",
"\n",
" def get_notification_log(self, kwargs=None):\n",
" data_tables = datatables.DataTables()\n",
"\n",
" columns = ['notify_log.id',\n",
" 'notify_log.timestamp',\n",
" 'notify_log.session_key',\n",
" 'notify_log.rating_key',\n",
" 'notify_log.user_id',\n",
" 'notify_log.user',\n",
" 'notify_log.notifier_id',\n",
" 'notify_log.agent_id',\n",
" 'notify_log.agent_name',\n",
" 'notify_log.notify_action',\n",
" 'notify_log.subject_text',\n",
" 'notify_log.body_text',\n",
" 'notify_log.success'\n",
" ]\n",
" try:\n",
" query = data_tables.ssp_query(table_name='notify_log',\n",
" columns=columns,\n",
" custom_where=[],\n",
" group_by=[],\n",
" join_types=[],\n",
" join_tables=[],\n",
" join_evals=[],\n",
" kwargs=kwargs)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_notification_log: %s.\" % e)\n",
" return {'recordsFiltered': 0,\n",
" 'recordsTotal': 0,\n",
" 'draw': 0,\n",
" 'data': 'null',\n",
" 'error': 'Unable to execute database query.'}\n",
"\n",
" notifications = query['result']\n",
"\n",
" rows = []\n",
" for item in notifications:\n",
" if item['body_text']:\n",
" body_text = item['body_text'].replace('\\r\\n', '<br />').replace('\\n', '<br />')\n",
" else:\n",
" body_text = ''\n",
"\n",
" row = {'id': item['id'],\n",
" 'timestamp': item['timestamp'],\n",
" 'session_key': item['session_key'],\n",
" 'rating_key': item['rating_key'],\n",
" 'user_id': item['user_id'],\n",
" 'user': item['user'],\n",
" 'notifier_id': item['notifier_id'],\n",
" 'agent_id': item['agent_id'],\n",
" 'agent_name': item['agent_name'],\n",
" 'notify_action': item['notify_action'],\n",
" 'subject_text': item['subject_text'],\n",
" 'body_text': body_text,\n",
" 'success': item['success']\n",
" }\n",
"\n",
" rows.append(row)\n",
"\n",
" dict = {'recordsFiltered': query['filteredCount'],\n",
" 'recordsTotal': query['totalCount'],\n",
" 'data': rows,\n",
" 'draw': query['draw']\n",
" }\n",
"\n",
" return dict\n",
"\n",
" def delete_notification_log(self):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" try:\n",
" logger.info(u\"Tautulli DataFactory :: Clearing notification logs from database.\")\n",
" monitor_db.action('DELETE FROM notify_log')\n",
" monitor_db.action('VACUUM')\n",
" return True\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for delete_notification_log: %s.\" % e)\n",
" return False\n",
"\n",
" def get_newsletter_log(self, kwargs=None):\n",
" data_tables = datatables.DataTables()\n",
"\n",
" columns = ['newsletter_log.id',\n",
" 'newsletter_log.timestamp',\n",
" 'newsletter_log.newsletter_id',\n",
" 'newsletter_log.agent_id',\n",
" 'newsletter_log.agent_name',\n",
" 'newsletter_log.notify_action',\n",
" 'newsletter_log.subject_text',\n",
" 'newsletter_log.body_text',\n",
" 'newsletter_log.start_date',\n",
" 'newsletter_log.end_date',\n",
" 'newsletter_log.uuid',\n",
" 'newsletter_log.success'\n",
" ]\n",
" try:\n",
" query = data_tables.ssp_query(table_name='newsletter_log',\n",
" columns=columns,\n",
" custom_where=[],\n",
" group_by=[],\n",
" join_types=[],\n",
" join_tables=[],\n",
" join_evals=[],\n",
" kwargs=kwargs)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_newsletter_log: %s.\" % e)\n",
" return {'recordsFiltered': 0,\n",
" 'recordsTotal': 0,\n",
" 'draw': 0,\n",
" 'data': 'null',\n",
" 'error': 'Unable to execute database query.'}\n",
"\n",
" newsletters = query['result']\n",
"\n",
" rows = []\n",
" for item in newsletters:\n",
" row = {'id': item['id'],\n",
" 'timestamp': item['timestamp'],\n",
" 'newsletter_id': item['newsletter_id'],\n",
" 'agent_id': item['agent_id'],\n",
" 'agent_name': item['agent_name'],\n",
" 'notify_action': item['notify_action'],\n",
" 'subject_text': item['subject_text'],\n",
" 'body_text': item['body_text'],\n",
" 'start_date': item['start_date'],\n",
" 'end_date': item['end_date'],\n",
" 'uuid': item['uuid'],\n",
" 'success': item['success']\n",
" }\n",
"\n",
" rows.append(row)\n",
"\n",
" dict = {'recordsFiltered': query['filteredCount'],\n",
" 'recordsTotal': query['totalCount'],\n",
" 'data': rows,\n",
" 'draw': query['draw']\n",
" }\n",
"\n",
" return dict\n",
"\n",
" def delete_newsletter_log(self):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" try:\n",
" logger.info(u\"Tautulli DataFactory :: Clearing newsletter logs from database.\")\n",
" monitor_db.action('DELETE FROM newsletter_log')\n",
" monitor_db.action('VACUUM')\n",
" return True\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for delete_newsletter_log: %s.\" % e)\n",
" return False\n",
"\n",
" def get_user_devices(self, user_id=''):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" if user_id:\n",
" try:\n",
" query = 'SELECT machine_id FROM session_history WHERE user_id = ? GROUP BY machine_id'\n",
" result = monitor_db.select(query=query, args=[user_id])\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_user_devices: %s.\" % e)\n",
" return []\n",
" else:\n",
" return []\n",
"\n",
" return [d['machine_id'] for d in result]\n",
"\n",
" def get_recently_added_item(self, rating_key=''):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" if rating_key:\n",
" try:\n",
" query = 'SELECT * FROM recently_added WHERE rating_key = ?'\n",
" result = monitor_db.select(query=query, args=[rating_key])\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for get_recently_added_item: %s.\" % e)\n",
" return []\n",
" else:\n",
" return []\n",
"\n",
" return result\n",
"\n",
" def set_recently_added_item(self, rating_key=''):\n",
" monitor_db = database.MonitorDatabase()\n",
"\n",
" pms_connect = pmsconnect.PmsConnect()\n",
" metadata = pms_connect.get_metadata_details(rating_key)\n",
"\n",
" keys = {'rating_key': metadata['rating_key']}\n",
"\n",
" values = {'added_at': metadata['added_at'],\n",
" 'section_id': metadata['section_id'],\n",
" 'parent_rating_key': metadata['parent_rating_key'],\n",
" 'grandparent_rating_key': metadata['grandparent_rating_key'],\n",
" 'media_type': metadata['media_type'],\n",
" 'media_info': json.dumps(metadata['media_info'])\n",
" }\n",
"\n",
" try:\n",
" monitor_db.upsert(table_name='recently_added', key_dict=keys, value_dict=values)\n",
" except Exception as e:\n",
" logger.warn(u\"Tautulli DataFactory :: Unable to execute database query for set_recently_added_item: %s.\" % e)\n",
" return False\n",
"\n",
" return True\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0.00980392156862745,
0,
0,
0.011363636363636364,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0.008928571428571428,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0.008620689655172414,
0.01,
0.009433962264150943,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008695652173913044,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.011627906976744186,
0.01,
0.009174311926605505,
0.008928571428571428,
0,
0,
0.00909090909090909,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0.011904761904761904,
0,
0,
0,
0,
0.008771929824561403,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0.010752688172043012,
0.00847457627118644,
0.00980392156862745,
0.00847457627118644,
0,
0,
0.00847457627118644,
0.00980392156862745,
0.011904761904761904,
0.012345679012345678,
0,
0,
0,
0.011235955056179775,
0,
0,
0.007518796992481203,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0.010309278350515464,
0.010752688172043012,
0,
0.00847457627118644,
0.00980392156862745,
0.00847457627118644,
0,
0,
0.00847457627118644,
0.00980392156862745,
0.011904761904761904,
0.012345679012345678,
0,
0,
0.011904761904761904,
0.011235955056179775,
0,
0,
0.0072992700729927005,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0.0078125,
0.010752688172043012,
0.00847457627118644,
0.00980392156862745,
0.00847457627118644,
0,
0,
0.00847457627118644,
0.00980392156862745,
0.011904761904761904,
0.012048192771084338,
0,
0,
0,
0.011235955056179775,
0,
0,
0.007751937984496124,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0078125,
0.010752688172043012,
0,
0.00847457627118644,
0.00980392156862745,
0.00847457627118644,
0,
0,
0.00847457627118644,
0.00980392156862745,
0.011904761904761904,
0.012048192771084338,
0,
0,
0.011904761904761904,
0.011235955056179775,
0,
0,
0.007518796992481203,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0.011764705882352941,
0.010638297872340425,
0.010752688172043012,
0.00847457627118644,
0.00980392156862745,
0.00847457627118644,
0,
0,
0.00847457627118644,
0.00980392156862745,
0.011904761904761904,
0.012345679012345678,
0,
0,
0,
0.011235955056179775,
0,
0,
0.007575757575757576,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.011764705882352941,
0.010638297872340425,
0.010752688172043012,
0,
0.00847457627118644,
0.00980392156862745,
0.00847457627118644,
0,
0,
0.00847457627118644,
0.00980392156862745,
0.011904761904761904,
0.012345679012345678,
0,
0,
0.011904761904761904,
0.011235955056179775,
0,
0,
0.007352941176470588,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0.01,
0.00909090909090909,
0,
0.00847457627118644,
0.00980392156862745,
0.00847457627118644,
0.008849557522123894,
0,
0.00847457627118644,
0.009900990099009901,
0.00980392156862745,
0.011904761904761904,
0,
0,
0,
0.011235955056179775,
0,
0,
0.007575757575757576,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0.00847457627118644,
0.00980392156862745,
0.00847457627118644,
0,
0,
0.00847457627118644,
0.00980392156862745,
0.011904761904761904,
0,
0,
0,
0.011235955056179775,
0,
0,
0.007352941176470588,
0,
0,
0,
0,
0.00980392156862745,
0.0078125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009433962264150943,
0,
0,
0,
0,
0.009615384615384616,
0.009009009009009009,
0.012195121951219513,
0.00909090909090909,
0,
0,
0.009259259259259259,
0.009174311926605505,
0,
0,
0.00847457627118644,
0.009900990099009901,
0.00980392156862745,
0.011904761904761904,
0.012195121951219513,
0.010416666666666666,
0,
0.011111111111111112,
0.010869565217391304,
0,
0,
0.008333333333333333,
0,
0,
0.007407407407407408,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0.011363636363636364,
0.014705882352941176,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0.008064516129032258,
0.011363636363636364,
0.010752688172043012,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0.021052631578947368,
0,
0,
0.011904761904761904,
0,
0,
0,
0.022222222222222223,
0,
0,
0.011904761904761904,
0,
0,
0,
0.020618556701030927,
0,
0,
0.011904761904761904,
0,
0.007246376811594203,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0.010309278350515464,
0,
0,
0.008333333333333333,
0,
0,
0.008620689655172414,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009523809523809525,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0.010416666666666666,
0,
0.00819672131147541,
0.011764705882352941,
0.0078125,
0.007633587786259542,
0,
0.008849557522123894,
0.011764705882352941,
0,
0.009708737864077669,
0.00847457627118644,
0,
0.009708737864077669,
0,
0.01,
0.00847457627118644,
0,
0,
0,
0,
0.010416666666666666,
0,
0.00819672131147541,
0.011764705882352941,
0.0078125,
0.007633587786259542,
0,
0.008849557522123894,
0.011764705882352941,
0,
0.009708737864077669,
0.00847457627118644,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0.010869565217391304,
0,
0.01020408163265306,
0.011627906976744186,
0.011235955056179775,
0,
0.01020408163265306,
0.009345794392523364,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0.011235955056179775,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0.010752688172043012,
0,
0.011764705882352941,
0,
0.012048192771084338,
0,
0.012345679012345678,
0.011494252873563218,
0.011764705882352941,
0,
0.012048192771084338,
0.011764705882352941,
0,
0.01098901098901099,
0.011764705882352941,
0.011764705882352941,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009433962264150943,
0.009433962264150943,
0.009259259259259259,
0.009708737864077669,
0,
0.009259259259259259,
0.010638297872340425,
0.009259259259259259,
0.008130081300813009,
0.00909090909090909,
0.009708737864077669,
0.01,
0.007874015748031496,
0.007936507936507936,
0.008064516129032258,
0,
0.010101010101010102,
0.00909090909090909,
0.009174311926605505,
0,
0,
0.008620689655172414,
0.008333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0.1111111111111111,
0,
0.010309278350515464,
0.008928571428571428,
0,
0.009345794392523364,
0.009009009009009009,
0,
0,
0,
0.008547008547008548,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008620689655172414,
0,
0,
0.008547008547008548,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0.00980392156862745,
0,
0,
0.008928571428571428,
0.009345794392523364,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0,
0,
0.009523809523809525,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00909090909090909,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0.00909090909090909,
0,
0,
0,
0,
0.011764705882352941,
0.012048192771084338,
0,
0,
0.010869565217391304,
0,
0.012195121951219513,
0.010309278350515464,
0,
0,
0,
0,
0.010638297872340425,
0.00909090909090909,
0,
0,
0,
0,
0.011235955056179775,
0,
0.010309278350515464,
0,
0.011494252873563218,
0.010309278350515464,
0,
0,
0,
0.008695652173913044,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0.011904761904761904,
0,
0.00847457627118644,
0,
0,
0,
0,
0,
0,
0,
0.008620689655172414,
0,
0.00909090909090909,
0.00847457627118644,
0,
0,
0,
0,
0,
0,
0.007874015748031496,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0.008403361344537815,
0,
0,
0.008130081300813009,
0,
0,
0,
0,
0,
0,
0,
0,
0.009345794392523364,
0,
0,
0,
0,
0.009433962264150943,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0.011363636363636364,
0,
0,
0.01,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008695652173913044,
0,
0.011363636363636364,
0,
0.010101010101010102,
0,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0.010526315789473684,
0,
0.015037593984962405,
0,
0.014084507042253521,
0,
0.01020408163265306,
0,
0.016260162601626018,
0,
0.015151515151515152,
0,
0,
0,
0.01834862385321101,
0,
0.016666666666666666,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0.012195121951219513,
0,
0.010869565217391304,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.016129032258064516,
0,
0,
0,
0.01,
0.009523809523809525,
0,
0.009009009009009009,
0.008928571428571428,
0.009259259259259259,
0.008771929824561403,
0,
0,
0,
0.009009009009009009,
0.010416666666666666,
0,
0.008333333333333333,
0.00847457627118644,
0.008264462809917356,
0.01680672268907563,
0.008620689655172414,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008403361344537815,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0.00819672131147541,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008547008547008548,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0.008333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0.008403361344537815,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007936507936507936,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0.00819672131147541,
0,
0,
0
] | 1,801 | 0.002122 | false |
# -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.putpattvlink import PutpattvLink
class putpattvGenreScreen(MPScreen):
def __init__(self, session):
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreenCover.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreenCover.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"0" : self.closeAll,
"ok" : self.keyOK,
"cancel" : self.keyCancel,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft
}, -1)
self['title'] = Label("putpat.tv")
self['ContentTitle'] = Label("Kanal Auswahl:")
self.keyLocked = True
self.suchString = ''
self.genreliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
# http://www.putpat.tv/ws.xml?client=miniplayer&method=Channel.all
self.genreliste.append(("--- Search ---", "callSuchen"))
self.genreliste.append(("Charts", "2"))
self.genreliste.append(("Heimat", "3"))
self.genreliste.append(("Retro", "4"))
self.genreliste.append(("2Rock", "5"))
self.genreliste.append(("Vibes", "6"))
self.genreliste.append(("Hooray!", "7"))
self.genreliste.append(("INTRO TV", "9"))
self.genreliste.append(("JAZZthing.TV", "11"))
self.genreliste.append(("Festival Guide", "12"))
self.genreliste.append(("studiVZ", "15"))
self.genreliste.append(("meinVZ", "16"))
self.genreliste.append(("MELT Festival", "29"))
self.genreliste.append(("Splash! Festival", "30"))
self.genreliste.append(("Berlin Festival", "31"))
self.genreliste.append(("Flux TV", "34"))
self.genreliste.append(("Introducing", "36"))
self.genreliste.append(("Pop10", "39"))
self.genreliste.append(("Rock Hard", "41"))
self.genreliste.append(("Sneakerfreaker", "43"))
self.genreliste.append(("Paradise TV", "45"))
self.genreliste.append(("PUTPAT one", "46"))
self.genreliste.append(("detektor.fm", "47"))
self.genreliste.append(("Party", "48"))
self.genreliste.append(("HD-Kanal", "49"))
self.genreliste.append(("Chiemsee Festival", "50"))
self.genreliste.append(("Hurricane/Southside Festival", "51"))
self.genreliste.append(("Highfield Festival", "52"))
self.genreliste.append(("M'era Luna", "53"))
self.genreliste.append(("FazeMag", "54"))
self.genreliste.append(("Seat Cupra Camp", "55"))
self.ml.setList(map(self._defaultlistcenter, self.genreliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.showInfos()
def showInfos(self):
Image = self['liste'].getCurrent()[0][1]
Image = 'http://files.putpat.tv/artwork/channelgraphics/%s/channelteaser_500.png' % Image
CoverHelper(self['coverArt']).getCover(Image)
def keyOK(self):
if self.keyLocked:
return
streamGenreName = self['liste'].getCurrent()[0][0]
if streamGenreName == "--- Search ---":
self.suchen()
else:
streamGenreLink = self['liste'].getCurrent()[0][1]
self.session.open(putpattvFilmScreen, streamGenreLink, streamGenreName)
def SuchenCallback(self, callback = None, entry = None):
if callback is not None and len(callback):
self.suchString = callback.replace(' ', '%20')
streamGenreLink = '%s' % (self.suchString)
selfGenreName = "--- Search ---"
self.session.open(putpattvFilmScreen, streamGenreLink, selfGenreName)
class putpattvFilmScreen(MPScreen, ThumbsHelper):
def __init__(self, session, CatLink, catName):
self.CatLink = CatLink
self.catName = catName
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"0" : self.closeAll,
"ok" : self.keyOK,
"cancel" : self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft
}, -1)
self['title'] = Label("Titel Auswahl")
self['ContentTitle'] = Label("Genre: %s" % self.catName)
self['F2'] = Label(_("Page"))
self.keyLocked = True
self.page = 1
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.keyLocked = True
self['name'].setText(self.catName)
self.filmliste = []
if self.catName == '--- Search ---':
url = "http://www.putpat.tv/ws.xml?limit=100&client=putpatplayer&partnerId=1&searchterm=%s&method=Asset.quickbarSearch" % (self.CatLink)
else:
url = "http://www.putpat.tv/ws.xml?method=Channel.clips&partnerId=1&client=putpatplayer&maxClips=500&channelId=%s&streamingId=tvrl&streamingMethod=http" % (self.CatLink)
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadData).addErrback(self.dataError)
def loadData(self, data):
if self.catName == '--- Search ---':
Search = re.findall('<video-file-id\stype="integer">(.*?)</video-file-id>.*?<token>(.*?)</token>.*?<description>(.*?)</description>', data, re.S)
if Search:
for (Image, Token, Title) in Search:
if len(Image) == 4:
Image = '0' + Image
Image = 'http://files.putpat.tv/artwork/posterframes/00%s/00%s/v00%s_posterframe_putpat_small.jpg' % (Image[0:3], Image, Image)
self.filmliste.append((decodeHtml(Title), None, Token, Image))
else:
Movies = re.findall('<clip>.*?<medium>(.*?)</medium>.*?<title>(.*?)</title>.*?<display-artist-title>(.*?)</display-artist-title>.*?<video-file-id\stype="integer">(.*?)</video-file-id>', data, re.S)
if Movies:
for (Url, Title, Artist, Image) in Movies:
Title = Artist + ' - ' + Title
Url = Url.replace('&','&')
if len(Image) == 4:
Image = '0' + Image
Image = 'http://files.putpat.tv/artwork/posterframes/00%s/00%s/v00%s_posterframe_putpat_small.jpg' % (Image[0:3], Image, Image)
if not (re.search('pop10_trenner.*?', Title, re.S) or re.search('Pop10 Trenner', Title, re.S) or re.search('pop10_pspot', Title, re.S) or re.search('pop10_opn_neu', Title, re.S) or re.search('PutPat Top Ten', Title, re.S)):
self.filmliste.append((decodeHtml(Title), Url, None, Image))
if len(self.filmliste) == 0:
self.filmliste.append((_("No songs found!"),'','',''))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.th_ThumbsQuery(self.filmliste, 0, 1, 3, None, None, 1, 1, mode=1)
self.showInfos()
def showInfos(self):
Image = self['liste'].getCurrent()[0][3]
CoverHelper(self['coverArt']).getCover(Image)
def keyOK(self):
if self.keyLocked:
return
self.session.open(
PutpatTvPlayer,
self.filmliste,
playIdx = self['liste'].getSelectedIndex(),
playAll = True,
listTitle = self.catName
)
class PutpatTvPlayer(SimplePlayer):
def __init__(self, session, playList, playIdx=0, playAll=False, listTitle=None):
print "PutpatTvPlayer:"
SimplePlayer.__init__(self, session, playList, playIdx=playIdx, playAll=playAll, listTitle=listTitle, ltype='putpattv')
def getVideo(self):
url = self.playList[self.playIdx][1]
Title = self.playList[self.playIdx][0]
token = self.playList[self.playIdx][2]
Image = self.playList[self.playIdx][3]
PutpattvLink(self.session).getLink(self.playStream, self.dataError, Title, url, token, Image) | [
"# -*- coding: utf-8 -*-\n",
"from Plugins.Extensions.MediaPortal.plugin import _\n",
"from Plugins.Extensions.MediaPortal.resources.imports import *\n",
"from Plugins.Extensions.MediaPortal.resources.putpattvlink import PutpattvLink\n",
"\n",
"class putpattvGenreScreen(MPScreen):\n",
"\n",
"\tdef __init__(self, session):\n",
"\t\tself.plugin_path = mp_globals.pluginPath\n",
"\t\tself.skin_path = mp_globals.pluginPath + mp_globals.skinsPath\n",
"\t\tpath = \"%s/%s/defaultGenreScreenCover.xml\" % (self.skin_path, config.mediaportal.skin.value)\n",
"\t\tif not fileExists(path):\n",
"\t\t\tpath = self.skin_path + mp_globals.skinFallback + \"/defaultGenreScreenCover.xml\"\n",
"\t\twith open(path, \"r\") as f:\n",
"\t\t\tself.skin = f.read()\n",
"\t\t\tf.close()\n",
"\t\tMPScreen.__init__(self, session)\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"0\"\t\t: self.closeAll,\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"cancel\" : self.keyCancel,\n",
"\t\t\t\"up\" : self.keyUp,\n",
"\t\t\t\"down\" : self.keyDown,\n",
"\t\t\t\"right\" : self.keyRight,\n",
"\t\t\t\"left\" : self.keyLeft\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(\"putpat.tv\")\n",
"\t\tself['ContentTitle'] = Label(\"Kanal Auswahl:\")\n",
"\n",
"\t\tself.keyLocked = True\n",
"\t\tself.suchString = ''\n",
"\n",
"\t\tself.genreliste = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tself.onLayoutFinish.append(self.layoutFinished)\n",
"\n",
"\tdef layoutFinished(self):\n",
"\t\t# http://www.putpat.tv/ws.xml?client=miniplayer&method=Channel.all\n",
"\t\tself.genreliste.append((\"--- Search ---\", \"callSuchen\"))\n",
"\t\tself.genreliste.append((\"Charts\", \"2\"))\n",
"\t\tself.genreliste.append((\"Heimat\", \"3\"))\n",
"\t\tself.genreliste.append((\"Retro\", \"4\"))\n",
"\t\tself.genreliste.append((\"2Rock\", \"5\"))\n",
"\t\tself.genreliste.append((\"Vibes\", \"6\"))\n",
"\t\tself.genreliste.append((\"Hooray!\", \"7\"))\n",
"\t\tself.genreliste.append((\"INTRO TV\", \"9\"))\n",
"\t\tself.genreliste.append((\"JAZZthing.TV\", \"11\"))\n",
"\t\tself.genreliste.append((\"Festival Guide\", \"12\"))\n",
"\t\tself.genreliste.append((\"studiVZ\", \"15\"))\n",
"\t\tself.genreliste.append((\"meinVZ\", \"16\"))\n",
"\t\tself.genreliste.append((\"MELT Festival\", \"29\"))\n",
"\t\tself.genreliste.append((\"Splash! Festival\", \"30\"))\n",
"\t\tself.genreliste.append((\"Berlin Festival\", \"31\"))\n",
"\t\tself.genreliste.append((\"Flux TV\", \"34\"))\n",
"\t\tself.genreliste.append((\"Introducing\", \"36\"))\n",
"\t\tself.genreliste.append((\"Pop10\", \"39\"))\n",
"\t\tself.genreliste.append((\"Rock Hard\", \"41\"))\n",
"\t\tself.genreliste.append((\"Sneakerfreaker\", \"43\"))\n",
"\t\tself.genreliste.append((\"Paradise TV\", \"45\"))\n",
"\t\tself.genreliste.append((\"PUTPAT one\", \"46\"))\n",
"\t\tself.genreliste.append((\"detektor.fm\", \"47\"))\n",
"\t\tself.genreliste.append((\"Party\", \"48\"))\n",
"\t\tself.genreliste.append((\"HD-Kanal\", \"49\"))\n",
"\t\tself.genreliste.append((\"Chiemsee Festival\", \"50\"))\n",
"\t\tself.genreliste.append((\"Hurricane/Southside Festival\", \"51\"))\n",
"\t\tself.genreliste.append((\"Highfield Festival\", \"52\"))\n",
"\t\tself.genreliste.append((\"M'era Luna\", \"53\"))\n",
"\t\tself.genreliste.append((\"FazeMag\", \"54\"))\n",
"\t\tself.genreliste.append((\"Seat Cupra Camp\", \"55\"))\n",
"\t\tself.ml.setList(map(self._defaultlistcenter, self.genreliste))\n",
"\t\tself.ml.moveToIndex(0)\n",
"\t\tself.keyLocked = False\n",
"\t\tself.showInfos()\n",
"\n",
"\tdef showInfos(self):\n",
"\t\tImage = self['liste'].getCurrent()[0][1]\n",
"\t\tImage = 'http://files.putpat.tv/artwork/channelgraphics/%s/channelteaser_500.png' % Image\n",
"\t\tCoverHelper(self['coverArt']).getCover(Image)\n",
"\n",
"\tdef keyOK(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\tstreamGenreName = self['liste'].getCurrent()[0][0]\n",
"\t\tif streamGenreName == \"--- Search ---\":\n",
"\t\t\tself.suchen()\n",
"\t\telse:\n",
"\t\t\tstreamGenreLink = self['liste'].getCurrent()[0][1]\n",
"\t\t\tself.session.open(putpattvFilmScreen, streamGenreLink, streamGenreName)\n",
"\n",
"\tdef SuchenCallback(self, callback = None, entry = None):\n",
"\t\tif callback is not None and len(callback):\n",
"\t\t\tself.suchString = callback.replace(' ', '%20')\n",
"\t\t\tstreamGenreLink = '%s' % (self.suchString)\n",
"\t\t\tselfGenreName = \"--- Search ---\"\n",
"\t\t\tself.session.open(putpattvFilmScreen, streamGenreLink, selfGenreName)\n",
"\n",
"class putpattvFilmScreen(MPScreen, ThumbsHelper):\n",
"\n",
"\tdef __init__(self, session, CatLink, catName):\n",
"\t\tself.CatLink = CatLink\n",
"\t\tself.catName = catName\n",
"\t\tself.plugin_path = mp_globals.pluginPath\n",
"\t\tself.skin_path = mp_globals.pluginPath + mp_globals.skinsPath\n",
"\t\tpath = \"%s/%s/defaultListWideScreen.xml\" % (self.skin_path, config.mediaportal.skin.value)\n",
"\t\tif not fileExists(path):\n",
"\t\t\tpath = self.skin_path + mp_globals.skinFallback + \"/defaultListWideScreen.xml\"\n",
"\t\twith open(path, \"r\") as f:\n",
"\t\t\tself.skin = f.read()\n",
"\t\t\tf.close()\n",
"\t\tMPScreen.__init__(self, session)\n",
"\t\tThumbsHelper.__init__(self)\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"0\"\t\t: self.closeAll,\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"cancel\" : self.keyCancel,\n",
"\t\t\t\"5\" : self.keyShowThumb,\n",
"\t\t\t\"up\" : self.keyUp,\n",
"\t\t\t\"down\" : self.keyDown,\n",
"\t\t\t\"right\" : self.keyRight,\n",
"\t\t\t\"left\" : self.keyLeft\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(\"Titel Auswahl\")\n",
"\t\tself['ContentTitle'] = Label(\"Genre: %s\" % self.catName)\n",
"\t\tself['F2'] = Label(_(\"Page\"))\n",
"\n",
"\t\tself.keyLocked = True\n",
"\t\tself.page = 1\n",
"\n",
"\t\tself.filmliste = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tself.onLayoutFinish.append(self.loadPage)\n",
"\n",
"\tdef loadPage(self):\n",
"\t\tself.keyLocked = True\n",
"\t\tself['name'].setText(self.catName)\n",
"\t\tself.filmliste = []\n",
"\t\tif self.catName == '--- Search ---':\n",
"\t\t\turl = \"http://www.putpat.tv/ws.xml?limit=100&client=putpatplayer&partnerId=1&searchterm=%s&method=Asset.quickbarSearch\" % (self.CatLink)\n",
"\t\telse:\n",
"\t\t\turl = \"http://www.putpat.tv/ws.xml?method=Channel.clips&partnerId=1&client=putpatplayer&maxClips=500&channelId=%s&streamingId=tvrl&streamingMethod=http\" % (self.CatLink)\n",
"\t\tgetPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadData).addErrback(self.dataError)\n",
"\n",
"\tdef loadData(self, data):\n",
"\t\tif self.catName == '--- Search ---':\n",
"\t\t\tSearch = re.findall('<video-file-id\\stype=\"integer\">(.*?)</video-file-id>.*?<token>(.*?)</token>.*?<description>(.*?)</description>', data, re.S)\n",
"\t\t\tif Search:\n",
"\t\t\t\tfor (Image, Token, Title) in Search:\n",
"\t\t\t\t\tif len(Image) == 4:\n",
"\t\t\t\t\t\tImage = '0' + Image\n",
"\t\t\t\t\tImage = 'http://files.putpat.tv/artwork/posterframes/00%s/00%s/v00%s_posterframe_putpat_small.jpg' % (Image[0:3], Image, Image)\n",
"\t\t\t\t\tself.filmliste.append((decodeHtml(Title), None, Token, Image))\n",
"\t\telse:\n",
"\t\t\tMovies = re.findall('<clip>.*?<medium>(.*?)</medium>.*?<title>(.*?)</title>.*?<display-artist-title>(.*?)</display-artist-title>.*?<video-file-id\\stype=\"integer\">(.*?)</video-file-id>', data, re.S)\n",
"\t\t\tif Movies:\n",
"\t\t\t\tfor (Url, Title, Artist, Image) in Movies:\n",
"\t\t\t\t\tTitle = Artist + ' - ' + Title\n",
"\t\t\t\t\tUrl = Url.replace('&','&')\n",
"\t\t\t\t\tif len(Image) == 4:\n",
"\t\t\t\t\t\tImage = '0' + Image\n",
"\t\t\t\t\tImage = 'http://files.putpat.tv/artwork/posterframes/00%s/00%s/v00%s_posterframe_putpat_small.jpg' % (Image[0:3], Image, Image)\n",
"\t\t\t\t\tif not (re.search('pop10_trenner.*?', Title, re.S) or re.search('Pop10 Trenner', Title, re.S) or re.search('pop10_pspot', Title, re.S) or re.search('pop10_opn_neu', Title, re.S) or re.search('PutPat Top Ten', Title, re.S)):\n",
"\t\t\t\t\t\tself.filmliste.append((decodeHtml(Title), Url, None, Image))\n",
"\t\tif len(self.filmliste) == 0:\n",
"\t\t\tself.filmliste.append((_(\"No songs found!\"),'','',''))\n",
"\t\tself.ml.setList(map(self._defaultlistleft, self.filmliste))\n",
"\t\tself.ml.moveToIndex(0)\n",
"\t\tself.keyLocked = False\n",
"\t\tself.th_ThumbsQuery(self.filmliste, 0, 1, 3, None, None, 1, 1, mode=1)\n",
"\t\tself.showInfos()\n",
"\n",
"\tdef showInfos(self):\n",
"\t\tImage = self['liste'].getCurrent()[0][3]\n",
"\t\tCoverHelper(self['coverArt']).getCover(Image)\n",
"\n",
"\tdef keyOK(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\tself.session.open(\n",
"\t\t\tPutpatTvPlayer,\n",
"\t\t\tself.filmliste,\n",
"\t\t\tplayIdx = self['liste'].getSelectedIndex(),\n",
"\t\t\tplayAll = True,\n",
"\t\t\tlistTitle = self.catName\n",
"\t\t\t)\n",
"\n",
"class PutpatTvPlayer(SimplePlayer):\n",
"\n",
"\tdef __init__(self, session, playList, playIdx=0, playAll=False, listTitle=None):\n",
"\t\tprint \"PutpatTvPlayer:\"\n",
"\t\tSimplePlayer.__init__(self, session, playList, playIdx=playIdx, playAll=playAll, listTitle=listTitle, ltype='putpattv')\n",
"\n",
"\tdef getVideo(self):\n",
"\t\turl = self.playList[self.playIdx][1]\n",
"\t\tTitle = self.playList[self.playIdx][0]\n",
"\t\ttoken = self.playList[self.playIdx][2]\n",
"\t\tImage = self.playList[self.playIdx][3]\n",
"\t\tPutpattvLink(self.session).getLink(self.playStream, self.dataError, Title, url, token, Image)"
] | [
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0.03333333333333333,
0.023255813953488372,
0.015625,
0.021052631578947368,
0.037037037037037035,
0.023809523809523808,
0.034482758620689655,
0.041666666666666664,
0.07692307692307693,
0.02857142857142857,
0,
0.020833333333333332,
0.08,
0.09090909090909091,
0.06666666666666667,
0.09090909090909091,
0.07692307692307693,
0.07142857142857142,
0.08,
0.1111111111111111,
0,
0.02702702702702703,
0.02040816326530612,
0,
0.041666666666666664,
0.043478260869565216,
0,
0.043478260869565216,
0.023809523809523808,
0.038461538461538464,
0,
0.02,
0,
0.037037037037037035,
0.014492753623188406,
0.01694915254237288,
0.023809523809523808,
0.023809523809523808,
0.024390243902439025,
0.024390243902439025,
0.024390243902439025,
0.023255813953488372,
0.022727272727272728,
0.02040816326530612,
0.0196078431372549,
0.022727272727272728,
0.023255813953488372,
0.02,
0.018867924528301886,
0.019230769230769232,
0.022727272727272728,
0.020833333333333332,
0.023809523809523808,
0.021739130434782608,
0.0196078431372549,
0.020833333333333332,
0.02127659574468085,
0.020833333333333332,
0.023809523809523808,
0.022222222222222223,
0.018518518518518517,
0.015384615384615385,
0.01818181818181818,
0.02127659574468085,
0.022727272727272728,
0.019230769230769232,
0.015384615384615385,
0.04,
0.04,
0.05263157894736842,
0,
0.045454545454545456,
0.023255813953488372,
0.021739130434782608,
0.020833333333333332,
0,
0.05555555555555555,
0.047619047619047616,
0.1,
0.018867924528301886,
0.023809523809523808,
0.058823529411764705,
0.125,
0.018518518518518517,
0.013333333333333334,
0,
0.08620689655172414,
0.022222222222222223,
0.02,
0.021739130434782608,
0.027777777777777776,
0.0136986301369863,
0,
0.02,
0,
0.020833333333333332,
0.04,
0.04,
0.023255813953488372,
0.015625,
0.021505376344086023,
0.037037037037037035,
0.024390243902439025,
0.034482758620689655,
0.041666666666666664,
0.07692307692307693,
0.02857142857142857,
0.03333333333333333,
0,
0.020833333333333332,
0.08,
0.09090909090909091,
0.06666666666666667,
0.07142857142857142,
0.09090909090909091,
0.07692307692307693,
0.07142857142857142,
0.08,
0.1111111111111111,
0,
0.024390243902439025,
0.01694915254237288,
0.03125,
0,
0.041666666666666664,
0.0625,
0,
0.045454545454545456,
0.023809523809523808,
0.038461538461538464,
0,
0.022727272727272728,
0,
0.047619047619047616,
0.041666666666666664,
0.02702702702702703,
0.045454545454545456,
0.02564102564102564,
0.014285714285714285,
0.125,
0.011560693641618497,
0.022900763358778626,
0,
0.037037037037037035,
0.02564102564102564,
0.020134228187919462,
0.07142857142857142,
0.024390243902439025,
0.04,
0.038461538461538464,
0.015037593984962405,
0.014705882352941176,
0.125,
0.014925373134328358,
0.07142857142857142,
0.02127659574468085,
0.027777777777777776,
0.05555555555555555,
0.04,
0.038461538461538464,
0.015037593984962405,
0.008733624454148471,
0.014925373134328358,
0.03225806451612903,
0.06896551724137931,
0.016129032258064516,
0.04,
0.04,
0.0136986301369863,
0.05263157894736842,
0,
0.045454545454545456,
0.023255813953488372,
0.020833333333333332,
0,
0.05555555555555555,
0.047619047619047616,
0.1,
0.047619047619047616,
0.05263157894736842,
0.05263157894736842,
0.06382978723404255,
0.15789473684210525,
0.10714285714285714,
0.2,
0,
0.027777777777777776,
0,
0.024390243902439025,
0.038461538461538464,
0.01639344262295082,
0,
0.047619047619047616,
0.02564102564102564,
0.024390243902439025,
0.024390243902439025,
0.024390243902439025,
0.031578947368421054
] | 205 | 0.034459 | false |
# coding=utf-8
# EPM Plugins
import Plugins as ep
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
@ep.DatasetFunctionPlugin('Visualizar Histograma', 1)#Decorator criado pela Elipse para que o Plugin seja executado pelo EPM Studio.
def histograma():
# Verifica se existe apenas uma pena selecionada no Dataset Analysis.
if len(ep.EpmDatasetPens.SelectedPens) != 1:
ep.showMsgBox("EPM - Histograma",
"Execute a consulta do Dataset Analysis \n e selecione uma pena antes de aplicar a funcao.",
"Warning")
return 0
# Passa para a variavel 'epm_tag' a variavel selecionada.
epm_tag = ep.EpmDatasetPens.SelectedPens[0].Values
x = epm_tag["Value"]
mu = epm_tag["Value"].mean() # Mediana da amostra
sigma = epm_tag["Value"].std() # Desvio padrao da amostra
# Quantidade de valores que desejamos agrupar. (barras verdes)
num_bins = 20
# Gera o histograma
n, bins, ignore = plt.hist(x, num_bins, normed=1, facecolor='green', alpha=0.5)
# Adiciona uma linha indicando a curva de normalidade (vermelha tracejada)
y = mlab.normpdf(bins, mu, sigma)
plt.plot(bins, y, 'r--')
plt.title("Histograma \n $\mu=" + "{:.3f}".format(mu) + "$, $\sigma=" + "{:.3f}".format(sigma) + "$")
# Mostra na tela o gráfico
plt.subplots_adjust(left=0.15)
plt.show()
| [
"# coding=utf-8\n",
"\n",
"# EPM Plugins\n",
"import Plugins as ep\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib.mlab as mlab\n",
"\n",
"\n",
"@ep.DatasetFunctionPlugin('Visualizar Histograma', 1)#Decorator criado pela Elipse para que o Plugin seja executado pelo EPM Studio.\n",
"def histograma():\n",
" # Verifica se existe apenas uma pena selecionada no Dataset Analysis.\n",
" if len(ep.EpmDatasetPens.SelectedPens) != 1:\n",
" ep.showMsgBox(\"EPM - Histograma\",\n",
" \"Execute a consulta do Dataset Analysis \\n e selecione uma pena antes de aplicar a funcao.\",\n",
" \"Warning\")\n",
" return 0\n",
"\n",
" # Passa para a variavel 'epm_tag' a variavel selecionada.\n",
" epm_tag = ep.EpmDatasetPens.SelectedPens[0].Values\n",
" x = epm_tag[\"Value\"]\n",
" mu = epm_tag[\"Value\"].mean() # Mediana da amostra\n",
" sigma = epm_tag[\"Value\"].std() # Desvio padrao da amostra\n",
"\n",
" # Quantidade de valores que desejamos agrupar. (barras verdes)\n",
" num_bins = 20\n",
" # Gera o histograma\n",
" n, bins, ignore = plt.hist(x, num_bins, normed=1, facecolor='green', alpha=0.5)\n",
" # Adiciona uma linha indicando a curva de normalidade (vermelha tracejada)\n",
" y = mlab.normpdf(bins, mu, sigma)\n",
" plt.plot(bins, y, 'r--')\n",
" plt.title(\"Histograma \\n $\\mu=\" + \"{:.3f}\".format(mu) + \"$, $\\sigma=\" + \"{:.3f}\".format(sigma) + \"$\")\n",
"\n",
" # Mostra na tela o gráfico\n",
" plt.subplots_adjust(left=0.15)\n",
" plt.show()\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022556390977443608,
0,
0,
0,
0,
0.008695652173913044,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0.028037383177570093,
0,
0,
0,
0,
1
] | 37 | 0.028951 | false |
#!/usr/bin/env python3
# -*- coding, utf-8 -*-
# TODO
# TODO
# Copyright © 2014 Василий Горохов-Апельсинов
# This file is part of code for my bachelor's thesis.
#
# Code for my bachelor's thesis is free software: you can redistribute
# it and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Code for my bachelor's thesis is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with code for my bachelor's thesis. If not, see
# <http://www.gnu.org/licenses/>.
# Requirements: Python 3 (works with 3.3), MatPlotLib, Pandas
# Data
tableName = 'MICEX_SBER';
# Code
outFileName = tableName + '.count_of_transactions.CSV'
from common.connect import connect
conn = connect()
import matplotlib.pyplot as plt
import pandas as pd
print('Getting data...')
df = pd.read_sql('''
SELECT
day,
hour,
COUNT(id) AS "'''+tableName+'''"
FROM
(
SELECT
day::date,
hour
FROM
generate_series((SELECT min(date_trunc('day', moment)) FROM "MICEX_SBER"), (SELECT max(date_trunc('day', moment)) FROM "MICEX_SBER"), '1 day'::interval) AS days(day),
generate_series(0, 23) AS hours(hour)
) AS intervals
LEFT JOIN "'''+tableName+'''" AS data
ON
intervals.day = date_trunc('day', data.moment)
AND intervals.hour = extract(HOUR FROM data.moment)
GROUP BY
day, hour
ORDER BY
day, hour
''', conn)
df = df.pivot(index = 'hour', columns = 'day', values = tableName)
print('Writing output to `{:s}`...'.format(outFileName))
df.to_csv(outFileName, sep = ';')
print('Done.')
del conn
| [
"#!/usr/bin/env python3\n",
"# -*- coding, utf-8 -*-\n",
"\n",
"# TODO\n",
"# TODO\n",
"# Copyright © 2014 Василий Горохов-Апельсинов\n",
"\n",
"# This file is part of code for my bachelor's thesis.\n",
"#\n",
"# Code for my bachelor's thesis is free software: you can redistribute\n",
"# it and/or modify it under the terms of the GNU General Public License\n",
"# as published by the Free Software Foundation, either version 3 of the\n",
"# License, or (at your option) any later version.\n",
"#\n",
"# Code for my bachelor's thesis is distributed in the hope that it will\n",
"# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty\n",
"# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with code for my bachelor's thesis. If not, see\n",
"# <http://www.gnu.org/licenses/>.\n",
"\n",
"# Requirements: Python 3 (works with 3.3), MatPlotLib, Pandas\n",
"\n",
"\n",
"# Data\n",
"tableName = 'MICEX_SBER';\n",
"\n",
"\n",
"# Code\n",
"outFileName = tableName + '.count_of_transactions.CSV'\n",
"from common.connect import connect\n",
"conn = connect()\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import pandas as pd\n",
"\n",
"print('Getting data...')\n",
"df = pd.read_sql('''\n",
"\tSELECT\n",
"\t\tday,\n",
"\t\thour,\n",
"\t\tCOUNT(id) AS \"'''+tableName+'''\"\n",
"\tFROM\n",
"\t\t(\n",
"\t\t\tSELECT\n",
"\t\t\t\tday::date,\n",
"\t\t\t\thour\n",
"\t\t\tFROM\n",
"\t\t\t\tgenerate_series((SELECT min(date_trunc('day', moment)) FROM \"MICEX_SBER\"), (SELECT max(date_trunc('day', moment)) FROM \"MICEX_SBER\"), '1 day'::interval) AS days(day),\n",
"\t\t\t\tgenerate_series(0, 23) AS hours(hour)\n",
"\t\t) AS intervals\n",
"\t\tLEFT JOIN \"'''+tableName+'''\" AS data\n",
"\t\tON\n",
"\t\tintervals.day = date_trunc('day', data.moment)\n",
"\t\tAND intervals.hour = extract(HOUR FROM data.moment)\n",
"\tGROUP BY\n",
"\t\tday, hour\n",
"\tORDER BY\n",
"\t\tday, hour\n",
"''', conn)\n",
"df = df.pivot(index = 'hour', columns = 'day', values = tableName)\n",
"\n",
"print('Writing output to `{:s}`...'.format(outFileName))\n",
"df.to_csv(outFileName, sep = ';')\n",
"\n",
"print('Done.')\n",
"del conn\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0.03125,
0.05,
0,
0,
0,
0.125,
0.14285714285714285,
0.125,
0.02857142857142857,
0.16666666666666666,
0.25,
0.1,
0.06666666666666667,
0.1111111111111111,
0.125,
0.011695906432748537,
0.023809523809523808,
0.058823529411764705,
0.025,
0.2,
0.02040816326530612,
0.018518518518518517,
0.1,
0.08333333333333333,
0.1,
0.08333333333333333,
0,
0.08955223880597014,
0,
0,
0.058823529411764705,
0,
0,
0
] | 69 | 0.032789 | false |
# ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
def main():
import os
import sys
import unittest
from optparse import OptionParser
parser = OptionParser(prog = 'visualstudio_py_testlauncher', usage = 'Usage: %prog [<option>] <test names>... ')
parser.add_option('-s', '--secret', metavar='<secret>', help='restrict server to only allow clients that specify <secret> when connecting')
parser.add_option('-p', '--port', type='int', metavar='<port>', help='listen for debugger connections on <port>')
parser.add_option('-x', '--mixed-mode', action='store_true', help='wait for mixed-mode debugger to attach')
parser.add_option('-t', '--test', type='str', dest='tests', action='append', help='specifies a test to run')
parser.add_option('-m', '--module', type='str', help='name of the module to import the tests from')
(opts, _) = parser.parse_args()
sys.path[0] = os.getcwd()
if opts.secret and opts.port:
from ptvsd.visualstudio_py_debugger import DONT_DEBUG, DEBUG_ENTRYPOINTS, get_code
from ptvsd.attach_server import DEFAULT_PORT, enable_attach, wait_for_attach
DONT_DEBUG.append(os.path.normcase(__file__))
DEBUG_ENTRYPOINTS.add(get_code(main))
enable_attach(opts.secret, ('127.0.0.1', getattr(opts, 'port', DEFAULT_PORT)), redirect_output = True)
wait_for_attach()
elif opts.mixed_mode:
# For mixed-mode attach, there's no ptvsd and hence no wait_for_attach(),
# so we have to use Win32 API in a loop to do the same thing.
from time import sleep
from ctypes import windll, c_char
while True:
if windll.kernel32.IsDebuggerPresent() != 0:
break
sleep(0.1)
try:
debugger_helper = windll['Microsoft.PythonTools.Debugger.Helper.x86.dll']
except WinError:
debugger_helper = windll['Microsoft.PythonTools.Debugger.Helper.x64.dll']
isTracing = c_char.in_dll(debugger_helper, "isTracing")
while True:
if isTracing.value != 0:
break
sleep(0.1)
__import__(opts.module)
module = sys.modules[opts.module]
test = unittest.defaultTestLoader.loadTestsFromNames(opts.tests, module)
runner = unittest.TextTestRunner(verbosity=0)
result = runner.run(test)
sys.exit(not result.wasSuccessful())
if __name__ == '__main__':
main()
| [
" # ############################################################################\n",
" #\n",
" # Copyright (c) Microsoft Corporation. \n",
" #\n",
" # This source code is subject to terms and conditions of the Apache License, Version 2.0. A \n",
" # copy of the license can be found in the License.html file at the root of this distribution. If \n",
" # you cannot locate the Apache License, Version 2.0, please send an email to \n",
" # vspython@microsoft.com. By using this source code in any fashion, you are agreeing to be bound \n",
" # by the terms of the Apache License, Version 2.0.\n",
" #\n",
" # You must not remove this notice, or any other, from this software.\n",
" #\n",
" # ###########################################################################\n",
"\n",
"def main():\n",
" import os\n",
" import sys\n",
" import unittest\n",
" from optparse import OptionParser\n",
" \n",
" parser = OptionParser(prog = 'visualstudio_py_testlauncher', usage = 'Usage: %prog [<option>] <test names>... ')\n",
" parser.add_option('-s', '--secret', metavar='<secret>', help='restrict server to only allow clients that specify <secret> when connecting')\n",
" parser.add_option('-p', '--port', type='int', metavar='<port>', help='listen for debugger connections on <port>')\n",
" parser.add_option('-x', '--mixed-mode', action='store_true', help='wait for mixed-mode debugger to attach')\n",
" parser.add_option('-t', '--test', type='str', dest='tests', action='append', help='specifies a test to run')\n",
" parser.add_option('-m', '--module', type='str', help='name of the module to import the tests from')\n",
" \n",
" (opts, _) = parser.parse_args()\n",
" \n",
" sys.path[0] = os.getcwd()\n",
" \n",
" if opts.secret and opts.port:\n",
" from ptvsd.visualstudio_py_debugger import DONT_DEBUG, DEBUG_ENTRYPOINTS, get_code\n",
" from ptvsd.attach_server import DEFAULT_PORT, enable_attach, wait_for_attach\n",
"\n",
" DONT_DEBUG.append(os.path.normcase(__file__))\n",
" DEBUG_ENTRYPOINTS.add(get_code(main))\n",
"\n",
" enable_attach(opts.secret, ('127.0.0.1', getattr(opts, 'port', DEFAULT_PORT)), redirect_output = True)\n",
" wait_for_attach()\n",
" elif opts.mixed_mode:\n",
" # For mixed-mode attach, there's no ptvsd and hence no wait_for_attach(), \n",
" # so we have to use Win32 API in a loop to do the same thing.\n",
" from time import sleep\n",
" from ctypes import windll, c_char\n",
" while True:\n",
" if windll.kernel32.IsDebuggerPresent() != 0:\n",
" break\n",
" sleep(0.1)\n",
" try:\n",
" debugger_helper = windll['Microsoft.PythonTools.Debugger.Helper.x86.dll']\n",
" except WinError:\n",
" debugger_helper = windll['Microsoft.PythonTools.Debugger.Helper.x64.dll']\n",
" isTracing = c_char.in_dll(debugger_helper, \"isTracing\")\n",
" while True:\n",
" if isTracing.value != 0:\n",
" break\n",
" sleep(0.1)\n",
" \n",
" __import__(opts.module)\n",
" module = sys.modules[opts.module]\n",
" test = unittest.defaultTestLoader.loadTestsFromNames(opts.tests, module)\n",
" runner = unittest.TextTestRunner(verbosity=0)\n",
" \n",
" result = runner.run(test)\n",
" sys.exit(not result.wasSuccessful())\n",
"\n",
"if __name__ == '__main__':\n",
" main()\n"
] | [
0.025,
0.6666666666666666,
0.07317073170731707,
0.6666666666666666,
0.0425531914893617,
0.04040404040404041,
0.0379746835443038,
0.04040404040404041,
0.038461538461538464,
0.6666666666666666,
0.02857142857142857,
0.6666666666666666,
0.02531645569620253,
0,
0,
0,
0,
0,
0,
0.2,
0.042735042735042736,
0.006944444444444444,
0.00847457627118644,
0.008928571428571428,
0.008849557522123894,
0.009615384615384616,
0.2,
0,
0.2,
0,
0.2,
0,
0.01098901098901099,
0.011764705882352941,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0.024096385542168676,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0.2,
0,
0,
0,
0.037037037037037035,
0
] | 69 | 0.062276 | false |
# -*- coding: utf-8 -*-
# Module: context_search
# License: GPL v.3 https://www.gnu.org/copyleft/gpl.html
import xbmc
import xbmcgui
import re
from unitedsearch import UnitedSearch, plugin
_ = plugin.initialize_gettext()
us = UnitedSearch()
def main():
Label = xbmc.getInfoLabel('ListItem.Label')
Title = xbmc.getInfoLabel('ListItem.Title')
TVShowTitle = xbmc.getInfoLabel('ListItem.TVShowTitle')
ChannelName = xbmc.getInfoLabel('ListItem.ChannelName')
if TVShowTitle:
keyword = TVShowTitle
elif Title:
keyword = Title
else:
keyword = Label
keyword = re.sub(r'\([^>]*\)', '', keyword)
keyword = re.sub(r'\[[^>]*\]', '', keyword)
keyword = keyword.strip()
keywords = keyword.split('/')
plugin.log_error(keywords)
if len(keywords) > 1:
for i, cur_keyword in enumerate(keywords):
keywords[i] = cur_keyword.strip()
keywords.insert(0, keyword)
ret = xbmcgui.Dialog().select(_('Select Title'), keywords)
if ret > 0:
keyword = keywords[ret]
else:
keyword = ''
cont_edit_keyword = plugin.get_setting('cont_edit_keyword')
if keyword and cont_edit_keyword:
kbd = xbmc.Keyboard()
kbd.setDefault(keyword)
kbd.setHeading(_('Search'))
kbd.doModal()
if kbd.isConfirmed():
keyword = kbd.getText()
else:
keyword = ''
if keyword:
params = {'keyword': keyword,
'only_search': 'True',
}
us.search(params)
url = plugin.get_url(action='search_results', item=0)
if ChannelName:
xbmc.executebuiltin("ActivateWindow(videos, %s)" % (url))
else:
xbmc.executebuiltin('Container.Update("%s")' % url)
if __name__ == '__main__':
main() | [
"# -*- coding: utf-8 -*-\n",
"# Module: context_search\n",
"# License: GPL v.3 https://www.gnu.org/copyleft/gpl.html\n",
"\n",
"import xbmc\n",
"import xbmcgui\n",
"import re\n",
"\n",
"from unitedsearch import UnitedSearch, plugin\n",
"\n",
"_ = plugin.initialize_gettext()\n",
"\n",
"us = UnitedSearch()\n",
"\n",
"def main():\n",
" Label = xbmc.getInfoLabel('ListItem.Label')\n",
" Title = xbmc.getInfoLabel('ListItem.Title')\n",
" TVShowTitle = xbmc.getInfoLabel('ListItem.TVShowTitle')\n",
" ChannelName = xbmc.getInfoLabel('ListItem.ChannelName')\n",
"\n",
" if TVShowTitle:\n",
" keyword = TVShowTitle\n",
" elif Title:\n",
" keyword = Title\n",
" else:\n",
" keyword = Label\n",
"\n",
" keyword = re.sub(r'\\([^>]*\\)', '', keyword)\n",
" keyword = re.sub(r'\\[[^>]*\\]', '', keyword)\n",
" keyword = keyword.strip()\n",
"\n",
" keywords = keyword.split('/')\n",
" plugin.log_error(keywords)\n",
" if len(keywords) > 1:\n",
" for i, cur_keyword in enumerate(keywords):\n",
" keywords[i] = cur_keyword.strip()\n",
"\n",
" keywords.insert(0, keyword)\n",
" ret = xbmcgui.Dialog().select(_('Select Title'), keywords)\n",
" if ret > 0:\n",
" keyword = keywords[ret]\n",
" else:\n",
" keyword = ''\n",
"\n",
" cont_edit_keyword = plugin.get_setting('cont_edit_keyword')\n",
"\n",
" if keyword and cont_edit_keyword:\n",
" kbd = xbmc.Keyboard()\n",
" kbd.setDefault(keyword)\n",
" kbd.setHeading(_('Search'))\n",
" kbd.doModal()\n",
" if kbd.isConfirmed():\n",
" keyword = kbd.getText()\n",
" else:\n",
" keyword = ''\n",
"\n",
" if keyword:\n",
" params = {'keyword': keyword,\n",
" 'only_search': 'True',\n",
" }\n",
" us.search(params)\n",
"\n",
" url = plugin.get_url(action='search_results', item=0)\n",
" if ChannelName:\n",
" xbmc.executebuiltin(\"ActivateWindow(videos, %s)\" % (url))\n",
" else:\n",
" xbmc.executebuiltin('Container.Update(\"%s\")' % url)\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" main()"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0.018518518518518517,
0.018518518518518517,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1
] | 71 | 0.003104 | false |
#!/usr/bin/env python
##This Python file uses the following encoding: utf-8
##
## This file is part of the Ezhil Language project.
##
## (C) 2017 Muthiah Annamalai
## Licensed under GPL Version 3
## Ezhil Language Foundation
from __future__ import print_function
import os
import codecs
import gi
gi.require_version('Gtk','3.0')
from gi.repository import Gtk, GLib
from iyakki import MPRunner
from random import randint
_DEBUG = False
# This class will show a success/failure error message:
class PopupWindow:
@staticmethod
def display_message(window,success_flag,text_msg):
passfail = [u"பிழையுடன்",u"சரியாக"]
dialog = Gtk.MessageDialog(window, 0, Gtk.MessageType.INFO,
Gtk.ButtonsType.OK_CANCEL,u"எழில் கீற்று இயக்கியதில் %s இயக்கி முடிந்தது."%passfail[int(success_flag)])
dialog.format_secondary_text(text_msg)
dialog.set_position(Gtk.WindowPosition.CENTER_ALWAYS)
response = dialog.run()
dialog.destroy() #OK or Cancel don't matter
#if response == Gtk.ResponseType.CANCEL:
#pass
#elif response == Gtk.ResponseType.OK:
#self.settings.set_license_accepted()
return False
# This class adds two items to popup menuitem
#
class PopupForTextView(object):
MODES = frozenset(['EXECUTE_SELECTION','SHOW_HELP'])
def __init__(self,text_view,mode,ref=None):
object.__init__(self)
self.text_view = text_view
assert( mode in PopupForTextView.MODES)
self.mode = mode
self.text_view.connect("populate-popup",PopupForTextView.populateContextMenu,self)
self.curr_row = ref and ref.curr_row+2 or 100
self.sepitem = None
def get_selected_text(self):
tb = self.text_view.get_buffer()
try:
(start,end) = tb.get_selection_bounds()
return tb.get_text(start,end,True)
except Exception as ex:
return None
def add_separator(self,popup):
self.sepitem = Gtk.SeparatorMenuItem.new()
popup.attach(self.sepitem,0,1,self.curr_row,self.curr_row+1)
self.sepitem.show()
self.curr_row += 1
def get_mode_callback_label(self):
if self.mode == 'EXECUTE_SELECTION':
return (u"இயக்கு",PopupForTextView.executeCallback)
else:
return (u"உதவி குறிப்பு",PopupForTextView.searchCallback)
@staticmethod
def searchCallback(menu_item,user_data):
assert( isinstance(user_data,PopupForTextView) )
selection = user_data.get_selected_text()
if not selection:
return
#print(u"search/help callback [%s]"%selection)
@staticmethod
def executeCallback(menu_item,user_data):
assert( isinstance(user_data,PopupForTextView) )
selection = user_data.get_selected_text()
if not selection:
return
#print(u"execute callback on [%s]"%selection)
filename = "tmp_%d.ezhil"%randint(0,10000)
if _DEBUG: print(u"dummy file => %s"%filename)
with codecs.open(filename,"wb") as fp:
fp.write(u"# Ezhil code snippet\n")
fp.write(selection)
window = Gtk.Window.new(Gtk.WindowType.TOPLEVEL)
try:
runner = MPRunner(timeout=10)
GLib.idle_add( lambda : Gtk.events_pending() and Gtk.main_iteration() )
runner.run(filename)
if _DEBUG: print(runner.res_std_out)
PopupWindow.display_message(window,runner.is_success,runner.res_std_out)
except Exception as ioe:
if _DEBUG: print("Exception: ",str(ioe))
PopupWindow.display_message(window=window,success_flag=False,text_msg=str(ioe))
window.destroy()
os.unlink(filename)
@staticmethod
def populateContextMenu(text_view,popup,user_data):
assert( isinstance(user_data,PopupForTextView) )
label,cb = user_data.get_mode_callback_label()
exmnuitem = Gtk.MenuItem.new_with_label(label)
user_data.add_separator(popup)
top,bot = user_data.curr_row,user_data.curr_row+1
user_data.curr_row += 1
popup.attach(exmnuitem,0,1,top,bot)
exmnuitem.show()
exmnuitem.connect("activate",cb,user_data)
| [
"#!/usr/bin/env python\n",
"##This Python file uses the following encoding: utf-8\n",
"##\n",
"## This file is part of the Ezhil Language project.\n",
"## \n",
"## (C) 2017 Muthiah Annamalai\n",
"## Licensed under GPL Version 3\n",
"## Ezhil Language Foundation\n",
"from __future__ import print_function\n",
"import os\n",
"import codecs\n",
"\n",
"import gi\n",
"gi.require_version('Gtk','3.0')\n",
"from gi.repository import Gtk, GLib\n",
"\n",
"from iyakki import MPRunner\n",
"from random import randint\n",
"_DEBUG = False\n",
"\n",
"# This class will show a success/failure error message:\n",
"class PopupWindow:\n",
" @staticmethod\n",
" def display_message(window,success_flag,text_msg):\n",
" passfail = [u\"பிழையுடன்\",u\"சரியாக\"]\n",
" dialog = Gtk.MessageDialog(window, 0, Gtk.MessageType.INFO,\n",
" Gtk.ButtonsType.OK_CANCEL,u\"எழில் கீற்று இயக்கியதில் %s இயக்கி முடிந்தது.\"%passfail[int(success_flag)])\n",
" dialog.format_secondary_text(text_msg)\n",
" dialog.set_position(Gtk.WindowPosition.CENTER_ALWAYS)\n",
" response = dialog.run()\n",
" dialog.destroy() #OK or Cancel don't matter\n",
" #if response == Gtk.ResponseType.CANCEL:\n",
" #pass\n",
" #elif response == Gtk.ResponseType.OK:\n",
" #self.settings.set_license_accepted()\n",
" return False\n",
"\n",
"# This class adds two items to popup menuitem\n",
"# \n",
"class PopupForTextView(object):\n",
" MODES = frozenset(['EXECUTE_SELECTION','SHOW_HELP'])\n",
" def __init__(self,text_view,mode,ref=None):\n",
" object.__init__(self)\n",
" self.text_view = text_view\n",
" assert( mode in PopupForTextView.MODES)\n",
" self.mode = mode\n",
" self.text_view.connect(\"populate-popup\",PopupForTextView.populateContextMenu,self)\n",
" self.curr_row = ref and ref.curr_row+2 or 100\n",
" self.sepitem = None\n",
" \n",
" def get_selected_text(self):\n",
" tb = self.text_view.get_buffer()\n",
" try:\n",
" (start,end) = tb.get_selection_bounds()\n",
" return tb.get_text(start,end,True)\n",
" except Exception as ex:\n",
" return None\n",
" \n",
" def add_separator(self,popup):\n",
" self.sepitem = Gtk.SeparatorMenuItem.new()\n",
" popup.attach(self.sepitem,0,1,self.curr_row,self.curr_row+1)\n",
" self.sepitem.show()\n",
" self.curr_row += 1\n",
" \n",
" def get_mode_callback_label(self):\n",
" if self.mode == 'EXECUTE_SELECTION':\n",
" return (u\"இயக்கு\",PopupForTextView.executeCallback)\n",
" else:\n",
" return (u\"உதவி குறிப்பு\",PopupForTextView.searchCallback)\n",
" \n",
" @staticmethod\n",
" def searchCallback(menu_item,user_data):\n",
" assert( isinstance(user_data,PopupForTextView) )\n",
" selection = user_data.get_selected_text()\n",
" if not selection:\n",
" return\n",
" #print(u\"search/help callback [%s]\"%selection)\n",
" \n",
" @staticmethod\n",
" def executeCallback(menu_item,user_data):\n",
" assert( isinstance(user_data,PopupForTextView) )\n",
" selection = user_data.get_selected_text()\n",
" if not selection:\n",
" return\n",
" #print(u\"execute callback on [%s]\"%selection)\n",
" \n",
" filename = \"tmp_%d.ezhil\"%randint(0,10000)\n",
" if _DEBUG: print(u\"dummy file => %s\"%filename)\n",
" with codecs.open(filename,\"wb\") as fp:\n",
" fp.write(u\"# Ezhil code snippet\\n\")\n",
" fp.write(selection)\n",
" window = Gtk.Window.new(Gtk.WindowType.TOPLEVEL)\n",
" try:\n",
" runner = MPRunner(timeout=10)\n",
" GLib.idle_add( lambda : Gtk.events_pending() and Gtk.main_iteration() )\n",
" runner.run(filename)\n",
" if _DEBUG: print(runner.res_std_out)\n",
" PopupWindow.display_message(window,runner.is_success,runner.res_std_out)\n",
" except Exception as ioe:\n",
" if _DEBUG: print(\"Exception: \",str(ioe))\n",
" PopupWindow.display_message(window=window,success_flag=False,text_msg=str(ioe))\n",
" window.destroy()\n",
" os.unlink(filename)\n",
" \n",
" @staticmethod\n",
" def populateContextMenu(text_view,popup,user_data):\n",
" assert( isinstance(user_data,PopupForTextView) )\n",
" label,cb = user_data.get_mode_callback_label()\n",
" exmnuitem = Gtk.MenuItem.new_with_label(label)\n",
" user_data.add_separator(popup)\n",
" top,bot = user_data.curr_row,user_data.curr_row+1\n",
" user_data.curr_row += 1\n",
" popup.attach(exmnuitem,0,1,top,bot)\n",
" exmnuitem.show()\n",
" exmnuitem.connect(\"activate\",cb,user_data)\n"
] | [
0,
0.018518518518518517,
0,
0.019230769230769232,
0.25,
0.03333333333333333,
0.03125,
0.034482758620689655,
0,
0,
0,
0,
0,
0.03125,
0.027777777777777776,
0,
0.03571428571428571,
0.037037037037037035,
0,
0,
0,
0.05263157894736842,
0,
0.03636363636363636,
0.022727272727272728,
0,
0.03571428571428571,
0,
0,
0,
0.038461538461538464,
0.02040816326530612,
0.07142857142857142,
0.02127659574468085,
0.021739130434782608,
0,
0,
0,
0.3333333333333333,
0.03125,
0.017543859649122806,
0.08333333333333333,
0,
0,
0.020833333333333332,
0,
0.03296703296703297,
0,
0,
0.1111111111111111,
0,
0,
0,
0.019230769230769232,
0.0425531914893617,
0,
0,
0.1111111111111111,
0.02857142857142857,
0,
0.057971014492753624,
0,
0,
0.1111111111111111,
0,
0,
0.015625,
0,
0.014285714285714285,
0.1111111111111111,
0,
0.022222222222222223,
0.05263157894736842,
0,
0,
0,
0.01818181818181818,
0.1111111111111111,
0,
0.021739130434782608,
0.05263157894736842,
0,
0,
0,
0.018518518518518517,
0.1111111111111111,
0.0392156862745098,
0.03636363636363636,
0.02127659574468085,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0.02040816326530612,
0.03529411764705882,
0,
0.03773584905660377,
0.03260869565217391,
0,
0,
0.1111111111111111,
0,
0.03571428571428571,
0.05263157894736842,
0.01818181818181818,
0,
0,
0.034482758620689655,
0,
0.09090909090909091,
0,
0.0392156862745098
] | 115 | 0.026454 | false |
#The MIT License (MIT)
#Copyright (c) 2014 Microsoft Corporation
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import pydocumentdb.murmur_hash as murmur_hash
import pydocumentdb.consistent_hash_ring as consistent_hash_ring
class HashPartitionResolver(object):
    """HashPartitionResolver implements partitioning based on the value of a hash function, allowing you to evenly
    distribute requests and data across a number of partitions.
    """
    def __init__(self, partition_key_extractor, collection_links, default_number_of_virtual_nodes_per_collection = 128, hash_generator = None):
        """
        :Parameters:
            - `partition_key_extractor`: lambda, returning the partition key from the document passed.
            - `collection_links`: list, the links of collections participating in partitioning.
            - `default_number_of_virtual_nodes_per_collection`: int, number of virtual nodes per collection.
            - `hash_generator`: HashGenerator, the hash generator to be used for hashing algorithm.

        :Raises:
            ValueError, when a required argument is missing or the virtual node count is not positive.
        """
        if partition_key_extractor is None:
            raise ValueError("partition_key_extractor is None.")
        if collection_links is None:
            raise ValueError("collection_links is None.")
        if default_number_of_virtual_nodes_per_collection <= 0:
            # Fixed grammar of the message ("must greater than" -> "must be greater than").
            raise ValueError("The number of virtual nodes per collection must be greater than 0.")

        self.partition_key_extractor = partition_key_extractor
        self.collection_links = collection_links

        # Default to MurmurHash when the caller does not supply a generator.
        if hash_generator is None:
            hash_generator = murmur_hash._MurmurHash()

        self.consistent_hash_ring = consistent_hash_ring._ConsistentHashRing(self.collection_links, default_number_of_virtual_nodes_per_collection, hash_generator)

    def ResolveForCreate(self, document):
        """Resolves the collection for creating the document based on the partition key.

        :Parameters:
            - `document`: dict, the document to be created.

        :Returns:
            str, collection Self link or Name based link which should handle the Create operation.
        """
        if document is None:
            raise ValueError("document is None.")

        partition_key = self.partition_key_extractor(document)
        return self.consistent_hash_ring.GetCollectionNode(partition_key)

    def ResolveForRead(self, partition_key):
        """Resolves the collection for reading/querying the documents based on the partition key.

        :Parameters:
            - `partition_key`: str, the partition key to resolve; None targets all collections.

        :Returns:
            list, collection Self link(s) or Name based link(s) which should handle the Read operation.
        """
        # A None partition key cannot be scoped to a single partition, so
        # every participating collection must be consulted.
        if partition_key is None:
            return self.collection_links
        else:
            return [self.consistent_hash_ring.GetCollectionNode(partition_key)]
| [
"#The MIT License (MIT)\r\n",
"#Copyright (c) 2014 Microsoft Corporation\r\n",
"\r\n",
"#Permission is hereby granted, free of charge, to any person obtaining a copy\r\n",
"#of this software and associated documentation files (the \"Software\"), to deal\r\n",
"#in the Software without restriction, including without limitation the rights\r\n",
"#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n",
"#copies of the Software, and to permit persons to whom the Software is\r\n",
"#furnished to do so, subject to the following conditions:\r\n",
"\r\n",
"#The above copyright notice and this permission notice shall be included in all\r\n",
"#copies or substantial portions of the Software.\r\n",
"\r\n",
"#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n",
"#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n",
"#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n",
"#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n",
"#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n",
"#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n",
"#SOFTWARE.\r\n",
"\r\n",
"import pydocumentdb.murmur_hash as murmur_hash\r\n",
"import pydocumentdb.consistent_hash_ring as consistent_hash_ring\r\n",
"\r\n",
"class HashPartitionResolver(object):\r\n",
" \"\"\"HashPartitionResolver implements partitioning based on the value of a hash function, allowing you to evenly\r\n",
" distribute requests and data across a number of partitions.\r\n",
" \"\"\"\r\n",
" def __init__(self, partition_key_extractor, collection_links, default_number_of_virtual_nodes_per_collection = 128, hash_generator = None):\r\n",
" \"\"\"\r\n",
" :Parameters:\r\n",
" - `partition_key_extractor`: lambda, returning the partition key from the document passed.\r\n",
" - `collection_links`: list, the links of collections participating in partitioning.\r\n",
" - `default_number_of_virtual_nodes_per_collection`: int, number of virtual nodes per collection.\r\n",
" - `hash_generator`: HashGenerator, the hash generator to be used for hashing algorithm.\r\n",
" \"\"\"\r\n",
" if partition_key_extractor is None:\r\n",
" raise ValueError(\"partition_key_extractor is None.\")\r\n",
" if collection_links is None:\r\n",
" raise ValueError(\"collection_links is None.\")\r\n",
" if default_number_of_virtual_nodes_per_collection <= 0:\r\n",
" raise ValueError(\"The number of virtual nodes per collection must greater than 0.\")\r\n",
" \r\n",
" self.partition_key_extractor = partition_key_extractor\r\n",
" self.collection_links = collection_links\r\n",
"\r\n",
" if hash_generator is None:\r\n",
" hash_generator = murmur_hash._MurmurHash()\r\n",
"\r\n",
" self.consistent_hash_ring = consistent_hash_ring._ConsistentHashRing(self.collection_links, default_number_of_virtual_nodes_per_collection, hash_generator)\r\n",
"\r\n",
" def ResolveForCreate(self, document):\r\n",
" \"\"\"Resolves the collection for creating the document based on the partition key.\r\n",
" \r\n",
" :Parameters:\r\n",
" - `document`: dict, the document to be created.\r\n",
"\r\n",
" :Returns:\r\n",
" str, collection Self link or Name based link which should handle the Create operation.\r\n",
" \"\"\"\r\n",
" if document is None:\r\n",
" raise ValueError(\"document is None.\")\r\n",
"\r\n",
" partition_key = self.partition_key_extractor(document)\r\n",
" return self.consistent_hash_ring.GetCollectionNode(partition_key)\r\n",
"\r\n",
" def ResolveForRead(self, partition_key):\r\n",
" \"\"\"Resolves the collection for reading/querying the documents based on the partition key.\r\n",
"\r\n",
" :Parameters:\r\n",
" - `document`: dict, the document to be read/queried.\r\n",
"\r\n",
" :Returns:\r\n",
" list, collection Self link(s) or Name based link(s) which should handle the Read operation.\r\n",
" \"\"\"\r\n",
" if partition_key is None:\r\n",
" return self.collection_links\r\n",
" else:\r\n",
" return [self.consistent_hash_ring.GetCollectionNode(partition_key)]\r\n"
] | [
0.041666666666666664,
0.023255813953488372,
0,
0.012658227848101266,
0.0125,
0.012658227848101266,
0.013157894736842105,
0.013888888888888888,
0.01694915254237288,
0,
0.012345679012345678,
0.02,
0,
0.012987012987012988,
0.013333333333333334,
0.01282051282051282,
0.0136986301369863,
0.0125,
0.0125,
0.08333333333333333,
0,
0,
0,
0,
0.02631578947368421,
0.008620689655172414,
0,
0,
0.034482758620689655,
0,
0,
0.009615384615384616,
0.010309278350515464,
0.00909090909090909,
0.009900990099009901,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0.1,
0,
0,
0,
0,
0,
0,
0.006060606060606061,
0,
0,
0.011111111111111112,
0.1,
0,
0,
0,
0,
0.01,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0.009523809523809525,
0,
0,
0,
0,
0
] | 79 | 0.008933 | false |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import ast
import sys
import warnings
if sys.version_info < (3,):
from urllib2 import quote as url_quote
from urllib2 import unquote as url_unquote
else:
from urllib.parse import quote as url_quote
from urllib.parse import unquote as url_unquote
from datetime import datetime
from xml.sax.saxutils import escape as xml_escape
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from .constants import DEV_ACCOUNT_NAME
from ._common_models import (
WindowsAzureData,
Feed,
_Base64String,
HeaderDict,
_unicode_type,
_dict_of,
_list_of,
_scalar_list_of,
_xml_attribute,
)
from ._common_conversion import (
_decode_base64_to_text,
)
from ._common_error import (
_ERROR_VALUE_SHOULD_BE_BYTES,
_WARNING_VALUE_SHOULD_BE_BYTES,
)
# XML namespace prefixes used when querying Atom entity feeds with ElementTree.
# 'atom' is the Atom syndication namespace; 'm' and 'd' are the OData
# (ADO.NET Data Services) metadata and data namespaces respectively.
_etree_entity_feed_namespaces = {
    'atom': 'http://www.w3.org/2005/Atom',
    'm': 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata',
    'd': 'http://schemas.microsoft.com/ado/2007/08/dataservices',
}
def _make_etree_ns_attr_name(ns, name):
return '{' + ns + '}' + name
def _get_etree_tag_name_without_ns(tag):
val = tag.partition('}')[2]
return val
def _get_etree_text(element):
text = element.text
return text if text is not None else ''
def _get_readable_id(id_name, id_prefix_to_skip):
"""simplified an id to be more friendly for us people"""
# id_name is in the form 'https://namespace.host.suffix/name'
# where name may contain a forward slash!
pos = id_name.find('//')
if pos != -1:
pos += 2
if id_prefix_to_skip:
pos = id_name.find(id_prefix_to_skip, pos)
if pos != -1:
pos += len(id_prefix_to_skip)
pos = id_name.find('/', pos)
if pos != -1:
return id_name[pos + 1:]
return id_name
def _create_entry(entry_body):
''' Adds common part of entry to a given entry body and return the whole
xml. '''
updated_str = datetime.utcnow().isoformat()
if datetime.utcnow().utcoffset() is None:
updated_str += '+00:00'
entry_start = '''<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom" >
<title /><updated>{updated}</updated><author><name /></author><id />
<content type="application/xml">
{body}</content></entry>'''
return entry_start.format(updated=updated_str, body=entry_body)
def _to_datetime(strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
_KNOWN_SERIALIZATION_XFORMS = {
'include_apis': 'IncludeAPIs',
'message_id': 'MessageId',
'content_md5': 'Content-MD5',
'last_modified': 'Last-Modified',
'cache_control': 'Cache-Control',
'copy_id': 'CopyId',
}
def _get_serialization_name(element_name):
"""converts a Python name into a serializable name"""
known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
if known is not None:
return known
if element_name.startswith('x_ms_'):
return element_name.replace('_', '-')
if element_name.endswith('_id'):
element_name = element_name.replace('_id', 'ID')
for name in ['content_', 'last_modified', 'if_', 'cache_control']:
if element_name.startswith(name):
element_name = element_name.replace('_', '-_')
return ''.join(name.capitalize() for name in element_name.split('_'))
def _convert_class_to_xml(source, xml_prefix=True):
if source is None:
return ''
xmlstr = ''
if xml_prefix:
xmlstr = '<?xml version="1.0" encoding="utf-8"?>'
if isinstance(source, list):
for value in source:
xmlstr += _convert_class_to_xml(value, False)
elif isinstance(source, WindowsAzureData):
class_name = source.__class__.__name__
xmlstr += '<' + class_name + '>'
for name, value in vars(source).items():
if value is not None:
if isinstance(value, list) or \
isinstance(value, WindowsAzureData):
xmlstr += _convert_class_to_xml(value, False)
else:
xmlstr += ('<' + _get_serialization_name(name) + '>' +
xml_escape(str(value)) + '</' +
_get_serialization_name(name) + '>')
xmlstr += '</' + class_name + '>'
return xmlstr
def _set_continuation_from_response_headers(feeds, response):
    """Copies 'x-ms-continuation-*' response headers onto the feeds object.

    The header suffix after 'x-ms-continuation-' becomes the key; the
    resulting HeaderDict is attached as feeds.x_ms_continuation only when at
    least one matching header was found.
    """
    marker = 'x-ms-continuation'
    continuation = HeaderDict()
    for header_name, header_value in response.headers:
        if marker in header_name:
            # Keep only the part after 'x-ms-continuation-'.
            continuation[header_name[len(marker) + 1:]] = header_value
    if continuation:
        setattr(feeds, 'x_ms_continuation', continuation)
def _get_request_body_bytes_only(param_name, param_value):
'''Validates the request body passed in and converts it to bytes
if our policy allows it.'''
if param_value is None:
return b''
if isinstance(param_value, bytes):
return param_value
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))
def _get_request_body(request_body):
'''Converts an object into a request body. If it's None
we'll return an empty string, if it's one of our objects it'll
convert it to XML and return it. Otherwise we just use the object
directly'''
if request_body is None:
return b''
if isinstance(request_body, WindowsAzureData):
request_body = _convert_class_to_xml(request_body)
if isinstance(request_body, bytes):
return request_body
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
request_body = str(request_body)
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
return request_body
def _update_request_uri_query_local_storage(request, use_local_storage):
    ''' create correct uri and query for the request '''
    uri, query = _update_request_uri_query(request)
    if not use_local_storage:
        return uri, query
    # Local (dev) storage requests are addressed under the well-known
    # development account name.
    return '/' + DEV_ACCOUNT_NAME + uri, query
def _update_request_uri_query(request):
'''pulls the query string out of the URI and moves it into
the query portion of the request object. If there are already
query parameters on the request the parameters in the URI will
appear after the existing parameters'''
if '?' in request.path:
request.path, _, query_string = request.path.partition('?')
if query_string:
query_params = query_string.split('&')
for query in query_params:
if '=' in query:
name, _, value = query.partition('=')
request.query.append((name, value))
request.path = url_quote(request.path, '/()$=\',')
# add encoded queries to request.path.
if request.query:
request.path += '?'
for name, value in request.query:
if value is not None:
request.path += name + '=' + url_quote(value, '/()$=\',') + '&'
request.path = request.path[:-1]
return request.path, request.query
def _parse_response_for_dict(response):
''' Extracts name-values from response header. Filter out the standard
http headers.'''
if response is None:
return None
http_headers = ['server', 'date', 'location', 'host',
'via', 'proxy-connection', 'connection']
return_dict = HeaderDict()
if response.headers:
for name, value in response.headers:
if not name.lower() in http_headers:
return_dict[name] = value
return return_dict
def _parse_response_for_dict_prefix(response, prefixes):
''' Extracts name-values for names starting with prefix from response
header. Filter out the standard http headers.'''
if response is None:
return None
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
for name, value in orig_dict.items():
for prefix_value in prefixes:
if name.lower().startswith(prefix_value.lower()):
return_dict[name] = value
break
return return_dict
else:
return None
def _parse_response_for_dict_filter(response, filter):
''' Extracts name-values for names in filter from response header. Filter
out the standard http headers.'''
if response is None:
return None
return_dict = {}
orig_dict = _parse_response_for_dict(response)
if orig_dict:
for name, value in orig_dict.items():
if name.lower() in filter:
return_dict[name] = value
return return_dict
else:
return None
class _ETreeXmlToObject(object):
    """Deserializes Atom/OData XML response bodies into model objects
    using xml.etree ElementTree."""

    @staticmethod
    def parse_response(response, return_type):
        '''
        Parse the HTTPResponse's body and fill all the data into a class of
        return_type.  Returns None when the root tag does not match the
        expected XML name of return_type.
        '''
        root = ETree.fromstring(response.body)
        xml_name = getattr(return_type, '_xml_name', return_type.__name__)
        if root.tag == xml_name:
            return _ETreeXmlToObject._parse_response_body_from_xml_node(root, return_type)

        return None

    @staticmethod
    def parse_enum_results_list(response, return_type, resp_type, item_type):
        """resp_body is the XML we received
        resp_type is a string, such as Containers,
        return_type is the type we're constructing, such as ContainerEnumResults
        item_type is the type object of the item to be created, such as Container

        This function then returns a ContainerEnumResults object with the
        containers member populated with the results.
        """

        # parsing something like:
        # <EnumerationResults ... >
        #   <Queues>
        #       <Queue>
        #           <Something />
        #           <SomethingElse />
        #       </Queue>
        #   </Queues>
        # </EnumerationResults>
        return_obj = return_type()
        root = ETree.fromstring(response.body)

        items = []

        for container_element in root.findall(resp_type):
            # The item element name is the singular of the container name
            # (e.g. 'Queues' -> 'Queue'), obtained by dropping the final 's'.
            for item_element in container_element.findall(resp_type[:-1]):
                items.append(_ETreeXmlToObject.fill_instance_element(item_element, item_type))

        for name, value in vars(return_obj).items():
            # queues, Queues, this is the list its self which we populated
            # above
            if name == resp_type.lower():
                # the list its self.
                continue
            value = _ETreeXmlToObject.fill_data_member(root, name, value)
            if value is not None:
                setattr(return_obj, name, value)

        setattr(return_obj, resp_type.lower(), items)
        return return_obj

    @staticmethod
    def parse_simple_list(response, return_type, item_type, list_name):
        '''Parses a flat list of item_type elements from the response body and
        stores them on a new return_type instance under attribute list_name.'''
        respbody = response.body
        res = return_type()
        res_items = []
        root = ETree.fromstring(respbody)
        # NOTE: the original code assigned 'type.__name__' (the *builtin*
        # 'type', not the result type) to an unused variable here; that
        # dead, buggy statement has been removed.
        item_name = item_type.__name__
        for item in root.findall(item_name):
            res_items.append(_ETreeXmlToObject.fill_instance_element(item, item_type))

        setattr(res, list_name, res_items)
        return res

    @staticmethod
    def convert_response_to_feeds(response, convert_func):
        '''Converts an Atom response into a list of Feed objects, applying
        convert_func to each <entry> element.  Continuation headers from the
        response are attached to the returned list.'''
        if response is None:
            return None

        feeds = _list_of(Feed)

        _set_continuation_from_response_headers(feeds, response)

        root = ETree.fromstring(response.body)

        # some feeds won't have the 'feed' element, just a single 'entry' element
        root_name = _get_etree_tag_name_without_ns(root.tag)
        if root_name == 'feed':
            entries = root.findall("./atom:entry", _etree_entity_feed_namespaces)
        elif root_name == 'entry':
            entries = [root]
        else:
            raise NotImplementedError()

        for entry in entries:
            feeds.append(convert_func(entry))

        return feeds

    @staticmethod
    def get_entry_properties_from_element(element, include_id, id_prefix_to_skip=None, use_title_as_id=False):
        ''' get properties from element tree element '''
        properties = {}

        # The etag is carried as an attribute in the OData metadata namespace.
        etag = element.attrib.get(_make_etree_ns_attr_name(_etree_entity_feed_namespaces['m'], 'etag'), None)
        if etag is not None:
            properties['etag'] = etag

        updated = element.findtext('./atom:updated', '', _etree_entity_feed_namespaces)
        if updated:
            properties['updated'] = updated

        author_name = element.findtext('./atom:author/atom:name', '', _etree_entity_feed_namespaces)
        if author_name:
            properties['author'] = author_name

        if include_id:
            if use_title_as_id:
                title = element.findtext('./atom:title', '', _etree_entity_feed_namespaces)
                if title:
                    properties['name'] = title
            else:
                # Renamed from 'id' to avoid shadowing the builtin.
                entry_id = element.findtext('./atom:id', '', _etree_entity_feed_namespaces)
                if entry_id:
                    properties['name'] = _get_readable_id(entry_id, id_prefix_to_skip)

        return properties

    @staticmethod
    def fill_instance_element(element, return_type):
        """Converts a DOM element into the specified object"""
        return _ETreeXmlToObject._parse_response_body_from_xml_node(element, return_type)

    @staticmethod
    def fill_data_member(xmldoc, element_name, data_member):
        '''Reads the child element matching element_name and converts its text
        to the type of data_member (datetime, bool, or the member's own type).
        Returns None when the element is absent.'''
        element = xmldoc.find(_get_serialization_name(element_name))
        if element is None:
            return None

        value = _get_etree_text(element)

        if data_member is None:
            return value
        elif isinstance(data_member, datetime):
            return _to_datetime(value)
        elif type(data_member) is bool:
            # Anything other than the literal 'false' counts as True.
            return value.lower() != 'false'
        else:
            return type(data_member)(value)

    @staticmethod
    def _parse_response_body_from_xml_node(node, return_type):
        '''
        parse the xml and fill all the data into a class of return_type
        '''
        return_obj = return_type()
        _ETreeXmlToObject._fill_data_to_return_object(node, return_obj)

        return return_obj

    @staticmethod
    def _fill_instance_child(xmldoc, element_name, return_type):
        '''Converts a child of the current dom element to the specified type.
        '''
        element = xmldoc.find(_get_serialization_name(element_name))
        if element is None:
            return None

        return_obj = return_type()
        _ETreeXmlToObject._fill_data_to_return_object(element, return_obj)

        return return_obj

    @staticmethod
    def _fill_data_to_return_object(node, return_obj):
        '''Populates return_obj's attributes from node, dispatching on each
        attribute's declared marker type (_list_of, _scalar_list_of, _dict_of,
        _xml_attribute, nested WindowsAzureData, dict, _Base64String, or a
        plain scalar).'''
        members = dict(vars(return_obj))
        for name, value in members.items():
            if isinstance(value, _list_of):
                setattr(return_obj,
                        name,
                        _ETreeXmlToObject._fill_list_of(node,
                                                        value.list_type,
                                                        value.xml_element_name))
            elif isinstance(value, _scalar_list_of):
                setattr(return_obj,
                        name,
                        _ETreeXmlToObject._fill_scalar_list_of(node,
                                                               value.list_type,
                                                               _get_serialization_name(name),
                                                               value.xml_element_name))
            elif isinstance(value, _dict_of):
                setattr(return_obj,
                        name,
                        _ETreeXmlToObject._fill_dict_of(node,
                                                        _get_serialization_name(name),
                                                        value.pair_xml_element_name,
                                                        value.key_xml_element_name,
                                                        value.value_xml_element_name))
            elif isinstance(value, _xml_attribute):
                real_value = node.attrib.get(value.xml_element_name, None)
                if real_value is not None:
                    setattr(return_obj, name, real_value)
            elif isinstance(value, WindowsAzureData):
                setattr(return_obj,
                        name,
                        _ETreeXmlToObject._fill_instance_child(node, name, value.__class__))
            elif isinstance(value, dict):
                setattr(return_obj,
                        name,
                        _ETreeXmlToObject._fill_dict(node, _get_serialization_name(name)))
            elif isinstance(value, _Base64String):
                value = _ETreeXmlToObject.fill_data_member(node, name, '')
                if value is not None:
                    value = _decode_base64_to_text(value)
                # always set the attribute, so we don't end up returning an object
                # with type _Base64String
                setattr(return_obj, name, value)
            else:
                value = _ETreeXmlToObject.fill_data_member(node, name, value)
                if value is not None:
                    setattr(return_obj, name, value)

    @staticmethod
    def _fill_list_of(xmldoc, element_type, xml_element_name):
        '''Builds a list of element_type objects from every matching child.'''
        return [_ETreeXmlToObject._parse_response_body_from_xml_node(xmlelement, element_type)
                for xmlelement in xmldoc.findall(xml_element_name)]

    @staticmethod
    def _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name,
                             xml_element_name):
        '''Converts an xml fragment into a list of scalar types. The parent xml
        element contains a flat list of xml elements which are converted into the
        specified scalar type and added to the list.

        Example:
        xmldoc=
        <Endpoints>
            <Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>
            <Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint>
            <Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint>
        </Endpoints>
        element_type=str
        parent_xml_element_name='Endpoints'
        xml_element_name='Endpoint'
        '''
        raise NotImplementedError('_scalar_list_of not supported')

    @staticmethod
    def _fill_dict(xmldoc, element_name):
        '''Converts the children of the named element into a {tag: text} dict;
        returns None when the element is absent.'''
        container_element = xmldoc.find(element_name)
        if container_element is not None:
            return_obj = {}
            # Element.getchildren() was removed in Python 3.9; iterating the
            # element itself yields the same direct children.
            for item_element in container_element:
                return_obj[item_element.tag] = _get_etree_text(item_element)
            return return_obj
        return None

    @staticmethod
    def _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name,
                      key_xml_element_name, value_xml_element_name):
        '''Converts an xml fragment into a dictionary. The parent xml element
        contains a list of xml elements where each element has a child element for
        the key, and another for the value.

        Example:
        xmldoc=
        <ExtendedProperties>
            <ExtendedProperty>
                <Name>Ext1</Name>
                <Value>Val1</Value>
            </ExtendedProperty>
            <ExtendedProperty>
                <Name>Ext2</Name>
                <Value>Val2</Value>
            </ExtendedProperty>
        </ExtendedProperties>
        element_type=str
        parent_xml_element_name='ExtendedProperties'
        pair_xml_element_name='ExtendedProperty'
        key_xml_element_name='Name'
        value_xml_element_name='Value'
        '''
        raise NotImplementedError('_dict_of not supported')
| [
"#-------------------------------------------------------------------------\r\n",
"# Copyright (c) Microsoft. All rights reserved.\r\n",
"#\r\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n",
"# you may not use this file except in compliance with the License.\r\n",
"# You may obtain a copy of the License at\r\n",
"# http://www.apache.org/licenses/LICENSE-2.0\r\n",
"#\r\n",
"# Unless required by applicable law or agreed to in writing, software\r\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n",
"# See the License for the specific language governing permissions and\r\n",
"# limitations under the License.\r\n",
"#--------------------------------------------------------------------------\r\n",
"import ast\r\n",
"import sys\r\n",
"import warnings\r\n",
"\r\n",
"if sys.version_info < (3,):\r\n",
" from urllib2 import quote as url_quote\r\n",
" from urllib2 import unquote as url_unquote\r\n",
"else:\r\n",
" from urllib.parse import quote as url_quote\r\n",
" from urllib.parse import unquote as url_unquote\r\n",
"\r\n",
"from datetime import datetime\r\n",
"from xml.sax.saxutils import escape as xml_escape\r\n",
"\r\n",
"try:\r\n",
" from xml.etree import cElementTree as ETree\r\n",
"except ImportError:\r\n",
" from xml.etree import ElementTree as ETree\r\n",
"\r\n",
"from .constants import DEV_ACCOUNT_NAME\r\n",
"from ._common_models import (\r\n",
" WindowsAzureData,\r\n",
" Feed,\r\n",
" _Base64String,\r\n",
" HeaderDict,\r\n",
" _unicode_type,\r\n",
" _dict_of,\r\n",
" _list_of,\r\n",
" _scalar_list_of,\r\n",
" _xml_attribute,\r\n",
")\r\n",
"from ._common_conversion import (\r\n",
" _decode_base64_to_text,\r\n",
")\r\n",
"from ._common_error import (\r\n",
" _ERROR_VALUE_SHOULD_BE_BYTES,\r\n",
" _WARNING_VALUE_SHOULD_BE_BYTES,\r\n",
")\r\n",
"\r\n",
"_etree_entity_feed_namespaces = {\r\n",
" 'atom': 'http://www.w3.org/2005/Atom',\r\n",
" 'm': 'http://schemas.microsoft.com/ado/2007/08/dataservices/metadata',\r\n",
" 'd': 'http://schemas.microsoft.com/ado/2007/08/dataservices',\r\n",
"}\r\n",
"\r\n",
"\r\n",
"def _make_etree_ns_attr_name(ns, name):\r\n",
" return '{' + ns + '}' + name\r\n",
"\r\n",
"\r\n",
"def _get_etree_tag_name_without_ns(tag):\r\n",
" val = tag.partition('}')[2]\r\n",
" return val\r\n",
"\r\n",
"\r\n",
"def _get_etree_text(element):\r\n",
" text = element.text\r\n",
" return text if text is not None else ''\r\n",
"\r\n",
"\r\n",
"def _get_readable_id(id_name, id_prefix_to_skip):\r\n",
" \"\"\"simplified an id to be more friendly for us people\"\"\"\r\n",
" # id_name is in the form 'https://namespace.host.suffix/name'\r\n",
" # where name may contain a forward slash!\r\n",
" pos = id_name.find('//')\r\n",
" if pos != -1:\r\n",
" pos += 2\r\n",
" if id_prefix_to_skip:\r\n",
" pos = id_name.find(id_prefix_to_skip, pos)\r\n",
" if pos != -1:\r\n",
" pos += len(id_prefix_to_skip)\r\n",
" pos = id_name.find('/', pos)\r\n",
" if pos != -1:\r\n",
" return id_name[pos + 1:]\r\n",
" return id_name\r\n",
"\r\n",
"\r\n",
"def _create_entry(entry_body):\r\n",
" ''' Adds common part of entry to a given entry body and return the whole\r\n",
" xml. '''\r\n",
" updated_str = datetime.utcnow().isoformat()\r\n",
" if datetime.utcnow().utcoffset() is None:\r\n",
" updated_str += '+00:00'\r\n",
"\r\n",
" entry_start = '''<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>\r\n",
"<entry xmlns:d=\"http://schemas.microsoft.com/ado/2007/08/dataservices\" xmlns:m=\"http://schemas.microsoft.com/ado/2007/08/dataservices/metadata\" xmlns=\"http://www.w3.org/2005/Atom\" >\r\n",
"<title /><updated>{updated}</updated><author><name /></author><id />\r\n",
"<content type=\"application/xml\">\r\n",
" {body}</content></entry>'''\r\n",
" return entry_start.format(updated=updated_str, body=entry_body)\r\n",
"\r\n",
"\r\n",
"def _to_datetime(strtime):\r\n",
" return datetime.strptime(strtime, \"%Y-%m-%dT%H:%M:%S.%f\")\r\n",
"\r\n",
"_KNOWN_SERIALIZATION_XFORMS = {\r\n",
" 'include_apis': 'IncludeAPIs',\r\n",
" 'message_id': 'MessageId',\r\n",
" 'content_md5': 'Content-MD5',\r\n",
" 'last_modified': 'Last-Modified',\r\n",
" 'cache_control': 'Cache-Control',\r\n",
" 'copy_id': 'CopyId',\r\n",
"}\r\n",
"\r\n",
"\r\n",
"def _get_serialization_name(element_name):\r\n",
" \"\"\"converts a Python name into a serializable name\"\"\"\r\n",
" known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)\r\n",
" if known is not None:\r\n",
" return known\r\n",
"\r\n",
" if element_name.startswith('x_ms_'):\r\n",
" return element_name.replace('_', '-')\r\n",
" if element_name.endswith('_id'):\r\n",
" element_name = element_name.replace('_id', 'ID')\r\n",
" for name in ['content_', 'last_modified', 'if_', 'cache_control']:\r\n",
" if element_name.startswith(name):\r\n",
" element_name = element_name.replace('_', '-_')\r\n",
"\r\n",
" return ''.join(name.capitalize() for name in element_name.split('_'))\r\n",
"\r\n",
"\r\n",
"def _convert_class_to_xml(source, xml_prefix=True):\r\n",
" if source is None:\r\n",
" return ''\r\n",
"\r\n",
" xmlstr = ''\r\n",
" if xml_prefix:\r\n",
" xmlstr = '<?xml version=\"1.0\" encoding=\"utf-8\"?>'\r\n",
"\r\n",
" if isinstance(source, list):\r\n",
" for value in source:\r\n",
" xmlstr += _convert_class_to_xml(value, False)\r\n",
" elif isinstance(source, WindowsAzureData):\r\n",
" class_name = source.__class__.__name__\r\n",
" xmlstr += '<' + class_name + '>'\r\n",
" for name, value in vars(source).items():\r\n",
" if value is not None:\r\n",
" if isinstance(value, list) or \\\r\n",
" isinstance(value, WindowsAzureData):\r\n",
" xmlstr += _convert_class_to_xml(value, False)\r\n",
" else:\r\n",
" xmlstr += ('<' + _get_serialization_name(name) + '>' +\r\n",
" xml_escape(str(value)) + '</' +\r\n",
" _get_serialization_name(name) + '>')\r\n",
" xmlstr += '</' + class_name + '>'\r\n",
" return xmlstr\r\n",
"\r\n",
"\r\n",
"def _set_continuation_from_response_headers(feeds, response):\r\n",
" x_ms_continuation = HeaderDict()\r\n",
" for name, value in response.headers:\r\n",
" if 'x-ms-continuation' in name:\r\n",
" x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value\r\n",
" if x_ms_continuation:\r\n",
" setattr(feeds, 'x_ms_continuation', x_ms_continuation)\r\n",
"\r\n",
"\r\n",
"def _get_request_body_bytes_only(param_name, param_value):\r\n",
" '''Validates the request body passed in and converts it to bytes\r\n",
" if our policy allows it.'''\r\n",
" if param_value is None:\r\n",
" return b''\r\n",
"\r\n",
" if isinstance(param_value, bytes):\r\n",
" return param_value\r\n",
"\r\n",
" raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name))\r\n",
"\r\n",
"\r\n",
"def _get_request_body(request_body):\r\n",
" '''Converts an object into a request body. If it's None\r\n",
" we'll return an empty string, if it's one of our objects it'll\r\n",
" convert it to XML and return it. Otherwise we just use the object\r\n",
" directly'''\r\n",
" if request_body is None:\r\n",
" return b''\r\n",
"\r\n",
" if isinstance(request_body, WindowsAzureData):\r\n",
" request_body = _convert_class_to_xml(request_body)\r\n",
"\r\n",
" if isinstance(request_body, bytes):\r\n",
" return request_body\r\n",
"\r\n",
" if isinstance(request_body, _unicode_type):\r\n",
" return request_body.encode('utf-8')\r\n",
"\r\n",
" request_body = str(request_body)\r\n",
" if isinstance(request_body, _unicode_type):\r\n",
" return request_body.encode('utf-8')\r\n",
"\r\n",
" return request_body\r\n",
"\r\n",
"\r\n",
"def _update_request_uri_query_local_storage(request, use_local_storage):\r\n",
" ''' create correct uri and query for the request '''\r\n",
" uri, query = _update_request_uri_query(request)\r\n",
" if use_local_storage:\r\n",
" return '/' + DEV_ACCOUNT_NAME + uri, query\r\n",
" return uri, query\r\n",
"\r\n",
"\r\n",
"def _update_request_uri_query(request):\r\n",
" '''pulls the query string out of the URI and moves it into\r\n",
" the query portion of the request object. If there are already\r\n",
" query parameters on the request the parameters in the URI will\r\n",
" appear after the existing parameters'''\r\n",
"\r\n",
" if '?' in request.path:\r\n",
" request.path, _, query_string = request.path.partition('?')\r\n",
" if query_string:\r\n",
" query_params = query_string.split('&')\r\n",
" for query in query_params:\r\n",
" if '=' in query:\r\n",
" name, _, value = query.partition('=')\r\n",
" request.query.append((name, value))\r\n",
"\r\n",
" request.path = url_quote(request.path, '/()$=\\',')\r\n",
"\r\n",
" # add encoded queries to request.path.\r\n",
" if request.query:\r\n",
" request.path += '?'\r\n",
" for name, value in request.query:\r\n",
" if value is not None:\r\n",
" request.path += name + '=' + url_quote(value, '/()$=\\',') + '&'\r\n",
" request.path = request.path[:-1]\r\n",
"\r\n",
" return request.path, request.query\r\n",
"\r\n",
"\r\n",
"\r\n",
"\r\n",
"def _parse_response_for_dict(response):\r\n",
" ''' Extracts name-values from response header. Filter out the standard\r\n",
" http headers.'''\r\n",
"\r\n",
" if response is None:\r\n",
" return None\r\n",
" http_headers = ['server', 'date', 'location', 'host',\r\n",
" 'via', 'proxy-connection', 'connection']\r\n",
" return_dict = HeaderDict()\r\n",
" if response.headers:\r\n",
" for name, value in response.headers:\r\n",
" if not name.lower() in http_headers:\r\n",
" return_dict[name] = value\r\n",
"\r\n",
" return return_dict\r\n",
"\r\n",
"\r\n",
"def _parse_response_for_dict_prefix(response, prefixes):\r\n",
" ''' Extracts name-values for names starting with prefix from response\r\n",
" header. Filter out the standard http headers.'''\r\n",
"\r\n",
" if response is None:\r\n",
" return None\r\n",
" return_dict = {}\r\n",
" orig_dict = _parse_response_for_dict(response)\r\n",
" if orig_dict:\r\n",
" for name, value in orig_dict.items():\r\n",
" for prefix_value in prefixes:\r\n",
" if name.lower().startswith(prefix_value.lower()):\r\n",
" return_dict[name] = value\r\n",
" break\r\n",
" return return_dict\r\n",
" else:\r\n",
" return None\r\n",
"\r\n",
"\r\n",
"def _parse_response_for_dict_filter(response, filter):\r\n",
" ''' Extracts name-values for names in filter from response header. Filter\r\n",
" out the standard http headers.'''\r\n",
" if response is None:\r\n",
" return None\r\n",
" return_dict = {}\r\n",
" orig_dict = _parse_response_for_dict(response)\r\n",
" if orig_dict:\r\n",
" for name, value in orig_dict.items():\r\n",
" if name.lower() in filter:\r\n",
" return_dict[name] = value\r\n",
" return return_dict\r\n",
" else:\r\n",
" return None\r\n",
"\r\n",
"\r\n",
"class _ETreeXmlToObject(object):\r\n",
" @staticmethod\r\n",
" def parse_response(response, return_type):\r\n",
" '''\r\n",
" Parse the HTTPResponse's body and fill all the data into a class of\r\n",
" return_type.\r\n",
" '''\r\n",
" root = ETree.fromstring(response.body)\r\n",
" xml_name = getattr(return_type, '_xml_name', return_type.__name__) \r\n",
" if root.tag == xml_name:\r\n",
" return _ETreeXmlToObject._parse_response_body_from_xml_node(root, return_type)\r\n",
"\r\n",
" return None\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def parse_enum_results_list(response, return_type, resp_type, item_type):\r\n",
" \"\"\"resp_body is the XML we received\r\n",
" resp_type is a string, such as Containers,\r\n",
" return_type is the type we're constructing, such as ContainerEnumResults\r\n",
" item_type is the type object of the item to be created, such as Container\r\n",
"\r\n",
" This function then returns a ContainerEnumResults object with the\r\n",
" containers member populated with the results.\r\n",
" \"\"\"\r\n",
"\r\n",
" # parsing something like:\r\n",
" # <EnumerationResults ... >\r\n",
" # <Queues>\r\n",
" # <Queue>\r\n",
" # <Something />\r\n",
" # <SomethingElse />\r\n",
" # </Queue>\r\n",
" # </Queues>\r\n",
" # </EnumerationResults>\r\n",
" return_obj = return_type()\r\n",
" root = ETree.fromstring(response.body)\r\n",
"\r\n",
" items = []\r\n",
"\r\n",
" for container_element in root.findall(resp_type):\r\n",
" for item_element in container_element.findall(resp_type[:-1]):\r\n",
" items.append(_ETreeXmlToObject.fill_instance_element(item_element, item_type))\r\n",
"\r\n",
" for name, value in vars(return_obj).items():\r\n",
" # queues, Queues, this is the list its self which we populated\r\n",
" # above\r\n",
" if name == resp_type.lower():\r\n",
" # the list its self.\r\n",
" continue\r\n",
" value = _ETreeXmlToObject.fill_data_member(root, name, value)\r\n",
" if value is not None:\r\n",
" setattr(return_obj, name, value)\r\n",
"\r\n",
" setattr(return_obj, resp_type.lower(), items)\r\n",
" return return_obj\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def parse_simple_list(response, return_type, item_type, list_name):\r\n",
" respbody = response.body\r\n",
" res = return_type()\r\n",
" res_items = []\r\n",
" root = ETree.fromstring(respbody)\r\n",
" type_name = type.__name__\r\n",
" item_name = item_type.__name__\r\n",
" for item in root.findall(item_name):\r\n",
" res_items.append(_ETreeXmlToObject.fill_instance_element(item, item_type))\r\n",
"\r\n",
" setattr(res, list_name, res_items)\r\n",
" return res\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def convert_response_to_feeds(response, convert_func):\r\n",
"\r\n",
" if response is None:\r\n",
" return None\r\n",
"\r\n",
" feeds = _list_of(Feed)\r\n",
"\r\n",
" _set_continuation_from_response_headers(feeds, response)\r\n",
"\r\n",
" root = ETree.fromstring(response.body)\r\n",
"\r\n",
" # some feeds won't have the 'feed' element, just a single 'entry' element\r\n",
" root_name = _get_etree_tag_name_without_ns(root.tag)\r\n",
" if root_name == 'feed':\r\n",
" entries = root.findall(\"./atom:entry\", _etree_entity_feed_namespaces)\r\n",
" elif root_name == 'entry':\r\n",
" entries = [root]\r\n",
" else:\r\n",
" raise NotImplementedError()\r\n",
"\r\n",
" for entry in entries:\r\n",
" feeds.append(convert_func(entry))\r\n",
"\r\n",
" return feeds\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def get_entry_properties_from_element(element, include_id, id_prefix_to_skip=None, use_title_as_id=False):\r\n",
" ''' get properties from element tree element '''\r\n",
" properties = {}\r\n",
"\r\n",
" etag = element.attrib.get(_make_etree_ns_attr_name(_etree_entity_feed_namespaces['m'], 'etag'), None)\r\n",
" if etag is not None:\r\n",
" properties['etag'] = etag\r\n",
"\r\n",
" updated = element.findtext('./atom:updated', '', _etree_entity_feed_namespaces)\r\n",
" if updated:\r\n",
" properties['updated'] = updated\r\n",
"\r\n",
" author_name = element.findtext('./atom:author/atom:name', '', _etree_entity_feed_namespaces)\r\n",
" if author_name:\r\n",
" properties['author'] = author_name\r\n",
"\r\n",
" if include_id:\r\n",
" if use_title_as_id:\r\n",
" title = element.findtext('./atom:title', '', _etree_entity_feed_namespaces)\r\n",
" if title:\r\n",
" properties['name'] = title\r\n",
" else:\r\n",
" id = element.findtext('./atom:id', '', _etree_entity_feed_namespaces)\r\n",
" if id:\r\n",
" properties['name'] = _get_readable_id(id, id_prefix_to_skip)\r\n",
"\r\n",
" return properties\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def fill_instance_element(element, return_type):\r\n",
" \"\"\"Converts a DOM element into the specified object\"\"\"\r\n",
" return _ETreeXmlToObject._parse_response_body_from_xml_node(element, return_type)\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def fill_data_member(xmldoc, element_name, data_member):\r\n",
" element = xmldoc.find(_get_serialization_name(element_name))\r\n",
" if element is None:\r\n",
" return None\r\n",
"\r\n",
" value = _get_etree_text(element)\r\n",
"\r\n",
" if data_member is None:\r\n",
" return value\r\n",
" elif isinstance(data_member, datetime):\r\n",
" return _to_datetime(value)\r\n",
" elif type(data_member) is bool:\r\n",
" return value.lower() != 'false'\r\n",
" else:\r\n",
" return type(data_member)(value)\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def _parse_response_body_from_xml_node(node, return_type):\r\n",
" '''\r\n",
" parse the xml and fill all the data into a class of return_type\r\n",
" '''\r\n",
" return_obj = return_type()\r\n",
" _ETreeXmlToObject._fill_data_to_return_object(node, return_obj)\r\n",
"\r\n",
" return return_obj\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def _fill_instance_child(xmldoc, element_name, return_type):\r\n",
" '''Converts a child of the current dom element to the specified type.\r\n",
" '''\r\n",
" element = xmldoc.find(_get_serialization_name(element_name))\r\n",
" if element is None:\r\n",
" return None\r\n",
"\r\n",
" return_obj = return_type()\r\n",
" _ETreeXmlToObject._fill_data_to_return_object(element, return_obj)\r\n",
"\r\n",
" return return_obj\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def _fill_data_to_return_object(node, return_obj):\r\n",
" members = dict(vars(return_obj))\r\n",
" for name, value in members.items():\r\n",
" if isinstance(value, _list_of):\r\n",
" setattr(return_obj,\r\n",
" name,\r\n",
" _ETreeXmlToObject._fill_list_of(node,\r\n",
" value.list_type,\r\n",
" value.xml_element_name))\r\n",
" elif isinstance(value, _scalar_list_of):\r\n",
" setattr(return_obj,\r\n",
" name,\r\n",
" _ETreeXmlToObject._fill_scalar_list_of(node,\r\n",
" value.list_type,\r\n",
" _get_serialization_name(name),\r\n",
" value.xml_element_name))\r\n",
" elif isinstance(value, _dict_of):\r\n",
" setattr(return_obj,\r\n",
" name,\r\n",
" _ETreeXmlToObject._fill_dict_of(node,\r\n",
" _get_serialization_name(name),\r\n",
" value.pair_xml_element_name,\r\n",
" value.key_xml_element_name,\r\n",
" value.value_xml_element_name))\r\n",
" elif isinstance(value, _xml_attribute):\r\n",
" real_value = node.attrib.get(value.xml_element_name, None)\r\n",
" if real_value is not None:\r\n",
" setattr(return_obj, name, real_value)\r\n",
" elif isinstance(value, WindowsAzureData):\r\n",
" setattr(return_obj,\r\n",
" name,\r\n",
" _ETreeXmlToObject._fill_instance_child(node, name, value.__class__))\r\n",
" elif isinstance(value, dict):\r\n",
" setattr(return_obj,\r\n",
" name,\r\n",
" _ETreeXmlToObject._fill_dict(node, _get_serialization_name(name)))\r\n",
" elif isinstance(value, _Base64String):\r\n",
" value = _ETreeXmlToObject.fill_data_member(node, name, '')\r\n",
" if value is not None:\r\n",
" value = _decode_base64_to_text(value)\r\n",
" # always set the attribute, so we don't end up returning an object\r\n",
" # with type _Base64String\r\n",
" setattr(return_obj, name, value)\r\n",
" else:\r\n",
" value = _ETreeXmlToObject.fill_data_member(node, name, value)\r\n",
" if value is not None:\r\n",
" setattr(return_obj, name, value)\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def _fill_list_of(xmldoc, element_type, xml_element_name):\r\n",
" return [_ETreeXmlToObject._parse_response_body_from_xml_node(xmlelement, element_type) \\\r\n",
" for xmlelement in xmldoc.findall(xml_element_name)]\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def _fill_scalar_list_of(xmldoc, element_type, parent_xml_element_name,\r\n",
" xml_element_name):\r\n",
" '''Converts an xml fragment into a list of scalar types. The parent xml\r\n",
" element contains a flat list of xml elements which are converted into the\r\n",
" specified scalar type and added to the list.\r\n",
" Example:\r\n",
" xmldoc=\r\n",
" <Endpoints>\r\n",
" <Endpoint>http://{storage-service-name}.blob.core.windows.net/</Endpoint>\r\n",
" <Endpoint>http://{storage-service-name}.queue.core.windows.net/</Endpoint>\r\n",
" <Endpoint>http://{storage-service-name}.table.core.windows.net/</Endpoint>\r\n",
" </Endpoints>\r\n",
" element_type=str\r\n",
" parent_xml_element_name='Endpoints'\r\n",
" xml_element_name='Endpoint'\r\n",
" '''\r\n",
" raise NotImplementedError('_scalar_list_of not supported')\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def _fill_dict(xmldoc, element_name):\r\n",
" container_element = xmldoc.find(element_name)\r\n",
" if container_element is not None:\r\n",
" return_obj = {}\r\n",
" for item_element in container_element.getchildren():\r\n",
" return_obj[item_element.tag] = _get_etree_text(item_element)\r\n",
" return return_obj\r\n",
" return None\r\n",
"\r\n",
"\r\n",
" @staticmethod\r\n",
" def _fill_dict_of(xmldoc, parent_xml_element_name, pair_xml_element_name,\r\n",
" key_xml_element_name, value_xml_element_name):\r\n",
" '''Converts an xml fragment into a dictionary. The parent xml element\r\n",
" contains a list of xml elements where each element has a child element for\r\n",
" the key, and another for the value.\r\n",
" Example:\r\n",
" xmldoc=\r\n",
" <ExtendedProperties>\r\n",
" <ExtendedProperty>\r\n",
" <Name>Ext1</Name>\r\n",
" <Value>Val1</Value>\r\n",
" </ExtendedProperty>\r\n",
" <ExtendedProperty>\r\n",
" <Name>Ext2</Name>\r\n",
" <Value>Val2</Value>\r\n",
" </ExtendedProperty>\r\n",
" </ExtendedProperties>\r\n",
" element_type=str\r\n",
" parent_xml_element_name='ExtendedProperties'\r\n",
" pair_xml_element_name='ExtendedProperty'\r\n",
" key_xml_element_name='Name'\r\n",
" value_xml_element_name='Value'\r\n",
" '''\r\n",
" raise NotImplementedError('_dict_of not supported')\r\n"
] | [
0.013157894736842105,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012987012987012988,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00546448087431694,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017241379310344827,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012987012987012988,
0,
0.010869565217391304,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0.012195121951219513,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0.008928571428571428,
0,
0,
0,
0.009009009009009009,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0.011494252873563218,
0,
0.012195121951219513,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0.01098901098901099,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0.017857142857142856,
0.015625,
0,
0,
0,
0,
0.015873015873015872,
0.012987012987012988,
0.014084507042253521,
0,
0,
0,
0,
0.014285714285714285,
0.014705882352941176,
0.014925373134328358,
0.014285714285714285,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0.01020408163265306,
0.015384615384615385,
0,
0,
0.05263157894736842,
0,
0,
0.012195121951219513,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 589 | 0.00203 | false |
#-*- coding: UTF-8 -*-
"""
Scraper for http://www.baidu.com
taxigps
"""
import urllib
import socket
import re
import chardet
import difflib
from utilities import *
__title__ = "Baidu"
__priority__ = '120'
__lrc__ = True
socket.setdefaulttimeout(10)
class LyricsFetcher:
def __init__( self ):
self.BASE_URL = 'http://box.zhangmen.baidu.com/x?op=12&count=1&title=%s$$%s$$$$'
self.LRC_URL = 'http://box.zhangmen.baidu.com/bdlrc/%d/%d.lrc'
def get_lyrics(self, song):
log( "%s: searching lyrics for %s - %s" % (__title__, song.artist, song.title))
lyrics = Lyrics()
lyrics.song = song
lyrics.source = __title__
lyrics.lrc = __lrc__
try:
url = self.BASE_URL % (song.title, song.artist)
xml_str = urllib.urlopen(url).read()
lrcid_pattern = re.compile(r'<lrcid>(.+?)</lrcid>')
lrcid = int(re.search(lrcid_pattern, xml_str).group(1))
if lrcid == 0:
return None
lrc_url = self.LRC_URL % (lrcid/100, lrcid)
lyr = urllib.urlopen(lrc_url).read()
except:
log( "%s: %s::%s (%d) [%s]" % (
__title__, self.__class__.__name__,
sys.exc_info()[ 2 ].tb_frame.f_code.co_name,
sys.exc_info()[ 2 ].tb_lineno,
sys.exc_info()[ 1 ]
))
return None
enc = chardet.detect(lyr)
lyr = lyr.decode(enc['encoding'], 'ignore')
lyrics.lyrics = lyr
return lyrics
| [
"#-*- coding: UTF-8 -*-\n",
"\"\"\"\n",
"Scraper for http://www.baidu.com\n",
"\n",
"taxigps\n",
"\"\"\"\n",
"\n",
"import urllib\n",
"import socket\n",
"import re\n",
"import chardet\n",
"import difflib\n",
"from utilities import *\n",
"\n",
"__title__ = \"Baidu\"\n",
"__priority__ = '120'\n",
"__lrc__ = True\n",
"\n",
"socket.setdefaulttimeout(10)\n",
"\n",
"class LyricsFetcher:\n",
" def __init__( self ):\n",
" self.BASE_URL = 'http://box.zhangmen.baidu.com/x?op=12&count=1&title=%s$$%s$$$$'\n",
" self.LRC_URL = 'http://box.zhangmen.baidu.com/bdlrc/%d/%d.lrc'\n",
"\n",
" def get_lyrics(self, song):\n",
" log( \"%s: searching lyrics for %s - %s\" % (__title__, song.artist, song.title))\n",
" lyrics = Lyrics()\n",
" lyrics.song = song\n",
" lyrics.source = __title__\n",
" lyrics.lrc = __lrc__\n",
"\n",
" try:\n",
" url = self.BASE_URL % (song.title, song.artist)\n",
" xml_str = urllib.urlopen(url).read()\n",
" lrcid_pattern = re.compile(r'<lrcid>(.+?)</lrcid>')\n",
" lrcid = int(re.search(lrcid_pattern, xml_str).group(1))\n",
" if lrcid == 0:\n",
" return None\n",
" lrc_url = self.LRC_URL % (lrcid/100, lrcid)\n",
" lyr = urllib.urlopen(lrc_url).read()\n",
" except:\n",
" log( \"%s: %s::%s (%d) [%s]\" % (\n",
" __title__, self.__class__.__name__,\n",
" sys.exc_info()[ 2 ].tb_frame.f_code.co_name,\n",
" sys.exc_info()[ 2 ].tb_lineno,\n",
" sys.exc_info()[ 1 ]\n",
" ))\n",
" return None\n",
"\n",
" enc = chardet.detect(lyr)\n",
" lyr = lyr.decode(enc['encoding'], 'ignore')\n",
" lyrics.lyrics = lyr\n",
" return lyrics\n"
] | [
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.07692307692307693,
0.011235955056179775,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0.022727272727272728,
0,
0.03125,
0.04,
0.05128205128205128,
0,
0,
0,
0,
0,
0,
0
] | 54 | 0.007588 | false |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem406.py
#
# Guessing Game
# =============
# Published on Sunday, 16th December 2012, 07:00 am
#
# We are trying to find a hidden number selected from the set of integers {1,
# 2, ..., n} by asking questions. Each number (question) we ask, we get one
# of three possible answers: "Your guess is lower than the hidden number"
# (and you incur a cost of a), or "Your guess is higher than the hidden
# number" (and you incur a cost of b), or "Yes, that's it!" (and the game
# ends). Given the value of n, a, and b, an optimal strategy minimizes the
# total cost for the worst possible case. For example, if n = 5, a = 2, and b =
# 3, then we may begin by asking "2" as our first question. If we are told that
# 2 is higher than the hidden number (for a cost of b=3), then we are sure that
# "1" is the hidden number (for a total cost of 3). If we are told that 2 is
# lower than the hidden number (for a cost of a=2), then our next question will
# be "4". If we are told that 4 is higher than the hidden number (for a cost
# of b=3), then we are sure that "3" is the hidden number (for a total cost of
# 2+3=5). If we are told that 4 is lower than the hidden number (for a cost of
# a=2), then we are sure that "5" is the hidden number (for a total cost of
# 2+2=4). Thus, the worst-case cost achieved by this strategy is 5. It can
# also be shown that this is the lowest worst-case cost that can be achieved.
# So, in fact, we have just described an optimal strategy for the given values
# of n, a, and b. Let C(n, a, b) be the worst-case cost achieved by an optimal
# strategy for the given values of n, a, and b. Here are a few examples: C(5,
# 2, 3) = 5 C(500, 2, 3) = 13.22073197... C(20000, 5, 7) = 82 C(2000000, 5,
# 7) = 49.63755955... Let Fk be the Fibonacci numbers: Fk = Fk-1 + Fk-2 with
# base cases F1 = F2 = 1.Find 1k30 C(1012, k, Fk), and give your answer rounded
# to 8 decimal places behind the decimal point.
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
| [
"# -*- coding: utf-8 -*-\n",
"# ProjectEuler/src/python/problem406.py\n",
"#\n",
"# Guessing Game\n",
"# =============\n",
"# Published on Sunday, 16th December 2012, 07:00 am\n",
"#\n",
"# We are trying to find a hidden number selected from the set of integers {1,\n",
"# 2, ..., n} by asking questions. Each number (question) we ask, we get one\n",
"# of three possible answers: \"Your guess is lower than the hidden number\"\n",
"# (and you incur a cost of a), or \"Your guess is higher than the hidden\n",
"# number\" (and you incur a cost of b), or \"Yes, that's it!\" (and the game\n",
"# ends). Given the value of n, a, and b, an optimal strategy minimizes the\n",
"# total cost for the worst possible case. For example, if n = 5, a = 2, and b =\n",
"# 3, then we may begin by asking \"2\" as our first question. If we are told that\n",
"# 2 is higher than the hidden number (for a cost of b=3), then we are sure that\n",
"# \"1\" is the hidden number (for a total cost of 3). If we are told that 2 is\n",
"# lower than the hidden number (for a cost of a=2), then our next question will\n",
"# be \"4\". If we are told that 4 is higher than the hidden number (for a cost\n",
"# of b=3), then we are sure that \"3\" is the hidden number (for a total cost of\n",
"# 2+3=5). If we are told that 4 is lower than the hidden number (for a cost of\n",
"# a=2), then we are sure that \"5\" is the hidden number (for a total cost of\n",
"# 2+2=4). Thus, the worst-case cost achieved by this strategy is 5. It can\n",
"# also be shown that this is the lowest worst-case cost that can be achieved.\n",
"# So, in fact, we have just described an optimal strategy for the given values\n",
"# of n, a, and b. Let C(n, a, b) be the worst-case cost achieved by an optimal\n",
"# strategy for the given values of n, a, and b. Here are a few examples: C(5,\n",
"# 2, 3) = 5 C(500, 2, 3) = 13.22073197... C(20000, 5, 7) = 82 C(2000000, 5,\n",
"# 7) = 49.63755955... Let Fk be the Fibonacci numbers: Fk = Fk-1 + Fk-2 with\n",
"# base cases F1 = F2 = 1.Find 1k30 C(1012, k, Fk), and give your answer rounded\n",
"# to 8 decimal places behind the decimal point.\n",
"\n",
"import projecteuler as pe\n",
"\n",
"def main():\n",
" pass\n",
"\n",
"if __name__ == \"__main__\":\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0.037037037037037035,
0
] | 39 | 0.003086 | false |
# GlamRefstring converter
# Coded by Vali (c)2011
# Modded and recoded by MCelliotG for use in Glamour skins or standalone
# If you use this Converter for other skins and rename it, please keep the lines above adding your credits below
from Components.Converter.Converter import Converter
from Components.Element import cached
from Screens.InfoBar import InfoBar
class GlamRefString(Converter, object):
CURRENT = 0
EVENT = 1
def __init__(self, type):
Converter.__init__(self, type)
self.CHANSEL = None
self.type = {
"CurrentRef": self.CURRENT,
"ServicelistRef": self.EVENT
}[type]
@cached
def getText(self):
if (self.type == self.EVENT):
servname= str(self.source.service.toString())
return servname
elif (self.type == self.CURRENT):
if self.CHANSEL == None:
self.CHANSEL = InfoBar.instance.servicelist
if len(InfoBar.instance.session.dialog_stack)>1:
for zz in InfoBar.instance.session.dialog_stack:
if (str(zz[0]) == "<class 'Screens.MovieSelection.MovieSelection'>") or (str(InfoBar.instance.session.dialog_stack[1][0]) == "<class 'Screens.InfoBar.MoviePlayer'>"):
return self.source.text
vSrv = self.CHANSEL.servicelist.getCurrent()
return str(vSrv.toString())
else:
return "na"
text = property(getText)
| [
"#\tGlamRefstring converter\n",
"#\tCoded by Vali (c)2011\n",
"#\tModded and recoded by MCelliotG for use in Glamour skins or standalone\n",
"#\tIf you use this Converter for other skins and rename it, please keep the lines above adding your credits below\n",
"\n",
"from Components.Converter.Converter import Converter\n",
"from Components.Element import cached\n",
"from Screens.InfoBar import InfoBar\n",
"\n",
"class GlamRefString(Converter, object):\n",
"\tCURRENT = 0\n",
"\tEVENT = 1\n",
"\t\n",
"\tdef __init__(self, type):\n",
"\t\tConverter.__init__(self, type)\n",
"\t\tself.CHANSEL = None\n",
"\t\tself.type = {\n",
"\t\t\t\t\"CurrentRef\": self.CURRENT,\n",
"\t\t\t\t\"ServicelistRef\": self.EVENT\n",
"\t\t\t}[type]\n",
"\n",
"\t@cached\n",
"\tdef getText(self):\n",
"\t\tif (self.type == self.EVENT):\n",
"\t\t\tservname= str(self.source.service.toString())\n",
"\t\t\treturn servname\n",
"\t\telif (self.type == self.CURRENT):\n",
"\t\t\tif self.CHANSEL == None:\n",
"\t\t\t\tself.CHANSEL = InfoBar.instance.servicelist\n",
"\t\t\tif len(InfoBar.instance.session.dialog_stack)>1:\n",
"\t\t\t\tfor zz in InfoBar.instance.session.dialog_stack:\n",
"\t\t\t\t\tif (str(zz[0]) == \"<class 'Screens.MovieSelection.MovieSelection'>\") or (str(InfoBar.instance.session.dialog_stack[1][0]) == \"<class 'Screens.InfoBar.MoviePlayer'>\"):\n",
"\t\t\t\t\t\treturn self.source.text\n",
"\t\t\tvSrv = self.CHANSEL.servicelist.getCurrent()\n",
"\t\t\treturn str(vSrv.toString())\n",
"\t\telse:\n",
"\t\t\treturn \"na\"\n",
"\n",
"\ttext = property(getText)\n",
" "
] | [
0.038461538461538464,
0.041666666666666664,
0.0136986301369863,
0.017699115044247787,
0,
0,
0,
0,
0,
0.025,
0.07692307692307693,
0.09090909090909091,
1,
0.037037037037037035,
0.030303030303030304,
0.045454545454545456,
0.0625,
0.03125,
0.030303030303030304,
0.09090909090909091,
0,
0.1111111111111111,
0.05,
0.03125,
0.04081632653061224,
0.05263157894736842,
0.027777777777777776,
0.07142857142857142,
0.020833333333333332,
0.038461538461538464,
0.018867924528301886,
0.011627906976744186,
0.03333333333333333,
0.020833333333333332,
0.03225806451612903,
0.125,
0.06666666666666667,
0,
0.038461538461538464,
3
] | 40 | 0.135587 | false |
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class BezvadataCz(SimpleHoster):
__name__ = "BezvadataCz"
__type__ = "hoster"
__pattern__ = r'http://(?:www\.)?bezvadata.cz/stahnout/.*'
__version__ = "0.24"
__description__ = """BezvaData.cz hoster plugin"""
__author_name__ = "zoidberg"
__author_mail__ = "zoidberg@mujmail.cz"
FILE_NAME_PATTERN = r'<p><b>Soubor: (?P<N>[^<]+)</b></p>'
FILE_SIZE_PATTERN = r'<li><strong>Velikost:</strong> (?P<S>[^<]+)</li>'
OFFLINE_PATTERN = r'<title>BezvaData \| Soubor nenalezen</title>'
def setup(self):
self.multiDL = self.resumeDownload = True
def handleFree(self):
#download button
m = re.search(r'<a class="stahnoutSoubor".*?href="(.*?)"', self.html)
if m is None:
self.parseError("page1 URL")
url = "http://bezvadata.cz%s" % m.group(1)
#captcha form
self.html = self.load(url)
self.checkErrors()
for _ in xrange(5):
action, inputs = self.parseHtmlForm('frm-stahnoutFreeForm')
if not inputs:
self.parseError("FreeForm")
m = re.search(r'<img src="data:image/png;base64,(.*?)"', self.html)
if m is None:
self.parseError("captcha img")
#captcha image is contained in html page as base64encoded data but decryptCaptcha() expects image url
self.load, proper_load = self.loadcaptcha, self.load
try:
inputs['captcha'] = self.decryptCaptcha(m.group(1), imgtype='png')
finally:
self.load = proper_load
if '<img src="data:image/png;base64' in self.html:
self.invalidCaptcha()
else:
self.correctCaptcha()
break
else:
self.fail("No valid captcha code entered")
#download url
self.html = self.load("http://bezvadata.cz%s" % action, post=inputs)
self.checkErrors()
m = re.search(r'<a class="stahnoutSoubor2" href="(.*?)">', self.html)
if m is None:
self.parseError("page2 URL")
url = "http://bezvadata.cz%s" % m.group(1)
self.logDebug("DL URL %s" % url)
#countdown
m = re.search(r'id="countdown">(\d\d):(\d\d)<', self.html)
wait_time = (int(m.group(1)) * 60 + int(m.group(2)) + 1) if m else 120
self.wait(wait_time, False)
self.download(url)
def checkErrors(self):
if 'images/button-download-disable.png' in self.html:
self.longWait(5 * 60, 24) # parallel dl limit
elif '<div class="infobox' in self.html:
self.tempOffline()
def loadcaptcha(self, data, *args, **kwargs):
return data.decode("base64")
getInfo = create_getInfo(BezvadataCz)
| [
"# -*- coding: utf-8 -*-\n",
"\n",
"\"\"\"\n",
" This program is free software; you can redistribute it and/or modify\n",
" it under the terms of the GNU General Public License as published by\n",
" the Free Software Foundation; either version 3 of the License,\n",
" or (at your option) any later version.\n",
"\n",
" This program is distributed in the hope that it will be useful,\n",
" but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n",
" See the GNU General Public License for more details.\n",
"\n",
" You should have received a copy of the GNU General Public License\n",
" along with this program; if not, see <http://www.gnu.org/licenses/>.\n",
"\"\"\"\n",
"\n",
"import re\n",
"from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo\n",
"\n",
"\n",
"class BezvadataCz(SimpleHoster):\n",
" __name__ = \"BezvadataCz\"\n",
" __type__ = \"hoster\"\n",
" __pattern__ = r'http://(?:www\\.)?bezvadata.cz/stahnout/.*'\n",
" __version__ = \"0.24\"\n",
" __description__ = \"\"\"BezvaData.cz hoster plugin\"\"\"\n",
" __author_name__ = \"zoidberg\"\n",
" __author_mail__ = \"zoidberg@mujmail.cz\"\n",
"\n",
" FILE_NAME_PATTERN = r'<p><b>Soubor: (?P<N>[^<]+)</b></p>'\n",
" FILE_SIZE_PATTERN = r'<li><strong>Velikost:</strong> (?P<S>[^<]+)</li>'\n",
" OFFLINE_PATTERN = r'<title>BezvaData \\| Soubor nenalezen</title>'\n",
"\n",
" def setup(self):\n",
" self.multiDL = self.resumeDownload = True\n",
"\n",
" def handleFree(self):\n",
" #download button\n",
" m = re.search(r'<a class=\"stahnoutSoubor\".*?href=\"(.*?)\"', self.html)\n",
" if m is None:\n",
" self.parseError(\"page1 URL\")\n",
" url = \"http://bezvadata.cz%s\" % m.group(1)\n",
"\n",
" #captcha form\n",
" self.html = self.load(url)\n",
" self.checkErrors()\n",
" for _ in xrange(5):\n",
" action, inputs = self.parseHtmlForm('frm-stahnoutFreeForm')\n",
" if not inputs:\n",
" self.parseError(\"FreeForm\")\n",
"\n",
" m = re.search(r'<img src=\"data:image/png;base64,(.*?)\"', self.html)\n",
" if m is None:\n",
" self.parseError(\"captcha img\")\n",
"\n",
" #captcha image is contained in html page as base64encoded data but decryptCaptcha() expects image url\n",
" self.load, proper_load = self.loadcaptcha, self.load\n",
" try:\n",
" inputs['captcha'] = self.decryptCaptcha(m.group(1), imgtype='png')\n",
" finally:\n",
" self.load = proper_load\n",
"\n",
" if '<img src=\"data:image/png;base64' in self.html:\n",
" self.invalidCaptcha()\n",
" else:\n",
" self.correctCaptcha()\n",
" break\n",
" else:\n",
" self.fail(\"No valid captcha code entered\")\n",
"\n",
" #download url\n",
" self.html = self.load(\"http://bezvadata.cz%s\" % action, post=inputs)\n",
" self.checkErrors()\n",
" m = re.search(r'<a class=\"stahnoutSoubor2\" href=\"(.*?)\">', self.html)\n",
" if m is None:\n",
" self.parseError(\"page2 URL\")\n",
" url = \"http://bezvadata.cz%s\" % m.group(1)\n",
" self.logDebug(\"DL URL %s\" % url)\n",
"\n",
" #countdown\n",
" m = re.search(r'id=\"countdown\">(\\d\\d):(\\d\\d)<', self.html)\n",
" wait_time = (int(m.group(1)) * 60 + int(m.group(2)) + 1) if m else 120\n",
" self.wait(wait_time, False)\n",
"\n",
" self.download(url)\n",
"\n",
" def checkErrors(self):\n",
" if 'images/button-download-disable.png' in self.html:\n",
" self.longWait(5 * 60, 24) # parallel dl limit\n",
" elif '<div class=\"infobox' in self.html:\n",
" self.tempOffline()\n",
"\n",
" def loadcaptcha(self, data, *args, **kwargs):\n",
" return data.decode(\"base64\")\n",
"\n",
"\n",
"getInfo = create_getInfo(BezvadataCz)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017543859649122806,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 98 | 0.002175 | false |
import numpy as np
from theano import config, shared, function
import theano.tensor as t
from ae.encoder import AutoEncoder
from nn.convolutionalLayer import ConvolutionalLayer
from theano.tensor.nnet import conv2d
class ConvolutionalAutoEncoder(ConvolutionalLayer, AutoEncoder) :
'''This class describes a Contractive AutoEncoder (CAE) for a convolutional
layer. This differs from the normal CAE in that it is useful for
contextual information like imagery or text.
If the decoded message matches the original input, the encoders is
considered lossless. Otherwise the loss is calculated and the encoder
is updated, so it can better encode the input when encountered again.
Over time the object will extract regular patterns in the data which
are frequently encountered.
CAEs can be stacked and trained in a greedy layerwise manner, and the
trained CAEs can be used to initialize a Neural Network into a better
regularized state than random initialization. Lastly this technique can
be used when the amount of unlabeled data far outweighs the amount of
labeled data.
layerID : unique name identifier for this layer
inputSize : (batch size, channels, rows, columns)
kernelSize : (number of kernels, channels, rows, columns)
regType : type of regularization term to use
default None : perform no additional regularization
L1 : Least Absolute Deviation
L2 : Least Squares
downsampleFactor : (rowFactor, columnFactor)
learningRate : learning rate for all neurons
momentumRate : rate of momentum for all neurons
NOTE: momentum allows for higher learning rates
contractionRate : variance (dimensionality) reduction rate
None uses '1 / numNeurons'
dropout : rate of retention in a given neuron during training
NOTE: input layers should be around .8 or .9
hidden layers should be around .5 or .6
output layers should always be 1.
initialWeights : weights to initialize the network
None generates random weights for the layer
initialHidThresh : thresholds to initialize the forward network
None generates random thresholds for the layer
initialVisThresh : thresholds to initialize the backward network
None generates random thresholds for the layer
activation : the sigmoid function to use for activation
this must be a function with a derivative form
forceSparsity : round the output of the neurons to {0,1}
this put more emphasis on the pattern extraction
randomNumGen : generator for the initial weight values
'''
def __init__ (self, layerID, inputSize, kernelSize,
downsampleFactor, regType=None,
learningRate=0.001, momentumRate=0.9,
dropout=None, contractionRate=None,
initialWeights=None, initialHidThresh=None,
initialVisThresh=None, activation=t.nnet.sigmoid,
forceSparsity=True, randomNumGen=None) :
from nn.reg import Regularization
ConvolutionalLayer.__init__(self, layerID=layerID,
inputSize=inputSize,
kernelSize=kernelSize,
downsampleFactor=downsampleFactor,
learningRate=learningRate,
momentumRate=momentumRate,
dropout=dropout,
initialWeights=initialWeights,
initialThresholds=initialHidThresh,
activation=activation,
randomNumGen=randomNumGen)
AutoEncoder.__init__(self, forceSparsity,
1. / np.prod(kernelSize[:]) \
if contractionRate is None else contractionRate)
# setup initial values for the hidden thresholds
if initialVisThresh is None :
initialVisThresh = np.zeros((self._inputSize[1],),
dtype=config.floatX)
self._thresholdsBack = shared(value=initialVisThresh, borrow=True)
self._regularization = Regularization(regType, self._contractionRate)
def __str__(self) :
'''Output Layer to String.'''
from nn.layer import Layer
s = ''
s += '\tLayer Type : ConvolutionalAutoEncoder\n'
s += Layer.__str__(self)
return s
def _setActivation(self, out) :
from nn.layer import Layer
from theano.tensor import round
act = Layer._setActivation(self, out)
return round(act, mode='half_away_from_zero') \
if self._forceSparse else act
def __getstate__(self) :
'''Save layer pickle'''
from dataset.shared import fromShared
dict = ConvolutionalLayer.__getstate__(self)
dict['_thresholdsBack'] = fromShared(self._thresholdsBack)
# remove the functions -- they will be rebuilt JIT
if 'reconstruction' in dict : del dict['reconstruction']
if '_costs' in dict : del dict['_costs']
if '_costLabels' in dict : del dict['_costLabels']
if '_updates' in dict : del dict['_updates']
if 'trainLayer' in dict : del dict['trainLayer']
return dict
def __setstate__(self, dict) :
'''Load layer pickle'''
from theano import shared
# remove any current functions from the object so we force the
# theano functions to be rebuilt with the new buffers
if hasattr(self, 'reconstruction') : delattr(self, 'reconstruction')
if hasattr(self, '_costs') : delattr(self, '_costs')
if hasattr(self, '_costLabels') : delattr(self, '_costLabels')
if hasattr(self, '_updates') : delattr(self, '_updates')
if hasattr(self, 'trainLayer') : delattr(self, 'trainLayer')
ConvolutionalLayer.__setstate__(self, dict)
initialThresholdsBack = self._thresholdsBack
self._thresholdsBack = shared(value=initialThresholdsBack, borrow=True)
def _unpool_2d(self, input, upsampleFactor) :
'''This method performs the opposite of pool_2d. This uses the index
which produced the largest input during pooling in order to produce
the sparse upsample.
'''
if input.ndim < len(self.getOutputSize()) :
from dataset.shared import getShape
input = input.reshape([getShape(input)[0]] +
list(self.getOutputSize()[1:]))
return input.repeat(upsampleFactor[0], axis=2).repeat(
upsampleFactor[1], axis=3) \
if upsampleFactor[0] > 1 else input
def _getWeightsBack(self) :
'''Calculate the weights used for decoding.'''
kernelSize = self.getKernelSize()
kernelBackSize = (kernelSize[1], kernelSize[0],
kernelSize[2], kernelSize[3])
return t.reshape(self._weights, (kernelBackSize))
def _decode(self, input) :
from nn.layer import Layer
weightsBack = self._getWeightsBack()
deconvolve = conv2d(input, weightsBack, self.getFeatureSize(),
tuple(weightsBack.shape.eval()),
border_mode='full')
out = deconvolve + self._thresholdsBack.dimshuffle('x', 0, 'x', 'x')
return Layer._setActivation(self, out)
def finalize(self, networkInput, layerInput) :
'''Setup the computation graph for this layer.
networkInput : the input variable tuple for the network
format (inClass, inTrain)
layerInput : the input variable tuple for this layer
format (inClass, inTrain)
'''
from nn.costUtils import calcLoss, leastSquares, \
calcSparsityConstraint, compileUpdate
from dataset.shared import getShape
ConvolutionalLayer.finalize(self, networkInput, layerInput)
weightsBack = self._getWeightsBack()
self._costs = []
self._costLabels = []
# setup the decoder --
# this take the output of the feedforward process as input and
# and runs the output back through the network in reverse. The net
# effect is to reconstruct the input, and ultimately to see how well
# the network is at encoding the message.
decodedInput = self.buildDecoder(self.output[0])
# DEBUG: For Debugging purposes only
self.reconstruction = function([networkInput[0]], decodedInput)
# NOTE: Sparsity is not a useful constraint on convolutional layers
# contraction is only applicable in the non-binary case
if not self._forceSparse :
# compute the jacobian cost of the output --
# This works as a sparsity constraint in case the hidden vector is
# larger than the input vector.
unpooling = self._unpool_2d(self.output[0], self._downsampleFactor)
jacobianMat = conv2d(unpooling * (1. - unpooling), weightsBack,
self.getFeatureSize(),
tuple(weightsBack.shape.eval()),
border_mode='full')
self._costs.append(leastSquares(jacobianMat, self._contractionRate))
self._costLabels.append('Jacob')
# add regularization if it was user requested
regularization = self._regularization.calculate([self])
if regularization is not None :
self._costs.append(regularization)
self._costLabels.append('Regularization')
# create the negative log likelihood function --
# this is our cost function with respect to the original input
# NOTE: The jacobian was computed however takes much longer to process
# and does not help convergence or regularization. It was removed
self._costs.append(calcLoss(
self.input[0], decodedInput, self._activation,
scaleFactor=1. / self.getInputSize()[1]))
self._costLabels.append('Local Cost')
gradients = t.grad(t.sum(self._costs) / getShape(networkInput[0])[0],
self.getWeights())
self._updates = compileUpdate(self.getWeights(), gradients,
self._learningRate, self._momentumRate)
# TODO: this needs to be stackable and take the input to the first
# layer, not just the input of this layer. This will ensure
# the other layers are activated to get the input to this layer
# DEBUG: For Debugging purposes only
self.trainLayer = function([networkInput[0]], self._costs,
updates=self._updates)
def buildDecoder(self, input) :
'''Calculate the decoding component. This should be used after the
encoder has been created. The decoder is ran in the opposite
direction.
'''
# NOTE: the output may come back as a different shape than it left
# so we reshape here just in case.
return self._decode(self._unpool_2d(input, self._downsampleFactor))
def getWeights(self) :
'''Update to account for the decode thresholds.'''
return [self._weights, self._thresholds, self._thresholdsBack]
def getUpdates(self) :
'''This allows the Stacker to build the layerwise training.'''
return (self._costs, self._updates)
def getCostLabels(self) :
'''Return the labels associated with the cost functions applied.'''
return self._costLabels
| [
"import numpy as np\n",
"from theano import config, shared, function\n",
"import theano.tensor as t\n",
"from ae.encoder import AutoEncoder\n",
"from nn.convolutionalLayer import ConvolutionalLayer\n",
"from theano.tensor.nnet import conv2d\n",
"\n",
"class ConvolutionalAutoEncoder(ConvolutionalLayer, AutoEncoder) :\n",
" '''This class describes a Contractive AutoEncoder (CAE) for a convolutional\n",
" layer. This differs from the normal CAE in that it is useful for\n",
" contextual information like imagery or text.\n",
"\n",
" If the decoded message matches the original input, the encoders is\n",
" considered lossless. Otherwise the loss is calculated and the encoder \n",
" is updated, so it can better encode the input when encountered again.\n",
" Over time the object will extract regular patterns in the data which\n",
" are frequently encountered.\n",
"\n",
" CAEs can be stacked and trained in a greedy layerwise manner, and the\n",
" trained CAEs can be used to initialize a Neural Network into a better\n",
" regularized state than random initialization. Lastly this technique can\n",
" be used when the amount of unlabeled data far outweighs the amount of \n",
" labeled data.\n",
"\n",
" layerID : unique name identifier for this layer\n",
" inputSize : (batch size, channels, rows, columns)\n",
" kernelSize : (number of kernels, channels, rows, columns)\n",
" regType : type of regularization term to use\n",
" default None : perform no additional regularization\n",
" L1 : Least Absolute Deviation\n",
" L2 : Least Squares\n",
" downsampleFactor : (rowFactor, columnFactor)\n",
" learningRate : learning rate for all neurons\n",
" momentumRate : rate of momentum for all neurons\n",
" NOTE: momentum allows for higher learning rates\n",
" contractionRate : variance (dimensionality) reduction rate\n",
" None uses '1 / numNeurons'\n",
" dropout : rate of retention in a given neuron during training\n",
" NOTE: input layers should be around .8 or .9\n",
" hidden layers should be around .5 or .6\n",
" output layers should always be 1.\n",
" initialWeights : weights to initialize the network\n",
" None generates random weights for the layer\n",
" initialHidThresh : thresholds to initialize the forward network\n",
" None generates random thresholds for the layer\n",
" initialVisThresh : thresholds to initialize the backward network\n",
" None generates random thresholds for the layer\n",
" activation : the sigmoid function to use for activation\n",
" this must be a function with a derivative form\n",
" forceSparsity : round the output of the neurons to {0,1}\n",
" this put more emphasis on the pattern extraction\n",
" randomNumGen : generator for the initial weight values\n",
" '''\n",
" def __init__ (self, layerID, inputSize, kernelSize, \n",
" downsampleFactor, regType=None,\n",
" learningRate=0.001, momentumRate=0.9, \n",
" dropout=None, contractionRate=None,\n",
" initialWeights=None, initialHidThresh=None,\n",
" initialVisThresh=None, activation=t.nnet.sigmoid,\n",
" forceSparsity=True, randomNumGen=None) :\n",
" from nn.reg import Regularization\n",
" ConvolutionalLayer.__init__(self, layerID=layerID,\n",
" inputSize=inputSize,\n",
" kernelSize=kernelSize,\n",
" downsampleFactor=downsampleFactor,\n",
" learningRate=learningRate,\n",
" momentumRate=momentumRate,\n",
" dropout=dropout,\n",
" initialWeights=initialWeights,\n",
" initialThresholds=initialHidThresh,\n",
" activation=activation,\n",
" randomNumGen=randomNumGen)\n",
" AutoEncoder.__init__(self, forceSparsity, \n",
" 1. / np.prod(kernelSize[:]) \\\n",
" if contractionRate is None else contractionRate)\n",
"\n",
" # setup initial values for the hidden thresholds\n",
" if initialVisThresh is None :\n",
" initialVisThresh = np.zeros((self._inputSize[1],),\n",
" dtype=config.floatX)\n",
" self._thresholdsBack = shared(value=initialVisThresh, borrow=True)\n",
" self._regularization = Regularization(regType, self._contractionRate)\n",
"\n",
" def __str__(self) :\n",
" '''Output Layer to String.'''\n",
" from nn.layer import Layer\n",
" s = ''\n",
" s += '\\tLayer Type : ConvolutionalAutoEncoder\\n'\n",
" s += Layer.__str__(self)\n",
" return s\n",
"\n",
" def _setActivation(self, out) :\n",
" from nn.layer import Layer\n",
" from theano.tensor import round\n",
" act = Layer._setActivation(self, out)\n",
" return round(act, mode='half_away_from_zero') \\\n",
" if self._forceSparse else act\n",
"\n",
" def __getstate__(self) :\n",
" '''Save layer pickle'''\n",
" from dataset.shared import fromShared\n",
" dict = ConvolutionalLayer.__getstate__(self)\n",
" dict['_thresholdsBack'] = fromShared(self._thresholdsBack)\n",
" # remove the functions -- they will be rebuilt JIT\n",
" if 'reconstruction' in dict : del dict['reconstruction']\n",
" if '_costs' in dict : del dict['_costs']\n",
" if '_costLabels' in dict : del dict['_costLabels']\n",
" if '_updates' in dict : del dict['_updates']\n",
" if 'trainLayer' in dict : del dict['trainLayer']\n",
" return dict\n",
"\n",
" def __setstate__(self, dict) :\n",
" '''Load layer pickle'''\n",
" from theano import shared\n",
" # remove any current functions from the object so we force the\n",
" # theano functions to be rebuilt with the new buffers\n",
" if hasattr(self, 'reconstruction') : delattr(self, 'reconstruction')\n",
" if hasattr(self, '_costs') : delattr(self, '_costs')\n",
" if hasattr(self, '_costLabels') : delattr(self, '_costLabels')\n",
" if hasattr(self, '_updates') : delattr(self, '_updates')\n",
" if hasattr(self, 'trainLayer') : delattr(self, 'trainLayer')\n",
" ConvolutionalLayer.__setstate__(self, dict)\n",
" initialThresholdsBack = self._thresholdsBack\n",
" self._thresholdsBack = shared(value=initialThresholdsBack, borrow=True)\n",
"\n",
" def _unpool_2d(self, input, upsampleFactor) :\n",
" '''This method performs the opposite of pool_2d. This uses the index\n",
" which produced the largest input during pooling in order to produce\n",
" the sparse upsample.\n",
" '''\n",
" if input.ndim < len(self.getOutputSize()) :\n",
" from dataset.shared import getShape\n",
" input = input.reshape([getShape(input)[0]] +\n",
" list(self.getOutputSize()[1:]))\n",
" return input.repeat(upsampleFactor[0], axis=2).repeat(\n",
" upsampleFactor[1], axis=3) \\\n",
" if upsampleFactor[0] > 1 else input\n",
"\n",
" def _getWeightsBack(self) :\n",
" '''Calculate the weights used for decoding.'''\n",
" kernelSize = self.getKernelSize()\n",
" kernelBackSize = (kernelSize[1], kernelSize[0], \n",
" kernelSize[2], kernelSize[3])\n",
" return t.reshape(self._weights, (kernelBackSize))\n",
"\n",
" def _decode(self, input) :\n",
" from nn.layer import Layer\n",
" weightsBack = self._getWeightsBack()\n",
" deconvolve = conv2d(input, weightsBack, self.getFeatureSize(),\n",
" tuple(weightsBack.shape.eval()),\n",
" border_mode='full')\n",
" out = deconvolve + self._thresholdsBack.dimshuffle('x', 0, 'x', 'x')\n",
" return Layer._setActivation(self, out)\n",
"\n",
" def finalize(self, networkInput, layerInput) :\n",
" '''Setup the computation graph for this layer.\n",
" networkInput : the input variable tuple for the network\n",
" format (inClass, inTrain)\n",
" layerInput : the input variable tuple for this layer\n",
" format (inClass, inTrain)\n",
" '''\n",
" from nn.costUtils import calcLoss, leastSquares, \\\n",
" calcSparsityConstraint, compileUpdate\n",
" from dataset.shared import getShape\n",
" ConvolutionalLayer.finalize(self, networkInput, layerInput)\n",
"\n",
" weightsBack = self._getWeightsBack()\n",
" self._costs = []\n",
" self._costLabels = []\n",
"\n",
" # setup the decoder --\n",
" # this take the output of the feedforward process as input and\n",
" # and runs the output back through the network in reverse. The net\n",
" # effect is to reconstruct the input, and ultimately to see how well\n",
" # the network is at encoding the message.\n",
" decodedInput = self.buildDecoder(self.output[0])\n",
"\n",
" # DEBUG: For Debugging purposes only\n",
" self.reconstruction = function([networkInput[0]], decodedInput)\n",
"\n",
" # NOTE: Sparsity is not a useful constraint on convolutional layers\n",
"\n",
" # contraction is only applicable in the non-binary case \n",
" if not self._forceSparse :\n",
" # compute the jacobian cost of the output --\n",
" # This works as a sparsity constraint in case the hidden vector is\n",
" # larger than the input vector.\n",
" unpooling = self._unpool_2d(self.output[0], self._downsampleFactor)\n",
" jacobianMat = conv2d(unpooling * (1. - unpooling), weightsBack,\n",
" self.getFeatureSize(),\n",
" tuple(weightsBack.shape.eval()),\n",
" border_mode='full')\n",
" self._costs.append(leastSquares(jacobianMat, self._contractionRate))\n",
" self._costLabels.append('Jacob')\n",
"\n",
" # add regularization if it was user requested\n",
" regularization = self._regularization.calculate([self])\n",
" if regularization is not None :\n",
" self._costs.append(regularization)\n",
" self._costLabels.append('Regularization')\n",
"\n",
" # create the negative log likelihood function --\n",
" # this is our cost function with respect to the original input\n",
" # NOTE: The jacobian was computed however takes much longer to process\n",
" # and does not help convergence or regularization. It was removed\n",
" self._costs.append(calcLoss(\n",
" self.input[0], decodedInput, self._activation,\n",
" scaleFactor=1. / self.getInputSize()[1]))\n",
" self._costLabels.append('Local Cost')\n",
"\n",
" gradients = t.grad(t.sum(self._costs) / getShape(networkInput[0])[0],\n",
" self.getWeights())\n",
" self._updates = compileUpdate(self.getWeights(), gradients,\n",
" self._learningRate, self._momentumRate)\n",
"\n",
" # TODO: this needs to be stackable and take the input to the first\n",
" # layer, not just the input of this layer. This will ensure\n",
" # the other layers are activated to get the input to this layer\n",
" # DEBUG: For Debugging purposes only\n",
" self.trainLayer = function([networkInput[0]], self._costs,\n",
" updates=self._updates)\n",
"\n",
" def buildDecoder(self, input) :\n",
" '''Calculate the decoding component. This should be used after the\n",
" encoder has been created. The decoder is ran in the opposite\n",
" direction.\n",
" '''\n",
" # NOTE: the output may come back as a different shape than it left\n",
" # so we reshape here just in case.\n",
" return self._decode(self._unpool_2d(input, self._downsampleFactor))\n",
"\n",
" def getWeights(self) :\n",
" '''Update to account for the decode thresholds.'''\n",
" return [self._weights, self._thresholds, self._thresholdsBack]\n",
"\n",
" def getUpdates(self) :\n",
" '''This allows the Stacker to build the layerwise training.'''\n",
" return (self._costs, self._updates)\n",
"\n",
" def getCostLabels(self) :\n",
" '''Return the labels associated with the cost functions applied.'''\n",
" return self._costLabels\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03508771929824561,
0,
0.017543859649122806,
0,
0,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0196078431372549,
0.01694915254237288,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0,
0,
0,
0,
0.022222222222222223,
0,
0.034482758620689655,
0,
0,
0,
0,
0,
0.03076923076923077,
0.04081632653061224,
0.03389830508474576,
0.03773584905660377,
0.03508771929824561,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0.025974025974025976,
0.03278688524590164,
0.028169014084507043,
0.03076923076923077,
0.028985507246376812,
0,
0,
0,
0,
0.02,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0.0196078431372549,
0,
0.03125,
0,
0,
0.017543859649122806,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0.0196078431372549,
0,
0,
0,
0,
0,
0,
0,
0.014084507042253521,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015384615384615385,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0,
0.037037037037037035,
0,
0,
0,
0.03333333333333333,
0,
0
] | 242 | 0.004373 | false |
import json
import random
import aiohttp
import discord
async def numberfact(cmd, message, args):
types = ['trivia', 'date', 'math', 'year']
ran_type = random.choice(types)
embed = discord.Embed(color=0x1abc9c)
if not args:
url = 'http://numbersapi.com/random/' + ran_type + '?json'
else:
number = args[0]
if len(args) > 1:
fact_type = args[1]
fact_type = fact_type.lower()
if fact_type not in types:
fact_type = ran_type
embed.set_footer(text='Invalid fact type, defaulted to random.')
else:
fact_type = ran_type
url = 'http://numbersapi.com/' + number + '/' + fact_type + '?json'
async with aiohttp.ClientSession() as session:
async with session.get(url) as data:
data = await data.read()
data = json.loads(data)
fact = data['text']
embed.add_field(name=':four: Number Fact', value='```\n' + fact + '\n```')
await message.channel.send(None, embed=embed)
| [
"import json\n",
"import random\n",
"import aiohttp\n",
"import discord\n",
"\n",
"async def numberfact(cmd, message, args):\n",
" types = ['trivia', 'date', 'math', 'year']\n",
" ran_type = random.choice(types)\n",
" embed = discord.Embed(color=0x1abc9c)\n",
" if not args:\n",
" url = 'http://numbersapi.com/random/' + ran_type + '?json'\n",
" else:\n",
" number = args[0]\n",
" if len(args) > 1:\n",
" fact_type = args[1]\n",
" fact_type = fact_type.lower()\n",
" if fact_type not in types:\n",
" fact_type = ran_type\n",
" embed.set_footer(text='Invalid fact type, defaulted to random.')\n",
" else:\n",
" fact_type = ran_type\n",
" url = 'http://numbersapi.com/' + number + '/' + fact_type + '?json'\n",
" async with aiohttp.ClientSession() as session:\n",
" async with session.get(url) as data:\n",
" data = await data.read()\n",
" data = json.loads(data)\n",
" fact = data['text']\n",
" embed.add_field(name=':four: Number Fact', value='```\\n' + fact + '\\n```')\n",
" await message.channel.send(None, embed=embed)\n"
] | [
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 29 | 0.001247 | false |
# A bunch of 10 cypher-text encrypted under the same key is provided. We have to decrypt another cypher-text. The idea is that if we exor two cypher-text we get the exor of the corresponding plain-texts. The combinations of two exored ASCII characters is quite limited and in particular when one of this characters is a white space the number of possibilities is reduced to one. The idea is to calculate the exor of every cypher-text combination and then we compare the obtained results.
# For example if all the combinations of the message 1 with the other message get in position one a combination that is only possible with one space, we will know that in that particular position the message 1 has an space. In addition we know what letter is in that position in the other message by looking the possible ASCII characters combinations.
def strxor(a, b): # xor two strings of different lengths
if len(a) > len(b):
return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a[:len(b)], b)])
else:
return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b[:len(a)])])
def getAll():
return (getC1(),getC2(),getC3(),getC4(),getC5(),getC6(),getC7(),getC8(),getC9(),getC10(),getT())
def getC1():
return "315c4eeaa8b5f8aaf9174145bf43e1784b8fa00dc71d885a804e5ee9fa40b16349c146fb778cdf2d3aff021dfff5b403b510d0d0455468aeb98622b137dae857553ccd8883a7bc37520e06e515d22c954eba5025b8cc57ee59418ce7dc6bc41556bdb36bbca3e8774301fbcaa3b83b220809560987815f65286764703de0f3d524400a19b159610b11ef3e"
def getC2():
return "234c02ecbbfbafa3ed18510abd11fa724fcda2018a1a8342cf064bbde548b12b07df44ba7191d9606ef4081ffde5ad46a5069d9f7f543bedb9c861bf29c7e205132eda9382b0bc2c5c4b45f919cf3a9f1cb74151f6d551f4480c82b2cb24cc5b028aa76eb7b4ab24171ab3cdadb8356f"
def getC3():
return "32510ba9a7b2bba9b8005d43a304b5714cc0bb0c8a34884dd91304b8ad40b62b07df44ba6e9d8a2368e51d04e0e7b207b70b9b8261112bacb6c866a232dfe257527dc29398f5f3251a0d47e503c66e935de81230b59b7afb5f41afa8d661cb"
def getC4():
return "32510ba9aab2a8a4fd06414fb517b5605cc0aa0dc91a8908c2064ba8ad5ea06a029056f47a8ad3306ef5021eafe1ac01a81197847a5c68a1b78769a37bc8f4575432c198ccb4ef63590256e305cd3a9544ee4160ead45aef520489e7da7d835402bca670bda8eb775200b8dabbba246b130f040d8ec6447e2c767f3d30ed81ea2e4c1404e1315a1010e7229be6636aaa"
def getC5():
return "3f561ba9adb4b6ebec54424ba317b564418fac0dd35f8c08d31a1fe9e24fe56808c213f17c81d9607cee021dafe1e001b21ade877a5e68bea88d61b93ac5ee0d562e8e9582f5ef375f0a4ae20ed86e935de81230b59b73fb4302cd95d770c65b40aaa065f2a5e33a5a0bb5dcaba43722130f042f8ec85b7c2070"
def getC6():
return "32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd2061bbde24eb76a19d84aba34d8de287be84d07e7e9a30ee714979c7e1123a8bd9822a33ecaf512472e8e8f8db3f9635c1949e640c621854eba0d79eccf52ff111284b4cc61d11902aebc66f2b2e436434eacc0aba938220b084800c2ca4e693522643573b2c4ce35050b0cf774201f0fe52ac9f26d71b6cf61a711cc229f77ace7aa88a2f19983122b11be87a59c355d25f8e4"
def getC7():
return "32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd90f1fa6ea5ba47b01c909ba7696cf606ef40c04afe1ac0aa8148dd066592ded9f8774b529c7ea125d298e8883f5e9305f4b44f915cb2bd05af51373fd9b4af511039fa2d96f83414aaaf261bda2e97b170fb5cce2a53e675c154c0d9681596934777e2275b381ce2e40582afe67650b13e72287ff2270abcf73bb028932836fbdecfecee0a3b894473c1bbeb6b4913a536ce4f9b13f1efff71ea313c8661dd9a4ce"
def getC8():
return "315c4eeaa8b5f8bffd11155ea506b56041c6a00c8a08854dd21a4bbde54ce56801d943ba708b8a3574f40c00fff9e00fa1439fd0654327a3bfc860b92f89ee04132ecb9298f5fd2d5e4b45e40ecc3b9d59e9417df7c95bba410e9aa2ca24c5474da2f276baa3ac325918b2daada43d6712150441c2e04f6565517f317da9d3"
def getC9():
return "271946f9bbb2aeadec111841a81abc300ecaa01bd8069d5cc91005e9fe4aad6e04d513e96d99de2569bc5e50eeeca709b50a8a987f4264edb6896fb537d0a716132ddc938fb0f836480e06ed0fcd6e9759f40462f9cf57f4564186a2c1778f1543efa270bda5e933421cbe88a4a52222190f471e9bd15f652b653b7071aec59a2705081ffe72651d08f822c9ed6d76e48b63ab15d0208573a7eef027"
def getC10():
return "466d06ece998b7a2fb1d464fed2ced7641ddaa3cc31c9941cf110abbf409ed39598005b3399ccfafb61d0315fca0a314be138a9f32503bedac8067f03adbf3575c3b8edc9ba7f537530541ab0f9f3cd04ff50d66f1d559ba520e89a2cb2a83"
def getT():
return "32510ba9babebbbefd001547a810e67149caee11d945cd7fc81a05e9f85aac650e9052ba6a8cd8257bf14d13e6f0a803b54fde9e77472dbff89d71b57bddef121336cb85ccb8f3315f4b52e301d16e9f52f904"
valid_range = range(ord("a"), ord("z")+1) + range(ord("A"), ord("Z")+1) + [ord(" "), ]
valid_range = range(ord("a"), ord("z")+1) + range(ord("A"), ord("Z")+1) + [ord(" "), ]
dict_comb = {}
dict_space = {}
for i in valid_range:
for j in valid_range:
x = strxor(chr(i), chr(j)).encode("hex")
if x not in dict_comb.keys():
dict_comb[x] = {}
if chr(i) not in dict_comb[x].keys():
dict_comb[x][chr(i)] = {}
(dict_comb[x])[chr(i)] = chr(j)
(dict_comb[x])[chr(j)] = chr(i)
for i in valid_range:
x = strxor(chr(i), " ").encode("hex")
if x not in dict_space.keys():
dict_space[x] = chr(i)
a = getAll()
ct = {i+1: a[i] for i in range(len(a))}
list = {}
for i in ct.keys():
list[i] = {j: [l.encode("hex") for l in strxor(ct[i],ct[j])] for j in ct.keys()}
soluciones = {i+1: "" for i in range(len(ct))}
for indice in range(1024):
for i in ct.keys():
try:
a = [list[i][j][indice] for j in ct.keys()]
except IndexError:
break
if [l for l in a if l not in dict_space.keys()]:
if i == list.keys()[-1]:
soluciones = {n: soluciones[n] + "?" for n in soluciones.keys()}
break
else:
soluciones = {n: soluciones[n] + dict_space[a[n-1]] for n in soluciones.keys()}
break
print(soluciones)
def change_value(sol, numct, pos, valor):
    """Propagate a manual guess into the candidate plaintexts.

    Assumes ciphertext `numct` has character `valor` at 1-based position
    `pos`, and recomputes that column of every candidate in `sol` using the
    pairwise-XOR lookup table built at module level.

    Fixes: indentation restored and a dataset-dump artifact (`| [` fused onto
    the return statement) removed.
    """
    # Column `pos` of pairwise XORs of ciphertext `numct` against every
    # ciphertext (reads the module-level `list` and `ct` globals).
    a = [list[numct][j][pos-1] for j in ct.keys()]
    solu = {}
    for i in sol.keys():
        try:
            b = dict_comb[a[i-1]][valor]
        except KeyError:
            # XOR value not producible by two valid characters: unknown.
            b = '?'
        solu[i] = sol[i][:pos-1] + b + sol[i][pos:]
    return solu
"# A bunch of 10 cypher-text encrypted under the same key is provided. We have to decrypt another cypher-text. The idea is that if we exor two cypher-text we get the exor of the corresponding plain-texts. The combinations of two exored ASCII characters is quite limited and in particular when one of this characters is a white space the number of possibilities is reduced to one. The idea is to calculate the exor of every cypher-text combination and then we compare the obtained results.\n",
"# For example if all the combinations of the message 1 with the other message get in position one a combination that is only possible with one space, we will know that in that particular position the message 1 has an space. In addition we know what letter is in that position in the other message by looking the possible ASCII characters combinations.\n",
"\n",
"\n",
"def strxor(a, b): # xor two strings of different lengths\n",
" if len(a) > len(b):\n",
" return \"\".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a[:len(b)], b)])\n",
" else:\n",
" return \"\".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b[:len(a)])])\n",
"\n",
"\n",
"def getAll():\n",
" return (getC1(),getC2(),getC3(),getC4(),getC5(),getC6(),getC7(),getC8(),getC9(),getC10(),getT())\n",
"\n",
"\n",
"def getC1():\n",
" return \"315c4eeaa8b5f8aaf9174145bf43e1784b8fa00dc71d885a804e5ee9fa40b16349c146fb778cdf2d3aff021dfff5b403b510d0d0455468aeb98622b137dae857553ccd8883a7bc37520e06e515d22c954eba5025b8cc57ee59418ce7dc6bc41556bdb36bbca3e8774301fbcaa3b83b220809560987815f65286764703de0f3d524400a19b159610b11ef3e\"\n",
"\n",
"\n",
"def getC2():\n",
" return \"234c02ecbbfbafa3ed18510abd11fa724fcda2018a1a8342cf064bbde548b12b07df44ba7191d9606ef4081ffde5ad46a5069d9f7f543bedb9c861bf29c7e205132eda9382b0bc2c5c4b45f919cf3a9f1cb74151f6d551f4480c82b2cb24cc5b028aa76eb7b4ab24171ab3cdadb8356f\"\n",
"\n",
"\n",
"def getC3():\n",
" return \"32510ba9a7b2bba9b8005d43a304b5714cc0bb0c8a34884dd91304b8ad40b62b07df44ba6e9d8a2368e51d04e0e7b207b70b9b8261112bacb6c866a232dfe257527dc29398f5f3251a0d47e503c66e935de81230b59b7afb5f41afa8d661cb\"\n",
"\n",
"\n",
"def getC4():\n",
" return \"32510ba9aab2a8a4fd06414fb517b5605cc0aa0dc91a8908c2064ba8ad5ea06a029056f47a8ad3306ef5021eafe1ac01a81197847a5c68a1b78769a37bc8f4575432c198ccb4ef63590256e305cd3a9544ee4160ead45aef520489e7da7d835402bca670bda8eb775200b8dabbba246b130f040d8ec6447e2c767f3d30ed81ea2e4c1404e1315a1010e7229be6636aaa\"\n",
"\n",
"\n",
"def getC5():\n",
" return \"3f561ba9adb4b6ebec54424ba317b564418fac0dd35f8c08d31a1fe9e24fe56808c213f17c81d9607cee021dafe1e001b21ade877a5e68bea88d61b93ac5ee0d562e8e9582f5ef375f0a4ae20ed86e935de81230b59b73fb4302cd95d770c65b40aaa065f2a5e33a5a0bb5dcaba43722130f042f8ec85b7c2070\"\n",
"\n",
"\n",
"def getC6():\n",
" return \"32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd2061bbde24eb76a19d84aba34d8de287be84d07e7e9a30ee714979c7e1123a8bd9822a33ecaf512472e8e8f8db3f9635c1949e640c621854eba0d79eccf52ff111284b4cc61d11902aebc66f2b2e436434eacc0aba938220b084800c2ca4e693522643573b2c4ce35050b0cf774201f0fe52ac9f26d71b6cf61a711cc229f77ace7aa88a2f19983122b11be87a59c355d25f8e4\"\n",
"\n",
"\n",
"def getC7():\n",
" return \"32510bfbacfbb9befd54415da243e1695ecabd58c519cd4bd90f1fa6ea5ba47b01c909ba7696cf606ef40c04afe1ac0aa8148dd066592ded9f8774b529c7ea125d298e8883f5e9305f4b44f915cb2bd05af51373fd9b4af511039fa2d96f83414aaaf261bda2e97b170fb5cce2a53e675c154c0d9681596934777e2275b381ce2e40582afe67650b13e72287ff2270abcf73bb028932836fbdecfecee0a3b894473c1bbeb6b4913a536ce4f9b13f1efff71ea313c8661dd9a4ce\"\n",
"\n",
"\n",
"def getC8():\n",
" return \"315c4eeaa8b5f8bffd11155ea506b56041c6a00c8a08854dd21a4bbde54ce56801d943ba708b8a3574f40c00fff9e00fa1439fd0654327a3bfc860b92f89ee04132ecb9298f5fd2d5e4b45e40ecc3b9d59e9417df7c95bba410e9aa2ca24c5474da2f276baa3ac325918b2daada43d6712150441c2e04f6565517f317da9d3\"\n",
"\n",
"\n",
"def getC9():\n",
" return \"271946f9bbb2aeadec111841a81abc300ecaa01bd8069d5cc91005e9fe4aad6e04d513e96d99de2569bc5e50eeeca709b50a8a987f4264edb6896fb537d0a716132ddc938fb0f836480e06ed0fcd6e9759f40462f9cf57f4564186a2c1778f1543efa270bda5e933421cbe88a4a52222190f471e9bd15f652b653b7071aec59a2705081ffe72651d08f822c9ed6d76e48b63ab15d0208573a7eef027\"\n",
"\n",
"\n",
"def getC10():\n",
" return \"466d06ece998b7a2fb1d464fed2ced7641ddaa3cc31c9941cf110abbf409ed39598005b3399ccfafb61d0315fca0a314be138a9f32503bedac8067f03adbf3575c3b8edc9ba7f537530541ab0f9f3cd04ff50d66f1d559ba520e89a2cb2a83\"\n",
"\n",
"\n",
"def getT():\n",
" return \"32510ba9babebbbefd001547a810e67149caee11d945cd7fc81a05e9f85aac650e9052ba6a8cd8257bf14d13e6f0a803b54fde9e77472dbff89d71b57bddef121336cb85ccb8f3315f4b52e301d16e9f52f904\"\n",
"\n",
"valid_range = range(ord(\"a\"), ord(\"z\")+1) + range(ord(\"A\"), ord(\"Z\")+1) + [ord(\" \"), ]\n",
"valid_range = range(ord(\"a\"), ord(\"z\")+1) + range(ord(\"A\"), ord(\"Z\")+1) + [ord(\" \"), ]\n",
"\n",
"dict_comb = {}\n",
"dict_space = {}\n",
"for i in valid_range:\n",
" for j in valid_range:\n",
" x = strxor(chr(i), chr(j)).encode(\"hex\")\n",
" if x not in dict_comb.keys():\n",
" dict_comb[x] = {}\n",
" if chr(i) not in dict_comb[x].keys():\n",
" dict_comb[x][chr(i)] = {}\n",
" (dict_comb[x])[chr(i)] = chr(j)\n",
" (dict_comb[x])[chr(j)] = chr(i)\n",
"\n",
"for i in valid_range:\n",
" x = strxor(chr(i), \" \").encode(\"hex\")\n",
" if x not in dict_space.keys():\n",
" dict_space[x] = chr(i)\n",
"\n",
"a = getAll()\n",
"ct = {i+1: a[i] for i in range(len(a))}\n",
"\n",
"list = {}\n",
"for i in ct.keys():\n",
" list[i] = {j: [l.encode(\"hex\") for l in strxor(ct[i],ct[j])] for j in ct.keys()}\n",
"\n",
"soluciones = {i+1: \"\" for i in range(len(ct))}\n",
"\n",
"for indice in range(1024):\n",
" for i in ct.keys():\n",
" try:\n",
" a = [list[i][j][indice] for j in ct.keys()]\n",
" except IndexError:\n",
" break\n",
" if [l for l in a if l not in dict_space.keys()]:\n",
" if i == list.keys()[-1]:\n",
" soluciones = {n: soluciones[n] + \"?\" for n in soluciones.keys()}\n",
" break\n",
" else:\n",
" soluciones = {n: soluciones[n] + dict_space[a[n-1]] for n in soluciones.keys()}\n",
" break\n",
"\n",
"print(soluciones)\n",
"\n",
"\n",
"def change_value(sol, numct, pos, valor):\n",
" a = [list[numct][j][pos-1] for j in ct.keys()]\n",
" solu = {}\n",
" for i in sol.keys():\n",
" try:\n",
" b = dict_comb[a[i-1]][valor]\n",
" except KeyError:\n",
" b = '?'\n",
" solu[i] = sol[i][:pos-1] + b + sol[i][pos:]\n",
" return solu"
] | [
0.0020491803278688526,
0.002840909090909091,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.10891089108910891,
0,
0,
0,
0.003424657534246575,
0,
0,
0,
0.004201680672268907,
0,
0,
0,
0.004901960784313725,
0,
0,
0,
0.0033112582781456954,
0,
0,
0,
0.003875968992248062,
0,
0,
0,
0.002793296089385475,
0,
0,
0,
0.0025906735751295338,
0,
0,
0,
0.0037313432835820895,
0,
0,
0,
0.003067484662576687,
0,
0,
0,
0.004901960784313725,
0,
0,
0,
0.005555555555555556,
0,
0.022988505747126436,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03529411764705882,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017543859649122806,
0,
0.012345679012345678,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 114 | 0.002924 | false |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# nec tv - XBMC Add-on by necula tv (daniel79mil@gmail.com)
# Version 0.1.0 (03.12.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Gracias a la librería plugintools de nec tv (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools, nstream, ioncube
from framescrape import *
from resources.tools.resolvers import *
from resources.tools.update import *
from resources.tools.scrape import *
from resources.tools.torrentvru import *
from resources.tools.vaughnlive import *
from resources.tools.ninestream import *
from resources.tools.vercosas import *
from resources.tools.torrent1 import *
from resources.tools.directwatch import *
from resources.tools.freetvcast import *
from resources.tools.freebroadcast import *
from resources.tools.shidurlive import *
from resources.tools.latuerka import *
from resources.tools.laligatv import *
from resources.tools.updater import *
from resources.tools.castalba import *
from resources.tools.castdos import *
from resources.tools.new_regex import *
from resources.tools.sportseven import *
from resources.tools.streamingfreetv import *
from resources.tools.dailymotion import *
from resources.tools.getposter import *
def _kodi_path(special):
    # Resolve a Kodi special:// location to an absolute filesystem path.
    return xbmc.translatePath(os.path.join(special, ''))

# Frequently used add-on locations.
home = _kodi_path('special://home/addons/plugin.video.nec tv/')
tools = _kodi_path('special://home/addons/plugin.video.nec tv/resources/tools')
addons = _kodi_path('special://home/addons/')
art = _kodi_path('special://home/addons/plugin.video.nec tv/art')
tmp = _kodi_path('special://home/addons/plugin.video.nec tv/tmp')
playlists = _kodi_path('special://home/addons/playlists')
icon = art + 'icon.png'
fanart = 'fanart.jpg'
# Entry point
def run():
    """Entry point: dispatch to the handler named in the plugin parameters.

    Fixes: indentation restored; the Py2-only ``exec action+"(params)"`` —
    which also executed arbitrary code from a crafted plugin URL — replaced
    by a name lookup in globals(); unused local ``url`` removed.
    """
    plugintools.log("---> nec tv.run <---")
    # Parse the plugin:// invocation parameters.
    params = plugintools.get_params()
    if params.get("action") is None:
        main_list(params)
    else:
        action = params.get("action")
        # Dispatch by name: same effect as exec'ing "action(params)" for the
        # simple handler names this add-on uses, without code injection.
        globals()[action](params)
    if not os.path.exists(playlists):
        os.makedirs(playlists)
    plugintools.close_item_list()
# Main menu
def main_list(params):
    """Build the add-on root menu from a remote XML skin definition.

    Fixes: indentation restored; bare ``except:`` narrowed to ``Exception``;
    print statement parenthesized (works on both Py2 and Py3).
    """
    plugintools.log("[latinototal-0.1.0].main_list "+repr(params))
    # Resolve the skin/menu XML; fall back to a known pastebin copy on error.
    mastermenu = xml_skin()
    plugintools.log("XML menu: "+mastermenu)
    try:
        data = plugintools.read(mastermenu)
    except Exception:
        mastermenu = 'http://pastebin.com/raw.php?i=n9BF6Cwe'
        data = plugintools.read(mastermenu)
        xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('nec tv', "XML no reconocido...", 3 , art+'icon.png'))
    # Informational header entries (not clickable).
    matches = plugintools.find_multiple_matches(data,'<menu_info>(.*?)</menu_info>')
    for entry in matches:
        title = plugintools.find_single_match(entry,'<title>(.*?)</title>')
        date = plugintools.find_single_match(entry,'<date>(.*?)</date>')
        thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>')
        fanart = plugintools.find_single_match(entry,'<fanart>(.*?)</fanart>')
        plugintools.add_item( action="" , title = title + date , fanart = fanart , thumbnail=thumbnail , folder = False , isPlayable = False )
    data = plugintools.read(mastermenu)
    # One folder entry per <channel> element.
    matches = plugintools.find_multiple_matches(data,'<channel>(.*?)</channel>')
    for entry in matches:
        title = plugintools.find_single_match(entry,'<name>(.*?)</name>')
        thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>')
        fanart = plugintools.find_single_match(entry,'<fanart>(.*?)</fanart>')
        action = plugintools.find_single_match(entry,'<action>(.*?)</action>')
        last_update = plugintools.find_single_match(entry,'<last_update>(.*?)</last_update>')
        url = plugintools.find_single_match(entry,'<url>(.*?)</url>')
        date = plugintools.find_single_match(entry,'<last_update>(.*?)</last_update>')
        # Parental control: hide adult channels when the setting is enabled.
        pekes_no = plugintools.get_setting("pekes_no")
        if pekes_no == "true":
            print("Control paternal en marcha")
            if title.find("Adultos") >= 0:
                plugintools.log("Activando control paternal...")
            else:
                fixed = title
                plugintools.log("fixed= "+fixed)
                # Update/agenda entries are highlighted in red.
                if fixed == "Actualizaciones":
                    plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False )
                elif fixed == 'Agenda TV':
                    plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False )
                else:
                    plugintools.add_item( action = action , plot = fixed , title = '[COLOR lightyellow]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False )
        else:
            fixed = title
            if fixed == "Actualizaciones":
                plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False )
            elif fixed == "Agenda TV":
                plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False )
            else:
                plugintools.add_item( action = action , plot = fixed , title = '[COLOR lightyellow]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False )
def play(params):
    """Resolve and play the stream referenced by params['url'].

    HTTP links are routed to the matching hoster resolver; anything else is
    handed straight to plugintools. Fixes: indentation restored; the always-
    true ``url.startswith("rtp") >= 0`` (bool compared to int) reduced to the
    plain boolean test; redundant re-read of ``url`` in the http fallback
    removed.
    """
    plugintools.log("[nec tv-0.1.0].play "+repr(params))
    # plugintools.direct_play(params.get("url"))
    # xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(params.get("url"))
    url = params.get("url")
    if url.startswith("http"):
        # Hand HTTP links to the matching hoster resolver.
        if url.find("allmyvideos") >= 0:
            allmyvideos(params)
        elif url.find("streamcloud") >= 0:
            streamcloud(params)
        elif url.find("vidspot") >= 0:
            vidspot(params)
        elif url.find("played.to") >= 0:
            playedto(params)
        elif url.find("vk.com") >= 0:
            vk(params)
        elif url.find("nowvideo") >= 0:
            nowvideo(params)
        elif url.find("tumi") >= 0:
            tumi(params)
        elif url.find("streamin.to") >= 0:
            streaminto(params)
        else:
            plugintools.play_resolved_url(url)
    elif url.startswith("rtp"):  # Movistar TV links
        plugintools.play_resolved_url(url)
    else:
        plugintools.play_resolved_url(url)
        # NOTE(review): OnPlayBackStarted and time are presumably provided by
        # the star-imports at the top of the file — confirm; otherwise this
        # loop raises NameError at runtime.
        while OnPlayBackStarted() == False:
            print("No se está reproduciendo...")
            time.sleep(3)
        if OnPlayBackStarted():
            print("En reproducción!")
        else:
            print("No ha empezado")
def runPlugin(url):
    """Ask Kodi to launch the given plugin:// URL."""
    command = 'XBMC.RunPlugin(' + url + ')'
    xbmc.executebuiltin(command)
def live_items_withlink(params):
    """List playable entries from an XML playlist whose items use <link>.

    Fix: indentation restored (lost when the file was flattened).
    """
    plugintools.log("[latinototal-0.1.0].live_items_withlink "+repr(params))
    data = plugintools.read(params.get("url"))
    # ToDo: add header-reading helper (fanart, thumbnail, title, last update).
    header_xml(params)
    # List-level fanart; fall back to the bundled artwork when absent.
    fanart = plugintools.find_single_match(data, '<fanart>(.*?)</fanart>')
    if fanart == "":
        fanart = art + 'fanart.jpg'
    # List author (header).
    author = plugintools.find_single_match(data, '<poster>(.*?)</poster>')
    matches = plugintools.find_multiple_matches(data,'<item>(.*?)</item>')
    for entry in matches:
        # Strip CDATA wrappers from both title and link.
        title = plugintools.find_single_match(entry,'<title>(.*?)</title>')
        title = title.replace("<![CDATA[", "")
        title = title.replace("]]>", "")
        thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>')
        url = plugintools.find_single_match(entry,'<link>(.*?)</link>')
        url = url.replace("<![CDATA[", "")
        url = url.replace("]]>", "")
        plugintools.add_item(action = "play" , title = title , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )
def xml_lists(params):
    """List the <subchannel> folders of the channel named in params['title'].

    Fixes: indentation restored; the opaque regex-group names
    (biny/ciny/diny/...) renamed to describe what each captured group holds.
    """
    plugintools.log("[nec tv-0.1.0].xml_lists "+repr(params))
    data = plugintools.read( params.get("url") )
    name_channel = params.get("title")
    name_channel = parser_title(name_channel)
    plugintools.log("name_channel= "+name_channel)
    # Narrow the XML to the selected channel's section.
    pattern = '<name>'+name_channel+'(.*?)</channel>'
    data = plugintools.find_single_match(data, pattern)
    plugintools.add_item( action="" , title='[B][COLOR yellow]'+name_channel+'[/B][/COLOR]' , thumbnail= art + 'special.png' , fanart = fanart , folder = False , isPlayable = False )
    # Parental control setting.
    pekes_no = plugintools.get_setting("pekes_no")
    subchannel = re.compile('<subchannel>([^<]+)<name>([^<]+)</name>([^<]+)<thumbnail>([^<]+)</thumbnail>([^<]+)<fanart>([^<]+)</fanart>([^<]+)<action>([^<]+)</action>([^<]+)<url>([^<]+)</url>([^<]+)</subchannel>').findall(data)
    # Capture groups alternate filler / value; only the named ones are used.
    for _f1, name, _f2, thumb, _f3, fan, _f4, act, _f5, link, _f6 in subchannel:
        if pekes_no == "true":
            print("Control paternal en marcha")
            if name.find("XXX") >= 0:
                plugintools.log("Activando control paternal...")
            else:
                plugintools.add_item( action = act , title = name , url= link , thumbnail = thumb , fanart = fan , extra = fan , page = fan , folder = True , isPlayable = False )
                params["fanart"] = fan
        else:
            plugintools.add_item( action = act , title = name , url= link , thumbnail = thumb , fanart = fan , extra = fan , page = fan , folder = True , isPlayable = False )
            params["fanart"] = fan
def getstreams_now(params):
    """List the <title>/<link> streams of a simple XML feed, preceded by its
    <poster> text as a non-clickable header.

    Fix: indentation restored (lost when the file was flattened).
    """
    plugintools.log("[nec tv-0.1.0].getstreams_now "+repr(params))
    data = plugintools.read( params.get("url") )
    poster = plugintools.find_single_match(data, '<poster>(.*?)</poster>')
    plugintools.add_item(action="" , title='[COLOR blue][B]'+poster+'[/B][/COLOR]', url="", folder =False, isPlayable=False)
    matches = plugintools.find_multiple_matches(data,'<title>(.*?)</link>')
    for entry in matches:
        title = plugintools.find_single_match(entry,'(.*?)</title>')
        # Note the single space after <link> in the pattern — the feed format
        # apparently puts one there.
        url = plugintools.find_single_match(entry,'<link> ([^<]+)')
        plugintools.add_item( action="play" , title=title , url=url , folder = False , isPlayable = True )
# Soporte de listas de canales por categorías (Livestreams, XBMC México, Motor SportsTV, etc.).
def livestreams_channels(params):
    """List channel folders from a category XML (Livestreams, XBMC México...).

    Fixes: indentation restored; the two branches duplicated an identical
    loop differing only in the target action — merged into one loop with the
    action chosen up front.
    """
    plugintools.log("[nec tv-0.1.0].livestreams_channels "+repr(params))
    data = plugintools.read( params.get("url") )
    thumbnail = params.get("thumbnail")
    if thumbnail == "":
        thumbnail = 'icon.jpg'
        plugintools.log(thumbnail)
    else:
        plugintools.log(thumbnail)
    # The add-on's own icon marks lists organised in sub-channels; any other
    # thumbnail means a flat item listing.
    if thumbnail == art + 'icon.png':
        next_action = "livestreams_subchannels"
    else:
        next_action = "livestreams_items"
    matches = plugintools.find_multiple_matches(data,'<channel>(.*?)</items>')
    for entry in matches:
        title = plugintools.find_single_match(entry,'<name>(.*?)</name>')
        thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>')
        fanart = plugintools.find_single_match(entry,'<fanart>(.*?)</fanart>')
        plugintools.add_item( action=next_action , title=title , url=params.get("url") , thumbnail=thumbnail , fanart=fanart , folder = True , isPlayable = False )
def livestreams_subchannels(params):
    """List the sub-channel names found inside one channel's XML section.

    Fix: indentation restored (lost when the file was flattened).
    """
    plugintools.log("[nec tv-0.1.0].livestreams_subchannels "+repr(params))
    data = plugintools.read( params.get("url") )
    title_channel = params.get("title")
    # Narrow the XML to the selected channel's section.
    name_subchannel = '<name>'+title_channel+'</name>'
    data = plugintools.find_single_match(data, name_subchannel+'(.*?)</channel>')
    info = plugintools.find_single_match(data, '<info>(.*?)</info>')
    title = params.get("title")
    plugintools.add_item( action="" , title='[B]'+title+'[/B] [COLOR yellow]'+info+'[/COLOR]' , folder = False , isPlayable = False )
    subchannel = plugintools.find_multiple_matches(data , '<name>(.*?)</name>')
    for entry in subchannel:
        # TODO: load per-subchannel thumbnail and fanart instead of the
        # hard-coded motorsports artwork.
        plugintools.add_item( action="livestreams_subitems" , title=entry , url=params.get("url") , thumbnail=art+'motorsports-xbmc.jpg' , folder = True , isPlayable = False )
# Pendiente de cargar thumbnail personalizado y fanart...
def livestreams_subitems(params):
    """List the playable <title>/<link> pairs of one sub-channel.

    Fixes: indentation restored; opaque loop names (quirry/winy) renamed.
    """
    plugintools.log("[nec tv-0.1.0].livestreams_subitems "+repr(params))
    title_subchannel = params.get("title")
    data = plugintools.read( params.get("url") )
    # Everything between the sub-channel name and the next <subchannel> tag.
    source = plugintools.find_single_match(data , title_subchannel+'(.*?)<subchannel>')
    titles = re.compile('<title>([^<]+)</title>([^<]+)<link>([^<]+)</link>').findall(source)
    url = params.get("url")
    title = params.get("title")
    thumbnail = params.get("thumbnail")
    for item_title, _filler, link in titles:
        # Undo the XML escaping of '&' in the stream URL.
        link = link.replace("amp;","")
        plugintools.add_item( action="play" , title = item_title , url = link , thumbnail = thumbnail , folder = False , isPlayable = True )
def livestreams_items(params):
    """Flat listing of playable items for the channel named in params['title'].

    The title goes through several clean-up steps (encoding fix-ups, bracket
    stripping, '+' splitting) before it is used to locate the channel's XML
    section. Fix: indentation restored (lost when the file was flattened).
    """
    plugintools.log("[nec tv-0.1.0].livestreams_items "+repr(params))
    title_subchannel = params.get("title")
    plugintools.log("title= "+title_subchannel)
    # Encoding fix-ups inherited from the source lists: the first replace is
    # a no-op as written; the second undoes a literal escaped UTF-8 sequence.
    title_subchannel_fixed = title_subchannel.replace("ñ", "ñ")
    title_subchannel_fixed = title_subchannel_fixed.replace("\\xc3\\xb1", "ñ")
    # Keep only the part before any '[' (colour/format tags).
    title_subchannel_fixed = plugintools.find_single_match(title_subchannel_fixed, '([^[]+)')
    title_subchannel_fixed = title_subchannel_fixed.encode('utf-8', 'ignore')
    plugintools.log("subcanal= "+title_subchannel_fixed)
    if title_subchannel_fixed.find("+") >= 0:
        # NOTE(review): after the split the name is taken from the second
        # chunk, falling back to its first character when empty — this looks
        # fragile; confirm against the actual list format before changing.
        title_subchannel_fixed = title_subchannel_fixed.split("+")
        title_subchannel_fixed = title_subchannel_fixed[1]
        title_subchannel_fixxed = title_subchannel_fixed[0]
        if title_subchannel_fixed == "":
            title_subchannel_fixed = title_subchannel_fixxed
    data = plugintools.read( params.get("url") )
    source = plugintools.find_single_match(data , title_subchannel_fixed+'(.*?)</channel>')
    plugintools.log("source= "+source)
    fanart_channel = plugintools.find_single_match(source, '<fanart>(.*?)</fanart>')
    titles = re.compile('<title>([^<]+)</title>([^<]+)<link>([^<]+)</link>([^<]+)<thumbnail>([^<]+)</thumbnail>').findall(source)
    url = params.get("url")
    title = params.get("title")
    thumbnail = params.get("thumbnail")
    for item_title, _f1, link, _f2, thumb in titles:
        plugintools.log("title= "+item_title)
        plugintools.log("url= "+link)
        # Undo the XML escaping of '&' in the stream URL.
        link = link.replace("amp;","")
        plugintools.add_item( action="play" , title = item_title , url = link , thumbnail = thumb , fanart = fanart_channel , folder = False , isPlayable = True )
def xml_items(params):
    """List playable entries from an XML list; the menu thumbnail selects the
    parsing scheme (<item>/<link> lists vs <channel>/<rtmp> lists).

    Fixes: indentation restored; the second branch changed from ``if`` to
    ``elif`` — the first loop reuses the local ``thumbnail``, so a second
    plain ``if`` would compare against a value mutated inside that loop.
    """
    plugintools.log("[nec tv-0.1.0].xml_items "+repr(params))
    data = plugintools.read( params.get("url") )
    thumbnail = params.get("thumbnail")
    # ToDo: use an explicit parameter to select the parsing mode instead of
    # overloading the thumbnail name.
    if thumbnail == "title_link.png":
        matches = plugintools.find_multiple_matches(data,'<item>(.*?)</item>')
        for entry in matches:
            title = plugintools.find_single_match(entry,'<title>(.*?)</title>')
            thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>')
            url = plugintools.find_single_match(entry,'<link>([^<]+)</link>')
            fanart = plugintools.find_single_match(entry,'<fanart>([^<]+)</fanart>')
            plugintools.add_item( action = "play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True )
    elif thumbnail == "name_rtmp.png":
        matches = plugintools.find_multiple_matches(data,'<channel>(.*?)</channel>')
        for entry in matches:
            title = plugintools.find_single_match(entry,'<name>(.*?)</name>')
            url = plugintools.find_single_match(entry,'<rtmp>([^<]+)</rtmp>')
            plugintools.add_item( action = "play" , title = title , url = url , fanart = art + 'fanart.jpg' , plot = title , folder = False , isPlayable = True )
def simpletv_items(params):
plugintools.log("[nec tv-0.1.0].simpletv_items "+repr(params))
saving_url = 0
# Obtenemos fanart y thumbnail del diccionario
thumbnail = params.get("thumbnail")
plugintools.log("thumbnail= "+thumbnail)
if thumbnail == "" :
thumbnail = art + 'icon.png'
# Parche para solucionar un bug por el cuál el diccionario params no retorna la variable fanart
fanart = params.get("extra")
if fanart == " " :
fanart = params.get("fanart")
if fanart == " " :
fanart = art + 'fanart.png'
title = params.get("plot")
texto= params.get("texto")
busqueda = ""
if title == 'search':
title = title + '.txt'
plugintools.log("title= "+title)
else:
title = title + '.m3u'
if title == 'search.txt':
busqueda = 'search.txt'
filename = title
file = open(tmp + 'search.txt', "r")
file.seek(0)
data = file.readline()
if data == "":
ok = plugintools.message("nec tv", "Sin resultados")
return ok
else:
title = params.get("title")
title = parser_title(title)
ext = params.get("ext")
title_plot = params.get("plot")
if title_plot == "":
filename = title + "." + ext
if ext is None:
filename = title
else:
plugintools.log("ext= "+ext)
filename = title + "." + ext
file = open(playlists + filename, "r")
file.seek(0)
data = file.readline()
plugintools.log("data= "+data)
if data == "":
print "No es posible leer el archivo!"
data = file.readline()
plugintools.log("data= "+data)
else:
file.seek(0)
num_items = len(file.readlines())
print num_items
plugintools.log("filename= "+filename)
plugintools.add_item(action="" , title = '[COLOR lightyellow][B][I]playlist / '+ filename + '[/B][/I][/COLOR]' , url = playlists + title , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = False)
# Lectura de items en lista m3u. ToDo: Control de errores, implementar lectura de fanart y thumbnail
# Control para evitar error en búsquedas (cat is null)
cat = ""
i = -1
file.seek(0)
data = file.readline()
while i <= num_items:
if data.startswith("#EXTINF:-1") == True:
title = data.replace("#EXTINF:-1", "")
title = title.replace(",", "")
title = title.replace("-AZBOX *", "")
title = title.replace("-AZBOX-*", "")
if title.startswith("$") == True: # Control para lanzar scraper IMDB
title = title.replace("$","")
images = m3u_items(title)
title_fixed = images[3]
datamovie = {}
datamovie = getposter(title_fixed)
save_title(title_fixed, datamovie, filename)
getdatafilm = 1 # Control para cargar datos de película
saving_url = 1 # Control para guardar URL
if datamovie == {}:
title = '[COLOR lightyellow][B]'+title+' - [/B][I][COLOR orange][IMDB: [B]'+datamovie["Rating"]+'[/B]][/I][/COLOR] '
thumbnail = datamovie["Poster"];fanart = datamovie["Fanart"]
# Control de la línea del título en caso de búsqueda
if busqueda == 'search.txt':
title_search = title.split('"')
print 'title',title
titulo = title_search[0]
titulo = titulo.strip()
origen = title_search[1]
origen = origen.strip()
data = file.readline()
i = i + 1
else:
images = m3u_items(title)
thumbnail = images[0]
fanart = images[1]
cat = images[2]
title = images[3]
origen = title.split(",")
title = title.strip()
plugintools.log("title= "+title)
data = file.readline()
i = i + 1
if title.startswith("#") == True:
title = title.replace("#", "")
plugintools.add_item(action="", title = title , url = "", thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = False)
data = file.readline()
print data
i = i + 1
continue
# Control para determinadas listas de decos sat
if title.startswith(' $ExtFilter="') == True:
if busqueda == 'search.txt':
title = title.replace('$ExtFilter="', "")
title_search = title.split('"')
titulo = title_search[1]
origen = title_search[2]
origen = origen.strip()
data = file.readline()
i = i + 1
else:
title = title.replace('$ExtFilter="', "")
category = title.split('"')
tipo = category[0]
tipo = tipo.strip()
title = category[1]
title = title.strip()
print title
data = file.readline()
i = i + 1
if data != "":
title = title.replace("radio=true", "")
url = data.strip()
if url.startswith("serie") == True:
url = data.strip()
if cat == "":
if busqueda == 'search.txt':
url = url.replace("serie:", "")
params["fanart"] = fanart
plugintools.log("fanart= "+fanart)
plugintools.add_item( action = "seriecatcher" , title = '[COLOR white]' + title + ' [COLOR purple][Serie online][/COLOR][COLOR white][I] (' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
data = file.readline()
i = i + 1
continue
else:
url = url.replace("serie:", "")
params["fanart"] = fanart
plugintools.log("fanart= "+fanart)
plugintools.add_item( action = "seriecatcher" , title = '[COLOR white]' + title + ' [COLOR purple][Serie online][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
data = file.readline()
i = i + 1
continue
else:
if busqueda == 'search.txt':
plugintools.add_item( action = "longurl" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR purple][Serie online][/COLOR][COLOR white][I] (' + origen + ')[/I][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "longurl" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR purple][Serie online][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
if data.startswith("http") == True:
url = data.strip()
if cat != "": # Controlamos el caso de subcategoría de canales
if busqueda == 'search.txt':
if url.startswith("serie") == True:
url = url.replace("serie:", "")
params["fanart"] = fanart
plugintools.log("fanart= "+fanart)
plugintools.add_item( action = "seriecatcher" , title = '[COLOR white]' + title + ' [COLOR purple][Serie online][/COLOR][COLOR lightsalmon](' + origen + ')[/I][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
data = file.readline()
i = i + 1
continue
elif url.find("allmyvideos") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "allmyvideos" , title = '[COLOR white]' + title + '[COLOR lightyellow] [Allmyvideos][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("streamcloud") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "streamcloud" , title = '[COLOR white]' + title + '[COLOR lightskyblue] [Streamcloud][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("vidspot") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "vidspot" , title = '[COLOR white]' + title + '[COLOR palegreen] [Vidspot][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("played.to") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "playedto" , title = '[COLOR white]' + title + '[COLOR lavender] [Played.to][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("vk.com") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "vk" , title = '[COLOR white]' + title + '[COLOR royalblue] [Vk][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("nowvideo") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "nowvideo" , title = '[COLOR white]' + title + '[COLOR red] [Nowvideo][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("tumi") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "tumi" , title = '[COLOR white]' + title + '[COLOR forestgreen] [Tumi][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("streamin.to") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "streaminto" , title = '[COLOR white]' + title + '[COLOR orange] [streamin.to][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , show = show, fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("www.youtube.com") >= 0: # Video youtube
plugintools.log("linea titulo= "+title_search)
title = title.split('"')
title = title[0]
title = title.strip()
videoid = url.replace("https://www.youtube.com/watch?=", "")
url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [[COLOR red]You[COLOR white]tube Video][I] (' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("www.dailymotion.com/playlist") >= 0: # Playlist
id_playlist = dailym_getplaylist(url)
if id_playlist != "":
url = "https://api.dailymotion.com/playlist/"+id_playlist+"/videos"
if thumbnail == "":
thumbnail = 'http://press.dailymotion.com/wp-old/wp-content/uploads/logo-Dailymotion.png'
plugintools.add_item( action="dailym_pl" , title=title + ' [COLOR lightyellow][B][Dailymotion[/B] playlist][/COLOR]' , fanart=fanart, thumbnail=thumbnail, url=url , folder=True, isPlayable=False)
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
data = file.readline()
i = i + 1
continue
elif url.find("dailymotion.com/video") >= 0:
video_id = dailym_getvideo(url)
if video_id != "":
thumbnail = "https://api.dailymotion.com/thumbnail/video/"+video_id+""
url = "plugin://plugin.video.dailymotion_com/?url="+video_id+"&mode=playVideo"
# Appends a new item to the xbmc item list
# API Dailymotion list of video parameters: http://www.dailymotion.com/doc/api/obj-video.html
plugintools.add_item( action="play" , title=title + ' [COLOR lightyellow][B][Dailymotion[/B] video][/COLOR]' , url=url , thumbnail = thumbnail , fanart = fanart, isPlayable=True, folder=False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
data = file.readline()
i = i + 1
continue
elif url.endswith("m3u8") == True:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR purple] [m3u8][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
title = title_search.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "longurl" , title = '[COLOR white]' + title + '[COLOR lightblue] [HTTP][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
if url.startswith("serie") == True:
url = url.replace("serie:", "")
params["fanart"] = fanart
plugintools.log("fanart= "+fanart)
plugintools.add_item( action = "seriecatcher" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR purple][Serie online][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("allmyvideos") >= 0:
plugintools.add_item( action = "allmyvideos" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR lightyellow] [Allmyvideos][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
elif url.find("streamcloud") >= 0:
plugintools.add_item( action = "streamcloud" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR lightskyblue] [Streamcloud][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("vidspot") == True:
plugintools.add_item( action = "vidspot" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR palegreen] [Vidspot][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("played.to") >= 0:
plugintools.add_item( action = "playedto" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR lavender] [Played.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("vk") >= 0:
plugintools.add_item( action = "vk" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR royalblue] [Vk][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("nowvideo") >= 0:
plugintools.add_item( action = "nowvideo" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR red] [Nowvideo][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("tumi") >= 0:
plugintools.add_item( action = "tumi" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR forestgreen] [Tumi][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("streamin.to") >= 0:
plugintools.add_item( action = "streaminto" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR orange] [streamin.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("9stream") >= 0:
plugintools.add_item( action = "ninestreams" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR green] [9stream][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("www.youtube.com") >= 0: # Video youtube
title = title.split('"')
title = title[0]
title =title.strip()
videoid = url.replace("https://www.youtube.com/watch?=", "")
url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [[COLOR red]You[COLOR white]tube Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("www.dailymotion.com/playlist") >= 0: # Playlist
id_playlist = dailym_getplaylist(url)
if id_playlist != "":
plugintools.log("id_playlist= "+id_playlist)
if thumbnail == "":
thumbnail = 'http://press.dailymotion.com/wp-old/wp-content/uploads/logo-Dailymotion.png'
url = "https://api.dailymotion.com/playlist/"+id_playlist+"/videos"
plugintools.add_item( action="dailym_pl" , title='[COLOR red][I]'+cat+' / [/I][/COLOR] '+title+' [COLOR lightyellow][B][Dailymotion[/B] playlist][/COLOR]', url=url , fanart = fanart , thumbnail=thumbnail , folder=True, isPlayable=False)
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
data = file.readline()
i = i + 1
continue
elif url.find("dailymotion.com/video") >= 0:
video_id = dailym_getvideo(url)
if video_id != "":
thumbnail = "https://api.dailymotion.com/thumbnail/video/"+video_id+""
url = "plugin://plugin.video.dailymotion_com/?url="+video_id+"&mode=playVideo"
# Appends a new item to the xbmc item list
# API Dailymotion list of video parameters: http://www.dailymotion.com/doc/api/obj-video.html
plugintools.add_item( action="play" , title='[COLOR red][I]' + cat + ' / [/I][/COLOR] '+title+' [COLOR lightyellow][B][Dailymotion[/B] video][/COLOR]', url=url , thumbnail = thumbnail , fanart= fanart , isPlayable=True, folder=False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
data = file.readline()
i = i + 1
continue
elif url.endswith("m3u8") == True:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR purple] [m3u8][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "longurl" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR blue] [HTTP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
# Sin categoría de canales
else:
if busqueda == 'search.txt':
if url.startswith("serie") == True:
url = url.replace("serie:", "")
params["fanart"] = fanart
plugintools.log("fanart= "+fanart)
plugintools.add_item( action = "seriecatcher" , title = '[COLOR white]' + title + ' [COLOR purple][Serie online][/COLOR][COLOR lightsalmon](' + origen + ')[/I][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("allmyvideos") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "allmyvideos" , title = '[COLOR white]' + title + '[COLOR lightyellow] [Allmyvideos][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("streamcloud") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "streamcloud" , title = '[COLOR white]' + titulo + '[COLOR lightskyblue] [Streamcloud][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("vidspot") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "vidspot" , title = '[COLOR white]' + title + '[COLOR palegreen] [Vidspot][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("played.to") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "playedto" , title = '[COLOR white]' + title + '[COLOR lavender] [Played.to][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("vk.com") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "vk" , title = '[COLOR white]' + title + '[COLOR royalblue] [Vk][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("nowvideo") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "nowvideo" , title = '[COLOR white]' + title + '[COLOR red] [Nowvideo][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("tumi.tv") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "tumi" , title = '[COLOR white]' + title + '[COLOR forestgreen] [Tumi][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("streamin.to") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "streaminto" , title = '[COLOR white]' + title + '[COLOR orange] [streamin.to][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("www.youtube.com") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
videoid = url.replace("https://www.youtube.com/watch?=", "")
url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid
plugintools.add_item( action = "youtube_videos" , title = '[COLOR white][' + title + ' [[COLOR red]You[/COLOR][COLOR white]tube Video][I] (' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("www.dailymotion.com/playlist") >= 0: # Playlist
id_playlist = dailym_getplaylist(url)
if id_playlist != "":
if thumbnail == "":
thumbnail = 'http://press.dailymotion.com/wp-old/wp-content/uploads/logo-Dailymotion.png'
url = "https://api.dailymotion.com/playlist/"+id_playlist+"/videos"
plugintools.add_item( action="dailym_pl" , title=title+' [COLOR lightyellow][B][Dailymotion[/B] playlist][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url=url , fanart = fanart , thumbnail=thumbnail , folder=True, isPlayable=False)
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
data = file.readline()
i = i + 1
continue
elif url.find("dailymotion.com/video") >= 0:
video_id = dailym_getvideo(url)
if video_id != "":
thumbnail = "https://api.dailymotion.com/thumbnail/video/"+video_id+""
url = "plugin://plugin.video.dailymotion_com/?url="+video_id+"&mode=playVideo"
# Appends a new item to the xbmc item list
# API Dailymotion list of video parameters: http://www.dailymotion.com/doc/api/obj-video.html
plugintools.add_item( action="play" , title=title+' [COLOR lightyellow][B][Dailymotion[/B] video][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url=url , fanart = fanart , thumbnail = thumbnail , isPlayable=True, folder=False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
data = file.readline()
i = i + 1
continue
elif url.endswith("m3u8") == True:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [COLOR purple][m3u8][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
title = title_search[0]
title = title.strip()
plugintools.add_item( action = "longurl" , title = '[COLOR white]' + title + ' [COLOR blue][HTTP][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
if url.find("allmyvideos") >= 0:
plugintools.add_item( action = "allmyvideos" , title = '[COLOR white]' + title + ' [COLOR lightyellow][Allmyvideos][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("streamcloud") >= 0:
plugintools.add_item( action = "streamcloud" , title = '[COLOR white]' + title + ' [COLOR lightskyblue][Streamcloud][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("vidspot") >= 0:
plugintools.add_item( action = "vidspot" , title = '[COLOR white]' + title + ' [COLOR palegreen][Vidspot][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("played.to") >= 0:
plugintools.add_item( action = "playedto" , title = '[COLOR white]' + title + ' [COLOR lavender][Played.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("vk.com") >= 0:
plugintools.add_item( action = "vk" , title = '[COLOR white]' + title + ' [COLOR royalblue][Vk][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("nowvideo") >= 0:
plugintools.add_item( action = "nowvideo" , title = '[COLOR white]' + title + '[COLOR red] [Nowvideo][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("tumi.tv") >= 0:
plugintools.add_item( action = "tumi" , title = '[COLOR white]' + title + '[COLOR forestgreen] [Tumi][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("streamin.to") >= 0:
plugintools.add_item( action = "streaminto" , title = '[COLOR white]' + title + '[COLOR orange] [streamin.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("www.youtube.com") >= 0:
title = title.split('"')
title = title[0]
title = title.strip()
videoid = url.replace("https://www.youtube.com/watch?v=", "")
print 'videoid',videoid
url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid
plugintools.add_item( action = "youtube_videos" , title = '[COLOR white]' + title + ' [[COLOR red]You[/COLOR][COLOR white]tube Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif url.find("www.dailymotion.com/playlist") >= 0: # Playlist
id_playlist = dailym_getplaylist(url)
if id_playlist != "":
plugintools.log("id_playlist= "+id_playlist)
thumbnail=art+'/lnh_logo.png'
url = "https://api.dailymotion.com/playlist/"+id_playlist+"/videos"
plugintools.add_item( action="dailym_pl" , title=title + ' [COLOR lightyellow][B][Dailymotion[/B] playlist][/COLOR]' , url=url , fanart = fanart , thumbnail=thumbnail , folder=True)
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
data = file.readline()
i = i + 1
continue
elif url.find("dailymotion.com/video") >= 0:
video_id = dailym_getvideo(url)
if video_id != "":
thumbnail = "https://api.dailymotion.com/thumbnail/video/"+video_id+""
url = "plugin://plugin.video.dailymotion_com/?url="+video_id+"&mode=playVideo"
#plugintools.log("url= "+url)
# Appends a new item to the xbmc item list
# API Dailymotion list of video parameters: http://www.dailymotion.com/doc/api/obj-video.html
plugintools.add_item( action="play" , title=title + ' [COLOR lightyellow][B][Dailymotion[/B] video][/COLOR]' , url=url , thumbnail = thumbnail , fanart = fanart , isPlayable=True, folder=False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
data = file.readline()
i = i + 1
continue
elif url.endswith("m3u8") == True:
title = title.split('"')
title = title[0]
title = title.strip()
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [COLOR purple][m3u8][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "longurl" , title = '[COLOR red][I]' + '[/I][/COLOR][COLOR white]' + title + ' [COLOR blue][HTTP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
if data.startswith("rtmp") == True or data.startswith("rtsp") == True:
url = data
url = parse_url(url)
if cat != "": # Controlamos el caso de subcategoría de canales
if busqueda == 'search.txt':
params["url"] = url
server_rtmp(params)
server = params.get("server")
plugintools.log("params en simpletv" +repr(params) )
url = params.get("url")
plugintools.add_item( action = "launch_rtmp" , title = '[COLOR white]' + titulo + '[COLOR green] [' + server + '][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]', url = params.get("url") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
params["server"] = server
print url
data = file.readline()
i = i + 1
continue
else:
params["url"] = url
server_rtmp(params)
server = params.get("server")
plugintools.log("params en simpletv" +repr(params) )
plugintools.log("fanart= "+fanart)
url = params.get("url")
plugintools.add_item( action = "launch_rtmp" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR green] [' + server + '][/COLOR]' , url = params.get("url") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
print url
data = file.readline()
i = i + 1
continue
else:
if busqueda == 'search.txt':
params["url"] = url
server_rtmp(params)
server = params.get("server")
plugintools.log("params en simpletv" +repr(params) )
url = params.get("url")
plugintools.add_item( action = "launch_rtmp" , title = '[COLOR white]' + titulo + '[COLOR green] [' + server + '][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]' , url = params.get("url") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
print url
data = file.readline()
i = i + 1
continue
else:
params["url"] = url
server_rtmp(params)
server = params.get("server")
plugintools.log("fanart= "+fanart)
plugintools.log("params en simpletv" +repr(params) )
url = params.get("url")
plugintools.add_item( action = "launch_rtmp" , title = '[COLOR white]' + title + '[COLOR green] ['+ server + '][/COLOR]' , url = params.get("url") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
print url
data = file.readline()
i = i + 1
continue
if data.startswith("udp") == True or data.startswith("rtp") == True:
# print "udp"
url = data
url = parse_url(url)
plugintools.log("url retornada= "+url)
if cat != "": # Controlamos el caso de subcategoría de canales
if busqueda == 'search.txt':
plugintools.add_item( action = "play" , title = '[COLOR white]' + titulo + '[COLOR red] [UDP][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR red] [UDP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
if busqueda == 'search.txt':
plugintools.add_item( action = "play" , title = '[COLOR white]' + titulo + '[COLOR red] [UDP][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR red] [UDP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
if data.startswith("mms") == True or data.startswith("rtp") == True:
# print "udp"
url = data
url = parse_url(url)
plugintools.log("url retornada= "+url)
if cat != "": # Controlamos el caso de subcategoría de canales
if busqueda == 'search.txt':
plugintools.add_item( action = "play" , title = '[COLOR white]' + titulo + '[COLOR red] [MMS][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR red] [MMS][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
if busqueda == 'search.txt':
plugintools.add_item( action = "play" , title = '[COLOR white]' + titulo + '[COLOR red] [MMS][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR red] [MMS][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
if data.startswith("plugin") == True:
title = title.split('"')
title = title[0]
title = title.strip()
title = title.replace("#EXTINF:-1,", "")
url = data
url = url.strip()
if url.find("youtube") >= 0 :
if cat != "":
if busqueda == 'search.txt':
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR white] [You[COLOR red]Tube[/COLOR][COLOR white] Video][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = art + "icon.png" , fanart = art + 'fanart.jpg' , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR white] [You[COLOR red]Tube[/COLOR][COLOR white] Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
if busqueda == 'search.txt':
plugintools.add_item( action = "play" , title = '[COLOR white] ' + title + '[COLOR white] [You[COLOR red]Tube[/COLOR][COLOR white] Video][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = art + "icon.png" , fanart = art + 'fanart.jpg' , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "play" , title = '[COLOR white] ' + title + '[COLOR white] [You[COLOR red]Tube[/COLOR][COLOR white] Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
elif url.find("mode=1") >= 0 :
if cat != "":
if busqueda == 'search.txt':
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR lightblue] [Acestream][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR lightblue] [Acestream][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
if busqueda == 'search.txt':
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [COLOR lightblue] [Acestream][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR lightblue] [Acestream][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
elif url.find("mode=2") >= 0 :
if cat != "":
if busqueda == 'search.txt':
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR darkorange] [Sopcast][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR darkorange] [Sopcast][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
if busqueda == 'search.txt':
plugintools.add_item( action = "play" , title = '[COLOR white] ' + title + ' [COLOR darkorange] [Sopcast][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
plugintools.add_item( action = "play" , title = '[COLOR white] ' + title + '[COLOR darkorange] [Sopcast][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
elif data.startswith("magnet") == True:
if cat != "":
if busqueda == 'search.txt':
url = urllib.quote_plus(data)
title = parser_title(title)
#plugin://plugin.video.stream/play/<URL_ENCODED_LINK>
url = 'plugin://plugin.video.stream/play/' + url
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR orangered] [Torrent][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
#plugin://plugin.video.stream/play/<URL_ENCODED_LINK>
data = data.strip()
url = urllib.quote_plus(data).strip()
title = parser_title(title)
url = 'plugin://plugin.video.stream/play/' + url
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR orangered][Torrent][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
if busqueda == 'search.txt':
#plugin://plugin.video.stream/play/<URL_ENCODED_LINK>
url = urllib.quote_plus(data)
url = 'plugin://plugin.video.stream/play/' + url
title = parser_title(title)
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR orangered] [Torrent][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
title = parser_title(title)
data = data.strip()
url = urllib.quote_plus(data)
url = 'plugin://plugin.video.stream/play/' + url
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [COLOR orangered][Torrent][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif data.startswith("sop") == True:
if cat != "":
if busqueda == 'search.txt':
title = title.split('"')
title = title[0]
title = title.replace("#EXTINF:-1,", "")
# plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast
url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name='
url = url.strip()
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR darkorange] [Sopcast][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
title = title.split('"')
title = title[0]
title = title.replace("#EXTINF:-1,", "")
url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name='
plugintools.add_item( action = "play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR darkorange][Sopcast][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
if busqueda == 'search.txt':
title = title.split('"')
title = title[0]
title = title.replace("#EXTINF:-1,", "")
url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name='
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + '[COLOR darkorange] [Sopcast][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
else:
title = title.split('"')
title = title[0]
title = title.replace("#EXTINF:-1,", "")
url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name='
plugintools.add_item( action = "play" , title = '[COLOR white]' + title + ' [COLOR darkorange][Sopcast][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
data = file.readline()
i = i + 1
continue
elif data.startswith("ace") == True:
if cat != "":
if busqueda == 'search.txt':
# plugin://plugin.video.p2p-streams/?url=a55f96dd386b7722380802b6afffc97ff98903ac&mode=1&name=Sky+Sports+title
title = parser_title(title)
title = title.strip()
title_fixed = title.replace(" ", "+")
url = data.replace("ace:", "")
url = url.strip()
url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name=' + title_fixed
plugintools.add_item(action="play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR lightblue][Acestream][/COLOR] [COLOR lightblue][I](' + origen + ')[/COLOR][/I]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
data = file.readline()
data = data.strip()
i = i + 1
continue
else:
title = parser_title(title)
print 'data',data
url = data.replace("ace:", "")
url = url.strip()
print 'url',url
url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name='
plugintools.add_item(action="play" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR lightblue][Acestream][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
data = file.readline()
data = data.strip()
i = i + 1
continue
else:
if busqueda == 'search.txt':
# plugin://plugin.video.p2p-streams/?url=a55f96dd386b7722380802b6afffc97ff98903ac&mode=1&name=Sky+Sports+title
title = parser_title(title)
url = data.replace("ace:", "")
url = url.strip()
url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name='
plugintools.add_item(action="play" , title = '[COLOR white]' + title + ' [COLOR lightblue][Acestream][/COLOR] [COLOR lightblue][I](' + origen + ')[/COLOR][/I]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
data = file.readline()
data = data.strip()
i = i + 1
continue
else:
title = parser_title(title)
print 'data',data
url = data.replace("ace:", "")
url = url.strip()
print 'url',url
url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name='
plugintools.add_item(action="play" , title = '[COLOR white]' + title + ' [COLOR lightblue][Acestream][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
data = file.readline()
data = data.strip()
i = i + 1
continue
# Youtube playlist & channel
elif data.startswith("yt") == True:
if data.startswith("yt_playlist") == True:
if busqueda == 'search.txt':
title = title.split('"')
title = title[0]
title = title.replace("#EXTINF:-1,", "")
youtube_playlist = data.replace("yt_playlist(", "")
youtube_playlist = youtube_playlist.replace(")", "")
plugintools.log("youtube_playlist= "+youtube_playlist)
url = 'http://gdata.youtube.com/feeds/api/playlists/' + youtube_playlist
plugintools.add_item( action = "youtube_videos" , title = '[[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Playlist][/COLOR] [I][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
title = title.split('"')
title = title[0]
title = title.replace("#EXTINF:-1,", "")
plugintools.log("title= "+title)
youtube_playlist = data.replace("yt_playlist(", "")
youtube_playlist = youtube_playlist.replace(")", "")
plugintools.log("youtube_playlist= "+youtube_playlist)
url = 'http://gdata.youtube.com/feeds/api/playlists/' + youtube_playlist
plugintools.add_item( action = "youtube_videos" , title = '[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Playlist][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif data.startswith("yt_channel") == True:
if busqueda == 'search.txt':
title = title.split('"')
title = title[0]
title = title.replace("#EXTINF:-1,", "")
youtube_channel = data.replace("yt_channel(", "")
youtube_channel = youtube_channel.replace(")", "")
plugintools.log("youtube_user= "+youtube_channel)
url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_channel + '/playlists?v=2&start-index=1&max-results=30'
plugintools.add_item( action = "youtube_playlists" , title = '[[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Channel][/COLOR] [I][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
title = title.split('"')
title = title[0]
title = title.replace("#EXTINF:-1,", "")
plugintools.log("title= "+title)
youtube_channel = data.replace("yt_channel(", "")
youtube_channel = youtube_channel.replace(")", "")
youtube_channel = youtube_channel.strip()
url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_channel + '/playlists?v=2&start-index=1&max-results=30'
plugintools.log("url= "+url)
plugintools.add_item( action = "youtube_playlists" , title = '[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Channel][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif data.startswith("m3u") == True:
if busqueda == 'search.txt':
url = data.replace("m3u:", "")
plugintools.add_item( action = "getfile_http" , title = title + ' [I][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
url = data.replace("m3u:", "")
plugintools.add_item( action = "getfile_http" , title = title + ' [COLOR orange][Lista [B]M3U[/B]][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
elif data.startswith("plx") == True:
if busqueda == 'search.txt':
url = data.replace("plx:", "")
# Se añade parámetro plot porque en las listas PLX no tengo en una función separada la descarga (FIX IT!)
plugintools.add_item( action = "plx_items" , plot = "" , title = title + ' [I][/COLOR][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
url = data.replace("plx:", "")
# Se añade parámetro plot porque en las listas PLX no tengo en una función separada la descarga (FIX IT!)
plugintools.add_item( action = "plx_items" , plot = "" , title = title + ' [COLOR orange][Lista [B]PLX[/B]][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )
if saving_url == 1:
plugintools.log("URL= "+url)
save_url(url, filename)
data = file.readline()
i = i + 1
continue
else:
data = file.readline()
i = i + 1
continue
else:
data = file.readline()
i = i + 1
file.close()
if title == 'search.txt':
os.remove(tmp + title)
def myplaylists_m3u (params): # Mis listas M3U
plugintools.log("[nec tv-0.1.0].myplaylists_m3u "+repr(params))
thumbnail = params.get("thumbnail")
plugintools.add_item(action="play" , title = "[COLOR red][B][Tutorial][/B][COLOR lightyellow]: [/COLOR][COLOR blue][I][Youtube][/I][/COLOR]" , thumbnail = art + "icon.png" , url = "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=8i0KouM-4-U" , folder = False , isPlayable = True )
plugintools.add_item(action="search_channel" , title = "[B][COLOR lightyellow]Buscador de canales[/COLOR][/B][COLOR lightblue][I] Nuevo![/I][/COLOR]" , thumbnail = art + "search.png" , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
ficheros = os.listdir(playlists) # Lectura de archivos en carpeta /playlists. Cuidado con las barras inclinadas en Windows
# Control paternal
pekes_no = plugintools.get_setting("pekes_no")
for entry in ficheros:
plot = entry.split(".")
plot = plot[0]
plugintools.log("entry= "+entry)
if pekes_no == "true" :
print "Control paternal en marcha"
if entry.find("XXX") >= 0 :
plugintools.log("Activando control paternal...")
else:
if entry.endswith("plx") == True: # Control para según qué extensión del archivo se elija thumbnail y función a ejecutar
entry = entry.replace(".plx", "")
plugintools.add_item(action="plx_items" , plot = plot , title = '[COLOR white]' + entry + '[/COLOR][COLOR green][B][I].plx[/I][/B][/COLOR]' , url = playlists + entry , thumbnail = art + 'plx3.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
if entry.endswith("p2p") == True:
entry = entry.replace(".p2p", "")
plugintools.add_item(action="p2p_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR blue][B][I].p2p[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'p2p.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
if entry.endswith("m3u") == True:
entry = entry.replace(".m3u", "")
plugintools.add_item(action="simpletv_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
if entry.endswith("jsn") == True:
entry = entry.replace(".jsn", "")
plugintools.add_item(action="json_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
else:
if entry.endswith("plx") == True: # Control para según qué extensión del archivo se elija thumbnail y función a ejecutar
entry = entry.replace(".plx", "")
plugintools.add_item(action="plx_items" , plot = plot , title = '[COLOR white]' + entry + '[/COLOR][COLOR green][B][I].plx[/I][/B][/COLOR]' , url = playlists + entry , thumbnail = art + 'plx3.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
if entry.endswith("p2p") == True:
entry = entry.replace(".p2p", "")
plugintools.add_item(action="p2p_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR blue][B][I].p2p[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'p2p.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
if entry.endswith("m3u") == True:
entry = entry.replace(".m3u", "")
plugintools.add_item(action="simpletv_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
if entry.endswith("jsn") == True:
entry = entry.replace(".jsn", "")
plugintools.add_item(action="json_items" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
def playlists_m3u(params): # Biblioteca online
    """Online library: read the remote XML, extract the <subchannel> entries of
    the selected channel and list them as downloadable M3U playlists.

    The six hand-written elif branches (one per "auto" channel) were identical
    except for the channel name — the source itself noted "Sustituir por una
    lista!!!" — so they are collapsed into one membership test. The redundant
    second `params["ext"] = "m3u"` (already set above) was dropped.
    """
    plugintools.log("[nec tv-0.1.0].playlists_m3u "+repr(params))
    data = plugintools.read( params.get("url") )
    name_channel = params.get("plot")
    pattern = '<name>'+name_channel+'(.*?)</channel>'
    data = plugintools.find_single_match(data, pattern)
    online = '[COLOR yellowgreen][I][Auto][/I][/COLOR]'
    params["ext"] = 'm3u'
    plugintools.add_item( action="" , title='[B][COLOR yellow]'+name_channel+'[/B][/COLOR] - [B][I][COLOR lightyellow]latinototal19@gmail.com [/COLOR][/B][/I]' , thumbnail= art + 'icon.png' , folder = False , isPlayable = False )
    subchannel = re.compile('<subchannel>([^<]+)<name>([^<]+)</name>([^<]+)<thumbnail>([^<]+)</thumbnail>([^<]+)<url>([^<]+)</url>([^<]+)</subchannel>').findall(data)
    # Channels that get the [Auto] tag and propagate their title via params
    auto_channels = ("Vcx7 IPTV", "Largo Barbate M3U", "XBMC Mexico", "allSat", "AND Wonder", "FenixTV")
    for biny, ciny, diny, winy, pixy, dixy, boxy in subchannel:
        if ciny in auto_channels:
            plugintools.add_item( action="getfile_http" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
            params["title"] = ciny
        else:
            # Unknown subchannel: derive plot from the name (text before "[")
            plot = ciny.split("[")
            plot = plot[0]
            plugintools.add_item( action="getfile_http" , plot = plot , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
    plugintools.log("[nec tv-0.1.0].playlists_m3u "+repr(params))
def getfile_http(params): # Descarga de lista M3U + llamada a simpletv_items para que liste los items
    """Download a remote M3U playlist, then delegate listing to simpletv_items().

    Fixes: removed the unused local `url` — getfile_url reads params["url"] itself.
    """
    plugintools.log("[nec tv-0.1.0].getfile_http "+repr(params))
    params["ext"] = "m3u"   # force the .m3u branch inside getfile_url
    getfile_url(params)
    simpletv_items(params)
def parse_url(url):
    """Normalise a channel URL: trim whitespace and drop the rtmp-raw prefix.

    Returns the cleaned URL, or None (after logging) when `url` is empty.
    """
    if url == "":
        plugintools.log("error en url= ") # should show a parse-error dialog (missing URL, for instance)
        return
    cleaned = url.strip()
    return cleaned.replace("rtmp://$OPT:rtmp-raw=", "")
def getfile_url(params):
    """Download the remote playlist named in params["url"] and save it under
    /playlists with a filename derived from the item title and extension.

    Fixes: removed the no-op expression `ext == 'p2p'`; removed the dead
    `filename = params.get("plot")` assignment (immediately overwritten);
    removed the dead trailing block that reopened the file, built discarded
    dicts and leaked the handle (it also shadowed the builtin `file`); the
    output file is now closed via try/finally; bare `except:` narrowed.
    """
    plugintools.log("[nec tv-0.1.0].getfile_url " +repr(params))
    ext = params.get("ext")
    title = params.get("title")
    # Local filename depends on the playlist extension (m3u, p2p, plx)
    if ext == 'plx':
        filename = parser_title(title)
        params["plot"]=filename
        filename = title + ".plx" # El título del archivo con extensión (m3u, p2p, plx)
    elif ext == 'm3u':
        # Strip formatting from the title so it can be used as a filename
        filename = parser_title(title)
        filename = filename + ".m3u"
    else:
        # Any other extension is treated as p2p
        filename = parser_title(title)
        filename = filename + ".p2p"
    if filename.endswith("plx") == True :
        filename = parser_title(filename)
    plugintools.log("filename= "+filename)
    url = params.get("url")
    plugintools.log("url= "+url)
    try:
        response = urllib2.urlopen(url)
        body = response.read()
    except Exception:
        # Fallback: fetch the list through plugintools with a browser User-Agent
        request_headers=[]
        request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
        body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
    # Write the downloaded body to disk, guaranteeing the handle is closed
    fh = open(playlists + filename, "wb")
    try:
        fh.write(body)
    finally:
        fh.close()
def header_xml(params):
    """Parse the header of a remote playlist XML and, when author/description
    data is present, add one informational (non-playable) item.

    Always returns the <fanart> value extracted from the XML.
    """
    plugintools.log("[nec tv-0.1.0].header_xml "+repr(params))
    url = params.get("url")
    params.get("title")
    data = plugintools.read(url)
    # plugintools.log("data= "+data)
    author = plugintools.find_single_match(data, '<poster>(.*?)</poster>').strip()
    fanart = plugintools.find_single_match(data, '<fanart>(.*?)</fanart>')
    message = plugintools.find_single_match(data, '<message>(.*?)</message>')
    desc = plugintools.find_single_match(data, '<description>(.*?)</description>')
    thumbnail = plugintools.find_single_match(data, '<thumbnail>(.*?)</thumbnail>')
    # Build the header title once; None means "nothing to show"
    header_title = None
    if author != "":
        if message != "":
            header_title = '[COLOR green][B]' + author + '[/B][/COLOR][I] ' + message + '[/I]'
        else:
            header_title = '[COLOR green][B]' + author + '[/B][/COLOR]'
    elif desc != "":
        header_title = '[COLOR green][B]' + desc + '[/B][/COLOR]'
    if header_title is not None:
        plugintools.add_item(action="" , plot = author , title = header_title, url = "" , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = False )
    return fanart
def search_channel(params):
    """Search every local playlist (m3u, p2p, plx) for channel titles that
    contain the user-supplied text.

    Matches are appended to tmp/search.txt in a pseudo-M3U format
    ('#EXTINF:-1,<title>"<source-file>' followed by the channel URL) and the
    results file is finally listed through simpletv_items().

    params["plot"] may carry a preset search text; when empty the user is
    prompted with an on-screen keyboard (previous search pre-filled).
    """
    plugintools.log("[nec tv-0.1.0].search " + repr(params))
    buscar = params.get("plot")
    # plugintools.log("buscar texto: "+buscar)
    if buscar == "":
        # No preset text: prompt the user, pre-filling the previous search
        last_search = plugintools.get_setting("last_search")
        texto = plugintools.keyboard_input(last_search)
        plugintools.set_setting("last_search",texto)
        params["texto"]=texto
        texto = texto.lower()
        cat = ""
        if texto == "":
            errormsg = plugintools.message("nec tv","Por favor, introduzca el canal a buscar")
            return errormsg
    else:
        texto = buscar
        texto = texto.lower()
        plugintools.log("texto a buscar= "+texto)
        cat = ""
    # Truncate the results file before collecting matches
    results = open(tmp + 'search.txt', "wb")
    results.seek(0)
    results.close()
    # --- Pass 1: M3U playlists in the local library ---
    ficheros = os.listdir(playlists) # Files in the /playlists folder. Mind the slashes on Windows
    for entry in ficheros:
        if entry.endswith("m3u") == True:
            print "Archivo tipo m3u"
            plot = entry.split(".")
            plot = plot[0] # plot holds the file name (without extension)
            # Open the file
            filename = plot + '.m3u'
            plugintools.log("Archivo M3U: "+filename)
            arch = open(playlists + filename, "r")
            num_items = len(arch.readlines())
            print num_items
            i = 0 # Guard so the while loop does not exit before the last record is read
            arch.seek(0)
            data = arch.readline()
            data = data.strip()
            plugintools.log("data linea= "+data)
            texto = texto.strip()
            plugintools.log("data_antes= "+data)
            plugintools.log("texto a buscar= "+texto)
            data = arch.readline()
            data = data.strip()
            i = i + 1
            while i <= num_items :
                if data.startswith('#EXTINF:-1') == True:
                    data = data.replace('#EXTINF:-1,', "") # Drop the first part of the line
                    data = data.replace(",", "")
                    title = data.strip() # Title obtained
                    if data.find('$ExtFilter="') >= 0:
                        data = data.replace('$ExtFilter="', "")
                    if data.find(' $ExtFilter="') >= 0:
                        data = data.replace('$ExtFilter="', "")
                    title = title.replace("-AZBOX*", "")
                    title = title.replace("AZBOX *", "")
                    # m3u_items() resolves thumbnail/fanart/category/clean title
                    images = m3u_items(title)
                    print 'images',images
                    thumbnail = images[0]
                    fanart = images[1]
                    cat = images[2]
                    title = images[3]
                    plugintools.log("title= "+title)
                    minus = title.lower()
                    data = arch.readline()
                    data = data.strip()
                    i = i + 1
                    if minus.find(texto) >= 0:
                        # if re.match(texto, title, re.IGNORECASE):
                        # plugintools.log("Concidencia hallada. Obtenemos url del canal: " + texto)
                        if data.startswith("http") == True:
                            url = data.strip()
                            if cat != "": # Handle the channel sub-category case
                                results = open(tmp + 'search.txt', "a")
                                results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                                results.write(url + '\n\n')
                                results.close()
                                data = arch.readline()
                                i = i + 1
                                continue
                            else:
                                results = open(tmp + 'search.txt', "a")
                                results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                                results.write(url + '\n\n')
                                results.close()
                                data = arch.readline()
                                i = i + 1
                                continue
                        if data.startswith("rtmp") == True:
                            url = data
                            url = parse_url(url)
                            if cat != "": # Handle the channel sub-category case
                                results = open(tmp + 'search.txt', "a")
                                results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                                results.write(url + '\n\n')
                                results.close()
                                data = arch.readline()
                                i = i + 1
                                continue
                            else:
                                results = open(tmp + 'search.txt', "a")
                                results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                                results.write(url + '\n\n')
                                results.close()
                                data = arch.readline()
                                i = i + 1
                                continue
                        if data.startswith("yt") == True:
                            print "CORRECTO"
                            url = data
                            results = open(tmp + 'search.txt', "a")
                            results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                            results.write(url + '\n\n')
                            results.close()
                            data = arch.readline()
                            i = i + 1
                            continue
                    else:
                        # Title does not match: keep scanning for the next #EXTINF
                        data = arch.readline()
                        data = data.strip()
                        plugintools.log("data_buscando_title= "+data)
                        i = i + 1
                else:
                    data = arch.readline()
                    data = data.strip()
                    plugintools.log("data_final_while= "+data)
                    i = i + 1
                    continue
    # --- Pass 2: P2P playlists in the local library ---
    ficheros = os.listdir(playlists) # Files in the /playlists folder. Mind the slashes on Windows
    for entry in ficheros:
        if entry.endswith('p2p') == True:
            plot = entry.split(".")
            plot = plot[0] # plot holds the file name (without extension)
            # Open the file
            plugintools.log("texto a buscar= "+texto)
            filename = plot + '.p2p'
            arch = open(playlists + filename, "r")
            num_items = len(arch.readlines())
            plugintools.log("archivo= "+filename)
            i = 0 # Guard so the while loop does not exit before the last record is read
            arch.seek(0)
            while i <= num_items:
                data = arch.readline()
                data = data.strip()
                title = data
                texto = texto.strip()
                plugintools.log("linea a buscar title= "+data)
                i = i + 1
                if data.startswith("#") == True:
                    data = arch.readline()
                    data = data.strip()
                    i = i + 1
                    continue
                if data.startswith("default=") == True:
                    data = arch.readline()
                    data = data.strip()
                    i = i + 1
                    continue
                if data.startswith("art=") == True:
                    data = arch.readline()
                    data = data.strip()
                    i = i + 1
                    continue
                if data != "":
                    title = data.strip() # Title obtained
                    plugintools.log("title= "+title)
                    minus = title.lower()
                    if minus.find(texto) >= 0:
                        plugintools.log("title= "+title)
                        data = arch.readline()
                        i = i + 1
                        #print i
                        plugintools.log("linea a comprobar url= "+data)
                        if data.startswith("sop") == True:
                            # plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast
                            title_fixed = title.replace(" " , "+")
                            url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name=' + title_fixed
                            url = url.strip()
                            results = open(tmp + 'search.txt', "a")
                            results.write("#EXTINF:-1," + title + '"' + filename + '\n') # FIXME: should not prepend #EXTINF:-1, when the source is not an m3u list
                            results.write(url + '\n\n')
                            results.close()
                            data = arch.readline()
                            i = i + 1
                            continue
                        elif data.startswith("magnet") == True:
                            # magnet:?xt=urn:btih:6CE983D676F2643430B177E2430042E4E65427...
                            title_fixed = title.split('"')
                            title = title_fixed[0]
                            plugintools.log("title magnet= "+title)
                            url = data
                            plugintools.log("url magnet= "+url)
                            results = open(tmp + 'search.txt', "a")
                            results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                            results.write(url + '\n\n')
                            results.close()
                            data = arch.readline()
                            i = i + 1
                            continue
                        elif data.find("://") == -1:
                            # plugin://plugin.video.p2p-streams/?url=a55f96dd386b7722380802b6afffc97ff98903ac&mode=1&name=Sky+Sports+title
                            title_fixed = title.split('"')
                            title = title_fixed[0]
                            title_fixed = title.replace(" " , "+")
                            url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=1&name=' + title_fixed
                            results = open(tmp + 'search.txt', "a")
                            results.write("#EXTINF:-1," + title + '"' + filename + '\n') # FIXME: should not prepend #EXTINF:-1, when the source is not an m3u list
                            results.write(url + '\n\n')
                            results.close()
                            data = arch.readline()
                            i = i + 1
                            continue
                    else:
                        plugintools.log("no coinciden titulo y texto a buscar")
    # --- Pass 3: PLX playlists in the local library ---
    for entry in ficheros:
        if entry.endswith('plx') == True:
            plot = entry.split(".")
            plot = plot[0] # plot holds the file name (without extension)
            # Open the file
            plugintools.log("texto a buscar= "+texto)
            filename = plot + '.plx'
            plugintools.log("archivo PLX: "+filename)
            arch = open(playlists + filename, "r")
            num_items = len(arch.readlines())
            print num_items
            i = 0
            arch.seek(0)
            while i <= num_items:
                data = arch.readline()
                data = data.strip()
                i = i + 1
                print i
                if data.startswith("#") == True:
                    continue
                if (data == 'type=video') or (data == 'type=audio') == True:
                    # The line after the type marker carries "name=<title>"
                    data = arch.readline()
                    i = i + 1
                    print i
                    data = data.replace("name=", "")
                    data = data.strip()
                    title = data
                    minus = title.lower()
                    if minus.find(texto) >= 0:
                        plugintools.log("Título coincidente= "+title)
                        data = arch.readline()
                        plugintools.log("Siguiente linea= "+data)
                        i = i + 1
                        print i
                        print "Analizamos..."
                        # Skip metadata lines until the URL= line of this item
                        while data <> "" :
                            if data.startswith("thumb") == True:
                                data = arch.readline()
                                plugintools.log("data_plx= "+data)
                                i = i + 1
                                print i
                                continue
                            if data.startswith("date") == True:
                                data = arch.readline()
                                plugintools.log("data_plx= "+data)
                                i = i + 1
                                print i
                                continue
                            if data.startswith("background") == True:
                                data = arch.readline()
                                plugintools.log("data_plx= "+data)
                                i = i + 1
                                print i
                                continue
                            if data.startswith("URL") == True:
                                data = data.replace("URL=", "")
                                data = data.strip()
                                url = data
                                parse_url(url)
                                plugintools.log("URL= "+url)
                                results = open(tmp + 'search.txt', "a")
                                results.write("#EXTINF:-1," + title + '"' + filename + '\n')
                                results.write(url + '\n\n')
                                results.close()
                                data = arch.readline()
                                i = i + 1
                                break
    arch.close()
    results.close()
    params["plot"] = 'search' # Pass the results-file name through params so simpletv_items opens it
    params['texto']= texto # Add the searched text to the params dictionary
    simpletv_items(params)
def agendatv(params):
    """Build a sports TV-agenda listing (Python 2 / Kodi plugin).

    Scrapes the HTML schedule page at params["url"] and adds one Kodi
    list item per match: kick-off time, competition, rivals and channel.

    NOTE(review): the nested for/while with shared i/j/k counters is
    fragile -- the inner ``while`` consumes every row during the first
    ``matches`` iteration; left byte-identical on purpose.
    """
    plugintools.log("[nec tv-0.1.0].agendatv "+repr(params))
    # Accumulators: kick-off times, team names, competitions, channels.
    hora_partidos = []
    lista_equipos=[]
    campeonato=[]
    canales=[]
    url = params.get("url")
    data = plugintools.read(url)
    plugintools.log("data= "+data)
    # Scrape table rows, times, team names and channel names from the page.
    matches = plugintools.find_multiple_matches(data,'<tr>(.*?)</tr>')
    horas = plugintools.find_multiple_matches(data, 'color=#990000>(.*?)</td>')
    txt = plugintools.find_multiple_matches(data, 'color="#000099"><b>(.*?)</td>')
    tv = plugintools.find_multiple_matches(data, '<td align="left"><font face="Verdana, Arial, Helvetica, sans-serif" size="1" ><b>([^<]+)</b></font></td>')
    # <b><a href="indexf.php?comp=Súper Final Argentino">Súper Final Argentino </td>
    for entry in matches:
        # Competition name: strip href artefacts, then map raw Latin-1
        # bytes back to UTF-8 accented characters.
        torneo = plugintools.find_single_match(entry, '<a href=(.*?)">')
        torneo = torneo.replace(" ", "")
        torneo = torneo.replace("indexf.php?comp=", "")
        torneo = torneo.replace('>', "")
        torneo = torneo.replace('"', "")
        torneo = torneo.replace("\n", "")
        torneo = torneo.strip()
        torneo = torneo.replace('\xfa', 'ú')
        torneo = torneo.replace('\xe9', 'é')
        torneo = torneo.replace('\xf3', 'ó')
        torneo = torneo.replace('\xfa', 'ú')
        torneo = torneo.replace('\xaa', 'ª')
        torneo = torneo.replace('\xe1', 'á')
        torneo = torneo.replace('\xf1', 'ñ')
        torneo = torneo.replace('indexuf.php?comp=', "")
        torneo = torneo.replace('indexfi.php?comp=', "")
        plugintools.log("string encoded= "+torneo)
        if torneo != "":
            plugintools.log("torneo= "+torneo)
            campeonato.append(torneo)
    # ERROR! The matchday rounds must be added back, as it was before!!
    # Build two lists: the teams facing each other and the kick-off times
    for dato in txt:
        lista_equipos.append(dato)
    for tiempo in horas:
        hora_partidos.append(tiempo)
    # <td align="left"><font face="Verdana, Arial, Helvetica, sans-serif" size="1" ><b> Canal + Fútbol</b></font></td>
    # <td align="left"><font face="Verdana, Arial, Helvetica, sans-serif" size="1" ><b> IB3</b></font></td>
    for kanal in tv:
        # Same Latin-1 -> UTF-8 accent repair for each channel name.
        kanal = kanal.replace(" ", "")
        kanal = kanal.strip()
        kanal = kanal.replace('\xfa', 'ú')
        kanal = kanal.replace('\xe9', 'é')
        kanal = kanal.replace('\xf3', 'ó')
        kanal = kanal.replace('\xfa', 'ú')
        kanal = kanal.replace('\xaa', 'ª')
        kanal = kanal.replace('\xe1', 'á')
        kanal = kanal.replace('\xf1', 'ñ')
        canales.append(kanal)
    print lista_equipos
    print hora_partidos # Incidentally the match days have also ended up in this list
    print campeonato
    print canales
    i = 0 # Teams counter
    j = 0 # Times counter
    k = 0 # Competition counter
    max_equipos = len(lista_equipos) - 2
    print max_equipos
    for entry in matches:
        while j <= max_equipos:
            # plugintools.log("entry= "+entry)
            fecha = plugintools.find_single_match(entry, 'color=#990000><b>(.*?)</b></td>')
            fecha = fecha.replace("á", "á")
            fecha = fecha.strip()
            gametime = hora_partidos[i]
            gametime = gametime.replace("<b>", "")
            gametime = gametime.replace("</b>", "")
            gametime = gametime.strip()
            # Mojibake repair (UTF-8 read as Latin-1); literals must stay byte-exact.
            gametime = gametime.replace('é', 'é')
            gametime = gametime.replace('á', 'á')
            gametime = gametime.replace('é', 'é')
            gametime = gametime.replace('á', 'á')
            print gametime.find(":")
            # An "HH:MM" time has the colon at index 2; otherwise the row is
            # a day header, handled in the else branch below.
            if gametime.find(":") == 2:
                i = i + 1
                #print i
                local = lista_equipos[j]
                local = local.strip()
                local = local.replace('\xfa', 'ú')
                local = local.replace('\xe9', 'é')
                local = local.replace('\xf3', 'ó')
                local = local.replace('\xfa', 'ú')
                local = local.replace('\xaa', 'ª')
                local = local.replace('\xe1', 'á')
                local = local.replace('\xf1', 'ñ')
                j = j + 1
                print j
                visitante = lista_equipos[j]
                visitante = visitante.strip()
                visitante = visitante.replace('\xfa', 'ú')
                visitante = visitante.replace('\xe9', 'é')
                visitante = visitante.replace('\xf3', 'ó')
                visitante = visitante.replace('\xfa', 'ú')
                visitante = visitante.replace('\xaa', 'ª')
                visitante = visitante.replace('\xe1', 'á')
                visitante = visitante.replace('\xf1', 'ñ')
                local = local.replace('é', 'é')
                local = local.replace('á', 'á')
                j = j + 1
                print j
                tipo = campeonato[k]
                channel = canales[k]
                channel = channel.replace('\xfa', 'ú')
                channel = channel.replace('\xe9', 'é')
                channel = channel.replace('\xf3', 'ó')
                channel = channel.replace('\xfa', 'ú')
                channel = channel.replace('\xaa', 'ª')
                channel = channel.replace('\xe1', 'á')
                channel = channel.replace('\xf1', 'ñ')
                channel = channel.replace('\xc3\xba', 'ú')
                channel = channel.replace('Canal +', 'Canal+')
                title = '[B][COLOR khaki]' + tipo + ':[/B][/COLOR] ' + '[COLOR lightyellow]' + '(' + gametime + ')[COLOR white] ' + local + ' vs ' + visitante + '[/COLOR][COLOR lightblue][I] (' + channel + ')[/I][/COLOR]'
                plugintools.add_item(plot = channel , action="contextMenu", title=title , url = "", fanart = art + 'agendatv.jpg', thumbnail = art + 'icon.png' , folder = True, isPlayable = False)
                # diccionario[clave] = valor
                plugintools.log("channel= "+channel)
                params["plot"] = channel
                # plugintools.add_item(plot = channel , action = "search_channel", title = '[COLOR lightblue]' + channel + '[/COLOR]', url= "", thumbnail = art + 'icon.png', fanart = fanart , folder = True, isPlayable = False)
                k = k + 1
                print k
                plugintools.log("title= "+title)
            else:
                # Not a time -> it is a day header; render it as a red separator.
                plugintools.add_item(action="", title='[B][COLOR red]' + gametime + '[/B][/COLOR]', thumbnail = art + 'icon.png' , fanart = art + 'agendatv.jpg' , folder = True, isPlayable = False)
                i = i + 1
def encode_string(url):
    """Transliterate Latin-1 accented bytes in *url* to plain ASCII.

    Uppercase/lowercase vowels with diacritics become their base letter,
    'N with tilde'/'C-cedilla' become N/c, and the ordinal signs
    ('\xba', '\xb0') plus the colon ('\x3a') are removed entirely.

    Fixes over the previous version:
    - removed the dead ``auxiliar = nueva_cadena.encode('utf-8')`` line,
      which on Python 2 could raise UnicodeDecodeError for any remaining
      non-ASCII byte (implicit ASCII decode before encoding);
    - removed the useless ``url = nueva_cadena`` rebind;
    - deduplicated repeated dict keys ('\xed', '\xf3', '\xe5');
    - dropped the leftover per-key debug logging done on every call.

    NOTE(review): a second ``encode_string`` defined later in this file
    shadows this one at runtime; this definition is kept for callers that
    ran before the later ``def`` executed, with unchanged behavior.
    """
    accent_map = {
        # Uppercase
        '\xc1': 'A', '\xc9': 'E', '\xcd': 'I', '\xd3': 'O',
        '\xda': 'U', '\xdc': 'U', '\xd1': 'N', '\xc7': 'C',
        # Lowercase vowels
        '\xe1': 'a', '\xe2': 'a', '\xe3': 'a', '\xe4': 'a', '\xe5': 'a',
        '\xe8': 'e', '\xe9': 'e', '\xea': 'e', '\xeb': 'e',
        '\xec': 'i', '\xed': 'i', '\xee': 'i', '\xef': 'i',
        '\xf0': 'o', '\xf2': 'o', '\xf3': 'o', '\xf4': 'o', '\xf5': 'o',
        '\xf9': 'u', '\xfa': 'u', '\xfb': 'u', '\xfc': 'u',
        # Consonants
        '\xf1': 'n', '\xe7': 'c',
        # Stripped outright: ordinal signs and the colon
        '\xba': '', '\xb0': '', '\x3a': '',
    }
    nueva_cadena = url
    for src, dst in accent_map.items():
        nueva_cadena = nueva_cadena.replace(src, dst)
    return nueva_cadena
def plx_items(params):
    """Parse a Navixtreme .plx playlist and add one Kodi item per entry.

    Resolves the playlist file name from params ("plot" if set, otherwise
    "title" after downloading via getfile_url), reads the list header
    (title/background/logo), then walks the file line by line handling the
    entry types: comment, video/audio (dispatching each URL scheme to the
    matching resolver action) and nested playlist links (m3u/plx).
    Finally it deletes the sort-order helper playlists Navixtreme creates.

    NOTE(review): ``file`` shadows the builtin; the manual line counter
    ``i`` and the repeated readline/strip pattern are load-bearing, so the
    code is left byte-identical and only annotated.
    """
    plugintools.log("[nec tv-0.1.0].plx_items" +repr(params))
    fanart = ""
    thumbnail = ""
    # Choose the title source: params["title"] (download the file first)
    # when no "plot" was given, otherwise params["plot"].
    if params.get("plot") == "":
        title = params.get("title").strip() + '.plx'
        title = parser_title(title)
        title = title.strip()
        filename = title
        params["plot"]=filename
        params["ext"] = 'plx'
        getfile_url(params)
        title = params.get("title")
    else:
        title = params.get("plot")
        title = title.strip()
        title = parser_title(title)
    plugintools.log("Lectura del archivo PLX")
    title = title.replace(" .plx", ".plx")
    title = title.strip()
    file = open(playlists + parser_title(title) + '.plx', "r")
    file.seek(0)
    # Count lines so the reader loops below know when to stop.
    num_items = len(file.readlines())
    print num_items
    file.seek(0)
    # --- Read the playlist's own title, background and logo ---
    background = art + 'fanart.jpg'
    logo = art + 'plx3.png'
    file.seek(0)
    data = file.readline()
    while data <> "":
        plugintools.log("data= "+data)
        if data.startswith("background=") == True:
            data = data.replace("background=", "")
            background = data.strip()
            plugintools.log("background= "+background)
            # Fall back to params["extra"], then to the bundled fanart.
            if background == "":
                background = params.get("extra")
                if background == "":
                    background = art + 'fanart.jpg'
            data = file.readline()
            continue
        if data.startswith("title=") == True:
            name = data.replace("title=", "")
            name = name.strip()
            plugintools.log("name= "+name)
            if name == "Select sort order for this list":
                name = "Seleccione criterio para ordenar ésta lista... "
            data = file.readline()
            continue
        if data.startswith("logo=") == True:
            data = data.replace("logo=", "")
            logo = data.strip()
            plugintools.log("logo= "+logo)
            title = parser_title(title)
            if thumbnail == "":
                thumbnail = art + 'plx3.png'
            # Header rows: playlist file name and its display title.
            plugintools.add_item(action="" , title = '[COLOR lightyellow][B][I]playlist / '+ title + '[/B][/I][/COLOR]', url = playlists + title , thumbnail = logo , fanart = background , folder = False , isPlayable = False)
            plugintools.log("fanart= "+fanart)
            plugintools.add_item(action="" , title = '[I][B]' + name + '[/B][/I]' , url = "" , thumbnail = logo , fanart = background , folder = False , isPlayable = False)
            data = file.readline()
            break
        else:
            data = file.readline()
    # Secondary attempt to pick up background/title from the next line.
    try:
        data = file.readline()
        plugintools.log("data= "+data)
        if data.startswith("background=") == True:
            data = data.replace("background=", "")
            data = data.strip()
            fanart = data
            background = fanart
            plugintools.log("fanart= "+fanart)
        else:
            # data = file.readline()
            # NOTE(review): this nested check repeats the condition above
            # and can never be reached with a different result.
            if data.startswith("background=") == True:
                print "Archivo plx!"
                data = data.replace("background=", "")
                fanart = data.strip()
                plugintools.log("fanart= "+fanart)
            else:
                if data.startswith("title=") == True:
                    name = data.replace("title=", "")
                    name = name.strip()
                    plugintools.log("name= "+name)
    except:
        plugintools.log("ERROR: Unable to load PLX file")
    data = file.readline()
    try:
        if data.startswith("title=") == True:
            data = data.replace("title=", "")
            name = data.strip()
            plugintools.log("title= "+title)
            plugintools.add_item(action="" , title = '[COLOR lightyellow][B][I]playlist / '+ title +'[/I][/B][/COLOR]' , url = playlists + title , thumbnail = logo , fanart = fanart , folder = False , isPlayable = False)
            plugintools.add_item(action="" , title = '[I][B]' + name + '[/B][/I]' , url = "" , thumbnail = art + "icon.png" , fanart = fanart , folder = False , isPlayable = False)
    except:
        plugintools.log("Unable to read PLX title")
    # --- Read the entries ---
    i = 0
    file.seek(0)
    while i <= num_items:
        data = file.readline()
        data = data.strip()
        i = i + 1
        print i
        if data.startswith("#") == True:
            continue
        elif data.startswith("rating") == True:
            continue
        elif data.startswith("description") == True:
            continue
        # Comment entries: collect name/thumb/background, then render a
        # non-playable separator item.
        if (data == 'type=comment') == True:
            data = file.readline()
            i = i + 1
            print i
            while data <> "" :
                if data.startswith("name") == True:
                    title = data.replace("name=", "")
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
                    continue
                elif data.startswith("thumb") == True:
                    data = data.replace("thumb=", "")
                    data = data.strip()
                    thumbnail = data
                    if thumbnail == "":
                        thumbnail = logo
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
                    continue
                elif data.startswith("background") == True:
                    data = data.replace("background=", "")
                    fanart = data.strip()
                    if fanart == "":
                        fanart = background
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
                    continue
            plugintools.add_item(action="", title = title , url = "", thumbnail = thumbnail , fanart = fanart , folder = False, isPlayable = False)
        # NOTE(review): due to precedence this condition is really
        # (data == 'type=video') or ((data == 'type=audio') == True) --
        # same effect here, but the `== True` is misleading.
        if (data == 'type=video') or (data == 'type=audio') == True:
            data = file.readline()
            i = i + 1
            print i
            while data <> "" :
                if data.startswith("#") == True:
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
                    continue
                elif data.startswith("description") == True:
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
                    continue
                elif data.startswith("rating") == True:
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
                    continue
                elif data.startswith("name") == True:
                    data = data.replace("name=", "")
                    data = data.strip()
                    title = data
                    # Translate Navixtreme's sort-order labels to Spanish.
                    if title == "[COLOR=FF00FF00]by user-assigned order[/COLOR]" :
                        title = "Seleccione criterio para ordenar ésta lista... "
                    if title == "by user-assigned order" :
                        title = "Según se han agregado en la lista"
                    if title == "by date added, oldest first" :
                        title = "Por fecha de agregación, las más antiguas primero"
                    if title == "by date added, newest first" :
                        title = "Por fecha de agregación, las más nuevas primero"
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
                elif data.startswith("thumb") == True:
                    data = data.replace("thumb=", "")
                    data = data.strip()
                    thumbnail = data
                    if thumbnail == "":
                        thumbnail = logo
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
                    continue
                elif data.startswith("date") == True:
                    data = file.readline()
                    i = i + 1
                    print i
                    continue
                elif data.startswith("background") == True:
                    data = data.replace("background=", "")
                    fanart = data.strip()
                    if fanart == "":
                        fanart = background
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
                    continue
                elif data.startswith("URL") == True:
                    # If the entry defined no fanart, use the list's global one
                    if fanart == "":
                        fanart = background
                    data = data.replace("URL=", "")
                    data = data.strip()
                    url = data
                    parse_url(url)
                    # Dispatch on URL scheme/host to the matching resolver action.
                    if url.startswith("yt_channel") == True:
                        youtube_channel = url.replace("yt_channel(", "")
                        youtube_channel = youtube_channel.replace(")", "")
                        url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_channel + '/playlists?v=2&start-index=1&max-results=30'
                        plugintools.add_item(action="youtube_playlists" , title = title + ' [[COLOR red]You[COLOR white]tube Channel][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False)
                        break
                    elif url.startswith("yt_playlist") == True:
                        youtube_playlist = url.replace("yt_playlist(", "")
                        youtube_playlist = youtube_playlist.replace(")", "")
                        plugintools.log("youtube_playlist= "+youtube_playlist)
                        url = 'http://gdata.youtube.com/feeds/api/playlists/' + youtube_playlist + '?v=2'
                        plugintools.add_item( action = "youtube_videos" , title = title + ' [COLOR red][You[COLOR white]tube Playlist][/COLOR] [I][COLOR lightblue][/I][/COLOR]', url = url , thumbnail = art + "icon.png" , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )
                        data = file.readline()
                        i = i + 1
                        break
                    # yt(...) syntax to be retired, kept for Darío:
                    elif url.startswith("yt") == True:
                        url = url.replace("yt(", "")
                        youtube_user = url.replace(")", "")
                        url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_user + '/playlists?v=2&start-index=1&max-results=30'
                        plugintools.log("URL= "+url)
                        plugintools.log("FANART = "+fanart)
                        plugintools.add_item(action="youtube_playlists" , title = title + ' [COLOR red][You[COLOR white]tube Playlist][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False)
                        break
                    elif url.startswith("serie") == True:
                        url = url.replace("serie:", "")
                        plugintools.log("URL= "+url)
                        plugintools.log("FANART = "+fanart)
                        plugintools.add_item(action="seriecatcher" , title = title + ' [COLOR purple][Serie online][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , extra = fanart , folder = True , isPlayable = False)
                        break
                    elif url.startswith("http") == True:
                        # Hoster detection by substring match on the URL.
                        if url.find("allmyvideos") >= 0:
                            plugintools.add_item(action="allmyvideos" , title = title + ' [COLOR lightyellow][Allmyvideos][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            plugintools.log("URL= "+url)
                            break
                        elif url.find("streamcloud") >= 0:
                            plugintools.add_item(action="streamcloud" , title = title + ' [COLOR lightskyblue][Streamcloud][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            plugintools.log("URL= "+url)
                            plugintools.log("FANART = "+fanart)
                            break
                        elif url.find("played.to") >= 0:
                            plugintools.add_item(action="playedto" , title = title + ' [COLOR lavender][Played.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            plugintools.log("URL= "+url)
                            plugintools.log("FANART = "+fanart)
                            break
                        elif url.find("vidspot") >= 0:
                            plugintools.add_item(action="vidspot" , title = title + ' [COLOR palegreen][Vidspot][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            plugintools.log("URL= "+url)
                            plugintools.log("FANART = "+fanart)
                            break
                        elif url.find("vk.com") >= 0:
                            plugintools.add_item(action="vk" , title = title + ' [COLOR royalblue][Vk][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            plugintools.log("URL= "+url)
                            plugintools.log("FANART = "+fanart)
                            break
                        if url.find("nowvideo") >= 0:
                            plugintools.add_item(action="nowvideo" , title = title + ' [COLOR red][Nowvideo][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            plugintools.log("URL= "+url)
                            break
                        if url.find("tumi.tv") >= 0:
                            plugintools.add_item(action="tumi" , title = title + ' [COLOR forestgreen][Tumi][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            plugintools.log("URL= "+url)
                            break
                        if url.find("streamin.to") >= 0:
                            plugintools.add_item(action="streaminto" , title = title + ' [COLOR forestgreen][streamin.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            plugintools.log("URL= "+url)
                            break
                        elif url.endswith("flv") == True:
                            plugintools.log("URL= "+url)
                            plugintools.log("FANART = "+fanart)
                            plugintools.add_item( action = "play" , title = title + ' [COLOR cyan][Flash][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            break
                        elif url.endswith("m3u8") == True:
                            plugintools.log("URL= "+url)
                            plugintools.log("FANART = "+fanart)
                            plugintools.add_item( action = "play" , title = title + ' [COLOR purple][m3u8][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            break
                        elif url.find("youtube.com") >= 0:
                            plugintools.log("URL= "+url)
                            plugintools.log("FANART = "+fanart)
                            videoid = url.replace("https://www.youtube.com/watch?v=", "")
                            url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid
                            plugintools.add_item( action = "play" , title = title + ' [[COLOR red]You[COLOR white]tube Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            break
                        else:
                            # Unknown hoster: hand the raw URL to the player.
                            plugintools.log("URL= "+url)
                            plugintools.add_item( action = "play" , title = title + ' [COLOR white][HTTP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            break
                    elif url.startswith("rtmp") == True:
                        params["url"] = url
                        server_rtmp(params)
                        server = params.get("server")
                        url = params.get("url")
                        plugintools.add_item( action = "launch_rtmp" , title = title + '[COLOR green] [' + server + '][/COLOR]' , url = params.get("url") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                        break
                    elif url.startswith("plugin") == True:
                        if url.find("plugin.video.youtube") >= 0:
                            plugintools.log("URL= "+url)
                            plugintools.add_item( action = "play" , title = title + ' [COLOR white] [[COLOR red]You[COLOR white]tube Video][/COLOR][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
                            break
                        if url.find("plugin.video.p2p-streams") >= 0:
                            # mode=1 -> Acestream, mode=2 -> Sopcast
                            if url.find("mode=1") >= 0:
                                title = parser_title(title)
                                url = url.strip()
                                plugintools.add_item(action="play" , title = title + ' [COLOR lightblue][Acestream][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            elif url.find("mode=2") >= 0:
                                title = parser_title(title)
                                url = url.strip()
                                # NOTE(review): title_fixed is not defined anywhere
                                # in this function -- this branch raises NameError.
                                plugintools.add_item(action="play" , title = title_fixed + ' [COLOR lightblue][Sopcast][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                        elif url.startswith("sop") == True:
                            # plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast
                            title = parser_title(title)
                            url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=2&name='
                            url = url.strip()
                            plugintools.add_item(action="play" , title = title + ' [COLOR lightgreen][Sopcast][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            data = file.readline()
                            data = data.strip()
                            i = i + 1
                            #print i
                            continue
                        elif url.startswith("ace") == True:
                            title = parser_title(title)
                            url = url.replace("ace:", "")
                            url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name='
                            url = url.strip()
                            plugintools.add_item(action="play" , title = title + ' [COLOR lightblue][Acestream][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            data = file.readline()
                            data = data.strip()
                            i = i + 1
                            #print i
                            continue
                        # NOTE(review): startswith returns a bool, so
                        # ``>= 0`` is always True -- this branch fires for
                        # every URL that reaches it, magnet or not.
                        elif url.startswith("magnet") >= 0:
                            url = urllib.quote_plus(data)
                            title = parser_title(title)
                            url = 'plugin://plugin.video.pulsar/play?uri=' + url
                            plugintools.add_item(action="play" , title = title + ' [COLOR orangered][Torrent][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                        else:
                            plugintools.add_item(action="play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                            plugintools.log("URL = "+url)
                            break
                elif data == "" :
                    break
                else:
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
        if (data == 'type=playlist') == True:
            # If the entry defined no fanart, use the list's global one
            if fanart == "":
                fanart = background
            data = file.readline()
            i = i + 1
            print i
            while data <> "" :
                if data.startswith("name") == True :
                    data = data.replace("name=", "")
                    title = data.strip()
                    # Translate Navixtreme navigation/sort labels to Spanish.
                    if title == '>>>' :
                        title = title.replace(">>>", "[I][COLOR lightyellow]Siguiente[/I][/COLOR]")
                        data = file.readline()
                        data = data.strip()
                        i = i + 1
                    elif title == '<<<' :
                        title = title.replace("<<<", "[I][COLOR lightyellow]Anterior[/I][/COLOR]")
                        data = file.readline()
                        data = data.strip()
                        i = i + 1
                    elif title.find("Sorted by user-assigned order") >= 0:
                        title = "[I][COLOR lightyellow]Ordenar listas por...[/I][/COLOR]"
                        data = file.readline()
                        data = data.strip()
                        i = i + 1
                    elif title.find("Sorted A-Z") >= 0:
                        title = "[I][COLOR lightyellow][COLOR lightyellow]De la A a la Z[/I][/COLOR]"
                        data = file.readline()
                        data = data.strip()
                        i = i + 1
                    elif title.find("Sorted Z-A") >= 0:
                        title = "[I][COLOR lightyellow]De la Z a la A[/I][/COLOR]"
                        data = file.readline()
                        data = data.strip()
                        i = i + 1
                    elif title.find("Sorted by date added, newest first") >= 0:
                        title = "Ordenado por: Las + recientes primero..."
                        data = file.readline()
                        data = data.strip()
                        i = i + 1
                    elif title.find("Sorted by date added, oldest first") >= 0:
                        title = "Ordenado por: Las + antiguas primero..."
                        data = file.readline()
                        data = data.strip()
                        i = i + 1
                    elif title.find("by user-assigned order") >= 0:
                        title = "[COLOR lightyellow]Ordenar listas por...[/COLOR]"
                        data = file.readline()
                        data = data.strip()
                        i = i + 1
                    elif title.find("by date added, newest first") >= 0 :
                        title = "Las + recientes primero..."
                        data = file.readline()
                        data = data.strip()
                        i = i + 1
                    elif title.find("by date added, oldest first") >= 0 :
                        title = "Las + antiguas primero..."
                        data = file.readline()
                        data = data.strip()
                        i = i + 1
                elif data.startswith("thumb") == True:
                    data = data.replace("thumb=", "")
                    data = data.strip()
                    thumbnail = data
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
                    continue
                elif data.startswith("URL") == True:
                    data = data.replace("URL=", "")
                    data = data.strip()
                    url = data
                    parse_url(url)
                    # Nested playlists: m3u lists are fetched over HTTP,
                    # plx lists recurse into this same function.
                    if url.startswith("m3u") == True:
                        url = url.replace("m3u:", "")
                        plugintools.add_item(action="getfile_http" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False)
                    elif url.startswith("plx") == True:
                        url = url.replace("plx:", "")
                        plugintools.add_item(action="plx_items" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False)
                elif data == "" :
                    break
                else:
                    data = file.readline()
                    data = data.strip()
                    i = i + 1
                    print i
                    continue
    file.close()
    # Purge of bogus playlists created when opening PLX lists (from the
    # sort-order playlists that Navixtreme generates)
    if os.path.isfile(playlists + 'Siguiente.plx'):
        os.remove(playlists + 'Siguiente.plx')
        print "Correcto!"
    else:
        pass
    if os.path.isfile(playlists + 'Ordenar listas por....plx'):
        os.remove(playlists + 'Ordenar listas por....plx')
        print "Ordenar listas por....plx eliminado!"
        print "Correcto!"
    else:
        print "No es posible!"
        pass
    if os.path.isfile(playlists + 'A-Z.plx'):
        os.remove(playlists + 'A-Z.plx')
        print "A-Z.plx eliminado!"
    else:
        print "No es posible!"
        pass
    if os.path.isfile(playlists + 'De la A a la Z.plx'):
        os.remove(playlists + 'De la A a la Z.plx')
        print "De la A a la Z.plx eliminado!"
    else:
        print "No es posible!"
        pass
    if os.path.isfile(playlists + 'Z-A.plx'):
        os.remove(playlists + 'Z-A.plx')
        print "Z-A.plx eliminado!"
    else:
        print "No es posible!"
        pass
    if os.path.isfile(playlists + 'De la Z a la A.plx'):
        os.remove(playlists + 'De la Z a la A.plx')
        print "De la Z a la A.plx eliminado!"
    else:
        print "No es posible!"
        pass
    if os.path.isfile(playlists + 'Las + antiguas primero....plx'):
        os.remove(playlists + 'Las + antiguas primero....plx')
        print "Las más antiguas primero....plx eliminado!"
    else:
        print "No es posible!"
        pass
    if os.path.isfile(playlists + 'by date added, oldest first.plx'):
        os.remove(playlists + 'by date added, oldest first.plx')
        print "by date added, oldest first.plx eliminado!"
    else:
        print "No es posible!"
        pass
    if os.path.isfile(playlists + 'Las + recientes primero....plx'):
        os.remove(playlists + 'Las + recientes primero....plx')
    else:
        print "No es posible!"
        pass
    if os.path.isfile(playlists + 'by date added, newest first.plx'):
        os.remove(playlists + 'by date added, newest first.plx')
        print "by date added, newest first.plx eliminado!"
    else:
        print "No es posible!"
        pass
    if os.path.isfile(playlists + 'Sorted by user-assigned order.plx'):
        os.remove(playlists + 'Sorted by user-assigned order.plx')
        print "Sorted by user-assigned order.plx eliminado!"
    else:
        print "No es posible!"
        pass
    if os.path.isfile(playlists + 'Ordenado por.plx'):
        os.remove(playlists + 'Ordenado por.plx')
        print "Correcto!"
    else:
        print "No es posible!"
        pass
    if os.path.isfile(playlists + 'Ordenado por'):
        os.remove(playlists + 'Ordenado por')
        print "Correcto!"
    else:
        print "No es posible!"
        pass
def futbolenlatv(params):
plugintools.log("[nec tv-0.1.0].futbolenlaTV "+repr(params))
hora_partidos = []
lista_equipos=[]
campeonato=[]
canales=[]
url = params.get("url")
print url
fecha = get_fecha()
dia_manana = params.get("plot")
data = plugintools.read(url)
if dia_manana == "": # Control para si es agenda de hoy o mañana
plugintools.add_item(action="", title = '[COLOR green][B]FutbolenlaTV.com[/B][/COLOR] - [COLOR lightblue][I]Agenda para el día '+ fecha + '[/I][/COLOR]', folder = False , isPlayable = False )
else:
dia_manana = dia_manana.split("-")
dia_manana = dia_manana[2] + "/" + dia_manana[1] + "/" + dia_manana[0]
plugintools.add_item(action="", title = '[COLOR green][B]FutbolenlaTV.com[/B][/COLOR] - [COLOR lightblue][I]Agenda para el día '+ dia_manana + '[/I][/COLOR]', folder = False , isPlayable = False )
bloque = plugintools.find_multiple_matches(data,'<span class="cuerpo-partido">(.*?)</div>')
for entry in bloque:
category = plugintools.find_single_match(entry, '<i class=(.*?)</i>')
category = category.replace("ftvi-", "")
category = category.replace('comp">', '')
category = category.replace('"', '')
category = category.replace("-", " ")
category = category.replace("Futbol", "Fútbol")
category = category.strip()
category = category.capitalize()
plugintools.log("cat= "+category)
champ = plugintools.find_single_match(entry, '<span class="com-detalle">(.*?)</span>')
champ = encode_string(champ)
champ = champ.strip()
event = plugintools.find_single_match(entry, '<span class="bloque">(.*?)</span>')
event = encode_string(event)
event = event.strip()
momentum = plugintools.find_single_match(entry, '<time itemprop="startDate" datetime=([^<]+)</time>')
# plugintools.log("momentum= "+momentum)
momentum = momentum.split(">")
momentum = momentum[1]
gametime = plugintools.find_multiple_matches(entry, '<span class="n">(.*?)</span>')
for tiny in gametime:
day = tiny
month = tiny
sport = plugintools.find_single_match(entry, '<meta itemprop="eventType" content=(.*?)/>')
sport = sport.replace('"', '')
sport = sport.strip()
if sport == "Partido de fútbol":
sport = "Fútbol"
# plugintools.log("sport= "+sport)
gameday = plugintools.find_single_match(entry, '<span class="dia">(.*?)</span>')
rivals = plugintools.find_multiple_matches(entry, '<span>([^<]+)</span>([^<]+)<span>([^<]+)</span>')
rivales = ""
for diny in rivals:
print diny
items = len(diny)
items = items - 1
i = -1
diny[i].strip()
while i <= items:
if diny[i] == "":
del diny[0]
i = i + 1
else:
print diny[i]
rival = diny[i]
rival = encode_string(rival)
rival = rival.strip()
plugintools.log("rival= "+rival)
if rival == "-":
i = i + 1
continue
else:
if rivales != "":
rivales = rivales + " vs " + rival
plugintools.log("rivales= "+rivales)
break
else:
rivales = rival
plugintools.log("rival= "+rival)
i = i + 1
tv = plugintools.find_single_match(entry, '<span class="hidden-phone hidden-tablet canales"([^<]+)</span>')
tv = tv.replace(">", "")
tv = encode_string(tv)
if tv == "":
continue
else:
tv = tv.replace("(Canal+, Astra", "")
tv = tv.split(",")
tv_a = tv[0]
tv_a = tv_a.rstrip()
tv_a = tv_a.lstrip()
tv_a = tv_a.replace(")", "")
plugintools.log("tv_a= "+tv_a)
print len(tv)
if len(tv) == 2:
tv_b = tv[1]
tv_b = tv_b.lstrip()
tv_b = tv_b.rstrip()
tv_b = tv_b.replace(")", "")
tv_b = tv_b.replace("(Bar+ dial 333-334", "")
tv_b = tv_b.replace("(Canal+", "")
tv = tv_a + " / " + tv_b
plot = tv
plugintools.log("plot= "+plot)
elif len(tv) == 3:
tv_b = tv[1]
tv_b = tv_b.lstrip()
tv_b = tv_b.rstrip()
tv_b = tv_b.replace(")", "")
tv_b = tv_b.replace("(Bar+ dial 333-334", "")
tv_b = tv_b.replace("(Canal+", "")
tv_c = tv[2]
tv_c = tv_c.lstrip()
tv_c = tv_c.rstrip()
tv_c = tv_c.replace(")", "")
tv_c = tv_c.replace("(Bar+ dial 333-334", "")
tv_c = tv_c.replace("(Canal+", "")
tv = tv_a + " / " + tv_b + " / " + tv_c
plot = tv
plugintools.log("plot= "+plot)
elif len(tv) == 4:
tv_b = tv[1]
tv_b = tv_b.lstrip()
tv_b = tv_b.rstrip()
tv_b = tv_b.replace(")", "")
tv_b = tv_b.replace("(Bar+ dial 333-334", "")
tv_b = tv_b.replace("(Canal+", "")
tv_c = tv[2]
tv_c = tv_c.lstrip()
tv_c = tv_c.rstrip()
tv_c = tv_c.replace(")", "")
tv_c = tv_c.replace("(Bar+ dial 333-334", "")
tv_c = tv_c.replace("(Canal+", "")
tv_d = tv[3]
tv_d = tv_d.lstrip()
tv_d = tv_d.rstrip()
tv_d = tv_d.replace(")", "")
tv_d = tv_d.replace("(Bar+ dial 333-334", "")
tv_d = tv_d.replace("(Canal+", "")
tv = tv_a + " / " + tv_b + " / " + tv_c + " / " + tv_d
plot = tv
plugintools.log("plot= "+plot)
elif len(tv) == 5:
tv_b = tv[1]
tv_b = tv_b.lstrip()
tv_b = tv_b.rstrip()
tv_b = tv_b.replace(")", "")
tv_b = tv_b.replace("(Bar+ dial 333-334", "")
tv_b = tv_b.replace("(Canal+", "")
tv_c = tv[2]
tv_c = tv_c.lstrip()
tv_c = tv_c.rstrip()
tv_c = tv_c.replace(")", "")
tv_c = tv_c.replace("(Bar+ dial 333-334", "")
tv_c = tv_c.replace("(Canal+", "")
tv_d = tv[3]
tv_d = tv_d.lstrip()
tv_d = tv_d.rstrip()
tv_d = tv_d.replace(")", "")
tv_d = tv_d.replace("(Bar+ dial 333-334", "")
tv_d = tv_d.replace("(Canal+", "")
tv_e = tv[4]
tv_e = tv_e.lstrip()
tv_e = tv_e.rstrip()
tv_e = tv_e.replace(")", "")
tv_e = tv_e.replace("(Bar+ dial 333-334", "")
tv_e = tv_e.replace("(Canal+", "")
tv = tv_a + " / " + tv_b + " / " + tv_c + " / " + tv_d + " / " + tv_e
# tv = tv.replace(")", "")
plot = tv
plugintools.log("plot= "+plot)
elif len(tv) == 6:
tv_b = tv[1]
tv_b = tv_b.lstrip()
tv_b = tv_b.rstrip()
tv_b = tv_b.replace(")", "")
tv_b = tv_b.replace("(Bar+ dial 333-334", "")
tv_b = tv_b.replace("(Canal+", "")
tv_c = tv[2]
tv_c = tv_c.lstrip()
tv_c = tv_c.rstrip()
tv_c = tv_c.replace(")", "")
tv_c = tv_c.replace("(Bar+ dial 333-334", "")
tv_c = tv_c.replace("(Canal+", "")
tv_d = tv[3]
tv_d = tv_d.lstrip()
tv_d = tv_d.rstrip()
tv_d = tv_d.replace(")", "")
tv_d = tv_d.replace("(Bar+ dial 333-334", "")
tv_d = tv_d.replace("(Canal+", "")
tv_e = tv[4]
tv_e = tv_e.lstrip()
tv_e = tv_e.rstrip()
tv_e = tv_e.replace(")", "")
tv_e = tv_e.replace("(Bar+ dial 333-334", "")
tv_e = tv_e.replace("(Canal+", "")
tv_f = tv[5]
tv_f = tv_f.lstrip()
tv_f = tv_f.rstrip()
tv_f = tv_f.replace(")", "")
tv_f = tv_f.replace("(Bar+ dial 333-334", "")
tv_f = tv_f.replace("(Canal+", "")
tv = tv_a + " / " + tv_b + " / " + tv_c + " / " + tv_d + " / " + tv_e + " / " + tv_f
# tv = tv.replace(")", "")
plot = tv
plugintools.log("plot= "+plot)
elif len(tv) == 7:
tv_b = tv[1]
tv_b = tv_b.lstrip()
tv_b = tv_b.rstrip()
tv_b = tv_b.replace(")", "")
tv_b = tv_b.replace("(Bar+ dial 333-334", "")
tv_b = tv_b.replace("(Canal+", "")
tv_c = tv[2]
tv_c = tv_c.lstrip()
tv_c = tv_c.rstrip()
tv_c = tv_c.replace(")", "")
tv_c = tv_c.replace("(Bar+ dial 333-334", "")
tv_c = tv_c.replace("(Canal+", "")
tv_d = tv[3]
tv_d = tv_d.lstrip()
tv_d = tv_d.rstrip()
tv_d = tv_d.replace(")", "")
tv_d = tv_d.replace("(Bar+ dial 333-334", "")
tv_d = tv_d.replace("(Canal+", "")
tv_e = tv[4]
tv_e = tv_e.lstrip()
tv_e = tv_e.rstrip()
tv_e = tv_e.replace(")", "")
tv_e = tv_e.replace("(Bar+ dial 333-334", "")
tv_e = tv_e.replace("(Canal+", "")
tv_f = tv[5]
tv_f = tv_f.lstrip()
tv_f = tv_f.rstrip()
tv_f = tv_f.replace(")", "")
tv_f = tv_f.replace("(Bar+ dial 333-334", "")
tv_f = tv_f.replace("(Canal+", "")
tv_g = tv[6]
tv_g = tv_g.lstrip()
tv_g = tv_g.rstrip()
tv_g = tv_g.replace(")", "")
tv_g = tv_g.replace("(Bar+ dial 333-334", "")
tv_g = tv_g.replace("(Canal+", "")
tv = tv_a + " / " + tv_b + " / " + tv_c + " / " + tv_d + " / " + tv_e + " / " + tv_f + " / " + tv_g
plot = tv
plugintools.log("plot= "+plot)
else:
tv = tv_a
plot = tv_a
plugintools.log("plot= "+plot)
plugintools.add_item(action="contextMenu", plot = plot , title = momentum + "h " + '[COLOR lightyellow][B]' + category + '[/B][/COLOR] ' + '[COLOR green]' + champ + '[/COLOR]' + " " + '[COLOR lightyellow][I]' + rivales + '[/I][/COLOR] [I][COLOR red]' + plot + '[/I][/COLOR]' , thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , folder = True, isPlayable = False)
# plugintools.add_item(action="contextMenu", title = '[COLOR yellow][I]' + tv + '[/I][/COLOR]', thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , plot = plot , folder = True, isPlayable = False)
# plugintools.add_item(action="contextMenu", title = gameday + '/' + day + "(" + momentum + ") " + '[COLOR lightyellow][B]' + category + '[/B][/COLOR] ' + champ + ": " + rivales , plot = plot , thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , folder = True, isPlayable = False)
# plugintools.add_item(action="contextMenu", title = '[COLOR yellow][I]' + tv + '[/I][/COLOR]' , thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , plot = plot , folder = True, isPlayable = False)
def encode_string(txt):
    """Undo common UTF-8-decoded-as-Latin-1 (mojibake) sequences in *txt*.

    Each replace() maps one garbled byte sequence back to the intended
    Spanish accented character (or apostrophe / stray space).
    NOTE(review): several pairs below look duplicated; the lookalike
    source strings may actually be distinct byte sequences -- confirm
    before deduplicating.
    """
    plugintools.log("[nec tv-0.1.0].encode_string: "+txt)
    txt = txt.replace("ç", "ç")
    txt = txt.replace('é', 'é')
    txt = txt.replace('á', 'á')
    txt = txt.replace('é', 'é')
    txt = txt.replace('á', 'á')
    txt = txt.replace('ñ', 'ñ')
    txt = txt.replace('ú', 'ú')
    txt = txt.replace('í', 'í')
    txt = txt.replace('ó', 'ó')
    txt = txt.replace(''', "'")
    txt = txt.replace(" ", "")
    txt = txt.replace(" ", "")
    txt = txt.replace(''', "'")
    return txt
def splive_items(params):
    """Parse an SPlive-style XML channel list and add one playable item per <channel>.

    Each entry carries an rtmp URL plus optional pageUrl/token/playpath
    fields.  The isIliveTo and token flags select how the final RTMP
    command line is assembled: plain rtmp, tokenised rtmp, or the fixed
    iliveto edge server (with or without swfUrl).
    """
    plugintools.log("[nec tv-0.1.0].SPlive_items "+repr(params))
    data = plugintools.read( params.get("url") )
    channel = plugintools.find_multiple_matches(data,'<channel>(.*?)</channel>')
    for entry in channel:
        # plugintools.log("channel= "+channel)
        title = plugintools.find_single_match(entry,'<name>(.*?)</name>')
        category = plugintools.find_single_match(entry,'<category>(.*?)</category>')
        thumbnail = plugintools.find_single_match(entry,'<link_logo>(.*?)</link_logo>')
        rtmp = plugintools.find_single_match(entry,'<rtmp>([^<]+)</rtmp>')
        isIliveTo = plugintools.find_single_match(entry,'<isIliveTo>([^<]+)</isIliveTo>')
        rtmp = rtmp.strip()
        pageurl = plugintools.find_single_match(entry,'<url_html>([^<]+)</url_html>')
        link_logo = plugintools.find_single_match(entry,'<link_logo>([^<]+)</link_logo>')
        if pageurl == "SinProgramacion":
            # Feed placeholder meaning "no programme page available".
            pageurl = ""
        playpath = plugintools.find_single_match(entry, '<playpath>([^<]+)</playpath>')
        playpath = playpath.replace("Referer: ", "")
        token = plugintools.find_single_match(entry, '<token>([^<]+)</token>')
        iliveto = 'rtmp://188.122.91.73/edge'
        if isIliveTo == "0":
            if token == "0":
                # Plain rtmp URL, playable as-is.
                url = rtmp
                # NOTE(review): replacing "&" with "&" is a no-op -- presumably
                # this was meant to decode "&amp;" entities; confirm against feed.
                url = url.replace("&", "&")
                parse_url(url)
                plugintools.add_item( action = "play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True )
                plugintools.log("url= "+url)
            else:
                # Tokenised rtmp command line.
                url = rtmp + " pageUrl=" + pageurl + " " + 'token=' + token + playpath + " live=1"
                parse_url(url)
                plugintools.add_item( action = "play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True )
                plugintools.log("url= "+url)
        if isIliveTo == "1":
            if token == "1":
                # iliveto edge server with token.
                url = iliveto + " pageUrl=" + pageurl + " " + 'token=' + token + playpath + " live=1"
                url = url.replace("&", "&")
                parse_url(url)
                plugintools.add_item( action = "play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True )
                plugintools.log("url= "+url)
            else:
                # iliveto edge server with swfUrl/playpath instead of token.
                url = iliveto + ' swfUrl=' + rtmp + " playpath=" + playpath + " pageUrl=" + pageurl
                url = url.replace("&", "&")
                parse_url(url)
                plugintools.add_item( action = "play" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True )
                plugintools.log("url= "+url)
def get_fecha():
    """Return today's date as a "day/month/year" string (no zero padding)."""
    from datetime import datetime
    hoy = datetime.now()
    fecha = "%d/%d/%d" % (hoy.day, hoy.month, hoy.year)
    plugintools.log("fecha de hoy= "+fecha)
    return fecha
def p2p_items(params):
    """Render a downloaded .p2p playlist as Sopcast / Torrent / Acestream items.

    Resolves thumbnail/fanart for the list from a remote pastebin menu,
    downloads the list if needed, then walks the file line by line:
    blank and "default=" lines are skipped, "#" lines become non-URL
    header items, and every other line is a title followed by a stream
    URL on the next line (sop:// -> p2p-streams Sopcast mode, magnet ->
    Pulsar, anything else -> p2p-streams Acestream mode).
    """
    plugintools.log("[nec tv-0.1.0].p2p_items" +repr(params))
    # Locate the title
    title = params.get("plot")
    if title == "":
        title = params.get("title")
    data = plugintools.read("http://pastebin.com/raw.php?i=n9BF6Cwe")
    subcanal = plugintools.find_single_match(data,'<name>' + title + '(.*?)</subchannel>')
    thumbnail = plugintools.find_single_match(subcanal, '<thumbnail>(.*?)</thumbnail>')
    fanart = plugintools.find_single_match(subcanal, '<fanart>(.*?)</fanart>')
    plugintools.log("thumbnail= "+thumbnail)
    # Handle the case where the latinototal menu has no thumbnail
    if thumbnail == "":
        thumbnail = art + 'p2p.png'
    elif thumbnail == 'name_rtmp.png':
        thumbnail = art + 'p2p.png'
    if fanart == "":
        fanart = art + 'p2p.png'
    # Check whether the list has already been downloaded
    plot = params.get("plot")
    if plot == "":
        title = params.get("title")
        title = parser_title(title)
        filename = title + '.p2p'
        getfile_url(params)
    else:
        print "Lista ya descargada (plot no vacío)"
        filename = params.get("plot")
        params["ext"] = 'p2p'
        params["plot"]=filename
        filename = filename + '.p2p'
    plugintools.log("Lectura del archivo P2P")
    plugintools.add_item(action="" , title='[COLOR lightyellow][I][B]' + title + '[/B][/I][/COLOR]' , thumbnail=thumbnail , fanart=fanart , folder=False, isPlayable=False)
    # Open the P2P file and count its lines
    file = open(playlists + filename, "r")
    file.seek(0)
    data = file.readline()
    num_items = len(file.readlines())
    print num_items
    file.seek(0)
    data = file.readline()
    # Optional "default=thumb,fanart" header line overrides list artwork.
    if data.startswith("default") == True:
        data = data.replace("default=", "")
        data = data.split(",")
        thumbnail = data[0]
        fanart = data[1]
        plugintools.log("fanart= "+fanart)
    # Read the entries
    i = 0
    file.seek(0)
    data = file.readline()
    data = data.strip()
    while i <= num_items:
        if data == "":
            # Blank line: skip.
            data = file.readline()
            data = data.strip()
            # plugintools.log("linea vacia= "+data)
            i = i + 1
            #print i
            continue
        elif data.startswith("default") == True:
            # Artwork header already consumed above: skip.
            data = file.readline()
            data = data.strip()
            i = i + 1
            #print i
            continue
        elif data.startswith("#") == True:
            # Comment line: shown as a header item.
            title = data.replace("#", "")
            plugintools.log("title comentario= "+title)
            plugintools.add_item(action="play" , title = title , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
            data = file.readline()
            data = data.strip()
            i = i + 1
            continue
        else:
            # Title line followed by the stream URL on the next line.
            title = data
            title = title.strip()
            plugintools.log("title= "+title)
            data = file.readline()
            data = data.strip()
            i = i + 1
            #print i
            plugintools.log("linea URL= "+data)
            if data.startswith("sop") == True:
                print "empieza el sopcast..."
                # plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast
                title_fixed = parser_title(title)
                title = title.replace(" " , "+")
                url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name=' + title_fixed
                url = url.strip()
                plugintools.add_item(action="play" , title = title_fixed + ' [COLOR lightgreen][Sopcast][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                data = file.readline()
                data = data.strip()
                i = i + 1
                #print i
                continue
            elif data.startswith("magnet") == True:
                print "empieza el torrent..."
                url = urllib.quote_plus(data)
                title_fixed = parser_title(title)
                #plugin://plugin.video.pulsar/play?uri=<URL_ENCODED_LINK>
                url = 'plugin://plugin.video.pulsar/play?uri=' + url
                plugintools.add_item(action="play" , title = title_fixed + ' [COLOR orangered][Torrent][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                data = file.readline()
                data = data.strip()
                i = i + 1
                continue
            else:
                # Anything else is assumed to be an Acestream content hash.
                print "empieza el acestream..."
                # plugin://plugin.video.p2p-streams/?url=a55f96dd386b7722380802b6afffc97ff98903ac&mode=1&name=Sky+Sports+title
                title = parser_title(title)
                print title
                url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=1&name='
                plugintools.add_item(action="play" , title = title + ' [COLOR lightblue][Acestream][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)
                data = file.readline()
                data = data.strip()
                i = i + 1
                #print i
def contextMenu(params):
    """Show a channel-selection dialog for the listing entry in params["plot"].

    params["plot"] holds one or more channel names separated by "/".
    With a single channel we jump straight to search_channel(); otherwise
    a selection dialog is shown and the chosen channel (normalised so any
    "Gol*" variant becomes plain "Gol") is written back to params["plot"]
    before delegating to search_channel().  Cancelling the dialog does
    nothing.

    Fixes over the previous version: the seven near-identical branches are
    collapsed into one loop, and lists with more than seven channels no
    longer raise NameError (``selector`` was unbound in that case).
    """
    plugintools.log("[nec tv-0.1.0].contextMenu " +repr(params))
    dialog = xbmcgui.Dialog()
    plot = params.get("plot")
    canales = plot.split("/")
    print(len(canales))
    plugintools.log("canales= "+repr(canales))
    # Trim surrounding whitespace from each channel name.
    canales = [parse_channel(item) for item in canales]
    if len(canales) == 1:
        # Single channel: no dialog needed.
        search_channel(params)
        return
    # Historical quirk: the two-channel dialog was titled 'palcoTV',
    # every other size 'latinototal'.
    heading = 'palcoTV' if len(canales) == 2 else 'latinototal'
    selector = dialog.select(heading, canales)
    print(selector)
    if selector < 0:
        # Dialog cancelled.
        return
    tv = canales[selector]
    # Any "Gol*" variant is searched as plain "Gol".
    if tv.startswith("Gol"):
        tv = "Gol"
    params["plot"] = tv
    plugintools.log("tv= "+tv)
    search_channel(params)
def magnet_items(params):
    """List the magnet links of a downloaded .p2p file as Pulsar torrent items.

    The .p2p file mixes title lines and magnet URIs; blank lines are
    ignored.  Every magnet line is URL-encoded and wrapped in a
    plugin.video.pulsar play URL, matching the torrent handling in
    p2p_items().

    Fixes over the previous version: the Pulsar URL was built from the
    undefined names ``link`` and ``url`` (NameError on every magnet
    entry), and the file handle was never closed.
    """
    plugintools.log("[nec tv-0.1.0].magnet_items" +repr(params))
    plot = params.get("plot")
    title = params.get("title")
    fanart = ""
    thumbnail = ""
    if plot != "":
        # List already downloaded: reuse the stored name.
        filename = params.get("plot")
        params["ext"] = 'p2p'
        params["plot"]=filename
        title = plot + '.p2p'
    else:
        # First access: download the list, then derive the filename.
        getfile_url(params)
        title = params.get("title")
        title = title + '.p2p'
    # Read the whole P2P file, closing the handle promptly.
    f = open(playlists + title, "r")
    try:
        lineas = f.readlines()
    finally:
        f.close()
    for linea in lineas:
        data = linea.strip()
        if data.startswith("magnet:"):
            # plugin://plugin.video.pulsar/play?uri=<URL_ENCODED_LINK>
            url = 'plugin://plugin.video.pulsar/play?uri=' + urllib.quote_plus(data)
            plugintools.add_item(action="play" , title = data + ' [COLOR orangered][Torrent][/COLOR]' , url = url, thumbnail = art + 'p2p.png' , fanart = art + 'fanart.jpg' , folder = False , isPlayable = True)
def parse_channel(txt):
    """Return *txt* with surrounding whitespace removed."""
    plugintools.log("[nec tv-0.1.0].encode_string: "+txt)
    return txt.strip()
def futbolenlatv_manana(params):
    """Build tomorrow's agenda URL for futbolenlatv.com and delegate to futbolenlatv().

    Computes tomorrow's date, renders it as "<year>-<spanish month
    name>-<day>" (the slug the mobile agenda URL expects), stores the URL
    and slug in params, then calls futbolenlatv(params).

    Improvement: the twelve-branch if/elif month chain is replaced by a
    lookup table; output is byte-identical.
    """
    plugintools.log("[nec tv-0.1.0].futbolenlatv " + repr(params))
    import datetime
    # Spanish month names indexed 1-12 (index 0 unused).
    meses = ("", "enero", "febrero", "marzo", "abril", "mayo", "junio",
             "julio", "agosto", "septiembre", "octubre", "noviembre", "diciembre")
    manana = datetime.date.today() + datetime.timedelta(days=1)
    plot = str(manana.year) + "-" + meses[manana.month] + "-" + str(manana.day)
    print(manana)
    url = 'http://www.futbolenlatv.com/m/Fecha/' + plot + '/agenda/false/false'
    plugintools.log("URL mañana= "+url)
    params["url"] = url
    params["plot"] = plot
    futbolenlatv(params)
def parser_title(title):
    """Strip XBMC colour/format BBCode tags and list-type suffixes from *title*.

    Used to turn a decorated menu title into a plain string safe for use
    as a filename (no brackets, no markup).
    """
    plugintools.log("[nec tv-0.1.0].parser_title " + title)
    # Tokens are removed in this exact order; later bracketed patterns
    # assume the earlier colour/format tags are already gone.
    tokens = (
        "[COLOR lightyellow]", "[COLOR green]", "[COLOR red]", "[COLOR blue]",
        "[COLOR royalblue]", "[COLOR white]", "[COLOR pink]", "[COLOR cyan]",
        "[COLOR steelblue]", "[COLOR forestgreen]", "[COLOR olive]",
        "[COLOR khaki]", "[COLOR lightsalmon]", "[COLOR orange]",
        "[COLOR lightgreen]", "[COLOR lightblue]", "[COLOR lightpink]",
        "[COLOR skyblue]", "[COLOR darkorange]", "[COLOR greenyellow]",
        "[COLOR yellow]", "[COLOR yellowgreen]", "[COLOR orangered]",
        "[COLOR grey]", "[COLOR gold]", "[COLOR=FF00FF00]", "[/COLOR]",
        "[B]", "[/B]", "[I]", "[/I]",
        "[Auto]", "[Parser]", "[TinyURL]",
        # Avoid filenames containing brackets:
        " [Lista M3U]", " [Lista PLX]", " [Multilink]",
        " [COLOR orange][Lista [B]PLX[/B]][/COLOR]",
        " [COLOR orange][Lista [B]M3U[/B]][/COLOR]",
        " [COLOR lightyellow][B][Dailymotion[/B] playlist][/COLOR]",
        " [COLOR lightyellow][B][Dailymotion[/B] video][/COLOR]",
    )
    for token in tokens:
        title = title.replace(token, "")
    title = title.strip()
    if title.endswith(" .plx"):
        title = title.replace(" .plx", ".plx")
    plugintools.log("title_parsed= "+title)
    return title
def json_items(params):
    """Render a JSON-formatted channel list: a header item plus one item per station.

    The payload is scraped with regexes rather than json.loads().
    Stations without an "isHost" marker are added as raw channels tagged
    with the server name from server_rtmp(); hosted stations are
    dispatched by URL substring (allmyvideos, streamcloud, played.to,
    vidspot, vk, nowvideo, tumi, streamin.to) to the matching resolver
    action.
    """
    plugintools.log("[nec tv-0.1.0].json_items "+repr(params))
    data = plugintools.read(params.get("url"))
    # List title and author
    match = plugintools.find_single_match(data, '"name"(.*?)"url"')
    match = match.split(",")
    namelist = match[0].strip()
    author = match[1].strip()
    namelist = namelist.replace('"', "")
    namelist = namelist.replace(": ", "")
    author = author.replace('"author":', "")
    author = author.replace('"', "")
    fanart = params.get("extra")
    thumbnail = params.get("thumbnail")
    plugintools.log("title= "+namelist)
    plugintools.log("author= "+author)
    plugintools.add_item(action="", title = '[B][COLOR lightyellow]' + namelist + '[/B][/COLOR]' , url = "" , thumbnail = thumbnail , fanart = fanart, isPlayable = False , folder = False)
    # List entries
    data = plugintools.find_single_match(data, '"stations"(.*?)]')
    matches = plugintools.find_multiple_matches(data, '"name"(.*?)}')
    for entry in matches:
        if entry.find("isHost") <= 0:
            # No hoster marker: treat as a raw channel playable directly.
            title = plugintools.find_single_match(entry,'(.*?)\n')
            title = title.replace(": ", "")
            title = title.replace('"', "")
            title = title.replace(",", "")
            url = plugintools.find_single_match(entry,'"url":(.*?)\n')
            url = url.replace('"', "")
            url = url.strip()
            params["url"]=url
            server_rtmp(params)
            thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n')
            thumbnail = thumbnail.replace('"', "")
            thumbnail = thumbnail.replace(',', "")
            thumbnail = thumbnail.strip()
            plugintools.log("thumbnail= "+thumbnail)
            # Fallback for lists that don't carry a logo per entry
            if thumbnail == "" :
                thumbnail = params.get("thumbnail")
            plugintools.add_item( action="play" , title = '[COLOR white] ' + title + '[COLOR green] ['+ params.get("server") + '][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )
        else:
            # Hosted station: pick the resolver from the URL.
            title = plugintools.find_single_match(entry,'(.*?)\n')
            title = title.replace(": ", "")
            title = title.replace('"', "")
            title = title.replace(",", "")
            url = plugintools.find_single_match(entry,'"url":(.*?)\n')
            url = url.replace('"', "")
            url = url.strip()
            if url.find("allmyvideos")>= 0:
                url = url.replace(",", "")
                plugintools.log("url= "+url)
                fanart = params.get("extra")
                thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n')
                thumbnail = thumbnail.replace('"', "")
                thumbnail = thumbnail.replace(',', "")
                thumbnail = thumbnail.strip()
                plugintools.log("thumbnail= "+thumbnail)
                if thumbnail == "":
                    thumbnail = params.get("thumbnail")
                plugintools.add_item( action="allmyvideos" , title = title + ' [COLOR lightyellow][Allmyvideos][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )
            elif url.find("streamcloud") >= 0:
                url = url.replace(",", "")
                plugintools.log("url= "+url)
                fanart = params.get("extra")
                thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n')
                thumbnail = thumbnail.replace('"', "")
                thumbnail = thumbnail.replace(',', "")
                thumbnail = thumbnail.strip()
                plugintools.log("thumbnail= "+thumbnail)
                if thumbnail == "":
                    thumbnail = params.get("thumbnail")
                plugintools.add_item( action="streamcloud" , title = title + ' [COLOR lightskyblue][Streamcloud][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )
            elif url.find("played.to") >= 0:
                url = url.replace(",", "")
                plugintools.log("url= "+url)
                fanart = params.get("extra")
                thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n')
                thumbnail = thumbnail.replace('"', "")
                thumbnail = thumbnail.replace(',', "")
                thumbnail = thumbnail.strip()
                plugintools.log("thumbnail= "+thumbnail)
                if thumbnail == "":
                    thumbnail = params.get("thumbnail")
                plugintools.add_item( action="playedto" , title = title + ' [COLOR lavender][Played.to][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )
            elif url.find("vidspot") >= 0:
                url = url.replace(",", "")
                plugintools.log("url= "+url)
                fanart = params.get("extra")
                thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n')
                thumbnail = thumbnail.replace('"', "")
                thumbnail = thumbnail.replace(',', "")
                thumbnail = thumbnail.strip()
                plugintools.log("thumbnail= "+thumbnail)
                if thumbnail == "":
                    thumbnail = params.get("thumbnail")
                plugintools.add_item( action="vidspot" , title = title + ' [COLOR palegreen][Vidspot][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )
            # NOTE(review): the following checks are plain ifs, not elifs,
            # so a URL matching several patterns adds several items, and the
            # final else pairs only with the streamin.to check -- confirm
            # whether this fall-through is intentional.
            if url.find("vk.com")>= 0:
                url = url.replace(",", "")
                plugintools.log("url= "+url)
                fanart = params.get("extra")
                thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n')
                thumbnail = thumbnail.replace('"', "")
                thumbnail = thumbnail.replace(',', "")
                thumbnail = thumbnail.strip()
                plugintools.log("thumbnail= "+thumbnail)
                if thumbnail == "":
                    thumbnail = params.get("thumbnail")
                plugintools.add_item( action="vk" , title = title + ' [COLOR royalblue][Vk][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )
            if url.find("nowvideo")>= 0:
                url = url.replace(",", "")
                plugintools.log("url= "+url)
                fanart = params.get("extra")
                thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n')
                thumbnail = thumbnail.replace('"', "")
                thumbnail = thumbnail.replace(',', "")
                thumbnail = thumbnail.strip()
                plugintools.log("thumbnail= "+thumbnail)
                if thumbnail == "":
                    thumbnail = params.get("thumbnail")
                plugintools.add_item( action="nowvideo" , title = title + ' [COLOR palegreen][Nowvideo][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )
            if url.find("tumi")>= 0:
                url = url.replace(",", "")
                plugintools.log("url= "+url)
                fanart = params.get("extra")
                thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n')
                thumbnail = thumbnail.replace('"', "")
                thumbnail = thumbnail.replace(',', "")
                thumbnail = thumbnail.strip()
                plugintools.log("thumbnail= "+thumbnail)
                if thumbnail == "":
                    thumbnail = params.get("thumbnail")
                plugintools.add_item( action="tumi" , title = title + ' [COLOR forestgreen][Tumi][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )
            if url.find("streamin.to")>= 0:
                url = url.replace(",", "")
                plugintools.log("url= "+url)
                fanart = params.get("extra")
                thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n')
                thumbnail = thumbnail.replace('"', "")
                thumbnail = thumbnail.replace(',', "")
                thumbnail = thumbnail.strip()
                plugintools.log("thumbnail= "+thumbnail)
                if thumbnail == "":
                    thumbnail = params.get("thumbnail")
                plugintools.add_item( action="streaminto" , title = title + ' [COLOR orange][streamin.to][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )
            else:
                # Channels not (yet) playable in XBMC
                params["url"]=url
                server_rtmp(params)
                plugintools.add_item( action="play" , title = '[COLOR red] ' + title + ' ['+ params.get("server") + '][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )
            # NOTE(review): this branch recomputes artwork but adds no item;
            # looks like dead code -- confirm before removing.
            if title == "":
                plugintools.log("url= "+url)
                fanart = params.get("extra")
                thumbnail = plugintools.find_single_match(entry,'"image":(.*?)\n')
                thumbnail = thumbnail.replace('"', "")
                thumbnail = thumbnail.replace(',', "")
                thumbnail = thumbnail.strip()
                plugintools.log("thumbnail= "+thumbnail)
                if thumbnail == "":
                    thumbnail = params.get("thumbnail")
def youtube_playlists(params):
    """Add one browsable folder item per playlist found in a YouTube XML feed."""
    plugintools.log("[nec tv-0.1.0].youtube_playlists "+repr(params))
    feed = plugintools.read( params.get("url") )
    fanart = art + 'youtube.png'
    for entry in plugintools.find_multiple_matches(feed,"<entry(.*?)</entry>"):
        plugintools.log("entry="+entry)
        plugintools.add_item(
            action="youtube_videos",
            title=plugintools.find_single_match(entry,"<titl[^>]+>([^<]+)</title>"),
            plot=plugintools.find_single_match(entry,"<media\:descriptio[^>]+>([^<]+)</media\:description>"),
            url=plugintools.find_single_match(entry,"<content type\='application/atom\+xml\;type\=feed' src='([^']+)'/>"),
            thumbnail=plugintools.find_single_match(entry,"<media\:thumbnail url='([^']+)'"),
            fanart=fanart,
            folder=True )
        plugintools.log("fanart= "+fanart)
# Lists every video of a YouTube playlist
def youtube_videos(params):
    """Add one playable item per video found in the playlist feed at params["url"]."""
    plugintools.log("[nec tv-0.1.0].youtube_videos "+repr(params))
    # Fetch the video list from the YouTube feed
    feed = plugintools.read( params.get("url") )
    plugintools.log("data= "+feed)
    fanart = art+'youtube.png'
    # Extract the items from the feed
    for entry in plugintools.find_multiple_matches(feed,"<entry(.*?)</entry>"):
        plugintools.log("entry="+entry)
        # Quick-and-dirty XML scraping via regex, matching the feed's layout.
        title = plugintools.find_single_match(entry,"<titl[^>]+>([^<]+)</title>").replace("I Love Handball | ","")
        plot = plugintools.find_single_match(entry,"<summa[^>]+>([^<]+)</summa")
        thumbnail = plugintools.find_single_match(entry,"<media\:thumbnail url='([^']+)'")
        video_id = plugintools.find_single_match(entry,"http\://www.youtube.com/watch\?v\=([0-9A-Za-z_-]{11})")
        play_url = "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid="+video_id
        # Append a new playable item to the XBMC item list
        plugintools.add_item( action="play" , title=title , plot=plot , url=play_url , thumbnail=thumbnail , fanart=fanart , isPlayable=True, folder=False )
def server_rtmp(params):
    """Detect the streaming server type from params["url"] and tag params.

    Sets params["server"] to a short server identifier found by substring
    search in the URL (first match wins, preserving the precedence of the
    original if/elif chain -- e.g. "hdcast" before "dcast").  For every
    server except those in ``no_timeout``, an RTMP ``timeout=15`` option
    is appended to params["url"] when not already present.  Unrecognised
    URLs get server='undefined' (with timeout).  Returns params.

    Improvement: the ~230-line duplicated elif chain -- including dead
    duplicate branches for shidurlive and playfooty -- is replaced by an
    ordered lookup table with identical behaviour.
    """
    plugintools.log("[nec tv-0.1.0].server_rtmp " + repr(params))
    url = params.get("url")
    plugintools.log("URL= "+url)
    # (substring, server id) pairs -- order matters: first match wins.
    patterns = (
        ("iguide.to", 'iguide'), ("freetvcast.pw", 'freetvcast'),
        ("9stream", '9stream'), ("freebroadcast", 'freebroadcast'),
        ("goodgame.ru", 'goodgame.ru'), ("hdcast", 'hdcast'),
        ("sharecast", 'sharecast'), ("cast247", 'cast247'),
        ("castalba", 'castalba'), ("direct2watch", 'direct2watch'),
        ("vaughnlive", 'vaughnlive'), ("streamingfreetv", 'streamingfreetv'),
        ("totalplay", 'totalplay'), ("shidurlive", 'shidurlive'),
        ("everyon", 'everyon'), ("iviplanet", 'iviplanet'),
        ("cxnlive", 'cxnlive'), ("ucaster", 'ucaster'),
        ("mediapro", 'mediapro'), ("veemi", 'veemi'),
        ("yukons.net", 'yukons.net'), ("janjua", 'janjua'),
        ("mips", 'mips'), ("zecast", 'zecast'),
        ("vertvdirecto", 'vertvdirecto'), ("filotv", 'filotv'),
        ("dinozap", 'dinozap'), ("ezcast", 'ezcast'),
        ("flashstreaming", 'flashstreaming'), ("multistream", 'multistream'),
        ("playfooty", 'playfooty'), ("flashtv", 'flashtv'),
        ("04stream", '04stream'), ("vercosas", 'vercosasgratis'),
        ("dcast", 'dcast'), ("pvtserverz", 'pvtserverz'),
    )
    # These servers returned immediately in the original chain without
    # appending a timeout option.
    no_timeout = ('freetvcast', 'streamingfreetv', 'shidurlive')
    server = 'undefined'
    for needle, name in patterns:
        if url.find(needle) >= 0:
            server = name
            break
    params["server"] = server
    if server not in no_timeout and url.find("timeout") < 0:
        params["url"] = url + ' timeout=15'
    return params
def launch_rtmp(params):
    """Resolve and play a live/RTMP stream described by *params*.

    Dispatch order: first match on a server tag appended to the title
    (e.g. "[9stream]"), then fall back to substring matches on the URL.
    Servers that need a dedicated resolver get their helper called;
    everything else is handed straight to plugintools.play_resolved_url().
    Side effect: params["server"] is set to the detected server name.
    """
    plugintools.log("[nec tv-0.1.0].launch_rtmp " + repr(params))
    url = params.get("url")
    title = params.get("title")
    title = title.replace("[/COLOR]", "")
    title = title.strip()
    plugintools.log("Vamos a buscar en el título: "+title)
    # --- dispatch on the server tag embedded in the title ---
    if title.endswith("[9stream]") == True:
        params["server"] = '9stream'
        ninestreams(params)
    elif title.endswith("[iguide]") == True:
        plugintools.log("es un iguide!")
        params["server"] = 'iguide'
        # DEBUG: Keyboard: scancode: 0x01, sym: 0x001b, unicode: 0x001b, modifier: 0x0
        #pDialog = xbmcgui.DialogProgress()
        #msg = pDialog.create('latinototal', 'Intentando reproducir RTMP...')
        plugintools.play_resolved_url(url)
        #xbmc.sleep(15000)
        #plugintools.stop_resolved_url(url)
    elif title.endswith("[streamingfreetv]") == True:
        print 'streamingfreetv'
        params["server"] = 'streamingfreetv'
        streamingfreetv(params)
    elif title.endswith("[vercosasgratis]") == True:
        print 'vercosasgratis'
        params["server"] = 'vercosasgratis'
        vercosas(params)
    elif title.endswith("[freebroadcast]") == True:
        print 'freebroadcast'
        params["server"] = 'freebroadcast'
        freebroadcast(params)
    elif title.endswith("[ucaster]") == True:
        params["server"] = 'ucaster'
        plugintools.play_resolved_url(url)
    elif title.endswith("[direct2watch]") == True:
        params["server"] = 'direct2watch'
        directwatch(params)
    elif title.endswith("[shidurlive]") == True:
        params["server"] = 'shidurlive'
        shidurlive(params)
    elif title.endswith("[cast247]") == True:
        params["server"] = 'cast247'
        castdos(params)
    # --- no title tag matched: fall back to matching on the URL ---
    elif url.find("hdcast") >= 0:
        params["server"] = 'hdcast'
        plugintools.play_resolved_url(url)
    elif url.find("janjua") >= 0:
        params["server"] = 'janjua'
        plugintools.play_resolved_url(url)
    elif url.find("mips") >= 0:
        params["server"] = 'mips'
        plugintools.play_resolved_url(url)
    elif url.find("zecast") >= 0:
        params["server"] = 'zecast'
        plugintools.play_resolved_url(url)
    elif url.find("filotv") >= 0:
        params["server"] = 'filotv'
        print "filotv"
        plugintools.play_resolved_url(url)
    elif url.find("ezcast") >= 0:
        params["server"] = 'ezcast'
        plugintools.play_resolved_url(url)
    elif url.find("flashstreaming") >= 0:
        params["server"] = 'flashstreaming'
        plugintools.play_resolved_url(url)
    elif url.find("shidurlive") >= 0:
        params["server"] = 'shidurlive'
        plugintools.play_resolved_url(url)
    elif url.find("multistream") >= 0:
        params["server"] = 'multistream'
        print "multistream"
        plugintools.play_resolved_url(url)
    elif url.find("playfooty") >= 0:
        params["server"] = 'playfooty'
        plugintools.play_resolved_url(url)
    elif url.find("flashtv") >= 0:
        params["server"] = 'flashtv'
        print "flashtv"
        plugintools.play_resolved_url(url)
    elif url.find("freetvcast") >= 0:
        params["server"] = 'freetvcast'
        print "freetvcast"
        freetvcast(params)
    elif url.find("04stream") >= 0:
        params["server"] = '04stream'
        plugintools.play_resolved_url(url)
    elif url.find("sharecast") >= 0:
        params["server"] = 'sharecast'
        plugintools.play_resolved_url(url)
    elif url.find("vaughnlive") >= 0:
        params["server"] = 'vaughnlive'
        resolve_vaughnlive(params)
    elif url.find("goodcast") >= 0:
        params["server"] = 'goodcast'
        plugintools.play_resolved_url(url)
    elif url.find("dcast.tv") >= 0:
        params["server"] = 'dcast.tv'
        plugintools.play_resolved_url(url)
    elif url.find("castalba") >= 0:
        params["server"] = 'castalba'
        castalba(params)
    elif url.find("tutelehd.com") >= 0:
        params["server"] = 'tutelehd.com'
        plugintools.play_resolved_url(url)
    elif url.find("flexstream") >= 0:
        params["server"] = 'flexstream'
        plugintools.play_resolved_url(url)
    elif url.find("xxcast") >= 0:
        params["server"] = 'xxcast'
        plugintools.play_resolved_url(url)
    elif url.find("vipi.tv") >= 0:
        params["server"] = 'vipi.tv'
        plugintools.play_resolved_url(url)
    elif url.find("watchjsc") >= 0:
        params["server"] = 'watchjsc'
        plugintools.play_resolved_url(url)
    elif url.find("zenex.tv") >= 0:
        params["server"] = 'zenex.tv'
        plugintools.play_resolved_url(url)
    elif url.find("castto") >= 0:
        params["server"] = 'castto'
        plugintools.play_resolved_url(url)
    elif url.find("tvzune") >= 0:
        params["server"] = 'tvzune'
        plugintools.play_resolved_url(url)
    elif url.find("flashcast") >= 0:
        params["server"] = 'flashcast'
        plugintools.play_resolved_url(url)
    elif url.find("ilive.to") >= 0:
        params["server"] = 'ilive.to'
        print "iliveto"
        plugintools.play_resolved_url(url)
    elif url.find("Direct2Watch") >= 0:
        params["server"] = 'Direct2Watch'
        print "direct2watch"
        plugintools.play_resolved_url(url)
    else:
        # Unknown server: try a direct play anyway
        print "No ha encontrado launcher"
        params["server"] = 'undefined'
        print "ninguno"
        plugintools.play_resolved_url(url)
def peliseries(params):
plugintools.log("[nec tv-0.1.0].peliseries " +repr(params))
# Abrimos archivo remoto
url = params.get("url")
filepelis = urllib2.urlopen(url)
# Creamos archivo local para pegar las entradas
plot = params.get("plot")
plot = parser_title(plot)
if plot == "":
title = params.get("title")
title = parser_title(title)
filename = title + ".m3u"
fh = open(playlists + filename, "wb")
else:
filename = params.get("plot") + ".m3u"
fh = open(playlists + filename, "wb")
plugintools.log("filename= "+filename)
url = params.get("url")
plugintools.log("url= "+url)
#open the file for writing
fw = open(playlists + filename, "wb")
#open the file for writing
fh = open(playlists + 'filepelis.m3u', "wb")
fh.write(filepelis.read())
fh.close()
fw = open(playlists + filename, "wb")
fr = open(playlists + 'filepelis.m3u', "r")
fr.seek(0)
num_items = len(fr.readlines())
print num_items
fw.seek(0)
fr.seek(0)
data = fr.readline()
fanart = params.get("extra")
thumbnail = params.get("thumbnail")
fw.write('#EXTM3U:"background"='+fanart+',"thumbnail"='+thumbnail)
fw.write("#EXTINF:-1,[COLOR lightyellow][I]playlists / " + filename + '[/I][/COLOR]' + '\n\n')
i = 0
while i <= num_items:
if data == "":
data = fr.readline()
data = data.strip()
plugintools.log("data= " +data)
i = i + 1
print i
continue
elif data.find("http") >= 0 :
data = data.split("http")
chapter = data[0]
chapter = chapter.strip()
url = "http" + data[1]
url = url.strip()
plugintools.log("url= "+url)
fw.write("\n#EXTINF:-1," + chapter + '\n')
fw.write(url + '\n\n')
data = fr.readline()
plugintools.log("data= " +data)
i = i + 1
print i
continue
else:
data = fr.readline()
data = data.strip()
plugintools.log("data= "+data)
i = i + 1
print i
continue
fw.close()
fr.close()
params["ext"]='m3u'
filename = filename.replace(".m3u", "")
params["plot"]=filename
params["title"]=filename
# Capturamos de nuevo thumbnail y fanart
os.remove(playlists + 'filepelis.m3u')
simpletv_items(params)
def tinyurl(params):
    """Expand a shortened URL via getlinkinfo.com and play the target.

    Scrapes the "Effective URL" field from the service's response and
    hands any http(s) result to plugintools.play_resolved_url().
    """
    plugintools.log("[nec tv-0.1.0].tinyurl "+repr(params))
    short_url = params.get("url")
    url_getlink = 'http://www.getlinkinfo.com/info?link=' +short_url
    plugintools.log("url_fixed= "+url_getlink)
    # Spoof a desktop browser so the service answers normally
    request_headers = [["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"]]
    body,response_headers = plugintools.read_body_and_headers(url_getlink, headers=request_headers)
    plugintools.log("data= "+body)
    matches = plugintools.find_multiple_matches(body, '<dt class="link-effective-url">Effective URL</dt>(.*?)</a></dd>')
    xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Redireccionando enlace...", 3 , art+'icon.png'))
    for raw in matches:
        # Strip the anchor markup, then keep the quoted href value
        cleaned = raw.replace("<dd><a href=", "").replace('rel="nofollow">', "")
        link = cleaned.split('"')[1].strip()
        plugintools.log("vamos= "+link)
        if link.startswith("http"):
            plugintools.play_resolved_url(link)
# Talks to the longURL.org service to recover the original (expanded) URL
def longurl(params):
    """Expand *params["url"]* through the longURL.org API and play it.

    The API answers with ``<long-url><![CDATA[...]]></long-url>``; the
    CDATA wrapper is stripped and any http(s) result is played. On any
    error the generic play() fallback is used instead.

    Fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; it is narrowed to ``except Exception``.
    """
    plugintools.log("[nec tv-0.1.0].longURL "+repr(params))
    url = params.get("url")
    url_getlink = 'http://api.longurl.org/v2/expand?url=' +url
    plugintools.log("url_fixed= "+url_getlink)
    try:
        request_headers=[]
        request_headers.append(["User-Agent","Application-Name/3.7"])
        body,response_headers = plugintools.read_body_and_headers(url_getlink, headers=request_headers)
        plugintools.log("data= "+body)
        # Example response: <long-url><![CDATA[http://host/path]]></long-url>
        longurl = plugintools.find_single_match(body, '<long-url>(.*?)</long-url>')
        longurl = longurl.replace("<![CDATA[", "")
        longurl = longurl.replace("]]>", "")
        plugintools.log("longURL= "+longurl)
        if longurl.startswith("http"):
            plugintools.play_resolved_url(longurl)
    except Exception:
        # Service unreachable or unparsable answer: fall back to direct play
        play(params)
def opentxt(self):
    """Show a text box widget, apparently intended to display a log file.

    NOTE(review): this looks broken — `window` is not defined anywhere in
    scope, so setVisible(window) raises NameError when called, and
    setText('log.txt') sets the literal string, not the file's contents.
    Confirm the intended behavior before relying on this function.
    """
    texto = xbmcgui.ControlTextBox (100, 250, 300, 300, textColor='0xFFFFFFFF')
    texto.setText('log.txt')
    texto.setVisible(window)
def encode_url(url):
url_fixed= urlencode(url)
print url_fixed
def seriecatcher(params):
    """Fetch a series page and hand its season labels to the picker dialog."""
    plugintools.log("[nec tv-0.1.0].seriecatcher "+repr(params))
    page_url = params.get("url")
    fanart = params.get("extra")  # kept for parity with the original (unused here)
    markup = plugintools.read(page_url)
    season_labels = plugintools.find_multiple_matches(markup, '<i class=\"glyphicon\"></i>(.*?)</a>')
    SelectTemp(params, season_labels)
def GetSerieChapters(params):
    """List every chapter of the season chosen in params["season"].

    Cuts the page markup at the season label, extracts chapter links and
    titles, and emits one entry per chapter through GetSerieLinks().

    Fix: the original wrapped the two regex extractions in
    ``for entry in season:`` — iterating the markup character by
    character, re-running both regexes per character and discarding all
    but the last (identical) result. They are now run exactly once.
    """
    plugintools.log("[nec tv-0.1.0].GetSerieChapters "+repr(params))
    season = params.get("season")
    data = plugintools.read(params.get("url"))
    season = plugintools.find_multiple_matches(data, season + '(.*?)</table>')
    season = season[0]
    url_cap = plugintools.find_multiple_matches(season, '<a href=\"/capitulo(.*?)\" class=\"color4\"')
    title = plugintools.find_multiple_matches(season, 'class=\"color4\">(.*?)</a>')
    num_items = len(url_cap)
    fanart = params.get("extra")  # invariant: hoisted out of the loop
    for i in range(num_items):
        url_cap_fixed = 'http://seriesadicto.com/capitulo/' + url_cap[i]
        # i+1 preserves the original 1-based chapter counter
        GetSerieLinks(fanart , url_cap_fixed, i + 1, title[i])
def GetSerieLinks(fanart , url_cap_fixed, i, title_fixed):
    """Add one playable entry per hosted mirror found on a chapter page.

    Scrapes the chapter page for links on each known hoster and emits a
    colour-coded playable item for every match.

    Fix: the Vk / Nowvideo / Tumi loops were a copy-paste of the
    played.to loop — they iterated the played.to matches (`plydt`) and
    built malformed URLs such as 'vk.com<played.to-path>'. Each hoster
    now gets its own extraction and a proper absolute URL.
    """
    plugintools.log("[nec tv-0.1.0].GetSerieLinks")
    data = plugintools.read(url_cap_fixed)
    amv = plugintools.find_multiple_matches(data, 'allmyvideos.net/(.*?)"')
    strcld = plugintools.find_multiple_matches(data, 'streamcloud.eu/(.*?)"')
    vdspt = plugintools.find_multiple_matches(data, 'vidspot.net/(.*?)"')
    plydt = plugintools.find_multiple_matches(data, 'played.to/(.*?)"')
    vklnk = plugintools.find_multiple_matches(data, 'vk.com/(.*?)"')
    nwvid = plugintools.find_multiple_matches(data, 'nowvideo.sx/(.*?)"')
    tumil = plugintools.find_multiple_matches(data, 'tumi.tv/(.*?)"')
    thumbnail = plugintools.find_single_match(data, 'src=\"/img/series/(.*?)"')
    thumbnail_fixed = 'http://seriesadicto.com/img/series/' + thumbnail
    for entry in amv:
        amv_url = 'http://allmyvideos.net/' + entry
        plugintools.add_item(action="play" , title = title_fixed + '[COLOR lightyellow] [Allmyvideos][/COLOR]', url = amv_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)
    for entry in strcld:
        strcld_url = 'http://streamcloud.eu/' + entry
        plugintools.add_item(action="play" , title = title_fixed + '[COLOR lightskyblue] [Streamcloud][/COLOR]', url = strcld_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)
    for entry in vdspt:
        vdspt_url = 'http://vidspot.net/' + entry
        plugintools.add_item(action="play" , title = title_fixed + '[COLOR palegreen] [Vidspot][/COLOR]', url = vdspt_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)
    for entry in plydt:
        plydt_url = 'http://played.to/' + entry
        plugintools.add_item(action="play" , title = title_fixed + '[COLOR lavender] [Played.to][/COLOR]', url = plydt_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)
    for entry in vklnk:
        vk_url = 'http://vk.com/' + entry
        plugintools.add_item(action="play" , title = title_fixed + '[COLOR royalblue] [Vk][/COLOR]', url = vk_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)
    for entry in nwvid:
        nwvid_url = 'http://nowvideo.sx/' + entry
        plugintools.add_item(action="play" , title = title_fixed + '[COLOR red] [Nowvideo][/COLOR]', url = nwvid_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)
    for entry in tumil:
        tumi_url = 'http://tumi.tv/' + entry
        plugintools.add_item(action="play" , title = title_fixed + '[COLOR forestgreen] [Tumi][/COLOR]', url = tumi_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)
def SelectTemp(params, temp):
    """Show a season picker and descend into the chosen season.

    *temp* is the list of season labels scraped from the series page.
    On a valid selection params["season"] is set and GetSerieChapters()
    is invoked; cancelling the dialog does nothing.

    Fix: the original hand-unrolled dialog.select() into ten separate
    calls, silently failing (NameError on `selector`) for zero or more
    than ten seasons. dialog.select() takes the list directly, which
    handles any length.
    """
    plugintools.log("[nec tv-0.1.0].SelectTemp "+repr(params))
    seasons = len(temp)
    dialog = xbmcgui.Dialog()
    selector = dialog.select('latinototal', list(temp))
    # dialog.select() returns -1 when the user cancels; act only on a
    # valid index into the season list.
    if 0 <= selector < seasons:
        params["season"] = temp[selector]
        GetSerieChapters(params)
def m3u_items(title):
plugintools.log("[nec tv-0.1.0].m3u_items= "+title)
thumbnail = art + 'icon.png'
fanart = art + 'fanart.jpg'
only_title = title
if title.find("tvg-logo") >= 0:
thumbnail = re.compile('tvg-logo="(.*?)"').findall(title)
num_items = len(thumbnail)
print 'num_items',num_items
if num_items == 0:
thumbnail = 'm3u.png'
else:
thumbnail = thumbnail[0]
#plugintools.log("thumbnail= "+thumbnail)
only_title = only_title.replace('tvg-logo="', "")
only_title = only_title.replace(thumbnail, "")
if title.find("tvg-wall") >= 0:
fanart = re.compile('tvg-wall="(.*?)"').findall(title)
fanart = fanart[0]
only_title = only_title.replace('tvg-wall="', "")
only_title = only_title.replace(fanart, "")
if title.find("group-title") >= 0:
cat = re.compile('group-title="(.*?)"').findall(title)
if len(cat) == 0:
cat = ""
else:
cat = cat[0]
plugintools.log("m3u_categoria= "+cat)
only_title = only_title.replace('group-title=', "")
only_title = only_title.replace(cat, "")
else:
cat = ""
if title.find("tvg-id") >= 0:
title = title.replace('”', '"')
title = title.replace('“', '"')
tvgid = re.compile('tvg-id="(.*?)"').findall(title)
print 'tvgid',tvgid
tvgid = tvgid[0]
plugintools.log("m3u_categoria= "+tvgid)
only_title = only_title.replace('tvg-id=', "")
only_title = only_title.replace(tvgid, "")
else:
tvgid = ""
if title.find("tvg-name") >= 0:
tvgname = re.compile('tvg-name="(.*?)').findall(title)
tvgname = tvgname[0]
plugintools.log("m3u_categoria= "+tvgname)
only_title = only_title.replace('tvg-name=', "")
only_title = only_title.replace(tvgname, "")
else:
tvgname = ""
only_title = only_title.replace('"', "")
#plugintools.log("m3u_thumbnail= "+thumbnail)
#plugintools.log("m3u_fanart= "+fanart)
#plugintools.log("only_title= "+only_title)
return thumbnail, fanart, cat, only_title, tvgid, tvgname
def xml_skin():
    """Return the URL of the master-menu XML according to the addon settings.

    When the "xmlmaster" setting is off, or the menu source is the default
    or a custom URL, this may also play the intro clip if "ver_intro" is
    enabled. The Pastebin source ('1') never plays the intro.
    """
    plugintools.log("[nec tv-0.1.0].xml_skin")

    def _play_intro_if_enabled():
        # Plays the intro video only when the user setting asks for it
        if plugintools.get_setting("ver_intro") == "true":
            xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(art + 'intro.mp4')

    mastermenu = plugintools.get_setting("mastermenu")
    xmlmaster = plugintools.get_setting("xmlmaster")
    SelectXMLmenu = plugintools.get_setting("SelectXMLmenu")
    # SelectXMLmenu values: 0 = default menu, 1 = Pastebin, 2 = custom URL
    if xmlmaster != 'true':
        # Custom skin disabled: always the default menu
        mastermenu = 'http://pastebin.com/raw.php?i=n9BF6Cwe'
        _play_intro_if_enabled()
    elif SelectXMLmenu == '0':
        mastermenu = 'http://pastebin.com/raw.php?i=n9BF6Cwe'
        plugintools.log("[PalcoTV.xml_skin: "+SelectXMLmenu)
        _play_intro_if_enabled()
    elif SelectXMLmenu == '1':
        # Pastebin source: fall back to the default paste when no id is set
        id_pastebin = plugintools.get_setting("id_pastebin")
        if id_pastebin == "":
            plugintools.log("[PalcoTV.xml_skin: No definido")
            mastermenu = 'http://pastebin.com/raw.php?i=n9BF6Cwe'
        else:
            mastermenu = 'http://pastebin.com/raw.php?i=' +id_pastebin
            plugintools.log("[PalcoTV.xml_skin: "+mastermenu)
    elif SelectXMLmenu == '2':
        # Custom URL source: fall back to the default menu when empty
        mastermenu = plugintools.get_setting("mastermenu")
        if mastermenu == "":
            plugintools.log("[PalcoTV.xml_skin: No definido")
            mastermenu = 'http://pastebin.com/raw.php?i=n9BF6Cwe'
        _play_intro_if_enabled()
    return mastermenu
# Plugin entry point: parse the invocation parameters and dispatch the action.
run()
| [
"# -*- coding: utf-8 -*-\n",
"#------------------------------------------------------------\n",
"# nec tv - XBMC Add-on by necula tv (daniel79mil@gmail.com)\n",
"# Version 0.1.0 (03.12.2014)\n",
"#------------------------------------------------------------\n",
"# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)\n",
"# Gracias a la librería plugintools de nec tv (www.mimediacenter.info\n",
"\n",
"\n",
"import os\n",
"import sys\n",
"import urllib\n",
"import urllib2\n",
"import re\n",
"import shutil\n",
"import zipfile\n",
"\n",
"import xbmc\n",
"import xbmcgui\n",
"import xbmcaddon\n",
"import xbmcplugin\n",
"\n",
"import plugintools, nstream, ioncube\n",
"\n",
"from framescrape import *\n",
"from resources.tools.resolvers import *\n",
"from resources.tools.update import *\n",
"from resources.tools.scrape import *\n",
"from resources.tools.torrentvru import *\n",
"from resources.tools.vaughnlive import *\n",
"from resources.tools.ninestream import *\n",
"from resources.tools.vercosas import *\n",
"from resources.tools.torrent1 import *\n",
"from resources.tools.directwatch import *\n",
"from resources.tools.freetvcast import *\n",
"from resources.tools.freebroadcast import *\n",
"from resources.tools.shidurlive import *\n",
"from resources.tools.latuerka import *\n",
"from resources.tools.laligatv import *\n",
"from resources.tools.updater import *\n",
"from resources.tools.castalba import *\n",
"from resources.tools.castdos import *\n",
"from resources.tools.new_regex import *\n",
"from resources.tools.sportseven import *\n",
"from resources.tools.streamingfreetv import *\n",
"from resources.tools.dailymotion import *\n",
"from resources.tools.getposter import *\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"home = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.nec tv/', ''))\n",
"tools = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.nec tv/resources/tools', ''))\n",
"addons = xbmc.translatePath(os.path.join('special://home/addons/', ''))\n",
"art = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.nec tv/art', ''))\n",
"tmp = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.nec tv/tmp', ''))\n",
"playlists = xbmc.translatePath(os.path.join('special://home/addons/playlists', ''))\n",
"\n",
"icon = art + 'icon.png'\n",
"fanart = 'fanart.jpg'\n",
"\n",
"\n",
"\n",
"\n",
"# Entry point\n",
"def run():\n",
"\n",
" plugintools.log(\"---> nec tv.run <---\")\n",
" \n",
" # Obteniendo parámetros...\n",
" params = plugintools.get_params() \n",
" \n",
" if params.get(\"action\") is None:\n",
" main_list(params)\n",
" else:\n",
" action = params.get(\"action\")\n",
" url = params.get(\"url\")\n",
" exec action+\"(params)\"\n",
" \n",
" if not os.path.exists(playlists) :\n",
" os.makedirs(playlists)\n",
"\n",
" \n",
"\n",
" plugintools.close_item_list()\n",
"\n",
"\n",
" \n",
"# Main menu\n",
"\n",
"def main_list(params):\n",
" plugintools.log(\"[latinototal-0.1.0].main_list \"+repr(params))\n",
" \n",
" # Control del skin de latinototal\n",
" mastermenu = xml_skin()\n",
" plugintools.log(\"XML menu: \"+mastermenu)\n",
" try:\n",
" data = plugintools.read(mastermenu)\n",
" except:\n",
" mastermenu = 'http://pastebin.com/raw.php?i=n9BF6Cwe'\n",
" data = plugintools.read(mastermenu)\n",
" xbmc.executebuiltin(\"Notification(%s,%s,%i,%s)\" % ('nec tv', \"XML no reconocido...\", 3 , art+'icon.png')) \n",
"\n",
" matches = plugintools.find_multiple_matches(data,'<menu_info>(.*?)</menu_info>')\n",
" for entry in matches:\n",
" title = plugintools.find_single_match(entry,'<title>(.*?)</title>')\n",
" date = plugintools.find_single_match(entry,'<date>(.*?)</date>')\n",
" thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>')\n",
" fanart = plugintools.find_single_match(entry,'<fanart>(.*?)</fanart>')\n",
" plugintools.add_item( action=\"\" , title = title + date , fanart = fanart , thumbnail=thumbnail , folder = False , isPlayable = False )\n",
"\n",
" data = plugintools.read(mastermenu) \n",
" matches = plugintools.find_multiple_matches(data,'<channel>(.*?)</channel>')\n",
" for entry in matches:\n",
" title = plugintools.find_single_match(entry,'<name>(.*?)</name>')\n",
" thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>')\n",
" fanart = plugintools.find_single_match(entry,'<fanart>(.*?)</fanart>')\n",
" action = plugintools.find_single_match(entry,'<action>(.*?)</action>')\n",
" last_update = plugintools.find_single_match(entry,'<last_update>(.*?)</last_update>')\n",
" url = plugintools.find_single_match(entry,'<url>(.*?)</url>')\n",
" date = plugintools.find_single_match(entry,'<last_update>(.*?)</last_update>')\n",
"\n",
" # Control paternal\n",
" pekes_no = plugintools.get_setting(\"pekes_no\")\n",
" if pekes_no == \"true\" :\n",
" print \"Control paternal en marcha\"\n",
" if title.find(\"Adultos\") >= 0 :\n",
" plugintools.log(\"Activando control paternal...\")\n",
" else:\n",
" fixed = title\n",
" plugintools.log(\"fixed= \"+fixed)\n",
" if fixed == \"Actualizaciones\":\n",
" plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False )\n",
" elif fixed == 'Agenda TV':\n",
" plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False ) \n",
" else:\n",
" plugintools.add_item( action = action , plot = fixed , title = '[COLOR lightyellow]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False )\n",
" else:\n",
" fixed = title\n",
" if fixed == \"Actualizaciones\":\n",
" plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False )\n",
" elif fixed == \"Agenda TV\":\n",
" plugintools.add_item( action = action , plot = fixed , title = '[COLOR red]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False )\n",
" else:\n",
" plugintools.add_item( action = action , plot = fixed , title = '[COLOR lightyellow]' + fixed + '[/COLOR]' , fanart = fanart , thumbnail = thumbnail , url = url , folder = True , isPlayable = False )\n",
" \n",
" \n",
"\n",
"def play(params):\n",
" plugintools.log(\"[nec tv-0.1.0].play \"+repr(params))\n",
" # plugintools.direct_play(params.get(\"url\"))\n",
" # xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(params.get(\"url\"))\n",
" url = params.get(\"url\")\n",
"\n",
" # Notificación de inicio de resolver en caso de enlace RTMP\n",
"\n",
" if url.startswith(\"http\") == True:\n",
" if url.find(\"allmyvideos\") >= 0 :\n",
" allmyvideos(params)\n",
" elif url.find(\"streamcloud\") >= 0 :\n",
" streamcloud(params)\n",
" elif url.find(\"vidspot\") >= 0 :\n",
" vidspot(params)\n",
" elif url.find(\"played.to\") >= 0 :\n",
" playedto(params)\n",
" elif url.find(\"vk.com\") >= 0 :\n",
" vk(params)\n",
" elif url.find(\"nowvideo\") >= 0 :\n",
" nowvideo(params)\n",
" elif url.find(\"tumi\") >= 0 :\n",
" tumi(params)\n",
" elif url.find(\"streamin.to\") >= 0 :\n",
" streaminto(params) \n",
" else:\n",
" url = params.get(\"url\")\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.startswith(\"rtp\") >= 0: # Control para enlaces de Movistar TV\n",
" plugintools.play_resolved_url(url)\n",
" \n",
" else:\n",
" plugintools.play_resolved_url(url)\n",
" while OnPlayBackStarted() == False:\n",
" print \"No se está reproduciendo...\"\n",
" time.sleep(3)\n",
" if OnPlayBackStarted():\n",
" print \"En reproducción!\"\n",
" else:\n",
" print \"No ha empezado\"\n",
" \n",
" \n",
" \n",
"def runPlugin(url):\n",
" xbmc.executebuiltin('XBMC.RunPlugin(' + url +')')\n",
"\n",
"\n",
"def live_items_withlink(params):\n",
" plugintools.log(\"[latinototal-0.1.0].live_items_withlink \"+repr(params))\n",
" data = plugintools.read(params.get(\"url\"))\n",
"\n",
" # ToDo: Agregar función lectura de cabecera (fanart, thumbnail, título, últ. actualización)\n",
" header_xml(params)\n",
"\n",
" fanart = plugintools.find_single_match(data, '<fanart>(.*?)</fanart>') # Localizamos fanart de la lista\n",
" if fanart == \"\":\n",
" fanart = art + 'fanart.jpg'\n",
" \n",
" author = plugintools.find_single_match(data, '<poster>(.*?)</poster>') # Localizamos autor de la lista (encabezado)\n",
" \n",
" matches = plugintools.find_multiple_matches(data,'<item>(.*?)</item>')\n",
" for entry in matches:\n",
" title = plugintools.find_single_match(entry,'<title>(.*?)</title>')\n",
" title = title.replace(\"<![CDATA[\", \"\")\n",
" title = title.replace(\"]]>\", \"\")\n",
" thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>')\n",
" url = plugintools.find_single_match(entry,'<link>(.*?)</link>')\n",
" url = url.replace(\"<![CDATA[\", \"\")\n",
" url = url.replace(\"]]>\", \"\")\n",
" plugintools.add_item(action = \"play\" , title = title , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )\n",
" \n",
"\n",
" \n",
"def xml_lists(params):\n",
" plugintools.log(\"[nec tv-0.1.0].xml_lists \"+repr(params))\n",
" data = plugintools.read( params.get(\"url\") )\n",
" name_channel = params.get(\"title\")\n",
" name_channel = parser_title(name_channel)\n",
" plugintools.log(\"name_channel= \"+name_channel)\n",
" pattern = '<name>'+name_channel+'(.*?)</channel>'\n",
" data = plugintools.find_single_match(data, pattern)\n",
"\n",
" plugintools.add_item( action=\"\" , title='[B][COLOR yellow]'+name_channel+'[/B][/COLOR]' , thumbnail= art + 'special.png' , fanart = fanart , folder = False , isPlayable = False )\n",
" \n",
" # Control paternal\n",
" pekes_no = plugintools.get_setting(\"pekes_no\")\n",
" \n",
" subchannel = re.compile('<subchannel>([^<]+)<name>([^<]+)</name>([^<]+)<thumbnail>([^<]+)</thumbnail>([^<]+)<fanart>([^<]+)</fanart>([^<]+)<action>([^<]+)</action>([^<]+)<url>([^<]+)</url>([^<]+)</subchannel>').findall(data)\n",
" for biny, ciny, diny, winy, pixy, dixy, boxy, susy, lexy, muny, kiny in subchannel:\n",
" if pekes_no == \"true\" :\n",
" print \"Control paternal en marcha\"\n",
" if ciny.find(\"XXX\") >= 0 :\n",
" plugintools.log(\"Activando control paternal...\")\n",
" else: \n",
" plugintools.add_item( action = susy , title = ciny , url= muny , thumbnail = winy , fanart = dixy , extra = dixy , page = dixy , folder = True , isPlayable = False )\n",
" params[\"fanart\"]=dixy\n",
" # params[\"thumbnail\"]=pixy\n",
" \n",
" else: \n",
" plugintools.add_item( action = susy , title = ciny , url= muny , thumbnail = winy , fanart = dixy , extra = dixy , page = dixy , folder = True , isPlayable = False )\n",
" params[\"fanart\"]=dixy\n",
" # params[\"thumbnail\"]=pixy \n",
" \n",
"\n",
" \n",
"def getstreams_now(params):\n",
" plugintools.log(\"[nec tv-0.1.0].getstreams_now \"+repr(params))\n",
" \n",
" data = plugintools.read( params.get(\"url\") )\n",
" poster = plugintools.find_single_match(data, '<poster>(.*?)</poster>')\n",
" plugintools.add_item(action=\"\" , title='[COLOR blue][B]'+poster+'[/B][/COLOR]', url=\"\", folder =False, isPlayable=False)\n",
" matches = plugintools.find_multiple_matches(data,'<title>(.*?)</link>')\n",
" \n",
" for entry in matches:\n",
" title = plugintools.find_single_match(entry,'(.*?)</title>')\n",
" url = plugintools.find_single_match(entry,'<link> ([^<]+)')\n",
" plugintools.add_item( action=\"play\" , title=title , url=url , folder = False , isPlayable = True )\n",
" \n",
" \n",
"\n",
"# Soporte de listas de canales por categorías (Livestreams, XBMC México, Motor SportsTV, etc.). \n",
"\n",
"def livestreams_channels(params):\n",
" plugintools.log(\"[nec tv-0.1.0].livestreams_channels \"+repr(params))\n",
" data = plugintools.read( params.get(\"url\") )\n",
" \n",
" # Extract directory list\n",
" thumbnail = params.get(\"thumbnail\")\n",
" \n",
" if thumbnail == \"\":\n",
" thumbnail = 'icon.jpg'\n",
" plugintools.log(thumbnail)\n",
" else:\n",
" plugintools.log(thumbnail)\n",
" \n",
" if thumbnail == art + 'icon.png':\n",
" matches = plugintools.find_multiple_matches(data,'<channel>(.*?)</items>')\n",
" for entry in matches:\n",
" title = plugintools.find_single_match(entry,'<name>(.*?)</name>')\n",
" thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>')\n",
" fanart = plugintools.find_single_match(entry,'<fanart>(.*?)</fanart>')\n",
" plugintools.add_item( action=\"livestreams_subchannels\" , title=title , url=params.get(\"url\") , thumbnail=thumbnail , fanart=fanart , folder = True , isPlayable = False )\n",
"\n",
" else:\n",
" matches = plugintools.find_multiple_matches(data,'<channel>(.*?)</items>')\n",
" for entry in matches:\n",
" title = plugintools.find_single_match(entry,'<name>(.*?)</name>')\n",
" thumbnail = plugintools.find_single_match(entry,'<thumbnail>(.*?)</thumbnail>')\n",
" fanart = plugintools.find_single_match(entry,'<fanart>(.*?)</fanart>')\n",
" plugintools.add_item( action=\"livestreams_items\" , title=title , url=params.get(\"url\") , fanart=fanart , thumbnail=thumbnail , folder = True , isPlayable = False )\n",
" \n",
" \n",
"def livestreams_subchannels(params):\n",
" plugintools.log(\"[nec tv-0.1.0].livestreams_subchannels \"+repr(params))\n",
"\n",
" data = plugintools.read( params.get(\"url\") )\n",
" # title_channel = params.get(\"title\")\n",
" title_channel = params.get(\"title\")\n",
" name_subchannel = '<name>'+title_channel+'</name>'\n",
" data = plugintools.find_single_match(data, name_subchannel+'(.*?)</channel>')\n",
" info = plugintools.find_single_match(data, '<info>(.*?)</info>')\n",
" title = params.get(\"title\")\n",
" plugintools.add_item( action=\"\" , title='[B]'+title+'[/B] [COLOR yellow]'+info+'[/COLOR]' , folder = False , isPlayable = False )\n",
"\n",
" subchannel = plugintools.find_multiple_matches(data , '<name>(.*?)</name>')\n",
" for entry in subchannel:\n",
" plugintools.add_item( action=\"livestreams_subitems\" , title=entry , url=params.get(\"url\") , thumbnail=art+'motorsports-xbmc.jpg' , folder = True , isPlayable = False )\n",
"\n",
"\n",
"# Pendiente de cargar thumbnail personalizado y fanart...\n",
"def livestreams_subitems(params):\n",
" plugintools.log(\"[nec tv-0.1.0].livestreams_subitems \"+repr(params))\n",
"\n",
" title_subchannel = params.get(\"title\")\n",
" data = plugintools.read( params.get(\"url\") )\n",
" source = plugintools.find_single_match(data , title_subchannel+'(.*?)<subchannel>')\n",
"\n",
" titles = re.compile('<title>([^<]+)</title>([^<]+)<link>([^<]+)</link>').findall(source)\n",
" url = params.get(\"url\")\n",
" title = params.get(\"title\")\n",
" thumbnail = params.get(\"thumbnail\")\n",
" \n",
" for entry, quirry, winy in titles:\n",
" winy = winy.replace(\"amp;\",\"\")\n",
" plugintools.add_item( action=\"play\" , title = entry , url = winy , thumbnail = thumbnail , folder = False , isPlayable = True )\n",
"\n",
"\n",
def livestreams_items(params):
    """List the streams of one channel from the XML at params["url"].

    The channel title coming from the menu may carry mojibake and a
    '[...]' colour-tag suffix; it is normalised first and then used as
    the anchor of the regex that extracts this channel's XML fragment.
    """
    plugintools.log("[nec tv-0.1.0].livestreams_items "+repr(params))

    title_subchannel = params.get("title")
    plugintools.log("title= "+title_subchannel)
    # Repair common mojibake forms of the 'n-tilde' coming from the menu layer.
    title_subchannel_fixed = title_subchannel.replace("ñ", "ñ")
    title_subchannel_fixed = title_subchannel_fixed.replace("\\xc3\\xb1", "ñ")
    # Keep only the text before any '[' (drops BBCode colour tags).
    title_subchannel_fixed = plugintools.find_single_match(title_subchannel_fixed, '([^[]+)')
    title_subchannel_fixed = title_subchannel_fixed.encode('utf-8', 'ignore')
    plugintools.log("subcanal= "+title_subchannel_fixed)
    if title_subchannel_fixed.find("+") >= 0:
        # Titles like "A + B": prefer the part after '+', falling back to the
        # part before it when the second half is empty.
        # Bug fix: the fallback previously indexed [0] of the already-overwritten
        # second half (yielding a single character) instead of the first part.
        parts = title_subchannel_fixed.split("+")
        title_subchannel_fixxed = parts[0]
        title_subchannel_fixed = parts[1]
        if title_subchannel_fixed == "":
            title_subchannel_fixed = title_subchannel_fixxed

    data = plugintools.read( params.get("url") )
    # Fragment between the normalised channel title and its closing tag.
    source = plugintools.find_single_match(data , title_subchannel_fixed+'(.*?)</channel>')
    plugintools.log("source= "+source)
    fanart_channel = plugintools.find_single_match(source, '<fanart>(.*?)</fanart>')
    titles = re.compile('<title>([^<]+)</title>([^<]+)<link>([^<]+)</link>([^<]+)<thumbnail>([^<]+)</thumbnail>').findall(source)

    url = params.get("url")
    title = params.get("title")
    thumbnail = params.get("thumbnail")

    for entry, quirry, winy, xiry, miry in titles:
        plugintools.log("title= "+entry)
        plugintools.log("url= "+winy)
        # Strip the stray "amp;" artefact left by XML-escaped ampersands.
        winy = winy.replace("amp;","")
        plugintools.add_item( action="play" , title = entry , url = winy , thumbnail = miry , fanart = fanart_channel , folder = False , isPlayable = True )
"\n",
def xml_items(params):
    """Parse a remote XML playlist and emit one playable item per entry.

    The thumbnail name acts as a parser selector: 'title_link.png' lists
    <item> blocks (title/link/thumbnail/fanart) while 'name_rtmp.png'
    lists <channel> blocks (name/rtmp).
    """
    plugintools.log("[nec tv-0.1.0].xml_items "+repr(params))
    data = plugintools.read( params.get("url") )
    thumbnail = params.get("thumbnail")

    # TODO: add a dedicated parameter selecting the parsing mode instead of
    # overloading the thumbnail name.
    if thumbnail == "title_link.png":
        for item_block in plugintools.find_multiple_matches(data, '<item>(.*?)</item>'):
            entry_title = plugintools.find_single_match(item_block, '<title>(.*?)</title>')
            # NOTE: 'thumbnail' is deliberately reused here, as in the original,
            # so the second branch below sees the last entry's thumbnail value.
            thumbnail = plugintools.find_single_match(item_block, '<thumbnail>(.*?)</thumbnail>')
            entry_url = plugintools.find_single_match(item_block, '<link>([^<]+)</link>')
            entry_fanart = plugintools.find_single_match(item_block, '<fanart>([^<]+)</fanart>')
            plugintools.add_item( action = "play" , title = entry_title , url = entry_url , thumbnail = thumbnail , fanart = entry_fanart , plot = entry_title , folder = False , isPlayable = True )

    if thumbnail == "name_rtmp.png":
        for channel_block in plugintools.find_multiple_matches(data, '<channel>(.*?)</channel>'):
            channel_name = plugintools.find_single_match(channel_block, '<name>(.*?)</name>')
            rtmp_url = plugintools.find_single_match(channel_block, '<rtmp>([^<]+)</rtmp>')
            plugintools.add_item( action = "play" , title = channel_name , url = rtmp_url , fanart = art + 'fanart.jpg' , plot = channel_name , folder = False , isPlayable = True )
" \n",
"def simpletv_items(params):\n",
" plugintools.log(\"[nec tv-0.1.0].simpletv_items \"+repr(params))\n",
"\n",
" saving_url = 0\n",
"\n",
" # Obtenemos fanart y thumbnail del diccionario\n",
" thumbnail = params.get(\"thumbnail\")\n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
" if thumbnail == \"\" :\n",
" thumbnail = art + 'icon.png'\n",
"\n",
" # Parche para solucionar un bug por el cuál el diccionario params no retorna la variable fanart\n",
" fanart = params.get(\"extra\")\n",
" if fanart == \" \" :\n",
" fanart = params.get(\"fanart\")\n",
" if fanart == \" \" :\n",
" fanart = art + 'fanart.png'\n",
" \n",
" title = params.get(\"plot\")\n",
" texto= params.get(\"texto\")\n",
" busqueda = \"\"\n",
" if title == 'search':\n",
" title = title + '.txt'\n",
" plugintools.log(\"title= \"+title)\n",
" else:\n",
" title = title + '.m3u'\n",
"\n",
" if title == 'search.txt':\n",
" busqueda = 'search.txt'\n",
" filename = title\n",
" file = open(tmp + 'search.txt', \"r\")\n",
" file.seek(0)\n",
" data = file.readline()\n",
" if data == \"\":\n",
" ok = plugintools.message(\"nec tv\", \"Sin resultados\")\n",
" return ok\n",
" else:\n",
" title = params.get(\"title\")\n",
" title = parser_title(title)\n",
" ext = params.get(\"ext\")\n",
" title_plot = params.get(\"plot\")\n",
" if title_plot == \"\":\n",
" filename = title + \".\" + ext\n",
"\n",
" if ext is None:\n",
" filename = title\n",
" else:\n",
" plugintools.log(\"ext= \"+ext)\n",
" filename = title + \".\" + ext\n",
" \n",
" file = open(playlists + filename, \"r\")\n",
" file.seek(0)\n",
" data = file.readline()\n",
" plugintools.log(\"data= \"+data)\n",
" \n",
" if data == \"\":\n",
" print \"No es posible leer el archivo!\"\n",
" data = file.readline()\n",
" plugintools.log(\"data= \"+data)\n",
" else:\n",
" file.seek(0)\n",
" num_items = len(file.readlines())\n",
" print num_items\n",
" plugintools.log(\"filename= \"+filename)\n",
" plugintools.add_item(action=\"\" , title = '[COLOR lightyellow][B][I]playlist / '+ filename + '[/B][/I][/COLOR]' , url = playlists + title , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = False)\n",
"\n",
" \n",
" # Lectura de items en lista m3u. ToDo: Control de errores, implementar lectura de fanart y thumbnail\n",
"\n",
" # Control para evitar error en búsquedas (cat is null)\n",
" cat = \"\"\n",
"\n",
" i = -1\n",
" file.seek(0)\n",
" data = file.readline()\n",
" while i <= num_items:\n",
" if data.startswith(\"#EXTINF:-1\") == True:\n",
" title = data.replace(\"#EXTINF:-1\", \"\")\n",
" title = title.replace(\",\", \"\")\n",
" title = title.replace(\"-AZBOX *\", \"\")\n",
" title = title.replace(\"-AZBOX-*\", \"\")\n",
" \n",
" if title.startswith(\"$\") == True: # Control para lanzar scraper IMDB\n",
" title = title.replace(\"$\",\"\")\n",
" images = m3u_items(title) \n",
" title_fixed = images[3]\n",
" datamovie = {}\n",
" datamovie = getposter(title_fixed)\n",
" save_title(title_fixed, datamovie, filename)\n",
" getdatafilm = 1 # Control para cargar datos de película\n",
" saving_url = 1 # Control para guardar URL\n",
" if datamovie == {}:\n",
" title = '[COLOR lightyellow][B]'+title+' - [/B][I][COLOR orange][IMDB: [B]'+datamovie[\"Rating\"]+'[/B]][/I][/COLOR] '\n",
" thumbnail = datamovie[\"Poster\"];fanart = datamovie[\"Fanart\"]\n",
"\n",
" # Control de la línea del título en caso de búsqueda \n",
" if busqueda == 'search.txt':\n",
" title_search = title.split('\"')\n",
" print 'title',title\n",
" titulo = title_search[0]\n",
" titulo = titulo.strip()\n",
" origen = title_search[1]\n",
" origen = origen.strip()\n",
" data = file.readline()\n",
" i = i + 1 \n",
" else:\n",
" images = m3u_items(title)\n",
" thumbnail = images[0]\n",
" fanart = images[1]\n",
" cat = images[2]\n",
" title = images[3]\n",
" origen = title.split(\",\") \n",
" title = title.strip()\n",
" plugintools.log(\"title= \"+title)\n",
" data = file.readline()\n",
" i = i + 1 \n",
"\n",
" if title.startswith(\"#\") == True:\n",
" title = title.replace(\"#\", \"\")\n",
" plugintools.add_item(action=\"\", title = title , url = \"\", thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = False)\n",
" data = file.readline()\n",
" print data\n",
" i = i + 1\n",
" continue \n",
"\n",
" # Control para determinadas listas de decos sat\n",
" if title.startswith(' $ExtFilter=\"') == True:\n",
" if busqueda == 'search.txt':\n",
" title = title.replace('$ExtFilter=\"', \"\")\n",
" title_search = title.split('\"')\n",
" titulo = title_search[1]\n",
" origen = title_search[2]\n",
" origen = origen.strip()\n",
" data = file.readline()\n",
" i = i + 1 \n",
" else:\n",
" title = title.replace('$ExtFilter=\"', \"\")\n",
" category = title.split('\"')\n",
" tipo = category[0]\n",
" tipo = tipo.strip()\n",
" title = category[1]\n",
" title = title.strip()\n",
" print title\n",
" data = file.readline()\n",
" i = i + 1\n",
" \n",
" if data != \"\":\n",
" title = title.replace(\"radio=true\", \"\") \n",
" url = data.strip()\n",
" if url.startswith(\"serie\") == True:\n",
" url = data.strip()\n",
" if cat == \"\":\n",
" if busqueda == 'search.txt': \n",
" url = url.replace(\"serie:\", \"\")\n",
" params[\"fanart\"] = fanart\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" plugintools.add_item( action = \"seriecatcher\" , title = '[COLOR white]' + title + ' [COLOR purple][Serie online][/COLOR][COLOR white][I] (' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" url = url.replace(\"serie:\", \"\")\n",
" params[\"fanart\"] = fanart\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" plugintools.add_item( action = \"seriecatcher\" , title = '[COLOR white]' + title + ' [COLOR purple][Serie online][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
" else:\n",
" if busqueda == 'search.txt':\n",
" plugintools.add_item( action = \"longurl\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR purple][Serie online][/COLOR][COLOR white][I] (' + origen + ')[/I][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"longurl\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR purple][Serie online][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" \n",
" if data.startswith(\"http\") == True:\n",
" url = data.strip()\n",
" if cat != \"\": # Controlamos el caso de subcategoría de canales\n",
" if busqueda == 'search.txt':\n",
" if url.startswith(\"serie\") == True:\n",
" url = url.replace(\"serie:\", \"\")\n",
" params[\"fanart\"] = fanart\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" plugintools.add_item( action = \"seriecatcher\" , title = '[COLOR white]' + title + ' [COLOR purple][Serie online][/COLOR][COLOR lightsalmon](' + origen + ')[/I][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" elif url.find(\"allmyvideos\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip()\n",
" plugintools.add_item( action = \"allmyvideos\" , title = '[COLOR white]' + title + '[COLOR lightyellow] [Allmyvideos][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" elif url.find(\"streamcloud\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"streamcloud\" , title = '[COLOR white]' + title + '[COLOR lightskyblue] [Streamcloud][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" elif url.find(\"vidspot\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"vidspot\" , title = '[COLOR white]' + title + '[COLOR palegreen] [Vidspot][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" elif url.find(\"played.to\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"playedto\" , title = '[COLOR white]' + title + '[COLOR lavender] [Played.to][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif url.find(\"vk.com\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"vk\" , title = '[COLOR white]' + title + '[COLOR royalblue] [Vk][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif url.find(\"nowvideo\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"nowvideo\" , title = '[COLOR white]' + title + '[COLOR red] [Nowvideo][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" elif url.find(\"tumi\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"tumi\" , title = '[COLOR white]' + title + '[COLOR forestgreen] [Tumi][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif url.find(\"streamin.to\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"streaminto\" , title = '[COLOR white]' + title + '[COLOR orange] [streamin.to][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , show = show, fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename) \n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
"\n",
" elif url.find(\"www.youtube.com\") >= 0: # Video youtube\n",
" plugintools.log(\"linea titulo= \"+title_search)\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip()\n",
" videoid = url.replace(\"https://www.youtube.com/watch?=\", \"\")\n",
" url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + ' [[COLOR red]You[COLOR white]tube Video][I] (' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" elif url.find(\"www.dailymotion.com/playlist\") >= 0: # Playlist\n",
" id_playlist = dailym_getplaylist(url)\n",
" if id_playlist != \"\":\n",
" url = \"https://api.dailymotion.com/playlist/\"+id_playlist+\"/videos\"\n",
" if thumbnail == \"\":\n",
" thumbnail = 'http://press.dailymotion.com/wp-old/wp-content/uploads/logo-Dailymotion.png'\n",
" plugintools.add_item( action=\"dailym_pl\" , title=title + ' [COLOR lightyellow][B][Dailymotion[/B] playlist][/COLOR]' , fanart=fanart, thumbnail=thumbnail, url=url , folder=True, isPlayable=False)\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" elif url.find(\"dailymotion.com/video\") >= 0:\n",
" video_id = dailym_getvideo(url)\n",
" if video_id != \"\":\n",
" thumbnail = \"https://api.dailymotion.com/thumbnail/video/\"+video_id+\"\"\n",
" url = \"plugin://plugin.video.dailymotion_com/?url=\"+video_id+\"&mode=playVideo\"\n",
" # Appends a new item to the xbmc item list\n",
" # API Dailymotion list of video parameters: http://www.dailymotion.com/doc/api/obj-video.html\n",
" plugintools.add_item( action=\"play\" , title=title + ' [COLOR lightyellow][B][Dailymotion[/B] video][/COLOR]' , url=url , thumbnail = thumbnail , fanart = fanart, isPlayable=True, folder=False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename) \n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
"\n",
" elif url.endswith(\"m3u8\") == True:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + '[COLOR purple] [m3u8][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
" \n",
" else:\n",
" title = title_search.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"longurl\" , title = '[COLOR white]' + title + '[COLOR lightblue] [HTTP][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" if url.startswith(\"serie\") == True:\n",
" url = url.replace(\"serie:\", \"\")\n",
" params[\"fanart\"] = fanart\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" plugintools.add_item( action = \"seriecatcher\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR purple][Serie online][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" elif url.find(\"allmyvideos\") >= 0: \n",
" plugintools.add_item( action = \"allmyvideos\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR lightyellow] [Allmyvideos][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" elif url.find(\"streamcloud\") >= 0: \n",
" plugintools.add_item( action = \"streamcloud\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR lightskyblue] [Streamcloud][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif url.find(\"vidspot\") == True: \n",
" plugintools.add_item( action = \"vidspot\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR palegreen] [Vidspot][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif url.find(\"played.to\") >= 0: \n",
" plugintools.add_item( action = \"playedto\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR lavender] [Played.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" elif url.find(\"vk\") >= 0: \n",
" plugintools.add_item( action = \"vk\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR royalblue] [Vk][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
"\n",
" elif url.find(\"nowvideo\") >= 0: \n",
" plugintools.add_item( action = \"nowvideo\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR red] [Nowvideo][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif url.find(\"tumi\") >= 0: \n",
" plugintools.add_item( action = \"tumi\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR forestgreen] [Tumi][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif url.find(\"streamin.to\") >= 0: \n",
" plugintools.add_item( action = \"streaminto\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR orange] [streamin.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
"\n",
" elif url.find(\"9stream\") >= 0: \n",
" plugintools.add_item( action = \"ninestreams\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR green] [9stream][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
"\n",
" elif url.find(\"www.youtube.com\") >= 0: # Video youtube\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title =title.strip()\n",
" videoid = url.replace(\"https://www.youtube.com/watch?=\", \"\")\n",
" url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [[COLOR red]You[COLOR white]tube Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
" \n",
" elif url.find(\"www.dailymotion.com/playlist\") >= 0: # Playlist\n",
" id_playlist = dailym_getplaylist(url)\n",
" if id_playlist != \"\":\n",
" plugintools.log(\"id_playlist= \"+id_playlist)\n",
" if thumbnail == \"\":\n",
" thumbnail = 'http://press.dailymotion.com/wp-old/wp-content/uploads/logo-Dailymotion.png'\n",
" url = \"https://api.dailymotion.com/playlist/\"+id_playlist+\"/videos\"\n",
" plugintools.add_item( action=\"dailym_pl\" , title='[COLOR red][I]'+cat+' / [/I][/COLOR] '+title+' [COLOR lightyellow][B][Dailymotion[/B] playlist][/COLOR]', url=url , fanart = fanart , thumbnail=thumbnail , folder=True, isPlayable=False)\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif url.find(\"dailymotion.com/video\") >= 0:\n",
" video_id = dailym_getvideo(url)\n",
" if video_id != \"\":\n",
" thumbnail = \"https://api.dailymotion.com/thumbnail/video/\"+video_id+\"\"\n",
" url = \"plugin://plugin.video.dailymotion_com/?url=\"+video_id+\"&mode=playVideo\"\n",
" # Appends a new item to the xbmc item list\n",
" # API Dailymotion list of video parameters: http://www.dailymotion.com/doc/api/obj-video.html\n",
" plugintools.add_item( action=\"play\" , title='[COLOR red][I]' + cat + ' / [/I][/COLOR] '+title+' [COLOR lightyellow][B][Dailymotion[/B] video][/COLOR]', url=url , thumbnail = thumbnail , fanart= fanart , isPlayable=True, folder=False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" elif url.endswith(\"m3u8\") == True:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR purple] [m3u8][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else: \n",
" plugintools.add_item( action = \"longurl\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR blue] [HTTP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" # Sin categoría de canales \n",
" else:\n",
" if busqueda == 'search.txt':\n",
" if url.startswith(\"serie\") == True:\n",
" url = url.replace(\"serie:\", \"\")\n",
" params[\"fanart\"] = fanart\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" plugintools.add_item( action = \"seriecatcher\" , title = '[COLOR white]' + title + ' [COLOR purple][Serie online][/COLOR][COLOR lightsalmon](' + origen + ')[/I][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" elif url.find(\"allmyvideos\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"allmyvideos\" , title = '[COLOR white]' + title + '[COLOR lightyellow] [Allmyvideos][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif url.find(\"streamcloud\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"streamcloud\" , title = '[COLOR white]' + titulo + '[COLOR lightskyblue] [Streamcloud][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" \n",
" elif url.find(\"vidspot\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"vidspot\" , title = '[COLOR white]' + title + '[COLOR palegreen] [Vidspot][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" \n",
" elif url.find(\"played.to\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"playedto\" , title = '[COLOR white]' + title + '[COLOR lavender] [Played.to][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"vk.com\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"vk\" , title = '[COLOR white]' + title + '[COLOR royalblue] [Vk][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"nowvideo\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"nowvideo\" , title = '[COLOR white]' + title + '[COLOR red] [Nowvideo][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"tumi.tv\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"tumi\" , title = '[COLOR white]' + title + '[COLOR forestgreen] [Tumi][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"streamin.to\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"streaminto\" , title = '[COLOR white]' + title + '[COLOR orange] [streamin.to][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
"\n",
" elif url.find(\"www.youtube.com\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip()\n",
" videoid = url.replace(\"https://www.youtube.com/watch?=\", \"\")\n",
" url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid \n",
" plugintools.add_item( action = \"youtube_videos\" , title = '[COLOR white][' + title + ' [[COLOR red]You[/COLOR][COLOR white]tube Video][I] (' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"www.dailymotion.com/playlist\") >= 0: # Playlist\n",
" id_playlist = dailym_getplaylist(url)\n",
" if id_playlist != \"\":\n",
" if thumbnail == \"\":\n",
" thumbnail = 'http://press.dailymotion.com/wp-old/wp-content/uploads/logo-Dailymotion.png' \n",
" url = \"https://api.dailymotion.com/playlist/\"+id_playlist+\"/videos\"\n",
" plugintools.add_item( action=\"dailym_pl\" , title=title+' [COLOR lightyellow][B][Dailymotion[/B] playlist][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url=url , fanart = fanart , thumbnail=thumbnail , folder=True, isPlayable=False)\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif url.find(\"dailymotion.com/video\") >= 0:\n",
" video_id = dailym_getvideo(url)\n",
" if video_id != \"\":\n",
" thumbnail = \"https://api.dailymotion.com/thumbnail/video/\"+video_id+\"\"\n",
" url = \"plugin://plugin.video.dailymotion_com/?url=\"+video_id+\"&mode=playVideo\"\n",
" # Appends a new item to the xbmc item list\n",
" # API Dailymotion list of video parameters: http://www.dailymotion.com/doc/api/obj-video.html\n",
" plugintools.add_item( action=\"play\" , title=title+' [COLOR lightyellow][B][Dailymotion[/B] video][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url=url , fanart = fanart , thumbnail = thumbnail , isPlayable=True, folder=False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" elif url.endswith(\"m3u8\") == True:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + ' [COLOR purple][m3u8][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" \n",
" else: \n",
" title = title_search[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"longurl\" , title = '[COLOR white]' + title + ' [COLOR blue][HTTP][/COLOR][I][COLOR lightsalmon] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" else:\n",
" if url.find(\"allmyvideos\") >= 0: \n",
" plugintools.add_item( action = \"allmyvideos\" , title = '[COLOR white]' + title + ' [COLOR lightyellow][Allmyvideos][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"streamcloud\") >= 0: \n",
" plugintools.add_item( action = \"streamcloud\" , title = '[COLOR white]' + title + ' [COLOR lightskyblue][Streamcloud][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"vidspot\") >= 0: \n",
" plugintools.add_item( action = \"vidspot\" , title = '[COLOR white]' + title + ' [COLOR palegreen][Vidspot][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" \n",
" elif url.find(\"played.to\") >= 0: \n",
" plugintools.add_item( action = \"playedto\" , title = '[COLOR white]' + title + ' [COLOR lavender][Played.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"vk.com\") >= 0: \n",
" plugintools.add_item( action = \"vk\" , title = '[COLOR white]' + title + ' [COLOR royalblue][Vk][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"nowvideo\") >= 0: \n",
" plugintools.add_item( action = \"nowvideo\" , title = '[COLOR white]' + title + '[COLOR red] [Nowvideo][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"tumi.tv\") >= 0: \n",
" plugintools.add_item( action = \"tumi\" , title = '[COLOR white]' + title + '[COLOR forestgreen] [Tumi][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"streamin.to\") >= 0: \n",
" plugintools.add_item( action = \"streaminto\" , title = '[COLOR white]' + title + '[COLOR orange] [streamin.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
"\n",
" elif url.find(\"www.youtube.com\") >= 0:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip()\n",
" videoid = url.replace(\"https://www.youtube.com/watch?v=\", \"\")\n",
" print 'videoid',videoid\n",
" url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid \n",
" plugintools.add_item( action = \"youtube_videos\" , title = '[COLOR white]' + title + ' [[COLOR red]You[/COLOR][COLOR white]tube Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif url.find(\"www.dailymotion.com/playlist\") >= 0: # Playlist\n",
" id_playlist = dailym_getplaylist(url)\n",
" if id_playlist != \"\":\n",
" plugintools.log(\"id_playlist= \"+id_playlist)\n",
" thumbnail=art+'/lnh_logo.png'\n",
" url = \"https://api.dailymotion.com/playlist/\"+id_playlist+\"/videos\" \n",
" plugintools.add_item( action=\"dailym_pl\" , title=title + ' [COLOR lightyellow][B][Dailymotion[/B] playlist][/COLOR]' , url=url , fanart = fanart , thumbnail=thumbnail , folder=True)\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif url.find(\"dailymotion.com/video\") >= 0:\n",
" video_id = dailym_getvideo(url)\n",
" if video_id != \"\":\n",
" thumbnail = \"https://api.dailymotion.com/thumbnail/video/\"+video_id+\"\"\n",
" url = \"plugin://plugin.video.dailymotion_com/?url=\"+video_id+\"&mode=playVideo\"\n",
" #plugintools.log(\"url= \"+url)\n",
" # Appends a new item to the xbmc item list\n",
" # API Dailymotion list of video parameters: http://www.dailymotion.com/doc/api/obj-video.html\n",
" plugintools.add_item( action=\"play\" , title=title + ' [COLOR lightyellow][B][Dailymotion[/B] video][/COLOR]' , url=url , thumbnail = thumbnail , fanart = fanart , isPlayable=True, folder=False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
" \n",
" elif url.endswith(\"m3u8\") == True:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip() \n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + ' [COLOR purple][m3u8][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"longurl\" , title = '[COLOR red][I]' + '[/I][/COLOR][COLOR white]' + title + ' [COLOR blue][HTTP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" if data.startswith(\"rtmp\") == True or data.startswith(\"rtsp\") == True:\n",
" url = data\n",
" url = parse_url(url)\n",
" if cat != \"\": # Controlamos el caso de subcategoría de canales\n",
" if busqueda == 'search.txt':\n",
" params[\"url\"] = url\n",
" server_rtmp(params) \n",
" server = params.get(\"server\")\n",
" plugintools.log(\"params en simpletv\" +repr(params) )\n",
" url = params.get(\"url\")\n",
" plugintools.add_item( action = \"launch_rtmp\" , title = '[COLOR white]' + titulo + '[COLOR green] [' + server + '][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]', url = params.get(\"url\") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" params[\"server\"] = server\n",
" print url \n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" params[\"url\"] = url\n",
" server_rtmp(params) \n",
" server = params.get(\"server\")\n",
" plugintools.log(\"params en simpletv\" +repr(params) )\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" url = params.get(\"url\")\n",
" plugintools.add_item( action = \"launch_rtmp\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR green] [' + server + '][/COLOR]' , url = params.get(\"url\") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" print url \n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" else:\n",
" if busqueda == 'search.txt':\n",
" params[\"url\"] = url\n",
" server_rtmp(params) \n",
" server = params.get(\"server\")\n",
" plugintools.log(\"params en simpletv\" +repr(params) )\n",
" url = params.get(\"url\") \n",
" plugintools.add_item( action = \"launch_rtmp\" , title = '[COLOR white]' + titulo + '[COLOR green] [' + server + '][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]' , url = params.get(\"url\") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" print url \n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" params[\"url\"] = url\n",
" server_rtmp(params) \n",
" server = params.get(\"server\")\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" plugintools.log(\"params en simpletv\" +repr(params) )\n",
" url = params.get(\"url\") \n",
" plugintools.add_item( action = \"launch_rtmp\" , title = '[COLOR white]' + title + '[COLOR green] ['+ server + '][/COLOR]' , url = params.get(\"url\") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" print url\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" if data.startswith(\"udp\") == True or data.startswith(\"rtp\") == True:\n",
" # print \"udp\"\n",
" url = data\n",
" url = parse_url(url)\n",
" plugintools.log(\"url retornada= \"+url)\n",
" if cat != \"\": # Controlamos el caso de subcategoría de canales\n",
" if busqueda == 'search.txt':\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + titulo + '[COLOR red] [UDP][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR red] [UDP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" else:\n",
" if busqueda == 'search.txt':\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + titulo + '[COLOR red] [UDP][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + '[COLOR red] [UDP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" if data.startswith(\"mms\") == True or data.startswith(\"rtp\") == True:\n",
" # print \"udp\"\n",
" url = data\n",
" url = parse_url(url)\n",
" plugintools.log(\"url retornada= \"+url)\n",
" if cat != \"\": # Controlamos el caso de subcategoría de canales\n",
" if busqueda == 'search.txt':\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + titulo + '[COLOR red] [MMS][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR red] [MMS][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" else:\n",
" if busqueda == 'search.txt':\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + titulo + '[COLOR red] [MMS][/COLOR][I][COLOR lightgreen] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + '[COLOR red] [MMS][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
"\n",
" if data.startswith(\"plugin\") == True:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.strip()\n",
" title = title.replace(\"#EXTINF:-1,\", \"\")\n",
" url = data\n",
" url = url.strip()\n",
"\n",
" if url.find(\"youtube\") >= 0 :\n",
" if cat != \"\": \n",
" if busqueda == 'search.txt':\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR white] [You[COLOR red]Tube[/COLOR][COLOR white] Video][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = art + \"icon.png\" , fanart = art + 'fanart.jpg' , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR white] [You[COLOR red]Tube[/COLOR][COLOR white] Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" if busqueda == 'search.txt':\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white] ' + title + '[COLOR white] [You[COLOR red]Tube[/COLOR][COLOR white] Video][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = art + \"icon.png\" , fanart = art + 'fanart.jpg' , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white] ' + title + '[COLOR white] [You[COLOR red]Tube[/COLOR][COLOR white] Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
" \n",
" elif url.find(\"mode=1\") >= 0 :\n",
" if cat != \"\":\n",
" if busqueda == 'search.txt':\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR lightblue] [Acestream][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR lightblue] [Acestream][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" if busqueda == 'search.txt':\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + ' [COLOR lightblue] [Acestream][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + '[COLOR lightblue] [Acestream][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
" \n",
" elif url.find(\"mode=2\") >= 0 :\n",
" if cat != \"\":\n",
" if busqueda == 'search.txt':\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR darkorange] [Sopcast][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR darkorange] [Sopcast][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" if busqueda == 'search.txt':\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white] ' + title + ' [COLOR darkorange] [Sopcast][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white] ' + title + '[COLOR darkorange] [Sopcast][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
" elif data.startswith(\"magnet\") == True:\n",
" if cat != \"\":\n",
" if busqueda == 'search.txt':\n",
" url = urllib.quote_plus(data)\n",
" title = parser_title(title)\n",
" #plugin://plugin.video.stream/play/<URL_ENCODED_LINK>\n",
" url = 'plugin://plugin.video.stream/play/' + url\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR orangered] [Torrent][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" else:\n",
" #plugin://plugin.video.stream/play/<URL_ENCODED_LINK>\n",
" data = data.strip()\n",
" url = urllib.quote_plus(data).strip() \n",
" title = parser_title(title)\n",
" url = 'plugin://plugin.video.stream/play/' + url\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR orangered][Torrent][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" else:\n",
" if busqueda == 'search.txt':\n",
" #plugin://plugin.video.stream/play/<URL_ENCODED_LINK>\n",
" url = urllib.quote_plus(data)\n",
" url = 'plugin://plugin.video.stream/play/' + url \n",
" title = parser_title(title)\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + '[COLOR orangered] [Torrent][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" else:\n",
" title = parser_title(title)\n",
" data = data.strip()\n",
" url = urllib.quote_plus(data)\n",
" url = 'plugin://plugin.video.stream/play/' + url\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + ' [COLOR orangered][Torrent][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" \n",
" elif data.startswith(\"sop\") == True:\n",
" if cat != \"\":\n",
" if busqueda == 'search.txt':\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.replace(\"#EXTINF:-1,\", \"\")\n",
" # plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name='\n",
" url = url.strip()\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + '[COLOR darkorange] [Sopcast][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.replace(\"#EXTINF:-1,\", \"\")\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name='\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR darkorange][Sopcast][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" if busqueda == 'search.txt':\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.replace(\"#EXTINF:-1,\", \"\")\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name='\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + '[COLOR darkorange] [Sopcast][/COLOR][I][COLOR lightblue] (' + origen + ')[/COLOR][/I]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.replace(\"#EXTINF:-1,\", \"\")\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name='\n",
" plugintools.add_item( action = \"play\" , title = '[COLOR white]' + title + ' [COLOR darkorange][Sopcast][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue \n",
"\n",
" elif data.startswith(\"ace\") == True:\n",
" if cat != \"\":\n",
" if busqueda == 'search.txt':\n",
" # plugin://plugin.video.p2p-streams/?url=a55f96dd386b7722380802b6afffc97ff98903ac&mode=1&name=Sky+Sports+title\n",
" title = parser_title(title)\n",
" title = title.strip()\n",
" title_fixed = title.replace(\" \", \"+\")\n",
" url = data.replace(\"ace:\", \"\")\n",
" url = url.strip()\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name=' + title_fixed\n",
" plugintools.add_item(action=\"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR lightblue][Acestream][/COLOR] [COLOR lightblue][I](' + origen + ')[/COLOR][/I]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" title = parser_title(title)\n",
" print 'data',data\n",
" url = data.replace(\"ace:\", \"\")\n",
" url = url.strip()\n",
" print 'url',url\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name='\n",
" plugintools.add_item(action=\"play\" , title = '[COLOR red][I]' + cat + ' / [/I][/COLOR][COLOR white] ' + title + ' [COLOR lightblue][Acestream][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" if busqueda == 'search.txt':\n",
" # plugin://plugin.video.p2p-streams/?url=a55f96dd386b7722380802b6afffc97ff98903ac&mode=1&name=Sky+Sports+title\n",
" title = parser_title(title)\n",
" url = data.replace(\"ace:\", \"\")\n",
" url = url.strip()\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name='\n",
" plugintools.add_item(action=\"play\" , title = '[COLOR white]' + title + ' [COLOR lightblue][Acestream][/COLOR] [COLOR lightblue][I](' + origen + ')[/COLOR][/I]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" title = parser_title(title)\n",
" print 'data',data\n",
" url = data.replace(\"ace:\", \"\")\n",
" url = url.strip()\n",
" print 'url',url\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name='\n",
" plugintools.add_item(action=\"play\" , title = '[COLOR white]' + title + ' [COLOR lightblue][Acestream][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" continue \n",
" \n",
" # Youtube playlist & channel \n",
" elif data.startswith(\"yt\") == True:\n",
" if data.startswith(\"yt_playlist\") == True:\n",
" if busqueda == 'search.txt':\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.replace(\"#EXTINF:-1,\", \"\")\n",
" youtube_playlist = data.replace(\"yt_playlist(\", \"\")\n",
" youtube_playlist = youtube_playlist.replace(\")\", \"\")\n",
" plugintools.log(\"youtube_playlist= \"+youtube_playlist)\n",
" url = 'http://gdata.youtube.com/feeds/api/playlists/' + youtube_playlist\n",
" plugintools.add_item( action = \"youtube_videos\" , title = '[[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Playlist][/COLOR] [I][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" else:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.replace(\"#EXTINF:-1,\", \"\")\n",
" plugintools.log(\"title= \"+title)\n",
" youtube_playlist = data.replace(\"yt_playlist(\", \"\")\n",
" youtube_playlist = youtube_playlist.replace(\")\", \"\")\n",
" plugintools.log(\"youtube_playlist= \"+youtube_playlist) \n",
" url = 'http://gdata.youtube.com/feeds/api/playlists/' + youtube_playlist\n",
" plugintools.add_item( action = \"youtube_videos\" , title = '[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Playlist][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
" \n",
"\n",
" elif data.startswith(\"yt_channel\") == True:\n",
" if busqueda == 'search.txt':\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.replace(\"#EXTINF:-1,\", \"\")\n",
" youtube_channel = data.replace(\"yt_channel(\", \"\")\n",
" youtube_channel = youtube_channel.replace(\")\", \"\")\n",
" plugintools.log(\"youtube_user= \"+youtube_channel)\n",
" url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_channel + '/playlists?v=2&start-index=1&max-results=30'\n",
" plugintools.add_item( action = \"youtube_playlists\" , title = '[[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Channel][/COLOR] [I][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" else:\n",
" title = title.split('\"')\n",
" title = title[0]\n",
" title = title.replace(\"#EXTINF:-1,\", \"\")\n",
" plugintools.log(\"title= \"+title)\n",
" youtube_channel = data.replace(\"yt_channel(\", \"\")\n",
" youtube_channel = youtube_channel.replace(\")\", \"\")\n",
" youtube_channel = youtube_channel.strip() \n",
" url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_channel + '/playlists?v=2&start-index=1&max-results=30'\n",
" plugintools.log(\"url= \"+url)\n",
" plugintools.add_item( action = \"youtube_playlists\" , title = '[COLOR white]' + title + ' [COLOR red][You[COLOR white]Tube Channel][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" \n",
" elif data.startswith(\"m3u\") == True:\n",
" if busqueda == 'search.txt':\n",
" url = data.replace(\"m3u:\", \"\")\n",
" plugintools.add_item( action = \"getfile_http\" , title = title + ' [I][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" else:\n",
" url = data.replace(\"m3u:\", \"\")\n",
" plugintools.add_item( action = \"getfile_http\" , title = title + ' [COLOR orange][Lista [B]M3U[/B]][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
"\n",
" elif data.startswith(\"plx\") == True:\n",
" if busqueda == 'search.txt':\n",
" url = data.replace(\"plx:\", \"\")\n",
" # Se añade parámetro plot porque en las listas PLX no tengo en una función separada la descarga (FIX IT!)\n",
" plugintools.add_item( action = \"plx_items\" , plot = \"\" , title = title + ' [I][/COLOR][COLOR lightblue](' + origen + ')[/I][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" else:\n",
" url = data.replace(\"plx:\", \"\")\n",
" # Se añade parámetro plot porque en las listas PLX no tengo en una función separada la descarga (FIX IT!)\n",
" plugintools.add_item( action = \"plx_items\" , plot = \"\" , title = title + ' [COLOR orange][Lista [B]PLX[/B]][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False )\n",
" if saving_url == 1:\n",
" plugintools.log(\"URL= \"+url)\n",
" save_url(url, filename)\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" \n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" \n",
"\n",
" file.close()\n",
" if title == 'search.txt':\n",
" os.remove(tmp + title)\n",
"\n",
"\n",
" \n",
"def myplaylists_m3u (params): # Mis listas M3U\n",
" plugintools.log(\"[nec tv-0.1.0].myplaylists_m3u \"+repr(params))\n",
" thumbnail = params.get(\"thumbnail\")\n",
" plugintools.add_item(action=\"play\" , title = \"[COLOR red][B][Tutorial][/B][COLOR lightyellow]: [/COLOR][COLOR blue][I][Youtube][/I][/COLOR]\" , thumbnail = art + \"icon.png\" , url = \"plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=8i0KouM-4-U\" , folder = False , isPlayable = True )\n",
" plugintools.add_item(action=\"search_channel\" , title = \"[B][COLOR lightyellow]Buscador de canales[/COLOR][/B][COLOR lightblue][I] Nuevo![/I][/COLOR]\" , thumbnail = art + \"search.png\" , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
"\n",
" ficheros = os.listdir(playlists) # Lectura de archivos en carpeta /playlists. Cuidado con las barras inclinadas en Windows\n",
"\n",
" # Control paternal\n",
" pekes_no = plugintools.get_setting(\"pekes_no\")\n",
" \n",
" for entry in ficheros:\n",
" plot = entry.split(\".\") \n",
" plot = plot[0]\n",
" plugintools.log(\"entry= \"+entry)\n",
"\n",
" if pekes_no == \"true\" :\n",
" print \"Control paternal en marcha\"\n",
" if entry.find(\"XXX\") >= 0 :\n",
" plugintools.log(\"Activando control paternal...\")\n",
" \n",
" else: \n",
" if entry.endswith(\"plx\") == True: # Control para según qué extensión del archivo se elija thumbnail y función a ejecutar\n",
" entry = entry.replace(\".plx\", \"\")\n",
" plugintools.add_item(action=\"plx_items\" , plot = plot , title = '[COLOR white]' + entry + '[/COLOR][COLOR green][B][I].plx[/I][/B][/COLOR]' , url = playlists + entry , thumbnail = art + 'plx3.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" \n",
" if entry.endswith(\"p2p\") == True:\n",
" entry = entry.replace(\".p2p\", \"\")\n",
" plugintools.add_item(action=\"p2p_items\" , plot = plot , title = '[COLOR white]' + entry + '[COLOR blue][B][I].p2p[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'p2p.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" \n",
" if entry.endswith(\"m3u\") == True:\n",
" entry = entry.replace(\".m3u\", \"\")\n",
" plugintools.add_item(action=\"simpletv_items\" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
"\n",
" if entry.endswith(\"jsn\") == True:\n",
" entry = entry.replace(\".jsn\", \"\")\n",
" plugintools.add_item(action=\"json_items\" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" \n",
" else:\n",
" \n",
" if entry.endswith(\"plx\") == True: # Control para según qué extensión del archivo se elija thumbnail y función a ejecutar\n",
" entry = entry.replace(\".plx\", \"\")\n",
" plugintools.add_item(action=\"plx_items\" , plot = plot , title = '[COLOR white]' + entry + '[/COLOR][COLOR green][B][I].plx[/I][/B][/COLOR]' , url = playlists + entry , thumbnail = art + 'plx3.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" \n",
" if entry.endswith(\"p2p\") == True:\n",
" entry = entry.replace(\".p2p\", \"\")\n",
" plugintools.add_item(action=\"p2p_items\" , plot = plot , title = '[COLOR white]' + entry + '[COLOR blue][B][I].p2p[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'p2p.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" \n",
" if entry.endswith(\"m3u\") == True:\n",
" entry = entry.replace(\".m3u\", \"\")\n",
" plugintools.add_item(action=\"simpletv_items\" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
"\n",
" if entry.endswith(\"jsn\") == True:\n",
" entry = entry.replace(\".jsn\", \"\")\n",
" plugintools.add_item(action=\"json_items\" , plot = plot , title = '[COLOR white]' + entry + '[COLOR red][B][I].m3u[/I][/B][/COLOR]', url = playlists + entry , thumbnail = art + 'm3u7.png' , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
"\n",
"\n",
"\n",
" \n",
"\n",
"def playlists_m3u(params): # Biblioteca online\n",
" plugintools.log(\"[nec tv-0.1.0].playlists_m3u \"+repr(params))\n",
" data = plugintools.read( params.get(\"url\") )\n",
" name_channel = params.get(\"plot\")\n",
" pattern = '<name>'+name_channel+'(.*?)</channel>'\n",
" data = plugintools.find_single_match(data, pattern)\n",
" online = '[COLOR yellowgreen][I][Auto][/I][/COLOR]'\n",
" params[\"ext\"] = 'm3u'\n",
" plugintools.add_item( action=\"\" , title='[B][COLOR yellow]'+name_channel+'[/B][/COLOR] - [B][I][COLOR lightyellow]latinototal19@gmail.com [/COLOR][/B][/I]' , thumbnail= art + 'icon.png' , folder = False , isPlayable = False ) \n",
" subchannel = re.compile('<subchannel>([^<]+)<name>([^<]+)</name>([^<]+)<thumbnail>([^<]+)</thumbnail>([^<]+)<url>([^<]+)</url>([^<]+)</subchannel>').findall(data)\n",
" # Sustituir por una lista!!!\n",
" for biny, ciny, diny, winy, pixy, dixy, boxy in subchannel:\n",
" if ciny == \"Vcx7 IPTV\":\n",
" plugintools.add_item( action=\"getfile_http\" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" params[\"ext\"] = \"m3u\"\n",
" title = ciny\n",
" params[\"title\"]=title\n",
" elif ciny == \"Largo Barbate M3U\":\n",
" plugintools.add_item( action=\"getfile_http\" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" title = ciny\n",
" params[\"title\"]=title\n",
" elif ciny == \"XBMC Mexico\":\n",
" plugintools.add_item( action=\"getfile_http\" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" title = ciny\n",
" params[\"title\"]=title\n",
" elif ciny == \"allSat\":\n",
" plugintools.add_item( action=\"getfile_http\" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" title = ciny\n",
" params[\"title\"]=title\n",
" elif ciny == \"AND Wonder\":\n",
" plugintools.add_item( action=\"getfile_http\" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" title = ciny\n",
" params[\"title\"]=title\n",
" elif ciny == \"FenixTV\":\n",
" plugintools.add_item( action=\"getfile_http\" , plot = ciny , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' + online , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" title = ciny\n",
" params[\"title\"]=title\n",
" else:\n",
" plot = ciny.split(\"[\")\n",
" plot = plot[0]\n",
" plugintools.add_item( action=\"getfile_http\" , plot = plot , title = '[COLOR lightyellow]' + ciny + '[/COLOR] ' , url= dixy , thumbnail = art + winy , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
"\n",
"\n",
"\n",
" plugintools.log(\"[nec tv-0.1.0].playlists_m3u \"+repr(params))\n",
"\n",
" \n",
" \n",
"def getfile_http(params): # Download an M3U list, then call simpletv_items to list its items\n",
"    \"\"\"Download the M3U playlist referenced by params['url'] and list its entries.\n",
"\n",
"    Forces the extension to 'm3u' so getfile_url() stores the file with the\n",
"    right suffix, then delegates the listing to simpletv_items().\n",
"    \"\"\"\n",
"    plugintools.log(\"[nec tv-0.1.0].getfile_http \"+repr(params))\n",
"    # The URL travels inside params; the original extracted it into an unused local (removed).\n",
"    params[\"ext\"] = \"m3u\"\n",
"    getfile_url(params)\n",
"    simpletv_items(params)\n",
" \n",
" \n",
"def parse_url(url):\n",
"    \"\"\"Normalize a stream URL: trim whitespace and drop the rtmp raw-option prefix.\n",
"\n",
"    Returns the cleaned URL, or None (after logging) when the input is empty.\n",
"    \"\"\"\n",
"    if url == \"\":\n",
"        # Empty URL: just log it (an error dialog could be shown here instead)\n",
"        plugintools.log(\"error en url= \")\n",
"    else:\n",
"        cleaned = url.strip()\n",
"        return cleaned.replace(\"rtmp://$OPT:rtmp-raw=\", \"\")\n",
"\n",
" \n",
" \n",
"def getfile_url(params):\n",
"    \"\"\"Download a playlist (plx / m3u / p2p) into the local playlists folder.\n",
"\n",
"    The target filename is derived from params['title'] via parser_title()\n",
"    plus the extension in params['ext']; any value other than 'plx'/'m3u' is\n",
"    treated as 'p2p'. The body is fetched with urllib2 and, when the direct\n",
"    request fails, via plugintools.read_body_and_headers() with a browser\n",
"    User-Agent (some hosts reject the default one).\n",
"    \"\"\"\n",
"    plugintools.log(\"[nec tv-0.1.0].getfile_url \" +repr(params))\n",
"    ext = params.get(\"ext\")\n",
"    title = params.get(\"title\")\n",
"\n",
"    if ext == 'plx':\n",
"        filename = parser_title(title)\n",
"        params[\"plot\"]=filename\n",
"        filename = title + \".plx\" # Raw title + extension; re-parsed below by the endswith('plx') branch\n",
"    elif ext == 'm3u':\n",
"        # Strip formatting from the title so it can be used as the filename\n",
"        # (the original first read params['plot'] into filename and immediately overwrote it -- removed)\n",
"        filename = parser_title(title)\n",
"        filename = filename + \".m3u\"\n",
"    else:\n",
"        # Everything else is handled as p2p (the original had a no-op \"ext == 'p2p'\" comparison here)\n",
"        filename = parser_title(title)\n",
"        filename = filename + \".p2p\"\n",
"    \n",
"    if filename.endswith(\"plx\") == True :\n",
"        filename = parser_title(filename)\n",
"\n",
"    plugintools.log(\"filename= \"+filename)\n",
"    url = params.get(\"url\")\n",
"    plugintools.log(\"url= \"+url)\n",
"    \n",
"    try:\n",
"        response = urllib2.urlopen(url)\n",
"        body = response.read()\n",
"    except:\n",
"        # Fallback when the direct request fails: retry with a browser User-Agent\n",
"        request_headers=[]\n",
"        request_headers.append([\"User-Agent\",\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31\"])\n",
"        body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)\n",
"\n",
"    # Write the downloaded playlist to disk\n",
"    fh = open(playlists + filename, \"wb\")\n",
"    fh.write(body)\n",
"    fh.close()\n",
"\n",
"    # Sanity check that the file is readable -- and close it. The original leaked\n",
"    # this handle and built two dead 'lista_items' dicts that were never used.\n",
"    file = open(playlists + filename, \"r\")\n",
"    file.close()\n",
" \n",
"\n",
"\n",
"def header_xml(params):\n",
"    \"\"\"Parse the header of a playlist XML and show it as a banner row.\n",
"\n",
"    Extracts poster/fanart/message/description/thumbnail from the XML at\n",
"    params['url'], adds a non-playable header item when there is something\n",
"    to show, and returns the fanart URL in every case.\n",
"    \"\"\"\n",
"    plugintools.log(\"[nec tv-0.1.0].header_xml \"+repr(params))\n",
"\n",
"    url = params.get(\"url\")\n",
"    # (the original evaluated params.get(\"title\") here and discarded it -- removed)\n",
"    data = plugintools.read(url)\n",
"    author = plugintools.find_single_match(data, '<poster>(.*?)</poster>')\n",
"    author = author.strip()\n",
"    fanart = plugintools.find_single_match(data, '<fanart>(.*?)</fanart>')\n",
"    message = plugintools.find_single_match(data, '<message>(.*?)</message>')\n",
"    desc = plugintools.find_single_match(data, '<description>(.*?)</description>')\n",
"    thumbnail = plugintools.find_single_match(data, '<thumbnail>(.*?)</thumbnail>')\n",
"    \n",
"    if author != \"\":\n",
"        if message != \"\":\n",
"            plugintools.add_item(action=\"\" , plot = author , title = '[COLOR green][B]' + author + '[/B][/COLOR][I] ' + message + '[/I]', url = \"\" , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = False )\n",
"            return fanart\n",
"        else:\n",
"            plugintools.add_item(action=\"\" , plot = author , title = '[COLOR green][B]' + author + '[/B][/COLOR]', url = \"\" , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = False )\n",
"            return fanart\n",
"    else:\n",
"        if desc != \"\":\n",
"            plugintools.add_item(action=\"\" , plot = author , title = '[COLOR green][B]' + desc + '[/B][/COLOR]', url = \"\" , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = False )\n",
"            return fanart\n",
"        else:\n",
"            return fanart\n",
"\n",
"\n",
"def search_channel(params):\n",
" plugintools.log(\"[nec tv-0.1.0].search \" + repr(params))\n",
"\n",
" buscar = params.get(\"plot\")\n",
" # plugintools.log(\"buscar texto: \"+buscar)\n",
" if buscar == \"\":\n",
" last_search = plugintools.get_setting(\"last_search\")\n",
" texto = plugintools.keyboard_input(last_search)\n",
" plugintools.set_setting(\"last_search\",texto)\n",
" params[\"texto\"]=texto\n",
" texto = texto.lower()\n",
" cat = \"\"\n",
" if texto == \"\":\n",
" errormsg = plugintools.message(\"nec tv\",\"Por favor, introduzca el canal a buscar\")\n",
" return errormsg\n",
" \n",
" else:\n",
" texto = buscar\n",
" texto = texto.lower()\n",
" plugintools.log(\"texto a buscar= \"+texto)\n",
" cat = \"\"\n",
" \n",
" results = open(tmp + 'search.txt', \"wb\")\n",
" results.seek(0)\n",
" results.close()\n",
"\n",
" # Listamos archivos de la biblioteca local\n",
" ficheros = os.listdir(playlists) # Lectura de archivos en carpeta /playlists. Cuidado con las barras inclinadas en Windows\n",
" \n",
" for entry in ficheros:\n",
" if entry.endswith(\"m3u\") == True:\n",
" print \"Archivo tipo m3u\"\n",
" plot = entry.split(\".\")\n",
" plot = plot[0] # plot es la variable que recoge el nombre del archivo (sin extensión txt)\n",
" # Abrimos el primer archivo\n",
" filename = plot + '.m3u'\n",
" plugintools.log(\"Archivo M3U: \"+filename)\n",
" arch = open(playlists + filename, \"r\")\n",
" num_items = len(arch.readlines())\n",
" print num_items\n",
" i = 0 # Controlamos que no se salga del bucle while antes de que lea el último registro de la lista\n",
" arch.seek(0)\n",
" data = arch.readline()\n",
" data = data.strip()\n",
" plugintools.log(\"data linea= \"+data)\n",
" texto = texto.strip()\n",
" plugintools.log(\"data_antes= \"+data)\n",
" plugintools.log(\"texto a buscar= \"+texto)\n",
"\n",
" data = arch.readline()\n",
" data = data.strip()\n",
" i = i + 1 \n",
" while i <= num_items :\n",
" if data.startswith('#EXTINF:-1') == True:\n",
" data = data.replace('#EXTINF:-1,', \"\") # Ignoramos la primera parte de la línea\n",
" data = data.replace(\",\", \"\")\n",
" title = data.strip() # Ya tenemos el título\n",
" \n",
" if data.find('$ExtFilter=\"') >= 0:\n",
" data = data.replace('$ExtFilter=\"', \"\")\n",
"\n",
" if data.find(' $ExtFilter=\"') >= 0:\n",
" data = data.replace('$ExtFilter=\"', \"\")\n",
"\n",
" title = title.replace(\"-AZBOX*\", \"\")\n",
" title = title.replace(\"AZBOX *\", \"\") \n",
" \n",
" images = m3u_items(title)\n",
" print 'images',images\n",
" thumbnail = images[0]\n",
" fanart = images[1]\n",
" cat = images[2]\n",
" title = images[3]\n",
" plugintools.log(\"title= \"+title)\n",
" minus = title.lower() \n",
" data = arch.readline()\n",
" data = data.strip()\n",
" i = i + 1 \n",
"\n",
" if minus.find(texto) >= 0:\n",
" # if re.match(texto, title, re.IGNORECASE):\n",
" # plugintools.log(\"Concidencia hallada. Obtenemos url del canal: \" + texto)\n",
" if data.startswith(\"http\") == True:\n",
" url = data.strip()\n",
" if cat != \"\": # Controlamos el caso de subcategoría de canales\n",
" results = open(tmp + 'search.txt', \"a\")\n",
" results.write(\"#EXTINF:-1,\" + title + '\"' + filename + '\\n')\n",
" results.write(url + '\\n\\n')\n",
" results.close() \n",
" data = arch.readline()\n",
" i = i + 1 \n",
" continue\n",
" else:\n",
" results = open(tmp + 'search.txt', \"a\") \n",
" results.write(\"#EXTINF:-1,\" + title + '\"' + filename + '\\n')\n",
" results.write(url + '\\n\\n')\n",
" results.close()\n",
" data = arch.readline()\n",
" i = i + 1\n",
" continue\n",
" if data.startswith(\"rtmp\") == True:\n",
" url = data\n",
" url = parse_url(url)\n",
" if cat != \"\": # Controlamos el caso de subcategoría de canales\n",
" results = open(tmp + 'search.txt', \"a\")\n",
" results.write(\"#EXTINF:-1,\" + title + '\"' + filename + '\\n')\n",
" results.write(url + '\\n\\n')\n",
" results.close()\n",
" data = arch.readline()\n",
" i = i + 1\n",
" continue\n",
" else: \n",
" results = open(tmp + 'search.txt', \"a\")\n",
" results.write(\"#EXTINF:-1,\" + title + '\"' + filename + '\\n')\n",
" results.write(url + '\\n\\n')\n",
" results.close()\n",
" data = arch.readline()\n",
" i = i + 1\n",
" continue\n",
" if data.startswith(\"yt\") == True:\n",
" print \"CORRECTO\"\n",
" url = data\n",
" results = open(tmp + 'search.txt', \"a\")\n",
" results.write(\"#EXTINF:-1,\" + title + '\"' + filename + '\\n')\n",
" results.write(url + '\\n\\n')\n",
" results.close()\n",
" data = arch.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" \n",
" else:\n",
" data = arch.readline()\n",
" data = data.strip()\n",
" plugintools.log(\"data_buscando_title= \"+data)\n",
" i = i + 1\n",
" \n",
" else:\n",
" data = arch.readline()\n",
" data = data.strip()\n",
" plugintools.log(\"data_final_while= \"+data)\n",
" i = i + 1\n",
" continue\n",
" \n",
"\n",
"\n",
" # Listamos archivos de la biblioteca local\n",
" ficheros = os.listdir(playlists) # Lectura de archivos en carpeta /playlists. Cuidado con las barras inclinadas en Windows\n",
" \n",
" for entry in ficheros:\n",
" if entry.endswith('p2p') == True:\n",
" plot = entry.split(\".\")\n",
" plot = plot[0] # plot es la variable que recoge el nombre del archivo (sin extensión txt)\n",
" # Abrimos el primer archivo\n",
" plugintools.log(\"texto a buscar= \"+texto)\n",
" filename = plot + '.p2p'\n",
" arch = open(playlists + filename, \"r\")\n",
" num_items = len(arch.readlines())\n",
" plugintools.log(\"archivo= \"+filename)\n",
" i = 0 # Controlamos que no se salga del bucle while antes de que lea el último registro de la lista\n",
" arch.seek(0) \n",
" while i <= num_items:\n",
" data = arch.readline()\n",
" data = data.strip()\n",
" title = data\n",
" texto = texto.strip()\n",
" plugintools.log(\"linea a buscar title= \"+data)\n",
" i = i + 1\n",
"\n",
" if data.startswith(\"#\") == True:\n",
" data = arch.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" continue\n",
"\n",
" if data.startswith(\"default=\") == True:\n",
" data = arch.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" continue\n",
"\n",
" if data.startswith(\"art=\") == True:\n",
" data = arch.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" continue \n",
" \n",
" if data != \"\":\n",
" title = data.strip() # Ya tenemos el título\n",
" plugintools.log(\"title= \"+title)\n",
" minus = title.lower()\n",
" if minus.find(texto) >= 0:\n",
" plugintools.log(\"title= \"+title)\n",
" data = arch.readline()\n",
" i = i + 1\n",
" #print i\n",
" plugintools.log(\"linea a comprobar url= \"+data)\n",
" if data.startswith(\"sop\") == True:\n",
" # plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast\n",
" title_fixed = title.replace(\" \" , \"+\")\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name=' + title_fixed\n",
" url = url.strip()\n",
" results = open(tmp + 'search.txt', \"a\")\n",
" results.write(\"#EXTINF:-1,\" + title + '\"' + filename + '\\n') # Hay que cambiar esto! No puede agregar #EXTINF:-1, si no es una lista m3u\n",
" results.write(url + '\\n\\n')\n",
" results.close()\n",
" data = arch.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif data.startswith(\"magnet\") == True: \n",
" # magnet:?xt=urn:btih:6CE983D676F2643430B177E2430042E4E65427...\n",
" title_fixed = title.split('\"')\n",
" title = title_fixed[0]\n",
" plugintools.log(\"title magnet= \"+title)\n",
" url = data\n",
" plugintools.log(\"url magnet= \"+url)\n",
" results = open(tmp + 'search.txt', \"a\")\n",
" results.write(\"#EXTINF:-1,\" + title + '\"' + filename + '\\n')\n",
" results.write(url + '\\n\\n')\n",
" results.close()\n",
" data = arch.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" elif data.find(\"://\") == -1:\n",
" # plugin://plugin.video.p2p-streams/?url=a55f96dd386b7722380802b6afffc97ff98903ac&mode=1&name=Sky+Sports+title\n",
" title_fixed = title.split('\"')\n",
" title = title_fixed[0]\n",
" title_fixed = title.replace(\" \" , \"+\")\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=1&name=' + title_fixed\n",
" results = open(tmp + 'search.txt', \"a\")\n",
" results.write(\"#EXTINF:-1,\" + title + '\"' + filename + '\\n') # Hay que cambiar esto! No puede agregar #EXTINF:-1, si no es una lista m3u\n",
" results.write(url + '\\n\\n')\n",
" results.close()\n",
" data = arch.readline()\n",
" i = i + 1\n",
" continue\n",
"\n",
" else:\n",
" plugintools.log(\"no coinciden titulo y texto a buscar\")\n",
"\n",
" \n",
" for entry in ficheros:\n",
" if entry.endswith('plx') == True:\n",
" plot = entry.split(\".\")\n",
" plot = plot[0] # plot es la variable que recoge el nombre del archivo (sin extensión)\n",
" # Abrimos el primer archivo\n",
" plugintools.log(\"texto a buscar= \"+texto)\n",
" filename = plot + '.plx'\n",
" plugintools.log(\"archivo PLX: \"+filename)\n",
" arch = open(playlists + filename, \"r\")\n",
" num_items = len(arch.readlines())\n",
" print num_items\n",
" i = 0\n",
" arch.seek(0)\n",
" while i <= num_items:\n",
" data = arch.readline()\n",
" data = data.strip() \n",
" i = i + 1\n",
" print i\n",
" \n",
" if data.startswith(\"#\") == True:\n",
" continue\n",
"\n",
" if (data == 'type=video') or (data == 'type=audio') == True:\n",
" data = arch.readline()\n",
" i = i + 1\n",
" print i\n",
" data = data.replace(\"name=\", \"\")\n",
" data = data.strip()\n",
" title = data\n",
" minus = title.lower()\n",
" if minus.find(texto) >= 0:\n",
" plugintools.log(\"Título coincidente= \"+title)\n",
" data = arch.readline()\n",
" plugintools.log(\"Siguiente linea= \"+data)\n",
" i = i + 1\n",
" print i\n",
" print \"Analizamos...\"\n",
" while data <> \"\" :\n",
" if data.startswith(\"thumb\") == True:\n",
" data = arch.readline()\n",
" plugintools.log(\"data_plx= \"+data)\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" \n",
" if data.startswith(\"date\") == True:\n",
" data = arch.readline()\n",
" plugintools.log(\"data_plx= \"+data)\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" \n",
" if data.startswith(\"background\") == True:\n",
" data = arch.readline()\n",
" plugintools.log(\"data_plx= \"+data)\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" \n",
" if data.startswith(\"URL\") == True:\n",
" data = data.replace(\"URL=\", \"\")\n",
" data = data.strip()\n",
" url = data\n",
" parse_url(url)\n",
" plugintools.log(\"URL= \"+url)\n",
" results = open(tmp + 'search.txt', \"a\")\n",
" results.write(\"#EXTINF:-1,\" + title + '\"' + filename + '\\n')\n",
" results.write(url + '\\n\\n')\n",
" results.close()\n",
" data = arch.readline()\n",
" i = i + 1\n",
" break \n",
"\n",
" \n",
"\n",
" \n",
" arch.close()\n",
" results.close()\n",
" params[\"plot\"] = 'search' # Pasamos a la lista de variables (params) el valor del archivo de resultados para que lo abra la función simpletv_items\n",
" params['texto']= texto # Agregamos al diccionario una nueva variable que contiene el texto a buscar\n",
" simpletv_items(params)\n",
" \n",
"\n",
"\n",
"\n",
"def agendatv(params):\n",
" plugintools.log(\"[nec tv-0.1.0].agendatv \"+repr(params))\n",
"\n",
" hora_partidos = []\n",
" lista_equipos=[]\n",
" campeonato=[]\n",
" canales=[]\n",
"\n",
" url = params.get(\"url\") \n",
" data = plugintools.read(url)\n",
" plugintools.log(\"data= \"+data)\n",
"\t \n",
" matches = plugintools.find_multiple_matches(data,'<tr>(.*?)</tr>')\n",
" horas = plugintools.find_multiple_matches(data, 'color=#990000>(.*?)</td>')\n",
" txt = plugintools.find_multiple_matches(data, 'color=\"#000099\"><b>(.*?)</td>')\n",
" tv = plugintools.find_multiple_matches(data, '<td align=\"left\"><font face=\"Verdana, Arial, Helvetica, sans-serif\" size=\"1\" ><b>([^<]+)</b></font></td>')\n",
"\n",
" # <b><a href=\"indexf.php?comp=Súper Final Argentino\">Súper Final Argentino </td> \n",
" for entry in matches:\n",
" torneo = plugintools.find_single_match(entry, '<a href=(.*?)\">')\n",
" torneo = torneo.replace(\" \", \"\")\n",
" torneo = torneo.replace(\"indexf.php?comp=\", \"\")\n",
" torneo = torneo.replace('>', \"\")\n",
" torneo = torneo.replace('\"', \"\")\n",
" torneo = torneo.replace(\"\\n\", \"\")\n",
" torneo = torneo.strip()\n",
" torneo = torneo.replace('\\xfa', 'ú')\n",
" torneo = torneo.replace('\\xe9', 'é')\n",
" torneo = torneo.replace('\\xf3', 'ó')\n",
" torneo = torneo.replace('\\xfa', 'ú')\n",
" torneo = torneo.replace('\\xaa', 'ª')\n",
" torneo = torneo.replace('\\xe1', 'á')\n",
" torneo = torneo.replace('\\xf1', 'ñ')\n",
" torneo = torneo.replace('indexuf.php?comp=', \"\")\n",
" torneo = torneo.replace('indexfi.php?comp=', \"\")\n",
" plugintools.log(\"string encoded= \"+torneo)\n",
" if torneo != \"\":\n",
" plugintools.log(\"torneo= \"+torneo)\n",
" campeonato.append(torneo) \n",
"\n",
" # ERROR! Hay que añadir las jornadas, tal como estaba antes!!\n",
"\n",
" # Vamos a crear dos listas; una de los equipos que se enfrentan cada partido y otra de las horas de juego\n",
" \n",
" for dato in txt:\n",
" lista_equipos.append(dato)\n",
" \n",
" for tiempo in horas:\n",
" hora_partidos.append(tiempo)\n",
"\n",
" # <td align=\"left\"><font face=\"Verdana, Arial, Helvetica, sans-serif\" size=\"1\" ><b> Canal + Fútbol</b></font></td>\n",
" # <td align=\"left\"><font face=\"Verdana, Arial, Helvetica, sans-serif\" size=\"1\" ><b> IB3</b></font></td>\n",
"\n",
" for kanal in tv:\n",
" kanal = kanal.replace(\" \", \"\")\n",
" kanal = kanal.strip()\n",
" kanal = kanal.replace('\\xfa', 'ú')\n",
" kanal = kanal.replace('\\xe9', 'é')\n",
" kanal = kanal.replace('\\xf3', 'ó')\n",
" kanal = kanal.replace('\\xfa', 'ú')\n",
" kanal = kanal.replace('\\xaa', 'ª')\n",
" kanal = kanal.replace('\\xe1', 'á')\n",
" kanal = kanal.replace('\\xf1', 'ñ')\n",
" canales.append(kanal)\n",
"\n",
" \n",
" print lista_equipos\n",
" print hora_partidos # Casualmente en esta lista se nos ha añadido los días de partido\n",
" print campeonato\n",
" print canales\n",
" \n",
" i = 0 # Contador de equipos\n",
" j = 0 # Contador de horas\n",
" k = 0 # Contador de competición\n",
" max_equipos = len(lista_equipos) - 2\n",
" print max_equipos\n",
" for entry in matches:\n",
" while j <= max_equipos:\n",
" # plugintools.log(\"entry= \"+entry)\n",
" fecha = plugintools.find_single_match(entry, 'color=#990000><b>(.*?)</b></td>')\n",
" fecha = fecha.replace(\"á\", \"á\")\n",
" fecha = fecha.strip()\n",
" gametime = hora_partidos[i]\n",
" gametime = gametime.replace(\"<b>\", \"\")\n",
" gametime = gametime.replace(\"</b>\", \"\")\n",
" gametime = gametime.strip()\n",
" gametime = gametime.replace('é', 'é')\n",
" gametime = gametime.replace('á', 'á')\n",
" gametime = gametime.replace('é', 'é')\n",
" gametime = gametime.replace('á', 'á') \n",
" print gametime.find(\":\")\n",
" if gametime.find(\":\") == 2:\n",
" i = i + 1\n",
" #print i\n",
" local = lista_equipos[j]\n",
" local = local.strip()\n",
" local = local.replace('\\xfa', 'ú')\n",
" local = local.replace('\\xe9', 'é')\n",
" local = local.replace('\\xf3', 'ó')\n",
" local = local.replace('\\xfa', 'ú')\n",
" local = local.replace('\\xaa', 'ª')\n",
" local = local.replace('\\xe1', 'á')\n",
" local = local.replace('\\xf1', 'ñ')\n",
" j = j + 1\n",
" print j\n",
" visitante = lista_equipos[j]\n",
" visitante = visitante.strip()\n",
" visitante = visitante.replace('\\xfa', 'ú')\n",
" visitante = visitante.replace('\\xe9', 'é')\n",
" visitante = visitante.replace('\\xf3', 'ó')\n",
" visitante = visitante.replace('\\xfa', 'ú')\n",
" visitante = visitante.replace('\\xaa', 'ª')\n",
" visitante = visitante.replace('\\xe1', 'á')\n",
" visitante = visitante.replace('\\xf1', 'ñ')\n",
" local = local.replace('é', 'é')\n",
" local = local.replace('á', 'á') \n",
" j = j + 1\n",
" print j\n",
" tipo = campeonato[k]\n",
" channel = canales[k]\n",
" channel = channel.replace('\\xfa', 'ú')\n",
" channel = channel.replace('\\xe9', 'é')\n",
" channel = channel.replace('\\xf3', 'ó')\n",
" channel = channel.replace('\\xfa', 'ú')\n",
" channel = channel.replace('\\xaa', 'ª')\n",
" channel = channel.replace('\\xe1', 'á')\n",
" channel = channel.replace('\\xf1', 'ñ')\n",
" channel = channel.replace('\\xc3\\xba', 'ú')\n",
" channel = channel.replace('Canal +', 'Canal+')\n",
" title = '[B][COLOR khaki]' + tipo + ':[/B][/COLOR] ' + '[COLOR lightyellow]' + '(' + gametime + ')[COLOR white] ' + local + ' vs ' + visitante + '[/COLOR][COLOR lightblue][I] (' + channel + ')[/I][/COLOR]'\n",
" plugintools.add_item(plot = channel , action=\"contextMenu\", title=title , url = \"\", fanart = art + 'agendatv.jpg', thumbnail = art + 'icon.png' , folder = True, isPlayable = False)\n",
" # diccionario[clave] = valor\n",
" plugintools.log(\"channel= \"+channel)\n",
" params[\"plot\"] = channel\n",
" # plugintools.add_item(plot = channel , action = \"search_channel\", title = '[COLOR lightblue]' + channel + '[/COLOR]', url= \"\", thumbnail = art + 'icon.png', fanart = fanart , folder = True, isPlayable = False)\n",
" k = k + 1\n",
" print k\n",
" plugintools.log(\"title= \"+title)\n",
" else:\n",
" plugintools.add_item(action=\"\", title='[B][COLOR red]' + gametime + '[/B][/COLOR]', thumbnail = art + 'icon.png' , fanart = art + 'agendatv.jpg' , folder = True, isPlayable = False)\n",
" i = i + 1\n",
" \n",
"\n",
"\n",
"def encode_string(url):\n",
"    \"\"\"Return a copy of *url* with latin-1 accented/special characters mapped to ASCII.\n",
"\n",
"    Ordinal/degree signs and ':' are removed entirely. The input string is\n",
"    not modified; the translated copy is returned.\n",
"    \"\"\"\n",
"    # Character translation table (duplicate keys of the original literal removed)\n",
"    d = { '\\xc1':'A',\n",
"          '\\xc9':'E',\n",
"          '\\xcd':'I',\n",
"          '\\xd3':'O',\n",
"          '\\xda':'U',\n",
"          '\\xdc':'U',\n",
"          '\\xd1':'N',\n",
"          '\\xc7':'C',\n",
"          '\\xed':'i',\n",
"          '\\xf3':'o',\n",
"          '\\xf1':'n',\n",
"          '\\xe7':'c',\n",
"          '\\xba':'',\n",
"          '\\xb0':'',\n",
"          '\\x3a':'',\n",
"          '\\xe1':'a',\n",
"          '\\xe2':'a',\n",
"          '\\xe3':'a',\n",
"          '\\xe4':'a',\n",
"          '\\xe5':'a',\n",
"          '\\xe8':'e',\n",
"          '\\xe9':'e',\n",
"          '\\xea':'e',\n",
"          '\\xeb':'e',\n",
"          '\\xec':'i',\n",
"          '\\xee':'i',\n",
"          '\\xef':'i',\n",
"          '\\xf2':'o',\n",
"          '\\xf4':'o',\n",
"          '\\xf5':'o',\n",
"          '\\xf0':'o',\n",
"          '\\xf9':'u',\n",
"          '\\xfa':'u',\n",
"          '\\xfb':'u',\n",
"          '\\xfc':'u'\n",
"          }\n",
"    \n",
"    nueva_cadena = url\n",
"    for c in d.keys():\n",
"        plugintools.log(\"caracter= \"+c)\n",
"        nueva_cadena = nueva_cadena.replace(c,d[c])\n",
"\n",
"    # The original built an unused utf-8 encoded copy here ('auxiliar'), which could\n",
"    # raise UnicodeDecodeError on byte strings with unmapped high bytes -- removed.\n",
"    return nueva_cadena\n",
"\n",
"\n",
"\n",
"def plx_items(params):\n",
" plugintools.log(\"[nec tv-0.1.0].plx_items\" +repr(params))\n",
"\n",
" fanart = \"\"\n",
" thumbnail = \"\"\n",
"\n",
" # Control para elegir el título (plot, si formateamos el título / title , si no existe plot)\n",
" if params.get(\"plot\") == \"\":\n",
" title = params.get(\"title\").strip() + '.plx'\n",
" title = parser_title(title)\n",
" title = title.strip()\n",
" filename = title\n",
" params[\"plot\"]=filename\n",
" params[\"ext\"] = 'plx'\n",
" getfile_url(params)\n",
" title = params.get(\"title\")\n",
" else:\n",
" title = params.get(\"plot\")\n",
" title = title.strip()\n",
" title = parser_title(title) \n",
" plugintools.log(\"Lectura del archivo PLX\")\n",
"\n",
" title = title.replace(\" .plx\", \".plx\")\n",
" title = title.strip()\n",
" file = open(playlists + parser_title(title) + '.plx', \"r\")\n",
" file.seek(0)\n",
" num_items = len(file.readlines())\n",
" print num_items\n",
" file.seek(0) \n",
" \n",
" # Lectura del título y fanart de la lista\n",
" background = art + 'fanart.jpg'\n",
" logo = art + 'plx3.png'\n",
" file.seek(0)\n",
" data = file.readline()\n",
" while data <> \"\": \n",
" plugintools.log(\"data= \"+data)\n",
" if data.startswith(\"background=\") == True:\n",
" data = data.replace(\"background=\", \"\")\n",
" background = data.strip()\n",
" plugintools.log(\"background= \"+background)\n",
" if background == \"\":\n",
" background = params.get(\"extra\")\n",
" if background == \"\":\n",
" background = art + 'fanart.jpg'\n",
" \n",
" data = file.readline()\n",
" continue\n",
"\n",
" if data.startswith(\"title=\") == True:\n",
" name = data.replace(\"title=\", \"\")\n",
" name = name.strip()\n",
" plugintools.log(\"name= \"+name)\n",
" if name == \"Select sort order for this list\":\n",
" name = \"Seleccione criterio para ordenar ésta lista... \" \n",
" data = file.readline()\n",
" continue\n",
"\n",
" if data.startswith(\"logo=\") == True:\n",
" data = data.replace(\"logo=\", \"\")\n",
" logo = data.strip()\n",
" plugintools.log(\"logo= \"+logo)\n",
" title = parser_title(title)\n",
" if thumbnail == \"\":\n",
" thumbnail = art + 'plx3.png'\n",
"\n",
" plugintools.add_item(action=\"\" , title = '[COLOR lightyellow][B][I]playlist / '+ title + '[/B][/I][/COLOR]', url = playlists + title , thumbnail = logo , fanart = background , folder = False , isPlayable = False)\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" plugintools.add_item(action=\"\" , title = '[I][B]' + name + '[/B][/I]' , url = \"\" , thumbnail = logo , fanart = background , folder = False , isPlayable = False) \n",
" \n",
" data = file.readline()\n",
" break\n",
"\n",
" else:\n",
" data = file.readline()\n",
"\n",
" \n",
" try: \n",
" data = file.readline()\n",
" plugintools.log(\"data= \"+data)\n",
" if data.startswith(\"background=\") == True:\n",
" data = data.replace(\"background=\", \"\")\n",
" data = data.strip()\n",
" fanart = data\n",
" background = fanart\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" else:\n",
" # data = file.readline()\n",
" if data.startswith(\"background=\") == True:\n",
" print \"Archivo plx!\"\n",
" data = data.replace(\"background=\", \"\")\n",
" fanart = data.strip()\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" else:\n",
" if data.startswith(\"title=\") == True:\n",
" name = data.replace(\"title=\", \"\")\n",
" name = name.strip()\n",
" plugintools.log(\"name= \"+name)\n",
" except:\n",
" plugintools.log(\"ERROR: Unable to load PLX file\")\n",
"\n",
"\n",
" data = file.readline()\n",
" try:\n",
" if data.startswith(\"title=\") == True:\n",
" data = data.replace(\"title=\", \"\")\n",
" name = data.strip() \n",
" plugintools.log(\"title= \"+title)\n",
" plugintools.add_item(action=\"\" , title = '[COLOR lightyellow][B][I]playlist / '+ title +'[/I][/B][/COLOR]' , url = playlists + title , thumbnail = logo , fanart = fanart , folder = False , isPlayable = False)\n",
" plugintools.add_item(action=\"\" , title = '[I][B]' + name + '[/B][/I]' , url = \"\" , thumbnail = art + \"icon.png\" , fanart = fanart , folder = False , isPlayable = False)\n",
" except:\n",
" plugintools.log(\"Unable to read PLX title\")\n",
"\n",
" \n",
" # Lectura de items\n",
"\n",
" i = 0\n",
" file.seek(0)\n",
" while i <= num_items:\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" \n",
" if data.startswith(\"#\") == True:\n",
" continue\n",
" elif data.startswith(\"rating\") == True:\n",
" continue\n",
" elif data.startswith(\"description\") == True:\n",
" continue\n",
"\n",
" if (data == 'type=comment') == True:\n",
" data = file.readline()\n",
" i = i + 1\n",
" print i\n",
" \n",
" while data <> \"\" :\n",
" if data.startswith(\"name\") == True:\n",
" title = data.replace(\"name=\", \"\")\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" \n",
" elif data.startswith(\"thumb\") == True:\n",
" data = data.replace(\"thumb=\", \"\")\n",
" data = data.strip()\n",
" thumbnail = data\n",
" if thumbnail == \"\":\n",
" thumbnail = logo\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" \n",
" elif data.startswith(\"background\") == True:\n",
" data = data.replace(\"background=\", \"\")\n",
" fanart = data.strip()\n",
" if fanart == \"\":\n",
" fanart = background\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" \n",
" plugintools.add_item(action=\"\", title = title , url = \"\", thumbnail = thumbnail , fanart = fanart , folder = False, isPlayable = False)\n",
" \n",
" if (data == 'type=video') or (data == 'type=audio') == True:\n",
" data = file.readline()\n",
" i = i + 1\n",
" print i\n",
" \n",
" while data <> \"\" :\n",
" if data.startswith(\"#\") == True:\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" elif data.startswith(\"description\") == True: \n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" elif data.startswith(\"rating\") == True:\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" elif data.startswith(\"name\") == True:\n",
" data = data.replace(\"name=\", \"\")\n",
" data = data.strip()\n",
" title = data\n",
" if title == \"[COLOR=FF00FF00]by user-assigned order[/COLOR]\" :\n",
" title = \"Seleccione criterio para ordenar ésta lista... \"\n",
"\n",
" if title == \"by user-assigned order\" :\n",
" title = \"Según se han agregado en la lista\"\n",
" \n",
" if title == \"by date added, oldest first\" :\n",
" title = \"Por fecha de agregación, las más antiguas primero\"\n",
" \n",
" if title == \"by date added, newest first\" :\n",
" title = \"Por fecha de agregación, las más nuevas primero\"\n",
" \n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" elif data.startswith(\"thumb\") == True:\n",
" data = data.replace(\"thumb=\", \"\")\n",
" data = data.strip()\n",
" thumbnail = data\n",
" if thumbnail == \"\":\n",
" thumbnail = logo\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" elif data.startswith(\"date\") == True:\n",
" data = file.readline()\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" elif data.startswith(\"background\") == True:\n",
" data = data.replace(\"background=\", \"\")\n",
" fanart = data.strip()\n",
" if fanart == \"\":\n",
" fanart = background\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" \n",
" elif data.startswith(\"URL\") == True:\n",
" # Control para el caso de que no se haya definido fanart en cada entrada de la lista => Se usa el fanart general\n",
" if fanart == \"\":\n",
" fanart = background\n",
" data = data.replace(\"URL=\", \"\")\n",
" data = data.strip()\n",
" url = data\n",
" parse_url(url)\n",
" if url.startswith(\"yt_channel\") == True:\n",
" youtube_channel = url.replace(\"yt_channel(\", \"\")\n",
" youtube_channel = youtube_channel.replace(\")\", \"\")\n",
" url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_channel + '/playlists?v=2&start-index=1&max-results=30'\n",
" plugintools.add_item(action=\"youtube_playlists\" , title = title + ' [[COLOR red]You[COLOR white]tube Channel][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False)\n",
" break\n",
" \n",
" elif url.startswith(\"yt_playlist\") == True:\n",
" youtube_playlist = url.replace(\"yt_playlist(\", \"\")\n",
" youtube_playlist = youtube_playlist.replace(\")\", \"\")\n",
" plugintools.log(\"youtube_playlist= \"+youtube_playlist)\n",
" url = 'http://gdata.youtube.com/feeds/api/playlists/' + youtube_playlist + '?v=2'\n",
" plugintools.add_item( action = \"youtube_videos\" , title = title + ' [COLOR red][You[COLOR white]tube Playlist][/COLOR] [I][COLOR lightblue][/I][/COLOR]', url = url , thumbnail = art + \"icon.png\" , fanart = art + 'fanart.jpg' , folder = True , isPlayable = False )\n",
" data = file.readline()\n",
" i = i + 1\n",
" break\n",
" # Sintaxis yt(...) a extinguir pero mantengo por Darío:\n",
" elif url.startswith(\"yt\") == True:\n",
" url = url.replace(\"yt(\", \"\")\n",
" youtube_user = url.replace(\")\", \"\")\n",
" url = 'http://gdata.youtube.com/feeds/api/users/' + youtube_user + '/playlists?v=2&start-index=1&max-results=30'\n",
" plugintools.log(\"URL= \"+url)\n",
" plugintools.log(\"FANART = \"+fanart) \n",
" plugintools.add_item(action=\"youtube_playlists\" , title = title + ' [COLOR red][You[COLOR white]tube Playlist][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False)\n",
" break\n",
"\n",
" elif url.startswith(\"serie\") == True:\n",
" url = url.replace(\"serie:\", \"\") \n",
" plugintools.log(\"URL= \"+url)\n",
" plugintools.log(\"FANART = \"+fanart) \n",
" plugintools.add_item(action=\"seriecatcher\" , title = title + ' [COLOR purple][Serie online][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , extra = fanart , folder = True , isPlayable = False)\n",
" break \n",
"\n",
" elif url.startswith(\"http\") == True:\n",
" if url.find(\"allmyvideos\") >= 0:\n",
" plugintools.add_item(action=\"allmyvideos\" , title = title + ' [COLOR lightyellow][Allmyvideos][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" plugintools.log(\"URL= \"+url)\n",
" break\n",
" \n",
" elif url.find(\"streamcloud\") >= 0:\n",
" plugintools.add_item(action=\"streamcloud\" , title = title + ' [COLOR lightskyblue][Streamcloud][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" plugintools.log(\"URL= \"+url)\n",
" plugintools.log(\"FANART = \"+fanart) \n",
" break\n",
" \n",
" elif url.find(\"played.to\") >= 0:\n",
" plugintools.add_item(action=\"playedto\" , title = title + ' [COLOR lavender][Played.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" plugintools.log(\"URL= \"+url)\n",
" plugintools.log(\"FANART = \"+fanart)\n",
" break\n",
" \n",
" elif url.find(\"vidspot\") >= 0:\n",
" plugintools.add_item(action=\"vidspot\" , title = title + ' [COLOR palegreen][Vidspot][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" plugintools.log(\"URL= \"+url)\n",
" plugintools.log(\"FANART = \"+fanart)\n",
" break\n",
" \n",
" elif url.find(\"vk.com\") >= 0:\n",
" plugintools.add_item(action=\"vk\" , title = title + ' [COLOR royalblue][Vk][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" plugintools.log(\"URL= \"+url)\n",
" plugintools.log(\"FANART = \"+fanart)\n",
" break\n",
"\n",
" if url.find(\"nowvideo\") >= 0:\n",
" plugintools.add_item(action=\"nowvideo\" , title = title + ' [COLOR red][Nowvideo][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" plugintools.log(\"URL= \"+url)\n",
" break\n",
"\n",
" if url.find(\"tumi.tv\") >= 0:\n",
" plugintools.add_item(action=\"tumi\" , title = title + ' [COLOR forestgreen][Tumi][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" plugintools.log(\"URL= \"+url)\n",
" break\n",
"\n",
" if url.find(\"streamin.to\") >= 0:\n",
" plugintools.add_item(action=\"streaminto\" , title = title + ' [COLOR forestgreen][streamin.to][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" plugintools.log(\"URL= \"+url)\n",
" break \n",
"\n",
" elif url.endswith(\"flv\") == True:\n",
" plugintools.log(\"URL= \"+url)\n",
" plugintools.log(\"FANART = \"+fanart) \n",
" plugintools.add_item( action = \"play\" , title = title + ' [COLOR cyan][Flash][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" break\n",
"\n",
" elif url.endswith(\"m3u8\") == True:\n",
" plugintools.log(\"URL= \"+url)\n",
" plugintools.log(\"FANART = \"+fanart) \n",
" plugintools.add_item( action = \"play\" , title = title + ' [COLOR purple][m3u8][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" break\n",
"\n",
" elif url.find(\"youtube.com\") >= 0:\n",
" plugintools.log(\"URL= \"+url)\n",
" plugintools.log(\"FANART = \"+fanart)\n",
" videoid = url.replace(\"https://www.youtube.com/watch?v=\", \"\")\n",
" url = 'plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=' + videoid\n",
" plugintools.add_item( action = \"play\" , title = title + ' [[COLOR red]You[COLOR white]tube Video][/COLOR]', url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" break \n",
" \n",
" else:\n",
" plugintools.log(\"URL= \"+url)\n",
" plugintools.add_item( action = \"play\" , title = title + ' [COLOR white][HTTP][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" break\n",
" \n",
" elif url.startswith(\"rtmp\") == True:\n",
" params[\"url\"] = url\n",
" server_rtmp(params) \n",
" server = params.get(\"server\") \n",
" url = params.get(\"url\") \n",
" plugintools.add_item( action = \"launch_rtmp\" , title = title + '[COLOR green] [' + server + '][/COLOR]' , url = params.get(\"url\") , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" break\n",
" \n",
" elif url.startswith(\"plugin\") == True:\n",
" if url.find(\"plugin.video.youtube\") >= 0:\n",
" plugintools.log(\"URL= \"+url) \n",
" plugintools.add_item( action = \"play\" , title = title + ' [COLOR white] [[COLOR red]You[COLOR white]tube Video][/COLOR][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
" break\n",
" if url.find(\"plugin.video.p2p-streams\") >= 0:\n",
" if url.find(\"mode=1\") >= 0:\n",
" title = parser_title(title)\n",
" url = url.strip()\n",
" plugintools.add_item(action=\"play\" , title = title + ' [COLOR lightblue][Acestream][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" elif url.find(\"mode=2\") >= 0:\n",
" title = parser_title(title)\n",
" url = url.strip()\n",
" plugintools.add_item(action=\"play\" , title = title_fixed + ' [COLOR lightblue][Sopcast][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" \n",
" elif url.startswith(\"sop\") == True:\n",
" # plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast\n",
" title = parser_title(title)\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=2&name='\n",
" url = url.strip()\n",
" plugintools.add_item(action=\"play\" , title = title + ' [COLOR lightgreen][Sopcast][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" #print i\n",
" continue\n",
"\n",
" elif url.startswith(\"ace\") == True:\n",
" title = parser_title(title)\n",
" url = url.replace(\"ace:\", \"\")\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + url + '&mode=1&name='\n",
" url = url.strip()\n",
" plugintools.add_item(action=\"play\" , title = title + ' [COLOR lightblue][Acestream][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" #print i\n",
" continue \n",
" \n",
" elif url.startswith(\"magnet\") >= 0:\n",
" url = urllib.quote_plus(data)\n",
" title = parser_title(title)\n",
" url = 'plugin://plugin.video.pulsar/play?uri=' + url\n",
" plugintools.add_item(action=\"play\" , title = title + ' [COLOR orangered][Torrent][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
"\n",
" \n",
" else:\n",
" plugintools.add_item(action=\"play\" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" plugintools.log(\"URL = \"+url)\n",
" break \n",
" \n",
" elif data == \"\" : \n",
" break\n",
" else:\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" \n",
" if (data == 'type=playlist') == True:\n",
" # Control si no se definió fanart en cada entrada de la lista => Se usa fanart global de la lista\n",
" if fanart == \"\":\n",
" fanart = background\n",
" data = file.readline()\n",
" i = i + 1\n",
" print i\n",
" while data <> \"\" : \n",
" if data.startswith(\"name\") == True :\n",
" data = data.replace(\"name=\", \"\")\n",
" title = data.strip() \n",
" if title == '>>>' :\n",
" title = title.replace(\">>>\", \"[I][COLOR lightyellow]Siguiente[/I][/COLOR]\")\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" \n",
" elif title == '<<<' :\n",
" title = title.replace(\"<<<\", \"[I][COLOR lightyellow]Anterior[/I][/COLOR]\")\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
"\n",
" elif title.find(\"Sorted by user-assigned order\") >= 0:\n",
" title = \"[I][COLOR lightyellow]Ordenar listas por...[/I][/COLOR]\"\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" \n",
" elif title.find(\"Sorted A-Z\") >= 0:\n",
" title = \"[I][COLOR lightyellow][COLOR lightyellow]De la A a la Z[/I][/COLOR]\"\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1 \n",
" \n",
" elif title.find(\"Sorted Z-A\") >= 0:\n",
" title = \"[I][COLOR lightyellow]De la Z a la A[/I][/COLOR]\"\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1 \n",
"\n",
" elif title.find(\"Sorted by date added, newest first\") >= 0:\n",
" title = \"Ordenado por: Las + recientes primero...\"\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1 \n",
"\n",
" elif title.find(\"Sorted by date added, oldest first\") >= 0:\n",
" title = \"Ordenado por: Las + antiguas primero...\"\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1 \n",
"\n",
" elif title.find(\"by user-assigned order\") >= 0:\n",
" title = \"[COLOR lightyellow]Ordenar listas por...[/COLOR]\"\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1 \n",
"\n",
" elif title.find(\"by date added, newest first\") >= 0 :\n",
" title = \"Las + recientes primero...\"\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" elif title.find(\"by date added, oldest first\") >= 0 :\n",
" title = \"Las + antiguas primero...\"\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" \n",
" elif data.startswith(\"thumb\") == True:\n",
" data = data.replace(\"thumb=\", \"\")\n",
" data = data.strip()\n",
" thumbnail = data\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" \n",
" elif data.startswith(\"URL\") == True:\n",
" data = data.replace(\"URL=\", \"\")\n",
" data = data.strip()\n",
" url = data\n",
" parse_url(url)\n",
" if url.startswith(\"m3u\") == True:\n",
" url = url.replace(\"m3u:\", \"\")\n",
" plugintools.add_item(action=\"getfile_http\" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False)\n",
" elif url.startswith(\"plx\") == True:\n",
" url = url.replace(\"plx:\", \"\")\n",
" plugintools.add_item(action=\"plx_items\" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , folder = True , isPlayable = False)\n",
" \n",
" elif data == \"\" :\n",
" break\n",
" \n",
" else:\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" print i\n",
" continue\n",
"\n",
"\n",
" file.close()\n",
"\n",
"\n",
" # Purga de listas erróneas creadas al abrir listas PLX (por los playlists de ordenación que crea Navixtreme)\n",
" \n",
" if os.path.isfile(playlists + 'Siguiente.plx'):\n",
" os.remove(playlists + 'Siguiente.plx')\n",
" print \"Correcto!\"\n",
" else:\n",
" pass\n",
" \n",
" if os.path.isfile(playlists + 'Ordenar listas por....plx'):\n",
" os.remove(playlists + 'Ordenar listas por....plx')\n",
" print \"Ordenar listas por....plx eliminado!\"\n",
" print \"Correcto!\"\n",
" else:\n",
" print \"No es posible!\"\n",
" pass\n",
"\n",
" if os.path.isfile(playlists + 'A-Z.plx'):\n",
" os.remove(playlists + 'A-Z.plx')\n",
" print \"A-Z.plx eliminado!\" \n",
" else:\n",
" print \"No es posible!\" \n",
" pass\n",
"\n",
" if os.path.isfile(playlists + 'De la A a la Z.plx'):\n",
" os.remove(playlists + 'De la A a la Z.plx')\n",
" print \"De la A a la Z.plx eliminado!\" \n",
" else:\n",
" print \"No es posible!\" \n",
" pass\n",
"\n",
" if os.path.isfile(playlists + 'Z-A.plx'):\n",
" os.remove(playlists + 'Z-A.plx')\n",
" print \"Z-A.plx eliminado!\" \n",
" else:\n",
" print \"No es posible!\"\n",
" pass\n",
"\n",
" if os.path.isfile(playlists + 'De la Z a la A.plx'):\n",
" os.remove(playlists + 'De la Z a la A.plx')\n",
" print \"De la Z a la A.plx eliminado!\" \n",
" else:\n",
" print \"No es posible!\" \n",
" pass\n",
"\n",
" if os.path.isfile(playlists + 'Las + antiguas primero....plx'):\n",
" os.remove(playlists + 'Las + antiguas primero....plx')\n",
" print \"Las más antiguas primero....plx eliminado!\" \n",
" else:\n",
" print \"No es posible!\" \n",
" pass\n",
"\n",
" if os.path.isfile(playlists + 'by date added, oldest first.plx'):\n",
" os.remove(playlists + 'by date added, oldest first.plx')\n",
" print \"by date added, oldest first.plx eliminado!\" \n",
" else:\n",
" print \"No es posible!\" \n",
" pass\n",
"\n",
" if os.path.isfile(playlists + 'Las + recientes primero....plx'):\n",
" os.remove(playlists + 'Las + recientes primero....plx') \n",
" else:\n",
" print \"No es posible!\" \n",
" pass\n",
"\n",
" if os.path.isfile(playlists + 'by date added, newest first.plx'):\n",
" os.remove(playlists + 'by date added, newest first.plx')\n",
" print \"by date added, newest first.plx eliminado!\" \n",
" else:\n",
" print \"No es posible!\" \n",
" pass\n",
"\n",
" if os.path.isfile(playlists + 'Sorted by user-assigned order.plx'):\n",
" os.remove(playlists + 'Sorted by user-assigned order.plx')\n",
" print \"Sorted by user-assigned order.plx eliminado!\"\n",
" else:\n",
" print \"No es posible!\" \n",
" pass \n",
"\n",
" if os.path.isfile(playlists + 'Ordenado por.plx'):\n",
" os.remove(playlists + 'Ordenado por.plx')\n",
" print \"Correcto!\" \n",
" else:\n",
" print \"No es posible!\" \n",
" pass\n",
"\n",
" if os.path.isfile(playlists + 'Ordenado por'):\n",
" os.remove(playlists + 'Ordenado por')\n",
" print \"Correcto!\" \n",
" else:\n",
" print \"No es posible!\" \n",
" pass \n",
"\n",
"\n",
"\n",
"def futbolenlatv(params):\n",
" plugintools.log(\"[nec tv-0.1.0].futbolenlaTV \"+repr(params))\n",
"\n",
" hora_partidos = []\n",
" lista_equipos=[]\n",
" campeonato=[]\n",
" canales=[]\n",
"\n",
" url = params.get(\"url\")\n",
" print url\n",
" fecha = get_fecha()\n",
" dia_manana = params.get(\"plot\")\n",
" data = plugintools.read(url)\n",
" \n",
" if dia_manana == \"\": # Control para si es agenda de hoy o mañana\n",
" plugintools.add_item(action=\"\", title = '[COLOR green][B]FutbolenlaTV.com[/B][/COLOR] - [COLOR lightblue][I]Agenda para el día '+ fecha + '[/I][/COLOR]', folder = False , isPlayable = False )\n",
" else:\n",
" dia_manana = dia_manana.split(\"-\")\n",
" dia_manana = dia_manana[2] + \"/\" + dia_manana[1] + \"/\" + dia_manana[0]\n",
" plugintools.add_item(action=\"\", title = '[COLOR green][B]FutbolenlaTV.com[/B][/COLOR] - [COLOR lightblue][I]Agenda para el día '+ dia_manana + '[/I][/COLOR]', folder = False , isPlayable = False )\n",
" \n",
"\t \n",
" bloque = plugintools.find_multiple_matches(data,'<span class=\"cuerpo-partido\">(.*?)</div>')\n",
" for entry in bloque:\n",
" category = plugintools.find_single_match(entry, '<i class=(.*?)</i>')\n",
" category = category.replace(\"ftvi-\", \"\")\n",
" category = category.replace('comp\">', '')\n",
" category = category.replace('\"', '')\n",
" category = category.replace(\"-\", \" \")\n",
" category = category.replace(\"Futbol\", \"Fútbol\")\n",
" category = category.strip()\n",
" category = category.capitalize()\n",
" plugintools.log(\"cat= \"+category)\n",
" champ = plugintools.find_single_match(entry, '<span class=\"com-detalle\">(.*?)</span>')\n",
" champ = encode_string(champ)\n",
" champ = champ.strip()\n",
" event = plugintools.find_single_match(entry, '<span class=\"bloque\">(.*?)</span>')\n",
" event = encode_string(event)\n",
" event = event.strip()\n",
" momentum = plugintools.find_single_match(entry, '<time itemprop=\"startDate\" datetime=([^<]+)</time>')\n",
" # plugintools.log(\"momentum= \"+momentum)\n",
" momentum = momentum.split(\">\")\n",
" momentum = momentum[1]\n",
"\n",
" gametime = plugintools.find_multiple_matches(entry, '<span class=\"n\">(.*?)</span>')\n",
" for tiny in gametime:\n",
" day = tiny\n",
" month = tiny\n",
" \n",
" sport = plugintools.find_single_match(entry, '<meta itemprop=\"eventType\" content=(.*?)/>')\n",
" sport = sport.replace('\"', '')\n",
" sport = sport.strip()\n",
" if sport == \"Partido de fútbol\":\n",
" sport = \"Fútbol\"\n",
" \n",
" # plugintools.log(\"sport= \"+sport)\n",
" \n",
" gameday = plugintools.find_single_match(entry, '<span class=\"dia\">(.*?)</span>')\n",
"\n",
" rivals = plugintools.find_multiple_matches(entry, '<span>([^<]+)</span>([^<]+)<span>([^<]+)</span>')\n",
" rivales = \"\"\n",
" \n",
" for diny in rivals:\n",
" print diny\n",
" items = len(diny)\n",
" items = items - 1\n",
" i = -1\n",
" diny[i].strip()\n",
" while i <= items:\n",
" if diny[i] == \"\":\n",
" del diny[0]\n",
" i = i + 1\n",
" else:\n",
" print diny[i]\n",
" rival = diny[i] \n",
" rival = encode_string(rival)\n",
" rival = rival.strip()\n",
" plugintools.log(\"rival= \"+rival)\n",
" if rival == \"-\":\n",
" i = i + 1\n",
" continue\n",
" else:\n",
" if rivales != \"\":\n",
" rivales = rivales + \" vs \" + rival\n",
" plugintools.log(\"rivales= \"+rivales)\n",
" break\n",
" else:\n",
" rivales = rival\n",
" plugintools.log(\"rival= \"+rival)\n",
" i = i + 1\n",
"\n",
"\n",
" tv = plugintools.find_single_match(entry, '<span class=\"hidden-phone hidden-tablet canales\"([^<]+)</span>')\n",
" tv = tv.replace(\">\", \"\")\n",
" tv = encode_string(tv) \n",
" if tv == \"\":\n",
" continue\n",
" else:\n",
" tv = tv.replace(\"(Canal+, Astra\", \"\")\n",
" tv = tv.split(\",\")\n",
" tv_a = tv[0]\n",
" tv_a = tv_a.rstrip()\n",
" tv_a = tv_a.lstrip()\n",
" tv_a = tv_a.replace(\")\", \"\")\n",
" plugintools.log(\"tv_a= \"+tv_a)\n",
" print len(tv)\n",
" if len(tv) == 2:\n",
" tv_b = tv[1]\n",
" tv_b = tv_b.lstrip()\n",
" tv_b = tv_b.rstrip()\n",
" tv_b = tv_b.replace(\")\", \"\")\n",
" tv_b = tv_b.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_b = tv_b.replace(\"(Canal+\", \"\") \n",
" tv = tv_a + \" / \" + tv_b\n",
" plot = tv\n",
" plugintools.log(\"plot= \"+plot)\n",
" \n",
" elif len(tv) == 3:\n",
" tv_b = tv[1]\n",
" tv_b = tv_b.lstrip()\n",
" tv_b = tv_b.rstrip()\n",
" tv_b = tv_b.replace(\")\", \"\")\n",
" tv_b = tv_b.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_b = tv_b.replace(\"(Canal+\", \"\") \n",
" tv_c = tv[2]\n",
" tv_c = tv_c.lstrip()\n",
" tv_c = tv_c.rstrip()\n",
" tv_c = tv_c.replace(\")\", \"\")\n",
" tv_c = tv_c.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_c = tv_c.replace(\"(Canal+\", \"\") \n",
" tv = tv_a + \" / \" + tv_b + \" / \" + tv_c\n",
" plot = tv\n",
" plugintools.log(\"plot= \"+plot)\n",
"\n",
" elif len(tv) == 4:\n",
" tv_b = tv[1]\n",
" tv_b = tv_b.lstrip()\n",
" tv_b = tv_b.rstrip()\n",
" tv_b = tv_b.replace(\")\", \"\")\n",
" tv_b = tv_b.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_b = tv_b.replace(\"(Canal+\", \"\") \n",
" tv_c = tv[2]\n",
" tv_c = tv_c.lstrip()\n",
" tv_c = tv_c.rstrip()\n",
" tv_c = tv_c.replace(\")\", \"\")\n",
" tv_c = tv_c.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_c = tv_c.replace(\"(Canal+\", \"\") \n",
" tv_d = tv[3]\n",
" tv_d = tv_d.lstrip()\n",
" tv_d = tv_d.rstrip()\n",
" tv_d = tv_d.replace(\")\", \"\")\n",
" tv_d = tv_d.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_d = tv_d.replace(\"(Canal+\", \"\") \n",
" tv = tv_a + \" / \" + tv_b + \" / \" + tv_c + \" / \" + tv_d \n",
" plot = tv\n",
" plugintools.log(\"plot= \"+plot)\n",
"\n",
" elif len(tv) == 5:\n",
" tv_b = tv[1]\n",
" tv_b = tv_b.lstrip()\n",
" tv_b = tv_b.rstrip()\n",
" tv_b = tv_b.replace(\")\", \"\")\n",
" tv_b = tv_b.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_b = tv_b.replace(\"(Canal+\", \"\") \n",
" tv_c = tv[2]\n",
" tv_c = tv_c.lstrip()\n",
" tv_c = tv_c.rstrip()\n",
" tv_c = tv_c.replace(\")\", \"\")\n",
" tv_c = tv_c.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_c = tv_c.replace(\"(Canal+\", \"\") \n",
" tv_d = tv[3]\n",
" tv_d = tv_d.lstrip()\n",
" tv_d = tv_d.rstrip()\n",
" tv_d = tv_d.replace(\")\", \"\")\n",
" tv_d = tv_d.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_d = tv_d.replace(\"(Canal+\", \"\") \n",
" tv_e = tv[4]\n",
" tv_e = tv_e.lstrip()\n",
" tv_e = tv_e.rstrip()\n",
" tv_e = tv_e.replace(\")\", \"\")\n",
" tv_e = tv_e.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_e = tv_e.replace(\"(Canal+\", \"\") \n",
" tv = tv_a + \" / \" + tv_b + \" / \" + tv_c + \" / \" + tv_d + \" / \" + tv_e\n",
" # tv = tv.replace(\")\", \"\") \n",
" plot = tv\n",
" plugintools.log(\"plot= \"+plot)\n",
"\n",
" elif len(tv) == 6:\n",
" tv_b = tv[1]\n",
" tv_b = tv_b.lstrip()\n",
" tv_b = tv_b.rstrip()\n",
" tv_b = tv_b.replace(\")\", \"\")\n",
" tv_b = tv_b.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_b = tv_b.replace(\"(Canal+\", \"\") \n",
" tv_c = tv[2]\n",
" tv_c = tv_c.lstrip()\n",
" tv_c = tv_c.rstrip()\n",
" tv_c = tv_c.replace(\")\", \"\")\n",
" tv_c = tv_c.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_c = tv_c.replace(\"(Canal+\", \"\") \n",
" tv_d = tv[3]\n",
" tv_d = tv_d.lstrip()\n",
" tv_d = tv_d.rstrip()\n",
" tv_d = tv_d.replace(\")\", \"\")\n",
" tv_d = tv_d.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_d = tv_d.replace(\"(Canal+\", \"\") \n",
" tv_e = tv[4]\n",
" tv_e = tv_e.lstrip()\n",
" tv_e = tv_e.rstrip()\n",
" tv_e = tv_e.replace(\")\", \"\")\n",
" tv_e = tv_e.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_e = tv_e.replace(\"(Canal+\", \"\") \n",
" tv_f = tv[5]\n",
" tv_f = tv_f.lstrip()\n",
" tv_f = tv_f.rstrip()\n",
" tv_f = tv_f.replace(\")\", \"\")\n",
" tv_f = tv_f.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_f = tv_f.replace(\"(Canal+\", \"\") \n",
" tv = tv_a + \" / \" + tv_b + \" / \" + tv_c + \" / \" + tv_d + \" / \" + tv_e + \" / \" + tv_f\n",
" # tv = tv.replace(\")\", \"\") \n",
" plot = tv\n",
" plugintools.log(\"plot= \"+plot)\n",
"\n",
" elif len(tv) == 7:\n",
" tv_b = tv[1]\n",
" tv_b = tv_b.lstrip()\n",
" tv_b = tv_b.rstrip()\n",
" tv_b = tv_b.replace(\")\", \"\")\n",
" tv_b = tv_b.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_b = tv_b.replace(\"(Canal+\", \"\") \n",
" tv_c = tv[2]\n",
" tv_c = tv_c.lstrip()\n",
" tv_c = tv_c.rstrip()\n",
" tv_c = tv_c.replace(\")\", \"\")\n",
" tv_c = tv_c.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_c = tv_c.replace(\"(Canal+\", \"\") \n",
" tv_d = tv[3]\n",
" tv_d = tv_d.lstrip()\n",
" tv_d = tv_d.rstrip()\n",
" tv_d = tv_d.replace(\")\", \"\")\n",
" tv_d = tv_d.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_d = tv_d.replace(\"(Canal+\", \"\") \n",
" tv_e = tv[4]\n",
" tv_e = tv_e.lstrip()\n",
" tv_e = tv_e.rstrip()\n",
" tv_e = tv_e.replace(\")\", \"\")\n",
" tv_e = tv_e.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_e = tv_e.replace(\"(Canal+\", \"\") \n",
" tv_f = tv[5]\n",
" tv_f = tv_f.lstrip()\n",
" tv_f = tv_f.rstrip()\n",
" tv_f = tv_f.replace(\")\", \"\")\n",
" tv_f = tv_f.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_f = tv_f.replace(\"(Canal+\", \"\") \n",
" tv_g = tv[6]\n",
" tv_g = tv_g.lstrip()\n",
" tv_g = tv_g.rstrip()\n",
" tv_g = tv_g.replace(\")\", \"\")\n",
" tv_g = tv_g.replace(\"(Bar+ dial 333-334\", \"\")\n",
" tv_g = tv_g.replace(\"(Canal+\", \"\") \n",
" tv = tv_a + \" / \" + tv_b + \" / \" + tv_c + \" / \" + tv_d + \" / \" + tv_e + \" / \" + tv_f + \" / \" + tv_g\n",
" plot = tv\n",
" plugintools.log(\"plot= \"+plot) \n",
" else:\n",
" tv = tv_a\n",
" plot = tv_a\n",
" plugintools.log(\"plot= \"+plot)\n",
" \n",
"\n",
" plugintools.add_item(action=\"contextMenu\", plot = plot , title = momentum + \"h \" + '[COLOR lightyellow][B]' + category + '[/B][/COLOR] ' + '[COLOR green]' + champ + '[/COLOR]' + \" \" + '[COLOR lightyellow][I]' + rivales + '[/I][/COLOR] [I][COLOR red]' + plot + '[/I][/COLOR]' , thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , folder = True, isPlayable = False)\n",
" # plugintools.add_item(action=\"contextMenu\", title = '[COLOR yellow][I]' + tv + '[/I][/COLOR]', thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , plot = plot , folder = True, isPlayable = False) \n",
" \n",
" # plugintools.add_item(action=\"contextMenu\", title = gameday + '/' + day + \"(\" + momentum + \") \" + '[COLOR lightyellow][B]' + category + '[/B][/COLOR] ' + champ + \": \" + rivales , plot = plot , thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , folder = True, isPlayable = False)\n",
" # plugintools.add_item(action=\"contextMenu\", title = '[COLOR yellow][I]' + tv + '[/I][/COLOR]' , thumbnail = 'http://i2.bssl.es/telelocura/2009/05/futbol-tv.jpg' , fanart = art + 'agenda2.jpg' , plot = plot , folder = True, isPlayable = False)\n",
" \n",
" \n",
"\n",
"def encode_string(txt):\n",
" plugintools.log(\"[nec tv-0.1.0].encode_string: \"+txt)\n",
" \n",
"    txt = txt.replace(\"Ã§\", \"ç\")\n",
"    txt = txt.replace('Ã©', 'é')\n",
"    txt = txt.replace('Ã¡', 'á')\n",
"    txt = txt.replace('Ã©', 'é')\n",
"    txt = txt.replace('Ã¡', 'á')\n",
"    txt = txt.replace('Ã±', 'ñ')\n",
"    txt = txt.replace('Ãº', 'ú')\n",
"    txt = txt.replace('Ã­', 'í')\n",
"    txt = txt.replace('Ã³', 'ó')    \n",
"    txt = txt.replace('â€™', \"'\")\n",
"    txt = txt.replace(\"Â\", \"\")\n",
"    txt = txt.replace(\"Â\", \"\")\n",
"    txt = txt.replace('â€™', \"'\")\n",
" return txt\n",
"\n",
"\n",
"\n",
"def splive_items(params):\n",
" plugintools.log(\"[nec tv-0.1.0].SPlive_items \"+repr(params))\n",
" data = plugintools.read( params.get(\"url\") )\n",
"\n",
" channel = plugintools.find_multiple_matches(data,'<channel>(.*?)</channel>')\n",
" \n",
" for entry in channel:\n",
" # plugintools.log(\"channel= \"+channel)\n",
" title = plugintools.find_single_match(entry,'<name>(.*?)</name>')\n",
" category = plugintools.find_single_match(entry,'<category>(.*?)</category>')\n",
" thumbnail = plugintools.find_single_match(entry,'<link_logo>(.*?)</link_logo>')\n",
" rtmp = plugintools.find_single_match(entry,'<rtmp>([^<]+)</rtmp>')\n",
" isIliveTo = plugintools.find_single_match(entry,'<isIliveTo>([^<]+)</isIliveTo>')\n",
" rtmp = rtmp.strip()\n",
" pageurl = plugintools.find_single_match(entry,'<url_html>([^<]+)</url_html>')\n",
" link_logo = plugintools.find_single_match(entry,'<link_logo>([^<]+)</link_logo>')\n",
" \n",
" if pageurl == \"SinProgramacion\":\n",
" pageurl = \"\"\n",
" \n",
" playpath = plugintools.find_single_match(entry, '<playpath>([^<]+)</playpath>')\n",
" playpath = playpath.replace(\"Referer: \", \"\")\n",
" token = plugintools.find_single_match(entry, '<token>([^<]+)</token>')\n",
"\n",
" iliveto = 'rtmp://188.122.91.73/edge'\n",
" \n",
" if isIliveTo == \"0\":\n",
" if token == \"0\":\n",
" url = rtmp\n",
"                url = url.replace(\"&amp;\", \"&\")\n",
" parse_url(url)\n",
" plugintools.add_item( action = \"play\" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True )\n",
" plugintools.log(\"url= \"+url)\n",
" else:\n",
" url = rtmp + \" pageUrl=\" + pageurl + \" \" + 'token=' + token + playpath + \" live=1\"\n",
" parse_url(url)\n",
" plugintools.add_item( action = \"play\" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True )\n",
" plugintools.log(\"url= \"+url)\n",
"\n",
" if isIliveTo == \"1\":\n",
" if token == \"1\": \n",
" url = iliveto + \" pageUrl=\" + pageurl + \" \" + 'token=' + token + playpath + \" live=1\"\n",
"                url = url.replace(\"&amp;\", \"&\")\n",
" parse_url(url)\n",
" plugintools.add_item( action = \"play\" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True )\n",
" plugintools.log(\"url= \"+url)\n",
" \n",
" else:\n",
" url = iliveto + ' swfUrl=' + rtmp + \" playpath=\" + playpath + \" pageUrl=\" + pageurl\n",
"                url = url.replace(\"&amp;\", \"&\")\n",
" parse_url(url)\n",
" plugintools.add_item( action = \"play\" , title = title , url = url , thumbnail = thumbnail , fanart = fanart , plot = title , folder = False , isPlayable = True )\n",
" plugintools.log(\"url= \"+url)\n",
" \n",
"\n",
"\n",
"def get_fecha():\n",
"\n",
" from datetime import datetime\n",
"\n",
" ahora = datetime.now()\n",
" anno_actual = ahora.year\n",
" mes_actual = ahora.month\n",
" dia_actual = ahora.day\n",
" fecha = str(dia_actual) + \"/\" + str(mes_actual) + \"/\" + str(anno_actual)\n",
" plugintools.log(\"fecha de hoy= \"+fecha)\n",
" return fecha\n",
"\n",
"\n",
"\n",
"\n",
"def p2p_items(params):\n",
" plugintools.log(\"[nec tv-0.1.0].p2p_items\" +repr(params))\n",
" \n",
" # Vamos a localizar el título \n",
" title = params.get(\"plot\")\n",
" if title == \"\":\n",
" title = params.get(\"title\")\n",
" \n",
" data = plugintools.read(\"http://pastebin.com/raw.php?i=n9BF6Cwe\")\n",
" subcanal = plugintools.find_single_match(data,'<name>' + title + '(.*?)</subchannel>')\n",
" thumbnail = plugintools.find_single_match(subcanal, '<thumbnail>(.*?)</thumbnail>')\n",
" fanart = plugintools.find_single_match(subcanal, '<fanart>(.*?)</fanart>')\n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
"\n",
"\n",
" # Controlamos el caso en que no haya thumbnail en el menú de latinototal\n",
" if thumbnail == \"\":\n",
" thumbnail = art + 'p2p.png'\n",
" elif thumbnail == 'name_rtmp.png':\n",
" thumbnail = art + 'p2p.png' \n",
"\n",
" if fanart == \"\":\n",
" fanart = art + 'p2p.png'\n",
"\n",
" # Comprobamos si la lista ha sido descargada o no\n",
" plot = params.get(\"plot\")\n",
" \n",
" if plot == \"\":\n",
" title = params.get(\"title\")\n",
" title = parser_title(title)\n",
" filename = title + '.p2p'\n",
" getfile_url(params) \n",
" else:\n",
" print \"Lista ya descargada (plot no vacío)\"\n",
" filename = params.get(\"plot\")\n",
" params[\"ext\"] = 'p2p'\n",
" params[\"plot\"]=filename\n",
" filename = filename + '.p2p'\n",
" plugintools.log(\"Lectura del archivo P2P\") \n",
"\n",
" plugintools.add_item(action=\"\" , title='[COLOR lightyellow][I][B]' + title + '[/B][/I][/COLOR]' , thumbnail=thumbnail , fanart=fanart , folder=False, isPlayable=False)\n",
"\n",
" # Abrimos el archivo P2P y calculamos número de líneas \n",
" file = open(playlists + filename, \"r\")\n",
" file.seek(0)\n",
" data = file.readline()\n",
" num_items = len(file.readlines())\n",
" print num_items\n",
" file.seek(0)\n",
" data = file.readline()\n",
" if data.startswith(\"default\") == True:\n",
" data = data.replace(\"default=\", \"\")\n",
" data = data.split(\",\")\n",
" thumbnail = data[0]\n",
" fanart = data[1]\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" \n",
" # Leemos entradas\n",
" i = 0\n",
" file.seek(0)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" while i <= num_items: \n",
" if data == \"\":\n",
" data = file.readline()\n",
" data = data.strip()\n",
" # plugintools.log(\"linea vacia= \"+data)\n",
" i = i + 1\n",
" #print i\n",
" continue\n",
" \n",
" elif data.startswith(\"default\") == True:\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" #print i\n",
" continue\n",
" \n",
" elif data.startswith(\"#\") == True:\n",
" title = data.replace(\"#\", \"\")\n",
" plugintools.log(\"title comentario= \"+title)\n",
" plugintools.add_item(action=\"play\" , title = title , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" continue\n",
" \n",
" else:\n",
" title = data\n",
" title = title.strip()\n",
" plugintools.log(\"title= \"+title)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" #print i\n",
" plugintools.log(\"linea URL= \"+data)\n",
" if data.startswith(\"sop\") == True:\n",
" print \"empieza el sopcast...\"\n",
" # plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast\n",
" title_fixed = parser_title(title)\n",
" title = title.replace(\" \" , \"+\")\n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=2&name=' + title_fixed\n",
" url = url.strip()\n",
" plugintools.add_item(action=\"play\" , title = title_fixed + ' [COLOR lightgreen][Sopcast][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" #print i\n",
" continue\n",
" \n",
" elif data.startswith(\"magnet\") == True:\n",
" print \"empieza el torrent...\"\n",
" url = urllib.quote_plus(data)\n",
" title_fixed = parser_title(title)\n",
" #plugin://plugin.video.pulsar/play?uri=<URL_ENCODED_LINK>\n",
" url = 'plugin://plugin.video.pulsar/play?uri=' + url\n",
" plugintools.add_item(action=\"play\" , title = title_fixed + ' [COLOR orangered][Torrent][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" continue\n",
"\n",
" else:\n",
" print \"empieza el acestream...\"\n",
" # plugin://plugin.video.p2p-streams/?url=a55f96dd386b7722380802b6afffc97ff98903ac&mode=1&name=Sky+Sports+title\n",
" title = parser_title(title) \n",
" print title \n",
" url = 'plugin://plugin.video.p2p-streams/?url=' + data + '&mode=1&name='\n",
" plugintools.add_item(action=\"play\" , title = title + ' [COLOR lightblue][Acestream][/COLOR]' , url = url, thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True)\n",
" data = file.readline()\n",
" data = data.strip()\n",
" i = i + 1\n",
" #print i\n",
" \n",
" \n",
"\n",
"\n",
"def contextMenu(params):\n",
" plugintools.log(\"[nec tv-0.1.0].contextMenu \" +repr(params))\n",
"\n",
" dialog = xbmcgui.Dialog()\n",
" plot = params.get(\"plot\")\n",
" canales = plot.split(\"/\")\n",
" len_canales = len(canales)\n",
" print len_canales\n",
" plugintools.log(\"canales= \"+repr(canales))\n",
"\n",
" if len_canales == 1: \n",
" tv_a = canales[0]\n",
" tv_a = parse_channel(tv_a)\n",
" search_channel(params)\n",
" selector = \"\" \n",
" else:\n",
" if len_canales == 2:\n",
" print \"len_2\"\n",
" tv_a = canales[0]\n",
" tv_a = parse_channel(tv_a)\n",
" tv_b = canales[1]\n",
" tv_b = parse_channel(tv_b)\n",
" selector = dialog.select('palcoTV', [tv_a, tv_b])\n",
" \n",
" elif len_canales == 3:\n",
" tv_a = canales[0]\n",
" tv_a = parse_channel(tv_a)\n",
" tv_b = canales[1]\n",
" tv_b = parse_channel(tv_b)\n",
" tv_c = canales[2]\n",
" tv_c = parse_channel(tv_c) \n",
" selector = dialog.select('latinototal', [tv_a, tv_b, tv_c])\n",
" \n",
" elif len_canales == 4:\n",
" tv_a = canales[0]\n",
" tv_a = parse_channel(tv_a)\n",
" tv_b = canales[1]\n",
" tv_b = parse_channel(tv_b)\n",
" tv_c = canales[2]\n",
" tv_c = parse_channel(tv_c)\n",
" tv_d = canales[3]\n",
" tv_d = parse_channel(tv_d) \n",
" selector = dialog.select('latinototal', [tv_a, tv_b, tv_c, tv_d])\n",
" \n",
" elif len_canales == 5:\n",
" tv_a = canales[0]\n",
" tv_a = parse_channel(tv_a)\n",
" tv_b = canales[1]\n",
" tv_b = parse_channel(tv_b)\n",
" tv_c = canales[2]\n",
" tv_c = parse_channel(tv_c)\n",
" tv_d = canales[3]\n",
" tv_d = parse_channel(tv_d) \n",
" tv_e = canales[4]\n",
" tv_e = parse_channel(tv_e)\n",
" selector = dialog.select('latinototal', [tv_a, tv_b, tv_c, tv_d, tv_e])\n",
" \n",
" elif len_canales == 6:\n",
" tv_a = canales[0]\n",
" tv_a = parse_channel(tv_a)\n",
" tv_b = canales[1]\n",
" tv_b = parse_channel(tv_b)\n",
" tv_c = canales[2]\n",
" tv_c = parse_channel(tv_c)\n",
" tv_d = canales[3]\n",
" tv_d = parse_channel(tv_d) \n",
" tv_e = canales[4]\n",
" tv_e = parse_channel(tv_e)\n",
" tv_f = canales[5]\n",
" tv_f = parse_channel(tv_f) \n",
" selector = dialog.select('latinototal', [tv_a , tv_b, tv_c, tv_d, tv_e, tv_f])\n",
" \n",
" elif len_canales == 7:\n",
" tv_a = canales[0]\n",
" tv_a = parse_channel(tv_a)\n",
" tv_b = canales[1]\n",
" tv_b = parse_channel(tv_b)\n",
" tv_c = canales[2]\n",
" tv_c = parse_channel(tv_c)\n",
" tv_d = canales[3]\n",
" tv_d = parse_channel(tv_d) \n",
" tv_e = canales[4]\n",
" tv_e = parse_channel(tv_e)\n",
" tv_f = canales[5]\n",
" tv_f = parse_channel(tv_f)\n",
" tv_g = canales[6]\n",
" tv_g = parse_channel(tv_g) \n",
" selector = dialog.select('latinototal', [tv_a , tv_b, tv_c, tv_d, tv_e, tv_f, tv_g])\n",
" \n",
" if selector == 0:\n",
" print selector\n",
" if tv_a.startswith(\"Gol\") == True:\n",
" tv_a = \"Gol\"\n",
" params[\"plot\"] = tv_a\n",
" plugintools.log(\"tv= \"+tv_a)\n",
" search_channel(params)\n",
" elif selector == 1:\n",
" print selector\n",
" if tv_b.startswith(\"Gol\") == True:\n",
" tv_b = \"Gol\"\n",
" params[\"plot\"] = tv_b\n",
" plugintools.log(\"tv= \"+tv_b)\n",
" search_channel(params) \n",
" elif selector == 2:\n",
" print selector \n",
" if tv_c.startswith(\"Gol\") == True:\n",
" tv_c = \"Gol\"\n",
" params[\"plot\"] = tv_c\n",
" plugintools.log(\"tv= \"+tv_c)\n",
" search_channel(params)\n",
" elif selector == 3:\n",
" print selector \n",
" if tv_d.startswith(\"Gol\") == True:\n",
" tv_d = \"Gol\"\n",
" params[\"plot\"] = tv_d\n",
" plugintools.log(\"tv= \"+tv_d)\n",
" search_channel(params)\n",
" elif selector == 4:\n",
" print selector \n",
" if tv_e.startswith(\"Gol\") == True:\n",
" tv_e = \"Gol\"\n",
" params[\"plot\"] = tv_e\n",
" plugintools.log(\"tv= \"+tv_e)\n",
" search_channel(params) \n",
" elif selector == 5:\n",
" print selector \n",
" if tv_f.startswith(\"Gol\") == True:\n",
" tv_f = \"Gol\"\n",
" params[\"plot\"] = tv_f\n",
" plugintools.log(\"tv= \"+tv_f)\n",
" search_channel(params)\n",
" elif selector == 6:\n",
" print selector \n",
" if tv_g.startswith(\"Gol\") == True:\n",
" tv_g = \"Gol\"\n",
" params[\"plot\"] = tv_g\n",
" plugintools.log(\"tv= \"+tv_g)\n",
" search_channel(params)\n",
" else:\n",
" pass\n",
"\n",
"\n",
"\n",
"def magnet_items(params):\n",
" plugintools.log(\"[nec tv-0.1.0].magnet_items\" +repr(params))\n",
" \n",
" plot = params.get(\"plot\")\n",
" \n",
"\n",
" title = params.get(\"title\")\n",
" fanart = \"\"\n",
" thumbnail = \"\"\n",
" \n",
" if plot != \"\":\n",
" filename = params.get(\"plot\")\n",
" params[\"ext\"] = 'p2p'\n",
" params[\"plot\"]=filename\n",
" title = plot + '.p2p'\n",
" else:\n",
" getfile_url(params)\n",
" title = params.get(\"title\")\n",
" title = title + '.p2p'\n",
"\n",
" # Abrimos el archivo P2P y calculamos número de líneas\n",
" file = open(playlists + title, \"r\")\n",
" file.seek(0)\n",
" data = file.readline()\n",
" num_items = len(file.readlines())\n",
"\n",
" # Leemos entradas\n",
" file.seek(0)\n",
" i = 0\n",
" while i <= num_items:\n",
" data = file.readline()\n",
" i = i + 1\n",
" #print i\n",
" if data != \"\":\n",
" data = data.strip()\n",
" title = data\n",
" data = file.readline()\n",
" i = i + 1\n",
" #print i\n",
" data = data.strip()\n",
" if data.startswith(\"magnet:\") == True:\n",
" # plugin://plugin.video.p2p-streams/?url=sop://124.232.150.188:3912/11265&mode=2&name=Titulo+canal+Sopcast\n",
" title_fixed = title.replace(\" \" , \"+\")\n",
"                url_fixed = urllib.quote_plus(data)\n",
"                url = url_fixed.strip()\n",
" #plugin://plugin.video.pulsar/play?uri=<URL_ENCODED_LINK>\n",
" url = 'plugin://plugin.video.pulsar/play?uri=' + url\n",
" plugintools.add_item(action=\"play\" , title = data + ' [COLOR orangered][Torrent][/COLOR]' , url = url, thumbnail = art + 'p2p.png' , fanart = art + 'fanart.jpg' , folder = False , isPlayable = True)\n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" #print i\n",
" else:\n",
" data = file.readline()\n",
" i = i + 1\n",
" #print i\n",
" \n",
"\n",
"def parse_channel(txt):\n",
" plugintools.log(\"[nec tv-0.1.0].encode_string: \"+txt)\n",
"\n",
" txt = txt.rstrip()\n",
" txt = txt.lstrip() \n",
" return txt\n",
"\n",
"\n",
"def futbolenlatv_manana(params):\n",
" plugintools.log(\"[nec tv-0.1.0].futbolenlatv \" + repr(params))\n",
" \n",
" # Fecha de mañana\n",
" import datetime\n",
"\n",
" today = datetime.date.today()\n",
" manana = today + datetime.timedelta(days=1)\n",
" anno_manana = manana.year\n",
" mes_manana = manana.month\n",
" if mes_manana == 1:\n",
" mes_manana = \"enero\"\n",
" elif mes_manana == 2:\n",
" mes_manana = \"febrero\"\n",
" elif mes_manana == 3:\n",
" mes_manana = \"marzo\"\n",
" elif mes_manana == 4:\n",
" mes_manana = \"abril\"\n",
" elif mes_manana == 5:\n",
" mes_manana = \"mayo\"\n",
" elif mes_manana == 6:\n",
" mes_manana = \"junio\"\n",
" elif mes_manana == 7:\n",
" mes_manana = \"julio\"\n",
" elif mes_manana == 8:\n",
" mes_manana = \"agosto\"\n",
" elif mes_manana == 9:\n",
" mes_manana = \"septiembre\"\n",
" elif mes_manana == 10:\n",
" mes_manana = \"octubre\"\n",
" elif mes_manana == 11:\n",
" mes_manana = \"noviembre\"\n",
" elif mes_manana == 12:\n",
" mes_manana = \"diciembre\"\n",
" \n",
" \n",
" dia_manana = manana.day\n",
" plot = str(anno_manana) + \"-\" + str(mes_manana) + \"-\" + str(dia_manana)\n",
" print manana\n",
"\n",
" url = 'http://www.futbolenlatv.com/m/Fecha/' + plot + '/agenda/false/false'\n",
" plugintools.log(\"URL mañana= \"+url)\n",
" params[\"url\"] = url\n",
" params[\"plot\"] = plot\n",
" futbolenlatv(params)\n",
" \n",
"\n",
"\n",
"\n",
"\n",
"def parser_title(title):\n",
" plugintools.log(\"[nec tv-0.1.0].parser_title \" + title)\n",
"\n",
" cyd = title\n",
"\n",
" cyd = cyd.replace(\"[COLOR lightyellow]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR green]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR red]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR blue]\", \"\") \n",
" cyd = cyd.replace(\"[COLOR royalblue]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR white]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR pink]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR cyan]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR steelblue]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR forestgreen]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR olive]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR khaki]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR lightsalmon]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR orange]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR lightgreen]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR lightblue]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR lightpink]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR skyblue]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR darkorange]\", \"\") \n",
" cyd = cyd.replace(\"[COLOR greenyellow]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR yellow]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR yellowgreen]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR orangered]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR grey]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR gold]\", \"\")\n",
" cyd = cyd.replace(\"[COLOR=FF00FF00]\", \"\") \n",
" \n",
" cyd = cyd.replace(\"[/COLOR]\", \"\")\n",
" cyd = cyd.replace(\"[B]\", \"\")\n",
" cyd = cyd.replace(\"[/B]\", \"\")\n",
" cyd = cyd.replace(\"[I]\", \"\")\n",
" cyd = cyd.replace(\"[/I]\", \"\")\n",
" cyd = cyd.replace(\"[Auto]\", \"\")\n",
" cyd = cyd.replace(\"[Parser]\", \"\") \n",
" cyd = cyd.replace(\"[TinyURL]\", \"\")\n",
" cyd = cyd.replace(\"[Auto]\", \"\")\n",
"\n",
" # Control para evitar filenames con corchetes\n",
" cyd = cyd.replace(\" [Lista M3U]\", \"\")\n",
" cyd = cyd.replace(\" [Lista PLX]\", \"\")\n",
" cyd = cyd.replace(\" [Multilink]\", \"\")\n",
" cyd = cyd.replace(\" [COLOR orange][Lista [B]PLX[/B]][/COLOR]\", \"\")\n",
" cyd = cyd.replace(\" [COLOR orange][Lista [B]M3U[/B]][/COLOR]\", \"\")\n",
" cyd = cyd.replace(\" [COLOR lightyellow][B][Dailymotion[/B] playlist][/COLOR]\", \"\")\n",
" cyd = cyd.replace(\" [COLOR lightyellow][B][Dailymotion[/B] video][/COLOR]\", \"\")\n",
"\n",
" title = cyd\n",
" title = title.strip()\n",
" if title.endswith(\" .plx\") == True:\n",
" title = title.replace(\" .plx\", \".plx\")\n",
" \n",
" plugintools.log(\"title_parsed= \"+title)\n",
" return title\n",
"\n",
"\n",
"def json_items(params):\n",
" plugintools.log(\"[nec tv-0.1.0].json_items \"+repr(params))\n",
" data = plugintools.read(params.get(\"url\"))\n",
"\n",
" # Título y autor de la lista\n",
" match = plugintools.find_single_match(data, '\"name\"(.*?)\"url\"')\n",
" match = match.split(\",\")\n",
" namelist = match[0].strip()\n",
" author = match[1].strip()\n",
" namelist = namelist.replace('\"', \"\")\n",
" namelist = namelist.replace(\": \", \"\")\n",
" author = author.replace('\"author\":', \"\")\n",
" author = author.replace('\"', \"\")\n",
" fanart = params.get(\"extra\")\n",
" thumbnail = params.get(\"thumbnail\")\n",
" plugintools.log(\"title= \"+namelist)\n",
" plugintools.log(\"author= \"+author)\n",
" plugintools.add_item(action=\"\", title = '[B][COLOR lightyellow]' + namelist + '[/B][/COLOR]' , url = \"\" , thumbnail = thumbnail , fanart = fanart, isPlayable = False , folder = False)\n",
"\n",
" # Items de la lista\n",
" data = plugintools.find_single_match(data, '\"stations\"(.*?)]')\n",
" matches = plugintools.find_multiple_matches(data, '\"name\"(.*?)}')\n",
" for entry in matches:\n",
" if entry.find(\"isHost\") <= 0:\n",
" title = plugintools.find_single_match(entry,'(.*?)\\n')\n",
" title = title.replace(\": \", \"\")\n",
" title = title.replace('\"', \"\")\n",
" title = title.replace(\",\", \"\")\n",
" url = plugintools.find_single_match(entry,'\"url\":(.*?)\\n')\n",
" url = url.replace('\"', \"\")\n",
" url = url.strip()\n",
" params[\"url\"]=url\n",
" server_rtmp(params) \n",
" thumbnail = plugintools.find_single_match(entry,'\"image\":(.*?)\\n')\n",
" thumbnail = thumbnail.replace('\"', \"\")\n",
" thumbnail = thumbnail.replace(',', \"\")\n",
" thumbnail = thumbnail.strip() \n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
" # Control por si en la lista no aparece el logo en cada entrada\n",
" if thumbnail == \"\" :\n",
" thumbnail = params.get(\"thumbnail\")\n",
" \n",
" plugintools.add_item( action=\"play\" , title = '[COLOR white] ' + title + '[COLOR green] ['+ params.get(\"server\") + '][/COLOR]' , url = url , thumbnail = thumbnail , fanart = fanart , folder = False , isPlayable = True )\n",
"\n",
" else:\n",
" title = plugintools.find_single_match(entry,'(.*?)\\n')\n",
" title = title.replace(\": \", \"\")\n",
" title = title.replace('\"', \"\")\n",
" title = title.replace(\",\", \"\")\n",
" url = plugintools.find_single_match(entry,'\"url\":(.*?)\\n')\n",
" url = url.replace('\"', \"\")\n",
" url = url.strip() \n",
"\n",
" if url.find(\"allmyvideos\")>= 0:\n",
" url = url.replace(\",\", \"\")\n",
" plugintools.log(\"url= \"+url)\n",
" fanart = params.get(\"extra\")\n",
" thumbnail = plugintools.find_single_match(entry,'\"image\":(.*?)\\n')\n",
" thumbnail = thumbnail.replace('\"', \"\")\n",
" thumbnail = thumbnail.replace(',', \"\") \n",
" thumbnail = thumbnail.strip() \n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
" if thumbnail == \"\":\n",
" thumbnail = params.get(\"thumbnail\")\n",
" \n",
" plugintools.add_item( action=\"allmyvideos\" , title = title + ' [COLOR lightyellow][Allmyvideos][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )\n",
" \n",
" elif url.find(\"streamcloud\") >= 0:\n",
" url = url.replace(\",\", \"\")\n",
" plugintools.log(\"url= \"+url)\n",
" fanart = params.get(\"extra\")\n",
" thumbnail = plugintools.find_single_match(entry,'\"image\":(.*?)\\n')\n",
" thumbnail = thumbnail.replace('\"', \"\")\n",
" thumbnail = thumbnail.replace(',', \"\") \n",
" thumbnail = thumbnail.strip()\n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
" if thumbnail == \"\":\n",
" thumbnail = params.get(\"thumbnail\")\n",
" \n",
" plugintools.add_item( action=\"streamcloud\" , title = title + ' [COLOR lightskyblue][Streamcloud][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )\n",
" \n",
" elif url.find(\"played.to\") >= 0:\n",
" url = url.replace(\",\", \"\")\n",
" plugintools.log(\"url= \"+url)\n",
" fanart = params.get(\"extra\")\n",
" thumbnail = plugintools.find_single_match(entry,'\"image\":(.*?)\\n')\n",
" thumbnail = thumbnail.replace('\"', \"\")\n",
" thumbnail = thumbnail.replace(',', \"\") \n",
" thumbnail = thumbnail.strip()\n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
" if thumbnail == \"\":\n",
" thumbnail = params.get(\"thumbnail\")\n",
" plugintools.add_item( action=\"playedto\" , title = title + ' [COLOR lavender][Played.to][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )\n",
" \n",
" elif url.find(\"vidspot\") >= 0:\n",
" url = url.replace(\",\", \"\")\n",
" plugintools.log(\"url= \"+url)\n",
" fanart = params.get(\"extra\")\n",
" thumbnail = plugintools.find_single_match(entry,'\"image\":(.*?)\\n')\n",
" thumbnail = thumbnail.replace('\"', \"\")\n",
" thumbnail = thumbnail.replace(',', \"\") \n",
" thumbnail = thumbnail.strip()\n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
" if thumbnail == \"\":\n",
" thumbnail = params.get(\"thumbnail\")\n",
"\n",
" plugintools.add_item( action=\"vidspot\" , title = title + ' [COLOR palegreen][Vidspot][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True ) \n",
"\n",
" if url.find(\"vk.com\")>= 0:\n",
" url = url.replace(\",\", \"\")\n",
" plugintools.log(\"url= \"+url)\n",
" fanart = params.get(\"extra\")\n",
" thumbnail = plugintools.find_single_match(entry,'\"image\":(.*?)\\n')\n",
" thumbnail = thumbnail.replace('\"', \"\")\n",
" thumbnail = thumbnail.replace(',', \"\") \n",
" thumbnail = thumbnail.strip() \n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
" if thumbnail == \"\":\n",
" thumbnail = params.get(\"thumbnail\")\n",
"\n",
" plugintools.add_item( action=\"vk\" , title = title + ' [COLOR royalblue][Vk][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True ) \n",
"\n",
" if url.find(\"nowvideo\")>= 0:\n",
" url = url.replace(\",\", \"\")\n",
" plugintools.log(\"url= \"+url)\n",
" fanart = params.get(\"extra\")\n",
" thumbnail = plugintools.find_single_match(entry,'\"image\":(.*?)\\n')\n",
" thumbnail = thumbnail.replace('\"', \"\")\n",
" thumbnail = thumbnail.replace(',', \"\") \n",
" thumbnail = thumbnail.strip() \n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
" if thumbnail == \"\":\n",
" thumbnail = params.get(\"thumbnail\")\n",
" \n",
" plugintools.add_item( action=\"nowvideo\" , title = title + ' [COLOR palegreen][Nowvideo][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True ) \n",
"\n",
" if url.find(\"tumi\")>= 0:\n",
" url = url.replace(\",\", \"\")\n",
" plugintools.log(\"url= \"+url)\n",
" fanart = params.get(\"extra\")\n",
" thumbnail = plugintools.find_single_match(entry,'\"image\":(.*?)\\n')\n",
" thumbnail = thumbnail.replace('\"', \"\")\n",
" thumbnail = thumbnail.replace(',', \"\") \n",
" thumbnail = thumbnail.strip() \n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
" if thumbnail == \"\":\n",
" thumbnail = params.get(\"thumbnail\") \n",
" \n",
" plugintools.add_item( action=\"tumi\" , title = title + ' [COLOR forestgreen][Tumi][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )\n",
"\n",
" if url.find(\"streamin.to\")>= 0:\n",
" url = url.replace(\",\", \"\")\n",
" plugintools.log(\"url= \"+url)\n",
" fanart = params.get(\"extra\")\n",
" thumbnail = plugintools.find_single_match(entry,'\"image\":(.*?)\\n')\n",
" thumbnail = thumbnail.replace('\"', \"\")\n",
" thumbnail = thumbnail.replace(',', \"\") \n",
" thumbnail = thumbnail.strip() \n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
" if thumbnail == \"\":\n",
" thumbnail = params.get(\"thumbnail\") \n",
" \n",
" plugintools.add_item( action=\"streaminto\" , title = title + ' [COLOR orange][streamin.to][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True ) \n",
" \n",
" else:\n",
" # Canales no reproducibles en XBMC (de momento)\n",
" params[\"url\"]=url\n",
" server_rtmp(params)\n",
" plugintools.add_item( action=\"play\" , title = '[COLOR red] ' + title + ' ['+ params.get(\"server\") + '][/COLOR]' , url = url , fanart = fanart , thumbnail = thumbnail , folder = False , isPlayable = True )\n",
" \n",
" if title == \"\":\n",
" plugintools.log(\"url= \"+url)\n",
" fanart = params.get(\"extra\")\n",
" thumbnail = plugintools.find_single_match(entry,'\"image\":(.*?)\\n')\n",
" thumbnail = thumbnail.replace('\"', \"\")\n",
" thumbnail = thumbnail.replace(',', \"\")\n",
" thumbnail = thumbnail.strip() \n",
" plugintools.log(\"thumbnail= \"+thumbnail)\n",
" if thumbnail == \"\":\n",
" thumbnail = params.get(\"thumbnail\")\n",
" \n",
" \n",
" \n",
"\n",
"\n",
"def youtube_playlists(params):\n",
" plugintools.log(\"[nec tv-0.1.0].youtube_playlists \"+repr(params))\n",
" \n",
" data = plugintools.read( params.get(\"url\") )\n",
" \n",
" pattern = \"\"\n",
" matches = plugintools.find_multiple_matches(data,\"<entry(.*?)</entry>\")\n",
" \n",
" for entry in matches:\n",
" plugintools.log(\"entry=\"+entry)\n",
" \n",
" title = plugintools.find_single_match(entry,\"<titl[^>]+>([^<]+)</title>\")\n",
" plot = plugintools.find_single_match(entry,\"<media\\:descriptio[^>]+>([^<]+)</media\\:description>\")\n",
" thumbnail = plugintools.find_single_match(entry,\"<media\\:thumbnail url='([^']+)'\") \n",
" url = plugintools.find_single_match(entry,\"<content type\\='application/atom\\+xml\\;type\\=feed' src='([^']+)'/>\")\n",
" fanart = art + 'youtube.png'\n",
" \n",
" plugintools.add_item( action=\"youtube_videos\" , title=title , plot=plot , url=url , thumbnail=thumbnail , fanart=fanart , folder=True )\n",
" plugintools.log(\"fanart= \"+fanart)\n",
" \n",
"\n",
"\n",
"# Muestra todos los vídeos del playlist de Youtube\n",
"def youtube_videos(params):\n",
" plugintools.log(\"[nec tv-0.1.0].youtube_videos \"+repr(params))\n",
" \n",
" # Fetch video list from YouTube feed\n",
" data = plugintools.read( params.get(\"url\") )\n",
" plugintools.log(\"data= \"+data)\n",
" \n",
" # Extract items from feed\n",
" pattern = \"\"\n",
" matches = plugintools.find_multiple_matches(data,\"<entry(.*?)</entry>\")\n",
" \n",
" for entry in matches:\n",
" plugintools.log(\"entry=\"+entry)\n",
" \n",
" # Not the better way to parse XML, but clean and easy\n",
" title = plugintools.find_single_match(entry,\"<titl[^>]+>([^<]+)</title>\")\n",
" title = title.replace(\"I Love Handball | \",\"\")\n",
" plot = plugintools.find_single_match(entry,\"<summa[^>]+>([^<]+)</summa\")\n",
" thumbnail = plugintools.find_single_match(entry,\"<media\\:thumbnail url='([^']+)'\")\n",
" fanart = art+'youtube.png'\n",
" video_id = plugintools.find_single_match(entry,\"http\\://www.youtube.com/watch\\?v\\=([0-9A-Za-z_-]{11})\")\n",
" url = \"plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=\"+video_id\n",
"\n",
" # Appends a new item to the xbmc item list\n",
" plugintools.add_item( action=\"play\" , title=title , plot=plot , url=url , thumbnail=thumbnail , fanart=fanart , isPlayable=True, folder=False )\n",
"\n",
"\n",
"\n",
"def server_rtmp(params):\n",
" plugintools.log(\"[nec tv-0.1.0].server_rtmp \" + repr(params))\n",
"\n",
" url = params.get(\"url\")\n",
" plugintools.log(\"URL= \"+url)\n",
" \n",
" if url.find(\"iguide.to\") >= 0:\n",
" params[\"server\"] = 'iguide'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" if url.find(\"freetvcast.pw\") >= 0:\n",
" params[\"server\"] = 'freetvcast'\n",
" return params \n",
"\n",
" elif url.find(\"9stream\") >= 0:\n",
" params[\"server\"] = '9stream'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"freebroadcast\") >= 0:\n",
" params[\"server\"] = 'freebroadcast'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params \n",
"\n",
" elif url.find(\"goodgame.ru\") >= 0:\n",
" params[\"server\"] = 'goodgame.ru'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"hdcast\") >= 0:\n",
" params[\"server\"] = 'hdcast'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params \n",
"\n",
" elif url.find(\"sharecast\") >= 0:\n",
" params[\"server\"] = 'sharecast'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"cast247\") >= 0:\n",
" params[\"server\"] = 'cast247'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"castalba\") >= 0:\n",
" params[\"server\"] = 'castalba'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params \n",
"\n",
" elif url.find(\"direct2watch\") >= 0:\n",
" params[\"server\"] = 'direct2watch'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
" \n",
" elif url.find(\"vaughnlive\") >= 0:\n",
" params[\"server\"] = 'vaughnlive'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"streamingfreetv\") >= 0:\n",
" params[\"server\"] = 'streamingfreetv'\n",
" return params \n",
"\n",
" elif url.find(\"totalplay\") >= 0:\n",
" params[\"server\"] = 'totalplay'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params \n",
"\n",
" elif url.find(\"shidurlive\") >= 0:\n",
" params[\"server\"] = 'shidurlive'\n",
" return params \n",
" \n",
" elif url.find(\"everyon\") >= 0:\n",
" params[\"server\"] = 'everyon'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"iviplanet\") >= 0:\n",
" params[\"server\"] = 'iviplanet'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params \n",
"\n",
" elif url.find(\"cxnlive\") >= 0:\n",
" params[\"server\"] = 'cxnlive'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params \n",
"\n",
" elif url.find(\"ucaster\") >= 0:\n",
" params[\"server\"] = 'ucaster'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"mediapro\") >= 0:\n",
" params[\"server\"] = 'mediapro'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"veemi\") >= 0:\n",
" params[\"server\"] = 'veemi'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"yukons.net\") >= 0:\n",
" params[\"server\"] = 'yukons.net'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params \n",
"\n",
" elif url.find(\"janjua\") >= 0:\n",
" params[\"server\"] = 'janjua'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"mips\") >= 0:\n",
" params[\"server\"] = 'mips'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"zecast\") >= 0:\n",
" params[\"server\"] = 'zecast'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"vertvdirecto\") >= 0:\n",
" params[\"server\"] = 'vertvdirecto'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"filotv\") >= 0:\n",
" params[\"server\"] = 'filotv'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"dinozap\") >= 0:\n",
" params[\"server\"] = 'dinozap'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params \n",
"\n",
" elif url.find(\"ezcast\") >= 0:\n",
" params[\"server\"] = 'ezcast'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url\n",
" return params\n",
"\n",
" elif url.find(\"flashstreaming\") >= 0:\n",
" params[\"server\"] = 'flashstreaming'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"shidurlive\") >= 0:\n",
" params[\"server\"] = 'shidurlive'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"multistream\") >= 0:\n",
" params[\"server\"] = 'multistream'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"playfooty\") >= 0:\n",
" params[\"server\"] = 'playfooty'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"flashtv\") >= 0:\n",
" params[\"server\"] = 'flashtv'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"04stream\") >= 0:\n",
" params[\"server\"] = '04stream'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"vercosas\") >= 0:\n",
" params[\"server\"] = 'vercosasgratis'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"dcast\") >= 0:\n",
" params[\"server\"] = 'dcast'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"playfooty\") >= 0:\n",
" params[\"server\"] = 'playfooty'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
"\n",
" elif url.find(\"pvtserverz\") >= 0:\n",
" params[\"server\"] = 'pvtserverz'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url \n",
" return params\n",
" \n",
" else:\n",
" params[\"server\"] = 'undefined'\n",
" if url.find(\"timeout\") < 0:\n",
" url = url + ' timeout=15'\n",
" params[\"url\"]=url\n",
" return params\n",
"\n",
"def launch_rtmp(params):\n",
" plugintools.log(\"[nec tv-0.1.0].launch_rtmp \" + repr(params))\n",
"\n",
" url = params.get(\"url\")\n",
" title = params.get(\"title\")\n",
" title = title.replace(\"[/COLOR]\", \"\")\n",
" title = title.strip()\n",
" plugintools.log(\"Vamos a buscar en el título: \"+title)\n",
"\n",
" if title.endswith(\"[9stream]\") == True:\n",
" params[\"server\"] = '9stream'\n",
" ninestreams(params)\n",
" \n",
" elif title.endswith(\"[iguide]\") == True:\n",
" plugintools.log(\"es un iguide!\")\n",
" params[\"server\"] = 'iguide'\n",
" # DEBUG: Keyboard: scancode: 0x01, sym: 0x001b, unicode: 0x001b, modifier: 0x0\n",
" #pDialog = xbmcgui.DialogProgress()\n",
" #msg = pDialog.create('latinototal', 'Intentando reproducir RTMP...')\n",
" plugintools.play_resolved_url(url)\n",
" #xbmc.sleep(15000)\n",
" #plugintools.stop_resolved_url(url)\n",
"\n",
" elif title.endswith(\"[streamingfreetv]\") == True:\n",
" print 'streamingfreetv'\n",
" params[\"server\"] = 'streamingfreetv'\n",
" streamingfreetv(params) \n",
" \n",
"\n",
" elif title.endswith(\"[vercosasgratis]\") == True:\n",
" print 'vercosasgratis'\n",
" params[\"server\"] = 'vercosasgratis'\n",
" vercosas(params)\n",
"\n",
" elif title.endswith(\"[freebroadcast]\") == True:\n",
" print 'freebroadcast'\n",
" params[\"server\"] = 'freebroadcast'\n",
" freebroadcast(params) \n",
"\n",
" elif title.endswith(\"[ucaster]\") == True:\n",
" params[\"server\"] = 'ucaster'\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif title.endswith(\"[direct2watch]\") == True:\n",
" params[\"server\"] = 'direct2watch'\n",
" directwatch(params)\n",
"\n",
" elif title.endswith(\"[shidurlive]\") == True:\n",
" params[\"server\"] = 'shidurlive'\n",
" shidurlive(params)\n",
"\n",
" elif title.endswith(\"[cast247]\") == True:\n",
" params[\"server\"] = 'cast247'\n",
" castdos(params)\n",
"\n",
" elif url.find(\"hdcast\") >= 0:\n",
" params[\"server\"] = 'hdcast'\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.find(\"janjua\") >= 0:\n",
" params[\"server\"] = 'janjua'\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.find(\"mips\") >= 0:\n",
" params[\"server\"] = 'mips'\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.find(\"zecast\") >= 0:\n",
" params[\"server\"] = 'zecast'\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.find(\"filotv\") >= 0:\n",
" params[\"server\"] = 'filotv'\n",
" print \"filotv\"\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.find(\"ezcast\") >= 0:\n",
" params[\"server\"] = 'ezcast'\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.find(\"flashstreaming\") >= 0:\n",
" params[\"server\"] = 'flashstreaming'\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.find(\"shidurlive\") >= 0:\n",
" params[\"server\"] = 'shidurlive'\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.find(\"multistream\") >= 0:\n",
" params[\"server\"] = 'multistream'\n",
" print \"multistream\"\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.find(\"playfooty\") >= 0:\n",
" params[\"server\"] = 'playfooty'\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.find(\"flashtv\") >= 0:\n",
" params[\"server\"] = 'flashtv'\n",
" print \"flashtv\"\n",
" plugintools.play_resolved_url(url)\n",
"\n",
" elif url.find(\"freetvcast\") >= 0:\n",
" params[\"server\"] = 'freetvcast'\n",
" print \"freetvcast\"\n",
" freetvcast(params)\n",
"\n",
" elif url.find(\"04stream\") >= 0:\n",
" params[\"server\"] = '04stream'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"sharecast\") >= 0:\n",
" params[\"server\"] = 'sharecast'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"vaughnlive\") >= 0:\n",
" params[\"server\"] = 'vaughnlive'\n",
" resolve_vaughnlive(params)\n",
"\n",
" elif url.find(\"goodcast\") >= 0:\n",
" params[\"server\"] = 'goodcast'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"dcast.tv\") >= 0:\n",
" params[\"server\"] = 'dcast.tv'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"castalba\") >= 0:\n",
" params[\"server\"] = 'castalba'\n",
" castalba(params)\n",
"\n",
" elif url.find(\"tutelehd.com\") >= 0:\n",
" params[\"server\"] = 'tutelehd.com'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"flexstream\") >= 0:\n",
" params[\"server\"] = 'flexstream'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"xxcast\") >= 0:\n",
" params[\"server\"] = 'xxcast'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"vipi.tv\") >= 0:\n",
" params[\"server\"] = 'vipi.tv'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"watchjsc\") >= 0:\n",
" params[\"server\"] = 'watchjsc'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"zenex.tv\") >= 0:\n",
" params[\"server\"] = 'zenex.tv'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"castto\") >= 0:\n",
" params[\"server\"] = 'castto'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"tvzune\") >= 0:\n",
" params[\"server\"] = 'tvzune'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"flashcast\") >= 0:\n",
" params[\"server\"] = 'flashcast'\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"ilive.to\") >= 0:\n",
" params[\"server\"] = 'ilive.to'\n",
" print \"iliveto\"\n",
" plugintools.play_resolved_url(url) \n",
"\n",
" elif url.find(\"Direct2Watch\") >= 0:\n",
" params[\"server\"] = 'Direct2Watch'\n",
" print \"direct2watch\"\n",
" plugintools.play_resolved_url(url)\n",
" \n",
" else:\n",
" print \"No ha encontrado launcher\"\n",
" params[\"server\"] = 'undefined'\n",
" print \"ninguno\"\n",
" plugintools.play_resolved_url(url) \n",
" \n",
" \n",
"\n",
"def peliseries(params):\n",
" plugintools.log(\"[nec tv-0.1.0].peliseries \" +repr(params))\n",
"\n",
" # Abrimos archivo remoto\n",
" url = params.get(\"url\")\n",
" filepelis = urllib2.urlopen(url)\n",
"\n",
" # Creamos archivo local para pegar las entradas\n",
" plot = params.get(\"plot\")\n",
" plot = parser_title(plot)\n",
" if plot == \"\":\n",
" title = params.get(\"title\")\n",
" title = parser_title(title)\n",
" filename = title + \".m3u\"\n",
" fh = open(playlists + filename, \"wb\")\n",
" else:\n",
" filename = params.get(\"plot\") + \".m3u\"\n",
" fh = open(playlists + filename, \"wb\")\n",
" \n",
" plugintools.log(\"filename= \"+filename)\n",
" url = params.get(\"url\")\n",
" plugintools.log(\"url= \"+url)\n",
"\n",
"\n",
" #open the file for writing\n",
" fw = open(playlists + filename, \"wb\")\n",
"\n",
" #open the file for writing\n",
" fh = open(playlists + 'filepelis.m3u', \"wb\")\n",
" fh.write(filepelis.read())\n",
"\n",
" fh.close()\n",
"\n",
" fw = open(playlists + filename, \"wb\")\n",
" fr = open(playlists + 'filepelis.m3u', \"r\")\n",
" fr.seek(0)\n",
" num_items = len(fr.readlines())\n",
" print num_items\n",
" fw.seek(0)\n",
" fr.seek(0)\n",
" data = fr.readline()\n",
" fanart = params.get(\"extra\")\n",
" thumbnail = params.get(\"thumbnail\")\n",
" fw.write('#EXTM3U:\"background\"='+fanart+',\"thumbnail\"='+thumbnail)\n",
" fw.write(\"#EXTINF:-1,[COLOR lightyellow][I]playlists / \" + filename + '[/I][/COLOR]' + '\\n\\n')\n",
" i = 0\n",
"\n",
" while i <= num_items:\n",
"\n",
" if data == \"\":\n",
" data = fr.readline()\n",
" data = data.strip()\n",
" plugintools.log(\"data= \" +data)\n",
" i = i + 1\n",
" print i\n",
" continue\n",
"\n",
" elif data.find(\"http\") >= 0 :\n",
" data = data.split(\"http\")\n",
" chapter = data[0]\n",
" chapter = chapter.strip()\n",
" url = \"http\" + data[1]\n",
" url = url.strip()\n",
" plugintools.log(\"url= \"+url)\n",
" fw.write(\"\\n#EXTINF:-1,\" + chapter + '\\n')\n",
" fw.write(url + '\\n\\n')\n",
" data = fr.readline()\n",
" plugintools.log(\"data= \" +data)\n",
" i = i + 1\n",
" print i\n",
" continue\n",
" \n",
" else:\n",
" data = fr.readline()\n",
" data = data.strip()\n",
" plugintools.log(\"data= \"+data)\n",
" i = i + 1\n",
" print i\n",
" continue \n",
"\n",
" fw.close()\n",
" fr.close()\n",
" params[\"ext\"]='m3u'\n",
" filename = filename.replace(\".m3u\", \"\")\n",
" params[\"plot\"]=filename\n",
" params[\"title\"]=filename\n",
"\n",
" # Capturamos de nuevo thumbnail y fanart\n",
" \n",
" os.remove(playlists + 'filepelis.m3u')\n",
" simpletv_items(params)\n",
" \n",
"\n",
"def tinyurl(params):\n",
" plugintools.log(\"[nec tv-0.1.0].tinyurl \"+repr(params))\n",
"\n",
" url = params.get(\"url\")\n",
" url_getlink = 'http://www.getlinkinfo.com/info?link=' +url\n",
"\n",
" plugintools.log(\"url_fixed= \"+url_getlink)\n",
"\n",
" request_headers=[]\n",
" request_headers.append([\"User-Agent\",\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31\"])\n",
" body,response_headers = plugintools.read_body_and_headers(url_getlink, headers=request_headers)\n",
" plugintools.log(\"data= \"+body)\n",
"\n",
" r = plugintools.find_multiple_matches(body, '<dt class=\"link-effective-url\">Effective URL</dt>(.*?)</a></dd>')\n",
" xbmc.executebuiltin(\"Notification(%s,%s,%i,%s)\" % ('PalcoTV', \"Redireccionando enlace...\", 3 , art+'icon.png'))\n",
" \n",
" for entry in r:\n",
" entry = entry.replace(\"<dd><a href=\", \"\")\n",
" entry = entry.replace('rel=\"nofollow\">', \"\")\n",
" entry = entry.split('\"')\n",
" entry = entry[1]\n",
" entry = entry.strip()\n",
" plugintools.log(\"vamos= \"+entry)\n",
" \n",
" if entry.startswith(\"http\"):\n",
" plugintools.play_resolved_url(entry)\n",
"\n",
"\n",
"\n",
"# Conexión con el servicio longURL.org para obtener URL original \n",
"def longurl(params):\n",
" plugintools.log(\"[nec tv-0.1.0].longURL \"+repr(params))\n",
"\n",
" url = params.get(\"url\")\n",
" url_getlink = 'http://api.longurl.org/v2/expand?url=' +url\n",
"\n",
" plugintools.log(\"url_fixed= \"+url_getlink)\n",
"\n",
" try:\n",
" request_headers=[]\n",
" request_headers.append([\"User-Agent\",\"Application-Name/3.7\"])\n",
" body,response_headers = plugintools.read_body_and_headers(url_getlink, headers=request_headers)\n",
" plugintools.log(\"data= \"+body)\n",
"\n",
" # <long-url><![CDATA[http://85.25.43.51:8080/DE_skycomedy?u=euorocard:p=besplatna]]></long-url>\n",
" # xbmc.executebuiltin(\"Notification(%s,%s,%i,%s)\" % ('nec tv', \"Redireccionando enlace...\", 3 , art+'icon.png'))\n",
" longurl = plugintools.find_single_match(body, '<long-url>(.*?)</long-url>')\n",
" longurl = longurl.replace(\"<![CDATA[\", \"\")\n",
" longurl = longurl.replace(\"]]>\", \"\")\n",
" plugintools.log(\"longURL= \"+longurl)\n",
" if longurl.startswith(\"http\"):\n",
" plugintools.play_resolved_url(longurl)\n",
"\n",
" except:\n",
" play(params)\n",
"\n",
"\n",
"\n",
"def opentxt(self):\n",
"\n",
" texto = xbmcgui.ControlTextBox (100, 250, 300, 300, textColor='0xFFFFFFFF')\n",
" texto.setText('log.txt')\n",
"\n",
" texto.setVisible(window)\n",
" \n",
" \n",
" \n",
"def encode_url(url):\n",
" url_fixed= urlencode(url)\n",
" print url_fixed\n",
"\n",
"\n",
"\n",
"def seriecatcher(params):\n",
" plugintools.log(\"[nec tv-0.1.0].seriecatcher \"+repr(params))\n",
" \n",
" url = params.get(\"url\")\n",
" fanart = params.get(\"extra\")\n",
" data = plugintools.read(url)\n",
" temp = plugintools.find_multiple_matches(data, '<i class=\\\"glyphicon\\\"></i>(.*?)</a>')\n",
" SelectTemp(params, temp)\n",
"\n",
"\n",
"def GetSerieChapters(params):\n",
" plugintools.log(\"[nec tv-0.1.0].GetSerieChapters \"+repr(params))\n",
"\n",
" season = params.get(\"season\")\n",
" data = plugintools.read(params.get(\"url\"))\n",
" \n",
" season = plugintools.find_multiple_matches(data, season + '(.*?)</table>')\n",
" season = season[0]\n",
" \n",
" for entry in season:\n",
" url_cap = plugintools.find_multiple_matches(season, '<a href=\\\"/capitulo(.*?)\\\" class=\\\"color4\\\"')\n",
" title = plugintools.find_multiple_matches(season, 'class=\\\"color4\\\">(.*?)</a>')\n",
"\n",
" num_items = len(url_cap) \n",
" i = 1\n",
" \n",
" while i <= num_items:\n",
" url_cap_fixed = 'http://seriesadicto.com/capitulo/' + url_cap[i-1]\n",
" title_fixed = title[i-1]\n",
" fanart = params.get(\"extra\")\n",
" GetSerieLinks(fanart , url_cap_fixed, i, title_fixed)\n",
" i = i + 1\n",
" \n",
" \n",
" \n",
"def GetSerieLinks(fanart , url_cap_fixed, i, title_fixed):\n",
" plugintools.log(\"[nec tv-0.1.0].GetSerieLinks\")\n",
" \n",
" data = plugintools.read(url_cap_fixed)\n",
" amv = plugintools.find_multiple_matches(data, 'allmyvideos.net/(.*?)\"')\n",
" strcld = plugintools.find_multiple_matches(data, 'streamcloud.eu/(.*?)\"')\n",
" vdspt = plugintools.find_multiple_matches(data, 'vidspot.net/(.*?)\"')\n",
" plydt = plugintools.find_multiple_matches(data, 'played.to/(.*?)\"')\n",
" thumbnail = plugintools.find_single_match(data, 'src=\\\"/img/series/(.*?)\"')\n",
" thumbnail_fixed = 'http://seriesadicto.com/img/series/' + thumbnail\n",
" \n",
" for entry in amv:\n",
" amv_url = 'http://allmyvideos.net/' + entry \n",
" plugintools.add_item(action=\"play\" , title = title_fixed + '[COLOR lightyellow] [Allmyvideos][/COLOR]', url = amv_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)\n",
"\n",
" for entry in strcld:\n",
" strcld_url = 'http://streamcloud.eu/' + entry\n",
" plugintools.add_item(action=\"play\" , title = title_fixed + '[COLOR lightskyblue] [Streamcloud][/COLOR]', url = strcld_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)\n",
"\n",
" for entry in vdspt:\n",
" vdspt_url = 'http://vidspot.net/' + entry\n",
" plugintools.add_item(action=\"play\" , title = title_fixed + '[COLOR palegreen] [Vidspot][/COLOR]', url = vdspt_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)\n",
"\n",
" for entry in plydt:\n",
" plydt_url = 'http://played.to/' + entry\n",
" plugintools.add_item(action=\"play\" , title = title_fixed + '[COLOR lavender] [Played.to][/COLOR]', url = plydt_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)\n",
"\n",
" for entry in plydt:\n",
" plydt_url = 'vk.com' + entry\n",
" plugintools.add_item(action=\"play\" , title = title_fixed + '[COLOR royalblue] [Vk][/COLOR]', url = plydt_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)\n",
"\n",
" for entry in plydt:\n",
" plydt_url = 'nowvideo.sx' + entry\n",
" plugintools.add_item(action=\"play\" , title = title_fixed + '[COLOR red] [Nowvideo][/COLOR]', url = plydt_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True) \n",
"\n",
" for entry in plydt:\n",
" plydt_url = 'http://tumi.tv/' + entry\n",
" plugintools.add_item(action=\"play\" , title = title_fixed + '[COLOR forestgreen] [Tumi][/COLOR]', url = plydt_url , thumbnail = thumbnail_fixed , fanart = fanart , folder = False , isPlayable = True)\n",
" \n",
" \n",
"\n",
"def SelectTemp(params, temp):\n",
" plugintools.log(\"[nec tv-0.1.0].SelectTemp \"+repr(params))\n",
"\n",
" seasons = len(temp)\n",
" \n",
" dialog = xbmcgui.Dialog()\n",
" \n",
" if seasons == 1:\n",
" selector = dialog.select('latinototal', [temp[0]])\n",
" \n",
" if seasons == 2:\n",
" selector = dialog.select('latinototal', [temp[0], temp[1]])\n",
" \n",
" if seasons == 3:\n",
" selector = dialog.select('latinototal', [temp[0],temp[1], temp[2]])\n",
" \n",
" if seasons == 4:\n",
" selector = dialog.select('latinototal', [temp[0], temp[1],temp[2], temp[3]])\n",
" \n",
" if seasons == 5:\n",
" selector = dialog.select('latinototal', [temp[0], temp[1],temp[2], temp[3], temp[4]])\n",
" \n",
" if seasons == 6:\n",
" selector = dialog.select('latinototal', [temp[0], temp[1],temp[2], temp[3], temp[4], temp[5]])\n",
" \n",
" if seasons == 7:\n",
" selector = dialog.select('latinototal', [temp[0], temp[1],temp[2], temp[3], temp[4], temp[5], temp[6]])\n",
" \n",
" if seasons == 8:\n",
" selector = dialog.select('latinototal', [temp[0], temp[1],temp[2], temp[3], temp[4], temp[5], temp[6], temp[7]])\n",
" \n",
" if seasons == 9:\n",
" selector = dialog.select('latinototal', [temp[0], temp[1],temp[2], temp[3], temp[4], temp[5], temp[6], temp[7], temp[8]])\n",
" \n",
" if seasons == 10:\n",
" selector = dialog.select('latinototal', [temp[0], temp[1],temp[2], temp[3], temp[4], temp[5], temp[6], temp[7], temp[8], temp[9]]) \n",
"\n",
" i = 0\n",
" while i<= seasons :\n",
" if selector == i:\n",
" params[\"season\"] = temp[i]\n",
" GetSerieChapters(params)\n",
"\n",
" i = i + 1\n",
" \n",
"\n",
" \n",
"\n",
"def m3u_items(title):\n",
" plugintools.log(\"[nec tv-0.1.0].m3u_items= \"+title)\n",
"\n",
" thumbnail = art + 'icon.png'\n",
" fanart = art + 'fanart.jpg'\n",
" only_title = title\n",
"\n",
" if title.find(\"tvg-logo\") >= 0:\n",
" thumbnail = re.compile('tvg-logo=\"(.*?)\"').findall(title)\n",
" num_items = len(thumbnail)\n",
" print 'num_items',num_items\n",
" if num_items == 0:\n",
" thumbnail = 'm3u.png'\n",
" else: \n",
" thumbnail = thumbnail[0]\n",
" #plugintools.log(\"thumbnail= \"+thumbnail)\n",
" \n",
" only_title = only_title.replace('tvg-logo=\"', \"\")\n",
" only_title = only_title.replace(thumbnail, \"\") \n",
"\n",
" if title.find(\"tvg-wall\") >= 0:\n",
" fanart = re.compile('tvg-wall=\"(.*?)\"').findall(title)\n",
" fanart = fanart[0]\n",
" only_title = only_title.replace('tvg-wall=\"', \"\")\n",
" only_title = only_title.replace(fanart, \"\") \n",
"\n",
" if title.find(\"group-title\") >= 0:\n",
" cat = re.compile('group-title=\"(.*?)\"').findall(title)\n",
" if len(cat) == 0:\n",
" cat = \"\"\n",
" else:\n",
" cat = cat[0]\n",
" plugintools.log(\"m3u_categoria= \"+cat)\n",
" only_title = only_title.replace('group-title=', \"\")\n",
" only_title = only_title.replace(cat, \"\")\n",
" else:\n",
" cat = \"\"\n",
"\n",
" if title.find(\"tvg-id\") >= 0:\n",
" title = title.replace('”', '\"')\n",
" title = title.replace('“', '\"')\n",
" tvgid = re.compile('tvg-id=\"(.*?)\"').findall(title)\n",
" print 'tvgid',tvgid\n",
" tvgid = tvgid[0]\n",
" plugintools.log(\"m3u_categoria= \"+tvgid)\n",
" only_title = only_title.replace('tvg-id=', \"\")\n",
" only_title = only_title.replace(tvgid, \"\")\n",
" else:\n",
" tvgid = \"\"\n",
"\n",
" if title.find(\"tvg-name\") >= 0:\n",
" tvgname = re.compile('tvg-name=\"(.*?)').findall(title)\n",
" tvgname = tvgname[0]\n",
" plugintools.log(\"m3u_categoria= \"+tvgname)\n",
" only_title = only_title.replace('tvg-name=', \"\")\n",
" only_title = only_title.replace(tvgname, \"\")\n",
" else:\n",
" tvgname = \"\" \n",
"\n",
" only_title = only_title.replace('\"', \"\")\n",
" #plugintools.log(\"m3u_thumbnail= \"+thumbnail)\n",
" #plugintools.log(\"m3u_fanart= \"+fanart)\n",
" #plugintools.log(\"only_title= \"+only_title)\n",
"\n",
" return thumbnail, fanart, cat, only_title, tvgid, tvgname\n",
"\n",
"\n",
"\n",
"\n",
"def xml_skin():\n",
" plugintools.log(\"[nec tv-0.1.0].xml_skin\")\n",
"\n",
" mastermenu = plugintools.get_setting(\"mastermenu\")\n",
" xmlmaster = plugintools.get_setting(\"xmlmaster\")\n",
" SelectXMLmenu = plugintools.get_setting(\"SelectXMLmenu\")\n",
"\n",
" # values=\"Latino Total|Pastebin|Personalizado\"\n",
" if xmlmaster == 'true':\n",
" if SelectXMLmenu == '0':\n",
" mastermenu = 'http://pastebin.com/raw.php?i=n9BF6Cwe'\n",
" plugintools.log(\"[PalcoTV.xml_skin: \"+SelectXMLmenu)\n",
" # Control para ver la intro de PalcoTV\n",
" ver_intro = plugintools.get_setting(\"ver_intro\")\n",
" if ver_intro == \"true\":\n",
" xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(art + 'intro.mp4') \n",
" elif SelectXMLmenu == '1': # Pastebin\n",
" id_pastebin = plugintools.get_setting(\"id_pastebin\")\n",
" if id_pastebin == \"\":\n",
" plugintools.log(\"[PalcoTV.xml_skin: No definido\") \n",
" mastermenu = 'http://pastebin.com/raw.php?i=n9BF6Cwe'\n",
" else: \n",
" mastermenu = 'http://pastebin.com/raw.php?i=' +id_pastebin\n",
" plugintools.log(\"[PalcoTV.xml_skin: \"+mastermenu)\n",
" elif SelectXMLmenu == '2': # Personalizado\n",
" mastermenu = plugintools.get_setting(\"mastermenu\")\n",
" if mastermenu == \"\":\n",
" plugintools.log(\"[PalcoTV.xml_skin: No definido\")\n",
" mastermenu = 'http://pastebin.com/raw.php?i=n9BF6Cwe' \n",
" # Control para ver la intro de PalcoTV\n",
" ver_intro = plugintools.get_setting(\"ver_intro\")\n",
" if ver_intro == \"true\":\n",
" xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(art + 'intro.mp4')\n",
" \n",
" else:\n",
" # xmlmaster = False (no activado), menú por defecto \n",
" mastermenu = 'http://pastebin.com/raw.php?i=n9BF6Cwe'\n",
"\n",
" # Control para ver la intro de latinototal\n",
" ver_intro = plugintools.get_setting(\"ver_intro\")\n",
" if ver_intro == \"true\":\n",
" xbmc.Player(xbmc.PLAYER_CORE_AUTO).play(art + 'intro.mp4')\n",
" \n",
"\n",
" return mastermenu\n",
"\n",
"\n",
"\n",
"\n",
"\n",
"run()\n",
"\n"
] | [
0,
0.016129032258064516,
0,
0,
0.016129032258064516,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022222222222222223,
0.009433962264150943,
0,
0.010869565217391304,
0.010869565217391304,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142,
0.09090909090909091,
0,
0,
0.2,
0,
0.02564102564102564,
0.2,
0,
0,
0,
0.02702702702702703,
0.03225806451612903,
0.03333333333333333,
0.2,
0.02564102564102564,
0,
0,
0.25,
0,
0.029411764705882353,
0,
0,
0.3333333333333333,
0.08333333333333333,
0,
0.043478260869565216,
0,
0.3333333333333333,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0.025423728813559324,
0,
0.023529411764705882,
0,
0.013157894736842105,
0.0136986301369863,
0.022727272727272728,
0.012658227848101266,
0.11188811188811189,
0,
0.020833333333333332,
0.024691358024691357,
0,
0.013513513513513514,
0.022727272727272728,
0.012658227848101266,
0.012658227848101266,
0.02127659574468085,
0.014285714285714285,
0.022988505747126436,
0,
0,
0,
0.03125,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0.12322274881516587,
0,
0.11894273127753303,
0,
0.1187214611872146,
0,
0,
0,
0.12560386473429952,
0,
0.12560386473429952,
0,
0.12093023255813953,
0.07692307692307693,
0.0625,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0.023809523809523808,
0,
0.022727272727272728,
0,
0.025,
0,
0.023809523809523808,
0,
0.02564102564102564,
0,
0.024390243902439025,
0,
0.02702702702702703,
0,
0.022727272727272728,
0.022222222222222223,
0,
0,
0,
0,
0,
0,
0.125,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0.06666666666666667,
0.5,
0.05,
0.018518518518518517,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0.009174311926605505,
0,
0,
0.1111111111111111,
0.008264462809917356,
0.2,
0.013333333333333334,
0,
0.013157894736842105,
0,
0,
0.022727272727272728,
0.013888888888888888,
0,
0,
0.14285714285714285,
0.1111111111111111,
0,
0.3333333333333333,
0.043478260869565216,
0,
0.04081632653061224,
0,
0,
0,
0,
0,
0,
0.08196721311475409,
0.2,
0,
0,
0.2,
0.004366812227074236,
0.011363636363636364,
0.03125,
0,
0.02564102564102564,
0,
0.03225806451612903,
0.15384615384615385,
0.02631578947368421,
0,
0.058823529411764705,
0.045454545454545456,
0.15730337078651685,
0.029411764705882353,
0.021739130434782608,
0.058823529411764705,
0,
0.07692307692307693,
0.03571428571428571,
0,
0.2,
0.04081632653061224,
0,
0.024,
0.013157894736842105,
0.2,
0,
0.014492753623188406,
0.014705882352941176,
0.102803738317757,
0.1111111111111111,
0.16666666666666666,
0,
0.030927835051546393,
0,
0.029411764705882353,
0,
0.04081632653061224,
0.125,
0,
0,
0.2,
0,
0,
0,
0,
0,
0.2,
0,
0.024096385542168676,
0,
0.01282051282051282,
0.021739130434782608,
0.024096385542168676,
0.07142857142857142,
0,
0,
0.024096385542168676,
0,
0.01282051282051282,
0.021739130434782608,
0.024096385542168676,
0.07386363636363637,
0.25,
0.1111111111111111,
0,
0,
0,
0.04081632653061224,
0,
0,
0,
0.012195121951219513,
0,
0,
0.07462686567164178,
0,
0.0125,
0,
0.06818181818181818,
0,
0,
0,
0,
0,
0,
0,
0.04081632653061224,
0.022727272727272728,
0,
0.010752688172043012,
0,
0,
0,
0.2,
0,
0.02564102564102564,
0.1323529411764706,
0,
0,
0,
0,
0,
0,
0.019230769230769232,
0,
0.012048192771084338,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.04081632653061224,
0.021739130434782608,
0,
0.011764705882352941,
0.007692307692307693,
0.125,
0,
0,
0,
0.2,
0,
0,
0,
0.02564102564102564,
0.1337579617834395,
0,
0,
0,
0,
0.04081632653061224,
0,
0,
0.023529411764705882,
0,
0.012658227848101266,
0,
0.0125,
0.021739130434782608,
0.01282051282051282,
0.023529411764705882,
0.14942528735632185,
0,
0,
0.023529411764705882,
0,
0.01282051282051282,
0.01282051282051282,
0.1419753086419753,
0,
0.07142857142857142,
0,
0,
0,
0,
0,
0,
0,
0,
0.04,
0,
0,
0.01,
0,
0.043478260869565216,
0,
0.037037037037037035,
0,
0.1111111111111111,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0.14285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08849557522123894,
0,
0.2,
0.01904761904761905,
0,
0,
0,
0,
0,
0,
0,
0,
0.02,
0,
0,
0,
0,
0.07692307692307693,
0.024390243902439025,
0.021739130434782608,
0.017241379310344827,
0,
0,
0,
0,
0,
0,
0,
0.0072992700729927005,
0.037037037037037035,
0,
0.015151515151515152,
0,
0,
0.027777777777777776,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0,
0.017241379310344827,
0,
0,
0,
0.023809523809523808,
0,
0.021739130434782608,
0,
0.1111111111111111,
0,
0,
0,
0.024390243902439025,
0,
0,
0.017241379310344827,
0,
0,
0,
0,
0,
0,
0,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667,
0,
0.011111111111111112,
0,
0.019230769230769232,
0,
0,
0.012345679012345678,
0,
0,
0,
0.07665505226480836,
0,
0,
0,
0,
0,
0,
0,
0.09090909090909091,
0,
0,
0.01639344262295082,
0,
0,
0.07076923076923076,
0,
0,
0,
0,
0.08214285714285714,
0,
0,
0,
0,
0.058823529411764705,
0.038461538461538464,
0,
0.011904761904761904,
0,
0.015625,
0,
0,
0,
0.0782312925170068,
0,
0,
0,
0,
0,
0,
0,
0.07308970099667775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.0728476821192053,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.07560137457044673,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.07508532423208192,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.07829181494661921,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.07665505226480836,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0.012195121951219513,
0.07665505226480836,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.07792207792207792,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0.014705882352941176,
0,
0.011904761904761904,
0,
0,
0,
0,
0.010752688172043012,
0.008,
0.08029197080291971,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.010869565217391304,
0,
0,
0.009615384615384616,
0,
0.007692307692307693,
0.021551724137931036,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0.009345794392523364,
0.008695652173913044,
0,
0.007692307692307693,
0.04782608695652174,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0.013333333333333334,
0,
0.015873015873015872,
0,
0,
0.012195121951219513,
0.07801418439716312,
0,
0,
0,
0,
0,
0.014492753623188406,
0.034482758620689655,
0,
0,
0,
0.012048192771084338,
0.0763888888888889,
0,
0,
0,
0,
0,
0,
0,
0.015625,
0,
0,
0,
0.07665505226480836,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.010869565217391304,
0.07876712328767123,
0,
0,
0,
0.034482758620689655,
0.010869565217391304,
0.07849829351535836,
0,
0,
0,
0,
0,
0,
0,
0.02197802197802198,
0.08156028368794327,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0.08098591549295775,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.012195121951219513,
0.08455882352941177,
0,
0,
0,
0,
0,
0.014705882352941176,
0,
0.011363636363636364,
0.08273381294964029,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0.08273381294964029,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0.08041958041958042,
0,
0,
0,
0,
0,
0.014084507042253521,
0,
0.011494252873563218,
0.08156028368794327,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0.011904761904761904,
0,
0,
0.018867924528301886,
0.010752688172043012,
0.008,
0.07560137457044673,
0,
0,
0,
0,
0,
0.014705882352941176,
0.034482758620689655,
0.010869565217391304,
0,
0,
0.012345679012345678,
0,
0.007692307692307693,
0.009615384615384616,
0.029304029304029304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009345794392523364,
0.008695652173913044,
0,
0.007692307692307693,
0.03690036900369004,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0.015873015873015872,
0,
0,
0.012195121951219513,
0.08088235294117647,
0,
0,
0,
0,
0,
0,
0.016129032258064516,
0.08394160583941605,
0,
0,
0,
0,
0,
0,
0.02,
0,
0,
0.015625,
0,
0,
0,
0.0782312925170068,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.07308970099667775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.07260726072607261,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0.034482758620689655,
0.01694915254237288,
0,
0,
0.012048192771084338,
0.07560137457044673,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.01639344262295082,
0,
0,
0.012048192771084338,
0.07508532423208192,
0,
0,
0,
0,
0,
0,
0,
0,
0.017241379310344827,
0,
0,
0.012048192771084338,
0.07829181494661921,
0,
0,
0,
0,
0,
0,
0,
0,
0.016666666666666666,
0,
0,
0.012048192771084338,
0.07665505226480836,
0,
0,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0,
0,
0.012048192771084338,
0.07665505226480836,
0,
0,
0,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0.012048192771084338,
0.07457627118644068,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0.014925373134328358,
0,
0,
0,
0.010752688172043012,
0.013071895424836602,
0.07508532423208192,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0.012422360248447204,
0.009615384615384616,
0.028169014084507043,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0.009345794392523364,
0.008695652173913044,
0,
0.007692307692307693,
0.03942652329749104,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.015873015873015872,
0,
0,
0.012195121951219513,
0.07801418439716312,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.034482758620689655,
0.03571428571428571,
0,
0.012048192771084338,
0.07773851590106007,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0.010752688172043012,
0.088,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0.08764940239043825,
0,
0,
0,
0,
0,
0,
0,
0,
0.022988505747126436,
0.0954356846473029,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.02247191011235955,
0.09465020576131687,
0,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0.09956709956709957,
0,
0,
0,
0,
0,
0,
0,
0,
0.022727272727272728,
0.0970464135021097,
0,
0,
0,
0,
0,
0,
0,
0,
0.022988505747126436,
0.0970464135021097,
0,
0,
0,
0,
0,
0,
0,
0,
0.02197802197802198,
0.09387755102040816,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0.014925373134328358,
0,
0,
0,
0.010638297872340425,
0.017857142857142856,
0.013071895424836602,
0.08208955223880597,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0.012345679012345678,
0.015151515151515152,
0.014285714285714285,
0.04128440366972477,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009345794392523364,
0.008695652173913044,
0.015151515151515152,
0,
0.007692307692307693,
0.05194805194805195,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014492753623188406,
0.034482758620689655,
0.015873015873015872,
0,
0,
0.012195121951219513,
0.09523809523809523,
0,
0,
0,
0,
0,
0,
0,
0.08712121212121213,
0,
0,
0,
0.06666666666666667,
0.034482758620689655,
0,
0,
0.011904761904761904,
0,
0,
0.014285714285714285,
0,
0.037037037037037035,
0,
0.07142857142857142,
0,
0.016129032258064516,
0,
0,
0,
0,
0,
0.0136986301369863,
0,
0.037037037037037035,
0,
0,
0.07692307692307693,
0.016129032258064516,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0.013888888888888888,
0,
0.037037037037037035,
0.013157894736842105,
0.0744336569579288,
0.016129032258064516,
0,
0,
0,
0,
0,
0.0136986301369863,
0,
0,
0.037037037037037035,
0.013333333333333334,
0.0933852140077821,
0,
0,
0,
0,
0,
0.03529411764705882,
0,
0,
0,
0,
0.011904761904761904,
0,
0.08029197080291971,
0,
0,
0,
0,
0.08679245283018867,
0,
0,
0,
0.034482758620689655,
0,
0,
0.08363636363636363,
0,
0,
0,
0,
0.10267857142857142,
0,
0,
0,
0,
0.03529411764705882,
0,
0,
0,
0,
0.011904761904761904,
0,
0.08029197080291971,
0,
0,
0,
0,
0.08712121212121213,
0,
0,
0,
0.034482758620689655,
0,
0,
0.08363636363636363,
0,
0,
0,
0,
0.10267857142857142,
0,
0,
0.01694915254237288,
0,
0.018518518518518517,
0,
0,
0,
0,
0,
0,
0,
0.02,
0.016666666666666666,
0,
0.05789473684210526,
0,
0,
0,
0,
0.0707395498392283,
0,
0,
0,
0,
0,
0.06470588235294118,
0,
0,
0,
0,
0.08118081180811808,
0,
0,
0.014492753623188406,
0.04,
0.0196078431372549,
0,
0,
0.06948640483383686,
0,
0,
0,
0,
0.08185053380782918,
0,
0,
0,
0,
0,
0.07931034482758621,
0,
0,
0,
0,
0.09583333333333334,
0,
0,
0.014492753623188406,
0.04,
0.0196078431372549,
0,
0,
0.0696969696969697,
0,
0,
0,
0,
0.08214285714285714,
0,
0,
0,
0,
0,
0.07931034482758621,
0,
0,
0,
0,
0.09583333333333334,
0,
0,
0,
0.034482758620689655,
0.017857142857142856,
0,
0,
0,
0,
0.024390243902439025,
0,
0.06832298136645963,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0.011363636363636364,
0,
0,
0.08058608058608059,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0.009523809523809525,
0,
0.07829181494661921,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.09482758620689655,
0,
0,
0,
0,
0,
0,
0,
0.04,
0.03773584905660377,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0,
0.06811145510835913,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0.08029197080291971,
0,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0.07801418439716312,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0.0944206008583691,
0,
0,
0.01639344262295082,
0,
0.018867924528301886,
0,
0,
0,
0,
0,
0,
0,
0,
0.008771929824561403,
0.05625,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0.022727272727272728,
0.01,
0.06642066420664207,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01,
0.06451612903225806,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0.022727272727272728,
0.01,
0.0782608695652174,
0,
0,
0,
0.01639344262295082,
0.058823529411764705,
0.02040816326530612,
0.019230769230769232,
0.015873015873015872,
0,
0,
0,
0,
0,
0.012345679012345678,
0.012048192771084338,
0.009900990099009901,
0.07119741100323625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.019417475728155338,
0.009900990099009901,
0.08494208494208494,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0.006944444444444444,
0.0707395498392283,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0.006944444444444444,
0,
0.0842911877394636,
0,
0,
0,
0,
0,
0,
0,
0.04,
0.03773584905660377,
0,
0,
0.0944206008583691,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.09777777777777778,
0,
0,
0,
0,
0,
0,
0,
0,
0.03773584905660377,
0,
0,
0.007692307692307693,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007692307692307693,
0.10683760683760683,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0.058823529411764705,
0,
0,
0,
0,
0.08333333333333333,
0.041666666666666664,
0,
0,
0.05466237942122187,
0.06640625,
0,
0.0078125,
0,
0,
0,
0.2,
0,
0.025,
0,
0,
0,
0.03125,
0,
0.025,
0,
0.058823529411764705,
0.029411764705882353,
0.014492753623188406,
0,
0.08041958041958042,
0.047619047619047616,
0.02,
0,
0.08,
0.04,
0.02,
0,
0.07857142857142857,
0,
0.02,
0,
0.07971014492753623,
0.5,
0,
0.1111111111111111,
0.021739130434782608,
0,
0.08041958041958042,
0.047619047619047616,
0.02,
0,
0.08,
0.04,
0.02,
0,
0.07857142857142857,
0,
0.02,
0,
0.07971014492753623,
0,
0,
0,
0.058823529411764705,
0,
0.020833333333333332,
0,
0.04081632653061224,
0,
0,
0,
0,
0,
0.05555555555555555,
0.005988023952095809,
0,
0,
0,
0.09663865546218488,
0,
0,
0.029411764705882353,
0,
0.09663865546218488,
0,
0.029411764705882353,
0,
0.09663865546218488,
0,
0.029411764705882353,
0,
0.09663865546218488,
0,
0.029411764705882353,
0,
0.09663865546218488,
0,
0.029411764705882353,
0,
0.09663865546218488,
0,
0.029411764705882353,
0,
0,
0,
0.10043668122270742,
0,
0,
0,
0.015151515151515152,
0,
0.2,
0.1111111111111111,
0.019417475728155338,
0,
0,
0,
0,
0,
0.25,
0.2,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0.2,
0,
0.008695652173913044,
0,
0.1111111111111111,
0.047619047619047616,
0.04,
0.015384615384615385,
0,
0,
0,
0,
0,
0.03125,
0.011235955056179775,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0.010869565217391304,
0.1111111111111111,
0.047619047619047616,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0.08333333333333333,
0,
0.037037037037037035,
0.011834319526627219,
0.020833333333333332,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.011904761904761904,
0.2,
0,
0,
0.09606986899563319,
0,
0,
0.10679611650485436,
0,
0,
0,
0.10784313725490197,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886,
0.03333333333333333,
0,
0,
0,
0.021052631578947368,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0.0078125,
0.2,
0,
0.023809523809523808,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0.008849557522123894,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0.02857142857142857,
0.017241379310344827,
0.009900990099009901,
0,
0,
0.027777777777777776,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.04,
0,
0.023809523809523808,
0,
0,
0,
0,
0,
0.016129032258064516,
0,
0,
0.02040816326530612,
0,
0,
0.015625,
0.01,
0.016666666666666666,
0,
0.010869565217391304,
0,
0.010752688172043012,
0,
0.013157894736842105,
0,
0.014084507042253521,
0,
0,
0.009615384615384616,
0.010752688172043012,
0,
0,
0,
0,
0,
0.016666666666666666,
0,
0,
0.010752688172043012,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0.016129032258064516,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0.017241379310344827,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0.04,
0.045454545454545456,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0.02127659574468085,
0.0078125,
0.2,
0,
0.023809523809523808,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0.008849557522123894,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0,
0.02040816326530612,
0,
0,
0,
0,
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0.021739130434782608,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0.01694915254237288,
0,
0.014925373134328358,
0.008695652173913044,
0,
0,
0.006024096385542169,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014925373134328358,
0.008695652173913044,
0,
0.006024096385542169,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0.037037037037037035,
0.023809523809523808,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0.058823529411764705,
0.02040816326530612,
0,
0,
0.012987012987012988,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.046511627906976744,
0.015384615384615385,
0,
0,
0,
0,
0,
0.034482758620689655,
0.015625,
0,
0,
0,
0,
0,
0.034482758620689655,
0.014285714285714285,
0,
0,
0,
0,
0,
0.034482758620689655,
0.015873015873015872,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0.014285714285714285,
0,
0.034482758620689655,
0,
0.043478260869565216,
0.058823529411764705,
0,
0.006578947368421052,
0.01904761904761905,
0,
0.2,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0.047619047619047616,
0.05555555555555555,
0.06666666666666667,
0,
0.027777777777777776,
0,
0,
0.5,
0.028169014084507043,
0,
0.012048192771084338,
0.006369426751592357,
0,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017241379310344827,
0,
0,
0,
0.00909090909090909,
0.2,
0,
0,
0.1111111111111111,
0,
0,
0,
0.007633587786259542,
0.008333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.041666666666666664,
0.01098901098901099,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017543859649122806,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01818181818181818,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.004484304932735426,
0.08121827411167512,
0,
0,
0,
0.004405286343612335,
0,
0,
0,
0,
0.05555555555555555,
0,
0.1111111111111111,
0,
0,
0.041666666666666664,
0.2,
0,
0.12,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08695652173913043,
0.08695652173913043,
0.08695652173913043,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.0967741935483871,
0.0967741935483871,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.1111111111111111,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.07692307692307693,
0.08333333333333333,
0.1,
0.16666666666666666,
0.25,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0.016129032258064516,
0,
0,
0,
0,
0.010309278350515464,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.1111111111111111,
0,
0,
0,
0,
0,
0.06666666666666667,
0,
0.0196078431372549,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0.022222222222222223,
0,
0,
0,
0,
0,
0,
0,
0.08444444444444445,
0,
0.10582010582010581,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0.1,
0.11764705882352941,
0,
0,
0.0196078431372549,
0,
0,
0,
0,
0,
0,
0,
0.01818181818181818,
0,
0,
0,
0,
0,
0.018518518518518517,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0.037037037037037035,
0,
0.021739130434782608,
0,
0.022727272727272728,
0,
0.09502262443438914,
0.10497237569060773,
0.08333333333333333,
0,
0,
0.1,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0.024390243902439025,
0,
0.020833333333333332,
0,
0.018867924528301886,
0,
0,
0.022222222222222223,
0,
0,
0,
0.07692307692307693,
0.06451612903225806,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0.01818181818181818,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0.016666666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0.10810810810810811,
0.1111111111111111,
0.014492753623188406,
0,
0,
0,
0.07692307692307693,
0.06451612903225806,
0.02040816326530612,
0,
0,
0,
0,
0,
0.024691358024691357,
0,
0,
0,
0,
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0.018518518518518517,
0,
0,
0,
0.024096385542168676,
0.012195121951219513,
0,
0.01694915254237288,
0,
0.047619047619047616,
0.015625,
0.011904761904761904,
0.04,
0.015625,
0.012195121951219513,
0.04,
0,
0,
0,
0,
0.01818181818181818,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018518518518518517,
0,
0,
0,
0,
0.016666666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0.018867924528301886,
0.007518796992481203,
0,
0,
0,
0,
0,
0,
0.01639344262295082,
0,
0,
0.007142857142857143,
0.07692307692307693,
0,
0.047619047619047616,
0.015625,
0,
0,
0,
0.009433962264150943,
0.07612456747404844,
0,
0,
0,
0,
0.01818181818181818,
0,
0,
0.0072992700729927005,
0,
0.011904761904761904,
0.08050847457627118,
0,
0,
0.017241379310344827,
0.0125,
0,
0.012048192771084338,
0.09401709401709402,
0.02,
0,
0.017543859649122806,
0,
0.08482142857142858,
0,
0,
0.047619047619047616,
0,
0.08444444444444445,
0,
0.010869565217391304,
0,
0.04,
0,
0.08796296296296297,
0,
0,
0,
0.04,
0,
0.08878504672897196,
0,
0,
0,
0.04,
0,
0.09313725490196079,
0,
0,
0,
0,
0,
0.09047619047619047,
0,
0,
0,
0,
0.09047619047619047,
0,
0,
0,
0,
0.08520179372197309,
0,
0.01694915254237288,
0,
0.017241379310344827,
0,
0.010869565217391304,
0.11004784688995216,
0,
0,
0.01694915254237288,
0,
0.010869565217391304,
0.10952380952380952,
0,
0,
0,
0,
0,
0.011111111111111112,
0.008264462809917356,
0.09649122807017543,
0.017241379310344827,
0.04,
0,
0,
0.11004784688995216,
0,
0.047619047619047616,
0.017543859649122806,
0,
0.014492753623188406,
0.01282051282051282,
0.014084507042253521,
0.09745762711864407,
0,
0.047619047619047616,
0.01694915254237288,
0,
0.011764705882352941,
0.09163346613545817,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0.08181818181818182,
0.034482758620689655,
0.017857142857142856,
0,
0,
0.010416666666666666,
0,
0.08695652173913043,
0,
0,
0,
0.030303030303030304,
0,
0,
0.017857142857142856,
0,
0,
0.010416666666666666,
0,
0.08653846153846154,
0,
0,
0,
0.030303030303030304,
0.018867924528301886,
0.047619047619047616,
0,
0,
0,
0,
0.08737864077669903,
0,
0.041666666666666664,
0.038461538461538464,
0.11377245508982035,
0,
0.018518518518518517,
0.043478260869565216,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0.25,
0.021739130434782608,
0.00909090909090909,
0,
0,
0,
0,
0,
0.06382978723404255,
0.03773584905660377,
0,
0.01639344262295082,
0.025,
0.01,
0,
0,
0,
0.04,
0.023809523809523808,
0.010101010101010102,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0.04,
0,
0.00980392156862745,
0,
0,
0.017241379310344827,
0.04,
0,
0.012048192771084338,
0,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0.012048192771084338,
0,
0,
0.017241379310344827,
0,
0.013513513513513514,
0,
0,
0,
0,
0.013513513513513514,
0,
0,
0,
0,
0.04,
0.01818181818181818,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0.018867924528301886,
0,
0,
0,
0,
0.018518518518518517,
0,
0.10857142857142857,
0.017857142857142856,
0,
0.11046511627906977,
0.047619047619047616,
0.029411764705882353,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0.017699115044247787,
0.2,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022727272727272728,
0,
0.02564102564102564,
0,
0,
0,
0,
0.01818181818181818,
0,
0.02564102564102564,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0.01818181818181818,
0,
0.02564102564102564,
0,
0,
0,
0,
0.014492753623188406,
0,
0.02564102564102564,
0,
0,
0,
0,
0.016666666666666666,
0,
0.02564102564102564,
0,
0,
0,
0.014492753623188406,
0,
0.02564102564102564,
0,
0,
0,
0,
0.014925373134328358,
0,
0.02564102564102564,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0.058823529411764705,
0,
0,
0,
0.029411764705882353,
0,
0.02564102564102564,
0,
0,
0,
0,
0.029411764705882353,
0,
0.02564102564102564,
0.05555555555555555,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0.047619047619047616,
0.05555555555555555,
0.06666666666666667,
0,
0,
0,
0,
0,
0,
0.2,
0,
0.05,
0,
0,
0,
0.04878048780487805,
0.1111111111111111,
0.5,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0.011111111111111112,
0,
0,
0.00909090909090909,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0.07692307692307693,
0.010101010101010102,
0,
0,
0,
0,
0.07692307692307693,
0,
0.1111111111111111,
0.011235955056179775,
0,
0.009174311926605505,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.016666666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017241379310344827,
0,
0.02040816326530612,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014285714285714285,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0.014925373134328358,
0,
0,
0,
0,
0,
0.014925373134328358,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014492753623188406,
0,
0,
0,
0,
0,
0.014285714285714285,
0,
0,
0,
0,
0,
0.014492753623188406,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014925373134328358,
0,
0,
0,
0,
0,
0.014705882352941176,
0,
0,
0,
0,
0,
0.014492753623188406,
0,
0,
0,
0,
0,
0.014705882352941176,
0.011627906976744186,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014705882352941176,
0,
0,
0,
0,
0,
0.014492753623188406,
0,
0,
0,
0,
0,
0.014492753623188406,
0,
0,
0,
0,
0,
0.014492753623188406,
0,
0,
0,
0,
0,
0.014492753623188406,
0.009900990099009901,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014492753623188406,
0,
0,
0,
0,
0,
0.014492753623188406,
0,
0,
0,
0,
0,
0.014492753623188406,
0,
0,
0,
0,
0,
0.014492753623188406,
0,
0,
0,
0,
0,
0.014925373134328358,
0,
0,
0,
0,
0,
0.014492753623188406,
0.008620689655172414,
0,
0.015873015873015872,
0,
0,
0,
0,
0.058823529411764705,
0,
0.04491725768321513,
0.007380073800738007,
0.058823529411764705,
0.0029498525073746312,
0.00390625,
0.0625,
0.1,
0,
0.041666666666666664,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0.04081632653061224,
0,
0.024691358024691357,
0.2,
0,
0,
0.013513513513513514,
0.023529411764705882,
0.022727272727272728,
0.013333333333333334,
0.022222222222222223,
0,
0.023255813953488372,
0.022222222222222223,
0.1111111111111111,
0,
0,
0.07692307692307693,
0.011363636363636364,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0.14606741573033707,
0,
0,
0.010101010101010102,
0,
0.14606741573033707,
0,
0,
0,
0.01639344262295082,
0.00980392156862745,
0,
0,
0.14606741573033707,
0,
0.058823529411764705,
0,
0.019801980198019802,
0,
0,
0.14606741573033707,
0,
0.058823529411764705,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0.016129032258064516,
0.2,
0.02857142857142857,
0,
0,
0,
0.1111111111111111,
0,
0.02197802197802198,
0.011363636363636364,
0,
0,
0,
0,
0.012987012987012988,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0.027777777777777776,
0,
0,
0,
0,
0.03125,
0,
0.01694915254237288,
0,
0.029069767441860465,
0,
0.014925373134328358,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0.1111111111111111,
0.02040816326530612,
0,
0,
0,
0.047619047619047616,
0,
0.1111111111111111,
0.023255813953488372,
0,
0,
0.11188811188811189,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0.02127659574468085,
0,
0,
0,
0.02040816326530612,
0.009708737864077669,
0,
0.08780487804878048,
0,
0,
0,
0.04,
0,
0.058823529411764705,
0.019230769230769232,
0,
0,
0,
0.013513513513513514,
0,
0.08823529411764706,
0,
0,
0,
0,
0,
0,
0,
0,
0.016666666666666666,
0.022727272727272728,
0.011235955056179775,
0.09,
0,
0,
0,
0.04,
0.058823529411764705,
0.07692307692307693,
0,
0,
0.04,
0.015384615384615385,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0.02127659574468085,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0,
0,
0.02040816326530612,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0.020833333333333332,
0,
0,
0.011904761904761904,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0.020833333333333332,
0,
0,
0,
0.021739130434782608,
0.02197802197802198,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0.020833333333333332,
0,
0,
0,
0,
0,
0.020833333333333332,
0.020618556701030927,
0.07692307692307693,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0.023255813953488372,
0,
0.034482758620689655,
0.023255813953488372,
0,
0,
0,
0,
0,
0.03333333333333333,
0.023255813953488372,
0,
0,
0,
0,
0,
0.03333333333333333,
0.023255813953488372,
0,
0,
0,
0.02564102564102564,
0,
0.03225806451612903,
0.023255813953488372,
0,
0,
0,
0,
0,
0.034482758620689655,
0.023255813953488372,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0.015384615384615385,
0.2,
0,
0.2,
0,
0.03125,
0,
0,
0.2,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0.0196078431372549,
0,
0.01818181818181818,
0,
0,
0.013513513513513514,
0,
0.08372093023255814,
0,
0,
0,
0.04,
0,
0,
0,
0.047619047619047616,
0.07692307692307693,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0.1111111111111111,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0.020833333333333332,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0.011904761904761904,
0,
0,
0,
0.025,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.09042553191489362,
0,
0,
0,
0,
0,
0,
0.014925373134328358,
0,
0,
0,
0.014084507042253521,
0,
0,
0.03333333333333333,
0.022727272727272728,
0.012658227848101266,
0,
0,
0.018518518518518517,
0,
0,
0.030303030303030304,
0,
0.058823529411764705,
0.09482758620689655,
0,
0,
0.014925373134328358,
0,
0,
0,
0.014084507042253521,
0,
0.024390243902439025,
0,
0.022727272727272728,
0,
0,
0,
0.024096385542168676,
0,
0.017857142857142856,
0.01639344262295082,
0,
0,
0,
0.047619047619047616,
0.09813084112149532,
0.058823529411764705,
0,
0,
0,
0,
0.024096385542168676,
0,
0.014084507042253521,
0,
0,
0,
0,
0.047619047619047616,
0.09767441860465116,
0.058823529411764705,
0,
0,
0,
0,
0.024096385542168676,
0,
0.014084507042253521,
0,
0,
0,
0,
0.10194174757281553,
0.058823529411764705,
0,
0,
0,
0,
0.024096385542168676,
0,
0.014084507042253521,
0,
0,
0,
0,
0,
0.09821428571428571,
0,
0.02564102564102564,
0,
0,
0,
0.024096385542168676,
0,
0.017857142857142856,
0.01639344262295082,
0,
0,
0,
0,
0.102803738317757,
0,
0.024390243902439025,
0,
0,
0,
0.024096385542168676,
0,
0.017857142857142856,
0.01639344262295082,
0,
0,
0,
0.047619047619047616,
0.09734513274336283,
0,
0.02702702702702703,
0,
0,
0,
0.024096385542168676,
0,
0.017857142857142856,
0.01639344262295082,
0,
0,
0.013157894736842105,
0.047619047619047616,
0.105,
0,
0.022727272727272728,
0,
0,
0,
0.024096385542168676,
0,
0.017857142857142856,
0.01639344262295082,
0,
0,
0.013157894736842105,
0.047619047619047616,
0.09821428571428571,
0.058823529411764705,
0,
0,
0.029411764705882353,
0,
0.09954751131221719,
0.058823529411764705,
0,
0,
0,
0.022988505747126436,
0,
0,
0.014285714285714285,
0,
0,
0,
0.04,
0.047619047619047616,
0.058823529411764705,
0,
0,
0.03225806451612903,
0,
0.2,
0.04081632653061224,
0.1111111111111111,
0,
0.013157894736842105,
0.2,
0,
0,
0.1111111111111111,
0.024390243902439025,
0.037383177570093455,
0.0392156862745098,
0.05,
0,
0.1111111111111111,
0.0625,
0,
0.16666666666666666,
0,
0,
0.0196078431372549,
0.03571428571428571,
0,
0.2,
0,
0.04081632653061224,
0,
0.2,
0,
0,
0.013157894736842105,
0.2,
0,
0,
0.1111111111111111,
0,
0.024390243902439025,
0.01818181818181818,
0.024691358024691357,
0.03296703296703297,
0,
0.044642857142857144,
0.01,
0,
0,
0.05921052631578947,
0,
0,
0,
0.04,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0.038461538461538464,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0.038461538461538464,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0.03571428571428571,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0.2,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0.05263157894736842,
0.038461538461538464,
0,
0,
0,
0.03333333333333333,
0.2,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0.038461538461538464,
0,
0,
0,
0,
0,
0.05263157894736842,
0.03571428571428571,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0.03571428571428571,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0.038461538461538464,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0.2,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0.2,
0.022222222222222223,
0,
0,
0.011494252873563218,
0.022727272727272728,
0.01282051282051282,
0,
0.037037037037037035,
0.022727272727272728,
0,
0.018518518518518517,
0,
0,
0.02564102564102564,
0.09090909090909091,
0,
0.03773584905660377,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0.02631578947368421,
0,
0.021739130434782608,
0,
0,
0,
0.0196078431372549,
0,
0,
0,
0.02040816326530612,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02,
0,
0,
0,
0.02,
0,
0,
0,
0,
0,
0,
0,
0.02,
0,
0,
0,
0.02,
0,
0,
0,
0,
0,
0,
0,
0.020833333333333332,
0,
0,
0,
0.02127659574468085,
0,
0,
0,
0.02040816326530612,
0,
0,
0,
0.02,
0,
0,
0,
0.02,
0,
0,
0,
0.02040816326530612,
0,
0,
0,
0.0196078431372549,
0,
0,
0,
0.02040816326530612,
0,
0,
0,
0.0196078431372549,
0,
0,
0,
0,
0.02,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0.02,
0.2,
0.3333333333333333,
0,
0.041666666666666664,
0.015625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.25,
0,
0,
0,
0,
0,
0.06451612903225806,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0.041666666666666664,
0,
0.03571428571428571,
0.034482758620689655,
0,
0,
0.2,
0,
0,
0.2,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0,
0.043478260869565216,
0.012121212121212121,
0.02,
0,
0,
0.008695652173913044,
0.017241379310344827,
0.2,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0.028169014084507043,
0.047619047619047616,
0,
0,
0,
0.015873015873015872,
0,
0,
0,
0,
0.037037037037037035,
0.014285714285714285,
0.019230769230769232,
0,
0,
0,
0.008264462809917356,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0.05263157894736842,
0,
0.0125,
0,
0,
0,
0.16666666666666666,
0.25,
0.2,
0.047619047619047616,
0.03333333333333333,
0,
0,
0,
0,
0.038461538461538464,
0,
0.2,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0.2,
0,
0.009345794392523364,
0.011363636363636364,
0,
0.030303030303030304,
0,
0.2,
0,
0,
0,
0,
0.016129032258064516,
0,
0.1111111111111111,
0.1111111111111111,
0.2,
0.03389830508474576,
0,
0.2,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0.016666666666666666,
0.08490566037735849,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0.08653846153846154,
0,
0,
0,
0.0861244019138756,
0,
0,
0,
0.08866995073891626,
0,
0,
0,
0.08878504672897196,
0,
0,
0,
0.08695652173913043,
0.1111111111111111,
0.1111111111111111,
0,
0.03333333333333333,
0,
0,
0,
0.2,
0,
0.2,
0,
0,
0.021739130434782608,
0,
0,
0.021739130434782608,
0,
0.013157894736842105,
0.021739130434782608,
0,
0.023529411764705882,
0.021739130434782608,
0,
0.02127659574468085,
0.1111111111111111,
0,
0.019417475728155338,
0.1111111111111111,
0,
0.017857142857142856,
0.1111111111111111,
0,
0.01652892561983471,
0.1111111111111111,
0,
0.015384615384615385,
0.1111111111111111,
0,
0.01764705882352941,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0.07692307692307693,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0,
0,
0.045454545454545456,
0,
0.018518518518518517,
0.07692307692307693,
0,
0.015873015873015872,
0,
0,
0,
0,
0,
0.016666666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0.02,
0.022727272727272728,
0.020833333333333332,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.013157894736842105,
0,
0,
0,
0.012195121951219513,
0,
0.029411764705882353,
0.013333333333333334,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0.1111111111111111,
0,
0.015384615384615385,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0.16666666666666666,
1
] | 5,028 | 0.015071 | false |
#-*- coding:utf-8 -*-
from libopensesame.py3compat import *
from libopensesame import debug
from libqtopensesame.extensions import base_extension
class psynteract_extension(base_extension):
"""Install psynteract backend"""
def activate(self):
"""
desc:
Is called when the extension is activated through the menu/ toolbar
action.
"""
from libqtopensesame.dialogs.notification import notification
welcome = notification(self.main_window,'On the next page, please input the URL of your couch db server followed by a database name.'
'For example, if you have couch db installed on the machine you are currently using, you might enter: '
'http://localhost:5984/psynteract')
welcome.exec_()
from libqtopensesame.dialogs.text_input import text_input
psynteract_url = text_input(self.main_window)
current_url = psynteract_url.get_input()
from psynteract import install
server_url = install(current_url)
feedback = notification(self.main_window,'Your server is available at '+server_url)
feedback.exec_()
| [
"#-*- coding:utf-8 -*-\n",
"from libopensesame.py3compat import *\n",
"\n",
"from libopensesame import debug\n",
"from libqtopensesame.extensions import base_extension\n",
"\n",
"\n",
"class psynteract_extension(base_extension):\n",
"\n",
"\t\"\"\"Install psynteract backend\"\"\"\n",
"\n",
"\tdef activate(self):\n",
"\n",
"\t\t\"\"\"\n",
"\t\tdesc:\n",
"\t\t\tIs called when the extension is activated through the menu/ toolbar\n",
"\t\t\taction.\n",
"\t\t\"\"\"\n",
"\t\t\n",
"\t\tfrom libqtopensesame.dialogs.notification import notification\n",
"\t\twelcome = notification(self.main_window,'On the next page, please input the URL of your couch db server followed by a database name.'\n",
"\t\t'For example, if you have couch db installed on the machine you are currently using, you might enter: '\n",
"\t\t'http://localhost:5984/psynteract')\n",
"\t\twelcome.exec_()\n",
"\t\t\n",
"\t\tfrom libqtopensesame.dialogs.text_input import text_input\n",
"\t\tpsynteract_url = text_input(self.main_window)\n",
"\t\tcurrent_url = psynteract_url.get_input()\n",
"\t\t\n",
"\t\tfrom psynteract import install\n",
"\t\tserver_url = install(current_url)\n",
"\n",
"\t\tfeedback = notification(self.main_window,'Your server is available at '+server_url)\n",
"\t\tfeedback.exec_()\n",
"\t\t"
] | [
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0.047619047619047616,
0,
0.16666666666666666,
0.125,
0.014084507042253521,
0.09090909090909091,
0.16666666666666666,
0.6666666666666666,
0.015625,
0.022058823529411766,
0.02830188679245283,
0.05263157894736842,
0.05555555555555555,
0.6666666666666666,
0.016666666666666666,
0.020833333333333332,
0.023255813953488372,
0.6666666666666666,
0.030303030303030304,
0.027777777777777776,
0,
0.03488372093023256,
0.05263157894736842,
1.5
] | 35 | 0.130467 | false |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("QuantConnect.Algorithm.Framework")
AddReference("QuantConnect.Indicators")
AddReference("QuantConnect.Common")
from QuantConnect import *
from QuantConnect.Indicators import *
from QuantConnect.Algorithm.Framework.Alphas import *
from datetime import timedelta
from enum import Enum
class RsiAlphaModel(AlphaModel):
'''Uses Wilder's RSI to create insights.
Using default settings, a cross over below 30 or above 70 will trigger a new insight.'''
def __init__(self,
period = 14,
resolution = Resolution.Daily):
'''Initializes a new instance of the RsiAlphaModel class
Args:
period: The RSI indicator period'''
self.period = period
self.resolution = resolution
self.insightPeriod = Time.Multiply(Extensions.ToTimeSpan(resolution), period)
self.symbolDataBySymbol ={}
resolutionString = Extensions.GetEnumString(resolution, Resolution)
self.Name = '{}({},{})'.format(self.__class__.__name__, period, resolutionString)
def Update(self, algorithm, data):
'''Updates this alpha model with the latest data from the algorithm.
This is called each time the algorithm receives data for subscribed securities
Args:
algorithm: The algorithm instance
data: The new data available
Returns:
The new insights generated'''
insights = []
for symbol, symbolData in self.symbolDataBySymbol.items():
rsi = symbolData.RSI
previous_state = symbolData.State
state = self.GetState(rsi, previous_state)
if state != previous_state and rsi.IsReady:
if state == State.TrippedLow:
insights.append(Insight.Price(symbol, self.insightPeriod, InsightDirection.Up))
if state == State.TrippedHigh:
insights.append(Insight.Price(symbol, self.insightPeriod, InsightDirection.Down))
symbolData.State = state
return insights
def OnSecuritiesChanged(self, algorithm, changes):
'''Cleans out old security data and initializes the RSI for any newly added securities.
Event fired each time the we add/remove securities from the data feed
Args:
algorithm: The algorithm instance that experienced the change in securities
changes: The security additions and removals from the algorithm'''
# clean up data for removed securities
symbols = [ x.Symbol for x in changes.RemovedSecurities ]
if len(symbols) > 0:
for subscription in algorithm.SubscriptionManager.Subscriptions:
if subscription.Symbol in symbols:
self.symbolDataBySymbol.pop(subscription.Symbol, None)
subscription.Consolidators.Clear()
# initialize data for added securities
addedSymbols = [ x.Symbol for x in changes.AddedSecurities if x.Symbol not in self.symbolDataBySymbol]
if len(addedSymbols) == 0: return
history = algorithm.History(addedSymbols, self.period, self.resolution)
for symbol in addedSymbols:
rsi = algorithm.RSI(symbol, self.period, MovingAverageType.Wilders, self.resolution)
if not history.empty:
ticker = SymbolCache.GetTicker(symbol)
for tuple in history.loc[ticker].itertuples():
rsi.Update(tuple.Index, tuple.close)
self.symbolDataBySymbol[symbol] = SymbolData(symbol, rsi)
def GetState(self, rsi, previous):
''' Determines the new state. This is basically cross-over detection logic that
includes considerations for bouncing using the configured bounce tolerance.'''
if rsi.Current.Value > 70:
return State.TrippedHigh
if rsi.Current.Value < 30:
return State.TrippedLow
if previous == State.TrippedLow:
if rsi.Current.Value > 35:
return State.Middle
if previous == State.TrippedHigh:
if rsi.Current.Value < 65:
return State.Middle
return previous
class SymbolData:
'''Contains data specific to a symbol required by this model'''
def __init__(self, symbol, rsi):
self.Symbol = symbol
self.RSI = rsi
self.State = State.Middle
class State(Enum):
'''Defines the state. This is used to prevent signal spamming and aid in bounce detection.'''
TrippedLow = 0
Middle = 1
TrippedHigh = 2 | [
"# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.\n",
"# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"from clr import AddReference\n",
"AddReference(\"QuantConnect.Algorithm.Framework\")\n",
"AddReference(\"QuantConnect.Indicators\")\n",
"AddReference(\"QuantConnect.Common\")\n",
"\n",
"from QuantConnect import *\n",
"from QuantConnect.Indicators import *\n",
"from QuantConnect.Algorithm.Framework.Alphas import *\n",
"from datetime import timedelta\n",
"from enum import Enum\n",
"\n",
"class RsiAlphaModel(AlphaModel):\n",
" '''Uses Wilder's RSI to create insights.\n",
" Using default settings, a cross over below 30 or above 70 will trigger a new insight.'''\n",
"\n",
" def __init__(self,\n",
" period = 14,\n",
" resolution = Resolution.Daily):\n",
" '''Initializes a new instance of the RsiAlphaModel class\n",
" Args:\n",
" period: The RSI indicator period'''\n",
" self.period = period\n",
" self.resolution = resolution\n",
" self.insightPeriod = Time.Multiply(Extensions.ToTimeSpan(resolution), period)\n",
" self.symbolDataBySymbol ={}\n",
"\n",
" resolutionString = Extensions.GetEnumString(resolution, Resolution)\n",
" self.Name = '{}({},{})'.format(self.__class__.__name__, period, resolutionString)\n",
"\n",
" def Update(self, algorithm, data):\n",
" '''Updates this alpha model with the latest data from the algorithm.\n",
" This is called each time the algorithm receives data for subscribed securities\n",
" Args:\n",
" algorithm: The algorithm instance\n",
" data: The new data available\n",
" Returns:\n",
" The new insights generated'''\n",
" insights = []\n",
" for symbol, symbolData in self.symbolDataBySymbol.items():\n",
" rsi = symbolData.RSI\n",
" previous_state = symbolData.State\n",
" state = self.GetState(rsi, previous_state)\n",
"\n",
" if state != previous_state and rsi.IsReady:\n",
" if state == State.TrippedLow:\n",
" insights.append(Insight.Price(symbol, self.insightPeriod, InsightDirection.Up))\n",
" if state == State.TrippedHigh:\n",
" insights.append(Insight.Price(symbol, self.insightPeriod, InsightDirection.Down))\n",
"\n",
" symbolData.State = state\n",
"\n",
" return insights\n",
"\n",
"\n",
" def OnSecuritiesChanged(self, algorithm, changes):\n",
" '''Cleans out old security data and initializes the RSI for any newly added securities.\n",
" Event fired each time the we add/remove securities from the data feed\n",
" Args:\n",
" algorithm: The algorithm instance that experienced the change in securities\n",
" changes: The security additions and removals from the algorithm'''\n",
"\n",
" # clean up data for removed securities\n",
" symbols = [ x.Symbol for x in changes.RemovedSecurities ]\n",
" if len(symbols) > 0:\n",
" for subscription in algorithm.SubscriptionManager.Subscriptions:\n",
" if subscription.Symbol in symbols:\n",
" self.symbolDataBySymbol.pop(subscription.Symbol, None)\n",
" subscription.Consolidators.Clear()\n",
"\n",
" # initialize data for added securities\n",
"\n",
" addedSymbols = [ x.Symbol for x in changes.AddedSecurities if x.Symbol not in self.symbolDataBySymbol]\n",
" if len(addedSymbols) == 0: return\n",
"\n",
" history = algorithm.History(addedSymbols, self.period, self.resolution)\n",
"\n",
" for symbol in addedSymbols:\n",
" rsi = algorithm.RSI(symbol, self.period, MovingAverageType.Wilders, self.resolution)\n",
"\n",
" if not history.empty:\n",
" ticker = SymbolCache.GetTicker(symbol)\n",
" for tuple in history.loc[ticker].itertuples():\n",
" rsi.Update(tuple.Index, tuple.close)\n",
"\n",
" self.symbolDataBySymbol[symbol] = SymbolData(symbol, rsi)\n",
"\n",
"\n",
" def GetState(self, rsi, previous):\n",
" ''' Determines the new state. This is basically cross-over detection logic that\n",
" includes considerations for bouncing using the configured bounce tolerance.'''\n",
" if rsi.Current.Value > 70:\n",
" return State.TrippedHigh\n",
" if rsi.Current.Value < 30:\n",
" return State.TrippedLow\n",
" if previous == State.TrippedLow:\n",
" if rsi.Current.Value > 35:\n",
" return State.Middle\n",
" if previous == State.TrippedHigh:\n",
" if rsi.Current.Value < 65:\n",
" return State.Middle\n",
"\n",
" return previous\n",
"\n",
"\n",
"class SymbolData:\n",
" '''Contains data specific to a symbol required by this model'''\n",
" def __init__(self, symbol, rsi):\n",
" self.Symbol = symbol\n",
" self.RSI = rsi\n",
" self.State = State.Middle\n",
"\n",
"\n",
"class State(Enum):\n",
" '''Defines the state. This is used to prevent signal spamming and aid in bounce detection.'''\n",
" TrippedLow = 0\n",
" Middle = 1\n",
" TrippedHigh = 2"
] | [
0,
0.012345679012345678,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0.02631578947368421,
0.018518518518518517,
0.03225806451612903,
0.045454545454545456,
0,
0.030303030303030304,
0,
0.010752688172043012,
0,
0,
0.06666666666666667,
0.04081632653061224,
0,
0,
0,
0,
0,
0.011627906976744186,
0.027777777777777776,
0,
0,
0.011111111111111112,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0.01818181818181818,
0.010416666666666666,
0,
0,
0.011363636363636364,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0.018018018018018018,
0.023809523809523808,
0,
0,
0,
0,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0.011363636363636364,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0.05263157894736842
] | 130 | 0.004983 | false |
# Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
"""Functions to create noise samples of different distributions."""
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
import numpy as np
from odl.util import as_flat_array
__all__ = ('white_noise', 'poisson_noise', 'salt_pepper_noise')
def white_noise(space, mean=0, stddev=1):
"""Standard gaussian noise in space, pointwise ``N(mean, stddev**2)``.
Parameters
----------
space : `FnBase` or `ProductSpace`
The space in which the noise is created.
mean : ``space.field`` element or ``space`` `element-like`, optional
The mean of the white noise. If a scalar, it is interpreted as
``mean * space.one()``.
If ``space`` is complex, the real and imaginary parts are interpreted
as the mean of their respective part of the noise.
stddev : `float` or ``space`` `element-like`, optional
The standard deviation of the white noise. If a scalar, it is
interpreted as ``stddev * space.one()``.
Returns
-------
white_noise : ``space`` element
See Also
--------
poisson_noise
salt_pepper_noise
numpy.random.normal
"""
from odl.space import ProductSpace
if isinstance(space, ProductSpace):
values = [white_noise(subspace, mean, stddev) for subspace in space]
else:
if space.is_cn:
real = np.random.normal(
loc=mean.real, scale=stddev, size=space.shape)
imag = np.random.normal(
loc=mean.imag, scale=stddev, size=space.shape)
values = real + 1j * imag
else:
values = np.random.normal(loc=mean, scale=stddev, size=space.shape)
return space.element(values)
def poisson_noise(intensity):
"""Poisson distributed noise with given intensity.
Parameters
----------
intensity : `FnBase` element or `ProductSpace` element
The intensity (usually called lambda) parameter of the noise.
Returns
-------
poisson_noise : ``intensity.space`` element
Poisson distributed random variable.
Notes
-----
For a Poisson distributed random variable :math:`X` with intensity
:math:`\\lambda`, the probability of it taking the value
:math:`k \\in \mathbb{N}_0` is given by
.. math::
\\frac{\\lambda^k e^{-\\lambda}}{k!}
Note that the function only takes integer values.
See Also
--------
white_noise
salt_pepper_noise
numpy.random.poisson
"""
from odl.space import ProductSpace
if isinstance(intensity.space, ProductSpace):
values = [poisson_noise(subintensity) for subintensity in intensity]
else:
values = np.random.poisson(intensity.asarray())
return intensity.space.element(values)
def salt_pepper_noise(vector, fraction=0.05, salt_vs_pepper=0.5,
low_val=None, high_val=None):
"""Add salt and pepper noise to vector.
Salt and pepper noise replaces random elements in ``vector`` with
``low_val`` or ``high_val``.
Parameters
----------
vector : `FnBase` or `ProductSpace`
The vector that noise should be added to.
fraction : float, optional
The propotion of the elements in ``vector`` that should be converted
to noise.
salt_vs_pepper : float, optional
Relative aboundance of salt (high) vs pepper (low) noise. A high value
means more salt than pepper noise.
low_val : float, optional
The "pepper" color in the noise.
Default: minimum value of ``vector``. For product spaces the minimum
value per subspace is taken.
each sub-space.
high_val : float, optional
The "salt" value in the noise.
Default: maximuim value of ``vector``. For product spaces the maximum
value per subspace is taken.
Returns
-------
salt_pepper_noise : ``vector.space`` element
``vector`` with salt and pepper noise.
See Also
--------
white_noise
poisson_noise
"""
from odl.space import ProductSpace
# Validate input parameters
fraction, fraction_in = float(fraction), fraction
if not (0 <= fraction <= 1):
raise ValueError('`fraction` ({}) should be a float in the interval '
'[0, 1]'.format(fraction_in))
salt_vs_pepper, salt_vs_pepper_in = float(salt_vs_pepper), salt_vs_pepper
if not (0 <= salt_vs_pepper <= 1):
raise ValueError('`salt_vs_pepper` ({}) should be a float in the '
'interval [0, 1]'.format(salt_vs_pepper_in))
if isinstance(vector.space, ProductSpace):
values = [salt_pepper_noise(subintensity, fraction, salt_vs_pepper,
low_val, high_val)
for subintensity in vector]
else:
# Extract vector of values
values = as_flat_array(vector).copy()
# Determine fill-in values if not given
if low_val is None:
low_val = np.min(values)
if high_val is None:
high_val = np.max(values)
# Create randomly selected points as a subset of image.
a = np.arange(vector.size)
np.random.shuffle(a)
salt_indices = a[:int(fraction * vector.size * salt_vs_pepper)]
pepper_indices = a[int(fraction * vector.size * salt_vs_pepper):
int(fraction * vector.size)]
values[salt_indices] = high_val
values[pepper_indices] = -low_val
return vector.space.element(values)
if __name__ == '__main__':
# Show the phantoms
import odl
r100 = odl.rn(100)
white_noise(r100).show('white_noise')
white_noise(r100, mean=5).show('white_noise with mean')
c100 = odl.cn(100)
white_noise(c100).show('complex white_noise')
discr = odl.uniform_discr([-1, -1], [1, 1], [300, 300])
white_noise(discr).show('white_noise 2d')
vector = odl.phantom.shepp_logan(discr, modified=True)
poisson_noise(vector * 100).show('poisson_noise 2d')
salt_pepper_noise(vector).show('salt_pepper_noise 2d')
# Run also the doctests
# pylint: disable=wrong-import-position
from odl.util.testutils import run_doctests
run_doctests()
| [
"# Copyright 2014-2016 The ODL development group\n",
"#\n",
"# This file is part of ODL.\n",
"#\n",
"# ODL is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# ODL is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with ODL. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"\"\"\"Functions to create noise samples of different distributions.\"\"\"\n",
"\n",
"# Imports for common Python 2/3 codebase\n",
"from __future__ import print_function, division, absolute_import\n",
"from future import standard_library\n",
"standard_library.install_aliases()\n",
"\n",
"import numpy as np\n",
"from odl.util import as_flat_array\n",
"\n",
"\n",
"__all__ = ('white_noise', 'poisson_noise', 'salt_pepper_noise')\n",
"\n",
"\n",
"def white_noise(space, mean=0, stddev=1):\n",
" \"\"\"Standard gaussian noise in space, pointwise ``N(mean, stddev**2)``.\n",
"\n",
" Parameters\n",
" ----------\n",
" space : `FnBase` or `ProductSpace`\n",
" The space in which the noise is created.\n",
" mean : ``space.field`` element or ``space`` `element-like`, optional\n",
" The mean of the white noise. If a scalar, it is interpreted as\n",
" ``mean * space.one()``.\n",
" If ``space`` is complex, the real and imaginary parts are interpreted\n",
" as the mean of their respective part of the noise.\n",
" stddev : `float` or ``space`` `element-like`, optional\n",
" The standard deviation of the white noise. If a scalar, it is\n",
" interpreted as ``stddev * space.one()``.\n",
"\n",
" Returns\n",
" -------\n",
" white_noise : ``space`` element\n",
"\n",
" See Also\n",
" --------\n",
" poisson_noise\n",
" salt_pepper_noise\n",
" numpy.random.normal\n",
" \"\"\"\n",
" from odl.space import ProductSpace\n",
" if isinstance(space, ProductSpace):\n",
" values = [white_noise(subspace, mean, stddev) for subspace in space]\n",
" else:\n",
" if space.is_cn:\n",
" real = np.random.normal(\n",
" loc=mean.real, scale=stddev, size=space.shape)\n",
" imag = np.random.normal(\n",
" loc=mean.imag, scale=stddev, size=space.shape)\n",
" values = real + 1j * imag\n",
" else:\n",
" values = np.random.normal(loc=mean, scale=stddev, size=space.shape)\n",
" return space.element(values)\n",
"\n",
"\n",
"def poisson_noise(intensity):\n",
" \"\"\"Poisson distributed noise with given intensity.\n",
"\n",
" Parameters\n",
" ----------\n",
" intensity : `FnBase` element or `ProductSpace` element\n",
" The intensity (usually called lambda) parameter of the noise.\n",
"\n",
" Returns\n",
" -------\n",
" poisson_noise : ``intensity.space`` element\n",
" Poisson distributed random variable.\n",
"\n",
" Notes\n",
" -----\n",
" For a Poisson distributed random variable :math:`X` with intensity\n",
" :math:`\\\\lambda`, the probability of it taking the value\n",
" :math:`k \\\\in \\mathbb{N}_0` is given by\n",
"\n",
" .. math::\n",
" \\\\frac{\\\\lambda^k e^{-\\\\lambda}}{k!}\n",
"\n",
" Note that the function only takes integer values.\n",
"\n",
" See Also\n",
" --------\n",
" white_noise\n",
" salt_pepper_noise\n",
" numpy.random.poisson\n",
" \"\"\"\n",
" from odl.space import ProductSpace\n",
" if isinstance(intensity.space, ProductSpace):\n",
" values = [poisson_noise(subintensity) for subintensity in intensity]\n",
" else:\n",
" values = np.random.poisson(intensity.asarray())\n",
" return intensity.space.element(values)\n",
"\n",
"\n",
"def salt_pepper_noise(vector, fraction=0.05, salt_vs_pepper=0.5,\n",
" low_val=None, high_val=None):\n",
" \"\"\"Add salt and pepper noise to vector.\n",
"\n",
" Salt and pepper noise replaces random elements in ``vector`` with\n",
" ``low_val`` or ``high_val``.\n",
"\n",
" Parameters\n",
" ----------\n",
" vector : `FnBase` or `ProductSpace`\n",
" The vector that noise should be added to.\n",
" fraction : float, optional\n",
" The propotion of the elements in ``vector`` that should be converted\n",
" to noise.\n",
" salt_vs_pepper : float, optional\n",
" Relative aboundance of salt (high) vs pepper (low) noise. A high value\n",
" means more salt than pepper noise.\n",
" low_val : float, optional\n",
" The \"pepper\" color in the noise.\n",
" Default: minimum value of ``vector``. For product spaces the minimum\n",
" value per subspace is taken.\n",
" each sub-space.\n",
" high_val : float, optional\n",
" The \"salt\" value in the noise.\n",
" Default: maximuim value of ``vector``. For product spaces the maximum\n",
" value per subspace is taken.\n",
"\n",
" Returns\n",
" -------\n",
" salt_pepper_noise : ``vector.space`` element\n",
" ``vector`` with salt and pepper noise.\n",
"\n",
" See Also\n",
" --------\n",
" white_noise\n",
" poisson_noise\n",
" \"\"\"\n",
" from odl.space import ProductSpace\n",
"\n",
" # Validate input parameters\n",
" fraction, fraction_in = float(fraction), fraction\n",
" if not (0 <= fraction <= 1):\n",
" raise ValueError('`fraction` ({}) should be a float in the interval '\n",
" '[0, 1]'.format(fraction_in))\n",
"\n",
" salt_vs_pepper, salt_vs_pepper_in = float(salt_vs_pepper), salt_vs_pepper\n",
" if not (0 <= salt_vs_pepper <= 1):\n",
" raise ValueError('`salt_vs_pepper` ({}) should be a float in the '\n",
" 'interval [0, 1]'.format(salt_vs_pepper_in))\n",
"\n",
" if isinstance(vector.space, ProductSpace):\n",
" values = [salt_pepper_noise(subintensity, fraction, salt_vs_pepper,\n",
" low_val, high_val)\n",
" for subintensity in vector]\n",
" else:\n",
" # Extract vector of values\n",
" values = as_flat_array(vector).copy()\n",
"\n",
" # Determine fill-in values if not given\n",
" if low_val is None:\n",
" low_val = np.min(values)\n",
" if high_val is None:\n",
" high_val = np.max(values)\n",
"\n",
" # Create randomly selected points as a subset of image.\n",
" a = np.arange(vector.size)\n",
" np.random.shuffle(a)\n",
" salt_indices = a[:int(fraction * vector.size * salt_vs_pepper)]\n",
" pepper_indices = a[int(fraction * vector.size * salt_vs_pepper):\n",
" int(fraction * vector.size)]\n",
"\n",
" values[salt_indices] = high_val\n",
" values[pepper_indices] = -low_val\n",
"\n",
" return vector.space.element(values)\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" # Show the phantoms\n",
" import odl\n",
"\n",
" r100 = odl.rn(100)\n",
" white_noise(r100).show('white_noise')\n",
" white_noise(r100, mean=5).show('white_noise with mean')\n",
"\n",
" c100 = odl.cn(100)\n",
" white_noise(c100).show('complex white_noise')\n",
"\n",
" discr = odl.uniform_discr([-1, -1], [1, 1], [300, 300])\n",
" white_noise(discr).show('white_noise 2d')\n",
"\n",
" vector = odl.phantom.shepp_logan(discr, modified=True)\n",
" poisson_noise(vector * 100).show('poisson_noise 2d')\n",
" salt_pepper_noise(vector).show('salt_pepper_noise 2d')\n",
"\n",
" # Run also the doctests\n",
" # pylint: disable=wrong-import-position\n",
" from odl.util.testutils import run_doctests\n",
" run_doctests()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 209 | 0.000497 | false |
import configparser
import logging
import random
import sys
from copy import deepcopy
import discord
import GameTimer
def load_config():
c = configparser.ConfigParser()
c.read('config.ini')
try:
time = c.getint('game', 'time')
except ValueError:
logger.error(
"В config.ini неверно указано значение time. Значение установлено на 1200")
time = 1200
return {'t': time}
class DiscordClient(discord.Client):
    """Discord chat bot that moderates a game of Fallacymania.

    Players register as debaters ("спорщики") or guessers ("отгадчики")
    via chat commands; the bot deals fallacy cards, tracks scores and
    attempts, and runs the game timer. All user-facing strings are Russian.
    """
    def __init__(self, **kwargs):
        discord.Client.__init__(self, **kwargs)
        # Game/roster state.
        self.paused = False
        self.debaters_list = []      # debater members
        self.debater_names = []      # their display names (kept in sync)
        self.guesser_attempts = {}   # guesser -> remaining attempt cards
        self.guessers_list = []
        self.guesser_names = []
        self.guesser_points = {}     # guesser -> score
        self.guesser_last_turn = {}  # guesser -> last action tag, for undo
        self.guesser_messages = 0    # score messages since last sheet re-send
        # Card state: hands, draw pile and discard pile.
        self.debater_cards = {}
        self.pack = {}
        self.discard = []
        try:
            config = load_config()
            self.t = config["t"]
        except:
            # NOTE(review): bare except also hides NameError etc.; the intent
            # is "config missing/corrupt -> default 1200 s".
            self.t = 1200
            logger.error(
                "Файл config.ini отсуствует или содержит некорретные данные, были загруженны настройки по умолчанию.")
        print(self.t)
        # Timer that ends the game after self.t seconds.
        self.game_timer = GameTimer.RenewableTimer(self.t, self.end)
        self.started = False
    async def on_ready(self):
        """discord.py hook: reset state and log identity once connected."""
        await self.__reset__()
        print('Logged in as')
        # NOTE(review): uses the module-level `client` global instead of `self`.
        print(client.user.name)
        print(client.user.id)
        print('------')
    async def __reset__(self):
        """Clear all rosters and per-guesser counters (card piles untouched)."""
        self.paused = False
        self.debaters_list.clear()
        self.debater_names.clear()
        self.guesser_attempts.clear()
        self.guessers_list.clear()
        self.guesser_names.clear()
        self.guesser_points.clear()
        self.guesser_last_turn.clear()
        self.guesser_messages = 0
    def end(self):
        """Timer callback: schedule game end and state reset on the event loop."""
        self.loop.create_task(self.end_game())
        self.loop.create_task(self.__reset__())
    async def end_game(self):
        """Announce the winner(s) and the final score to every player."""
        self.started = False
        max_points = 0
        winners = []
        # Determine the guesser(s) with the maximum score
        for guesser in self.guesser_points:
            if self.guesser_points[guesser] > max_points:
                max_points = self.guesser_points[guesser]
                winner = guesser
                winners = [winner.name]
            elif self.guesser_points[guesser] == max_points:
                winners.append(guesser.name)
        # NOTE(review): if guesser_points is empty, `guesser` (and possibly
        # `winner`) is unbound and the branches below raise NameError — TODO
        # confirm a game can never end with zero guessers.
        if len(self.guesser_points) < 2:
            end_game_message = "Победитель **{}**".format(guesser.name)
        elif len(winners) < 2:
            end_game_message = "Победитель **{}**".format(winner.name)
        elif len(winners) > 1:
            end_game_message = "Победители {}".format("**" + "**, **".join(winners) + "**")
        score = self.current_score()
        for user in self.guessers_list + self.debaters_list:
            await user.send("{0}\n{1}\nИгра закончилась".format(score, end_game_message))
        print("Игра закончилась")
    def current_score(self):
        """Return a markdown scoreboard: one 'name: points | attempts' line per guesser."""
        score_message = "Общий счёт (Игрок: очки | попытки):\n"
        for guesser in self.guesser_points:
            score_message += "**{0}**: {1} | {2} \n".format(guesser.name, self.guesser_points[guesser],
                                                            self.guesser_attempts[guesser])
        return score_message
    async def add_guesser(self, member):
        """Add *member* to the guesser group and DM everyone the updated roster."""
        if member not in self.guessers_list:
            self.guessers_list.append(member)
            self.guesser_names.append(member.name)
            guessers = "**" + "**, **".join(self.guesser_names) + "**"
            for guesser in self.guessers_list:
                if guesser != member:
                    await guesser.send(
                        "Игрок {0} добавлен в группу отгадчиков\n"
                        "Группа отгадчиков: {1}\n"
                        "Общее количество отгадчиков: **{2}**".format(member.name, guessers,
                                                                      len(self.guessers_list)))
                else:
                    await guesser.send(
                        "Вы добавлены в группу отгадчиков\n"
                        "Группа отгадчиков: {0}\n"
                        "Общее количество отгадчиков: **{1}**".format(guessers,
                                                                      len(self.guessers_list)))
        elif member in self.guessers_list:
            guessers = "**" + "**, **".join(self.guesser_names) + "**"
            await member.send(
                'Вы уже в группе отгадчиков \nГруппа отгадчиков: {0}\nОбщее количество '
                'отгадчиков: **{1}**'.format(guessers, len(self.guessers_list)))
    async def remove_guesser(self, member):
        """Remove *member* from the guesser group (no-op if absent) and notify players."""
        if member in self.guessers_list:
            self.guessers_list.remove(member)
            self.guesser_names.remove(member.name)
            guessers = "**" + "**, **".join(self.guesser_names) + "**"
            await member.send(
                "Вы удалены из группы отгадчиков\n"
                "Группа отгадчиков: {0}\n"
                "Общее количество отгадчиков: **{1}**".format(guessers, len(self.guessers_list)))
            for guesser in self.guessers_list:
                await guesser.send(
                    "Игрок {0} удалён из группы отгадчиков\n"
                    "Группа отгадчиков: {1}\n"
                    "Общее количество отгадчиков: **{2}**".format(member.name, guessers,
                                                                  len(self.guessers_list)))
    async def add_debater(self, member):
        """Add *member* to the debater group and DM everyone the updated roster."""
        if member not in self.debaters_list:
            self.debaters_list.append(member)
            self.debater_names.append(member.name)
            debaters = "**" + "**, **".join(self.debater_names) + "**"
            for debater in self.debaters_list:
                if debater != member:
                    await debater.send(
                        "Игрок {0} добавлен в группу спорщиков\n"
                        "Группа спорщиков: {1}\n"
                        "Общее количество спорщиков: **{2}**".format(member.name, debaters,
                                                                     len(self.debaters_list)))
                else:
                    await debater.send(
                        "Вы добавлены в группу спорщиков\n"
                        "Группа спорщиков: {1}\n"
                        "Общее количество спорщиков: **{2}**".format(member.name, debaters,
                                                                     len(self.debaters_list)))
        elif member in self.debaters_list:
            debaters = "**" + "**, **".join(self.debater_names) + "**"
            await member.send(
                'Вы уже в группе спорщиков \nГруппа спорщиков: {0}\nОбщее количество '
                'спорщиков: **{1}**'.format(debaters, len(self.debaters_list)))
        # we do not want the client to reply to itself
        # NOTE(review): this compares against the class attribute
        # `discord.Client.user`, not this instance's user, and sits after all
        # replies have been sent — effectively dead code. TODO confirm intent.
        if member == discord.Client.user:
            return
    async def remove_debater(self, member):
        """Remove *member* from the debater group (no-op if absent) and notify players."""
        if member in self.debaters_list:
            self.debaters_list.remove(member)
            self.debater_names.remove(member.name)
            debaters = "**" + "**, **".join(self.debater_names) + "**"
            await member.send(
                "Вы удалены из группы спорщиков\n"
                "Группа спорщиков: {0}\n"
                "Общее количество спорщиков: **{1}**".format(debaters, len(self.debaters_list)))
            for debater in self.debaters_list:
                # NOTE(review): the header says "Группа отгадчиков" (guessers)
                # but lists debaters — copy/paste slip in the message text.
                await debater.send(
                    "Игрок {0} удалён из группы спорщиков\n"
                    "Группа отгадчиков: {1}\n"
                    "Общее количество спорщиков: **{2}**".format(member.name, debaters,
                                                                 len(self.debaters_list)))
    async def on_message(self, message):
        """Command dispatcher: every chat message is matched against the bot commands."""
        member = message.author
        channel = message.channel
        # Help text. NOTE(review): it advertises "!пазуа" but the pause
        # handler below matches '!пауза' — typo in the user-facing text.
        if message.content == "!help" or message.content == "!h":
            message_to_other_guessers = """```Чат-бот для игры в Fallacymania
Команды:
"!h" или "!help" - Выводит данную справку
"!r" или "!правила" - Выводит правила
"*" или "!софизмы" - Отправляет в ответ лист с софизмами
"!d" или "!спорщик" - Добавляет пользователя в группу спорщиков
"!-d" или "!-спорщик" - Удаляет пользователя из группы спорщиков
"!g" или "!отгадчик" - Добавляет пользователя в группу отгадчиков
"!-g" или "!-отгадчик" - Удаляет пользователя из группы отгадчиков
"!s" или "!старт" - Если указано минимальное количество отгадчиков и спорщиков, то запускает таймер игры
"!p" или "!пазуа" - Приостанавливает таймер игры
"!stop" или "завершить" - Завершает игру о останавливает таймер
"!reset" или "!сброс" - Удаляет всех игроков из групп отгадчиков и спорщиков
"%номер_софизма%" - Ищет у спорщика софизм по номеру, если находит, то забирает и даёт новый (вбивается без знаков процент)
"+" или "-" - Даёт или забирает 1 очко у отгадчика. Пока у отгадчика есть попытки "-" забирает 1 попытку, а не 1 очко.
".." или "!z" - Отменяет последнее действие отгадчика.
```"""
            # Before the game starts help is posted publicly; afterwards it is DMed.
            if not self.started:
                await channel.send(message_to_other_guessers)
            else:
                await member.send(message_to_other_guessers)
        if message.content == "!d" or message.content == "!спорщик":
            await self.add_debater(member)
            await self.remove_guesser(member)
        if message.content == "!g" or message.content == "!отгадчик":
            # NOTE(review): awaiting create_task works but the indirection is
            # redundant; also relies on the module-level `client` global.
            await client.loop.create_task(self.add_guesser(member))
            await client.loop.create_task(self.remove_debater(member))
        if message.content == "!-g" or message.content == "!-отгадчик":
            await self.remove_guesser(member)
        if message.content == "!-d" or message.content == "!-спорщик":
            # NOTE(review): DiscordClient defines no `client` attribute, so
            # `self.client.loop` raises AttributeError at runtime — TODO fix.
            await self.client.loop.create_task(self.remove_debater(member))
        # Reset game parameters
        if message.content == "!reset" or message.content == "!сброс":
            if not self.started:
                if self.debaters_list + self.guessers_list != []:
                    for user in self.debaters_list + self.guessers_list:
                        await user.send("Список игроков и их счёт сброшены")
                else:
                    await member.send("Список игроков и их счёт сброшены")
                await self.__reset__()
            else:
                await member.send(""""Игра уже запущена. Чтобы завершить игру введите "!stop""""")
        # Stop the game
        if message.content == "!stop" or message.content == "!завершить":
            if self.started:
                self.game_timer.cancel()
                self.end()
            else:
                # NOTE(review): coroutine not awaited — this reply is never sent.
                member.send("Нельзя остановить ещё не запущенную игру")
        #
        # Start the game
        if message.content == '!s' or message.content == '!старт':
            # Timer not running, game not paused, and at least 2 debaters + 1 guesser
            if not (self.game_timer.timer.isAlive() or self.paused) and len(self.debaters_list) > 1 and len(
                    self.guessers_list) > 0:
                self.game_timer = GameTimer.RenewableTimer(self.t, self.end)
                self.debater_cards = {}
                self.pack = deepcopy(fallacies)
                self.discard = []
                # Shuffle the deck
                random.shuffle(self.pack)
                # Deal 5 cards to each debater
                for debater in self.debaters_list:
                    i = 0
                    card_list = []
                    cards = ""
                    while i < 5:
                        card = self.pack.pop()
                        cards += card
                        card_list.append(card)
                        i += 1
                    await debater.send(cards)
                    self.debater_cards.update({debater: card_list})
                # Attempt cards per guesser:
                # - 1-2 guessers: 15 each;
                # - 3-4 guessers: 10 each;
                # - 5-6 guessers: 8 each;
                # - more than 6 guessers: 50 attempt cards are split evenly,
                #   the remainder goes back to the box.
                if len(self.guessers_list) < 3:
                    number_attempts = 15
                elif len(self.guessers_list) < 5:
                    number_attempts = 10
                elif len(self.guessers_list) < 7:
                    number_attempts = 8
                elif len(self.guessers_list) > 6:
                    number_attempts = int(50 / len(self.guessers_list))
                for guesser in self.guessers_list:
                    # Send the fallacy sheet to each guesser
                    await guesser.send(
                        "http://i.imgur.com/ivEjvmi.png\nhttp://i.imgur.com/BukCpJ7.png\nhttp://i.imgur.com/s4qav82.png")
                    # Set the starting attempts and points for each guesser
                    self.guesser_points.update({guesser: 0})
                    self.guesser_attempts.update({guesser: number_attempts})
                    self.guesser_last_turn.update({guesser: None})
                self.game_timer.start()
                await channel.send("Игра началась")
                self.started = True
            # Timer is already running: report remaining time
            # NOTE(review): Thread.isAlive() was removed in Python 3.9;
            # presumably GameTimer wraps threading.Timer — confirm / use is_alive().
            elif self.game_timer.timer.isAlive() and not self.paused:
                await channel.send("Таймер уже запущен")
                self.game_timer.pause()
                m, s = divmod(int(self.game_timer.get_actual_time()), 60)
                await channel.send("Осталось {0}м {1}с".format(m, s))
                self.game_timer.resume()
            elif self.paused:
                for user in self.guessers_list + self.debaters_list:
                    m, s = divmod(int(self.game_timer.get_actual_time()), 60)
                    await user.send("Игра продолжается\nОсталось {0}м {1}с".format(m, s))
                self.game_timer.resume()
                self.paused = False
            elif len(self.debaters_list) < 2:
                await channel.send("Нужно указать как минимум 2 спорщиков")
            elif len(self.guessers_list) < 1:
                await channel.send("Нужно указать как минимум 1 отгадчика")
        # Pause
        if message.content == '!p' or message.content == '!пауза':
            if self.started and not self.paused:
                self.game_timer.pause()
                self.game_timer.get_actual_time()
                self.paused = True
                for user in self.guessers_list + self.debaters_list:
                    m, s = divmod(int(self.game_timer.get_actual_time()), 60)
                    await user.send("Пауза\nОсталось {0}м {1}с".format(m, s))
            elif not self.started:
                await channel.send("Игра ещё не запущена")
            elif self.paused:
                await channel.send("Игра уже на паузе")
        # Send the fallacy sheet on demand
        if message.content == '!софизмы' or message.content == '*':
            await member.send(
                "http://i.imgur.com/ivEjvmi.png\nhttp://i.imgur.com/BukCpJ7.png\nhttp://i.imgur.com/s4qav82.png")
        # Scoring
        if message.content == '+' or message.content == '-':
            if not self.started:
                return await member.send(
                    "Игра не запущенна. Проводить манипуляции со счётом до старта игры нельзя.".format(
                        member))
            if member not in self.guesser_points:
                return await member.send("'+' или '-' отправленное отгадчиком даёт или отнимает очко у "
                                         "этого отгадчика. **{0}** - не отгадчик".format(member))
            if message.content == "+":
                self.guesser_points[member] = self.guesser_points[member] + 1
                self.guesser_last_turn[member] = "plus_point"
                message_to_other_guessers = "Игрок **{0}** получил 1 очко.".format(member.name)
                message_to_member_guesser = "Вы получили 1 очко."
            elif message.content == "-":
                # While the guesser still has attempt cards, '-' costs an
                # attempt; once they run out it costs a point.
                if self.guesser_attempts[member] > 0:
                    self.guesser_attempts[member] = self.guesser_attempts[member] - 1
                    self.guesser_last_turn[member] = "minus_attempt"
                    message_to_other_guessers = "Игрок **{0}** потерял 1 попытку.".format(member.name)
                    message_to_member_guesser = "Вы потеряли 1 попытку.".format(member.name)
                else:
                    self.guesser_points[member] = self.guesser_points[member] - 1
                    self.guesser_last_turn[member] = "minus_point"
                    message_to_other_guessers = "Игрок **{0}** потерял 1 очко.".format(member.name)
                    message_to_member_guesser = "Вы потеряли 1 очко."
            self.guesser_messages += 1
            for guesser in self.guesser_points:
                if guesser != member:
                    await guesser.send("{0} {1}".format(message_to_other_guessers, self.current_score()))
                else:
                    await guesser.send("{0} {1}".format(message_to_member_guesser, self.current_score()))
                # Re-send the fallacy sheet after every 3 score messages
                if self.guesser_messages > 2:
                    await guesser.send(
                        "http://i.imgur.com/ivEjvmi.png\nhttp://i.imgur.com/BukCpJ7.png\nhttp://i.imgur.com/s4qav82.png")
            if self.guesser_messages > 2:
                self.guesser_messages = 0
        # Undo the guesser's last scoring action
        if message.content == '!z' or message.content == '..':
            if not self.started:
                return await member.send("Игра не запущенна. Нельзя отменить последнее действие".format(
                    member))
            elif member not in self.guesser_last_turn:
                return await member.send("Отменить последнее действие может только отгадчик.".format(
                    member))
            elif self.guesser_last_turn[member] is None:
                return await member.send("Вы ещё не совершали никаких действия")
            elif self.guesser_last_turn[member] == "returned":
                return await member.send("Вы уже отменили своё действие. Отменять больше 1 действия подряд нельзя.")
            elif self.guesser_last_turn[member] == "plus_point":
                self.guesser_points[member] = self.guesser_points[member] - 1
                self.guesser_last_turn[member] = "returned"
                message_to_other_players = "Игрок **{0}** отменил своё последнее действие. У него забирается 1 очко.".format(
                    member.name)
                message_to_member_player = "Вы отменили своё последнее действие. У вас забирается 1 очко."
            elif self.guesser_last_turn[member] == "minus_point":
                self.guesser_points[member] = self.guesser_points[member] + 1
                # NOTE(review): dead store — immediately overwritten by "returned".
                self.guesser_last_turn[member] = "minus_point"
                self.guesser_last_turn[member] = "returned"
                message_to_other_players = "Игрок **{0}** отменил своё последнее действие. Ему возвращается 1 очко.".format(
                    member.name)
                message_to_member_player = "Вы отменили своё последнее действие. Вам возвращается 1 очко."
            elif self.guesser_last_turn[member] == "minus_attempt":
                self.guesser_attempts[member] = self.guesser_attempts[member] + 1
                self.guesser_last_turn[member] = "returned"
                message_to_other_players = "Игрок **{0}** отменил своё последнее действие. Ему возвращается 1 попытка.".format(
                    member.name)
                message_to_member_player = "Вы отменили своё последнее действие. Вам возвращается 1 попытка.".format(
                    member.name)
            for guesser in self.guesser_points:
                # NOTE(review): start_private_message/send_message belong to
                # discord.py < 1.0, while the rest of this file uses the 1.0+
                # `member.send` API — this loop likely raises AttributeError.
                # TODO confirm and migrate to guesser.send(...).
                ch = await client.start_private_message(guesser)
                if guesser != member:
                    await client.send_message(ch, "{0} {1}".format(message_to_other_players,
                                                                   self.current_score()))
                else:
                    await client.send_message(ch, "{0} {1}".format(message_to_member_player,
                                                                   self.current_score()))
        # Discard a card: a debater sends a card number to swap it for a new one
        if message.content.isdigit() and len(message.content) < 3 and member in self.debaters_list:
            if len(fallacies) <= int(message.content):
                return await member.send("Номер карточки должен быть не больше {}".format(
                    len(fallacies) - 1))
            so = fallacies[int(message.content)]
            card_list = self.debater_cards.get(member)
            if card_list.count(so) > 0:
                card_list.remove(so)
                card = self.pack.pop()
                card_list.append(card)
                # NOTE(review): the freshly drawn card is appended to the
                # discard pile; presumably the removed card `so` was intended
                # — TODO confirm.
                self.discard.append(card)
                await member.send(" ".join(card_list))
            else:
                return await member.send("У вас нет карточки номер {}".format(message.content))
            # When the deck runs out, the discard pile is shuffled and becomes the deck
            if not self.pack:
                self.pack = deepcopy(self.discard)
                random.shuffle(self.pack)
                self.discard = []
        if message.content == '!r' or message.content == "!правила":
            """Показать правила игры"""
            # Rules are split into 3 messages because of Discord's message length limit
            await message.channel.send('''
**Fallacymania — правила игры**
Данные правила являются минимальной модификацией оригинальных правил - http://gdurl.com/z6s0A/download с учётом особенностей игры с чатботом в discord.
Для игры нужно 3–20 игроков (рекомендуется 4–12). Игроки разбиваются на 2 группы: спорщики (2–10 игроков) и отгадчики (1–10 игроков). Ведущий может играть в любой из этих ролей.
Чтобы войти в группу спорщиков надо написать в чат ```!d```, чтобы в группу отгадчиков ```!g```
Игра требует наличия микрофона у каждого игрока. Все игроки (спорщики и отгадчики) должны быть в одном аудиоканале.
**Подготовка к игре**
1. Некоторые (или все) спорщики определяют для себя тезисы, которые они будут отстаивать с использованием софизмов. Тезисы можно написать в общий чат, а можно проговорить словами.
Задача спорщиков — проталкивать собственные тезисы, а также комментировать (поддерживать или опровергать) сказанное другими спорщиками, но с использованием софизмов со своих карт
Примеры тезисов:
Инопланетных цивилизаций не существует;
Никого невозможно в чём-то убедить при помощи софизмов;
Зелёный цвет красивее, чем красный.
2. Спорщики могут объединяться в группы, когда несколько человек отстаивают один и тот же тезис. Те спорщики, которые не взяли себе никакой тезис, используют софизмы только для ответов на сказанное другими.
3. Для начала игры следует написать в чат ```!s```После этого чат-бот раздаст спорщикам по 5 карточек с софизмами, а отгадчикам лист с софизмами
''')
            await message.channel.send('''**Ход игры**
1. Игра идёт в реальном времени. Спорщики говорят фразы в поддержку своего тезиса, а также поддерживают или опровергают тезисы других спорщиков.
Но всё это должно делаться с использованием софизмов, которые есть у спорщиков на картах.
2. У спорщиков нет условий победы и поражения; их цель — попрактиковаться использовать софизмы так, чтобы аргументы звучали убедительно.
3. Спорщики могут говорить в любом порядке; могут как отвечать на реплики других игроков, так и высказывать новые суждения относительно своего тезиса.
4. Отгадчики смотрят дебаты и пытаются угадать, какие софизмы используют спорщики. Отгадчики соревнуются между собой, кто наберёт больше очков за угаданные софизмы.
5. Любой из отгадчиков может в любой момент пытаться угадывать софизмы, которые используют спорщики. Для этого отгадчик громко говорит имя спорщика и название софизма, который, как ему кажется, употребил этот спорщик. Спорщик отвечает отгадчику, правильная была догадка или нет.
6. Отгадчик может пытаться угадать софизмы __только из последней реплики__, сказанной спорщиком.
Если спорщик уже начал говорить следующую реплику, предыдущие софизмы угадывать нельзя. Тем не менее, можно пытаться угадать последние сказанные софизмы других спорщиков, пока они не начали говорить.
''')
            # NOTE(review): missing `await` — this third rules message is a
            # coroutine that is never scheduled, so it is never sent.
            message.channel.send('''
7. Спорщик может использовать в одной реплике несколько софизмов одновременно из тех, которые есть у него на картах. Отгадчики тоже могут пытаться найти несколько софизмов в одной реплике спорщика.
8. За каждый угаданный софизм отгадчик получает __1 очко__. Очки начисляет себе отгадчик сам, для этого он должен написать в чат ```+``` или ```-```, а спорщик откладывает угаданную карту софизма в сброс и берёт себе новую из колоды, вводя номер софизма в чат например ```22``` Если колода заканчивается, то сброс перетасовывается и становится новой колодой.
9. Спорщик больше не может использовать тот софизм, который ушёл в сброс, но может пользоваться новым полученным софизмом.
10. Если отгадчик называет не тот софизм, который использовал спорщик, то он теряет 1 карту попытки . Когда карты попыток у отгадчика заканчиваются, __он начинает терять по 1 очку__ за каждую неправильную попытку, для этого он должен написать в чат ```.-```, если ошибся и отнял у себя лишнюю попытку, то можно вернуть попытку с помощью ```.+```
11. Игра продолжается __20 минут__. В конце игры среди отгадчиков определяется победитель по количеству набранных очков.''')
if __name__ == "__main__":
    # Load the bot token from the first line of token.txt; abort with an
    # on-screen message if the file is missing or empty.
    # encoding='utf-8' added: the original relied on the platform default
    # encoding, which breaks on the Cyrillic data files under e.g. Windows.
    try:
        with open(file="token.txt", mode="r", encoding="utf-8") as f:
            token = " ".join(f.readline().split())
            if token == "":
                input(
                    'В первую строку файла "token.txt" надо вставить токен.\n'
                    'Нажмите любую клавишу для выхода из программы...\n')
                sys.exit()
    except FileNotFoundError:
        input('Файла "token.txt" нет в директории.\nНажмите любую клавишу для выхода из программы...\n')
        sys.exit()
    # Load the fallacy deck, one card per line (newlines kept by readlines()).
    try:
        with open(file="fallacies.txt", mode="r", encoding="utf-8") as f:
            fallacies = f.readlines()
    except FileNotFoundError:
        input('Файла "fallacies.txt" нет в директории.\nНажмите любую клавишу для выхода из программы...\n')
        sys.exit()
    # ------------------------------------------------------------------------------
    # Full debug log goes to discord.log; only errors are echoed to stdout.
    logger = logging.getLogger('discord')
    logger.setLevel(logging.DEBUG)
    handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
    handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
    logger.addHandler(handler)
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.ERROR)
    logger.addHandler(stdout_handler)
    # ------------------------------------------------------------------------------
    description = '''Чат-бот для игры в fallacymania'''
    # ------------------------------------------------------------------------------
    # Create the bot and block until the connection is closed.
    client = DiscordClient()
    client.run(token)
| [
"import configparser\n",
"import logging\n",
"import random\n",
"import sys\n",
"from copy import deepcopy\n",
"\n",
"import discord\n",
"\n",
"import GameTimer\n",
"\n",
"\n",
"def load_config():\n",
" c = configparser.ConfigParser()\n",
" c.read('config.ini')\n",
" try:\n",
" time = c.getint('game', 'time')\n",
" except ValueError:\n",
" logger.error(\n",
" \"В config.ini неверно указано значение time. Значение установлено на 1200\")\n",
" time = 1200\n",
" return {'t': time}\n",
"\n",
"\n",
"class DiscordClient(discord.Client):\n",
" def __init__(self, **kwargs):\n",
" discord.Client.__init__(self, **kwargs)\n",
"\n",
" self.paused = False\n",
" self.debaters_list = []\n",
" self.debater_names = []\n",
" self.guesser_attempts = {}\n",
" self.guessers_list = []\n",
" self.guesser_names = []\n",
" self.guesser_points = {}\n",
" self.guesser_last_turn = {}\n",
" self.guesser_messages = 0\n",
"\n",
" self.debater_cards = {}\n",
" self.pack = {}\n",
" self.discard = []\n",
"\n",
" try:\n",
" config = load_config()\n",
" self.t = config[\"t\"]\n",
" except:\n",
" self.t = 1200\n",
" logger.error(\n",
" \"Файл config.ini отсуствует или содержит некорретные данные, были загруженны настройки по умолчанию.\")\n",
"\n",
" print(self.t)\n",
" self.game_timer = GameTimer.RenewableTimer(self.t, self.end)\n",
"\n",
" self.started = False\n",
"\n",
" async def on_ready(self):\n",
" await self.__reset__()\n",
" print('Logged in as')\n",
" print(client.user.name)\n",
" print(client.user.id)\n",
"\n",
" print('------')\n",
"\n",
" async def __reset__(self):\n",
" self.paused = False\n",
" self.debaters_list.clear()\n",
" self.debater_names.clear()\n",
" self.guesser_attempts.clear()\n",
" self.guessers_list.clear()\n",
" self.guesser_names.clear()\n",
" self.guesser_points.clear()\n",
" self.guesser_last_turn.clear()\n",
" self.guesser_messages = 0\n",
"\n",
" def end(self):\n",
" self.loop.create_task(self.end_game())\n",
" self.loop.create_task(self.__reset__())\n",
"\n",
" async def end_game(self):\n",
" self.started = False\n",
"\n",
" max_points = 0\n",
" winners = []\n",
" # Определяет отгадчика с максимальным количеством очков\n",
" for guesser in self.guesser_points:\n",
" if self.guesser_points[guesser] > max_points:\n",
" max_points = self.guesser_points[guesser]\n",
" winner = guesser\n",
" winners = [winner.name]\n",
" elif self.guesser_points[guesser] == max_points:\n",
" winners.append(guesser.name)\n",
"\n",
" if len(self.guesser_points) < 2:\n",
" end_game_message = \"Победитель **{}**\".format(guesser.name)\n",
" elif len(winners) < 2:\n",
" end_game_message = \"Победитель **{}**\".format(winner.name)\n",
" elif len(winners) > 1:\n",
" end_game_message = \"Победители {}\".format(\"**\" + \"**, **\".join(winners) + \"**\")\n",
"\n",
" score = self.current_score()\n",
"\n",
" for user in self.guessers_list + self.debaters_list:\n",
" await user.send(\"{0}\\n{1}\\nИгра закончилась\".format(score, end_game_message))\n",
" print(\"Игра закончилась\")\n",
"\n",
" def current_score(self):\n",
" score_message = \"Общий счёт (Игрок: очки | попытки):\\n\"\n",
" for guesser in self.guesser_points:\n",
" score_message += \"**{0}**: {1} | {2} \\n\".format(guesser.name, self.guesser_points[guesser],\n",
" self.guesser_attempts[guesser])\n",
" return score_message\n",
"\n",
" async def add_guesser(self, member):\n",
" if member not in self.guessers_list:\n",
" self.guessers_list.append(member)\n",
" self.guesser_names.append(member.name)\n",
" guessers = \"**\" + \"**, **\".join(self.guesser_names) + \"**\"\n",
"\n",
" for guesser in self.guessers_list:\n",
" if guesser != member:\n",
" await guesser.send(\n",
" \"Игрок {0} добавлен в группу отгадчиков\\n\"\n",
" \"Группа отгадчиков: {1}\\n\"\n",
" \"Общее количество отгадчиков: **{2}**\".format(member.name, guessers,\n",
" len(self.guessers_list)))\n",
" else:\n",
" await guesser.send(\n",
" \"Вы добавлены в группу отгадчиков\\n\"\n",
" \"Группа отгадчиков: {0}\\n\"\n",
" \"Общее количество отгадчиков: **{1}**\".format(guessers,\n",
" len(self.guessers_list)))\n",
" elif member in self.guessers_list:\n",
" guessers = \"**\" + \"**, **\".join(self.guesser_names) + \"**\"\n",
" await member.send(\n",
" 'Вы уже в группе отгадчиков \\nГруппа отгадчиков: {0}\\nОбщее количество '\n",
" 'отгадчиков: **{1}**'.format(guessers, len(self.guessers_list)))\n",
"\n",
" async def remove_guesser(self, member):\n",
" if member in self.guessers_list:\n",
" self.guessers_list.remove(member)\n",
" self.guesser_names.remove(member.name)\n",
" guessers = \"**\" + \"**, **\".join(self.guesser_names) + \"**\"\n",
" await member.send(\n",
" \"Вы удалены из группы отгадчиков\\n\"\n",
" \"Группа отгадчиков: {0}\\n\"\n",
" \"Общее количество отгадчиков: **{1}**\".format(guessers, len(self.guessers_list)))\n",
" for guesser in self.guessers_list:\n",
" await guesser.send(\n",
" \"Игрок {0} удалён из группы отгадчиков\\n\"\n",
" \"Группа отгадчиков: {1}\\n\"\n",
" \"Общее количество отгадчиков: **{2}**\".format(member.name, guessers,\n",
" len(self.guessers_list)))\n",
"\n",
" async def add_debater(self, member):\n",
" if member not in self.debaters_list:\n",
" self.debaters_list.append(member)\n",
" self.debater_names.append(member.name)\n",
" debaters = \"**\" + \"**, **\".join(self.debater_names) + \"**\"\n",
"\n",
" for debater in self.debaters_list:\n",
" if debater != member:\n",
" await debater.send(\n",
" \"Игрок {0} добавлен в группу спорщиков\\n\"\n",
" \"Группа спорщиков: {1}\\n\"\n",
" \"Общее количество спорщиков: **{2}**\".format(member.name, debaters,\n",
" len(self.debaters_list)))\n",
" else:\n",
" await debater.send(\n",
" \"Вы добавлены в группу спорщиков\\n\"\n",
" \"Группа спорщиков: {1}\\n\"\n",
" \"Общее количество спорщиков: **{2}**\".format(member.name, debaters,\n",
" len(self.debaters_list)))\n",
" elif member in self.debaters_list:\n",
" debaters = \"**\" + \"**, **\".join(self.debater_names) + \"**\"\n",
" await member.send(\n",
" 'Вы уже в группе спорщиков \\nГруппа спорщиков: {0}\\nОбщее количество '\n",
" 'спорщиков: **{1}**'.format(debaters, len(self.debaters_list)))\n",
"\n",
" # we do not want the client to reply to itself\n",
" if member == discord.Client.user:\n",
" return\n",
"\n",
" async def remove_debater(self, member):\n",
" if member in self.debaters_list:\n",
" self.debaters_list.remove(member)\n",
" self.debater_names.remove(member.name)\n",
" debaters = \"**\" + \"**, **\".join(self.debater_names) + \"**\"\n",
" await member.send(\n",
" \"Вы удалены из группы спорщиков\\n\"\n",
" \"Группа спорщиков: {0}\\n\"\n",
" \"Общее количество спорщиков: **{1}**\".format(debaters, len(self.debaters_list)))\n",
" for debater in self.debaters_list:\n",
" await debater.send(\n",
" \"Игрок {0} удалён из группы спорщиков\\n\"\n",
" \"Группа отгадчиков: {1}\\n\"\n",
" \"Общее количество спорщиков: **{2}**\".format(member.name, debaters,\n",
" len(self.debaters_list)))\n",
"\n",
" async def on_message(self, message):\n",
" member = message.author\n",
" channel = message.channel\n",
"\n",
" if message.content == \"!help\" or message.content == \"!h\":\n",
" message_to_other_guessers = \"\"\"```Чат-бот для игры в Fallacymania\n",
"\n",
" Команды:\n",
"\n",
" \"!h\" или \"!help\" - Выводит данную справку\n",
"\n",
" \"!r\" или \"!правила\" - Выводит правила\n",
"\n",
" \"*\" или \"!софизмы\" - Отправляет в ответ лист с софизмами\n",
"\n",
" \"!d\" или \"!спорщик\" - Добавляет пользователя в группу спорщиков\n",
"\n",
" \"!-d\" или \"!-спорщик\" - Удаляет пользователя из группы спорщиков\n",
"\n",
" \"!g\" или \"!отгадчик\" - Добавляет пользователя в группу отгадчиков\n",
"\n",
" \"!-g\" или \"!-отгадчик\" - Удаляет пользователя из группы отгадчиков\n",
"\n",
" \"!s\" или \"!старт\" - Если указано минимальное количество отгадчиков и спорщиков, то запускает таймер игры\n",
"\n",
" \"!p\" или \"!пазуа\" - Приостанавливает таймер игры\n",
"\n",
" \"!stop\" или \"завершить\" - Завершает игру о останавливает таймер\n",
"\n",
" \"!reset\" или \"!сброс\" - Удаляет всех игроков из групп отгадчиков и спорщиков\n",
"\n",
" \"%номер_софизма%\" - Ищет у спорщика софизм по номеру, если находит, то забирает и даёт новый (вбивается без знаков процент)\n",
"\n",
" \"+\" или \"-\" - Даёт или забирает 1 очко у отгадчика. Пока у отгадчика есть попытки \"-\" забирает 1 попытку, а не 1 очко.\n",
"\n",
" \"..\" или \"!z\" - Отменяет последнее действие отгадчика.\n",
" ```\"\"\"\n",
"\n",
" if not self.started:\n",
" await channel.send(message_to_other_guessers)\n",
" else:\n",
" await member.send(message_to_other_guessers)\n",
"\n",
" if message.content == \"!d\" or message.content == \"!спорщик\":\n",
" await self.add_debater(member)\n",
" await self.remove_guesser(member)\n",
"\n",
" if message.content == \"!g\" or message.content == \"!отгадчик\":\n",
" await client.loop.create_task(self.add_guesser(member))\n",
" await client.loop.create_task(self.remove_debater(member))\n",
"\n",
" if message.content == \"!-g\" or message.content == \"!-отгадчик\":\n",
" await self.remove_guesser(member)\n",
"\n",
" if message.content == \"!-d\" or message.content == \"!-спорщик\":\n",
" await self.client.loop.create_task(self.remove_debater(member))\n",
"\n",
" # Сбросить параматеры игры\n",
" if message.content == \"!reset\" or message.content == \"!сброс\":\n",
" if not self.started:\n",
" if self.debaters_list + self.guessers_list != []:\n",
" for user in self.debaters_list + self.guessers_list:\n",
" await user.send(\"Список игроков и их счёт сброшены\")\n",
" else:\n",
" await member.send(\"Список игроков и их счёт сброшены\")\n",
" await self.__reset__()\n",
"\n",
" else:\n",
" await member.send(\"\"\"\"Игра уже запущена. Чтобы завершить игру введите \"!stop\"\"\"\"\")\n",
"\n",
" # Завершить игру\n",
" if message.content == \"!stop\" or message.content == \"!завершить\":\n",
" if self.started:\n",
" self.game_timer.cancel()\n",
" self.end()\n",
" else:\n",
" member.send(\"Нельзя остановить ещё не запущенную игру\")\n",
" #\n",
" # Старт игры\n",
" if message.content == '!s' or message.content == '!старт':\n",
" # Если таймер не запущен и игра не на паузе, есть как минимум 2 спорщика и 1 отгадчик\n",
" if not (self.game_timer.timer.isAlive() or self.paused) and len(self.debaters_list) > 1 and len(\n",
" self.guessers_list) > 0:\n",
" self.game_timer = GameTimer.RenewableTimer(self.t, self.end)\n",
" self.debater_cards = {}\n",
" self.pack = deepcopy(fallacies)\n",
" self.discard = []\n",
" # Перемешать колоду\n",
" random.shuffle(self.pack)\n",
" # Раздать карты спорщикам\n",
" for debater in self.debaters_list:\n",
" i = 0\n",
" card_list = []\n",
" cards = \"\"\n",
" while i < 5:\n",
" card = self.pack.pop()\n",
" cards += card\n",
" card_list.append(card)\n",
" i += 1\n",
" await debater.send(cards)\n",
" self.debater_cards.update({debater: card_list})\n",
"\n",
" # • если отгадчиков 1-2, каждый берёт по 15 карт попыток;\n",
" # • если отгадчиков 3-4, каждый берёт по 10 карт попыток;\n",
" # • если отгадчиков 5-6, каждый берёт по 8 карт попыток;\n",
" # • если отгадчиков больше 6, то 50 карт попыток делятся поровну между отгадчиками,\n",
" # а остаток убирается обратно в коробку.\n",
" if len(self.guessers_list) < 3:\n",
" number_attempts = 15\n",
" elif len(self.guessers_list) < 5:\n",
" number_attempts = 10\n",
" elif len(self.guessers_list) < 7:\n",
" number_attempts = 8\n",
" elif len(self.guessers_list) > 6:\n",
" number_attempts = int(50 / len(self.guessers_list))\n",
"\n",
" for guesser in self.guessers_list:\n",
" # Раздать лист с софизмами отгадчикам\n",
" await guesser.send(\n",
" \"http://i.imgur.com/ivEjvmi.png\\nhttp://i.imgur.com/BukCpJ7.png\\nhttp://i.imgur.com/s4qav82.png\")\n",
" # Установить начальное количество попыток и очков для отгадчиков\n",
" self.guesser_points.update({guesser: 0})\n",
" self.guesser_attempts.update({guesser: number_attempts})\n",
" self.guesser_last_turn.update({guesser: None})\n",
"\n",
" self.game_timer.start()\n",
" await channel.send(\"Игра началась\")\n",
" self.started = True\n",
" # Если таймер запущен\n",
" elif self.game_timer.timer.isAlive() and not self.paused:\n",
" await channel.send(\"Таймер уже запущен\")\n",
" self.game_timer.pause()\n",
" m, s = divmod(int(self.game_timer.get_actual_time()), 60)\n",
" await channel.send(\"Осталось {0}м {1}с\".format(m, s))\n",
" self.game_timer.resume()\n",
" elif self.paused:\n",
" for user in self.guessers_list + self.debaters_list:\n",
" m, s = divmod(int(self.game_timer.get_actual_time()), 60)\n",
" await user.send(\"Игра продолжается\\nОсталось {0}м {1}с\".format(m, s))\n",
" self.game_timer.resume()\n",
" self.paused = False\n",
"\n",
" elif len(self.debaters_list) < 2:\n",
" await channel.send(\"Нужно указать как минимум 2 спорщиков\")\n",
" elif len(self.guessers_list) < 1:\n",
" await channel.send(\"Нужно указать как минимум 1 отгадчика\")\n",
"\n",
" # Пауза\n",
" if message.content == '!p' or message.content == '!пауза':\n",
" if self.started and not self.paused:\n",
" self.game_timer.pause()\n",
" self.game_timer.get_actual_time()\n",
" self.paused = True\n",
"\n",
" for user in self.guessers_list + self.debaters_list:\n",
" m, s = divmod(int(self.game_timer.get_actual_time()), 60)\n",
" await user.send(\"Пауза\\nОсталось {0}м {1}с\".format(m, s))\n",
" elif not self.started:\n",
" await channel.send(\"Игра ещё не запущена\")\n",
" elif self.paused:\n",
" await channel.send(\"Игра уже на паузе\")\n",
"\n",
" # Выдать лист с софизмом\n",
" if message.content == '!софизмы' or message.content == '*':\n",
" await member.send(\n",
" \"http://i.imgur.com/ivEjvmi.png\\nhttp://i.imgur.com/BukCpJ7.png\\nhttp://i.imgur.com/s4qav82.png\")\n",
"\n",
" # Начиление очков\n",
" if message.content == '+' or message.content == '-':\n",
" if not self.started:\n",
" return await member.send(\n",
" \"Игра не запущенна. Проводить манипуляции со счётом до старта игры нельзя.\".format(\n",
" member))\n",
"\n",
" if member not in self.guesser_points:\n",
" return await member.send(\"'+' или '-' отправленное отгадчиком даёт или отнимает очко у \"\n",
" \"этого отгадчика. **{0}** - не отгадчик\".format(member))\n",
"\n",
" if message.content == \"+\":\n",
" self.guesser_points[member] = self.guesser_points[member] + 1\n",
" self.guesser_last_turn[member] = \"plus_point\"\n",
" message_to_other_guessers = \"Игрок **{0}** получил 1 очко.\".format(member.name)\n",
" message_to_member_guesser = \"Вы получили 1 очко.\"\n",
" elif message.content == \"-\":\n",
" if self.guesser_attempts[member] > 0:\n",
" self.guesser_attempts[member] = self.guesser_attempts[member] - 1\n",
" self.guesser_last_turn[member] = \"minus_attempt\"\n",
" message_to_other_guessers = \"Игрок **{0}** потерял 1 попытку.\".format(member.name)\n",
" message_to_member_guesser = \"Вы потеряли 1 попытку.\".format(member.name)\n",
" else:\n",
" self.guesser_points[member] = self.guesser_points[member] - 1\n",
" self.guesser_last_turn[member] = \"minus_point\"\n",
" message_to_other_guessers = \"Игрок **{0}** потерял 1 очко.\".format(member.name)\n",
" message_to_member_guesser = \"Вы потеряли 1 очко.\"\n",
"\n",
" self.guesser_messages += 1\n",
" for guesser in self.guesser_points:\n",
" if guesser != member:\n",
" await guesser.send(\"{0} {1}\".format(message_to_other_guessers, self.current_score()))\n",
" else:\n",
" await guesser.send(\"{0} {1}\".format(message_to_member_guesser, self.current_score()))\n",
"\n",
" # Раздать лист с софизмами после 3х сообщений о счёте\n",
" if self.guesser_messages > 2:\n",
" await guesser.send(\n",
" \"http://i.imgur.com/ivEjvmi.png\\nhttp://i.imgur.com/BukCpJ7.png\\nhttp://i.imgur.com/s4qav82.png\")\n",
" if self.guesser_messages > 2:\n",
" self.guesser_messages = 0\n",
"\n",
" # Отмена\n",
" if message.content == '!z' or message.content == '..':\n",
" if not self.started:\n",
" return await member.send(\"Игра не запущенна. Нельзя отменить последнее действие\".format(\n",
" member))\n",
"\n",
" elif member not in self.guesser_last_turn:\n",
" return await member.send(\"Отменить последнее действие может только отгадчик.\".format(\n",
" member))\n",
"\n",
" elif self.guesser_last_turn[member] is None:\n",
" return await member.send(\"Вы ещё не совершали никаких действия\")\n",
"\n",
" elif self.guesser_last_turn[member] == \"returned\":\n",
" return await member.send(\"Вы уже отменили своё действие. Отменять больше 1 действия подряд нельзя.\")\n",
"\n",
" elif self.guesser_last_turn[member] == \"plus_point\":\n",
" self.guesser_points[member] = self.guesser_points[member] - 1\n",
" self.guesser_last_turn[member] = \"returned\"\n",
" message_to_other_players = \"Игрок **{0}** отменил своё последнее действие. У него забирается 1 очко.\".format(\n",
" member.name)\n",
" message_to_member_player = \"Вы отменили своё последнее действие. У вас забирается 1 очко.\"\n",
"\n",
" elif self.guesser_last_turn[member] == \"minus_point\":\n",
" self.guesser_points[member] = self.guesser_points[member] + 1\n",
" self.guesser_last_turn[member] = \"minus_point\"\n",
" self.guesser_last_turn[member] = \"returned\"\n",
" message_to_other_players = \"Игрок **{0}** отменил своё последнее действие. Ему возвращается 1 очко.\".format(\n",
" member.name)\n",
" message_to_member_player = \"Вы отменили своё последнее действие. Вам возвращается 1 очко.\"\n",
"\n",
" elif self.guesser_last_turn[member] == \"minus_attempt\":\n",
" self.guesser_attempts[member] = self.guesser_attempts[member] + 1\n",
" self.guesser_last_turn[member] = \"returned\"\n",
" message_to_other_players = \"Игрок **{0}** отменил своё последнее действие. Ему возвращается 1 попытка.\".format(\n",
" member.name)\n",
" message_to_member_player = \"Вы отменили своё последнее действие. Вам возвращается 1 попытка.\".format(\n",
" member.name)\n",
"\n",
" for guesser in self.guesser_points:\n",
" ch = await client.start_private_message(guesser)\n",
" if guesser != member:\n",
" await client.send_message(ch, \"{0} {1}\".format(message_to_other_players,\n",
" self.current_score()))\n",
" else:\n",
" await client.send_message(ch, \"{0} {1}\".format(message_to_member_player,\n",
" self.current_score()))\n",
"\n",
" # Удаляет карту в сброс\n",
" if message.content.isdigit() and len(message.content) < 3 and member in self.debaters_list:\n",
" if len(fallacies) <= int(message.content):\n",
" return await member.send(\"Номер карточки должен быть не больше {}\".format(\n",
" len(fallacies) - 1))\n",
"\n",
" so = fallacies[int(message.content)]\n",
"\n",
" card_list = self.debater_cards.get(member)\n",
" if card_list.count(so) > 0:\n",
" card_list.remove(so)\n",
" card = self.pack.pop()\n",
" card_list.append(card)\n",
" self.discard.append(card)\n",
" await member.send(\" \".join(card_list))\n",
"\n",
" else:\n",
" return await member.send(\"У вас нет карточки номер {}\".format(message.content))\n",
"\n",
" # Если колода закончилась, то сброшенные карты перемешиваются и становятся колодой\n",
" if not self.pack:\n",
" self.pack = deepcopy(self.discard)\n",
" random.shuffle(self.pack)\n",
" self.discard = []\n",
"\n",
" if message.content == '!r' or message.content == \"!правила\":\n",
" \"\"\"Показать правила игры\"\"\"\n",
" # Разделено на 3 сообщения, из-за лимита на количество символов в discord\n",
" await message.channel.send('''\n",
" **Fallacymania — правила игры**\n",
" Данные правила являются минимальной модификацией оригинальных правил - http://gdurl.com/z6s0A/download с учётом особенностей игры с чатботом в discord.\n",
"\n",
" Для игры нужно 3–20 игроков (рекомендуется 4–12). Игроки разбиваются на 2 группы: спорщики (2–10 игроков) и отгадчики (1–10 игроков). Ведущий может играть в любой из этих ролей.\n",
" Чтобы войти в группу спорщиков надо написать в чат ```!d```, чтобы в группу отгадчиков ```!g```\n",
" Игра требует наличия микрофона у каждого игрока. Все игроки (спорщики и отгадчики) должны быть в одном аудиоканале.\n",
"\n",
" **Подготовка к игре**\n",
" 1. Некоторые (или все) спорщики определяют для себя тезисы, которые они будут отстаивать с использованием софизмов. Тезисы можно написать в общий чат, а можно проговорить словами.\n",
" Задача спорщиков — проталкивать собственные тезисы, а также комментировать (поддерживать или опровергать) сказанное другими спорщиками, но с использованием софизмов со своих карт\n",
" Примеры тезисов:\n",
" Инопланетных цивилизаций не существует;\n",
" Никого невозможно в чём-то убедить при помощи софизмов;\n",
" Зелёный цвет красивее, чем красный.\n",
" 2. Спорщики могут объединяться в группы, когда несколько человек отстаивают один и тот же тезис. Те спорщики, которые не взяли себе никакой тезис, используют софизмы только для ответов на сказанное другими.\n",
" 3. Для начала игры следует написать в чат ```!s```После этого чат-бот раздаст спорщикам по 5 карточек с софизмами, а отгадчикам лист с софизмами\n",
" ''')\n",
"\n",
" await message.channel.send('''**Ход игры**\n",
" 1. Игра идёт в реальном времени. Спорщики говорят фразы в поддержку своего тезиса, а также поддерживают или опровергают тезисы других спорщиков.\n",
" Но всё это должно делаться с использованием софизмов, которые есть у спорщиков на картах.\n",
" 2. У спорщиков нет условий победы и поражения; их цель — попрактиковаться использовать софизмы так, чтобы аргументы звучали убедительно.\n",
" 3. Спорщики могут говорить в любом порядке; могут как отвечать на реплики других игроков, так и высказывать новые суждения относительно своего тезиса.\n",
" 4. Отгадчики смотрят дебаты и пытаются угадать, какие софизмы используют спорщики. Отгадчики соревнуются между собой, кто наберёт больше очков за угаданные софизмы.\n",
" 5. Любой из отгадчиков может в любой момент пытаться угадывать софизмы, которые используют спорщики. Для этого отгадчик громко говорит имя спорщика и название софизма, который, как ему кажется, употребил этот спорщик. Спорщик отвечает отгадчику, правильная была догадка или нет.\n",
" 6. Отгадчик может пытаться угадать софизмы __только из последней реплики__, сказанной спорщиком.\n",
" Если спорщик уже начал говорить следующую реплику, предыдущие софизмы угадывать нельзя. Тем не менее, можно пытаться угадать последние сказанные софизмы других спорщиков, пока они не начали говорить.\n",
" ''')\n",
" message.channel.send('''\n",
" 7. Спорщик может использовать в одной реплике несколько софизмов одновременно из тех, которые есть у него на картах. Отгадчики тоже могут пытаться найти несколько софизмов в одной реплике спорщика.\n",
" 8. За каждый угаданный софизм отгадчик получает __1 очко__. Очки начисляет себе отгадчик сам, для этого он должен написать в чат ```+``` или ```-```, а спорщик откладывает угаданную карту софизма в сброс и берёт себе новую из колоды, вводя номер софизма в чат например ```22``` Если колода заканчивается, то сброс перетасовывается и становится новой колодой.\n",
" 9. Спорщик больше не может использовать тот софизм, который ушёл в сброс, но может пользоваться новым полученным софизмом.\n",
" 10. Если отгадчик называет не тот софизм, который использовал спорщик, то он теряет 1 карту попытки . Когда карты попыток у отгадчика заканчиваются, __он начинает терять по 1 очку__ за каждую неправильную попытку, для этого он должен написать в чат ```.-```, если ошибся и отнял у себя лишнюю попытку, то можно вернуть попытку с помощью ```.+```\n",
" 11. Игра продолжается __20 минут__. В конце игры среди отгадчиков определяется победитель по количеству набранных очков.''')\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" try:\n",
" with open(file=\"token.txt\", mode=\"r\") as f:\n",
" token = \" \".join(f.readline().split())\n",
" if token == \"\":\n",
" input(\n",
" 'В первую строку файла \"token.txt\" надо вставить токен.\\n'\n",
" 'Нажмите любую клавишу для выхода из программы...\\n')\n",
" exit()\n",
" except FileNotFoundError:\n",
" input('Файла \"token.txt\" нет в директории.\\nНажмите любую клавишу для выхода из программы...\\n')\n",
" exit()\n",
"\n",
" try:\n",
" with open(file=\"fallacies.txt\", mode=\"r\") as f:\n",
" fallacies = f.readlines()\n",
" except FileNotFoundError:\n",
" input('Файла \"fallacies.txt\" нет в директории.\\nНажмите любую клавишу для выхода из программы...\\n')\n",
" exit()\n",
"\n",
" # ------------------------------------------------------------------------------\n",
" logger = logging.getLogger('discord')\n",
" logger.setLevel(logging.DEBUG)\n",
" handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\n",
" handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\n",
" logger.addHandler(handler)\n",
"\n",
" stdout_handler = logging.StreamHandler(sys.stdout)\n",
" stdout_handler.setLevel(logging.ERROR)\n",
" logger.addHandler(stdout_handler)\n",
" # ------------------------------------------------------------------------------\n",
" description = '''Чат-бот для игры в fallacymania'''\n",
" # ------------------------------------------------------------------------------\n",
" # Переменная отвечает за то запущенна ли игра\n",
"\n",
" client = DiscordClient()\n",
" client.run(token)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0.008403361344537815,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0.009615384615384616,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0.010416666666666666,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0.011235955056179775,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0.011235955056179775,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0.010526315789473684,
0,
0,
0,
0,
0.010869565217391304,
0.010526315789473684,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0,
0,
0,
0,
0.011363636363636364,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008849557522123894,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0.007575757575757576,
0,
0.007874015748031496,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0.009174311926605505,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00819672131147541,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008771929824561403,
0,
0,
0,
0,
0,
0.009615384615384616,
0,
0,
0,
0.009523809523809525,
0.01020408163265306,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0.011627906976744186,
0,
0.009708737864077669,
0.010752688172043012,
0,
0.012195121951219513,
0,
0.01,
0,
0,
0,
0,
0,
0.009433962264150943,
0,
0.009433962264150943,
0,
0,
0,
0,
0.00819672131147541,
0,
0,
0,
0,
0,
0,
0.009523809523809525,
0,
0,
0,
0.00980392156862745,
0,
0,
0,
0.012345679012345678,
0,
0,
0.008547008547008548,
0,
0,
0,
0,
0.007936507936507936,
0,
0.009345794392523364,
0,
0,
0,
0,
0,
0.008,
0,
0.009345794392523364,
0,
0,
0.012195121951219513,
0,
0.0078125,
0,
0.00847457627118644,
0,
0,
0,
0,
0,
0.010752688172043012,
0.011111111111111112,
0,
0.010752688172043012,
0.011111111111111112,
0,
0,
0.01,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0.00625,
0,
0.005376344086021506,
0.009615384615384616,
0.008064516129032258,
0,
0,
0.005319148936170213,
0.0053475935828877,
0,
0,
0,
0,
0.004651162790697674,
0.006535947712418301,
0,
0,
0,
0.006535947712418301,
0.01020408163265306,
0.006896551724137931,
0.006289308176100629,
0.005780346820809248,
0.003484320557491289,
0.009523809523809525,
0.004807692307692308,
0,
0,
0.0048543689320388345,
0.0027247956403269754,
0.007633587786259542,
0.002824858757062147,
0.007518796992481203,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009523809523809525,
0,
0,
0,
0,
0,
0,
0.009174311926605505,
0,
0,
0,
0,
0,
0.011627906976744186,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 556 | 0.001632 | false |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
"""This class defines the basic environments that define how agents interact
with one another.
World(object) provides a generic parent class, including __enter__ and __exit__
statements which allow you to guarantee that the shutdown method is called
and KeyboardInterrupts are less noisy (if desired).
DialogPartnerWorld(World) provides a two-agent turn-based dialog setting
MultiAgentDialogWorld provides a multi-agent setting.
MultiWorld(World) creates a set of environments (worlds) for the same agent
to multitask over, a different environment will be chosen per episode.
HogwildWorld(World) is a container that creates another world within itself for
every thread, in order to have separate simulated environments for each one.
Each world gets its own agents initialized using the "share()" parameters
from the original agents.
BatchWorld(World) is a container for doing minibatch training over a world by
collecting batches of N copies of the environment (each with different state).
All worlds are initialized with the following parameters:
opt -- contains any options needed to set up the agent. This generally contains
all command-line arguments recognized from core.params, as well as other
options that might be set through the framework to enable certain modes.
agents -- the set of agents that should be attached to the world,
e.g. for DialogPartnerWorld this could be the teacher (that defines the
task/dataset) and the learner agent. This is ignored in the case of
sharing, and the shared parameter is used instead to initialize agents.
shared (optional) -- if not None, contains any shared data used to construct
this particular instantiation of the world. This data might have been
initialized by another world, so that different agents can share the same
data (possibly in different Processes).
"""
import copy
import importlib
import random
from multiprocessing import Process, Value, Condition, Semaphore
from collections import deque
from parlai.core.agents import _create_task_agents, create_agents_from_shared
from parlai.tasks.tasks import ids_to_tasks
import pdb
def validate(observation):
    """Make sure the observation table is valid, or raise an error.

    A valid observation is a dict. Additionally, if it fills both the
    'text' and 'text_candidates' fields, the top candidate must equal
    the text.

    Returns the observation unchanged when it is valid.
    """
    # isinstance is the idiomatic type check (and also accepts dict
    # subclasses such as OrderedDict, which the strict type() check rejected).
    if not isinstance(observation, dict):
        raise RuntimeError('Must return dictionary from act().')
    if ('text_candidates' in observation and
            'text' in observation and
            observation['text'] != observation['text_candidates'][0]):
        raise RuntimeError('If text and text_candidates fields are both ' +
                           'filled, top text candidate should be the same' +
                           ' as text.')
    return observation
class World(object):
    """Generic parent world providing default (mostly no-op) implementations
    of the World API. Subclasses override these hooks to define how agents
    actually interact with one another."""

    def __init__(self, opt, agents=None, shared=None):
        self.id = opt['task']
        self.opt = copy.deepcopy(opt)
        if shared:
            # Rebuild agents from shared parameters rather than duplicating
            # their underlying data.
            self.agents = create_agents_from_shared(shared['agents'])
        else:
            # Attach the provided agents directly.
            self.agents = agents

    def parley(self):
        """Run one step of interaction between the agents; no-op here."""
        pass

    def getID(self):
        """Return this world's name (typically the task it encodes)."""
        return self.id

    def display(self):
        """Return a human-readable rendering of the last messages exchanged
        between the agents; useful for monitoring and debugging."""
        if not hasattr(self, 'acts'):
            return ''
        lines = []
        pair = len(self.acts) == 2
        for idx, msg in enumerate(self.acts):
            if msg is None:
                continue
            # Indent the second speaker when exactly two agents are talking.
            prefix = '    ' if pair and idx == 1 else ''
            if msg.get('reward', None) is not None:
                lines.append(prefix + '[reward: {r}]'.format(r=msg['reward']))
            if msg.get('text', ''):
                speaker = '[' + msg['id'] + ']: ' if 'id' in msg else ''
                lines.append(prefix + speaker + msg['text'])
            if msg.get('labels', False):
                lines.append(prefix + ('[labels: {}]'.format(
                    '|'.join(msg['labels']))))
            if msg.get('label_candidates', False):
                cand_len = len(msg['label_candidates'])
                if cand_len <= 10:
                    lines.append(prefix + ('[cands: {}]'.format(
                        '|'.join(msg['label_candidates']))))
                else:
                    # Candidates may be a set (not sliceable), so pull the
                    # first five via an iterator and note how many remain.
                    cand_iter = iter(msg['label_candidates'])
                    preview = (next(cand_iter) for _ in range(5))
                    lines.append(prefix + ('[cands: {}{}]'.format(
                        '|'.join(preview),
                        '| ...and {} more'.format(cand_len - 5)
                    )))
        if self.episode_done():
            lines.append('- - - - - - - - - - - - - - - - - - - - -')
        return '\n'.join(lines)

    def episode_done(self):
        """Whether the current episode has finished."""
        return False

    def epoch_done(self):
        """Whether the current epoch has finished.

        Not every world has a notion of an epoch, but this matters for
        fixed training, validation, or test sets.
        """
        return False

    def share(self):
        """Package up everything needed to reconstruct this world."""
        return {
            'world_class': type(self),
            'opt': self.opt,
            'agents': self._share_agents(),
        }

    def _share_agents(self):
        """Collect shared data from each agent so equivalent agents can be
        built elsewhere without duplicating their parameters."""
        if not hasattr(self, 'agents'):
            return None
        return [a.share() for a in self.agents]

    def get_agents(self):
        """Return the list of agents."""
        return self.agents

    def get_acts(self):
        """Return the last act of each agent."""
        return self.acts

    def __enter__(self):
        """Support ``with World(...) as world:`` usage."""
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Shut down on exit; exit quietly on KeyboardInterrupt."""
        quiet = isinstance(exc_value, KeyboardInterrupt)
        self.shutdown()
        return quiet

    def __iter__(self):
        raise NotImplementedError('Subclass did not implement this.')

    def __len__(self):
        return 0

    def reset(self):
        """Reset every attached agent."""
        for agent in self.agents:
            agent.reset()

    def synchronize(self):
        """Hook for synchronizing processes; no-op here."""
        pass

    def shutdown(self):
        """Hook for cleanup; no-op here."""
        pass
class DialogPartnerWorld(World):
    """Two-agent world that alternates turns: agent 0 acts and agent 1
    observes, then agent 1 acts and agent 0 observes."""

    def __init__(self, opt, agents, shared=None):
        super().__init__(opt)
        if shared:
            # Rebuild the agent pair from shared parameters.
            self.agents = create_agents_from_shared(shared['agents'])
        else:
            # This world only makes sense with exactly two participants.
            if len(agents) != 2:
                raise RuntimeError('There must be exactly two agents for this ' +
                                   'world.')
            self.agents = agents
        self.acts = [None] * len(self.agents)

    def parley(self):
        """Agent 0 goes first; the two agents then alternate."""
        first, second = self.agents
        self.acts[0] = first.act()
        second.observe(validate(self.acts[0]))
        self.acts[1] = second.act()
        first.observe(validate(self.acts[1]))

    def episode_done(self):
        """Only the first agent signals the end of an episode."""
        act0 = self.acts[0]
        return act0.get('episode_done', False) if act0 is not None else False

    def epoch_done(self):
        """Only the first agent signals the end of an epoch."""
        teacher = self.agents[0]
        return teacher.epoch_done() if hasattr(teacher, 'epoch_done') else False

    def report(self):
        return self.agents[0].report()

    def __len__(self):
        return len(self.agents[0])

    def __iter__(self):
        return iter(self.agents[0])

    def shutdown(self):
        """Shut down every agent in this world."""
        for agent in self.agents:
            agent.shutdown()
class MultiAgentDialogWorld(World):
    """Basic world where each agent gets a turn in a round-robin fashion,
    receiving as input the actions of all other agents since that agent last
    acted."""
    def __init__(self, opt, agents=None, shared=None):
        super().__init__(opt)
        if shared:
            # Create agents based on shared data.
            self.agents = create_agents_from_shared(shared['agents'])
        else:
            # Add passed in agents directly.
            self.agents = agents
        # BUGFIX: size acts by self.agents (set above) -- using the ``agents``
        # parameter raised TypeError when constructed from shared data (where
        # agents is None). Also dropped the redundant trailing
        # ``super().__init__(opt, agents, shared)`` call, which re-ran the
        # base initializer and re-created the agents a second time.
        self.acts = [None] * len(self.agents)
    def parley(self):
        """For each agent in turn: take an action, then broadcast it as an
        observation to every other agent."""
        acts = self.acts
        for index, agent in enumerate(self.agents):
            acts[index] = agent.act()
            for other_agent in self.agents:
                if other_agent != agent:
                    other_agent.observe(validate(acts[index]))
    def epoch_done(self):
        """Done when any agent reports its epoch is done."""
        done = False
        for a in self.agents:
            if a.epoch_done():
                done = True
        return done
    def episode_done(self):
        """Done when any agent reports its episode is done."""
        done = False
        for a in self.agents:
            if a.episode_done():
                done = True
        return done
    def report(self):
        # Metrics come from the first agent (the teacher, by convention).
        return self.agents[0].report()
    def shutdown(self):
        """Shut down each agent."""
        for a in self.agents:
            a.shutdown()
class MultiWorld(World):
    """Container for a set of worlds where each world gets a turn
    in a round-robin fashion. The same user_agents are placed in each,
    though each world may contain additional agents according to the task
    that world represents.
    """
    def __init__(self, opt, agents=None, shared=None):
        """Build one sub-world per comma-separated task name in opt['task'].

        When ``shared`` is provided, sub-worlds are reconstructed from the
        shared data instead of being created from scratch.
        """
        super().__init__(opt)
        self.worlds = []
        for index, k in enumerate(opt['task'].split(',')):
            k = k.strip()
            if k:
                print("[creating world: " + k + "]")
                opt_singletask = copy.deepcopy(opt)
                opt_singletask['task'] = k
                if shared:
                    # Create worlds based on shared data.
                    s = shared['worlds'][index]
                    self.worlds.append(s['world_class'](s['opt'], None, s))
                else:
                    # Agents are already specified.
                    self.worlds.append(create_task_world(opt_singletask, agents))
        # Index of the world currently taking its turn; -1 means none yet.
        self.world_idx = -1
        # True when the next parley should switch to a fresh world.
        self.new_world = True
        # Number of parleys completed in the current episode.
        self.parleys = -1
        # Pick worlds at random while training; round-robin otherwise.
        self.random = opt.get('datatype', None) == 'train'
    def __iter__(self):
        return self
    def __next__(self):
        # NOTE(review): yields None until the epoch ends -- iteration only
        # signals exhaustion, it does not produce examples.
        if self.epoch_done():
            raise StopIteration()
    def __len__(self):
        # Lazily computed and cached on first call.
        if not hasattr(self, 'len'):
            self.len = 0
            # length is sum of all world lengths
            for _ind, t in enumerate(self.worlds):
                self.len += len(t)
        return self.len
    def get_agents(self):
        """Return the agents of the world currently taking its turn."""
        return self.worlds[self.world_idx].get_agents()
    def get_acts(self):
        """Return the last acts of the world currently taking its turn."""
        return self.worlds[self.world_idx].get_acts()
    def share(self):
        """Share this container along with every sub-world's shared data."""
        shared_data = {}
        shared_data['world_class'] = type(self)
        shared_data['opt'] = self.opt
        shared_data['worlds'] = [w.share() for w in self.worlds]
        return shared_data
    def epoch_done(self):
        """The epoch is done only once every sub-world's epoch is done."""
        for t in self.worlds:
            if not t.epoch_done():
                return False
        return True
    def parley_init(self):
        """Advance bookkeeping before a parley: when the current episode has
        finished, pick the next world (randomly while training, otherwise the
        next world in order that still has examples remaining)."""
        self.parleys = self.parleys + 1
        if self.world_idx >= 0 and self.worlds[self.world_idx].episode_done():
            self.new_world = True
        if self.new_world:
            self.new_world = False
            self.parleys = 0
            if self.random:
                self.world_idx = random.randrange(len(self.worlds))
            else:
                # Scan forward (wrapping) for a world whose epoch is not done;
                # stop if we come all the way back around to where we started.
                start_idx = self.world_idx
                keep_looking = True
                while keep_looking:
                    self.world_idx = (self.world_idx + 1) % len(self.worlds)
                    keep_looking = (self.worlds[self.world_idx].epoch_done() and
                                    start_idx != self.world_idx)
                    if start_idx == self.world_idx:
                        return {'text': 'There are no more examples remaining.'}
    def parley(self):
        """Advance to the appropriate sub-world and let it parley once."""
        self.parley_init()
        self.worlds[self.world_idx].parley()
    def display(self):
        if self.world_idx != -1:
            s = ''
            w = self.worlds[self.world_idx]
            if self.parleys == 0:
                # Announce the active world on the first parley of an episode.
                s = '[world ' + str(self.world_idx) + ':' + w.getID() + ']\n'
            s = s + w.display()
            return s
        else:
            return ''
    def report(self):
        # TODO: static method in metrics, "aggregate metrics"
        m = {}
        m['tasks'] = {}
        sum_accuracy = 0
        num_tasks = 0
        total = 0
        for i in range(len(self.worlds)):
            mt = self.worlds[i].report()
            m['tasks'][self.worlds[i].getID()] = mt
            total += mt['total']
            if 'accuracy' in mt:
                sum_accuracy += mt['accuracy']
                num_tasks += 1
        if num_tasks > 0:
            # Unweighted mean accuracy over the tasks that report one.
            m['accuracy'] = sum_accuracy / num_tasks
        m['total'] = total
        return m
def override_opts_in_shared(table, overrides):
    """Looks recursively for opt dictionaries within shared dict and overrides
    any key-value pairs with pairs from the overrides dict.

    Mutates ``table`` in place and returns it.
    """
    if 'opt' in table:
        # change values if an 'opt' dict is available
        table['opt'].update(overrides)
    for k, v in table.items():
        # look for sub-dictionaries which also might contain an 'opt' dict
        # (isinstance is the idiomatic check and also covers dict subclasses)
        if isinstance(v, dict) and k != 'opt':
            override_opts_in_shared(v, overrides)
        elif isinstance(v, list):
            for item in v:
                if isinstance(item, dict):
                    override_opts_in_shared(item, overrides)
    return table
class BatchWorld(World):
    """Creates a separate world for each item in the batch, sharing
    the parameters for each.

    The underlying world(s) it is batching can be either DialogPartnerWorld,
    MultiAgentWorld or MultiWorld.
    """
    def __init__(self, opt, world):
        self.opt = opt
        # Random task selection only applies while training.
        self.random = opt.get('datatype', None) == 'train'
        self.world = world
        shared = world.share()
        self.worlds = []
        for i in range(opt['batchsize']):
            # make sure that any opt dicts in shared have batchindex set to i
            # this lets all shared agents know which batchindex they have,
            # which is needed for ordered data (esp valid/test sets)
            override_opts_in_shared(shared, { 'batchindex': i })
            self.worlds.append(shared['world_class'](opt, None, shared))
        # One observation-batch slot per agent in the template world.
        self.batch_observations = [ None ] * len(self.world.get_agents())
    def __iter__(self):
        return self
    def __next__(self):
        if self.epoch_done():
            raise StopIteration()
    def batch_observe(self, index, batch_actions):
        """Have agent ``index`` in every world observe its corresponding
        action from ``batch_actions``; returns the list of observations."""
        batch_observations = []
        for i, w in enumerate(self.worlds):
            agents = w.get_agents()
            observation = agents[index].observe(validate(batch_actions[i]))
            if observation is None:
                raise ValueError('Agents should return what they observed.')
            batch_observations.append(observation)
        return batch_observations
    def batch_act(self, index, batch_observation):
        """Collect one action per world for agent ``index``, using the
        agent's batch_act() when it supports batching."""
        # Given batch observation, do update for agents[index].
        # Call update on agent
        a = self.world.get_agents()[index]
        if (batch_observation is not None and len(batch_observation) > 0 and
            hasattr(a, 'batch_act')):
            batch_actions = a.batch_act(batch_observation)
            # Store the actions locally in each world.
            # BUGFIX: world i must receive its own action batch_actions[i];
            # previously every world was handed batch_actions[index] (the
            # *agent* index), so all worlds stored one example's action.
            for i, w in enumerate(self.worlds):
                acts = w.get_acts()
                acts[index] = batch_actions[i]
        else:
            # Reverts to running on each individually.
            batch_actions = []
            for w in self.worlds:
                agents = w.get_agents()
                acts = w.get_acts()
                acts[index] = agents[index].act()
                batch_actions.append(acts[index])
        return batch_actions
    def parley(self):
        """One batched step: each agent acts across all worlds, and every
        other agent observes the resulting actions."""
        # Collect batch together for each agent, and do update.
        # Assumes DialogPartnerWorld, MultiAgentWorld, or MultiWorlds of them.
        num_agents = len(self.world.get_agents())
        batch_observations = self.batch_observations
        for w in self.worlds:
            if hasattr(w, 'parley_init'):
                w.parley_init()
        for index in range(num_agents):
            batch_act = self.batch_act(index, batch_observations[index])
            for other_index in range(num_agents):
                if index != other_index:
                    batch_observations[other_index] = (
                        self.batch_observe(other_index, batch_act))
    def display(self):
        s = ("[--batchsize " + str(len(self.worlds)) + "--]\n")
        for i, w in enumerate(self.worlds):
            s += ("[batch world " + str(i) + ":]\n")
            s += (w.display() + '\n')
        s += ("[--end of batch--]")
        return s
    def getID(self):
        return self.world.getID()
    def episode_done(self):
        # Batched worlds run continuously; episodes end inside sub-worlds.
        return False
    def epoch_done(self):
        """Done only once every batched world's epoch is done."""
        for world in self.worlds:
            if not world.epoch_done():
                return False
        return True
    def report(self):
        return self.worlds[0].report()
class HogwildProcess(Process):
    """Process child used for HogwildWorld.

    Each HogwildProcess contains its own unique World.
    """

    def __init__(self, tid, world, opt, agents, sem, fin, term, cnt):
        super().__init__()
        self.threadId = tid
        self.world_type = world
        self.opt = opt
        # Share agent parameters so this process can build equivalent agents.
        self.agent_shares = [a.share() for a in agents]
        self.queued_items = sem
        self.epochDone = fin
        self.terminate = term
        self.cnt = cnt

    def run(self):
        """Claim queued examples (via the ``queued_items`` semaphore) and
        parley on them until the main thread signals termination."""
        env = self.world_type(self.opt,
                              create_agents_from_shared(self.agent_shares))
        with env:
            while True:
                self.queued_items.acquire()
                if self.terminate.value:
                    # The main thread asked us to shut down.
                    break
                env.parley()
                with self.cnt.get_lock():
                    self.cnt.value -= 1
                    if self.cnt.value == 0:
                        # All queued examples processed: wake the main thread.
                        with self.epochDone:
                            self.epochDone.notify_all()
class HogwildWorld(World):
    """Creates a separate world for each thread (process).

    Maintains a few shared objects to keep track of state:
    - A Semaphore which represents queued examples to be processed. Every call
      of parley increments this counter; every time a Process claims an
      example, it decrements this counter.
    - A Condition variable which notifies when there are no more queued
      examples.
    - A boolean Value which represents whether the inner worlds should shutdown.
    - An integer Value which contains the number of unprocessed examples queued
      (acquiring the semaphore only claims them--this counter is decremented
      once the processing is complete).
    """

    def __init__(self, world_class, opt, agents):
        # Reference world kept in the main process (used by getID/report).
        self.inner_world = world_class(opt, agents)

        self.queued_items = Semaphore(0)    # counts num exs to be processed
        self.epochDone = Condition()        # notifies when exs are finished
        self.terminate = Value('b', False)  # tells threads when to shut down
        self.cnt = Value('i', 0)  # number of exs that remain to be processed

        # Spawn one worker per requested thread; each builds its own copy of
        # the world from the agents' shared state, then starts immediately.
        self.threads = []
        for i in range(opt['numthreads']):
            self.threads.append(HogwildProcess(i, world_class, opt,
                                               agents, self.queued_items,
                                               self.epochDone, self.terminate,
                                               self.cnt))
        for t in self.threads:
            t.start()

    def __iter__(self):
        raise NotImplementedError('Iteration not available in hogwild.')

    def display(self):
        # Shut the workers down first so the process does not hang on raise.
        self.shutdown()
        raise NotImplementedError('Hogwild does not support displaying in-run' +
                                  ' task data. Use `--numthreads 1`.')

    def episode_done(self):
        return False

    def parley(self):
        """Queue one item to be processed."""
        with self.cnt.get_lock():
            self.cnt.value += 1
        self.queued_items.release()

    def getID(self):
        return self.inner_world.getID()

    def report(self):
        return self.inner_world.report()

    def synchronize(self):
        """Sync barrier: will wait until all queued examples are processed."""
        with self.epochDone:
            self.epochDone.wait_for(lambda: self.cnt.value == 0)

    def shutdown(self):
        """Set shutdown flag and wake threads up to close themselves"""
        # set shutdown flag
        with self.terminate.get_lock():
            self.terminate.value = True
        # wake up each thread by queueing fake examples
        for _ in self.threads:
            self.queued_items.release()
        # wait for threads to close
        for t in self.threads:
            t.join()
### Functions for creating tasks/worlds given options.
def _get_task_world(opt):
    """Resolve opt['task'] into a (world_class, task_agents) pair.

    A dotted first component (e.g. 'parlai.tasks.squad.agents:DefaultTeacher')
    is treated as a direct module path and implies DialogPartnerWorld;
    otherwise the world class is looked up in parlai.tasks.<task>.worlds.
    """
    sp = opt['task'].strip().split(':')
    if '.' in sp[0]:
        # The case of opt['task'] = 'parlai.tasks.squad.agents:DefaultTeacher'
        # (i.e. specifying your own path directly, assumes DialogPartnerWorld)
        world_class = DialogPartnerWorld
    else:
        task = sp[0].lower()
        if len(sp) > 1:
            sp[1] = sp[1][0].upper() + sp[1][1:]
            world_name = sp[1] + "World"
        else:
            world_name = "DefaultWorld"
        module_name = "parlai.tasks.%s.worlds" % (task)
        try:
            my_module = importlib.import_module(module_name)
            world_class = getattr(my_module, world_name)
        except (ImportError, AttributeError):
            # Defaults to this if you did not specify a world for your task.
            # Narrowed from a bare `except:` so that unrelated errors (e.g.
            # KeyboardInterrupt) are no longer silently swallowed.
            world_class = DialogPartnerWorld
    task_agents = _create_task_agents(opt)
    return world_class, task_agents
def create_task_world(opt, user_agents):
    """Build the world for opt['task'], placing the task's own agents
    ahead of the supplied user agents."""
    world_class, task_agents = _get_task_world(opt)
    all_agents = task_agents + user_agents
    return world_class(opt, all_agents)
def create_task(opt, user_agents):
    """Creates a world + task_agents (aka a task)
    assuming opt['task']="task_dir:teacher_class:options"
    e.g. "babi:Task1k:1" or "#babi-1k" or "#QA",
    see parlai/tasks/tasks.py and see parlai/tasks/task_list.py
    for list of tasks.
    """
    # Accept a single agent as well as a list of agents.
    if not isinstance(user_agents, list):
        user_agents = [user_agents]

    # Convert any hashtag task labels to task directory path names.
    # (e.g. "#QA" to the list of tasks that are QA tasks).
    opt = copy.deepcopy(opt)
    opt['task'] = ids_to_tasks(opt['task'])
    print('[creating task(s): ' + opt['task'] + ']')

    # Single threaded or hogwild task creation (the latter creates multiple
    # threads). Check datatype for train, because we need to do
    # single-threaded for valid and test in order to guarantee exactly one
    # epoch of training.
    if opt.get('numthreads', 1) == 1 or opt['datatype'] != 'train':
        if ',' not in opt['task']:
            # Single task
            world = create_task_world(opt, user_agents)
        else:
            # Multitask teacher/agent
            world = MultiWorld(opt, user_agents)

        if opt.get('batchsize', 1) > 1:
            return BatchWorld(opt, world)
        else:
            return world
    else:
        # More than one thread requested: do hogwild training.
        if ',' not in opt['task']:
            # Single task
            # TODO(ahm): fix metrics for multiteacher hogwild training
            world_class, task_agents = _get_task_world(opt)
            return HogwildWorld(world_class, opt, task_agents + user_agents)
        else:
            # TODO(ahm): fix this
            raise NotImplementedError('hogwild multiworld not supported yet')
| [
"# Copyright (c) 2017-present, Facebook, Inc.\n",
"# All rights reserved.\n",
"# This source code is licensed under the BSD-style license found in the\n",
"# LICENSE file in the root directory of this source tree. An additional grant\n",
"# of patent rights can be found in the PATENTS file in the same directory.\n",
"\"\"\"This class defines the basic environments that define how agents interact\n",
"with one another.\n",
"\n",
"World(object) provides a generic parent class, including __enter__ and __exit__\n",
" statements which allow you to guarantee that the shutdown method is called\n",
" and KeyboardInterrupts are less noisy (if desired).\n",
"\n",
"DialogPartnerWorld(World) provides a two-agent turn-based dialog setting\n",
"MultiAgentDialogWorld provides a multi-agent setting.\n",
"\n",
"MultiWorld(World) creates a set of environments (worlds) for the same agent\n",
" to multitask over, a different environment will be chosen per episode.\n",
"\n",
"HogwildWorld(World) is a container that creates another world within itself for\n",
" every thread, in order to have separate simulated environments for each one.\n",
" Each world gets its own agents initialized using the \"share()\" parameters\n",
" from the original agents.\n",
"\n",
"BatchWorld(World) is a container for doing minibatch training over a world by\n",
"collecting batches of N copies of the environment (each with different state).\n",
"\n",
"\n",
"All worlds are initialized with the following parameters:\n",
"opt -- contains any options needed to set up the agent. This generally contains\n",
" all command-line arguments recognized from core.params, as well as other\n",
" options that might be set through the framework to enable certain modes.\n",
"agents -- the set of agents that should be attached to the world,\n",
" e.g. for DialogPartnerWorld this could be the teacher (that defines the\n",
" task/dataset) and the learner agent. This is ignored in the case of\n",
" sharing, and the shared parameter is used instead to initalize agents.\n",
"shared (optional) -- if not None, contains any shared data used to construct\n",
" this particular instantiation of the world. This data might have been\n",
" initialized by another world, so that different agents can share the same\n",
" data (possibly in different Processes).\n",
"\"\"\"\n",
"\n",
"import copy\n",
"import importlib\n",
"import random\n",
"\n",
"from multiprocessing import Process, Value, Condition, Semaphore\n",
"from collections import deque\n",
"from parlai.core.agents import _create_task_agents, create_agents_from_shared\n",
"from parlai.tasks.tasks import ids_to_tasks\n",
"\n",
"\n",
"import pdb\n",
"\n",
"def validate(observation):\n",
" \"\"\"Make sure the observation table is valid, or raise an error.\"\"\"\n",
" if observation is not None and type(observation) == dict:\n",
" if ('text_candidates' in observation and\n",
" 'text' in observation and\n",
" observation['text'] != observation['text_candidates'][0]):\n",
" raise RuntimeError('If text and text_candidates fields are both ' +\n",
" 'filled, top text candidate should be the same' +\n",
" ' as text.')\n",
" return observation\n",
" else:\n",
" raise RuntimeError('Must return dictionary from act().')\n",
"\n",
"\n",
"class World(object):\n",
" \"\"\"Empty parent providing null definitions of API functions for Worlds.\n",
" All children can override these to provide more detailed functionality.\"\"\"\n",
"\n",
" def __init__(self, opt, agents=None, shared=None):\n",
" self.id = opt['task']\n",
" self.opt = copy.deepcopy(opt)\n",
" if shared:\n",
" # Create agents based on shared data.\n",
" self.agents = create_agents_from_shared(shared['agents'])\n",
" else:\n",
" # Add passed in agents to world directly.\n",
" self.agents = agents\n",
"\n",
" def parley(self):\n",
" \"\"\" The main method, that does one step of actions for the agents\n",
" in the world. This is empty in the base class.\"\"\"\n",
" pass\n",
"\n",
" def getID(self):\n",
" \"\"\"Return the name of the world, typically the task the world encodes.\"\"\"\n",
" return self.id\n",
"\n",
" def display(self):\n",
" \"\"\"Returns a string describing the current state of the world.\n",
" Useful for monitoring and debugging.\n",
" By default, display the messages between the agents.\"\"\"\n",
" if not hasattr(self, 'acts'):\n",
" return ''\n",
" lines = []\n",
" for index, msg in enumerate(self.acts):\n",
" if msg is None:\n",
" continue\n",
" # Possibly indent the text (for the second speaker, if two).\n",
" space = ''\n",
" if len(self.acts) == 2 and index == 1:\n",
" space = ' '\n",
" if msg.get('reward', None) is not None:\n",
" lines.append(space + '[reward: {r}]'.format(r=msg['reward']))\n",
" if msg.get('text', ''):\n",
" ID = '[' + msg['id'] + ']: ' if 'id' in msg else ''\n",
" lines.append(space + ID + msg['text'])\n",
" if msg.get('labels', False):\n",
" lines.append(space + ('[labels: {}]'.format(\n",
" '|'.join(msg['labels']))))\n",
" if msg.get('label_candidates', False):\n",
" cand_len = len(msg['label_candidates'])\n",
" if cand_len <= 10:\n",
" lines.append(space + ('[cands: {}]'.format(\n",
" '|'.join(msg['label_candidates']))))\n",
" else:\n",
" # select five label_candidates from the candidate set,\n",
" # can't slice in because it's a set\n",
" cand_iter = iter(msg['label_candidates'])\n",
" display_cands = (next(cand_iter) for _ in range(5))\n",
" # print those cands plus how many cands remain\n",
" lines.append(space + ('[cands: {}{}]'.format(\n",
" '|'.join(display_cands),\n",
" '| ...and {} more'.format(cand_len - 5)\n",
" )))\n",
" if self.episode_done():\n",
" lines.append('- - - - - - - - - - - - - - - - - - - - -')\n",
" return '\\n'.join(lines)\n",
"\n",
" def episode_done(self):\n",
" \"\"\"Whether the episode is done or not. \"\"\"\n",
" return False\n",
"\n",
" def epoch_done(self):\n",
" \"\"\"Whether the epoch is done or not.\n",
" Not all worlds have the notion of an epoch, but this is useful\n",
" for fixed training, validation or test sets.\n",
" \"\"\"\n",
" return False\n",
"\n",
" def share(self):\n",
" shared_data = {}\n",
" shared_data['world_class'] = type(self)\n",
" shared_data['opt'] = self.opt\n",
" shared_data['agents'] = self._share_agents()\n",
" return shared_data\n",
"\n",
" def _share_agents(self):\n",
" \"\"\" create shared data for agents so other classes can create the same\n",
" agents without duplicating the data (i.e. sharing parameters).\"\"\"\n",
" if not hasattr(self, 'agents'):\n",
" return None\n",
" shared_agents = [a.share() for a in self.agents]\n",
" return shared_agents\n",
"\n",
" def get_agents(self):\n",
" \"\"\"Return the list of agents.\"\"\"\n",
" return self.agents\n",
"\n",
" def get_acts(self):\n",
" \"\"\"Return the last act of each agent.\"\"\"\n",
" return self.acts\n",
"\n",
" def __enter__(self):\n",
" \"\"\"Empty enter provided for use with `with` statement.\n",
" e.g:\n",
" with World() as world:\n",
" for n in range(10):\n",
" n.parley()\n",
" \"\"\"\n",
" return self\n",
"\n",
" def __exit__(self, exc_type, exc_value, exc_traceback):\n",
" \"\"\"After `with` statement, call shutdown.\"\"\"\n",
" silent_exit = isinstance(exc_value, KeyboardInterrupt)\n",
" self.shutdown()\n",
" return silent_exit\n",
"\n",
" def __iter__(self):\n",
" raise NotImplementedError('Subclass did not implement this.')\n",
"\n",
" def __len__(self):\n",
" return 0\n",
"\n",
" def reset(self):\n",
" for a in self.agents:\n",
" a.reset()\n",
"\n",
" def synchronize(self):\n",
" \"\"\"Can be used to synchronize processes.\"\"\"\n",
" pass\n",
"\n",
" def shutdown(self):\n",
" \"\"\"Performs any cleanup, if appropriate.\"\"\"\n",
" pass\n",
"\n",
"\n",
"class DialogPartnerWorld(World):\n",
" \"\"\"This basic world switches back and forth between two agents, giving each\n",
" agent one chance to speak per turn and passing that back to the other agent.\"\"\"\n",
"\n",
" def __init__(self, opt, agents, shared=None):\n",
" super().__init__(opt)\n",
" if shared:\n",
" # Create agents based on shared data.\n",
" self.agents = create_agents_from_shared(shared['agents'])\n",
" else:\n",
" if len(agents) != 2:\n",
" raise RuntimeError('There must be exactly two agents for this ' +\n",
" 'world.')\n",
" # Add passed in agents directly.\n",
" self.agents = agents\n",
" self.acts = [None] * len(self.agents)\n",
"\n",
" def parley(self):\n",
" \"\"\"Agent 0 goes first. Alternate between the two agents.\"\"\"\n",
" acts = self.acts\n",
" agents = self.agents\n",
" acts[0] = agents[0].act()\n",
" agents[1].observe(validate(acts[0]))\n",
" acts[1] = agents[1].act()\n",
" agents[0].observe(validate(acts[1]))\n",
"\n",
"\n",
" def episode_done(self):\n",
" \"\"\" Only the first agent indicates when the episode is done.\"\"\"\n",
" if self.acts[0] is not None:\n",
" return self.acts[0].get('episode_done', False)\n",
" else:\n",
" return False\n",
"\n",
" def epoch_done(self):\n",
" \"\"\"Only the first agent indicates when the epoch is done.\"\"\"\n",
" return (self.agents[0].epoch_done()\n",
" if hasattr(self.agents[0], 'epoch_done') else False)\n",
"\n",
" def report(self):\n",
" return self.agents[0].report()\n",
"\n",
" def __len__(self):\n",
" return len(self.agents[0])\n",
"\n",
" def __iter__(self):\n",
" return iter(self.agents[0])\n",
"\n",
" def shutdown(self):\n",
" \"\"\"Shutdown each agent.\"\"\"\n",
" for a in self.agents:\n",
" a.shutdown()\n",
"\n",
"\n",
"class MultiAgentDialogWorld(World):\n",
" \"\"\"Basic world where each agent gets a turn in a round-robin fashion,\n",
" recieving as input the actions of all other agents since that agent last\n",
" acted.\"\"\"\n",
" def __init__(self, opt, agents=None, shared=None):\n",
" super().__init__(opt)\n",
" if shared:\n",
" # Create agents based on shared data.\n",
" self.agents = create_agents_from_shared(shared['agents'])\n",
" else:\n",
" # Add passed in agents directly.\n",
" self.agents = agents\n",
" self.acts = [None] * len(agents)\n",
" super().__init__(opt, agents, shared)\n",
"\n",
" def parley(self):\n",
" \"\"\"For each agent, get an observation of the last action each of the\n",
" other agents took. Then take an action yourself\"\"\"\n",
" acts = self.acts\n",
" for index, agent in enumerate(self.agents):\n",
" acts[index] = agent.act()\n",
" for other_agent in self.agents:\n",
" if other_agent != agent:\n",
" other_agent.observe(validate(acts[index]))\n",
"\n",
" def epoch_done(self):\n",
" done = False\n",
" for a in self.agents:\n",
" if a.epoch_done():\n",
" done = True\n",
" return done\n",
"\n",
" def episode_done(self):\n",
" done = False\n",
" for a in self.agents:\n",
" if a.episode_done():\n",
" done = True\n",
" return done\n",
"\n",
" def report(self):\n",
" return self.agents[0].report()\n",
"\n",
" def shutdown(self):\n",
" for a in self.agents:\n",
" a.shutdown()\n",
"\n",
"\n",
"class MultiWorld(World):\n",
" \"\"\"Container for a set of worlds where each world gets a turn\n",
" in a round-robin fashion. The same user_agents are placed in each,\n",
" though each world may contain additional agents according to the task\n",
" that world represents.\n",
" \"\"\"\n",
"\n",
" def __init__(self, opt, agents=None, shared=None):\n",
" super().__init__(opt)\n",
" self.worlds = []\n",
" for index, k in enumerate(opt['task'].split(',')):\n",
" k = k.strip()\n",
" if k:\n",
" print(\"[creating world: \" + k + \"]\")\n",
" opt_singletask = copy.deepcopy(opt)\n",
" opt_singletask['task'] = k\n",
" if shared:\n",
" # Create worlds based on shared data.\n",
" s = shared['worlds'][index]\n",
" self.worlds.append(s['world_class'](s['opt'], None, s))\n",
" else:\n",
" # Agents are already specified.\n",
" self.worlds.append(create_task_world(opt_singletask, agents))\n",
" self.world_idx = -1\n",
" self.new_world = True\n",
" self.parleys = -1\n",
" self.random = opt.get('datatype', None) == 'train'\n",
"\n",
" def __iter__(self):\n",
" return self\n",
"\n",
" def __next__(self):\n",
" if self.epoch_done():\n",
" raise StopIteration()\n",
"\n",
" def __len__(self):\n",
" if not hasattr(self, 'len'):\n",
" self.len = 0\n",
" # length is sum of all world lengths\n",
" for _ind, t in enumerate(self.worlds):\n",
" self.len += len(t)\n",
" return self.len\n",
"\n",
" def get_agents(self):\n",
" return self.worlds[self.world_idx].get_agents()\n",
"\n",
" def get_acts(self):\n",
" return self.worlds[self.world_idx].get_acts()\n",
"\n",
" def share(self):\n",
" shared_data = {}\n",
" shared_data['world_class'] = type(self)\n",
" shared_data['opt'] = self.opt\n",
" shared_data['worlds'] = [w.share() for w in self.worlds]\n",
" return shared_data\n",
"\n",
" def epoch_done(self):\n",
" for t in self.worlds:\n",
" if not t.epoch_done():\n",
" return False\n",
" return True\n",
"\n",
" def parley_init(self):\n",
" self.parleys = self.parleys + 1\n",
" if self.world_idx >= 0 and self.worlds[self.world_idx].episode_done():\n",
" self.new_world = True\n",
" if self.new_world:\n",
" self.new_world = False\n",
" self.parleys = 0\n",
" if self.random:\n",
" self.world_idx = random.randrange(len(self.worlds))\n",
" else:\n",
" start_idx = self.world_idx\n",
" keep_looking = True\n",
" while keep_looking:\n",
" self.world_idx = (self.world_idx + 1) % len(self.worlds)\n",
" keep_looking = (self.worlds[self.world_idx].epoch_done() and\n",
" start_idx != self.world_idx)\n",
" if start_idx == self.world_idx:\n",
" return {'text': 'There are no more examples remaining.'}\n",
"\n",
" def parley(self):\n",
" self.parley_init()\n",
" self.worlds[self.world_idx].parley()\n",
"\n",
" def display(self):\n",
" if self.world_idx != -1:\n",
" s = ''\n",
" w = self.worlds[self.world_idx]\n",
" if self.parleys == 0:\n",
" s = '[world ' + str(self.world_idx) + ':' + w.getID() + ']\\n'\n",
" s = s + w.display()\n",
" return s\n",
" else:\n",
" return ''\n",
"\n",
" def report(self):\n",
" # TODO: static method in metrics, \"aggregate metrics\"\n",
" m = {}\n",
" m['tasks'] = {}\n",
" sum_accuracy = 0\n",
" num_tasks = 0\n",
" total = 0\n",
" for i in range(len(self.worlds)):\n",
" mt = self.worlds[i].report()\n",
" m['tasks'][self.worlds[i].getID()] = mt\n",
" total += mt['total']\n",
" if 'accuracy' in mt:\n",
" sum_accuracy += mt['accuracy']\n",
" num_tasks += 1\n",
" if num_tasks > 0:\n",
" m['accuracy'] = sum_accuracy / num_tasks\n",
" m['total'] = total\n",
" return m\n",
"\n",
"\n",
"def override_opts_in_shared(table, overrides):\n",
" \"\"\"Looks recursively for opt dictionaries within shared dict and overrides\n",
" any key-value pairs with pairs from the overrides dict.\n",
" \"\"\"\n",
" if 'opt' in table:\n",
" # change values if an 'opt' dict is available\n",
" for k, v in overrides.items():\n",
" table['opt'][k] = v\n",
" for k, v in table.items():\n",
" # look for sub-dictionaries which also might contain an 'opt' dict\n",
" if type(v) == dict and k != 'opt':\n",
" override_opts_in_shared(v, overrides)\n",
" elif type(v) == list:\n",
" for item in v:\n",
" if type(item) == dict:\n",
" override_opts_in_shared(item, overrides)\n",
" return table\n",
"\n",
"\n",
"class BatchWorld(World):\n",
" \"\"\"Creates a separate world for each item in the batch, sharing\n",
" the parameters for each.\n",
" The underlying world(s) it is batching can be either DialogPartnerWorld,\n",
" MultiAgentWorld or MultiWorld.\n",
" \"\"\"\n",
"\n",
" def __init__(self, opt, world):\n",
" self.opt = opt\n",
" self.random = opt.get('datatype', None) == 'train'\n",
" self.world = world\n",
" shared = world.share()\n",
" self.worlds = []\n",
" for i in range(opt['batchsize']):\n",
" # make sure that any opt dicts in shared have batchindex set to i\n",
" # this lets all shared agents know which batchindex they have,\n",
" # which is needed for ordered data (esp valid/test sets)\n",
" override_opts_in_shared(shared, { 'batchindex': i })\n",
" self.worlds.append(shared['world_class'](opt, None, shared))\n",
" self.batch_observations = [ None ] * len(self.world.get_agents())\n",
"\n",
" def __iter__(self):\n",
" return self\n",
"\n",
" def __next__(self):\n",
" if self.epoch_done():\n",
" raise StopIteration()\n",
"\n",
" def batch_observe(self, index, batch_actions):\n",
" batch_observations = []\n",
" for i, w in enumerate(self.worlds):\n",
" agents = w.get_agents()\n",
" observation = agents[index].observe(validate(batch_actions[i]))\n",
" if observation is None:\n",
" raise ValueError('Agents should return what they observed.')\n",
" batch_observations.append(observation)\n",
" return batch_observations\n",
"\n",
" def batch_act(self, index, batch_observation):\n",
" # Given batch observation, do update for agents[index].\n",
" # Call update on agent\n",
" a = self.world.get_agents()[index]\n",
" if (batch_observation is not None and len(batch_observation) > 0 and\n",
" hasattr(a, 'batch_act')):\n",
" batch_actions = a.batch_act(batch_observation)\n",
" # Store the actions locally in each world.\n",
" for w in self.worlds:\n",
" acts = w.get_acts()\n",
" acts[index] = batch_actions[index]\n",
" else:\n",
" # Reverts to running on each individually.\n",
" batch_actions = []\n",
" for w in self.worlds:\n",
" agents = w.get_agents()\n",
" acts = w.get_acts()\n",
" acts[index] = agents[index].act()\n",
" batch_actions.append(acts[index])\n",
" return batch_actions\n",
"\n",
" def parley(self):\n",
" # Collect batch together for each agent, and do update.\n",
" # Assumes DialogPartnerWorld, MultiAgentWorld, or MultiWorlds of them.\n",
" num_agents = len(self.world.get_agents())\n",
" batch_observations = self.batch_observations\n",
"\n",
" for w in self.worlds:\n",
" if hasattr(w, 'parley_init'):\n",
" w.parley_init()\n",
"\n",
" for index in range(num_agents):\n",
" batch_act = self.batch_act(index, batch_observations[index])\n",
" for other_index in range(num_agents):\n",
" if index != other_index:\n",
" batch_observations[other_index] = (\n",
" self.batch_observe(other_index, batch_act))\n",
"\n",
" def display(self):\n",
" s = (\"[--batchsize \" + str(len(self.worlds)) + \"--]\\n\")\n",
" for i, w in enumerate(self.worlds):\n",
" s += (\"[batch world \" + str(i) + \":]\\n\")\n",
" s += (w.display() + '\\n')\n",
" s += (\"[--end of batch--]\")\n",
" return s\n",
"\n",
" def getID(self):\n",
" return self.world.getID()\n",
"\n",
" def episode_done(self):\n",
" return False\n",
"\n",
" def epoch_done(self):\n",
" for world in self.worlds:\n",
" if not world.epoch_done():\n",
" return False\n",
" return True\n",
"\n",
" def report(self):\n",
" return self.worlds[0].report()\n",
"\n",
"\n",
"class HogwildProcess(Process):\n",
" \"\"\"Process child used for HogwildWorld.\n",
" Each HogwildProcess contain its own unique World.\n",
" \"\"\"\n",
"\n",
" def __init__(self, tid, world, opt, agents, sem, fin, term, cnt):\n",
" self.threadId = tid\n",
" self.world_type = world\n",
" self.opt = opt\n",
" self.agent_shares = [a.share() for a in agents]\n",
" self.queued_items = sem\n",
" self.epochDone = fin\n",
" self.terminate = term\n",
" self.cnt = cnt\n",
" super().__init__()\n",
"\n",
" def run(self):\n",
" \"\"\"Runs normal parley loop for as many examples as this thread can get\n",
" ahold of via the semaphore queued_items.\n",
" \"\"\"\n",
" shared_agents = create_agents_from_shared(self.agent_shares)\n",
" world = self.world_type(self.opt, shared_agents)\n",
"\n",
" with world:\n",
" while True:\n",
" self.queued_items.acquire()\n",
" if self.terminate.value:\n",
" break # time to close\n",
" world.parley()\n",
" with self.cnt.get_lock():\n",
" self.cnt.value -= 1\n",
" if self.cnt.value == 0:\n",
" # let main thread know that all the examples are finished\n",
" with self.epochDone:\n",
" self.epochDone.notify_all()\n",
"\n",
"\n",
"class HogwildWorld(World):\n",
" \"\"\"Creates a separate world for each thread (process).\n",
"\n",
" Maintains a few shared objects to keep track of state:\n",
" - A Semaphore which represents queued examples to be processed. Every call\n",
" of parley increments this counter; every time a Process claims an\n",
" example, it decrements this counter.\n",
" - A Condition variable which notifies when there are no more queued\n",
" examples.\n",
" - A boolean Value which represents whether the inner worlds should shutdown.\n",
" - An integer Value which contains the number of unprocessed examples queued\n",
" (acquiring the semaphore only claims them--this counter is decremented\n",
" once the processing is complete).\n",
" \"\"\"\n",
"\n",
" def __init__(self, world_class, opt, agents):\n",
" self.inner_world = world_class(opt, agents)\n",
"\n",
" self.queued_items = Semaphore(0) # counts num exs to be processed\n",
" self.epochDone = Condition() # notifies when exs are finished\n",
" self.terminate = Value('b', False) # tells threads when to shut down\n",
" self.cnt = Value('i', 0) # number of exs that remain to be processed\n",
"\n",
" self.threads = []\n",
" for i in range(opt['numthreads']):\n",
" self.threads.append(HogwildProcess(i, world_class, opt,\n",
" agents, self.queued_items,\n",
" self.epochDone, self.terminate,\n",
" self.cnt))\n",
" for t in self.threads:\n",
" t.start()\n",
"\n",
" def __iter__(self):\n",
" raise NotImplementedError('Iteration not available in hogwild.')\n",
"\n",
" def display(self):\n",
" self.shutdown()\n",
" raise NotImplementedError('Hogwild does not support displaying in-run' +\n",
" ' task data. Use `--numthreads 1`.')\n",
"\n",
" def episode_done(self):\n",
" return False\n",
"\n",
" def parley(self):\n",
" \"\"\"Queue one item to be processed.\"\"\"\n",
" with self.cnt.get_lock():\n",
" self.cnt.value += 1\n",
" self.queued_items.release()\n",
"\n",
" def getID(self):\n",
" return self.inner_world.getID()\n",
"\n",
" def report(self):\n",
" return self.inner_world.report()\n",
"\n",
" def synchronize(self):\n",
" \"\"\"Sync barrier: will wait until all queued examples are processed.\"\"\"\n",
" with self.epochDone:\n",
" self.epochDone.wait_for(lambda: self.cnt.value == 0)\n",
"\n",
" def shutdown(self):\n",
" \"\"\"Set shutdown flag and wake threads up to close themselves\"\"\"\n",
" # set shutdown flag\n",
" with self.terminate.get_lock():\n",
" self.terminate.value = True\n",
" # wake up each thread by queueing fake examples\n",
" for _ in self.threads:\n",
" self.queued_items.release()\n",
" # wait for threads to close\n",
" for t in self.threads:\n",
" t.join()\n",
"\n",
"\n",
"\n",
"### Functions for creating tasks/worlds given options.\n",
"\n",
"def _get_task_world(opt):\n",
" sp = opt['task'].strip().split(':')\n",
" if '.' in sp[0]:\n",
" # The case of opt['task'] = 'parlai.tasks.squad.agents:DefaultTeacher'\n",
" # (i.e. specifying your own path directly, assumes DialogPartnerWorld)\n",
" world_class = DialogPartnerWorld\n",
" else:\n",
" task = sp[0].lower()\n",
" if len(sp) > 1:\n",
" sp[1] = sp[1][0].upper() + sp[1][1:]\n",
" world_name = sp[1] + \"World\"\n",
" else:\n",
" world_name = \"DefaultWorld\"\n",
" module_name = \"parlai.tasks.%s.worlds\" % (task)\n",
" try:\n",
" my_module = importlib.import_module(module_name)\n",
" world_class = getattr(my_module, world_name)\n",
" except:\n",
" # Defaults to this if you did not specify a world for your task.\n",
" world_class = DialogPartnerWorld\n",
" task_agents = _create_task_agents(opt)\n",
" return world_class, task_agents\n",
"\n",
"\n",
"def create_task_world(opt, user_agents):\n",
" world_class, task_agents = _get_task_world(opt)\n",
" return world_class(opt, task_agents + user_agents)\n",
"\n",
"def create_task(opt, user_agents):\n",
" \"\"\"Creates a world + task_agents (aka a task)\n",
" assuming opt['task']=\"task_dir:teacher_class:options\"\n",
" e.g. \"babi:Task1k:1\" or \"#babi-1k\" or \"#QA\",\n",
" see parlai/tasks/tasks.py and see parlai/tasks/task_list.py\n",
" for list of tasks.\n",
" \"\"\"\n",
" if type(user_agents) != list:\n",
" user_agents = [user_agents]\n",
"\n",
" # Convert any hashtag task labels to task directory path names.\n",
" # (e.g. \"#QA\" to the list of tasks that are QA tasks).\n",
" opt = copy.deepcopy(opt)\n",
" #pdb.set_trace()\n",
" opt['task'] = ids_to_tasks(opt['task'])\n",
" print('[creating task(s): ' + opt['task'] + ']')\n",
"\n",
" # Single threaded or hogwild task creation (the latter creates multiple threads).\n",
" # Check datatype for train, because we need to do single-threaded for\n",
" # valid and test in order to guarantee exactly one epoch of training.\n",
" if opt.get('numthreads', 1) == 1 or opt['datatype'] != 'train':\n",
" if ',' not in opt['task']:\n",
" # Single task\n",
" world = create_task_world(opt, user_agents)\n",
" else:\n",
" # Multitask teacher/agent\n",
" world = MultiWorld(opt, user_agents)\n",
"\n",
" if opt.get('batchsize', 1) > 1:\n",
" return BatchWorld(opt, world)\n",
" else:\n",
" return world\n",
" else:\n",
" # more than one thread requested: do hogwild training\n",
" if ',' not in opt['task']:\n",
" # Single task\n",
" # TODO(ahm): fix metrics for multiteacher hogwild training\n",
" world_class, task_agents = _get_task_world(opt)\n",
" return HogwildWorld(world_class, opt, task_agents + user_agents)\n",
" else:\n",
" # TODO(ahm): fix this\n",
" raise NotImplementedError('hogwild multiworld not supported yet')\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0,
0,
0.014084507042253521,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03076923076923077,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03636363636363636,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 718 | 0.000686 | false |
# coding: UTF-8
# Name: 运算符
# Author: LYC
# Created: 2014-04-03
import re
from gpcalccfg import OPLEVEL
class Operator(object):
"""
运算符
"""
def __init__(self, original, level = 0, opnum = 0):
super(Operator, self).__init__()
self.original = original
self.level = level #权值
self.opnum = opnum #可操作数
def __call__(self, *arg):
return str(arg)
def __str__(self):
return self.original
def __repr__(self):
return str(self.original)
def __lt__(self, op):
return self.level < op.level
def __gt__(self, op):
return self.level > op.level
def __le__(self, op):
return self.level <= op.level
def __ge__(self, op):
return self.level >= op.level
def __eq__(self, op):
if isinstance(op, str):return self.original == op
if self.original != op.original:return False
if self.level != op.level:return False
if self.opnum != op.opnum:return False
return True
def __ne__(self, op):
return not self == op
class UnaryOperator(Operator):
"""
单目运算符
"""
def __init__(self, original):
super(UnaryOperator, self).__init__(original, OPLEVEL.UOP, 1)
def __call__(self, operand):
return "%s(%s)" % (self.original, operand)
class BinaryOperator(Operator):
"""
双目运算符
"""
def __init__(self, original, level):
super(BinaryOperator, self).__init__(original, level, 2)
def __call__(self, operand1, operand2):
return "(%s %s %s)" % (operand1, self.original, operand2)
def operator_factory(original, unary = False):
"""
运算符工厂
original: 运算符表示
unary: 限定为单目,请用于单目的+-
"""
if unary:return UnaryOperator(original)
if original == "+":return BinaryOperator(original, OPLEVEL.ADD)
if original == "-":return BinaryOperator(original, OPLEVEL.SUB)
if original == "*":return BinaryOperator(original, OPLEVEL.MUL)
if original == "/":return BinaryOperator(original, OPLEVEL.DIV)
if original == ",":return BinaryOperator(original, OPLEVEL.CMM)
if original == "%":return BinaryOperator(original, OPLEVEL.MOD)
if original == "**":return BinaryOperator(original, OPLEVEL.POW)
if original == "mod":return BinaryOperator("%", OPLEVEL.MOD)
if original == "^":return BinaryOperator("**", OPLEVEL.POW)
if original == "(":return Operator(original, OPLEVEL.LBK)
if original == ")":return Operator(original)
return UnaryOperator(original)
| [
"# coding: UTF-8\n",
"# Name: 运算符\n",
"# Author: LYC\n",
"# Created: 2014-04-03\n",
"\n",
"import re\n",
"from gpcalccfg import OPLEVEL\n",
"\n",
"class Operator(object):\n",
" \"\"\"\n",
" 运算符\n",
" \"\"\"\n",
" def __init__(self, original, level = 0, opnum = 0):\n",
" super(Operator, self).__init__()\n",
" self.original = original\n",
" self.level = level #权值\n",
" self.opnum = opnum #可操作数\n",
"\n",
" def __call__(self, *arg):\n",
" return str(arg)\n",
"\n",
" def __str__(self):\n",
" return self.original\n",
"\n",
" def __repr__(self):\n",
" return str(self.original)\n",
"\n",
" def __lt__(self, op):\n",
" return self.level < op.level\n",
"\n",
" def __gt__(self, op):\n",
" return self.level > op.level\n",
"\n",
" def __le__(self, op):\n",
" return self.level <= op.level\n",
"\n",
" def __ge__(self, op):\n",
" return self.level >= op.level\n",
"\n",
" def __eq__(self, op):\n",
" if isinstance(op, str):return self.original == op\n",
" if self.original != op.original:return False\n",
" if self.level != op.level:return False\n",
" if self.opnum != op.opnum:return False\n",
" return True\n",
"\n",
" def __ne__(self, op):\n",
" return not self == op\n",
"\n",
"class UnaryOperator(Operator):\n",
" \"\"\"\n",
" 单目运算符\n",
" \"\"\"\n",
" def __init__(self, original):\n",
" super(UnaryOperator, self).__init__(original, OPLEVEL.UOP, 1)\n",
"\n",
" def __call__(self, operand):\n",
" return \"%s(%s)\" % (self.original, operand)\n",
"\n",
"\n",
"class BinaryOperator(Operator):\n",
" \"\"\"\n",
" 双目运算符\n",
" \"\"\"\n",
" def __init__(self, original, level):\n",
" super(BinaryOperator, self).__init__(original, level, 2)\n",
"\n",
" def __call__(self, operand1, operand2):\n",
" return \"(%s %s %s)\" % (operand1, self.original, operand2)\n",
"\n",
"def operator_factory(original, unary = False):\n",
" \"\"\"\n",
" 运算符工厂\n",
" original: 运算符表示\n",
" unary: 限定为单目,请用于单目的+-\n",
" \"\"\"\n",
"\n",
" if unary:return UnaryOperator(original)\n",
" if original == \"+\":return BinaryOperator(original, OPLEVEL.ADD)\n",
" if original == \"-\":return BinaryOperator(original, OPLEVEL.SUB)\n",
" if original == \"*\":return BinaryOperator(original, OPLEVEL.MUL)\n",
" if original == \"/\":return BinaryOperator(original, OPLEVEL.DIV)\n",
" if original == \",\":return BinaryOperator(original, OPLEVEL.CMM)\n",
" if original == \"%\":return BinaryOperator(original, OPLEVEL.MOD)\n",
" if original == \"**\":return BinaryOperator(original, OPLEVEL.POW)\n",
" if original == \"mod\":return BinaryOperator(\"%\", OPLEVEL.MOD)\n",
" if original == \"^\":return BinaryOperator(\"**\", OPLEVEL.POW)\n",
" if original == \"(\":return Operator(original, OPLEVEL.LBK)\n",
" if original == \")\":return Operator(original)\n",
" return UnaryOperator(original)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0.07142857142857142,
0,
0,
0.06451612903225806,
0.06060606060606061,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.03773584905660377,
0.0425531914893617,
0.0425531914893617,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06382978723404255,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0.029411764705882353,
0.029411764705882353,
0.029411764705882353,
0.029411764705882353,
0.029411764705882353,
0.029411764705882353,
0.028985507246376812,
0.03076923076923077,
0.03125,
0.03225806451612903,
0.04081632653061224,
0
] | 90 | 0.009751 | false |
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para http://conectate.gov.ar
# creado por rsantaella
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "rtspan"
__category__ = "F"
__type__ = "generic"
__title__ = "rtspan"
__language__ = "ES"
__creationdate__ = "20121212"
__vfanart__ = "http://actualidad.rt.com/static/actualidad/design1/i/d/bg.png"
DEBUG = config.get_setting("debug")
def isGeneric():
return True
def mainlist(item):
logger.info("[rtspan.py] mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__, title="Detrás de la noticia", action="videos", url="http://actualidad.rt.com/programas/detras_de_la_noticia", fanart = __vfanart__))
itemlist.append( Item(channel=__channel__, title="RT reporta", action="videos", url="http://actualidad.rt.com/programas/rt_reporta", fanart = __vfanart__))
itemlist.append( Item(channel=__channel__, title="Keiser report", action="videos", url="http://actualidad.rt.com/programas/keiser_report", fanart = __vfanart__))
itemlist.append( Item(channel=__channel__, title="Desde la sombra", action="videos", url="http://actualidad.rt.com/programas/desde_la_sombra", fanart = __vfanart__))
itemlist.append( Item(channel=__channel__, title="Entrevista", action="videos", url="http://actualidad.rt.com/programas/entrevista", fanart = __vfanart__))
itemlist.append( Item(channel=__channel__, title="Deportes en reportes", action="videos", url="http://actualidad.rt.com/programas/deportes_reportes", fanart = __vfanart__))
itemlist.append( Item(channel=__channel__, title="La lista de Erick", action="videos", url="http://actualidad.rt.com/programas/la_lista_de_erick", fanart = __vfanart__))
itemlist.append( Item(channel=__channel__, title="Especial", action="videos", url="http://actualidad.rt.com/programas/especial", fanart = __vfanart__))
itemlist.append( Item(channel=__channel__, title="Tecnología de punta", action="videos", url="http://actualidad.rt.com/programas/tecnologia", fanart = __vfanart__))
itemlist.append( Item(channel=__channel__, title="Más allá de Moscú", action="videos", url="http://actualidad.rt.com/programas/mas_alla_de_moscu", fanart = __vfanart__))
#itemlist.append( Item(channel=__channel__, title="Diálogos con Julian Assange", action="videos", url="http://assange.rt.com/es/", fanart = __vfanart__))
itemlist.append( Item(channel=__channel__, title="Archivo - A fondo", action="videos", url="http://actualidad.rt.com/programas/a_fondo", fanart = __vfanart__))
itemlist.append( Item(channel=__channel__, title="Archivo - A solas", action="videos", url="http://actualidad.rt.com/programas/a_solas", fanart = __vfanart__))
return itemlist
def videos(item):
logger.info("[rtspan.py] videos")
data = scrapertools.cachePage(item.url)
data = data.replace("\n", " ")
data = data.replace("\r", " ")
data = " ".join(data.split())
#logger.info(data)
patron = '<a href="([^"]+)" title="(.*?)"> <img src="([^"]+)"> <span>(.*?)</span> </a>'
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
itemlist = []
for match in matches:
scrapedtitle = match[3]
scrapedthumbnail = 'http://actualidad.rt.com'+match[2]
scrapedurl = 'http://actualidad.rt.com'+match[0]
itemlist.append( Item(channel=__channel__, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, folder=False) )
patron = '<div class="right"><a href="([^"]+)" onclick='
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
for match in matches:
scrapedurl = 'http://actualidad.rt.com'+match
scrapedtitle = "!Página siguiente"
itemlist.append( Item(channel=__channel__, action="videos", title=scrapedtitle , url=scrapedurl , folder=True) )
return itemlist
def play(item):
logger.info("[rtspan.py] play")
data = scrapertools.cachePage(item.url)
logger.info(data)
patron = '<meta property="og:video" content="([^"]+)" />'
matches = re.compile(patron,re.DOTALL).findall(data)
if DEBUG: scrapertools.printMatches(matches)
if matches:
scrapedurl = matches[0]
itemlist = []
itemlist.append( Item(channel=__channel__, action="play", server="directo", title=item.title , url=scrapedurl , folder=False) )
return itemlist
def test():
# Al entrar sale una lista de programas
programas_items = mainlist(Item())
if len(programas_items)==0:
print "No devuelve programas"
return False
videos_items = videos(programas_items[0])
if len(videos_items)==1:
print "No devuelve videos en "+programas_items[0].title
return False
return True | [
"# -*- coding: utf-8 -*-\r#------------------------------------------------------------\r# pelisalacarta - XBMC Plugin\r# Canal para http://conectate.gov.ar\r# creado por rsantaella\r# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/\r#------------------------------------------------------------\r\rimport urlparse,urllib2,urllib,re\rimport os, sys\r\rfrom core import logger\rfrom core import config\rfrom core import scrapertools\rfrom core.item import Item\rfrom servers import servertools\r\r__channel__ = \"rtspan\"\r__category__ = \"F\"\r__type__ = \"generic\"\r__title__ = \"rtspan\"\r__language__ = \"ES\"\r__creationdate__ = \"20121212\"\r__vfanart__ = \"http://actualidad.rt.com/static/actualidad/design1/i/d/bg.png\"\r\rDEBUG = config.get_setting(\"debug\")\r\rdef isGeneric():\r return True\r\rdef mainlist(item):\r logger.info(\"[rtspan.py] mainlist\")\r\r itemlist = []\r\r itemlist.append( Item(channel=__channel__, title=\"Detrás de la noticia\", action=\"videos\", url=\"http://actualidad.rt.com/programas/detras_de_la_noticia\", fanart = __vfanart__)) \r itemlist.append( Item(channel=__channel__, title=\"RT reporta\", action=\"videos\", url=\"http://actualidad.rt.com/programas/rt_reporta\", fanart = __vfanart__))\r itemlist.append( Item(channel=__channel__, title=\"Keiser report\", action=\"videos\", url=\"http://actualidad.rt.com/programas/keiser_report\", fanart = __vfanart__)) \r itemlist.append( Item(channel=__channel__, title=\"Desde la sombra\", action=\"videos\", url=\"http://actualidad.rt.com/programas/desde_la_sombra\", fanart = __vfanart__))\r itemlist.append( Item(channel=__channel__, title=\"Entrevista\", action=\"videos\", url=\"http://actualidad.rt.com/programas/entrevista\", fanart = __vfanart__))\r itemlist.append( Item(channel=__channel__, title=\"Deportes en reportes\", action=\"videos\", url=\"http://actualidad.rt.com/programas/deportes_reportes\", fanart = __vfanart__))\r itemlist.append( Item(channel=__channel__, title=\"La lista de 
Erick\", action=\"videos\", url=\"http://actualidad.rt.com/programas/la_lista_de_erick\", fanart = __vfanart__))\r itemlist.append( Item(channel=__channel__, title=\"Especial\", action=\"videos\", url=\"http://actualidad.rt.com/programas/especial\", fanart = __vfanart__))\r itemlist.append( Item(channel=__channel__, title=\"Tecnología de punta\", action=\"videos\", url=\"http://actualidad.rt.com/programas/tecnologia\", fanart = __vfanart__)) \r itemlist.append( Item(channel=__channel__, title=\"Más allá de Moscú\", action=\"videos\", url=\"http://actualidad.rt.com/programas/mas_alla_de_moscu\", fanart = __vfanart__)) \r #itemlist.append( Item(channel=__channel__, title=\"Diálogos con Julian Assange\", action=\"videos\", url=\"http://assange.rt.com/es/\", fanart = __vfanart__)) \r itemlist.append( Item(channel=__channel__, title=\"Archivo - A fondo\", action=\"videos\", url=\"http://actualidad.rt.com/programas/a_fondo\", fanart = __vfanart__)) \r itemlist.append( Item(channel=__channel__, title=\"Archivo - A solas\", action=\"videos\", url=\"http://actualidad.rt.com/programas/a_solas\", fanart = __vfanart__)) \r\r return itemlist\r\rdef videos(item):\r\r logger.info(\"[rtspan.py] videos\") \r\r data = scrapertools.cachePage(item.url)\r data = data.replace(\"\\n\", \" \")\r data = data.replace(\"\\r\", \" \")\r data = \" \".join(data.split())\r\t\r #logger.info(data)\r \r patron = '<a href=\"([^\"]+)\" title=\"(.*?)\"> <img src=\"([^\"]+)\"> <span>(.*?)</span> </a>'\r\r matches = re.compile(patron,re.DOTALL).findall(data)\r\r if DEBUG: scrapertools.printMatches(matches)\r\t\r itemlist = []\r for match in matches:\r\r scrapedtitle = match[3]\r scrapedthumbnail = 'http://actualidad.rt.com'+match[2]\r scrapedurl = 'http://actualidad.rt.com'+match[0]\r itemlist.append( Item(channel=__channel__, action=\"play\", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, folder=False) )\r\r patron = '<div class=\"right\"><a href=\"([^\"]+)\" onclick='\r matches = 
re.compile(patron,re.DOTALL).findall(data)\r\r if DEBUG: scrapertools.printMatches(matches)\r\t\r for match in matches:\r\r scrapedurl = 'http://actualidad.rt.com'+match\t\t\r scrapedtitle = \"!Página siguiente\"\r itemlist.append( Item(channel=__channel__, action=\"videos\", title=scrapedtitle , url=scrapedurl , folder=True) )\r \r\r return itemlist\r\r\rdef play(item):\r\r logger.info(\"[rtspan.py] play\") \r\r data = scrapertools.cachePage(item.url)\r\t\r logger.info(data)\r \r patron = '<meta property=\"og:video\" content=\"([^\"]+)\" />'\r\t\t\r matches = re.compile(patron,re.DOTALL).findall(data)\r\t\t\r if DEBUG: scrapertools.printMatches(matches)\r\t\t\r if matches: \r scrapedurl = matches[0]\r\t\t\r itemlist = []\r itemlist.append( Item(channel=__channel__, action=\"play\", server=\"directo\", title=item.title , url=scrapedurl , folder=False) )\r\r return itemlist\r\rdef test():\r\r # Al entrar sale una lista de programas\r programas_items = mainlist(Item())\r if len(programas_items)==0:\r print \"No devuelve programas\"\r return False\r\r videos_items = videos(programas_items[0])\r if len(videos_items)==1:\r print \"No devuelve videos en \"+programas_items[0].title\r return False\r\r return True"
] | [
0.00038498556304138594
] | 1 | 0.000385 | false |
# coding=utf-8
from os import walk, path
from random import randint
from PIL import Image
from base.settings import MEDIA_ROOT
from base.views import call_template, amount_events
from .models import Slideshow
class NotImageError(Exception):
"""
Обработка исключения, возникающего при открытии файлов,
не являющихся файлми изображений.
"""
def __init__(self, file_type):
Exception.__init__(self, file_type)
self.file_type = file_type
def __str__(self):
return repr('NotImageError: type of file is [%s]' % self.file_type)
class PathNotFound(Exception):
"""
Обработка исключения ошибочного пути до папки с файлми
изображений.
"""
def __init__(self, broken_path):
Exception.__init__(self, broken_path)
self.broken_path = broken_path
def __str__(self):
return repr('PathNotFound: [%s] is not in slideshow directory.' % self.broken_path)
def slideshow(request):
"""
Функция отображения слайдшоу
:param request: django request
"""
return call_template(
request,
templ_path='slideshow/slideshow.html'
)
def slide(request):
"""
Функция отображения на начальной странице произвольной фотографии
:param request: django request
"""
params = {'album': 'Нет альбомов', 'slide': ''}
albums = Slideshow.objects.exclude(is_shown=False)
if len(albums):
slideshow_folder = MEDIA_ROOT + '/slideshow'
attempts = 0
# Делаем 10 попыток найти подходящий для показа файл.
# Если альбом был удален, а база не проиндексирована еще, нет графических файлов,
# выводим на экран об этом сообщение.
while attempts < 10:
try:
# Получаем первый элемент произвольно отсортированного списка фотоальбомов,
# исключая альбомы с пометкой is_shown = False
rnd_album = unicode(
slideshow_folder + albums.order_by('?')[0].album_path
)
if path.exists(rnd_album):
for root, dirs, files in walk(rnd_album):
rnd_file = randint(0, len(files) - 1)
img = '%s/%s' % (root, files[rnd_file])
file_type = img.split('.')[-1].lower()
try:
Image.open(img)
params['album'] = rnd_album.split('/')[-1].replace('_', ' ')
params['slide'] = img.replace(MEDIA_ROOT, '').replace('//', '/')
break
except:
raise NotImageError(file_type)
else:
raise PathNotFound(rnd_album)
except (NotImageError, PathNotFound):
attempts += 1
continue
else:
break
return call_template(
request,
params,
templ_path='slideshow/slide.html'
)
def slideshow_events(request):
"""" Количество непросмотренных событий для отображения на странице слайдшоу """
return amount_events(request, 'slideshow/slideshow_events.html', days=1)
| [
"# coding=utf-8\r\n",
"from os import walk, path\r\n",
"from random import randint\r\n",
"\r\n",
"from PIL import Image\r\n",
"from base.settings import MEDIA_ROOT\r\n",
"from base.views import call_template, amount_events\r\n",
"from .models import Slideshow\r\n",
"\r\n",
"\r\n",
"class NotImageError(Exception):\r\n",
" \"\"\"\r\n",
" Обработка исключения, возникающего при открытии файлов,\r\n",
" не являющихся файлми изображений.\r\n",
" \"\"\"\r\n",
"\r\n",
" def __init__(self, file_type):\r\n",
" Exception.__init__(self, file_type)\r\n",
" self.file_type = file_type\r\n",
"\r\n",
" def __str__(self):\r\n",
" return repr('NotImageError: type of file is [%s]' % self.file_type)\r\n",
"\r\n",
"\r\n",
"class PathNotFound(Exception):\r\n",
" \"\"\"\r\n",
" Обработка исключения ошибочного пути до папки с файлми\r\n",
" изображений.\r\n",
" \"\"\"\r\n",
"\r\n",
" def __init__(self, broken_path):\r\n",
" Exception.__init__(self, broken_path)\r\n",
" self.broken_path = broken_path\r\n",
"\r\n",
" def __str__(self):\r\n",
" return repr('PathNotFound: [%s] is not in slideshow directory.' % self.broken_path)\r\n",
"\r\n",
"\r\n",
"def slideshow(request):\r\n",
" \"\"\"\r\n",
" Функция отображения слайдшоу\r\n",
"\r\n",
" :param request: django request\r\n",
" \"\"\"\r\n",
"\r\n",
" return call_template(\r\n",
" request,\r\n",
" templ_path='slideshow/slideshow.html'\r\n",
" )\r\n",
"\r\n",
"\r\n",
"def slide(request):\r\n",
" \"\"\"\r\n",
" Функция отображения на начальной странице произвольной фотографии\r\n",
"\r\n",
" :param request: django request\r\n",
" \"\"\"\r\n",
"\r\n",
" params = {'album': 'Нет альбомов', 'slide': ''}\r\n",
"\r\n",
" albums = Slideshow.objects.exclude(is_shown=False)\r\n",
" if len(albums):\r\n",
" slideshow_folder = MEDIA_ROOT + '/slideshow'\r\n",
" attempts = 0\r\n",
"\r\n",
" # Делаем 10 попыток найти подходящий для показа файл.\r\n",
" # Если альбом был удален, а база не проиндексирована еще, нет графических файлов,\r\n",
" # выводим на экран об этом сообщение.\r\n",
" while attempts < 10:\r\n",
" try:\r\n",
" # Получаем первый элемент произвольно отсортированного списка фотоальбомов,\r\n",
" # исключая альбомы с пометкой is_shown = False\r\n",
" rnd_album = unicode(\r\n",
" slideshow_folder + albums.order_by('?')[0].album_path\r\n",
" )\r\n",
" if path.exists(rnd_album):\r\n",
" for root, dirs, files in walk(rnd_album):\r\n",
" rnd_file = randint(0, len(files) - 1)\r\n",
" img = '%s/%s' % (root, files[rnd_file])\r\n",
" file_type = img.split('.')[-1].lower()\r\n",
" try:\r\n",
" Image.open(img)\r\n",
" params['album'] = rnd_album.split('/')[-1].replace('_', ' ')\r\n",
" params['slide'] = img.replace(MEDIA_ROOT, '').replace('//', '/')\r\n",
" break\r\n",
" except:\r\n",
" raise NotImageError(file_type)\r\n",
" else:\r\n",
" raise PathNotFound(rnd_album)\r\n",
" except (NotImageError, PathNotFound):\r\n",
" attempts += 1\r\n",
" continue\r\n",
" else:\r\n",
" break\r\n",
" return call_template(\r\n",
" request,\r\n",
" params,\r\n",
" templ_path='slideshow/slide.html'\r\n",
" )\r\n",
"\r\n",
"\r\n",
"def slideshow_events(request):\r\n",
" \"\"\"\" Количество непросмотренных событий для отображения на странице слайдшоу \"\"\"\r\n",
" return amount_events(request, 'slideshow/slideshow_events.html', days=1)\r\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0.010638297872340425,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0
] | 104 | 0.000925 | false |
# Copyright (c) 2016, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""File containing the Scriptable abstract class."""
class Scriptable:
"""An abstract scriptable.
All scriptable objects in CocoMUD should inherit this class at some
level. This class offers mechanisms for propagating modifications to
linked objects. This is necessary because of the abstraction of
configuration level: every piece of scritpable in CocoMUD can be
defined on the general level, a world level, a character level,
or a category level. Thus, scriptables are often connected together
and, for every modification, they have to modify their peers.
"""
content = ()
def __init__(self):
self.duplicates = []
def add_duplicate(self, scriptable):
"""Add the scriptable as a duplicate of self, if it's not the case.
This method also adds self as a duplicate of scriptable.
"""
if scriptable not in self.duplicates:
self.duplicates.append(scriptable)
if self not in scriptable.duplicates:
scriptable.duplicates.append(self)
def propagate(self):
"""Propagate the modifications to duplicates."""
data = {}
for name in type(self).content:
value = getattr(self, name)
data[name] = value
# Browse the list of duplicates
for scriptable in self.duplicates:
for name, value in data.items():
setattr(scriptable, name, value)
| [
"# Copyright (c) 2016, LE GOFF Vincent\n",
"# All rights reserved.\n",
"\n",
"# Redistribution and use in source and binary forms, with or without\n",
"# modification, are permitted provided that the following conditions are met:\n",
"\n",
"# * Redistributions of source code must retain the above copyright notice, this\n",
"# list of conditions and the following disclaimer.\n",
"\n",
"# * Redistributions in binary form must reproduce the above copyright notice,\n",
"# this list of conditions and the following disclaimer in the documentation\n",
"# and/or other materials provided with the distribution.\n",
"\n",
"# * Neither the name of ytranslate nor the names of its\n",
"# contributors may be used to endorse or promote products derived from\n",
"# this software without specific prior written permission.\n",
"\n",
"# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n",
"# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n",
"# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n",
"# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n",
"# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n",
"# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n",
"# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n",
"# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n",
"# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n",
"# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n",
"\n",
"\"\"\"File containing the Scriptable abstract class.\"\"\"\n",
"\n",
"class Scriptable:\n",
"\n",
" \"\"\"An abstract scriptable.\n",
"\n",
" All scriptable objects in CocoMUD should inherit this class at some\n",
" level. This class offers mechanisms for propagating modifications to\n",
" linked objects. This is necessary because of the abstraction of\n",
" configuration level: every piece of scritpable in CocoMUD can be\n",
" defined on the general level, a world level, a character level,\n",
" or a category level. Thus, scriptables are often connected together\n",
" and, for every modification, they have to modify their peers.\n",
"\n",
" \"\"\"\n",
"\n",
" content = ()\n",
"\n",
" def __init__(self):\n",
" self.duplicates = []\n",
"\n",
" def add_duplicate(self, scriptable):\n",
" \"\"\"Add the scriptable as a duplicate of self, if it's not the case.\n",
"\n",
" This method also adds self as a duplicate of scriptable.\n",
"\n",
" \"\"\"\n",
" if scriptable not in self.duplicates:\n",
" self.duplicates.append(scriptable)\n",
" if self not in scriptable.duplicates:\n",
" scriptable.duplicates.append(self)\n",
"\n",
" def propagate(self):\n",
" \"\"\"Propagate the modifications to duplicates.\"\"\"\n",
" data = {}\n",
" for name in type(self).content:\n",
" value = getattr(self, name)\n",
" data[name] = value\n",
"\n",
" # Browse the list of duplicates\n",
" for scriptable in self.duplicates:\n",
" for name, value in data.items():\n",
" setattr(scriptable, name, value)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 71 | 0.000956 | false |
# -*- coding: utf-8 -*-
from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo
class FreeWayMe(MultiHoster):
__name__ = "FreeWayMe"
__type__ = "hoster"
__version__ = "0.16"
__pattern__ = r'https://(?:www\.)?free-way\.me/.+'
__description__ = """FreeWayMe multi-hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("Nicolas Giese", "james@free-way.me")]
def setup(self):
self.resumeDownload = False
self.multiDL = self.premium
self.chunkLimit = 1
def handlePremium(self, pyfile):
user, data = self.account.selectAccount()
for _i in xrange(5):
# try it five times
header = self.load("https://www.free-way.me/load.php",
get={'multiget': 7,
'url' : pyfile.url,
'user' : user,
'pw' : self.account.getAccountData(user)['password'],
'json' : ""},
just_header=True)
if 'location' in header:
headers = self.load(header['location'], just_header=True)
if headers['code'] == 500:
# error on 2nd stage
self.logError(_("Error [stage2]"))
else:
# seems to work..
self.download(header['location'])
break
else:
# error page first stage
self.logError(_("Error [stage1]"))
#@TODO: handle errors
getInfo = create_getInfo(FreeWayMe)
| [
"# -*- coding: utf-8 -*-\n",
"\n",
"from module.plugins.internal.MultiHoster import MultiHoster, create_getInfo\n",
"\n",
"\n",
"class FreeWayMe(MultiHoster):\n",
" __name__ = \"FreeWayMe\"\n",
" __type__ = \"hoster\"\n",
" __version__ = \"0.16\"\n",
"\n",
" __pattern__ = r'https://(?:www\\.)?free-way\\.me/.+'\n",
"\n",
" __description__ = \"\"\"FreeWayMe multi-hoster plugin\"\"\"\n",
" __license__ = \"GPLv3\"\n",
" __authors__ = [(\"Nicolas Giese\", \"james@free-way.me\")]\n",
"\n",
"\n",
" def setup(self):\n",
" self.resumeDownload = False\n",
" self.multiDL = self.premium\n",
" self.chunkLimit = 1\n",
"\n",
"\n",
" def handlePremium(self, pyfile):\n",
" user, data = self.account.selectAccount()\n",
"\n",
" for _i in xrange(5):\n",
" # try it five times\n",
" header = self.load(\"https://www.free-way.me/load.php\",\n",
" get={'multiget': 7,\n",
" 'url' : pyfile.url,\n",
" 'user' : user,\n",
" 'pw' : self.account.getAccountData(user)['password'],\n",
" 'json' : \"\"},\n",
" just_header=True)\n",
"\n",
" if 'location' in header:\n",
" headers = self.load(header['location'], just_header=True)\n",
" if headers['code'] == 500:\n",
" # error on 2nd stage\n",
" self.logError(_(\"Error [stage2]\"))\n",
" else:\n",
" # seems to work..\n",
" self.download(header['location'])\n",
" break\n",
" else:\n",
" # error page first stage\n",
" self.logError(_(\"Error [stage1]\"))\n",
"\n",
" #@TODO: handle errors\n",
"\n",
"\n",
"getInfo = create_getInfo(FreeWayMe)\n"
] | [
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0.037037037037037035,
0,
0,
0,
0,
0,
0.03333333333333333,
0.015873015873015872,
0,
0,
0.047619047619047616,
0,
0.023255813953488372,
0.03125,
0,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0.016666666666666666,
0.018518518518518517,
0.021052631578947368,
0.018867924528301886,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0,
0
] | 53 | 0.006665 | false |
# Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import, print_function
"""Implements REPL support over IPython/ZMQ for VisualStudio"""
__author__ = "Microsoft Corporation <ptvshelp@microsoft.com>"
__version__ = "3.2.1.0"
import ast
import base64
import errno
import os
import re
import sys
import threading
import time
import traceback
from ptvsd.repl import BasicReplBackend, ReplBackend, UnsupportedReplException, _command_line_to_args_list, DEBUG
from ptvsd.util import to_bytes
try:
import jupyter_client
import jupyter_client.manager
import zmq.error
except ImportError:
raise UnsupportedReplException("Jupyter mode requires the jupyter_client and ipykernel packages. " + traceback.format_exc())
try:
import _thread
except ImportError:
import thread as _thread # legacy name
try:
from queue import Empty
except ImportError:
from Queue import Empty
# Use safer eval
eval = ast.literal_eval
class Message(object):
_sentinel = object()
def __init__(self, msg):
self._m = msg
self._read = False
def __getattr__(self, attr):
v = self[attr, self._sentinel]
if v is self._sentinel:
return Message({})
if isinstance(v, dict):
return Message(v)
return v
def __getitem__(self, key):
if isinstance(key, tuple):
key, default_value = key
else:
default_value = None
if not self._m:
return self
try:
v = self._m[key]
except KeyError:
return default_value
if isinstance(v, dict):
return Message(v)
return v
def __repr__(self):
return repr(self._m)
class IntrospectHandler(object):
def __init__(self, client, on_reply, suppress_io):
self._client = client
self._on_reply = on_reply
self._suppress_io = suppress_io
self.callback = None
self.error = None
self.typename = None
self.members = None
self.responded = False
def send(self, expression):
if not expression:
self.typename = ''
msg_id = self._client.complete("")
self._suppress_io.add(msg_id)
self._on_reply.setdefault((msg_id, 'complete_reply'), []).append(self.complete_reply)
else:
msg_id = self._client.execute("_=" + expression,
store_history=False, allow_stdin=False, silent=True,
user_expressions={'m': 'getattr(type(_), "__module__", "") + "." + type(_).__name__'},
)
self._suppress_io.add(msg_id)
self._on_reply.setdefault((msg_id, 'execute_reply'), []).append(self.typename_reply)
msg_id = self._client.complete(expression + '.')
self._suppress_io.add(msg_id)
self._on_reply.setdefault((msg_id, 'complete_reply'), []).append(self.complete_reply)
def _respond(self, success):
if self.responded:
return
if not self.callback:
raise RuntimeError("No callback provider to message handler")
if success:
self.callback(self.typename, self.members, {})
else:
self.error()
self.responded = True
def complete_reply(self, message):
if message.content.status != 'ok':
self._respond(False)
return
self.members = dict((m.rpartition('.')[-1], '') for m in message.content['matches', ()])
omit = [m for m in self.members if m.startswith('__ptvs_repl_')]
for m in omit:
del self.members[m]
if self.typename is not None:
self._respond(True)
def typename_reply(self, message):
if message.content.status != 'ok' or message.content.user_expressions.m.status != 'ok':
self._respond(False)
return
self.typename = eval(message.content.user_expressions.m.data['text/plain', '"object"'])
m, _, n = self.typename.partition('.')
if m == type(int).__module__:
self.typename = n
if self.members is not None:
self._respond(True)
def set_callback(self, success_callback, error_callback):
self.callback = success_callback
self.error = error_callback
class SignaturesHandler(object):
def __init__(self, client, on_reply, suppress_io):
self._client = client
self._on_reply = on_reply
self._suppress_io = suppress_io
self.callback = None
self.error = None
self.signatures = None
self.responded = False
def send(self, expression):
if not expression:
self.signatures = []
self._respond(False)
return
msg_id = self._client.execute("pass",
store_history=False, allow_stdin=False, silent=True,
user_expressions={'sigs': '__ptvs_repl_sig(' + expression + ')'},
)
self._suppress_io.add(msg_id)
self._on_reply.setdefault((msg_id, 'execute_reply'), []).append(self.signatures_reply)
def _respond(self, success):
if self.responded:
return
if not self.callback:
raise RuntimeError("No callback provider to message handler")
if success:
self.callback(self.signatures)
else:
self.error()
self.responded = True
def signatures_reply(self, message):
if message.content.status != 'ok' or message.content.user_expressions.sigs.status != 'ok':
self._respond(False)
return
self.signatures = eval(message.content.user_expressions.sigs.data['text/plain', '[]'])
self._respond(True)
def set_callback(self, success_callback, error_callback):
self.callback = success_callback
self.error = error_callback
EXEC_HELPERS_COMMAND = """#nohistory
def __ptvs_repl_exec_helpers():
with open(%r, 'rb') as f:
content = f.read().replace('\\r\\n'.encode('ascii'), '\\n'.encode('ascii'))
exec(content, globals())
__ptvs_repl_exec_helpers()
""" % os.path.join(os.path.dirname(os.path.abspath(__file__)), 'jupyter_client-helpers.py')
class JupyterClientBackend(ReplBackend):
def __init__(self, mod_name='__main__', launch_file=None):
super(JupyterClientBackend, self).__init__()
self.__client = None
# This lock will be released when we should shut down
self.__exit = threading.Lock()
self.__exit.acquire()
self.__lock = threading.RLock()
self.__status = 'idle'
self.__msg_buffer = []
self.__cmd_buffer = [EXEC_HELPERS_COMMAND]
self.__on_reply = {}
self.__suppress_io = set()
def execution_loop(self):
"""starts processing execution requests"""
try:
return self._execution_loop()
except:
# TODO: Better fatal error handling
traceback.print_exc()
try:
raw_input()
except NameError:
input()
raise
def _execution_loop(self):
km, kc = jupyter_client.manager.start_new_kernel()
try:
self.exit_requested = False
self.__client = kc
self.send_cwd()
self.__shell_thread = _thread.start_new_thread(self.__shell_threadproc, (kc,))
self.__iopub_thread = _thread.start_new_thread(self.__iopub_threadproc, (kc,))
self.__exit.acquire()
self.send_exit()
finally:
kc.stop_channels()
km.shutdown_kernel(now=True)
def __command_executed(self, msg):
if msg.msg_type == 'execute_reply':
self.__handle_payloads(msg.content['payload'])
self.send_command_executed()
def run_command(self, command):
"""runs the specified command which is a string containing code"""
if self.__client:
with self.__lock:
self.__exec(command, store_history=True, silent=False).append(self.__command_executed)
return True
self.__cmd_buffer.append(command)
return False
def __exec(self, command, store_history=False, allow_stdin=False, silent=True, get_vars=None):
with self.__lock:
msg_id = self.__client.execute(
command,
store_history=store_history,
allow_stdin=allow_stdin,
silent=silent,
user_expressions=get_vars,
)
return self.__on_reply.setdefault((msg_id, 'execute_reply'), [])
def execute_file_ex(self, filetype, filename, args):
"""executes the given filename as a 'script', 'module' or 'process'."""
if filetype == 'process':
command = "!%s %s" % (filename, args)
else:
command = "__ptvs_repl_exec_%s(%r, %r, globals(), locals())" % (filetype, filename, args)
if self.__client:
self.__exec(command, silent=False).append(self.__command_executed)
return True
self.__cmd_buffer.append(command)
return False
def interrupt_main(self):
"""aborts the current running command"""
#raise NotImplementedError
pass
def exit_process(self):
"""exits the REPL process"""
self.exit_requested = True
self.__exit.release()
def get_members(self, expression):
handler = IntrospectHandler(self.__client, self.__on_reply, self.__suppress_io)
with self.__lock:
handler.send(expression)
return handler.set_callback
def get_signatures(self, expression):
"""returns doc, args, vargs, varkw, defaults."""
handler = SignaturesHandler(self.__client, self.__on_reply, self.__suppress_io)
with self.__lock:
handler.send(expression)
return handler.set_callback
def set_current_module(self, module):
"""sets the module which code executes against"""
pass # not supported
def set_current_thread_and_frame(self, thread_id, frame_id, frame_kind):
"""sets the current thread and frame which code will execute against"""
pass # not supported
def get_module_names(self):
"""returns a list of module names"""
return [] # not supported
def flush(self):
"""flushes the stdout/stderr buffers"""
pass
def __shell_threadproc(self, client):
try:
last_exec_count = None
on_replies = self.__on_reply
while not self.exit_requested:
while self.__cmd_buffer and not self.exit_requested:
cmd = self.__cmd_buffer.pop(0)
if cmd.startswith('#nohistory'):
self.__exec(cmd)
else:
self.run_command(cmd)
if self.exit_requested:
break
try:
m = Message(client.get_shell_msg(timeout=0.1))
msg_id = m.msg_id
msg_type = m.msg_type
print('%s: %s' % (msg_type, msg_id))
exec_count = m.content['execution_count', None]
if exec_count != last_exec_count and exec_count is not None:
last_exec_count = exec_count
exec_count = int(exec_count) + 1
ps1 = 'In [%s]: ' % exec_count
ps2 = ' ' * (len(ps1) - 5) + '...: '
self.send_prompt('\n' + ps1, ps2, allow_multiple_statements=True)
parent_id = m.parent_header['msg_id', None]
if parent_id:
on_reply = on_replies.pop((parent_id, msg_type), ())
for callable in on_reply:
callable(m)
except Empty:
pass
except zmq.error.ZMQError:
self.exit_process()
except KeyboardInterrupt:
self.exit_process()
except:
# TODO: Better fatal error handling
traceback.print_exc()
try:
raw_input()
except NameError:
input()
self.exit_process()
def __iopub_threadproc(self, client):
try:
last_exec_count = None
while not self.exit_requested:
m = Message(client.get_iopub_msg(block=True))
if m.parent_header.msg_id in self.__suppress_io:
if m.msg_type != 'status':
self.__suppress_io.discard(m.parent_header.msg_id)
continue
if m.msg_type == 'execute_input':
pass
elif m.msg_type == 'execute_result':
self.__write_result(m.content)
elif m.msg_type == 'display_data':
self.__write_content(m.content)
elif m.msg_type == 'stream':
self.__write_stream(m.content)
elif m.msg_type == 'error':
self.__write_result(m.content, treat_as_error=True)
elif m.msg_type == 'status':
self.__status = m.content['execution_state', 'idle']
else:
print("Received: " + m.msg_type + ":" + str(m) + "\n")
self.write_stdout(str(m) + '\n')
except zmq.error.ZMQError:
self.exit_process()
except KeyboardInterrupt:
self.exit_process()
except:
# TODO: Better fatal error handling
traceback.print_exc()
try:
raw_input()
except NameError:
input()
self.exit_process()
def __write_stream(self, content):
if content.name == 'stderr':
f = self.write_stderr
else:
f = self.write_stdout
text = content.text
if text:
f(text)
def __write_result(self, content, treat_as_error=False):
exec_count = content['execution_count']
if exec_count is not None:
prefix = 'Out [%s]: ' % exec_count
else:
prefix = 'Out: '
if treat_as_error or content['status'] == 'error':
tb = content['traceback']
if tb:
self.write_stderr(prefix + '\n')
for line in tb:
self.write_stderr(line + '\n')
return
if content['status', 'ok'] == 'ok':
output_str = content.data['text/plain']
if output_str is None:
output_str = str(content.data)
if '\n' in output_str:
output_str = '%s\n%s\n' % (prefix, output_str)
else:
output_str = prefix + output_str + '\n'
self.write_stdout(output_str)
return
self.write_stderr(str(content) + '\n')
self.send_error()
def __handle_payloads(self, payloads):
if not payloads:
return
for p in payloads:
print(p['source'], p)
def __write_content(self, content):
if content['status', 'ok'] != 'ok':
return
output_xaml = content.data['application/xaml+xml']
if output_xaml is not None:
try:
if isinstance(output_xaml, str) and sys.version_info[0] >= 3:
output_xaml = output_xaml.encode('ascii')
self.write_xaml(base64.decodestring(output_xaml))
self.write_stdout('\n')
return
except Exception:
if DEBUG:
raise
output_png = content.data['image/png', None]
if output_png is not None:
try:
if isinstance(output_png, str) and sys.version_info[0] >= 3:
output_png = output_png.encode('ascii')
self.write_png(base64.decodestring(output_png))
self.write_stdout('\n')
return
except Exception:
if DEBUG:
raise
| [
"# Python Tools for Visual Studio\n",
"# Copyright(c) Microsoft Corporation\n",
"# All rights reserved.\n",
"# \n",
"# Licensed under the Apache License, Version 2.0 (the License); you may not use\n",
"# this file except in compliance with the License. You may obtain a copy of the\n",
"# License at http://www.apache.org/licenses/LICENSE-2.0\n",
"# \n",
"# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS\n",
"# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY\n",
"# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,\n",
"# MERCHANTABILITY OR NON-INFRINGEMENT.\n",
"# \n",
"# See the Apache Version 2.0 License for specific language governing\n",
"# permissions and limitations under the License.\n",
"\n",
"from __future__ import absolute_import, print_function\n",
"\n",
"\"\"\"Implements REPL support over IPython/ZMQ for VisualStudio\"\"\"\n",
"\n",
"__author__ = \"Microsoft Corporation <ptvshelp@microsoft.com>\"\n",
"__version__ = \"3.2.1.0\"\n",
"\n",
"import ast\n",
"import base64\n",
"import errno\n",
"import os\n",
"import re\n",
"import sys\n",
"import threading\n",
"import time\n",
"import traceback\n",
"from ptvsd.repl import BasicReplBackend, ReplBackend, UnsupportedReplException, _command_line_to_args_list, DEBUG\n",
"from ptvsd.util import to_bytes\n",
"\n",
"try:\n",
" import jupyter_client\n",
" import jupyter_client.manager\n",
" import zmq.error\n",
"except ImportError:\n",
" raise UnsupportedReplException(\"Jupyter mode requires the jupyter_client and ipykernel packages. \" + traceback.format_exc())\n",
"\n",
"try:\n",
" import _thread\n",
"except ImportError:\n",
" import thread as _thread # legacy name\n",
"\n",
"try:\n",
" from queue import Empty\n",
"except ImportError:\n",
" from Queue import Empty\n",
"\n",
"# Use safer eval\n",
"eval = ast.literal_eval\n",
"\n",
"class Message(object):\n",
" _sentinel = object()\n",
"\n",
" def __init__(self, msg):\n",
" self._m = msg\n",
" self._read = False\n",
"\n",
" def __getattr__(self, attr):\n",
" v = self[attr, self._sentinel]\n",
" if v is self._sentinel:\n",
" return Message({})\n",
" if isinstance(v, dict):\n",
" return Message(v)\n",
" return v\n",
"\n",
" def __getitem__(self, key):\n",
" if isinstance(key, tuple):\n",
" key, default_value = key\n",
" else:\n",
" default_value = None\n",
" if not self._m:\n",
" return self\n",
" try:\n",
" v = self._m[key]\n",
" except KeyError:\n",
" return default_value\n",
" if isinstance(v, dict):\n",
" return Message(v)\n",
" return v\n",
"\n",
" def __repr__(self):\n",
" return repr(self._m)\n",
"\n",
"class IntrospectHandler(object):\n",
" def __init__(self, client, on_reply, suppress_io):\n",
" self._client = client\n",
" self._on_reply = on_reply\n",
" self._suppress_io = suppress_io\n",
" self.callback = None\n",
" self.error = None\n",
" self.typename = None\n",
" self.members = None\n",
" self.responded = False\n",
"\n",
" def send(self, expression):\n",
" if not expression:\n",
" self.typename = ''\n",
" msg_id = self._client.complete(\"\")\n",
" self._suppress_io.add(msg_id)\n",
" self._on_reply.setdefault((msg_id, 'complete_reply'), []).append(self.complete_reply)\n",
" else:\n",
" msg_id = self._client.execute(\"_=\" + expression,\n",
" store_history=False, allow_stdin=False, silent=True,\n",
" user_expressions={'m': 'getattr(type(_), \"__module__\", \"\") + \".\" + type(_).__name__'},\n",
" )\n",
" self._suppress_io.add(msg_id)\n",
" self._on_reply.setdefault((msg_id, 'execute_reply'), []).append(self.typename_reply)\n",
" msg_id = self._client.complete(expression + '.')\n",
" self._suppress_io.add(msg_id)\n",
" self._on_reply.setdefault((msg_id, 'complete_reply'), []).append(self.complete_reply)\n",
"\n",
" def _respond(self, success):\n",
" if self.responded:\n",
" return\n",
" if not self.callback:\n",
" raise RuntimeError(\"No callback provider to message handler\")\n",
" if success:\n",
" self.callback(self.typename, self.members, {})\n",
" else:\n",
" self.error()\n",
" self.responded = True\n",
"\n",
" def complete_reply(self, message):\n",
" if message.content.status != 'ok':\n",
" self._respond(False)\n",
" return\n",
" self.members = dict((m.rpartition('.')[-1], '') for m in message.content['matches', ()])\n",
" omit = [m for m in self.members if m.startswith('__ptvs_repl_')]\n",
" for m in omit:\n",
" del self.members[m]\n",
" if self.typename is not None:\n",
" self._respond(True)\n",
"\n",
" def typename_reply(self, message):\n",
" if message.content.status != 'ok' or message.content.user_expressions.m.status != 'ok':\n",
" self._respond(False)\n",
" return\n",
" self.typename = eval(message.content.user_expressions.m.data['text/plain', '\"object\"'])\n",
" m, _, n = self.typename.partition('.')\n",
" if m == type(int).__module__:\n",
" self.typename = n\n",
" if self.members is not None:\n",
" self._respond(True)\n",
"\n",
" def set_callback(self, success_callback, error_callback):\n",
" self.callback = success_callback\n",
" self.error = error_callback\n",
"\n",
"class SignaturesHandler(object):\n",
" def __init__(self, client, on_reply, suppress_io):\n",
" self._client = client\n",
" self._on_reply = on_reply\n",
" self._suppress_io = suppress_io\n",
" self.callback = None\n",
" self.error = None\n",
" self.signatures = None\n",
" self.responded = False\n",
"\n",
" def send(self, expression):\n",
" if not expression:\n",
" self.signatures = []\n",
" self._respond(False)\n",
" return\n",
"\n",
" msg_id = self._client.execute(\"pass\",\n",
" store_history=False, allow_stdin=False, silent=True,\n",
" user_expressions={'sigs': '__ptvs_repl_sig(' + expression + ')'},\n",
" )\n",
" self._suppress_io.add(msg_id)\n",
" self._on_reply.setdefault((msg_id, 'execute_reply'), []).append(self.signatures_reply)\n",
"\n",
" def _respond(self, success):\n",
" if self.responded:\n",
" return\n",
" if not self.callback:\n",
" raise RuntimeError(\"No callback provider to message handler\")\n",
" if success:\n",
" self.callback(self.signatures)\n",
" else:\n",
" self.error()\n",
" self.responded = True\n",
"\n",
" def signatures_reply(self, message):\n",
" if message.content.status != 'ok' or message.content.user_expressions.sigs.status != 'ok':\n",
" self._respond(False)\n",
" return\n",
" self.signatures = eval(message.content.user_expressions.sigs.data['text/plain', '[]'])\n",
" self._respond(True)\n",
"\n",
" def set_callback(self, success_callback, error_callback):\n",
" self.callback = success_callback\n",
" self.error = error_callback\n",
"\n",
"EXEC_HELPERS_COMMAND = \"\"\"#nohistory\n",
"def __ptvs_repl_exec_helpers():\n",
" with open(%r, 'rb') as f:\n",
" content = f.read().replace('\\\\r\\\\n'.encode('ascii'), '\\\\n'.encode('ascii'))\n",
" exec(content, globals())\n",
"__ptvs_repl_exec_helpers()\n",
"\"\"\" % os.path.join(os.path.dirname(os.path.abspath(__file__)), 'jupyter_client-helpers.py')\n",
"\n",
"class JupyterClientBackend(ReplBackend):\n",
" def __init__(self, mod_name='__main__', launch_file=None):\n",
" super(JupyterClientBackend, self).__init__()\n",
" self.__client = None\n",
"\n",
" # This lock will be released when we should shut down\n",
" self.__exit = threading.Lock()\n",
" self.__exit.acquire()\n",
"\n",
" self.__lock = threading.RLock()\n",
" self.__status = 'idle'\n",
" self.__msg_buffer = []\n",
" self.__cmd_buffer = [EXEC_HELPERS_COMMAND]\n",
" self.__on_reply = {}\n",
" self.__suppress_io = set()\n",
"\n",
" def execution_loop(self):\n",
" \"\"\"starts processing execution requests\"\"\"\n",
" try:\n",
" return self._execution_loop()\n",
" except:\n",
" # TODO: Better fatal error handling\n",
" traceback.print_exc()\n",
" try:\n",
" raw_input()\n",
" except NameError:\n",
" input()\n",
" raise\n",
"\n",
" def _execution_loop(self):\n",
" km, kc = jupyter_client.manager.start_new_kernel()\n",
" try:\n",
" self.exit_requested = False\n",
" self.__client = kc\n",
" self.send_cwd()\n",
"\n",
" self.__shell_thread = _thread.start_new_thread(self.__shell_threadproc, (kc,))\n",
" self.__iopub_thread = _thread.start_new_thread(self.__iopub_threadproc, (kc,))\n",
"\n",
" self.__exit.acquire()\n",
"\n",
" self.send_exit()\n",
" finally:\n",
" kc.stop_channels()\n",
" km.shutdown_kernel(now=True)\n",
"\n",
" def __command_executed(self, msg):\n",
" if msg.msg_type == 'execute_reply':\n",
" self.__handle_payloads(msg.content['payload'])\n",
"\n",
" self.send_command_executed()\n",
"\n",
" def run_command(self, command):\n",
" \"\"\"runs the specified command which is a string containing code\"\"\"\n",
" if self.__client:\n",
" with self.__lock:\n",
" self.__exec(command, store_history=True, silent=False).append(self.__command_executed)\n",
" return True\n",
"\n",
" self.__cmd_buffer.append(command)\n",
" return False\n",
"\n",
" def __exec(self, command, store_history=False, allow_stdin=False, silent=True, get_vars=None):\n",
" with self.__lock:\n",
" msg_id = self.__client.execute(\n",
" command,\n",
" store_history=store_history,\n",
" allow_stdin=allow_stdin,\n",
" silent=silent,\n",
" user_expressions=get_vars,\n",
" )\n",
" return self.__on_reply.setdefault((msg_id, 'execute_reply'), [])\n",
"\n",
" def execute_file_ex(self, filetype, filename, args):\n",
" \"\"\"executes the given filename as a 'script', 'module' or 'process'.\"\"\"\n",
" if filetype == 'process':\n",
" command = \"!%s %s\" % (filename, args)\n",
" else:\n",
" command = \"__ptvs_repl_exec_%s(%r, %r, globals(), locals())\" % (filetype, filename, args)\n",
" if self.__client:\n",
" self.__exec(command, silent=False).append(self.__command_executed)\n",
" return True\n",
"\n",
" self.__cmd_buffer.append(command)\n",
" return False\n",
"\n",
" def interrupt_main(self):\n",
" \"\"\"aborts the current running command\"\"\"\n",
" #raise NotImplementedError\n",
" pass\n",
"\n",
" def exit_process(self):\n",
" \"\"\"exits the REPL process\"\"\"\n",
" self.exit_requested = True\n",
" self.__exit.release()\n",
"\n",
" def get_members(self, expression):\n",
" handler = IntrospectHandler(self.__client, self.__on_reply, self.__suppress_io)\n",
" with self.__lock:\n",
" handler.send(expression)\n",
" return handler.set_callback\n",
"\n",
" def get_signatures(self, expression):\n",
" \"\"\"returns doc, args, vargs, varkw, defaults.\"\"\"\n",
" handler = SignaturesHandler(self.__client, self.__on_reply, self.__suppress_io)\n",
" with self.__lock:\n",
" handler.send(expression)\n",
" return handler.set_callback\n",
"\n",
" def set_current_module(self, module):\n",
" \"\"\"sets the module which code executes against\"\"\"\n",
" pass # not supported\n",
"\n",
" def set_current_thread_and_frame(self, thread_id, frame_id, frame_kind):\n",
" \"\"\"sets the current thread and frame which code will execute against\"\"\"\n",
" pass # not supported\n",
"\n",
" def get_module_names(self):\n",
" \"\"\"returns a list of module names\"\"\"\n",
" return [] # not supported\n",
"\n",
" def flush(self):\n",
" \"\"\"flushes the stdout/stderr buffers\"\"\"\n",
" pass\n",
"\n",
" def __shell_threadproc(self, client):\n",
" try:\n",
" last_exec_count = None\n",
" on_replies = self.__on_reply\n",
" while not self.exit_requested:\n",
" while self.__cmd_buffer and not self.exit_requested:\n",
" cmd = self.__cmd_buffer.pop(0)\n",
" if cmd.startswith('#nohistory'):\n",
" self.__exec(cmd)\n",
" else:\n",
" self.run_command(cmd)\n",
" if self.exit_requested:\n",
" break\n",
"\n",
" try:\n",
" m = Message(client.get_shell_msg(timeout=0.1))\n",
" msg_id = m.msg_id\n",
" msg_type = m.msg_type\n",
"\n",
" print('%s: %s' % (msg_type, msg_id))\n",
"\n",
" exec_count = m.content['execution_count', None]\n",
" if exec_count != last_exec_count and exec_count is not None:\n",
" last_exec_count = exec_count\n",
" exec_count = int(exec_count) + 1\n",
" ps1 = 'In [%s]: ' % exec_count\n",
" ps2 = ' ' * (len(ps1) - 5) + '...: '\n",
" self.send_prompt('\\n' + ps1, ps2, allow_multiple_statements=True)\n",
"\n",
" parent_id = m.parent_header['msg_id', None]\n",
" if parent_id:\n",
" on_reply = on_replies.pop((parent_id, msg_type), ())\n",
" for callable in on_reply:\n",
" callable(m)\n",
" except Empty:\n",
" pass\n",
"\n",
" except zmq.error.ZMQError:\n",
" self.exit_process()\n",
" except KeyboardInterrupt:\n",
" self.exit_process()\n",
" except:\n",
" # TODO: Better fatal error handling\n",
" traceback.print_exc()\n",
" try:\n",
" raw_input()\n",
" except NameError:\n",
" input()\n",
" self.exit_process()\n",
"\n",
" def __iopub_threadproc(self, client):\n",
" try:\n",
" last_exec_count = None\n",
" while not self.exit_requested:\n",
" m = Message(client.get_iopub_msg(block=True))\n",
"\n",
" if m.parent_header.msg_id in self.__suppress_io:\n",
" if m.msg_type != 'status':\n",
" self.__suppress_io.discard(m.parent_header.msg_id)\n",
" continue\n",
"\n",
" if m.msg_type == 'execute_input':\n",
" pass\n",
" elif m.msg_type == 'execute_result':\n",
" self.__write_result(m.content)\n",
" elif m.msg_type == 'display_data':\n",
" self.__write_content(m.content)\n",
" elif m.msg_type == 'stream':\n",
" self.__write_stream(m.content)\n",
" elif m.msg_type == 'error':\n",
" self.__write_result(m.content, treat_as_error=True)\n",
" elif m.msg_type == 'status':\n",
" self.__status = m.content['execution_state', 'idle']\n",
" else:\n",
" print(\"Received: \" + m.msg_type + \":\" + str(m) + \"\\n\")\n",
" self.write_stdout(str(m) + '\\n')\n",
"\n",
" except zmq.error.ZMQError:\n",
" self.exit_process()\n",
" except KeyboardInterrupt:\n",
" self.exit_process()\n",
" except:\n",
" # TODO: Better fatal error handling\n",
" traceback.print_exc()\n",
" try:\n",
" raw_input()\n",
" except NameError:\n",
" input()\n",
" self.exit_process()\n",
"\n",
" def __write_stream(self, content):\n",
" if content.name == 'stderr':\n",
" f = self.write_stderr\n",
" else:\n",
" f = self.write_stdout\n",
" text = content.text\n",
" if text:\n",
" f(text)\n",
"\n",
" def __write_result(self, content, treat_as_error=False):\n",
" exec_count = content['execution_count']\n",
" if exec_count is not None:\n",
" prefix = 'Out [%s]: ' % exec_count\n",
" else:\n",
" prefix = 'Out: '\n",
"\n",
" if treat_as_error or content['status'] == 'error':\n",
" tb = content['traceback']\n",
" if tb:\n",
" self.write_stderr(prefix + '\\n')\n",
" for line in tb:\n",
" self.write_stderr(line + '\\n')\n",
" return\n",
"\n",
" if content['status', 'ok'] == 'ok':\n",
" output_str = content.data['text/plain']\n",
" if output_str is None:\n",
" output_str = str(content.data)\n",
" if '\\n' in output_str:\n",
" output_str = '%s\\n%s\\n' % (prefix, output_str)\n",
" else:\n",
" output_str = prefix + output_str + '\\n'\n",
" self.write_stdout(output_str)\n",
" return\n",
"\n",
" self.write_stderr(str(content) + '\\n')\n",
" self.send_error()\n",
"\n",
" def __handle_payloads(self, payloads):\n",
" if not payloads:\n",
" return\n",
" for p in payloads:\n",
" print(p['source'], p)\n",
"\n",
" def __write_content(self, content):\n",
" if content['status', 'ok'] != 'ok':\n",
" return\n",
"\n",
" output_xaml = content.data['application/xaml+xml']\n",
" if output_xaml is not None:\n",
" try:\n",
" if isinstance(output_xaml, str) and sys.version_info[0] >= 3:\n",
" output_xaml = output_xaml.encode('ascii')\n",
" self.write_xaml(base64.decodestring(output_xaml))\n",
" self.write_stdout('\\n')\n",
" return\n",
" except Exception:\n",
" if DEBUG:\n",
" raise\n",
"\n",
" output_png = content.data['image/png', None]\n",
" if output_png is not None:\n",
" try:\n",
" if isinstance(output_png, str) and sys.version_info[0] >= 3:\n",
" output_png = output_png.encode('ascii')\n",
" self.write_png(base64.decodestring(output_png))\n",
" self.write_stdout('\\n')\n",
" return\n",
" except Exception:\n",
" if DEBUG:\n",
" raise\n",
"\n"
] | [
0,
0,
0,
0.3333333333333333,
0,
0,
0,
0.3333333333333333,
0,
0,
0,
0,
0.3333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008771929824561403,
0,
0,
0,
0,
0,
0,
0,
0.007751937984496124,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0.014492753623188406,
0.019417475728155338,
0,
0,
0.010309278350515464,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015384615384615385,
0.01282051282051282,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0.011904761904761904,
0,
0,
0.010869565217391304,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1
] | 493 | 0.005403 | false |
# -*- coding: utf-8 -*-
r"""
Comparison of 1D-bent crystal analyzers
---------------------------------------
Files in ``\examples\withRaycing\06_AnalyzerBent1D``
Rowland circle based analyzers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This study compares simply bent and ground-bent spectrometers utilizing Bragg
and Laue crystals. The bending is cylindrical (one-dimensional).
.. imagezoom:: _images/BraggLaue.*
:Conditions: Rowland circle diameter = 1 m, 70v × 200h µm² unpolarized
fluorescence source, crystal size = 100meridional × 20saggittal mm².
The energy resolution was calculated as described in `the CDR of a diced
Johansson-like spectrometer at Alba/CLÆSS beamline
<http://www.cells.es/Beamlines/CLAESS/EXD-BL22-FA-0001v4.0.pdf>`_. This
requires two images: 1) of a flat energy distribution source and 2) of a
monochromatic source. The image is energy dispersive in the diffraction plane,
which can be used in practice with a position sensitive detector or with a slit
scan in front of a bulk detector. From these two images the energy resolution
*δE* was calculated and then 3) a verifying image was ray-traced for a source
of 7 energy lines evenly spaced with the found step *δE*. Such images are shown
for the four crystal geometries at a particular Bragg angle:
+----------+---------------------+---------------------+---------------------+
| geometry | flat source | line source | 7 lines |
+==========+=====================+=====================+=====================+
| Bragg | | | |
| simply | |bb_flat| | |bb_line| | |bb_7lin| |
| bent | | | |
+----------+---------------------+---------------------+---------------------+
| Bragg | | | |
| ground | |bg_flat| | |bg_line| | |bg_7lin| |
| bent | | | |
+----------+---------------------+---------------------+---------------------+
| Laue | | | |
| simply | |lb_flat| | |lb_line| | |lb_7lin| |
| bent | | | |
+----------+---------------------+---------------------+---------------------+
| Laue | | | |
| ground | |lg_flat| | |lg_line| | |lg_7lin| |
| bent | | | |
+----------+---------------------+---------------------+---------------------+
.. |bb_flat| imagezoom:: _images/1D-01b-Si444-60-det_E-flat.*
.. |bb_line| imagezoom:: _images/1D-01b-Si444-60-det_E-line.*
.. |bb_7lin| imagezoom:: _images/1D-01b-Si444-60-det_E-7lin.*
:loc: upper-right-corner
.. |bg_flat| imagezoom:: _images/1D-02gb-Si444-60-det_E-flat.*
.. |bg_line| imagezoom:: _images/1D-02gb-Si444-60-det_E-line.*
.. |bg_7lin| imagezoom:: _images/1D-02gb-Si444-60-det_E-7lin.*
:loc: upper-right-corner
.. |lb_flat| imagezoom:: _images/1D-03lb-Si444-60-det_E-flat.*
.. |lb_line| imagezoom:: _images/1D-03lb-Si444-60-det_E-line.*
.. |lb_7lin| imagezoom:: _images/1D-03lb-Si444-60-det_E-7lin.*
:loc: upper-right-corner
.. |lg_flat| imagezoom:: _images/1D-04lgb-Si444-60-det_E-flat.*
.. |lg_line| imagezoom:: _images/1D-04lgb-Si444-60-det_E-line.*
.. |lg_7lin| imagezoom:: _images/1D-04lgb-Si444-60-det_E-7lin.*
:loc: upper-right-corner
The energy distribution over the crystal surface is hyperbolic for Bragg and
ellipsoidal for Laue crystals. Therefore, Laue crystals have limited acceptance
in the sagittal direction whereas Bragg crystals have the hyperbola branches
even for large sagittal sizes. Notice the full crystal coverage in the
meridional direction for the two ground-bent cases.
+----------+---------------------+---------------------+---------------------+
| geometry | flat source | line source | 7 lines |
+==========+=====================+=====================+=====================+
| Bragg | | | |
| simply | |xbb_flat| | |xbb_line| | |xbb_7lin| |
| bent | | | |
+----------+---------------------+---------------------+---------------------+
| Bragg | | | |
| ground | |xbg_flat| | |xbg_line| | |xbg_7lin| |
| bent | | | |
+----------+---------------------+---------------------+---------------------+
| Laue | | | |
| simply | |xlb_flat| | |xlb_line| | |xlb_7lin| |
| bent | | | |
+----------+---------------------+---------------------+---------------------+
| Laue | | | |
| ground | |xlg_flat| | |xlg_line| | |xlg_7lin| |
| bent | | | |
+----------+---------------------+---------------------+---------------------+
.. |xbb_flat| imagezoom:: _images/1D-01b-Si444-60-xtal_E-flat.*
.. |xbb_line| imagezoom:: _images/1D-01b-Si444-60-xtal_E-line.*
.. |xbb_7lin| imagezoom:: _images/1D-01b-Si444-60-xtal_E-7lin.*
:loc: upper-right-corner
.. |xbg_flat| imagezoom:: _images/1D-02gb-Si444-60-xtal_E-flat.*
.. |xbg_line| imagezoom:: _images/1D-02gb-Si444-60-xtal_E-line.*
.. |xbg_7lin| imagezoom:: _images/1D-02gb-Si444-60-xtal_E-7lin.*
:loc: upper-right-corner
.. |xlb_flat| imagezoom:: _images/1D-03lb-Si444-60-xtal_E-flat.*
.. |xlb_line| imagezoom:: _images/1D-03lb-Si444-60-xtal_E-line.*
.. |xlb_7lin| imagezoom:: _images/1D-03lb-Si444-60-xtal_E-7lin.*
:loc: upper-right-corner
.. |xlg_flat| imagezoom:: _images/1D-04lgb-Si444-60-xtal_E-flat.*
.. |xlg_line| imagezoom:: _images/1D-04lgb-Si444-60-xtal_E-line.*
.. |xlg_7lin| imagezoom:: _images/1D-04lgb-Si444-60-xtal_E-7lin.*
:loc: upper-right-corner
As a matter of principles checking, let us consider how the initially
unpolarized beam becomes partially polarized after being diffracted by the
crystal analyzer. As expected, the beam is fully polarized at 45° Bragg angle
(Brewster angle in x-ray regime). CAxis here is degree of polarization:
+-------------+-------------+
| Bragg | Laue |
+=============+=============+
| |DPBragg| | |DPLaue| |
+-------------+-------------+
.. |DPBragg| animation:: _images/1D-DegOfPol_Bragg
.. |DPLaue| animation:: _images/1D-DegOfPol_Laue
.. rubric:: Comments
1) The ground-bent crystals are more efficient as their whole surface works
for a single energy, as opposed to simply bent crystals which have different
parts reflecting the rays of different energies.
2) When the crystal is close to the source (small θ for Bragg and large θ for
Laue), the images are distorted, even for the ground-bent crystals.
3) The Bragg case requires small pixel size in the meridional direction (~10 µm
for 1-m-diameter Rowland circle) for a good spatial resolution but can
profit from its compactness. The Laue case requires a big detector of a size
comparable to that of the crystal but the pixel size is not required to be
small.
4) The comparison of energy resolution in Bragg and Laue cases is not strictly
correct here. While the former case can use the small beam size at the
detector for utilizing energy dispersive property of the spectrometer, the
latter one has a big image at the detector which is restricted by the size
of the crystal. The size of the 'white' beam image is therefore correct only
for the crystal size selected here. The Laue case can still be used in
energy dispersive regime if 2D image analysis is utilized. At the present
conditions, the energy resolution of Bragg crystals is better than that of
Laue crystals except at small Bragg angles and low diffraction orders.
5) The energy resolution in ground-bent cases is not always better than that
in simply bent cases because of strongly curved images. If the sagittal size
of the crystal is smaller or :ref:`sagittal bending is used
<dicedBentAnalyzers>`, the advantage of ground-bent crystals is clearly
visible not only in terms of efficiency but also in terms of energy
resolution.
.. _VonHamos:
Von Hamos analyzer
~~~~~~~~~~~~~~~~~~
A von Hamos spectrometer has axial symmetry around the axis connecting the
source and the detector. The analyzing crystal is cylindrically bent with the
radius equal to the crystal-to-axis distance. In this scheme, the emission
escape direction depends on the Bragg angle (energy). In practice, the
spectrometer axis is adapted such that the escape direction is appropriate for
a given sample setup. In particular, the escape direction can be kept in back
scattering (relatively to the sample), see the figure below. In the latter case
the mechanical model is more complex and includes three translations and two
rotations. In the figure below, the crystal is sagittally curved around the
source–detector line. The detector plane is perpendicular to the sketch.
Left: the classical setup [vH]_ with 2 translations.
Right: the setup with an invariant escape direction.
.. imagezoom:: _images/vonHamosPositionsClassic.*
.. imagezoom:: _images/vonHamosPositionsFixedEscape.*
The geometrical parameters for the von Hamos spectrometer were taken from
[vH_SLS]_: a diced 100 (sagittal) × 50 (meridional) mm² Si(444) crystal is
curved with Rs = 250 mm. The width of segmented facets was taken equal to 5 mm
(as in [vH_SLS]_) and 1 mm together with a continuously bent case.
.. [vH] L. von Hámos, *Röntgenspektroskopie und Abbildung mittels gekrümmter
Kristallreflektoren II. Beschreibung eines fokussierenden Spektrographen mit
punktgetreuer Spaltabbildung*, Annalen der Physik **411** (1934) 252–260
.. [vH_SLS] J. Szlachetko, M. Nachtegaal, E. de Boni, M. Willimann,
O. Safonova, J. Sa, G. Smolentsev, M. Szlachetko, J. A. van Bokhoven,
J.-Cl. Dousse, J. Hoszowska, Y. Kayser, P. Jagodzinski, A. Bergamaschi,
B. Schmitt, C. David, and A. Lücke, *A von Hamos x-ray spectrometer based on
a segmented-type diffraction crystal for single-shot x-ray emission
spectroscopy and time-resolved resonant inelastic x-ray scattering studies*,
Rev. Sci. Instrum. **83** (2012) 103105.
The calculation of energy resolution requires two detector images: 1) of a flat
energy distribution source and 2) of a monochromatic source. From these two
images the energy resolution *δE* was calculated and then 3) a verifying image
was ray-traced for a source of 7 energy lines evenly spaced with the found step
*δE*. Such images are shown for different dicing sizes at a particular Bragg
angle.
+---------+--------------------+--------------------+--------------------+
| crystal | flat source | line source | 7 lines |
+=========+====================+====================+====================+
| diced | | | |
| 5 mm | |vH5_flat| | |vH5_line| | |vH5_7lin| |
+---------+--------------------+--------------------+--------------------+
| diced | | | |
| 1 mm | |vH1_flat| | |vH1_line| | |vH1_7lin| |
+---------+--------------------+--------------------+--------------------+
| not | | | |
| diced | |vHc_flat| | |vHc_line| | |vHc_7lin| |
+---------+--------------------+--------------------+--------------------+
.. |vH5_flat| imagezoom:: _images/SivonHamos-5mmDiced60-det_E-flat.*
.. |vH5_line| imagezoom:: _images/SivonHamos-5mmDiced60-det_E-line.*
.. |vH5_7lin| imagezoom:: _images/SivonHamos-5mmDiced60-det_E-7lin.*
:loc: upper-right-corner
.. |vH1_flat| imagezoom:: _images/SivonHamos-1mmDiced60-det_E-flat.*
.. |vH1_line| imagezoom:: _images/SivonHamos-1mmDiced60-det_E-line.*
.. |vH1_7lin| imagezoom:: _images/SivonHamos-1mmDiced60-det_E-7lin.*
:loc: upper-right-corner
.. |vHc_flat| imagezoom:: _images/SivonHamos-notDiced60-det_E-flat.*
.. |vHc_line| imagezoom:: _images/SivonHamos-notDiced60-det_E-line.*
.. |vHc_7lin| imagezoom:: _images/SivonHamos-notDiced60-det_E-7lin.*
:loc: upper-right-corner
With the coloring by stripe (crystal facet) number, the image below explains
why energy resolution is worse when stripes are wider and the crystal is
sagittally larger. The peripheral stripes contribute to aberrations which
increase the detector image.
+------------+--------------------------------------+
| crystal | line source colored by stripe number |
+============+======================================+
| diced 5 mm | |vH5_line_stripes| |
+------------+--------------------------------------+
| diced 1 mm | |vH1_line_stripes| |
+------------+--------------------------------------+
.. |vH5_line_stripes| imagezoom:: _images/SivonHamos-5mmDiced60-det_stripes-line.*
.. |vH1_line_stripes| imagezoom:: _images/SivonHamos-1mmDiced60-det_stripes-line.*
The efficiency of a von Hamos spectrometer is significantly lower as compared
to Johann and Johansson crystals. The reason for the lower efficiency can be
understood from the figure below, where the magnified footprint on the crystal
is shown: only a narrow part of the crystal surface contributes to a given
energy band. Here, in the 5-mm-stripe case a bandwidth of ~12 eV uses less than
1 mm of the crystal!
.. imagezoom:: _images/SivonHamos-5mmDiced60-xtal_E_zoom-7lin.*
Comparison of Rowland circle based and von Hamos analyzers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
An additional case was also included here: when a Johann crystal is rotated by
90° around the sample-to-crystal line, it becomes a von Hamos crystal that has
to be put at a correct distance corresponding to the 1 m sagittal radius. This
case is labelled as “Johann as von Hamos”.
In comparing with a von Hamos spectrometer, one should realize its strongest
advantage – inherent energy dispersive operation without a need for energy
scan. This advantage is especially important for broad emission lines. Below,
the comparison is made for two cases: (1) a narrow energy band (left figure),
which is more interesting for valence band RIXS and which assumes a high
resolution monochromator in the primary beam and (2) a wide energy band (right
figure), which is more interesting for core state RIXS and normal fluorescence
detection. The desired position on the charts is in the upper left corner. As
seen in the figures, the efficiency of the von Hamos crystals (i) is
independent of the energy band (equal for the left and right charts), which
demonstrates truly energy-dispersive behavior of the crystals but (ii) is
significantly lower as compared to the Johann and Johansson crystals. A way to
increase efficiency is to place the crystal closer to the source, which
obviously worsens energy resolution because of the increased angular source
size. Inversely, if the crystal is put at a further distance, the energy
resolution is improved (square symbols) but the efficiency is low because of a
smaller solid angle collected. The left figure is with a narrow energy band
equal to the 6-fold energy resolution. The right figure is with a wide energy
band equal to 8·10 :sup:`-4`·E (approximate width of K β lines [Henke]_).
.. imagezoom:: _images/ResolutionEfficiency1D-narrowBand.*
.. imagezoom:: _images/ResolutionEfficiency1D-8e-4Band.*
Finally, among the compared 1D-bent spectrometers the Johansson type is the
best in the combination of good energy resolution and high efficiency. It is
the only one that can function both as a high resolution spectrometer and a
fluorescence detector. One should bear in mind, however, two very strong
advantages of von Hamos spectrometers: (1) they do not need alignment – a
crystal and a detector positioned approximately will most probably immediately
work and (2) the image is inherently energy dispersive with a flat (energy
independent) detector response. The low efficiency and mediocre energy
resolution are a price for the commissioning-free energy dispersive operation.
Rowland circle based spectrometers will always require good alignment, and
among them only the Johansson-type spectrometer can be made energy dispersive
with a flat detector response.
.. _elliptical_VonHamos:
Circular and elliptical von Hamos analyzers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The axial symmetry of the classical von Hamos spectrometer [vH]_ results in a
close detector-to-sample position at large Bragg angles. A single detector may
find enough space there but when the spectrometer has several branches, the
corresponding detectors come close to each other, which restricts both the
space around the sample and the accessible Bragg angle range. A solution to
this problem could be an increased magnification of the crystal from the
classical 1:1. The axis of the circular cylinder is then split into *two* axes
representing the two foci of an ellipsoid, see the scheme below. The lower axis
holds the source (sample) and the upper one holds the detector. The crystal in
the figure has the magnification 1:1 for the circular crystal (left part) and
1.5:1 for the elliptical one (right part).
.. imagezoom:: _images/CircularAndElliptical_vonHamos_s.*
The crystal is diced along the cylinder axis with 1 mm pitch. The difference in
the circular and elliptical figures is shown below.
.. imagezoom:: _images/Cylinders.*
The elliptical figure results in some aberrations, as seen by the monochromatic
images below, which worsens energy resolution.
+------------+--------------------+--------------------+--------------------+
| crystal | flat source | line source | 7 lines |
+============+====================+====================+====================+
| bent as | | | |
| circular | |circ_flat| | |circ_line| | |circ_7lin| |
| cylinder | | | |
+------------+--------------------+--------------------+--------------------+
| bent as | | | |
| elliptical | |ell_flat| | |ell_line| | |ell_7lin| |
| cylinder | | | |
+------------+--------------------+--------------------+--------------------+
.. |circ_flat| imagezoom:: _images/SivonHamosDicedCircular60-det_E-flat.*
.. |circ_line| imagezoom:: _images/SivonHamosDicedCircular60-det_E-line.*
.. |circ_7lin| imagezoom:: _images/SivonHamosDicedCircular60-det_E-7lin.*
:loc: upper-right-corner
.. |ell_flat| imagezoom:: _images/SivonHamosDicedElliptical60-det_E-flat.*
.. |ell_line| imagezoom:: _images/SivonHamosDicedElliptical60-det_E-line.*
.. |ell_7lin| imagezoom:: _images/SivonHamosDicedElliptical60-det_E-7lin.*
:loc: upper-right-corner
"""
pass
| [
"# -*- coding: utf-8 -*-\n",
"r\"\"\"\n",
"Comparison of 1D-bent crystal analyzers\n",
"---------------------------------------\n",
"\n",
"Files in ``\\examples\\withRaycing\\06_AnalyzerBent1D``\n",
"\n",
"Rowland circle based analyzers\n",
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
"\n",
"This study compares simply bent and ground-bent spectrometers utilizing Bragg\n",
"and Laue crystals. The bending is cylindrical (one-dimensional).\n",
"\n",
".. imagezoom:: _images/BraggLaue.*\n",
"\n",
":Conditions: Rowland circle diameter = 1 m, 70v × 200h µm² unpolarized\n",
" fluorescence source, crystal size = 100meridional × 20sagittal mm².\n",
"\n",
"The energy resolution was calculated as described in `the CDR of a diced\n",
"Johansson-like spectrometer at Alba/CLÆSS beamline\n",
"<http://www.cells.es/Beamlines/CLAESS/EXD-BL22-FA-0001v4.0.pdf>`_. This\n",
"requires two images: 1) of a flat energy distribution source and 2) of a\n",
"monochromatic source. The image is energy dispersive in the diffraction plane,\n",
"which can be used in practice with a position sensitive detector or with a slit\n",
"scan in front of a bulk detector. From these two images the energy resolution\n",
"*δE* was calculated and then 3) a verifying image was ray-traced for a source\n",
"of 7 energy lines evenly spaced with the found step *δE*. Such images are shown\n",
"for the four crystal geometries at a particular Bragg angle:\n",
"\n",
"+----------+---------------------+---------------------+---------------------+\n",
"| geometry | flat source | line source | 7 lines |\n",
"+==========+=====================+=====================+=====================+\n",
"| Bragg | | | |\n",
"| simply | |bb_flat| | |bb_line| | |bb_7lin| |\n",
"| bent | | | |\n",
"+----------+---------------------+---------------------+---------------------+\n",
"| Bragg | | | |\n",
"| ground | |bg_flat| | |bg_line| | |bg_7lin| |\n",
"| bent | | | |\n",
"+----------+---------------------+---------------------+---------------------+\n",
"| Laue | | | |\n",
"| simply | |lb_flat| | |lb_line| | |lb_7lin| |\n",
"| bent | | | |\n",
"+----------+---------------------+---------------------+---------------------+\n",
"| Laue | | | |\n",
"| ground | |lg_flat| | |lg_line| | |lg_7lin| |\n",
"| bent | | | |\n",
"+----------+---------------------+---------------------+---------------------+\n",
"\n",
".. |bb_flat| imagezoom:: _images/1D-01b-Si444-60-det_E-flat.*\n",
".. |bb_line| imagezoom:: _images/1D-01b-Si444-60-det_E-line.*\n",
".. |bb_7lin| imagezoom:: _images/1D-01b-Si444-60-det_E-7lin.*\n",
" :loc: upper-right-corner\n",
".. |bg_flat| imagezoom:: _images/1D-02gb-Si444-60-det_E-flat.*\n",
".. |bg_line| imagezoom:: _images/1D-02gb-Si444-60-det_E-line.*\n",
".. |bg_7lin| imagezoom:: _images/1D-02gb-Si444-60-det_E-7lin.*\n",
" :loc: upper-right-corner\n",
".. |lb_flat| imagezoom:: _images/1D-03lb-Si444-60-det_E-flat.*\n",
".. |lb_line| imagezoom:: _images/1D-03lb-Si444-60-det_E-line.*\n",
".. |lb_7lin| imagezoom:: _images/1D-03lb-Si444-60-det_E-7lin.*\n",
" :loc: upper-right-corner\n",
".. |lg_flat| imagezoom:: _images/1D-04lgb-Si444-60-det_E-flat.*\n",
".. |lg_line| imagezoom:: _images/1D-04lgb-Si444-60-det_E-line.*\n",
".. |lg_7lin| imagezoom:: _images/1D-04lgb-Si444-60-det_E-7lin.*\n",
" :loc: upper-right-corner\n",
"\n",
"The energy distribution over the crystal surface is hyperbolic for Bragg and\n",
"ellipsoidal for Laue crystals. Therefore, Laue crystals have limited acceptance\n",
"in the sagittal direction whereas Bragg crystals have the hyperbola branches\n",
"even for large sagittal sizes. Notice the full crystal coverage in the\n",
"meridional direction for the two ground-bent cases.\n",
"\n",
"+----------+---------------------+---------------------+---------------------+\n",
"| geometry | flat source | line source | 7 lines |\n",
"+==========+=====================+=====================+=====================+\n",
"| Bragg | | | |\n",
"| simply | |xbb_flat| | |xbb_line| | |xbb_7lin| |\n",
"| bent | | | |\n",
"+----------+---------------------+---------------------+---------------------+\n",
"| Bragg | | | |\n",
"| ground | |xbg_flat| | |xbg_line| | |xbg_7lin| |\n",
"| bent | | | |\n",
"+----------+---------------------+---------------------+---------------------+\n",
"| Laue | | | |\n",
"| simply | |xlb_flat| | |xlb_line| | |xlb_7lin| |\n",
"| bent | | | |\n",
"+----------+---------------------+---------------------+---------------------+\n",
"| Laue | | | |\n",
"| ground | |xlg_flat| | |xlg_line| | |xlg_7lin| |\n",
"| bent | | | |\n",
"+----------+---------------------+---------------------+---------------------+\n",
"\n",
".. |xbb_flat| imagezoom:: _images/1D-01b-Si444-60-xtal_E-flat.*\n",
".. |xbb_line| imagezoom:: _images/1D-01b-Si444-60-xtal_E-line.*\n",
".. |xbb_7lin| imagezoom:: _images/1D-01b-Si444-60-xtal_E-7lin.*\n",
" :loc: upper-right-corner\n",
".. |xbg_flat| imagezoom:: _images/1D-02gb-Si444-60-xtal_E-flat.*\n",
".. |xbg_line| imagezoom:: _images/1D-02gb-Si444-60-xtal_E-line.*\n",
".. |xbg_7lin| imagezoom:: _images/1D-02gb-Si444-60-xtal_E-7lin.*\n",
" :loc: upper-right-corner\n",
".. |xlb_flat| imagezoom:: _images/1D-03lb-Si444-60-xtal_E-flat.*\n",
".. |xlb_line| imagezoom:: _images/1D-03lb-Si444-60-xtal_E-line.*\n",
".. |xlb_7lin| imagezoom:: _images/1D-03lb-Si444-60-xtal_E-7lin.*\n",
" :loc: upper-right-corner\n",
".. |xlg_flat| imagezoom:: _images/1D-04lgb-Si444-60-xtal_E-flat.*\n",
".. |xlg_line| imagezoom:: _images/1D-04lgb-Si444-60-xtal_E-line.*\n",
".. |xlg_7lin| imagezoom:: _images/1D-04lgb-Si444-60-xtal_E-7lin.*\n",
" :loc: upper-right-corner\n",
"\n",
"As a matter of principles checking, let us consider how the initially\n",
"unpolarized beam becomes partially polarized after being diffracted by the\n",
"crystal analyzer. As expected, the beam is fully polarized at 45° Bragg angle\n",
"(Brewster angle in x-ray regime). CAxis here is degree of polarization:\n",
"\n",
"+-------------+-------------+\n",
"| Bragg | Laue |\n",
"+=============+=============+\n",
"| |DPBragg| | |DPLaue| |\n",
"+-------------+-------------+\n",
"\n",
".. |DPBragg| animation:: _images/1D-DegOfPol_Bragg\n",
".. |DPLaue| animation:: _images/1D-DegOfPol_Laue\n",
"\n",
".. rubric:: Comments\n",
"\n",
"1) The ground-bent crystals are more efficient as their whole surface works\n",
" for a single energy, as opposed to simply bent crystals which have different\n",
" parts reflecting the rays of different energies.\n",
"2) When the crystal is close to the source (small θ for Bragg and large θ for\n",
" Laue), the images are distorted, even for the ground-bent crystals.\n",
"3) The Bragg case requires small pixel size in the meridional direction (~10 µm\n",
" for 1-m-diameter Rowland circle) for a good spatial resolution but can\n",
" profit from its compactness. The Laue case requires a big detector of a size\n",
" comparable to that of the crystal but the pixel size is not required to be\n",
" small.\n",
"4) The comparison of energy resolution in Bragg and Laue cases is not strictly\n",
" correct here. While the former case can use the small beam size at the\n",
" detector for utilizing energy dispersive property of the spectrometer, the\n",
" latter one has a big image at the detector which is restricted by the size\n",
" of the crystal. The size of the 'white' beam image is therefore correct only\n",
" for the crystal size selected here. The Laue case can still be used in\n",
" energy dispersive regime if 2D image analysis is utilized. At the present\n",
" conditions, the energy resolution of Bragg crystals is better than that of\n",
" Laue crystals except at small Bragg angles and low diffraction orders.\n",
"5) The energy resolution in ground-bent cases is not always better than that\n",
" in simply bent cases because of strongly curved images. If the sagittal size\n",
" of the crystal is smaller or :ref:`sagittal bending is used\n",
" <dicedBentAnalyzers>`, the advantage of ground-bent crystals is clearly\n",
" visible not only in terms of efficiency but also in terms of energy\n",
" resolution.\n",
"\n",
".. _VonHamos:\n",
"\n",
"Von Hamos analyzer\n",
"~~~~~~~~~~~~~~~~~~\n",
"\n",
"A von Hamos spectrometer has axial symmetry around the axis connecting the\n",
"source and the detector. The analyzing crystal is cylindrically bent with the\n",
"radius equal to the crystal-to-axis distance. In this scheme, the emission\n",
"escape direction depends on the Bragg angle (energy). In practice, the\n",
"spectrometer axis is adapted such that the escape direction is appropriate for\n",
"a given sample setup. In particular, the escape direction can be kept in back\n",
"scattering (relatively to the sample), see the figure below. In the latter case\n",
"the mechanical model is more complex and includes three translations and two\n",
"rotations. In the figure below, the crystal is sagittally curved around the\n",
"source–detector line. The detector plane is perpendicular to the sketch.\n",
"Left: the classical setup [vH]_ with 2 translations.\n",
"Right: the setup with an invariant escape direction.\n",
"\n",
".. imagezoom:: _images/vonHamosPositionsClassic.*\n",
".. imagezoom:: _images/vonHamosPositionsFixedEscape.*\n",
"\n",
"The geometrical parameters for the von Hamos spectrometer were taken from\n",
"[vH_SLS]_: a diced 100 (sagittal) × 50 (meridional) mm² Si(444) crystal is\n",
"curved with Rs = 250 mm. The width of segmented facets was taken equal to 5 mm\n",
"(as in [vH_SLS]_) and 1 mm together with a continuously bent case.\n",
"\n",
".. [vH] L. von Hámos, *Röntgenspektroskopie und Abbildung mittels gekrümmter\n",
" Kristallreflektoren II. Beschreibung eines fokussierenden Spektrographen mit\n",
" punktgetreuer Spaltabbildung*, Annalen der Physik **411** (1934) 252–260\n",
"\n",
".. [vH_SLS] J. Szlachetko, M. Nachtegaal, E. de Boni, M. Willimann,\n",
" O. Safonova, J. Sa, G. Smolentsev, M. Szlachetko, J. A. van Bokhoven,\n",
" J.-Cl. Dousse, J. Hoszowska, Y. Kayser, P. Jagodzinski, A. Bergamaschi,\n",
" B. Schmitt, C. David, and A. Lücke, *A von Hamos x-ray spectrometer based on\n",
" a segmented-type diffraction crystal for single-shot x-ray emission\n",
" spectroscopy and time-resolved resonant inelastic x-ray scattering studies*,\n",
" Rev. Sci. Instrum. **83** (2012) 103105.\n",
"\n",
"The calculation of energy resolution requires two detector images: 1) of a flat\n",
"energy distribution source and 2) of a monochromatic source. From these two\n",
"images the energy resolution *δE* was calculated and then 3) a verifying image\n",
"was ray-traced for a source of 7 energy lines evenly spaced with the found step\n",
"*δE*. Such images are shown for different dicing sizes at a particular Bragg\n",
"angle.\n",
"\n",
"+---------+--------------------+--------------------+--------------------+\n",
"| crystal | flat source | line source | 7 lines |\n",
"+=========+====================+====================+====================+\n",
"| diced | | | |\n",
"| 5 mm | |vH5_flat| | |vH5_line| | |vH5_7lin| |\n",
"+---------+--------------------+--------------------+--------------------+\n",
"| diced | | | |\n",
"| 1 mm | |vH1_flat| | |vH1_line| | |vH1_7lin| |\n",
"+---------+--------------------+--------------------+--------------------+\n",
"| not | | | |\n",
"| diced | |vHc_flat| | |vHc_line| | |vHc_7lin| |\n",
"+---------+--------------------+--------------------+--------------------+\n",
"\n",
".. |vH5_flat| imagezoom:: _images/SivonHamos-5mmDiced60-det_E-flat.*\n",
".. |vH5_line| imagezoom:: _images/SivonHamos-5mmDiced60-det_E-line.*\n",
".. |vH5_7lin| imagezoom:: _images/SivonHamos-5mmDiced60-det_E-7lin.*\n",
" :loc: upper-right-corner\n",
"\n",
".. |vH1_flat| imagezoom:: _images/SivonHamos-1mmDiced60-det_E-flat.*\n",
".. |vH1_line| imagezoom:: _images/SivonHamos-1mmDiced60-det_E-line.*\n",
".. |vH1_7lin| imagezoom:: _images/SivonHamos-1mmDiced60-det_E-7lin.*\n",
" :loc: upper-right-corner\n",
"\n",
".. |vHc_flat| imagezoom:: _images/SivonHamos-notDiced60-det_E-flat.*\n",
".. |vHc_line| imagezoom:: _images/SivonHamos-notDiced60-det_E-line.*\n",
".. |vHc_7lin| imagezoom:: _images/SivonHamos-notDiced60-det_E-7lin.*\n",
" :loc: upper-right-corner\n",
"\n",
"With the coloring by stripe (crystal facet) number, the image below explains\n",
"why energy resolution is worse when stripes are wider and the crystal is\n",
"sagittally larger. The peripheral stripes contribute to aberrations which\n",
"increase the detector image.\n",
"\n",
"+------------+--------------------------------------+\n",
"| crystal | line source colored by stripe number |\n",
"+============+======================================+\n",
"| diced 5 mm | |vH5_line_stripes| |\n",
"+------------+--------------------------------------+\n",
"| diced 1 mm | |vH1_line_stripes| |\n",
"+------------+--------------------------------------+\n",
"\n",
".. |vH5_line_stripes| imagezoom:: _images/SivonHamos-5mmDiced60-det_stripes-line.*\n",
".. |vH1_line_stripes| imagezoom:: _images/SivonHamos-1mmDiced60-det_stripes-line.*\n",
"\n",
"\n",
"The efficiency of a von Hamos spectrometer is significantly lower as compared\n",
"to Johann and Johansson crystals. The reason for the lower efficiency can be\n",
"understood from the figure below, where the magnified footprint on the crystal\n",
"is shown: only a narrow part of the crystal surface contributes to a given\n",
"energy band. Here, in the 5-mm-stripe case a bandwidth of ~12 eV uses less than\n",
"1 mm of the crystal!\n",
"\n",
".. imagezoom:: _images/SivonHamos-5mmDiced60-xtal_E_zoom-7lin.*\n",
"\n",
"Comparison of Rowland circle based and von Hamos analyzers\n",
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
"\n",
"An additional case was also included here: when a Johann crystal is rotated by\n",
"90° around the sample-to-crystal line, it becomes a von Hamos crystal that has\n",
"to be put at a correct distance corresponding to the 1 m sagittal radius. This\n",
"case is labelled as “Johann as von Hamos”.\n",
"\n",
"In comparing with a von Hamos spectrometer, one should realize its strongest\n",
"advantage – inherent energy dispersive operation without a need for energy\n",
"scan. This advantage is especially important for broad emission lines. Below,\n",
"the comparison is made for two cases: (1) a narrow energy band (left figure),\n",
"which is more interesting for valence band RIXS and which assumes a high\n",
"resolution monochromator in the primary beam and (2) a wide energy band (right\n",
"figure), which is more interesting for core state RIXS and normal fluorescence\n",
"detection. The desired position on the charts is in the upper left corner. As\n",
"seen in the figures, the efficiency of the von Hamos crystals (i) is\n",
"independent of the energy band (equal for the left and right charts), which\n",
"demonstrates truly energy-dispersive behavior of the crystals but (ii) is\n",
"significantly lower as compared to the Johann and Johansson crystals. A way to\n",
"increase efficiency is to place the crystal closer to the source, which\n",
"obviously worsens energy resolution because of the increased angular source\n",
"size. Inversely, if the crystal is put at a further distance, the energy\n",
"resolution is improved (square symbols) but the efficiency is low because of a\n",
"smaller solid angle collected. The left figure is with a narrow energy band\n",
"equal to the 6-fold energy resolution. The right figure is with a wide energy\n",
"band equal to 8·10 :sup:`-4`·E (approximate width of K β lines [Henke]_).\n",
"\n",
".. imagezoom:: _images/ResolutionEfficiency1D-narrowBand.*\n",
".. imagezoom:: _images/ResolutionEfficiency1D-8e-4Band.*\n",
"\n",
"Finally, among the compared 1D-bent spectrometers the Johansson type is the\n",
"best in the combination of good energy resolution and high efficiency. It is\n",
"the only one that can function both as a high resolution spectrometer and a\n",
"fluorescence detector. One should bear in mind, however, two very strong\n",
"advantages of von Hamos spectrometers: (1) they do not need alignment – a\n",
"crystal and a detector positioned approximately will most probably immediately\n",
"work and (2) the image is inherently energy dispersive with a flat (energy\n",
"independent) detector response. The low efficiency and mediocre energy\n",
"resolution are a price for the commissioning-free energy dispersive operation.\n",
"Rowland circle based spectrometers will always require good alignment, and\n",
"among them only the Johansson-type spectrometer can be made energy dispersive\n",
"with a flat detector response.\n",
"\n",
".. _elliptical_VonHamos:\n",
"\n",
"Circular and elliptical von Hamos analyzers\n",
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
"\n",
"The axial symmetry of the classical von Hamos spectrometer [vH]_ results in a\n",
"close detector-to-sample position at large Bragg angles. A single detector may\n",
"find enough space there but when the spectrometer has several branches, the\n",
"corresponding detectors come close to each other, which restricts both the\n",
"space around the sample and the accessible Bragg angle range. A solution to\n",
"this problem could be an increased magnification of the crystal from the\n",
"classical 1:1. The axis of the circular cilynder is then split into *two* axes\n",
"representing the two foci of an ellipsoid, see the scheme below. The lower axis\n",
"holds the source (sample) and the upper one holds the detector. The crystal in\n",
"the figure has the magnification 1:1 for the circular crystal (left part) and\n",
"1.5:1 for the elliptical one (right part).\n",
"\n",
".. imagezoom:: _images/CircularAndElliptical_vonHamos_s.*\n",
"\n",
"The crystal is diced along the cylinder axis with 1 mm pitch. The difference in\n",
"the circular and elliptical figures is shown below.\n",
"\n",
".. imagezoom:: _images/Cylinders.*\n",
"\n",
"The elliptical figure results in some aberrations, as seen by the monochromatic\n",
"images below, which worsens energy resolution.\n",
"\n",
"+------------+--------------------+--------------------+--------------------+\n",
"| crystal | flat source | line source | 7 lines |\n",
"+============+====================+====================+====================+\n",
"| bent as | | | |\n",
"| circular | |circ_flat| | |circ_line| | |circ_7lin| |\n",
"| cylinder | | | |\n",
"+------------+--------------------+--------------------+--------------------+\n",
"| bent as | | | |\n",
"| elliptical | |ell_flat| | |ell_line| | |ell_7lin| |\n",
"| cylinder | | | |\n",
"+------------+--------------------+--------------------+--------------------+\n",
"\n",
".. |circ_flat| imagezoom:: _images/SivonHamosDicedCircular60-det_E-flat.*\n",
".. |circ_line| imagezoom:: _images/SivonHamosDicedCircular60-det_E-line.*\n",
".. |circ_7lin| imagezoom:: _images/SivonHamosDicedCircular60-det_E-7lin.*\n",
" :loc: upper-right-corner\n",
".. |ell_flat| imagezoom:: _images/SivonHamosDicedElliptical60-det_E-flat.*\n",
".. |ell_line| imagezoom:: _images/SivonHamosDicedElliptical60-det_E-line.*\n",
".. |ell_7lin| imagezoom:: _images/SivonHamosDicedElliptical60-det_E-7lin.*\n",
" :loc: upper-right-corner\n",
"\n",
"\"\"\"\n",
"pass\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 344 | 0.00007 | false |
from wikitextparser import ExternalLink
def test_externallinks():
assert ExternalLink('http://example.org').external_links == []
def test_repr():
assert repr(ExternalLink('HTTP://mediawiki.org')) == \
"ExternalLink('HTTP://mediawiki.org')"
def test_numberedmailto_change_none_to_empty():
s = (
'[mailto:'
'info@example.org?Subject=URL%20Encoded%20Subject&body='
'Body%20Textinfo]')
el = ExternalLink(s)
assert s[1:-1] == el.url
assert el.text is None
assert el.in_brackets
el.text = ''
assert el.string == s[:-1] + ' ]'
def test_bare_link():
el = ExternalLink('HTTP://mediawiki.org')
assert 'HTTP://mediawiki.org' == el.url
assert el.text is None
assert not el.in_brackets
def test_inbracket_with_text():
el = ExternalLink('[ftp://mediawiki.org mediawiki ftp]')
assert 'ftp://mediawiki.org' == el.url
assert 'mediawiki ftp' == el.text
assert el.in_brackets is True
def test_text_setter():
el = ExternalLink('[ftp://mediawiki.org mediawiki ftp]')
el.text = 'a'
assert '[ftp://mediawiki.org a]' == el.string
del el.text
el.text = 'b'
assert '[ftp://mediawiki.org b]' == el.string
el = ExternalLink('ftp://mediawiki.org')
el.text = 'c'
assert '[ftp://mediawiki.org c]' == el.string
def test_text_delter():
el = ExternalLink('[ftp://mediawiki.org mediawiki ftp]')
del el.text
assert '[ftp://mediawiki.org]' == el.string
del el.text
assert '[ftp://mediawiki.org]' == el.string
el = ExternalLink('ftp://mediawiki.org')
del el.text
assert 'ftp://mediawiki.org' == el.string
def test_url_setter():
el = ExternalLink('[ftp://mediawiki.org mw]')
el.url = 'https://www.mediawiki.org/'
assert '[https://www.mediawiki.org/ mw]' == el.string
el = ExternalLink('ftp://mediawiki.org')
el.url = 'https://www.mediawiki.org/'
assert 'https://www.mediawiki.org/' == el.string
el = ExternalLink('[ftp://mediawiki.org]')
el.url = 'https://www.mediawiki.org/'
assert '[https://www.mediawiki.org/]' == el.string
def test_ending_with_less_than_sign():
el = ExternalLink('[https://www.google.<com]')
assert el.url == 'https://www.google.'
assert el.text == '<com'
| [
"from wikitextparser import ExternalLink\n",
"\n",
"\n",
"def test_externallinks():\n",
" assert ExternalLink('http://example.org').external_links == []\n",
"\n",
"\n",
"def test_repr():\n",
" assert repr(ExternalLink('HTTP://mediawiki.org')) == \\\n",
" \"ExternalLink('HTTP://mediawiki.org')\"\n",
"\n",
"\n",
"def test_numberedmailto_change_none_to_empty():\n",
" s = (\n",
" '[mailto:'\n",
" 'info@example.org?Subject=URL%20Encoded%20Subject&body='\n",
" 'Body%20Textinfo]')\n",
" el = ExternalLink(s)\n",
" assert s[1:-1] == el.url\n",
" assert el.text is None\n",
" assert el.in_brackets\n",
" el.text = ''\n",
" assert el.string == s[:-1] + ' ]'\n",
"\n",
"\n",
"def test_bare_link():\n",
" el = ExternalLink('HTTP://mediawiki.org')\n",
" assert 'HTTP://mediawiki.org' == el.url\n",
" assert el.text is None\n",
" assert not el.in_brackets\n",
"\n",
"\n",
"def test_inbracket_with_text():\n",
" el = ExternalLink('[ftp://mediawiki.org mediawiki ftp]')\n",
" assert 'ftp://mediawiki.org' == el.url\n",
" assert 'mediawiki ftp' == el.text\n",
" assert el.in_brackets is True\n",
"\n",
"\n",
"def test_text_setter():\n",
" el = ExternalLink('[ftp://mediawiki.org mediawiki ftp]')\n",
" el.text = 'a'\n",
" assert '[ftp://mediawiki.org a]' == el.string\n",
"\n",
" del el.text\n",
" el.text = 'b'\n",
" assert '[ftp://mediawiki.org b]' == el.string\n",
"\n",
" el = ExternalLink('ftp://mediawiki.org')\n",
" el.text = 'c'\n",
" assert '[ftp://mediawiki.org c]' == el.string\n",
"\n",
"\n",
"def test_text_delter():\n",
" el = ExternalLink('[ftp://mediawiki.org mediawiki ftp]')\n",
" del el.text\n",
" assert '[ftp://mediawiki.org]' == el.string\n",
"\n",
" del el.text\n",
" assert '[ftp://mediawiki.org]' == el.string\n",
"\n",
" el = ExternalLink('ftp://mediawiki.org')\n",
" del el.text\n",
" assert 'ftp://mediawiki.org' == el.string\n",
"\n",
"\n",
"def test_url_setter():\n",
" el = ExternalLink('[ftp://mediawiki.org mw]')\n",
" el.url = 'https://www.mediawiki.org/'\n",
" assert '[https://www.mediawiki.org/ mw]' == el.string\n",
"\n",
" el = ExternalLink('ftp://mediawiki.org')\n",
" el.url = 'https://www.mediawiki.org/'\n",
" assert 'https://www.mediawiki.org/' == el.string\n",
"\n",
" el = ExternalLink('[ftp://mediawiki.org]')\n",
" el.url = 'https://www.mediawiki.org/'\n",
" assert '[https://www.mediawiki.org/]' == el.string\n",
"\n",
"\n",
"def test_ending_with_less_than_sign():\n",
" el = ExternalLink('[https://www.google.<com]')\n",
" assert el.url == 'https://www.google.'\n",
" assert el.text == '<com'\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 84 | 0 | false |
"""Class to perform random over-sampling."""
from __future__ import print_function
from __future__ import division
import numpy as np
from collections import Counter
from sklearn.utils import check_random_state
from ..base import SamplerMixin
class RandomOverSampler(SamplerMixin):
"""Class to perform random over-sampling.
Object to over-sample the minority class(es) by picking samples at random
with replacement.
Parameters
----------
ratio : str or float, optional (default='auto')
If 'auto', the ratio will be defined automatically to balance
the dataset. Otherwise, the ratio is defined as the number
of samples in the minority class over the the number of samples
in the majority class.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
Attributes
----------
min_c_ : str or int
The identifier of the minority class.
max_c_ : str or int
The identifier of the majority class.
stats_c_ : dict of str/int : int
A dictionary in which the number of occurences of each class is
reported.
X_shape_ : tuple of int
Shape of the data `X` during fitting.
Notes
-----
Supports multiple classes.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.over_sampling import RandomOverSampler
>>> X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
... n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1,
... n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> ros = RandomOverSampler(random_state=42)
>>> X_res, y_res = ros.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res)))
Resampled dataset shape Counter({0: 900, 1: 900})
"""
def __init__(self,
ratio='auto',
random_state=None):
super(RandomOverSampler, self).__init__(ratio=ratio)
self.random_state = random_state
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding label of `X_resampled`
"""
# Keep the samples from the majority class
X_resampled = X[y == self.maj_c_]
y_resampled = y[y == self.maj_c_]
# Loop over the other classes over picking at random
for key in self.stats_c_.keys():
# If this is the majority class, skip it
if key == self.maj_c_:
continue
# Define the number of sample to create
if self.ratio == 'auto':
num_samples = int(self.stats_c_[self.maj_c_] -
self.stats_c_[key])
else:
num_samples = int((self.ratio * self.stats_c_[self.maj_c_]) -
self.stats_c_[key])
# Pick some elements at random
random_state = check_random_state(self.random_state)
indx = random_state.randint(low=0, high=self.stats_c_[key],
size=num_samples)
# Concatenate to the majority class
X_resampled = np.concatenate((X_resampled,
X[y == key],
X[y == key][indx]),
axis=0)
y_resampled = np.concatenate((y_resampled,
y[y == key],
y[y == key][indx]), axis=0)
self.logger.info('Over-sampling performed: %s', Counter(
y_resampled))
return X_resampled, y_resampled
| [
"\"\"\"Class to perform random over-sampling.\"\"\"\n",
"from __future__ import print_function\n",
"from __future__ import division\n",
"\n",
"import numpy as np\n",
"\n",
"from collections import Counter\n",
"\n",
"from sklearn.utils import check_random_state\n",
"\n",
"from ..base import SamplerMixin\n",
"\n",
"\n",
"class RandomOverSampler(SamplerMixin):\n",
"\n",
" \"\"\"Class to perform random over-sampling.\n",
"\n",
" Object to over-sample the minority class(es) by picking samples at random\n",
" with replacement.\n",
"\n",
" Parameters\n",
" ----------\n",
" ratio : str or float, optional (default='auto')\n",
" If 'auto', the ratio will be defined automatically to balance\n",
" the dataset. Otherwise, the ratio is defined as the number\n",
" of samples in the minority class over the the number of samples\n",
" in the majority class.\n",
"\n",
" random_state : int, RandomState instance or None, optional (default=None)\n",
" If int, random_state is the seed used by the random number generator;\n",
" If RandomState instance, random_state is the random number generator;\n",
" If None, the random number generator is the RandomState instance used\n",
" by np.random.\n",
"\n",
" Attributes\n",
" ----------\n",
" min_c_ : str or int\n",
" The identifier of the minority class.\n",
"\n",
" max_c_ : str or int\n",
" The identifier of the majority class.\n",
"\n",
" stats_c_ : dict of str/int : int\n",
" A dictionary in which the number of occurences of each class is\n",
" reported.\n",
"\n",
" X_shape_ : tuple of int\n",
" Shape of the data `X` during fitting.\n",
"\n",
" Notes\n",
" -----\n",
" Supports multiple classes.\n",
"\n",
" Examples\n",
" --------\n",
"\n",
" >>> from collections import Counter\n",
" >>> from sklearn.datasets import make_classification\n",
" >>> from imblearn.over_sampling import RandomOverSampler\n",
" >>> X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],\n",
" ... n_informative=3, n_redundant=1, flip_y=0,\n",
" ... n_features=20, n_clusters_per_class=1,\n",
" ... n_samples=1000, random_state=10)\n",
" >>> print('Original dataset shape {}'.format(Counter(y)))\n",
" Original dataset shape Counter({1: 900, 0: 100})\n",
" >>> ros = RandomOverSampler(random_state=42)\n",
" >>> X_res, y_res = ros.fit_sample(X, y)\n",
" >>> print('Resampled dataset shape {}'.format(Counter(y_res)))\n",
" Resampled dataset shape Counter({0: 900, 1: 900})\n",
"\n",
" \"\"\"\n",
"\n",
" def __init__(self,\n",
" ratio='auto',\n",
" random_state=None):\n",
"\n",
" super(RandomOverSampler, self).__init__(ratio=ratio)\n",
" self.random_state = random_state\n",
"\n",
" def _sample(self, X, y):\n",
" \"\"\"Resample the dataset.\n",
"\n",
" Parameters\n",
" ----------\n",
" X : ndarray, shape (n_samples, n_features)\n",
" Matrix containing the data which have to be sampled.\n",
"\n",
" y : ndarray, shape (n_samples, )\n",
" Corresponding label for each sample in X.\n",
"\n",
" Returns\n",
" -------\n",
" X_resampled : ndarray, shape (n_samples_new, n_features)\n",
" The array containing the resampled data.\n",
"\n",
" y_resampled : ndarray, shape (n_samples_new)\n",
" The corresponding label of `X_resampled`\n",
"\n",
" \"\"\"\n",
"\n",
" # Keep the samples from the majority class\n",
" X_resampled = X[y == self.maj_c_]\n",
" y_resampled = y[y == self.maj_c_]\n",
"\n",
" # Loop over the other classes over picking at random\n",
" for key in self.stats_c_.keys():\n",
"\n",
" # If this is the majority class, skip it\n",
" if key == self.maj_c_:\n",
" continue\n",
"\n",
" # Define the number of sample to create\n",
" if self.ratio == 'auto':\n",
" num_samples = int(self.stats_c_[self.maj_c_] -\n",
" self.stats_c_[key])\n",
" else:\n",
" num_samples = int((self.ratio * self.stats_c_[self.maj_c_]) -\n",
" self.stats_c_[key])\n",
"\n",
" # Pick some elements at random\n",
" random_state = check_random_state(self.random_state)\n",
" indx = random_state.randint(low=0, high=self.stats_c_[key],\n",
" size=num_samples)\n",
"\n",
" # Concatenate to the majority class\n",
" X_resampled = np.concatenate((X_resampled,\n",
" X[y == key],\n",
" X[y == key][indx]),\n",
" axis=0)\n",
"\n",
" y_resampled = np.concatenate((y_resampled,\n",
" y[y == key],\n",
" y[y == key][indx]), axis=0)\n",
"\n",
" self.logger.info('Over-sampling performed: %s', Counter(\n",
" y_resampled))\n",
"\n",
" return X_resampled, y_resampled\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 138 | 0.000089 | false |
from sigma.core.permission import check_man_roles
from sigma.core.rolecheck import matching_role, user_matching_role
import discord
async def takerole(cmd, message, args):
if not check_man_roles(message.author, message.channel):
out_content = discord.Embed(type='rich', color=0xDB0000,
title='⛔ Insufficient Permissions. Server Admin Only.')
await message.channel.send(None, embed=out_content)
return
if len(args) < 2:
out_content = discord.Embed(type='rich', color=0xDB0000, title='❗ Error')
out_content.add_field(name='Missing Arguments', value=cmd.help())
await message.channel.send(None, embed=out_content)
return
if not message.mentions:
out_content = discord.Embed(type='rich', color=0xDB0000, title='❗ Error')
out_content.add_field(name='Missing Target User', value=cmd.help())
await message.channel.send(None, embed=out_content)
return
role_qry = ' '.join(args[1:])
target_role = matching_role(message.guild, role_qry)
target_user = message.mentions[0]
user_contained_role = user_matching_role(target_user, role_qry)
if not target_role:
out_content = discord.Embed(type='rich', color=0xFF9900, title='❗ Error')
out_content.add_field(name='Role Not Found', value='I was unable to find **' + role_qry + '** on this server.')
await message.channel.send(None, embed=out_content)
else:
if user_contained_role:
await target_user.remove_roles(target_role)
out_content = discord.Embed(type='rich', color=0x66cc66,
title='✅ Role ' + role_qry + ' removed from **' + target_user.name + '**.')
await message.channel.send(None, embed=out_content)
else:
out_content = discord.Embed(type='rich', color=0xFF9900, title='❗ Error')
out_content.add_field(name='User Missing Role',
value='I was unable to find **' + role_qry + '** in ' + target_user.name + '\'s roles.')
await message.channel.send(None, embed=out_content)
| [
"from sigma.core.permission import check_man_roles\n",
"from sigma.core.rolecheck import matching_role, user_matching_role\n",
"import discord\n",
"\n",
"\n",
"async def takerole(cmd, message, args):\n",
" if not check_man_roles(message.author, message.channel):\n",
" out_content = discord.Embed(type='rich', color=0xDB0000,\n",
" title='⛔ Insufficient Permissions. Server Admin Only.')\n",
" await message.channel.send(None, embed=out_content)\n",
" return\n",
" if len(args) < 2:\n",
" out_content = discord.Embed(type='rich', color=0xDB0000, title='❗ Error')\n",
" out_content.add_field(name='Missing Arguments', value=cmd.help())\n",
" await message.channel.send(None, embed=out_content)\n",
" return\n",
" if not message.mentions:\n",
" out_content = discord.Embed(type='rich', color=0xDB0000, title='❗ Error')\n",
" out_content.add_field(name='Missing Target User', value=cmd.help())\n",
" await message.channel.send(None, embed=out_content)\n",
" return\n",
" role_qry = ' '.join(args[1:])\n",
" target_role = matching_role(message.guild, role_qry)\n",
" target_user = message.mentions[0]\n",
" user_contained_role = user_matching_role(target_user, role_qry)\n",
" if not target_role:\n",
" out_content = discord.Embed(type='rich', color=0xFF9900, title='❗ Error')\n",
" out_content.add_field(name='Role Not Found', value='I was unable to find **' + role_qry + '** on this server.')\n",
" await message.channel.send(None, embed=out_content)\n",
" else:\n",
" if user_contained_role:\n",
" await target_user.remove_roles(target_role)\n",
" out_content = discord.Embed(type='rich', color=0x66cc66,\n",
" title='✅ Role ' + role_qry + ' removed from **' + target_user.name + '**.')\n",
" await message.channel.send(None, embed=out_content)\n",
" else:\n",
" out_content = discord.Embed(type='rich', color=0xFF9900, title='❗ Error')\n",
" out_content.add_field(name='User Missing Role',\n",
" value='I was unable to find **' + role_qry + '** in ' + target_user.name + '\\'s roles.')\n",
" await message.channel.send(None, embed=out_content)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.008333333333333333,
0,
0,
0,
0,
0,
0.008620689655172414,
0,
0,
0.011627906976744186,
0,
0.008130081300813009,
0
] | 40 | 0.002104 | false |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Data.Custom.SEC import *
from QuantConnect.Data.Custom.USTreasury import *
import numpy as np
### <summary>
### Regression algorithm checks that adding data via AddData
### works as expected
### </summary>
class CustomDataAddDataRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2013, 10, 7)
self.SetEndDate(2013, 10, 11)
self.SetCash(100000)
twxEquity = self.AddEquity("TWX", Resolution.Daily).Symbol
customTwxSymbol = self.AddData(SECReport8K, twxEquity, Resolution.Daily).Symbol
self.googlEquity = self.AddEquity("GOOGL", Resolution.Daily).Symbol
customGooglSymbol = self.AddData(SECReport10K, "GOOGL", Resolution.Daily).Symbol
usTreasury = self.AddData(USTreasuryYieldCurveRate, "GOOGL", Resolution.Daily).Symbol
usTreasuryUnderlyingEquity = Symbol.Create("MSFT", SecurityType.Equity, Market.USA)
usTreasuryUnderlying = self.AddData(USTreasuryYieldCurveRate, usTreasuryUnderlyingEquity, Resolution.Daily).Symbol
optionSymbol = self.AddOption("TWX", Resolution.Minute).Symbol
customOptionSymbol = self.AddData(SECReport10K, optionSymbol, Resolution.Daily).Symbol
if customTwxSymbol.Underlying != twxEquity:
raise Exception(f"Underlying symbol for {customTwxSymbol} is not equal to TWX equity. Expected {twxEquity} got {customTwxSymbol.Underlying}")
if customGooglSymbol.Underlying != self.googlEquity:
raise Exception(f"Underlying symbol for {customGooglSymbol} is not equal to GOOGL equity. Expected {self.googlEquity} got {customGooglSymbol.Underlying}")
if usTreasury.HasUnderlying:
raise Exception(f"US Treasury yield curve (no underlying) has underlying when it shouldn't. Found {usTreasury.Underlying}")
if not usTreasuryUnderlying.HasUnderlying:
raise Exception("US Treasury yield curve (with underlying) has no underlying Symbol even though we added with Symbol")
if usTreasuryUnderlying.Underlying != usTreasuryUnderlyingEquity:
raise Exception(f"US Treasury yield curve underlying does not equal equity Symbol added. Expected {usTreasuryUnderlyingEquity} got {usTreasuryUnderlying.Underlying}")
if customOptionSymbol.Underlying != optionSymbol:
raise Exception("Option symbol not equal to custom underlying symbol. Expected {optionSymbol} got {customOptionSymbol.Underlying}")
try:
customDataNoCache = self.AddData(SECReport10Q, "AAPL", Resolution.Daily)
raise Exception("AAPL was found in the SymbolCache, though it should be missing")
except InvalidOperationException as e:
return
def OnData(self, data):
'''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.
Arguments:
data: Slice object keyed by symbol containing the stock data
'''
if not self.Portfolio.Invested and len(self.Transactions.GetOpenOrders()) == 0:
self.SetHoldings(self.googlEquity, 0.5) | [
"# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.\n",
"# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"from clr import AddReference\n",
"AddReference(\"System\")\n",
"AddReference(\"QuantConnect.Algorithm\")\n",
"AddReference(\"QuantConnect.Common\")\n",
"\n",
"from System import *\n",
"from QuantConnect import *\n",
"from QuantConnect.Algorithm import *\n",
"from QuantConnect.Data.Custom.SEC import *\n",
"from QuantConnect.Data.Custom.USTreasury import *\n",
"import numpy as np\n",
"\n",
"### <summary>\n",
"### Regression algorithm checks that adding data via AddData\n",
"### works as expected\n",
"### </summary>\n",
"class CustomDataAddDataRegressionAlgorithm(QCAlgorithm):\n",
"\n",
" def Initialize(self):\n",
" self.SetStartDate(2013, 10, 7)\n",
" self.SetEndDate(2013, 10, 11)\n",
" self.SetCash(100000)\n",
"\n",
" twxEquity = self.AddEquity(\"TWX\", Resolution.Daily).Symbol\n",
" customTwxSymbol = self.AddData(SECReport8K, twxEquity, Resolution.Daily).Symbol\n",
"\n",
" self.googlEquity = self.AddEquity(\"GOOGL\", Resolution.Daily).Symbol\n",
" customGooglSymbol = self.AddData(SECReport10K, \"GOOGL\", Resolution.Daily).Symbol\n",
"\n",
" usTreasury = self.AddData(USTreasuryYieldCurveRate, \"GOOGL\", Resolution.Daily).Symbol\n",
" usTreasuryUnderlyingEquity = Symbol.Create(\"MSFT\", SecurityType.Equity, Market.USA)\n",
" usTreasuryUnderlying = self.AddData(USTreasuryYieldCurveRate, usTreasuryUnderlyingEquity, Resolution.Daily).Symbol\n",
"\n",
" optionSymbol = self.AddOption(\"TWX\", Resolution.Minute).Symbol\n",
" customOptionSymbol = self.AddData(SECReport10K, optionSymbol, Resolution.Daily).Symbol\n",
"\n",
" if customTwxSymbol.Underlying != twxEquity:\n",
" raise Exception(f\"Underlying symbol for {customTwxSymbol} is not equal to TWX equity. Expected {twxEquity} got {customTwxSymbol.Underlying}\")\n",
" if customGooglSymbol.Underlying != self.googlEquity:\n",
" raise Exception(f\"Underlying symbol for {customGooglSymbol} is not equal to GOOGL equity. Expected {self.googlEquity} got {customGooglSymbol.Underlying}\")\n",
" if usTreasury.HasUnderlying:\n",
" raise Exception(f\"US Treasury yield curve (no underlying) has underlying when it shouldn't. Found {usTreasury.Underlying}\")\n",
" if not usTreasuryUnderlying.HasUnderlying:\n",
" raise Exception(\"US Treasury yield curve (with underlying) has no underlying Symbol even though we added with Symbol\")\n",
" if usTreasuryUnderlying.Underlying != usTreasuryUnderlyingEquity:\n",
" raise Exception(f\"US Treasury yield curve underlying does not equal equity Symbol added. Expected {usTreasuryUnderlyingEquity} got {usTreasuryUnderlying.Underlying}\")\n",
" if customOptionSymbol.Underlying != optionSymbol:\n",
" raise Exception(\"Option symbol not equal to custom underlying symbol. Expected {optionSymbol} got {customOptionSymbol.Underlying}\")\n",
"\n",
" try:\n",
" customDataNoCache = self.AddData(SECReport10Q, \"AAPL\", Resolution.Daily)\n",
" raise Exception(\"AAPL was found in the SymbolCache, though it should be missing\")\n",
" except InvalidOperationException as e:\n",
" return\n",
"\n",
" def OnData(self, data):\n",
" '''OnData event is the primary entry point for your algorithm. Each new data point will be pumped in here.\n",
"\n",
" Arguments:\n",
" data: Slice object keyed by symbol containing the stock data\n",
" '''\n",
" if not self.Portfolio.Invested and len(self.Transactions.GetOpenOrders()) == 0:\n",
" self.SetHoldings(self.googlEquity, 0.5)"
] | [
0,
0.012345679012345678,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.037037037037037035,
0.02702702702702703,
0.023255813953488372,
0.02,
0.05263157894736842,
0,
0.07142857142857142,
0.01639344262295082,
0.045454545454545456,
0.06666666666666667,
0.017543859649122806,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0.011235955056179775,
0,
0.010638297872340425,
0.010869565217391304,
0.008130081300813009,
0,
0,
0.010526315789473684,
0,
0,
0.006493506493506494,
0,
0.005988023952095809,
0,
0.007352941176470588,
0,
0.007633587786259542,
0,
0.00558659217877095,
0,
0.006944444444444444,
0,
0,
0.011764705882352941,
0.010638297872340425,
0,
0,
0,
0,
0.008695652173913044,
0,
0,
0,
0,
0.011363636363636364,
0.0196078431372549
] | 76 | 0.008079 | false |
# -*- coding: utf-8 -*-
# legume. Copyright 2009-2010 Dale Reidy. All rights reserved.
# See LICENSE for details.
import struct
from legume.exceptions import BufferError
class ByteBuffer(object):
'''
Provides a simplified method of reading struct packed data from
a string buffer.
read_bytes and read_struct remove the read data from the string buffer.
'''
def __init__(self, bytes):
self._bytes = bytes
def read_bytes(self, bytes_to_read):
if bytes_to_read > len(self._bytes):
raise BufferError(
'Cannot read %d bytes, buffer too small (%d bytes)' \
% (bytes_to_read, len(self._bytes)))
result = self._bytes[:bytes_to_read]
self._bytes = self._bytes[bytes_to_read:]
return result
def peek_bytes(self, bytes_to_peek):
if bytes_to_peek > len(self._bytes):
raise BufferError(
'Cannot peek %d bytes, buffer too small (%d bytes)' \
% (bytes_to_peek, len(self._bytes)))
return self._bytes[:bytes_to_peek]
def push_bytes(self, bytes):
self._bytes += bytes
def read_struct(self, struct_format):
struct_size = struct.calcsize('!'+struct_format)
try:
struct_bytes = self.read_bytes(struct_size)
bytes = struct.unpack('!'+struct_format, struct_bytes)
except struct.error:
raise BufferError('Unable to unpack data')
except BufferError as e:
raise BufferError(
'Could not unpack using format %s' % struct_format, e)
return bytes
def peek_struct(self, struct_format):
struct_size = struct.calcsize('!'+struct_format)
try:
struct_bytes = self.peek_bytes(struct_size)
bytes = struct.unpack('!'+struct_format, struct_bytes)
except struct.error:
raise BufferError('Unable to unpack data')
except BufferError as e:
raise BufferError(
'Could not unpack using format %s' % struct_format, e)
return bytes
def is_empty(self):
return len(self._bytes) == 0
@property
def length(self):
return len(self._bytes) | [
"# -*- coding: utf-8 -*-\n",
"# legume. Copyright 2009-2010 Dale Reidy. All rights reserved.\n",
"# See LICENSE for details.\n",
"\n",
"import struct\n",
"from legume.exceptions import BufferError\n",
"\n",
"class ByteBuffer(object):\n",
" '''\n",
" Provides a simplified method of reading struct packed data from\n",
" a string buffer.\n",
"\n",
" read_bytes and read_struct remove the read data from the string buffer.\n",
" '''\n",
" def __init__(self, bytes):\n",
" self._bytes = bytes\n",
"\n",
" def read_bytes(self, bytes_to_read):\n",
" if bytes_to_read > len(self._bytes):\n",
" raise BufferError(\n",
" 'Cannot read %d bytes, buffer too small (%d bytes)' \\\n",
" % (bytes_to_read, len(self._bytes)))\n",
" result = self._bytes[:bytes_to_read]\n",
" self._bytes = self._bytes[bytes_to_read:]\n",
" return result\n",
"\n",
" def peek_bytes(self, bytes_to_peek):\n",
" if bytes_to_peek > len(self._bytes):\n",
" raise BufferError(\n",
" 'Cannot peek %d bytes, buffer too small (%d bytes)' \\\n",
" % (bytes_to_peek, len(self._bytes)))\n",
" return self._bytes[:bytes_to_peek]\n",
"\n",
" def push_bytes(self, bytes):\n",
" self._bytes += bytes\n",
"\n",
" def read_struct(self, struct_format):\n",
" struct_size = struct.calcsize('!'+struct_format)\n",
" try:\n",
" struct_bytes = self.read_bytes(struct_size)\n",
" bytes = struct.unpack('!'+struct_format, struct_bytes)\n",
" except struct.error:\n",
" raise BufferError('Unable to unpack data')\n",
" except BufferError as e:\n",
" raise BufferError(\n",
" 'Could not unpack using format %s' % struct_format, e)\n",
" return bytes\n",
"\n",
" def peek_struct(self, struct_format):\n",
" struct_size = struct.calcsize('!'+struct_format)\n",
" try:\n",
" struct_bytes = self.peek_bytes(struct_size)\n",
" bytes = struct.unpack('!'+struct_format, struct_bytes)\n",
" except struct.error:\n",
" raise BufferError('Unable to unpack data')\n",
" except BufferError as e:\n",
" raise BufferError(\n",
" 'Could not unpack using format %s' % struct_format, e)\n",
" return bytes\n",
"\n",
" def is_empty(self):\n",
" return len(self._bytes) == 0\n",
"\n",
" @property\n",
" def length(self):\n",
" return len(self._bytes)"
] | [
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0.014285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903
] | 66 | 0.001504 | false |
# -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.myvideolink import MyvideoLink
class p7s1Main(MPScreen):
def __init__(self, session, mode):
self.mode = mode
if self.mode == "ProSieben":
self.portal = "ProSieben Mediathek"
self.baseurl = "http://videokatalog.prosieben.de/"
if self.mode == "Sat1":
self.portal = "Sat.1 Mediathek"
self.baseurl = "http://videokatalog.sat1.de/"
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"0": self.closeAll,
"ok" : self.keyOK,
"cancel": self.keyCancel
}, -1)
self['title'] = Label(self.portal)
self['ContentTitle'] = Label("Genre:")
self.streamList = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.keyLocked = True
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self['name'].setText(_('Please wait...'))
url = self.baseurl
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.parseData).addErrback(self.dataError)
def parseData(self, data):
step1 = re.search('<h2>Alle Kategorien</h2>(.*?)id="site_footer"', data, re.S)
if self.mode == "Sat1":
parse = re.findall('<h3><a\sclass="arrow"\shref="(.*?)">(.*?)</a></h3>.*?<p>\s+(.*?)</p>', step1.group(1), re.S)
else:
parse = re.findall('<li>.*?<a\shref="(.*?)">(.*?)</a>.*?<p>\s+(.*?)</p>', step1.group(1), re.S)
for (Link, Title, Sub) in parse:
Sub = Sub.strip()
self.streamList.append((Title, Link, Sub))
self.streamList.sort()
self.ml.setList(map(self._defaultlistcenter, self.streamList))
self.keyLocked = False
self['name'].setText("")
def keyOK(self):
exist = self['liste'].getCurrent()
if self.keyLocked or exist == None:
return
title = self['liste'].getCurrent()[0][0]
url = self['liste'].getCurrent()[0][1]
sub = self['liste'].getCurrent()[0][2]
if sub == "":
self.session.open(p7s1vidScreen, url, title, self.mode, self.portal)
else:
self.session.open(p7s1subScreen, url, title, self.mode, self.portal)
class p7s1subScreen(MPScreen):
def __init__(self, session, Link, Name, mode, portal):
self.Link = Link
self.Name = Name
self.mode = mode
self.portal = portal
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel" : self.keyCancel
}, -1)
self['title'] = Label(self.portal)
self['ContentTitle'] = Label("Genre: %s" % self.Name)
self.keyLocked = True
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.keyLocked = True
self['name'].setText(_('Please wait...'))
self.filmliste = []
url = self.Link
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadData).addErrback(self.dataError)
def loadData(self, data):
self['name'].setText('')
step1 = re.search('Unterkategorien:</strong>(.*?)</article>', data, re.S)
if self.mode == "Sat1":
parse = re.findall('<a.*?href="(.*?)"><strong>(.*?)</strong></a>', step1.group(1), re.S)
else:
parse = re.findall('<a\shref="(.*?)">(.*?)</a>', step1.group(1), re.S)
if parse:
for (Link, Title) in parse:
self.filmliste.append((decodeHtml(Title).strip(), Link))
self.filmliste.sort()
if self.Name != "Sendungen":
self.filmliste.insert(0, (self.Name, self.Link, None))
self.ml.setList(map(self._defaultlistcenter, self.filmliste))
self.keyLocked = False
def keyOK(self):
exist = self['liste'].getCurrent()
if self.keyLocked or exist == None:
return
title = self['liste'].getCurrent()[0][0]
url = self['liste'].getCurrent()[0][1]
self.session.open(p7s1vidScreen, url, title, self.mode, self.portal)
class p7s1vidScreen(MPScreen, ThumbsHelper):
def __init__(self, session, Link, Name, mode, portal):
self.Link = Link
self.Name = Name
self.mode = mode
self.portal = portal
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel" : self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"nextBouquet" : self.keyPageUp,
"prevBouquet" : self.keyPageDown,
"green" : self.keyPageNumber
}, -1)
self['title'] = Label(self.portal)
self['ContentTitle'] = Label("Genre: %s" % self.Name)
self['F2'] = Label(_("Page"))
self['Page'] = Label(_("Page:"))
self.keyLocked = True
self.page = 1
self.lastpage = 1
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.keyLocked = True
self['name'].setText(_('Please wait...'))
self.filmliste = []
if self.page == 1:
url = self.Link
else:
url = self.Link + "%s.html" % self.page
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadData).addErrback(self.dataError)
def loadData(self, data):
if self.mode == "Sat1":
self.getLastPage(data, 'class="other">(.*?)</ul>')
else:
self.getLastPage(data, 'class="active">(.*?)</ul>')
if self.mode == "Sat1":
step1 = re.search('<h2>Videos</h2>(.*?)id="site_footer"', data, re.S)
parse = re.findall('<a href="(.*?)".*?src="(.*?)">.*?align_right">(.*?)<.*?title=".*?">(.*?)</a>.*?"vs-description">(.*?)</div>', step1.group(1), re.S)
else:
step1 = re.search('class="videoList">(.*?)<div\sclass="clear">', data, re.S)
parse = re.findall('<li class.*?href="(.*?)".*?src="(.*?)".*?date">(.*?)<.*?title.*?>(.*?)</a>.*?<span title="(.*?)"', step1.group(1), re.S)
if parse:
for (Link, Pic, Date, Title, Desc) in parse:
self.filmliste.append((decodeHtml(Title), Link, Pic, Date, Desc))
if len(self.filmliste) == 0:
self.filmliste.append((_('No videos found!'), None, None))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1)
self.showInfos()
def showInfos(self):
title = self['liste'].getCurrent()[0][0]
pic = self['liste'].getCurrent()[0][2]
desc = self['liste'].getCurrent()[0][4]
self['name'].setText(title)
self['handlung'].setText(decodeHtml(desc))
CoverHelper(self['coverArt']).getCover(pic)
def keyOK(self):
if self.keyLocked:
return
url = self['liste'].getCurrent()[0][1]
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadIDData).addErrback(self.dataError)
def loadIDData(self, data):
id = re.findall('broadcast_date.*?id.":."(.*?)."', data, re.S)
if id:
url = 'http://vas.sim-technik.de/video/video.json?clipid=%s&app=megapp&method=1' % id[-1]
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.getStream).addErrback(self.dataError)
def getStream(self, data):
title = self['liste'].getCurrent()[0][0]
url = re.search('VideoURL"."(.*?)"', data, re.S)
if url:
url = url.group(1).replace('\/','/')
self.session.open(SimplePlayer, [(title, url)], showPlaylist=False, ltype='p7s1media') | [
"# -*- coding: utf-8 -*-\n",
"from Plugins.Extensions.MediaPortal.plugin import _\n",
"from Plugins.Extensions.MediaPortal.resources.imports import *\n",
"from Plugins.Extensions.MediaPortal.resources.myvideolink import MyvideoLink\n",
"\n",
"class p7s1Main(MPScreen):\n",
"\n",
"\tdef __init__(self, session, mode):\n",
"\t\tself.mode = mode\n",
"\n",
"\t\tif self.mode == \"ProSieben\":\n",
"\t\t\tself.portal = \"ProSieben Mediathek\"\n",
"\t\t\tself.baseurl = \"http://videokatalog.prosieben.de/\"\n",
"\t\tif self.mode == \"Sat1\":\n",
"\t\t\tself.portal = \"Sat.1 Mediathek\"\n",
"\t\t\tself.baseurl = \"http://videokatalog.sat1.de/\"\n",
"\n",
"\t\tself.plugin_path = mp_globals.pluginPath\n",
"\t\tself.skin_path = mp_globals.pluginPath + mp_globals.skinsPath\n",
"\t\tpath = \"%s/%s/defaultGenreScreen.xml\" % (self.skin_path, config.mediaportal.skin.value)\n",
"\t\tif not fileExists(path):\n",
"\t\t\tpath = self.skin_path + mp_globals.skinFallback + \"/defaultGenreScreen.xml\"\n",
"\t\twith open(path, \"r\") as f:\n",
"\t\t\tself.skin = f.read()\n",
"\t\t\tf.close()\n",
"\n",
"\t\tMPScreen.__init__(self, session)\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"0\": self.closeAll,\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"cancel\": self.keyCancel\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(self.portal)\n",
"\t\tself['ContentTitle'] = Label(\"Genre:\")\n",
"\n",
"\t\tself.streamList = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tself.keyLocked = True\n",
"\t\tself.onLayoutFinish.append(self.loadPage)\n",
"\n",
"\tdef loadPage(self):\n",
"\t\tself['name'].setText(_('Please wait...'))\n",
"\t\turl = self.baseurl\n",
"\t\tgetPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.parseData).addErrback(self.dataError)\n",
"\n",
"\tdef parseData(self, data):\n",
"\t\tstep1 = re.search('<h2>Alle Kategorien</h2>(.*?)id=\"site_footer\"', data, re.S)\n",
"\t\tif self.mode == \"Sat1\":\n",
"\t\t\tparse = re.findall('<h3><a\\sclass=\"arrow\"\\shref=\"(.*?)\">(.*?)</a></h3>.*?<p>\\s+(.*?)</p>', step1.group(1), re.S)\n",
"\t\telse:\n",
"\t\t\tparse = re.findall('<li>.*?<a\\shref=\"(.*?)\">(.*?)</a>.*?<p>\\s+(.*?)</p>', step1.group(1), re.S)\n",
"\t\tfor (Link, Title, Sub) in parse:\n",
"\t\t\tSub = Sub.strip()\n",
"\t\t\tself.streamList.append((Title, Link, Sub))\n",
"\t\tself.streamList.sort()\n",
"\t\tself.ml.setList(map(self._defaultlistcenter, self.streamList))\n",
"\t\tself.keyLocked = False\n",
"\t\tself['name'].setText(\"\")\n",
"\n",
"\tdef keyOK(self):\n",
"\t\texist = self['liste'].getCurrent()\n",
"\t\tif self.keyLocked or exist == None:\n",
"\t\t\treturn\n",
"\t\ttitle = self['liste'].getCurrent()[0][0]\n",
"\t\turl = self['liste'].getCurrent()[0][1]\n",
"\t\tsub = self['liste'].getCurrent()[0][2]\n",
"\t\tif sub == \"\":\n",
"\t\t\tself.session.open(p7s1vidScreen, url, title, self.mode, self.portal)\n",
"\t\telse:\n",
"\t\t\tself.session.open(p7s1subScreen, url, title, self.mode, self.portal)\n",
"\n",
"class p7s1subScreen(MPScreen):\n",
"\n",
"\tdef __init__(self, session, Link, Name, mode, portal):\n",
"\t\tself.Link = Link\n",
"\t\tself.Name = Name\n",
"\t\tself.mode = mode\n",
"\t\tself.portal = portal\n",
"\t\tself.plugin_path = mp_globals.pluginPath\n",
"\t\tself.skin_path = mp_globals.pluginPath + mp_globals.skinsPath\n",
"\t\tpath = \"%s/%s/defaultListWideScreen.xml\" % (self.skin_path, config.mediaportal.skin.value)\n",
"\t\tif not fileExists(path):\n",
"\t\t\tpath = self.skin_path + mp_globals.skinFallback + \"/defaultListWideScreen.xml\"\n",
"\n",
"\t\twith open(path, \"r\") as f:\n",
"\t\t\tself.skin = f.read()\n",
"\t\t\tf.close()\n",
"\n",
"\t\tMPScreen.__init__(self, session)\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"0\" : self.closeAll,\n",
"\t\t\t\"cancel\" : self.keyCancel\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(self.portal)\n",
"\t\tself['ContentTitle'] = Label(\"Genre: %s\" % self.Name)\n",
"\n",
"\t\tself.keyLocked = True\n",
"\n",
"\t\tself.filmliste = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tself.onLayoutFinish.append(self.loadPage)\n",
"\n",
"\tdef loadPage(self):\n",
"\t\tself.keyLocked = True\n",
"\t\tself['name'].setText(_('Please wait...'))\n",
"\t\tself.filmliste = []\n",
"\t\turl = self.Link\n",
"\t\tgetPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadData).addErrback(self.dataError)\n",
"\n",
"\tdef loadData(self, data):\n",
"\t\tself['name'].setText('')\n",
"\t\tstep1 = re.search('Unterkategorien:</strong>(.*?)</article>', data, re.S)\n",
"\t\tif self.mode == \"Sat1\":\n",
"\t\t\tparse = re.findall('<a.*?href=\"(.*?)\"><strong>(.*?)</strong></a>', step1.group(1), re.S)\n",
"\t\telse:\n",
"\t\t\tparse = re.findall('<a\\shref=\"(.*?)\">(.*?)</a>', step1.group(1), re.S)\n",
"\t\tif parse:\n",
"\t\t\tfor (Link, Title) in parse:\n",
"\t\t\t\tself.filmliste.append((decodeHtml(Title).strip(), Link))\n",
"\t\t\tself.filmliste.sort()\n",
"\t\t\tif self.Name != \"Sendungen\":\n",
"\t\t\t\tself.filmliste.insert(0, (self.Name, self.Link, None))\n",
"\t\tself.ml.setList(map(self._defaultlistcenter, self.filmliste))\n",
"\t\tself.keyLocked = False\n",
"\n",
"\tdef keyOK(self):\n",
"\t\texist = self['liste'].getCurrent()\n",
"\t\tif self.keyLocked or exist == None:\n",
"\t\t\treturn\n",
"\t\ttitle = self['liste'].getCurrent()[0][0]\n",
"\t\turl = self['liste'].getCurrent()[0][1]\n",
"\t\tself.session.open(p7s1vidScreen, url, title, self.mode, self.portal)\n",
"\n",
"class p7s1vidScreen(MPScreen, ThumbsHelper):\n",
"\n",
"\tdef __init__(self, session, Link, Name, mode, portal):\n",
"\t\tself.Link = Link\n",
"\t\tself.Name = Name\n",
"\t\tself.mode = mode\n",
"\t\tself.portal = portal\n",
"\t\tself.plugin_path = mp_globals.pluginPath\n",
"\t\tself.skin_path = mp_globals.pluginPath + mp_globals.skinsPath\n",
"\t\tpath = \"%s/%s/defaultListWideScreen.xml\" % (self.skin_path, config.mediaportal.skin.value)\n",
"\t\tif not fileExists(path):\n",
"\t\t\tpath = self.skin_path + mp_globals.skinFallback + \"/defaultListWideScreen.xml\"\n",
"\n",
"\t\twith open(path, \"r\") as f:\n",
"\t\t\tself.skin = f.read()\n",
"\t\t\tf.close()\n",
"\n",
"\t\tMPScreen.__init__(self, session)\n",
"\t\tThumbsHelper.__init__(self)\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"0\" : self.closeAll,\n",
"\t\t\t\"cancel\" : self.keyCancel,\n",
"\t\t\t\"5\" : self.keyShowThumb,\n",
"\t\t\t\"up\" : self.keyUp,\n",
"\t\t\t\"down\" : self.keyDown,\n",
"\t\t\t\"right\" : self.keyRight,\n",
"\t\t\t\"left\" : self.keyLeft,\n",
"\t\t\t\"nextBouquet\" : self.keyPageUp,\n",
"\t\t\t\"prevBouquet\" : self.keyPageDown,\n",
"\t\t\t\"green\" : self.keyPageNumber\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(self.portal)\n",
"\t\tself['ContentTitle'] = Label(\"Genre: %s\" % self.Name)\n",
"\t\tself['F2'] = Label(_(\"Page\"))\n",
"\n",
"\t\tself['Page'] = Label(_(\"Page:\"))\n",
"\n",
"\t\tself.keyLocked = True\n",
"\t\tself.page = 1\n",
"\t\tself.lastpage = 1\n",
"\n",
"\t\tself.filmliste = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tself.onLayoutFinish.append(self.loadPage)\n",
"\n",
"\tdef loadPage(self):\n",
"\t\tself.keyLocked = True\n",
"\t\tself['name'].setText(_('Please wait...'))\n",
"\t\tself.filmliste = []\n",
"\t\tif self.page == 1:\n",
"\t\t\turl = self.Link\n",
"\t\telse:\n",
"\t\t\turl = self.Link + \"%s.html\" % self.page\n",
"\t\tgetPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadData).addErrback(self.dataError)\n",
"\n",
"\tdef loadData(self, data):\n",
"\t\tif self.mode == \"Sat1\":\n",
"\t\t\tself.getLastPage(data, 'class=\"other\">(.*?)</ul>')\n",
"\t\telse:\n",
"\t\t\tself.getLastPage(data, 'class=\"active\">(.*?)</ul>')\n",
"\t\tif self.mode == \"Sat1\":\n",
"\t\t\tstep1 = re.search('<h2>Videos</h2>(.*?)id=\"site_footer\"', data, re.S)\n",
"\t\t\tparse = re.findall('<a href=\"(.*?)\".*?src=\"(.*?)\">.*?align_right\">(.*?)<.*?title=\".*?\">(.*?)</a>.*?\"vs-description\">(.*?)</div>', step1.group(1), re.S)\n",
"\t\telse:\n",
"\t\t\tstep1 = re.search('class=\"videoList\">(.*?)<div\\sclass=\"clear\">', data, re.S)\n",
"\t\t\tparse = re.findall('<li class.*?href=\"(.*?)\".*?src=\"(.*?)\".*?date\">(.*?)<.*?title.*?>(.*?)</a>.*?<span title=\"(.*?)\"', step1.group(1), re.S)\n",
"\t\tif parse:\n",
"\t\t\tfor (Link, Pic, Date, Title, Desc) in parse:\n",
"\t\t\t\tself.filmliste.append((decodeHtml(Title), Link, Pic, Date, Desc))\n",
"\t\tif len(self.filmliste) == 0:\n",
"\t\t\tself.filmliste.append((_('No videos found!'), None, None))\n",
"\t\tself.ml.setList(map(self._defaultlistleft, self.filmliste))\n",
"\t\tself.ml.moveToIndex(0)\n",
"\t\tself.keyLocked = False\n",
"\t\tself.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1)\n",
"\t\tself.showInfos()\n",
"\n",
"\tdef showInfos(self):\n",
"\t\ttitle = self['liste'].getCurrent()[0][0]\n",
"\t\tpic = self['liste'].getCurrent()[0][2]\n",
"\t\tdesc = self['liste'].getCurrent()[0][4]\n",
"\t\tself['name'].setText(title)\n",
"\t\tself['handlung'].setText(decodeHtml(desc))\n",
"\t\tCoverHelper(self['coverArt']).getCover(pic)\n",
"\n",
"\tdef keyOK(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\turl = self['liste'].getCurrent()[0][1]\n",
"\t\tgetPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadIDData).addErrback(self.dataError)\n",
"\n",
"\tdef loadIDData(self, data):\n",
"\t\tid = re.findall('broadcast_date.*?id.\":.\"(.*?).\"', data, re.S)\n",
"\t\tif id:\n",
"\t\t\turl = 'http://vas.sim-technik.de/video/video.json?clipid=%s&app=megapp&method=1' % id[-1]\n",
"\t\t\tgetPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.getStream).addErrback(self.dataError)\n",
"\t\t\t\n",
"\tdef getStream(self, data):\n",
"\t\ttitle = self['liste'].getCurrent()[0][0]\n",
"\t\turl = re.search('VideoURL\".\"(.*?)\"', data, re.S)\n",
"\t\tif url:\n",
"\t\t\turl = url.group(1).replace('\\/','/')\n",
"\t\t\tself.session.open(SimplePlayer, [(title, url)], showPlaylist=False, ltype='p7s1media')"
] | [
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0.027777777777777776,
0.05263157894736842,
0,
0.03225806451612903,
0.02564102564102564,
0.018518518518518517,
0.038461538461538464,
0.02857142857142857,
0.02040816326530612,
0,
0.023255813953488372,
0.015625,
0.022222222222222223,
0.037037037037037035,
0.012658227848101266,
0.034482758620689655,
0.041666666666666664,
0.07692307692307693,
0,
0.02857142857142857,
0,
0.020833333333333332,
0.043478260869565216,
0.09090909090909091,
0.03571428571428571,
0.1111111111111111,
0,
0.02702702702702703,
0.024390243902439025,
0,
0.043478260869565216,
0.023809523809523808,
0.038461538461538464,
0,
0.041666666666666664,
0.022727272727272728,
0,
0.047619047619047616,
0.022727272727272728,
0.047619047619047616,
0.022727272727272728,
0,
0.03571428571428571,
0.024691358024691357,
0.038461538461538464,
0.04310344827586207,
0.125,
0.04040404040404041,
0.02857142857142857,
0.047619047619047616,
0.021739130434782608,
0.04,
0.015384615384615385,
0.04,
0.037037037037037035,
0,
0.05555555555555555,
0.02702702702702703,
0.05263157894736842,
0.1,
0.023255813953488372,
0.024390243902439025,
0.024390243902439025,
0.0625,
0.013888888888888888,
0.125,
0.013888888888888888,
0,
0.03225806451612903,
0,
0.017857142857142856,
0.05263157894736842,
0.05263157894736842,
0.05263157894736842,
0.043478260869565216,
0.023255813953488372,
0.015625,
0.021505376344086023,
0.037037037037037035,
0.024390243902439025,
0,
0.034482758620689655,
0.041666666666666664,
0.07692307692307693,
0,
0.02857142857142857,
0,
0.020833333333333332,
0.09090909090909091,
0.08333333333333333,
0.06896551724137931,
0.1111111111111111,
0,
0.02702702702702703,
0.017857142857142856,
0,
0.041666666666666664,
0,
0.045454545454545456,
0.023809523809523808,
0.038461538461538464,
0,
0.022727272727272728,
0,
0.047619047619047616,
0.041666666666666664,
0.022727272727272728,
0.045454545454545456,
0.05555555555555555,
0.022900763358778626,
0,
0.037037037037037035,
0.037037037037037035,
0.013157894736842105,
0.038461538461538464,
0.021739130434782608,
0.125,
0.02702702702702703,
0.08333333333333333,
0.03225806451612903,
0.01639344262295082,
0.04,
0.03125,
0.01694915254237288,
0.015625,
0.04,
0,
0.05555555555555555,
0.02702702702702703,
0.05263157894736842,
0.1,
0.023255813953488372,
0.024390243902439025,
0.014084507042253521,
0,
0.022222222222222223,
0,
0.017857142857142856,
0.05263157894736842,
0.05263157894736842,
0.05263157894736842,
0.043478260869565216,
0.023255813953488372,
0.015625,
0.021505376344086023,
0.037037037037037035,
0.024390243902439025,
0,
0.034482758620689655,
0.041666666666666664,
0.07692307692307693,
0,
0.02857142857142857,
0.03333333333333333,
0,
0.020833333333333332,
0.09090909090909091,
0.08333333333333333,
0.06666666666666667,
0.07142857142857142,
0.09090909090909091,
0.07692307692307693,
0.07142857142857142,
0.07692307692307693,
0.05714285714285714,
0.05405405405405406,
0.0625,
0.1111111111111111,
0,
0.02702702702702703,
0.017857142857142856,
0.03125,
0,
0.02857142857142857,
0,
0.041666666666666664,
0.0625,
0.05,
0,
0.045454545454545456,
0.023809523809523808,
0.038461538461538464,
0,
0.022727272727272728,
0,
0.047619047619047616,
0.041666666666666664,
0.022727272727272728,
0.045454545454545456,
0.047619047619047616,
0.05263157894736842,
0.125,
0.023255813953488372,
0.022900763358778626,
0,
0.037037037037037035,
0.038461538461538464,
0.018518518518518517,
0.125,
0.01818181818181818,
0.038461538461538464,
0.0136986301369863,
0.012903225806451613,
0.125,
0.025,
0.013888888888888888,
0.08333333333333333,
0.020833333333333332,
0.014285714285714285,
0.03225806451612903,
0.016129032258064516,
0.016129032258064516,
0.04,
0.04,
0.021505376344086023,
0.05263157894736842,
0,
0.045454545454545456,
0.023255813953488372,
0.024390243902439025,
0.023809523809523808,
0.03333333333333333,
0.022222222222222223,
0.021739130434782608,
0,
0.05555555555555555,
0.047619047619047616,
0.1,
0.024390243902439025,
0.022556390977443608,
0,
0.034482758620689655,
0.015384615384615385,
0.1111111111111111,
0.021505376344086023,
0.022556390977443608,
0.5,
0.03571428571428571,
0.023255813953488372,
0.0196078431372549,
0.1,
0.075,
0.033707865168539325
] | 250 | 0.036502 | false |
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# file: main.py
import re
import pandas as pd
from os.path import isfile
from remarkuple import helper as h, table
from isopsephy import preprocess_greek, to_roman, isopsephy
from IPython.display import HTML
dictionary = pd.DataFrame()
def load_dataframe(csv = "nt-strongs.csv"):
global dictionary
if isfile(csv):
print ("Retrieving data from local csv copy...")
dictionary = pd.read_csv(csv, sep = "\t")
# greek sords are pre processed for simpler forms without accents
dictionary['word'] = dictionary['word'].map(lambda x: preprocess_greek(x))
# adding transliteration of the words to the dataframe
dictionary['transliteration'] = dictionary['word'].map(lambda x: to_roman(x))
# adding word isopsephy value to the dataframe
dictionary['isopsephy'] = dictionary['word'].map(lambda x: isopsephy(x))
else:
print ("Cannot read csv file: %s! Please check path/filename and reload dictionary with load_dataframe(csv = \"your_filename\") function" % csv)
def find(query, column):
global dictionary
# if culumn is "isopsephy" use == compare
return dictionary[dictionary[column] == query] if column == 'isopsephy' else dictionary[dictionary[column].str.contains(query)]
def search_strongs_dictionary_table(query, field):
# initialize tagpy table object
tbl = table(Class='data')
# add head row columns
tbl.addHeadRow(h.tr(h.th('Lemma'), h.th('Word'), h.th('Transliteration'), h.th('Translation'), h.th('Isopsephy')))
# make search. if field is isopsephy, force search item to int type
rows = find(int(query) if field == 'isopsephy' else re.compile(query, re.IGNORECASE), field)
for i, item in rows.iterrows():
tbl.addBodyRow(h.tr(h.td(item.lemma), h.td(item.word), h.td(item.transliteration), h.td(item.translation), h.td(item.isopsephy)))
return tbl
def search_strongs_dictionary_html(query, field):
# using print data stream instead of returning data makes
# less hassle with unicodes and character encodings
print (str(search_strongs_dictionary_table(query, field)))
#
def print_search_form_html():
form = """
<h3>Search</h3>
<div class="search-input"><span>Greek</span> <input type="text" id="word" value="Αρμαγεδδων" /> <button onclick="search_strongs_dictionary('word')">Retrieve</button></div>
<div class="search-input"><span>Roman</span> <input type="text" id="transliteration" value="aarwn" /> <button onclick="search_strongs_dictionary('transliteration')">Retrieve</button></div>
<div class="search-input"><span>Lemma</span> <input type="text" id="lemma" value="G2424" /> <button onclick="search_strongs_dictionary('lemma')">Retrieve</button></div>
<div class="search-input"><span>Isopsephy</span> <input type="text" id="isopsephy" value="777" /> <button onclick="search_strongs_dictionary('isopsephy')">Retrieve</button></div>
<div class="search-input"><span>Translation</span> <input type="text" id="translation" value="flute" /> <button onclick="search_strongs_dictionary('translation')">Retrieve</button></div>
<h3>Results</h3>
<div id="result">
<table class="data"><thead><tr><th>Lemma</th><th>Word</th><th>Transliteration</th><th>Translation</th><th>Isopsephy</th></tr></thead><tbody><tr><td>G717</td><td>Αρμαγεδδων</td><td>Armageddwn</td><td>Armageddon (or Har-Meggiddon), a symbolic name</td><td>1008</td></tr></tbody></table>
</div>
<style>
div.search-input span {height: 16px; width: 80px; display: block; float: left;}
div.search-input input {height: 16px;}
div.search-input button {margin-top: -9px;}
</style>
"""
return HTML(form)
def print_search_form_js():
    """Return the client-side glue script as an IPython HTML object.

    The script runs ``search_strongs_dictionary_html`` on the notebook kernel
    and routes the printed output back into the ``#result`` element.
    """
    script = """
<script>

function handle_output(out_type, out) {
    var res = null
    if (out_type == "stream") {
        res = out.data
    } else if (out_type == "pyout") {
        res = out.data["text/plain"]
    } else if (out_type == "pyerr") {
        res = out.ename + ": " + out.evalue
    } else {
        res = "[output type: "+out_type+" is not implemented]";
    }
    document.getElementById("result").innerHTML = res
}

var callbacks = {'output' : handle_output}

function search_strongs_dictionary(field) {
    var kernel = IPython.notebook.kernel
    kernel.execute("search_strongs_dictionary_html('"+document.getElementById(field).value+"', '"+field+"')", callbacks, {silent:false})
}

</script>
"""
    return HTML(script)
"#!/usr/local/bin/python\n",
"# -*- coding: utf-8 -*-\n",
"# file: main.py\n",
"\n",
"import re\n",
"import pandas as pd\n",
"from os.path import isfile\n",
"from remarkuple import helper as h, table\n",
"from isopsephy import preprocess_greek, to_roman, isopsephy\n",
"from IPython.display import HTML\n",
"\n",
"dictionary = pd.DataFrame()\n",
"\n",
"def load_dataframe(csv = \"nt-strongs.csv\"):\n",
" global dictionary\n",
" if isfile(csv):\n",
" print (\"Retrieving data from local csv copy...\")\n",
" dictionary = pd.read_csv(csv, sep = \"\\t\")\n",
" # greek sords are pre processed for simpler forms without accents\n",
" dictionary['word'] = dictionary['word'].map(lambda x: preprocess_greek(x))\n",
" # adding transliteration of the words to the dataframe\n",
" dictionary['transliteration'] = dictionary['word'].map(lambda x: to_roman(x))\n",
" # adding word isopsephy value to the dataframe\n",
" dictionary['isopsephy'] = dictionary['word'].map(lambda x: isopsephy(x))\n",
" else:\n",
" print (\"Cannot read csv file: %s! Please check path/filename and reload dictionary with load_dataframe(csv = \\\"your_filename\\\") function\" % csv)\n",
"\n",
"def find(query, column):\n",
" global dictionary\n",
" # if culumn is \"isopsephy\" use == compare\n",
" return dictionary[dictionary[column] == query] if column == 'isopsephy' else dictionary[dictionary[column].str.contains(query)]\n",
"\n",
"def search_strongs_dictionary_table(query, field):\n",
" # initialize tagpy table object\n",
" tbl = table(Class='data')\n",
" # add head row columns\n",
" tbl.addHeadRow(h.tr(h.th('Lemma'), h.th('Word'), h.th('Transliteration'), h.th('Translation'), h.th('Isopsephy')))\n",
" # make search. if field is isopsephy, force search item to int type\n",
" rows = find(int(query) if field == 'isopsephy' else re.compile(query, re.IGNORECASE), field)\n",
" for i, item in rows.iterrows():\n",
" tbl.addBodyRow(h.tr(h.td(item.lemma), h.td(item.word), h.td(item.transliteration), h.td(item.translation), h.td(item.isopsephy)))\n",
" return tbl\n",
"\n",
"def search_strongs_dictionary_html(query, field):\n",
" # using print data stream instead of returning data makes \n",
" # less hassle with unicodes and character encodings\n",
" print (str(search_strongs_dictionary_table(query, field)))\n",
"\n",
"#\n",
"def print_search_form_html():\n",
" form = \"\"\"\n",
"<h3>Search</h3>\n",
"\n",
"<div class=\"search-input\"><span>Greek</span> <input type=\"text\" id=\"word\" value=\"Αρμαγεδδων\" /> <button onclick=\"search_strongs_dictionary('word')\">Retrieve</button></div>\n",
"<div class=\"search-input\"><span>Roman</span> <input type=\"text\" id=\"transliteration\" value=\"aarwn\" /> <button onclick=\"search_strongs_dictionary('transliteration')\">Retrieve</button></div>\n",
"<div class=\"search-input\"><span>Lemma</span> <input type=\"text\" id=\"lemma\" value=\"G2424\" /> <button onclick=\"search_strongs_dictionary('lemma')\">Retrieve</button></div>\n",
"<div class=\"search-input\"><span>Isopsephy</span> <input type=\"text\" id=\"isopsephy\" value=\"777\" /> <button onclick=\"search_strongs_dictionary('isopsephy')\">Retrieve</button></div>\n",
"<div class=\"search-input\"><span>Translation</span> <input type=\"text\" id=\"translation\" value=\"flute\" /> <button onclick=\"search_strongs_dictionary('translation')\">Retrieve</button></div>\n",
"\n",
"<h3>Results</h3>\n",
"\n",
"<div id=\"result\">\n",
"\n",
"<table class=\"data\"><thead><tr><th>Lemma</th><th>Word</th><th>Transliteration</th><th>Translation</th><th>Isopsephy</th></tr></thead><tbody><tr><td>G717</td><td>Αρμαγεδδων</td><td>Armageddwn</td><td>Armageddon (or Har-Meggiddon), a symbolic name</td><td>1008</td></tr></tbody></table>\n",
"\n",
"</div>\n",
"\n",
"<style>\n",
"div.search-input span {height: 16px; width: 80px; display: block; float: left;}\n",
"div.search-input input {height: 16px;}\n",
"div.search-input button {margin-top: -9px;}\n",
"</style>\n",
"\"\"\"\n",
" return HTML(form)\n",
"\n",
"def print_search_form_js():\n",
" js = \"\"\"\n",
"<script>\n",
"\n",
"function handle_output(out_type, out) {\n",
" var res = null\n",
" if (out_type == \"stream\") {\n",
" res = out.data\n",
" } else if (out_type == \"pyout\") {\n",
" res = out.data[\"text/plain\"]\n",
" } else if (out_type == \"pyerr\") {\n",
" res = out.ename + \": \" + out.evalue\n",
" } else {\n",
" res = \"[output type: \"+out_type+\" is not implemented]\";\n",
" }\n",
" document.getElementById(\"result\").innerHTML = res\n",
"}\n",
"\n",
"var callbacks = {'output' : handle_output}\n",
"\n",
"function search_strongs_dictionary(field) {\n",
" var kernel = IPython.notebook.kernel\n",
" kernel.execute(\"search_strongs_dictionary_html('\"+document.getElementById(field).value+\"', '\"+field+\"')\", callbacks, {silent:false})\n",
"}\n",
"\n",
"</script>\n",
"\"\"\"\n",
" return HTML(js)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06818181818181818,
0,
0,
0.017543859649122806,
0.04,
0,
0.012048192771084338,
0,
0.011627906976744186,
0,
0.012345679012345678,
0,
0.013071895424836602,
0,
0.04,
0,
0,
0.007575757575757576,
0,
0.0196078431372549,
0,
0,
0,
0.008403361344537815,
0,
0.010309278350515464,
0,
0.007246376811594203,
0,
0,
0.02,
0.015873015873015872,
0,
0.015873015873015872,
0,
0,
0.03333333333333333,
0,
0,
0,
0.005813953488372093,
0.005291005291005291,
0.005917159763313609,
0.00558659217877095,
0.0053475935828877,
0,
0,
0,
0,
0,
0.0035087719298245615,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0072992700729927005,
0,
0,
0,
0,
0.05263157894736842
] | 103 | 0.004662 | false |
# -*- coding: utf-8 -*-
# This code is based on youtube-dl: https://github.com/rg3/youtube-dl
from __future__ import unicode_literals
import json
import operator
import os
import sys
import re
import urllib2
# Unique sentinel object (distinguishes "no value supplied" from None).
# NOTE(review): no use of it is visible in this chunk — presumably kept from youtube-dl.
_NO_DEFAULT = object()
# Python 2 compatibility aliases; this module is Python 2 only (it also uses
# print statements and urllib2 below).
compat_chr = unichr
compat_str = unicode
# Format an exception as "<type>\n<joined args>\n" for the print statements below.
exception_err = lambda e: "%s\n%s\n" % (type(e), "".join(e.args))
class ExceptionTemplate(Exception):
    """Exception base class whose instances are callable.

    Calling an existing instance returns a new exception of the same concrete
    class with the extra arguments appended — handy for adding context to a
    pre-built "template" error object.
    """

    def __call__(self, *extra_args):
        combined = self.args + extra_args
        return type(self)(*combined)

    def __str__(self):
        return ': '.join(self.args)
extractorError = ExtractorError('JSInterpreter: ')
_OPERATORS = [
('|', operator.or_),
('^', operator.xor),
('&', operator.and_),
('>>', operator.rshift),
('<<', operator.lshift),
('-', operator.sub),
('+', operator.add),
('%', operator.mod),
('/', operator.truediv),
('*', operator.mul),
]
_ASSIGN_OPERATORS = [(op + '=', opfunc) for op, opfunc in _OPERATORS]
_ASSIGN_OPERATORS.append(('=', lambda cur, right: right))
_NAME_RE = r'[a-zA-Z_$][a-zA-Z_$0-9]*'
class JSInterpreter(object):
    """Interpreter for the small JavaScript subset used by player signature code.

    Supports var/return statements, (compound) assignment, the binary
    operators in _OPERATORS, numeric/JSON literals, array indexing, member
    access and calls on objects extracted from the source, and calls to named
    functions extracted lazily from ``code``.
    """

    def __init__(self, code, objects=None):
        # ``objects`` caches extracted JS objects by name; default is None
        # instead of {} to avoid the shared-mutable-default pitfall.
        if objects is None:
            objects = {}
        self.code = code            # full JS source searched for functions/objects
        self._functions = {}        # function name -> built Python callable
        self._objects = objects     # object name -> extracted member dict

    def interpret_statement(self, stmt, local_vars, allow_recursion=100):
        """Evaluate one statement; return ``(value, should_abort)``.

        ``should_abort`` is True when the statement was a ``return``, telling
        the function-body loop in build_function to stop.
        """
        if allow_recursion < 0:
            raise extractorError('Recursion limit reached')

        should_abort = False
        stmt = stmt.lstrip()
        stmt_m = re.match(r'var\s', stmt)
        if stmt_m:
            # 'var x = ...': evaluate the remainder as an assignment expression
            expr = stmt[len(stmt_m.group(0)):]
        else:
            return_m = re.match(r'return(?:\s+|$)', stmt)
            if return_m:
                expr = stmt[len(return_m.group(0)):]
                should_abort = True
            else:
                # Try interpreting it as an expression
                expr = stmt

        v = self.interpret_expression(expr, local_vars, allow_recursion)
        return v, should_abort

    def interpret_expression(self, expr, local_vars, allow_recursion):
        """Recursively evaluate a JS expression string to a Python value."""
        expr = expr.strip()

        if expr == '':  # Empty expression
            return None

        if expr.startswith('('):
            # Find the matching close paren, evaluate the parenthesised part,
            # then splice its JSON-encoded result back into the remainder.
            parens_count = 0
            for m in re.finditer(r'[()]', expr):
                if m.group(0) == '(':
                    parens_count += 1
                else:
                    parens_count -= 1
                    if parens_count == 0:
                        sub_expr = expr[1:m.start()]
                        sub_result = self.interpret_expression(
                            sub_expr, local_vars, allow_recursion)
                        remaining_expr = expr[m.end():].strip()
                        if not remaining_expr:
                            return sub_result
                        else:
                            expr = json.dumps(sub_result) + remaining_expr
                        break
            else:
                # for/else: loop ended without finding the closing paren
                raise extractorError('Premature end of parens in %r' % expr)

        # Assignment forms: ``name [op]= expr`` or ``name[index] [op]= expr``
        for op, opfunc in _ASSIGN_OPERATORS:
            m = re.match(r'''(?x)
                (?P<out>%s)(?:\[(?P<index>[^\]]+?)\])?
                \s*%s
                (?P<expr>.*)$''' % (_NAME_RE, re.escape(op)), expr)
            if not m:
                continue
            right_val = self.interpret_expression(
                m.group('expr'), local_vars, allow_recursion - 1)

            if m.groupdict().get('index'):
                # Indexed assignment mutates the list stored in local_vars
                lvar = local_vars[m.group('out')]
                idx = self.interpret_expression(
                    m.group('index'), local_vars, allow_recursion)
                assert isinstance(idx, int)
                cur = lvar[idx]
                val = opfunc(cur, right_val)
                lvar[idx] = val
                return val
            else:
                # Plain variable assignment
                cur = local_vars.get(m.group('out'))
                val = opfunc(cur, right_val)
                local_vars[m.group('out')] = val
                return val

        if expr.isdigit():
            return int(expr)

        # Bare variable reference (keywords excluded)
        var_m = re.match(
            r'(?!if|return|true|false)(?P<name>%s)$' % _NAME_RE,
            expr)
        if var_m:
            return local_vars[var_m.group('name')]

        # String/array/number literals parse as JSON
        try:
            return json.loads(expr)
        except ValueError:
            pass

        # Member access or method call: ``var.member`` / ``var.member(args)``
        m = re.match(
            r'(?P<var>%s)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$' % _NAME_RE,
            expr)
        if m:
            variable = m.group('var')
            member = m.group('member')
            arg_str = m.group('args')

            if variable in local_vars:
                obj = local_vars[variable]
            else:
                # Lazily extract and cache objects defined in the JS source
                if variable not in self._objects:
                    self._objects[variable] = self.extract_object(variable)
                obj = self._objects[variable]

            if arg_str is None:
                # Member access
                if member == 'length':
                    return len(obj)
                return obj[member]

            assert expr.endswith(')')
            # Function call
            if arg_str == '':
                argvals = tuple()
            else:
                argvals = tuple([
                    self.interpret_expression(v, local_vars, allow_recursion)
                    for v in arg_str.split(',')])

            # Emulate the handful of Array/String methods player code uses
            if member == 'split':
                assert argvals == ('',)
                return list(obj)
            if member == 'join':
                assert len(argvals) == 1
                return argvals[0].join(obj)
            if member == 'reverse':
                assert len(argvals) == 0
                obj.reverse()
                return obj
            if member == 'slice':
                assert len(argvals) == 1
                return obj[argvals[0]:]
            if member == 'splice':
                # Remove howMany items starting at index and return them
                assert isinstance(obj, list)
                index, howMany = argvals
                res = []
                for i in range(index, min(index + howMany, len(obj))):
                    res.append(obj.pop(index))
                return res

            # Otherwise assume an extracted-object member function; note the
            # whole argvals tuple is passed as the single argument (matching
            # the callables produced by build_function).
            return obj[member](argvals)

        # Array indexing: ``name[idx]``
        m = re.match(
            r'(?P<in>%s)\[(?P<idx>.+)\]$' % _NAME_RE, expr)
        if m:
            val = local_vars[m.group('in')]
            idx = self.interpret_expression(
                m.group('idx'), local_vars, allow_recursion - 1)
            return val[idx]

        # Binary operators: split at the first (lowest-precedence) match
        for op, opfunc in _OPERATORS:
            m = re.match(r'(?P<x>.+?)%s(?P<y>.+)' % re.escape(op), expr)
            if not m:
                continue
            x, abort = self.interpret_statement(
                m.group('x'), local_vars, allow_recursion - 1)
            if abort:
                raise extractorError(
                    'Premature left-side return of %s in %r' % (op, expr))
            y, abort = self.interpret_statement(
                m.group('y'), local_vars, allow_recursion - 1)
            if abort:
                raise extractorError(
                    'Premature right-side return of %s in %r' % (op, expr))
            return opfunc(x, y)

        # Call to a named top-level function: ``fname(a,b,3)``
        m = re.match(
            r'^(?P<func>%s)\((?P<args>[a-zA-Z0-9_$,]+)\)$' % _NAME_RE, expr)
        if m:
            fname = m.group('func')
            argvals = tuple([
                int(v) if v.isdigit() else local_vars[v]
                for v in m.group('args').split(',')])
            if fname not in self._functions:
                self._functions[fname] = self.extract_function(fname)
            return self._functions[fname](argvals)

        raise extractorError('Unsupported JS expression %r' % expr)

    def extract_object(self, objname):
        """Locate ``objname = {...}`` in the source and build its members.

        Returns a dict mapping member name to a Python callable.
        """
        obj = {}
        obj_m = re.search(
            (r'(?:var\s+)?%s\s*=\s*\{' % re.escape(objname)) +
            r'\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\})*)' +
            r'\}\s*;',
            self.code)
        fields = obj_m.group('fields')
        # Currently, it only supports function definitions
        fields_m = re.finditer(
            r'(?P<key>[a-zA-Z$0-9]+)\s*:\s*function'
            r'\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}',
            fields)
        for f in fields_m:
            argnames = f.group('args').split(',')
            obj[f.group('key')] = self.build_function(argnames, f.group('code'))

        return obj

    def extract_function(self, funcname):
        """Find a JS function definition by name and compile it to a callable.

        Matches both ``function name(...)`` and ``name = function(...)`` forms.
        Raises ExtractorError when the function cannot be found.
        """
        func_m = re.search(
            r'''(?x)
                (?:function\s+%s|[{;]%s\s*=\s*function)\s*
                \((?P<args>[^)]*)\)\s*
                \{(?P<code>[^}]+)\}''' % (
                re.escape(funcname), re.escape(funcname)),
            self.code)
        if func_m is None:
            raise extractorError('Could not find JS function %r' % funcname)
        argnames = func_m.group('args').split(',')

        return self.build_function(argnames, func_m.group('code'))

    def call_function(self, funcname, *args):
        """Extract ``funcname`` and invoke it immediately with ``args``."""
        f = self.extract_function(funcname)
        return f(args)

    def build_function(self, argnames, code):
        """Wrap a JS function body into a Python callable.

        The returned callable takes a single sequence of argument values
        (zipped against ``argnames``) and executes the ``;``-separated
        statements until a ``return`` aborts the loop.
        """
        def resf(args):
            local_vars = dict(zip(argnames, args))
            for stmt in code.split(';'):
                res, abort = self.interpret_statement(stmt, local_vars)
                if abort:
                    break
            return res
        return resf
class CVevoSignAlgoExtractor:
    """Downloads an HTML5 player script and decrypts signatures with JSInterpreter.

    Decrypt functions are cached per (player URL, signature layout) so a
    player script is downloaded and parsed only once.
    """
    # MAX RECURSION Depth for security
    # NOTE(review): defined but not referenced anywhere in this class.
    MAX_REC_DEPTH = 5

    def __init__(self):
        self._player_cache = {}    # (playerUrl, sig layout) -> decrypt callable
        self._cleanTmpVariables()

    def _cleanTmpVariables(self):
        # Drop the (potentially large) downloaded player source
        self.playerData = ''

    def decryptSignature(self, s, playerUrl):
        """Decrypt signature ``s`` using the JS player at ``playerUrl``.

        Returns the decrypted signature, or '' when the player cannot be
        downloaded or parsed (errors are printed, not raised).
        """
        # clear local data
        self._cleanTmpVariables()

        id_m = re.match(
            r'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\.(?P<ext>[a-z]+)$',
            playerUrl)
        if not id_m:
            raise extractorError('Cannot identify player %r' % playerUrl)

        player_type = id_m.group('ext')
        if player_type != 'js':
            raise extractorError('Invalid player type %r' % player_type)

        # use algoCache
        slen = len(s)
        player_id = (playerUrl, self._signature_cache_id(s))
        #if playerUrl not in self.algoCache:
        if player_id not in self._player_cache:
            # get player HTML 5 script
            try:
                self.playerData = urllib2.urlopen(playerUrl).read()
                encoding = 'utf-8'
                try:
                    self.playerData = self.playerData.decode(encoding, 'replace')
                except LookupError:
                    # unknown codec name: fall back to plain utf-8
                    self.playerData = self.playerData.decode('utf-8', 'replace')
            except Exception as e:
                # best-effort: report and return empty signature
                print('[CVevoSignAlgoExtractor] Unable to download playerUrl webpage')
                print exception_err(e)
                self._cleanTmpVariables()
                return ''

            try:
                func = self._parse_sig_js()
                # smoke-test the function on a synthetic signature of the same length
                test_string = u''.join(map(compat_chr, range(slen)))
                cache_res = func(test_string)
                cache_spec = [ord(c) for c in cache_res]
                self._player_cache[player_id] = func
            except Exception as e:
                print exception_err(e)
                self._cleanTmpVariables()
                return ''

        func = self._player_cache[player_id]
        s_out = func(s)

        # free not needed data
        self._cleanTmpVariables()

        return s_out

    def _parse_sig_js(self):
        """Locate the signature-decrypting function name and compile it."""
        funcname = self._search_regex(
            r'\.sig\|\|([a-zA-Z0-9$]+)\(', self.playerData)

        jsi = JSInterpreter(self.playerData)
        initial_function = jsi.extract_function(funcname)
        return lambda s: initial_function([s])

    def _search_regex(self, pattern, string):
        """Regex-search ``string`` and return the first non-None group.

        Prints a notice and returns None when nothing matches.
        """
        mobj = re.search(pattern, string, 0)
        if mobj:
            # return the first matching group
            return next(g for g in mobj.groups() if g is not None)
        else:
            print '[CVevoSignAlgoExtractor] Unable to extract'
            return None

    def _signature_cache_id(self, example_sig):
        """ Return a string representation of a signature (dot-joined part lengths) """
        return u'.'.join(compat_str(len(part)) for part in example_sig.split('.'))

# Module-level singleton used by callers of this module.
decryptor = CVevoSignAlgoExtractor()
| [
"#\t-*-\tcoding:\tutf-8\t-*-\n",
"# This code is based on youtube-dl: https://github.com/rg3/youtube-dl\n",
"\n",
"from __future__ import unicode_literals\n",
"\n",
"import json\n",
"import operator\n",
"import os\n",
"import sys\n",
"import re\n",
"import urllib2\n",
"\n",
"_NO_DEFAULT = object()\n",
"compat_chr = unichr\n",
"compat_str = unicode\n",
"exception_err = lambda e: \"%s\\n%s\\n\" % (type(e), \"\".join(e.args))\n",
"\n",
"class ExceptionTemplate(Exception):\n",
"\tdef __call__(self, *args):\n",
"\t\treturn self.__class__(*(self.args + args))\n",
"\tdef __str__(self):\n",
"\t\treturn ': '.join(self.args)\n",
"class ExtractorError(ExceptionTemplate): pass\n",
"extractorError = ExtractorError('JSInterpreter: ')\n",
"\n",
"\n",
"_OPERATORS = [\n",
"\t('|', operator.or_),\n",
"\t('^', operator.xor),\n",
"\t('&', operator.and_),\n",
"\t('>>', operator.rshift),\n",
"\t('<<', operator.lshift),\n",
"\t('-', operator.sub),\n",
"\t('+', operator.add),\n",
"\t('%', operator.mod),\n",
"\t('/', operator.truediv),\n",
"\t('*', operator.mul),\n",
"]\n",
"_ASSIGN_OPERATORS = [(op + '=', opfunc) for op, opfunc in _OPERATORS]\n",
"_ASSIGN_OPERATORS.append(('=', lambda cur, right: right))\n",
"\n",
"_NAME_RE = r'[a-zA-Z_$][a-zA-Z_$0-9]*'\n",
"\n",
"class JSInterpreter(object):\n",
"\tdef __init__(self, code, objects=None):\n",
"\t\tif objects is None:\n",
"\t\t\tobjects = {}\n",
"\t\tself.code = code\n",
"\t\tself._functions = {}\n",
"\t\tself._objects = objects\n",
"\n",
"\tdef interpret_statement(self, stmt, local_vars, allow_recursion=100):\n",
"\t\tif allow_recursion < 0:\n",
"\t\t\traise extractorError('Recursion limit reached')\n",
"\n",
"\t\tshould_abort = False\n",
"\t\tstmt = stmt.lstrip()\n",
"\t\tstmt_m = re.match(r'var\\s', stmt)\n",
"\t\tif stmt_m:\n",
"\t\t\texpr = stmt[len(stmt_m.group(0)):]\n",
"\t\telse:\n",
"\t\t\treturn_m = re.match(r'return(?:\\s+|$)', stmt)\n",
"\t\t\tif return_m:\n",
"\t\t\t\texpr = stmt[len(return_m.group(0)):]\n",
"\t\t\t\tshould_abort = True\n",
"\t\t\telse:\n",
"\t\t\t\t# Try interpreting it as an expression\n",
"\t\t\t\texpr = stmt\n",
"\n",
"\t\tv = self.interpret_expression(expr, local_vars, allow_recursion)\n",
"\t\treturn v, should_abort\n",
"\n",
"\tdef interpret_expression(self, expr, local_vars, allow_recursion):\n",
"\t\texpr = expr.strip()\n",
"\n",
"\t\tif expr == '': # Empty expression\n",
"\t\t\treturn None\n",
"\n",
"\t\tif expr.startswith('('):\n",
"\t\t\tparens_count = 0\n",
"\t\t\tfor m in re.finditer(r'[()]', expr):\n",
"\t\t\t\tif m.group(0) == '(':\n",
"\t\t\t\t\tparens_count += 1\n",
"\t\t\t\telse:\n",
"\t\t\t\t\tparens_count -= 1\n",
"\t\t\t\t\tif parens_count == 0:\n",
"\t\t\t\t\t\tsub_expr = expr[1:m.start()]\n",
"\t\t\t\t\t\tsub_result = self.interpret_expression(\n",
"\t\t\t\t\t\t\tsub_expr, local_vars, allow_recursion)\n",
"\t\t\t\t\t\tremaining_expr = expr[m.end():].strip()\n",
"\t\t\t\t\t\tif not remaining_expr:\n",
"\t\t\t\t\t\t\treturn sub_result\n",
"\t\t\t\t\t\telse:\n",
"\t\t\t\t\t\t\texpr = json.dumps(sub_result) + remaining_expr\n",
"\t\t\t\t\t\tbreak\n",
"\t\t\telse:\n",
"\t\t\t\traise extractorError('Premature end of parens in %r' % expr)\n",
"\n",
"\t\tfor op, opfunc in _ASSIGN_OPERATORS:\n",
"\t\t\tm = re.match(r'''(?x)\n",
"\t\t\t\t(?P<out>%s)(?:\\[(?P<index>[^\\]]+?)\\])?\n",
"\t\t\t\t\\s*%s\n",
"\t\t\t\t(?P<expr>.*)$''' % (_NAME_RE, re.escape(op)), expr)\n",
"\t\t\tif not m:\n",
"\t\t\t\tcontinue\n",
"\t\t\tright_val = self.interpret_expression(\n",
"\t\t\t\tm.group('expr'), local_vars, allow_recursion - 1)\n",
"\n",
"\t\t\tif m.groupdict().get('index'):\n",
"\t\t\t\tlvar = local_vars[m.group('out')]\n",
"\t\t\t\tidx = self.interpret_expression(\n",
"\t\t\t\t\tm.group('index'), local_vars, allow_recursion)\n",
"\t\t\t\tassert isinstance(idx, int)\n",
"\t\t\t\tcur = lvar[idx]\n",
"\t\t\t\tval = opfunc(cur, right_val)\n",
"\t\t\t\tlvar[idx] = val\n",
"\t\t\t\treturn val\n",
"\t\t\telse:\n",
"\t\t\t\tcur = local_vars.get(m.group('out'))\n",
"\t\t\t\tval = opfunc(cur, right_val)\n",
"\t\t\t\tlocal_vars[m.group('out')] = val\n",
"\t\t\t\treturn val\n",
"\n",
"\t\tif expr.isdigit():\n",
"\t\t\treturn int(expr)\n",
"\n",
"\t\tvar_m = re.match(\n",
"\t\t\tr'(?!if|return|true|false)(?P<name>%s)$' % _NAME_RE,\n",
"\t\t\texpr)\n",
"\t\tif var_m:\n",
"\t\t\treturn local_vars[var_m.group('name')]\n",
"\n",
"\t\ttry:\n",
"\t\t\treturn json.loads(expr)\n",
"\t\texcept ValueError:\n",
"\t\t\tpass\n",
"\n",
"\t\tm = re.match(\n",
"\t\t\tr'(?P<var>%s)\\.(?P<member>[^(]+)(?:\\(+(?P<args>[^()]*)\\))?$' % _NAME_RE,\n",
"\t\t\texpr)\n",
"\t\tif m:\n",
"\t\t\tvariable = m.group('var')\n",
"\t\t\tmember = m.group('member')\n",
"\t\t\targ_str = m.group('args')\n",
"\n",
"\t\t\tif variable in local_vars:\n",
"\t\t\t\tobj = local_vars[variable]\n",
"\t\t\telse:\n",
"\t\t\t\tif variable not in self._objects:\n",
"\t\t\t\t\tself._objects[variable] = self.extract_object(variable)\n",
"\t\t\t\tobj = self._objects[variable]\n",
"\n",
"\t\t\tif arg_str is None:\n",
"\t\t\t\t# Member access\n",
"\t\t\t\tif member == 'length':\n",
"\t\t\t\t\treturn len(obj)\n",
"\t\t\t\treturn obj[member]\n",
"\n",
"\t\t\tassert expr.endswith(')')\n",
"\t\t\t# Function call\n",
"\t\t\tif arg_str == '':\n",
"\t\t\t\targvals = tuple()\n",
"\t\t\telse:\n",
"\t\t\t\targvals = tuple([\n",
"\t\t\t\t\tself.interpret_expression(v, local_vars, allow_recursion)\n",
"\t\t\t\t\tfor v in arg_str.split(',')])\n",
"\n",
"\t\t\tif member == 'split':\n",
"\t\t\t\tassert argvals == ('',)\n",
"\t\t\t\treturn list(obj)\n",
"\t\t\tif member == 'join':\n",
"\t\t\t\tassert len(argvals) == 1\n",
"\t\t\t\treturn argvals[0].join(obj)\n",
"\t\t\tif member == 'reverse':\n",
"\t\t\t\tassert len(argvals) == 0\n",
"\t\t\t\tobj.reverse()\n",
"\t\t\t\treturn obj\n",
"\t\t\tif member == 'slice':\n",
"\t\t\t\tassert len(argvals) == 1\n",
"\t\t\t\treturn obj[argvals[0]:]\n",
"\t\t\tif member == 'splice':\n",
"\t\t\t\tassert isinstance(obj, list)\n",
"\t\t\t\tindex, howMany = argvals\n",
"\t\t\t\tres = []\n",
"\t\t\t\tfor i in range(index, min(index + howMany, len(obj))):\n",
"\t\t\t\t\tres.append(obj.pop(index))\n",
"\t\t\t\treturn res\n",
"\n",
"\t\t\treturn obj[member](argvals)\n",
"\n",
"\t\tm = re.match(\n",
"\t\t\tr'(?P<in>%s)\\[(?P<idx>.+)\\]$' % _NAME_RE, expr)\n",
"\t\tif m:\n",
"\t\t\tval = local_vars[m.group('in')]\n",
"\t\t\tidx = self.interpret_expression(\n",
"\t\t\t\tm.group('idx'), local_vars, allow_recursion - 1)\n",
"\t\t\treturn val[idx]\n",
"\n",
"\t\tfor op, opfunc in _OPERATORS:\n",
"\t\t\tm = re.match(r'(?P<x>.+?)%s(?P<y>.+)' % re.escape(op), expr)\n",
"\t\t\tif not m:\n",
"\t\t\t\tcontinue\n",
"\t\t\tx, abort = self.interpret_statement(\n",
"\t\t\t\tm.group('x'), local_vars, allow_recursion - 1)\n",
"\t\t\tif abort:\n",
"\t\t\t\traise extractorError(\n",
"\t\t\t\t\t'Premature left-side return of %s in %r' % (op, expr))\n",
"\t\t\ty, abort = self.interpret_statement(\n",
"\t\t\t\tm.group('y'), local_vars, allow_recursion - 1)\n",
"\t\t\tif abort:\n",
"\t\t\t\traise extractorError(\n",
"\t\t\t\t\t'Premature right-side return of %s in %r' % (op, expr))\n",
"\t\t\treturn opfunc(x, y)\n",
"\n",
"\t\tm = re.match(\n",
"\t\t\tr'^(?P<func>%s)\\((?P<args>[a-zA-Z0-9_$,]+)\\)$' % _NAME_RE, expr)\n",
"\t\tif m:\n",
"\t\t\tfname = m.group('func')\n",
"\t\t\targvals = tuple([\n",
"\t\t\t\tint(v) if v.isdigit() else local_vars[v]\n",
"\t\t\t\tfor v in m.group('args').split(',')])\n",
"\t\t\tif fname not in self._functions:\n",
"\t\t\t\tself._functions[fname] = self.extract_function(fname)\n",
"\t\t\treturn self._functions[fname](argvals)\n",
"\n",
"\t\traise extractorError('Unsupported JS expression %r' % expr)\n",
"\n",
"\tdef extract_object(self, objname):\n",
"\t\tobj = {}\n",
"\t\tobj_m = re.search(\n",
"\t\t\t(r'(?:var\\s+)?%s\\s*=\\s*\\{' % re.escape(objname)) +\n",
"\t\t\tr'\\s*(?P<fields>([a-zA-Z$0-9]+\\s*:\\s*function\\(.*?\\)\\s*\\{.*?\\})*)' +\n",
"\t\t\tr'\\}\\s*;',\n",
"\t\t\tself.code)\n",
"\t\tfields = obj_m.group('fields')\n",
"\t\t# Currently, it only supports function definitions\n",
"\t\tfields_m = re.finditer(\n",
"\t\t\tr'(?P<key>[a-zA-Z$0-9]+)\\s*:\\s*function'\n",
"\t\t\tr'\\((?P<args>[a-z,]+)\\){(?P<code>[^}]+)}',\n",
"\t\t\tfields)\n",
"\t\tfor f in fields_m:\n",
"\t\t\targnames = f.group('args').split(',')\n",
"\t\t\tobj[f.group('key')] = self.build_function(argnames, f.group('code'))\n",
"\n",
"\t\treturn obj\n",
"\n",
"\tdef extract_function(self, funcname):\n",
"\t\tfunc_m = re.search(\n",
"\t\t\tr'''(?x)\n",
"\t\t\t\t(?:function\\s+%s|[{;]%s\\s*=\\s*function)\\s*\n",
"\t\t\t\t\\((?P<args>[^)]*)\\)\\s*\n",
"\t\t\t\t\\{(?P<code>[^}]+)\\}''' % (\n",
"\t\t\t\tre.escape(funcname), re.escape(funcname)),\n",
"\t\t\tself.code)\n",
"\t\tif func_m is None:\n",
"\t\t\traise extractorError('Could not find JS function %r' % funcname)\n",
"\t\targnames = func_m.group('args').split(',')\n",
"\n",
"\t\treturn self.build_function(argnames, func_m.group('code'))\n",
"\n",
"\tdef call_function(self, funcname, *args):\n",
"\t\tf = self.extract_function(funcname)\n",
"\t\treturn f(args)\n",
"\n",
"\tdef build_function(self, argnames, code):\n",
"\t\tdef resf(args):\n",
"\t\t\tlocal_vars = dict(zip(argnames, args))\n",
"\t\t\tfor stmt in code.split(';'):\n",
"\t\t\t\tres, abort = self.interpret_statement(stmt, local_vars)\n",
"\t\t\t\tif abort:\n",
"\t\t\t\t\tbreak\n",
"\t\t\treturn res\n",
"\t\treturn resf\n",
"\n",
"class CVevoSignAlgoExtractor:\n",
"\t# MAX RECURSION Depth for security\n",
"\tMAX_REC_DEPTH = 5\n",
"\n",
"\tdef __init__(self):\n",
"\t\tself._player_cache = {}\n",
"\t\tself._cleanTmpVariables()\n",
"\n",
"\tdef _cleanTmpVariables(self):\n",
"\t\tself.playerData = ''\n",
"\t\t\n",
"\tdef decryptSignature(self, s, playerUrl):\n",
"\t\t# clear local data\n",
"\t\tself._cleanTmpVariables()\n",
"\n",
"\t\tid_m = re.match(\n",
"\t\t\tr'.*?-(?P<id>[a-zA-Z0-9_-]+)(?:/watch_as3|/html5player)?\\.(?P<ext>[a-z]+)$',\n",
"\t\t\tplayerUrl)\n",
"\t\tif not id_m:\n",
"\t\t\traise extractorError('Cannot identify player %r' % playerUrl)\n",
"\n",
"\t\tplayer_type = id_m.group('ext')\n",
"\t\tif player_type != 'js':\n",
"\t\t\traise extractorError('Invalid player type %r' % player_type)\n",
"\n",
"\t\t# use algoCache\n",
"\t\tslen = len(s)\n",
"\t\tplayer_id = (playerUrl, self._signature_cache_id(s))\n",
"\t\t#if playerUrl not in self.algoCache:\n",
"\t\tif player_id not in self._player_cache:\n",
"\t\t\t# get player HTML 5 sript\n",
"\t\t\ttry:\n",
"\t\t\t\tself.playerData = urllib2.urlopen(playerUrl).read()\n",
"\t\t\t\tencoding = 'utf-8'\n",
"\t\t\t\ttry:\n",
"\t\t\t\t\tself.playerData = self.playerData.decode(encoding, 'replace')\n",
"\t\t\t\texcept LookupError:\n",
"\t\t\t\t\tself.playerData = self.playerData.decode('utf-8', 'replace')\n",
"\t\t\texcept Exception as e:\n",
"\t\t\t\tprint('[CVevoSignAlgoExtractor] Unable to download playerUrl webpage')\n",
"\t\t\t\tprint exception_err(e)\n",
"\t\t\t\tself._cleanTmpVariables()\n",
"\t\t\t\treturn ''\n",
"\n",
"\t\t\ttry:\n",
"\t\t\t\tfunc = self._parse_sig_js()\n",
"\t\t\t\ttest_string = u''.join(map(compat_chr, range(slen)))\n",
"\t\t\t\tcache_res = func(test_string)\n",
"\t\t\t\tcache_spec = [ord(c) for c in cache_res]\n",
"\t\t\t\tself._player_cache[player_id] = func\n",
"\t\t\texcept Exception as e:\n",
"\t\t\t\tprint exception_err(e)\n",
"\t\t\t\tself._cleanTmpVariables()\n",
"\t\t\t\treturn ''\n",
"\n",
"\t\tfunc = self._player_cache[player_id]\n",
"\t\ts_out = func(s)\n",
"\n",
"\t\t# free not needed data\n",
"\t\tself._cleanTmpVariables()\n",
"\n",
"\t\treturn s_out\n",
"\n",
"\tdef _parse_sig_js(self):\n",
"\t\tfuncname = self._search_regex(\n",
"\t\t\tr'\\.sig\\|\\|([a-zA-Z0-9$]+)\\(', self.playerData)\n",
"\n",
"\t\tjsi = JSInterpreter(self.playerData)\n",
"\t\tinitial_function = jsi.extract_function(funcname)\n",
"\t\treturn lambda s: initial_function([s])\n",
"\n",
"\tdef _search_regex(self, pattern, string):\n",
"\t\t\"\"\"\n",
"\t\tPerform a regex search on the given string, using a single or a list of\n",
"\t\tpatterns returning the first matching group.\n",
"\t\tIn case of failure return a default value or raise a WARNING or a\n",
"\t\tRegexNotFoundError, depending on fatal, specifying the field name.\n",
"\t\t\"\"\"\n",
"\t\tmobj = re.search(pattern, string, 0)\n",
"\t\tif mobj:\n",
"\t\t\t# return the first matching group\n",
"\t\t\treturn next(g for g in mobj.groups() if g is not None)\n",
"\t\telse:\n",
"\t\t\tprint '[CVevoSignAlgoExtractor] Unable to extract'\n",
"\t\t\treturn None\n",
"\n",
"\tdef _signature_cache_id(self, example_sig):\n",
"\t\t\"\"\" Return a string representation of a signature \"\"\"\n",
"\t\treturn u'.'.join(compat_str(len(part)) for part in example_sig.split('.'))\n",
"\n",
"decryptor = CVevoSignAlgoExtractor()\n"
] | [
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015151515151515152,
0,
0.027777777777777776,
0.03571428571428571,
0.022222222222222223,
0.1,
0.03333333333333333,
0.043478260869565216,
0.0196078431372549,
0,
0,
0,
0.045454545454545456,
0.045454545454545456,
0.043478260869565216,
0.038461538461538464,
0.038461538461538464,
0.045454545454545456,
0.045454545454545456,
0.045454545454545456,
0.038461538461538464,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.024390243902439025,
0.045454545454545456,
0.0625,
0.05263157894736842,
0.043478260869565216,
0.038461538461538464,
0,
0.014084507042253521,
0.038461538461538464,
0.0196078431372549,
0,
0.043478260869565216,
0.043478260869565216,
0.027777777777777776,
0.07692307692307693,
0.02631578947368421,
0.125,
0.02040816326530612,
0.0625,
0.024390243902439025,
0.041666666666666664,
0.1111111111111111,
0.023255813953488372,
0.0625,
0,
0.014925373134328358,
0.04,
0,
0.014705882352941176,
0.045454545454545456,
0,
0.02702702702702703,
0.06666666666666667,
0,
0.037037037037037035,
0.05,
0.025,
0.038461538461538464,
0.043478260869565216,
0.1,
0.043478260869565216,
0.037037037037037035,
0.02857142857142857,
0.021739130434782608,
0.021739130434782608,
0.021739130434782608,
0.034482758620689655,
0.04,
0.08333333333333333,
0.018518518518518517,
0.08333333333333333,
0.1111111111111111,
0.015384615384615385,
0,
0.02564102564102564,
0.04,
0.023255813953488372,
0.1,
0.017857142857142856,
0.07692307692307693,
0.07692307692307693,
0.023809523809523808,
0.018518518518518517,
0,
0.029411764705882353,
0.02631578947368421,
0.02702702702702703,
0.019230769230769232,
0.03125,
0.05,
0.030303030303030304,
0.05,
0.06666666666666667,
0.1111111111111111,
0.024390243902439025,
0.030303030303030304,
0.02702702702702703,
0.06666666666666667,
0,
0.047619047619047616,
0.05,
0,
0.05,
0.017857142857142856,
0.1111111111111111,
0.08333333333333333,
0.023809523809523808,
0,
0.14285714285714285,
0.037037037037037035,
0.047619047619047616,
0.125,
0,
0.0625,
0.013157894736842105,
0.1111111111111111,
0.125,
0.034482758620689655,
0.03333333333333333,
0.034482758620689655,
0,
0.03333333333333333,
0.03225806451612903,
0.1111111111111111,
0.02631578947368421,
0.01639344262295082,
0.029411764705882353,
0,
0.043478260869565216,
0.05,
0.037037037037037035,
0.047619047619047616,
0.043478260869565216,
0,
0.034482758620689655,
0.05263157894736842,
0.047619047619047616,
0.045454545454545456,
0.1111111111111111,
0.045454545454545456,
0.015873015873015872,
0.02857142857142857,
0,
0.04,
0.03571428571428571,
0.047619047619047616,
0.041666666666666664,
0.034482758620689655,
0.03125,
0.037037037037037035,
0.034482758620689655,
0.05555555555555555,
0.06666666666666667,
0.04,
0.034482758620689655,
0.03571428571428571,
0.038461538461538464,
0.030303030303030304,
0.034482758620689655,
0.07692307692307693,
0.01694915254237288,
0.03125,
0.06666666666666667,
0,
0.03225806451612903,
0,
0.0625,
0.0196078431372549,
0.125,
0.02857142857142857,
0.027777777777777776,
0.018867924528301886,
0.05263157894736842,
0,
0.03125,
0.015625,
0.07692307692307693,
0.07692307692307693,
0.025,
0.0196078431372549,
0.07692307692307693,
0.038461538461538464,
0.016666666666666666,
0.025,
0.0196078431372549,
0.07692307692307693,
0.038461538461538464,
0.01639344262295082,
0.043478260869565216,
0,
0.0625,
0.014705882352941176,
0.125,
0.037037037037037035,
0.047619047619047616,
0.022222222222222223,
0.023809523809523808,
0.027777777777777776,
0.017241379310344827,
0.023809523809523808,
0,
0.016129032258064516,
0,
0.027777777777777776,
0.09090909090909091,
0.047619047619047616,
0.018518518518518517,
0.013888888888888888,
0.07142857142857142,
0.07142857142857142,
0.030303030303030304,
0.018867924528301886,
0.038461538461538464,
0.022727272727272728,
0.021739130434782608,
0.09090909090909091,
0.047619047619047616,
0.024390243902439025,
0.013888888888888888,
0,
0.07692307692307693,
0,
0.02564102564102564,
0.045454545454545456,
0.08333333333333333,
0.02127659574468085,
0.037037037037037035,
0.03225806451612903,
0.02127659574468085,
0.07142857142857142,
0.047619047619047616,
0.014705882352941176,
0.022222222222222223,
0,
0.01639344262295082,
0,
0.023255813953488372,
0.02631578947368421,
0.058823529411764705,
0,
0.023255813953488372,
0.05555555555555555,
0.023809523809523808,
0.03125,
0.016666666666666666,
0.07142857142857142,
0.09090909090909091,
0.07142857142857142,
0.07142857142857142,
0,
0.03333333333333333,
0.027777777777777776,
0.05263157894736842,
0,
0.047619047619047616,
0.038461538461538464,
0.03571428571428571,
0,
0.03225806451612903,
0.043478260869565216,
0.6666666666666666,
0.023255813953488372,
0.047619047619047616,
0.03571428571428571,
0,
0.05263157894736842,
0.0125,
0.07142857142857142,
0.06666666666666667,
0.015384615384615385,
0,
0.029411764705882353,
0.038461538461538464,
0.015625,
0,
0.05555555555555555,
0.0625,
0.01818181818181818,
0.05128205128205128,
0.023809523809523808,
0.034482758620689655,
0.125,
0.017857142857142856,
0.043478260869565216,
0.1111111111111111,
0.014925373134328358,
0.041666666666666664,
0.015151515151515152,
0.038461538461538464,
0.013333333333333334,
0.037037037037037035,
0.03333333333333333,
0.07142857142857142,
0,
0.125,
0.03125,
0.017543859649122806,
0.029411764705882353,
0.022222222222222223,
0.024390243902439025,
0.038461538461538464,
0.037037037037037035,
0.03333333333333333,
0.07142857142857142,
0,
0.02564102564102564,
0.05555555555555555,
0,
0.04,
0.03571428571428571,
0,
0.06666666666666667,
0,
0.038461538461538464,
0.030303030303030304,
0.0196078431372549,
0,
0.02564102564102564,
0.019230769230769232,
0.024390243902439025,
0,
0.023255813953488372,
0.16666666666666666,
0.013513513513513514,
0.02127659574468085,
0.014705882352941176,
0.014492753623188406,
0.16666666666666666,
0.02564102564102564,
0.09090909090909091,
0.02702702702702703,
0.017241379310344827,
0.125,
0.018518518518518517,
0.06666666666666667,
0,
0.022222222222222223,
0.017857142857142856,
0.012987012987012988,
0,
0.02702702702702703
] | 365 | 0.037565 | false |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('animeindex')
# http://tracker.anime-index.org/index.php?page=torrent-details&id=b8327fdf9003e87446c8b3601951a9a65526abb2
# http://tracker.anime-index.org/download.php?id=b8327fdf9003e87446c8b3601951a9a65526abb2&f=[DeadFish]%20Yowamushi%20Pedal:%20Grande%20Road%20-%2002%20[720p][AAC].mp4.torrent
class UrlRewriteAnimeIndex(object):
"""AnimeIndex urlrewriter."""
def url_rewritable(self, task, entry):
return entry['url'].startswith('http://tracker.anime-index.org/index.php?page=torrent-details&id=')
def url_rewrite(self, task, entry):
entry['url'] = entry['url'].replace('index.php?page=torrent-details&', 'download.php?')
entry['url'] += '&f=%s.torrent' % (quote(entry['title'], safe=''))
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteAnimeIndex, 'animeindex', groups=['urlrewriter'], api_ver=2)
| [
"from __future__ import unicode_literals, division, absolute_import\n",
"from builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n",
"from future.moves.urllib.parse import quote\n",
"\n",
"import logging\n",
"\n",
"from flexget import plugin\n",
"from flexget.event import event\n",
"\n",
"log = logging.getLogger('animeindex')\n",
"\n",
"\n",
"# http://tracker.anime-index.org/index.php?page=torrent-details&id=b8327fdf9003e87446c8b3601951a9a65526abb2\n",
"# http://tracker.anime-index.org/download.php?id=b8327fdf9003e87446c8b3601951a9a65526abb2&f=[DeadFish]%20Yowamushi%20Pedal:%20Grande%20Road%20-%2002%20[720p][AAC].mp4.torrent\n",
"\n",
"\n",
"class UrlRewriteAnimeIndex(object):\n",
" \"\"\"AnimeIndex urlrewriter.\"\"\"\n",
"\n",
" def url_rewritable(self, task, entry):\n",
" return entry['url'].startswith('http://tracker.anime-index.org/index.php?page=torrent-details&id=')\n",
"\n",
" def url_rewrite(self, task, entry):\n",
" entry['url'] = entry['url'].replace('index.php?page=torrent-details&', 'download.php?')\n",
" entry['url'] += '&f=%s.torrent' % (quote(entry['title'], safe=''))\n",
"\n",
"\n",
"@event('plugin.register')\n",
"def register_plugin():\n",
" plugin.register(UrlRewriteAnimeIndex, 'animeindex', groups=['urlrewriter'], api_ver=2)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0.01098901098901099
] | 30 | 0.001022 | false |
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
SAPI 5 engine classes
============================================================================
"""
#---------------------------------------------------------------------------
import time
import win32con
from ctypes import *
from win32com.client import Dispatch, getevents, constants
from win32com.client.gencache import EnsureDispatch
from pywintypes import com_error
from ..base import EngineBase, EngineError
from .compiler import Sapi5Compiler
from .dictation import Sapi5DictationContainer
from .recobs import Sapi5RecObsManager
#from .timer import NatlinkTimerManager
from ...grammar.state import State
from ...windows.window import Window
#===========================================================================
class POINT(Structure):
_fields_ = [('x', c_long),
('y', c_long)]
class MSG(Structure):
_fields_ = [('hwnd', c_int),
('message', c_uint),
('wParam', c_int),
('lParam', c_int),
('time', c_int),
('pt', POINT)]
#===========================================================================
class Sapi5SharedEngine(EngineBase):
""" Speech recognition engine back-end for SAPI 5 shared recognizer. """
_name = "sapi5shared"
_recognizer_dispatch_name = "SAPI.SpSharedRecognizer"
DictationContainer = Sapi5DictationContainer
#-----------------------------------------------------------------------
def __init__(self):
EngineBase.__init__(self)
EnsureDispatch(self._recognizer_dispatch_name)
EnsureDispatch("SAPI.SpVoice")
self._recognizer = None
self._speaker = None
self._compiler = None
self._recognition_observer_manager = Sapi5RecObsManager(self)
# self._timer_manager = NatlinkTimerManager(0.02, self)
def connect(self):
""" Connect to back-end SR engine. """
self._recognizer = Dispatch(self._recognizer_dispatch_name)
self._speaker = Dispatch("SAPI.SpVoice")
self._compiler = Sapi5Compiler()
def disconnect(self):
""" Disconnect from back-end SR engine. """
self._recognizer = None
self._speaker = None
self._compiler = None
#-----------------------------------------------------------------------
# Methods for working with grammars.
def _load_grammar(self, grammar):
""" Load the given *grammar*. """
self._log.debug("Loading grammar %s." % grammar.name)
if not self._recognizer:
self.connect()
grammar.engine = self
# Dependency checking.
memo = []
for r in grammar._rules:
for d in r.dependencies(memo):
grammar.add_dependency(d)
# Create recognition context, compile grammar, and create
# the grammar wrapper object for managing this grammar.
context = self._recognizer.CreateRecoContext()
handle = self._compiler.compile_grammar(grammar, context)
wrapper = GrammarWrapper(grammar, handle, context, self)
handle.State = constants.SGSEnabled
for rule in grammar.rules:
handle.CmdSetRuleState(rule.name, constants.SGDSActive)
# self.activate_grammar(grammar)
# for l in grammar.lists:
# l._update()
handle.CmdSetRuleState("_FakeRule", constants.SGDSActive)
return wrapper
def _unload_grammar(self, grammar, wrapper):
""" Unload the given *grammar*. """
try:
wrapper.handle.State = constants.SGSDisabled
except Exception, e:
self._log.exception("Failed to unload grammar %s: %s."
% (grammar, e))
def activate_grammar(self, grammar):
""" Activate the given *grammar*. """
self._log.debug("Activating grammar %s." % grammar.name)
grammar_handle = self._get_grammar_wrapper(grammar).handle
grammar_handle.State = constants.SGSEnabled
def deactivate_grammar(self, grammar):
""" Deactivate the given *grammar*. """
self._log.debug("Deactivating grammar %s." % grammar.name)
grammar_handle = self._get_grammar_wrapper(grammar).handle
grammar_handle.State = constants.SGSDisabled
def activate_rule(self, rule, grammar):
""" Activate the given *rule*. """
self._log.debug("Activating rule %s in grammar %s."
% (rule.name, grammar.name))
grammar_handle = self._get_grammar_wrapper(grammar).handle
grammar_handle.CmdSetRuleState(rule.name, constants.SGDSActive)
def deactivate_rule(self, rule, grammar):
""" Deactivate the given *rule*. """
self._log.debug("Deactivating rule %s in grammar %s."
% (rule.name, grammar.name))
grammar_handle = self._get_grammar_wrapper(grammar).handle
grammar_handle.CmdSetRuleState(rule.name, constants.SGDSInactive)
def update_list(self, lst, grammar):
grammar_handle = self._get_grammar_wrapper(grammar).handle
list_rule_name = "__list_%s" % lst.name
rule_handle = grammar_handle.Rules.FindRule(list_rule_name)
rule_handle.Clear()
src_state = rule_handle.InitialState
dst_state = None
for item in lst.get_list_items():
src_state.AddWordTransition(dst_state, item)
grammar_handle.Rules.Commit()
def set_exclusiveness(self, grammar, exclusive):
self._log.debug("Setting exclusiveness of grammar %s to %s."
% (grammar.name, exclusive))
grammar_handle = self._get_grammar_wrapper(grammar).handle
grammar_handle.State = constants.SGSExclusive
# grammar_handle.SetGrammarState(constants.SPGS_EXCLUSIVE)
#-----------------------------------------------------------------------
# Miscellaneous methods.
def mimic(self, words):
""" Mimic a recognition of the given *words*. """
if isinstance(words, basestring):
phrase = words
else:
phrase = " ".join(words)
self._recognizer.EmulateRecognition(phrase)
def speak(self, text):
""" Speak the given *text* using text-to-speech. """
self._speaker.Speak(text)
def _get_language(self):
return "en"
def wait_for_recognition(self, timeout=None):
NULL = c_int(win32con.NULL)
if timeout != None:
begin_time = time.time()
timed_out = False
windll.user32.SetTimer(NULL, NULL, int(timeout * 1000), NULL)
message = MSG()
message_pointer = pointer(message)
while (not timeout) or (time.time() - begin_time < timeout):
self._log.error("loop")
if windll.user32.GetMessageW(message_pointer, NULL, 0, 0) == 0:
msg = str(WinError())
self._log.error("GetMessageW() failed: %s" % msg)
raise EngineError("GetMessageW() failed: %s" % msg)
if message.message == win32con.WM_TIMER:
self._log.error("loop, timeout")
# A timer message means this loop has timed out.
timed_out = True
break
else:
self._log.error("loop, dispatch")
# Process other messages as normal.
windll.user32.TranslateMessage(message_pointer)
windll.user32.DispatchMessageW(message_pointer)
return not timed_out
#---------------------------------------------------------------------------
# Make the shared engine available as Sapi5Engine, for backwards
# compatibility.
Sapi5Engine = Sapi5SharedEngine
#===========================================================================
class Sapi5InProcEngine(Sapi5SharedEngine):
"""
Speech recognition engine back-end for SAPI 5 in process
recognizer.
"""
_name = "sapi5inproc"
_recognizer_dispatch_name = "SAPI.SpInProcRecognizer"
def connect(self, audio_source=0):
"""
Connect to the speech recognition backend.
The audio source to use for speech recognition can be
specified using the *audio_source* argument. If it is not
given, it defaults to the first audio source found.
"""
Sapi5SharedEngine.connect(self)
self.select_audio_source(audio_source)
def get_audio_sources(self):
"""
Get the available audio sources.
This method returns a list of audio sources, each represented
by a 3-element tuple: the index, the description, and the COM
handle for the audio source.
"""
available_sources = self._recognizer.GetAudioInputs()
audio_sources_list = []
for index, item in enumerate(collection_iter(available_sources)):
audio_sources_list.append((index, item.GetDescription(), item))
return audio_sources_list
def select_audio_source(self, audio_source):
"""
Configure the speech recognition engine to use the given
audio source.
The audio source may be specified as follows:
- As an *int* specifying the index of the audio source to use
- As a *str* containing the description of the audio source
to use, or a substring thereof
This class' method *get_audio_sources()* can be used to
retrieve the available sources together with their indices
and descriptions.
"""
available_sources = self._recognizer.GetAudioInputs()
if isinstance(audio_source, (int, long)):
# Parameter is the index of the source to use.
if 0 <= audio_source < available_sources.Count:
selected_source = available_sources.Item(audio_source)
else:
raise EngineError("Invalid audio source index: %r"
" (%s sources available, so index must be"
" in range 0 to %s)"
% (audio_source, available_sources.Count,
available_sources.Count - 1))
elif isinstance(audio_source, basestring):
for item in collection_iter(available_sources):
if audio_source in item.GetDescription():
selected_source = item
break
else:
raise EngineError("Audio source not found: %r"
% (audio_source))
else:
raise EngineError("Invalid audio source qualifier: %r"
% (audio_source))
self._log.info("Selecting audio source: %r"
% (selected_source.GetDescription(),))
self._recognizer.AudioInput = selected_source
#---------------------------------------------------------------------------
# Utility generator function for iterating over COM collections.
def collection_iter(collection):
if not collection:
return
for index in xrange(0, collection.Count):
yield collection.Item(index)
#---------------------------------------------------------------------------
class GrammarWrapper(object):
def __init__(self, grammar, handle, context, engine):
self.grammar = grammar
self.handle = handle
self.engine = engine
self.context = context
# Register callback functions which will handle recognizer events.
base = getevents("SAPI.SpSharedRecoContext")
class ContextEvents(base): pass
c = ContextEvents(context)
c.OnPhraseStart = self.phrase_start_callback
c.OnRecognition = self.recognition_callback
if hasattr(grammar, "process_recognition_other"):
c.OnRecognitionForOtherContext = self.recognition_other_callback
if hasattr(grammar, "process_recognition_failure"):
c.OnFalseRecognition = self.recognition_failure_callback
def phrase_start_callback(self, stream_number, stream_position):
window = Window.get_foreground()
self.grammar.process_begin(window.executable, window.title,
window.handle)
def recognition_callback(self, StreamNumber, StreamPosition, RecognitionType, Result):
try:
newResult = Dispatch(Result)
phrase_info = newResult.PhraseInfo
rule_name = phrase_info.Rule.Name
#---------------------------------------------------------------
# Build a list of rule names for each element.
# First populate it with the top level rule name.
element = phrase_info.Rule
name = element.Name
start = element.FirstElement
count = element.NumberOfElements
rule_names = [name] * count
# Walk the tree of child rules and put their names in the list.
stack = [collection_iter(phrase_info.Rule.Children)]
while stack:
try: element = stack[-1].next()
except StopIteration: stack.pop(); continue
name = element.Name
start = element.FirstElement
count = element.NumberOfElements
rule_names[start:start + count] = [name] * count
if element.Children:
stack.append(collection_iter(element.Children))
#---------------------------------------------------------------
# Prepare the words and rule names for the element parsers.
replacements = [False] * len(rule_names)
if phrase_info.Replacements:
for replacement in collection_iter(phrase_info.Replacements):
begin = replacement.FirstElement
end = begin + replacement.NumberOfElements
replacements[begin] = replacement.Text
for index in range(begin + 1, end):
replacements[index] = True
results = []
rule_set = list(set(rule_names))
elements = phrase_info.Elements
for index in range(len(rule_names)):
element = elements.Item(index)
rule_id = rule_set.index(rule_names[index])
replacement = replacements[index]
info = [element.LexicalForm, rule_id,
element.DisplayText, element.DisplayAttributes,
replacement]
results.append(info)
#---------------------------------------------------------------
# Attempt to parse the recognition.
func = getattr(self.grammar, "process_recognition", None)
if func:
words = [r[2] for r in results]
if not func(words):
return
s = State(results, rule_set, self.engine)
for r in self.grammar._rules:
if r.name != rule_name:
continue
s.initialize_decoding()
for result in r.decode(s):
if s.finished():
root = s.build_parse_tree()
r.process_recognition(root)
return
except Exception, e:
Sapi5Engine._log.error("Grammar %s: exception: %s"
% (self.grammar._name, e), exc_info=True)
#-------------------------------------------------------------------
# If this point is reached, then the recognition was not
# processed successfully..
self.engine._log.error("Grammar %s: failed to decode"
" recognition %r."
% (self.grammar._name,
[r[0] for r in results]))
def recognition_other_callback(self, StreamNumber, StreamPosition):
func = getattr(self.grammar, "process_recognition_other", None)
if func:
# Note that SAPI 5.3 doesn't offer access to the actual
# recognition contents during a
# OnRecognitionForOtherContext event.
func(words=False)
return
def recognition_failure_callback(self, StreamNumber, StreamPosition, Result):
func = getattr(self.grammar, "process_recognition_failure", None)
if func:
func()
return
| [
"#\r\n",
"# This file is part of Dragonfly.\r\n",
"# (c) Copyright 2007, 2008 by Christo Butcher\r\n",
"# Licensed under the LGPL.\r\n",
"#\r\n",
"# Dragonfly is free software: you can redistribute it and/or modify it \r\n",
"# under the terms of the GNU Lesser General Public License as published \r\n",
"# by the Free Software Foundation, either version 3 of the License, or \r\n",
"# (at your option) any later version.\r\n",
"#\r\n",
"# Dragonfly is distributed in the hope that it will be useful, but \r\n",
"# WITHOUT ANY WARRANTY; without even the implied warranty of \r\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU \r\n",
"# Lesser General Public License for more details.\r\n",
"#\r\n",
"# You should have received a copy of the GNU Lesser General Public \r\n",
"# License along with Dragonfly. If not, see \r\n",
"# <http://www.gnu.org/licenses/>.\r\n",
"#\r\n",
"\r\n",
"\"\"\"\r\n",
"SAPI 5 engine classes\r\n",
"============================================================================\r\n",
"\r\n",
"\"\"\"\r\n",
"\r\n",
"\r\n",
"#---------------------------------------------------------------------------\r\n",
"\r\n",
"import time\r\n",
"import win32con\r\n",
"from ctypes import *\r\n",
"\r\n",
"from win32com.client import Dispatch, getevents, constants\r\n",
"from win32com.client.gencache import EnsureDispatch\r\n",
"from pywintypes import com_error\r\n",
"\r\n",
"from ..base import EngineBase, EngineError\r\n",
"from .compiler import Sapi5Compiler\r\n",
"from .dictation import Sapi5DictationContainer\r\n",
"from .recobs import Sapi5RecObsManager\r\n",
"#from .timer import NatlinkTimerManager\r\n",
"from ...grammar.state import State\r\n",
"from ...windows.window import Window\r\n",
"\r\n",
"\r\n",
"#===========================================================================\r\n",
"\r\n",
"class POINT(Structure):\r\n",
" _fields_ = [('x', c_long),\r\n",
" ('y', c_long)]\r\n",
"\r\n",
"class MSG(Structure):\r\n",
" _fields_ = [('hwnd', c_int),\r\n",
" ('message', c_uint),\r\n",
" ('wParam', c_int),\r\n",
" ('lParam', c_int),\r\n",
" ('time', c_int),\r\n",
" ('pt', POINT)]\r\n",
"\r\n",
"\r\n",
"#===========================================================================\r\n",
"\r\n",
"class Sapi5SharedEngine(EngineBase):\r\n",
" \"\"\" Speech recognition engine back-end for SAPI 5 shared recognizer. \"\"\"\r\n",
"\r\n",
" _name = \"sapi5shared\"\r\n",
" _recognizer_dispatch_name = \"SAPI.SpSharedRecognizer\"\r\n",
" DictationContainer = Sapi5DictationContainer\r\n",
"\r\n",
" #-----------------------------------------------------------------------\r\n",
"\r\n",
" def __init__(self):\r\n",
" EngineBase.__init__(self)\r\n",
"\r\n",
" EnsureDispatch(self._recognizer_dispatch_name)\r\n",
" EnsureDispatch(\"SAPI.SpVoice\")\r\n",
" self._recognizer = None\r\n",
" self._speaker = None\r\n",
" self._compiler = None\r\n",
"\r\n",
" self._recognition_observer_manager = Sapi5RecObsManager(self)\r\n",
"# self._timer_manager = NatlinkTimerManager(0.02, self)\r\n",
"\r\n",
" def connect(self):\r\n",
" \"\"\" Connect to back-end SR engine. \"\"\"\r\n",
" self._recognizer = Dispatch(self._recognizer_dispatch_name)\r\n",
" self._speaker = Dispatch(\"SAPI.SpVoice\")\r\n",
" self._compiler = Sapi5Compiler()\r\n",
"\r\n",
" def disconnect(self):\r\n",
" \"\"\" Disconnect from back-end SR engine. \"\"\"\r\n",
" self._recognizer = None\r\n",
" self._speaker = None\r\n",
" self._compiler = None\r\n",
"\r\n",
" #-----------------------------------------------------------------------\r\n",
" # Methods for working with grammars.\r\n",
"\r\n",
" def _load_grammar(self, grammar):\r\n",
" \"\"\" Load the given *grammar*. \"\"\"\r\n",
" self._log.debug(\"Loading grammar %s.\" % grammar.name)\r\n",
" if not self._recognizer:\r\n",
" self.connect()\r\n",
"\r\n",
" grammar.engine = self\r\n",
"\r\n",
" # Dependency checking.\r\n",
" memo = []\r\n",
" for r in grammar._rules:\r\n",
" for d in r.dependencies(memo):\r\n",
" grammar.add_dependency(d)\r\n",
"\r\n",
" # Create recognition context, compile grammar, and create\r\n",
" # the grammar wrapper object for managing this grammar.\r\n",
" context = self._recognizer.CreateRecoContext()\r\n",
" handle = self._compiler.compile_grammar(grammar, context)\r\n",
" wrapper = GrammarWrapper(grammar, handle, context, self)\r\n",
"\r\n",
" handle.State = constants.SGSEnabled\r\n",
" for rule in grammar.rules:\r\n",
" handle.CmdSetRuleState(rule.name, constants.SGDSActive)\r\n",
"# self.activate_grammar(grammar)\r\n",
"# for l in grammar.lists:\r\n",
"# l._update()\r\n",
" handle.CmdSetRuleState(\"_FakeRule\", constants.SGDSActive)\r\n",
"\r\n",
" return wrapper\r\n",
"\r\n",
" def _unload_grammar(self, grammar, wrapper):\r\n",
" \"\"\" Unload the given *grammar*. \"\"\"\r\n",
" try:\r\n",
" wrapper.handle.State = constants.SGSDisabled\r\n",
" except Exception, e:\r\n",
" self._log.exception(\"Failed to unload grammar %s: %s.\"\r\n",
" % (grammar, e))\r\n",
"\r\n",
" def activate_grammar(self, grammar):\r\n",
" \"\"\" Activate the given *grammar*. \"\"\"\r\n",
" self._log.debug(\"Activating grammar %s.\" % grammar.name)\r\n",
" grammar_handle = self._get_grammar_wrapper(grammar).handle\r\n",
" grammar_handle.State = constants.SGSEnabled\r\n",
"\r\n",
" def deactivate_grammar(self, grammar):\r\n",
" \"\"\" Deactivate the given *grammar*. \"\"\"\r\n",
" self._log.debug(\"Deactivating grammar %s.\" % grammar.name)\r\n",
" grammar_handle = self._get_grammar_wrapper(grammar).handle\r\n",
" grammar_handle.State = constants.SGSDisabled\r\n",
"\r\n",
" def activate_rule(self, rule, grammar):\r\n",
" \"\"\" Activate the given *rule*. \"\"\"\r\n",
" self._log.debug(\"Activating rule %s in grammar %s.\"\r\n",
" % (rule.name, grammar.name))\r\n",
" grammar_handle = self._get_grammar_wrapper(grammar).handle\r\n",
" grammar_handle.CmdSetRuleState(rule.name, constants.SGDSActive)\r\n",
"\r\n",
" def deactivate_rule(self, rule, grammar):\r\n",
" \"\"\" Deactivate the given *rule*. \"\"\"\r\n",
" self._log.debug(\"Deactivating rule %s in grammar %s.\"\r\n",
" % (rule.name, grammar.name))\r\n",
" grammar_handle = self._get_grammar_wrapper(grammar).handle\r\n",
" grammar_handle.CmdSetRuleState(rule.name, constants.SGDSInactive)\r\n",
"\r\n",
" def update_list(self, lst, grammar):\r\n",
" grammar_handle = self._get_grammar_wrapper(grammar).handle\r\n",
" list_rule_name = \"__list_%s\" % lst.name\r\n",
" rule_handle = grammar_handle.Rules.FindRule(list_rule_name)\r\n",
"\r\n",
" rule_handle.Clear()\r\n",
" src_state = rule_handle.InitialState\r\n",
" dst_state = None\r\n",
" for item in lst.get_list_items():\r\n",
" src_state.AddWordTransition(dst_state, item)\r\n",
"\r\n",
" grammar_handle.Rules.Commit()\r\n",
"\r\n",
" def set_exclusiveness(self, grammar, exclusive):\r\n",
" self._log.debug(\"Setting exclusiveness of grammar %s to %s.\"\r\n",
" % (grammar.name, exclusive))\r\n",
" grammar_handle = self._get_grammar_wrapper(grammar).handle\r\n",
" grammar_handle.State = constants.SGSExclusive\r\n",
"# grammar_handle.SetGrammarState(constants.SPGS_EXCLUSIVE)\r\n",
"\r\n",
"\r\n",
" #-----------------------------------------------------------------------\r\n",
" # Miscellaneous methods.\r\n",
"\r\n",
" def mimic(self, words):\r\n",
" \"\"\" Mimic a recognition of the given *words*. \"\"\"\r\n",
" if isinstance(words, basestring):\r\n",
" phrase = words\r\n",
" else:\r\n",
" phrase = \" \".join(words)\r\n",
" self._recognizer.EmulateRecognition(phrase)\r\n",
"\r\n",
" def speak(self, text):\r\n",
" \"\"\" Speak the given *text* using text-to-speech. \"\"\"\r\n",
" self._speaker.Speak(text)\r\n",
"\r\n",
" def _get_language(self):\r\n",
" return \"en\"\r\n",
"\r\n",
" def wait_for_recognition(self, timeout=None):\r\n",
" NULL = c_int(win32con.NULL)\r\n",
" if timeout != None:\r\n",
" begin_time = time.time()\r\n",
" timed_out = False\r\n",
" windll.user32.SetTimer(NULL, NULL, int(timeout * 1000), NULL)\r\n",
" \r\n",
" message = MSG()\r\n",
" message_pointer = pointer(message)\r\n",
"\r\n",
" while (not timeout) or (time.time() - begin_time < timeout):\r\n",
" self._log.error(\"loop\")\r\n",
" if windll.user32.GetMessageW(message_pointer, NULL, 0, 0) == 0:\r\n",
" msg = str(WinError())\r\n",
" self._log.error(\"GetMessageW() failed: %s\" % msg)\r\n",
" raise EngineError(\"GetMessageW() failed: %s\" % msg)\r\n",
"\r\n",
" if message.message == win32con.WM_TIMER:\r\n",
" self._log.error(\"loop, timeout\")\r\n",
" # A timer message means this loop has timed out.\r\n",
" timed_out = True\r\n",
" break\r\n",
" else:\r\n",
" self._log.error(\"loop, dispatch\")\r\n",
" # Process other messages as normal.\r\n",
" windll.user32.TranslateMessage(message_pointer)\r\n",
" windll.user32.DispatchMessageW(message_pointer)\r\n",
"\r\n",
" return not timed_out\r\n",
"\r\n",
"\r\n",
"#---------------------------------------------------------------------------\r\n",
"# Make the shared engine available as Sapi5Engine, for backwards\r\n",
"# compatibility.\r\n",
"\r\n",
"Sapi5Engine = Sapi5SharedEngine\r\n",
"\r\n",
"\r\n",
"#===========================================================================\r\n",
"\r\n",
"class Sapi5InProcEngine(Sapi5SharedEngine):\r\n",
" \"\"\"\r\n",
" Speech recognition engine back-end for SAPI 5 in process\r\n",
" recognizer.\r\n",
"\r\n",
" \"\"\"\r\n",
"\r\n",
" _name = \"sapi5inproc\"\r\n",
" _recognizer_dispatch_name = \"SAPI.SpInProcRecognizer\"\r\n",
"\r\n",
" def connect(self, audio_source=0):\r\n",
" \"\"\"\r\n",
" Connect to the speech recognition backend.\r\n",
"\r\n",
" The audio source to use for speech recognition can be\r\n",
" specified using the *audio_source* argument. If it is not\r\n",
" given, it defaults to the first audio source found.\r\n",
"\r\n",
" \"\"\"\r\n",
"\r\n",
" Sapi5SharedEngine.connect(self)\r\n",
" self.select_audio_source(audio_source)\r\n",
"\r\n",
" def get_audio_sources(self):\r\n",
" \"\"\"\r\n",
" Get the available audio sources.\r\n",
"\r\n",
" This method returns a list of audio sources, each represented\r\n",
" by a 3-element tuple: the index, the description, and the COM\r\n",
" handle for the audio source.\r\n",
"\r\n",
" \"\"\"\r\n",
"\r\n",
" available_sources = self._recognizer.GetAudioInputs()\r\n",
" audio_sources_list = []\r\n",
" for index, item in enumerate(collection_iter(available_sources)):\r\n",
" audio_sources_list.append((index, item.GetDescription(), item))\r\n",
" return audio_sources_list\r\n",
"\r\n",
" def select_audio_source(self, audio_source):\r\n",
" \"\"\"\r\n",
" Configure the speech recognition engine to use the given\r\n",
" audio source.\r\n",
"\r\n",
" The audio source may be specified as follows:\r\n",
" - As an *int* specifying the index of the audio source to use\r\n",
" - As a *str* containing the description of the audio source\r\n",
" to use, or a substring thereof\r\n",
"\r\n",
" This class' method *get_audio_sources()* can be used to\r\n",
" retrieve the available sources together with their indices\r\n",
" and descriptions.\r\n",
"\r\n",
" \"\"\"\r\n",
"\r\n",
" available_sources = self._recognizer.GetAudioInputs()\r\n",
"\r\n",
" if isinstance(audio_source, (int, long)):\r\n",
" # Parameter is the index of the source to use.\r\n",
" if 0 <= audio_source < available_sources.Count:\r\n",
" selected_source = available_sources.Item(audio_source)\r\n",
" else:\r\n",
" raise EngineError(\"Invalid audio source index: %r\"\r\n",
" \" (%s sources available, so index must be\"\r\n",
" \" in range 0 to %s)\"\r\n",
" % (audio_source, available_sources.Count,\r\n",
" available_sources.Count - 1))\r\n",
"\r\n",
" elif isinstance(audio_source, basestring):\r\n",
" for item in collection_iter(available_sources):\r\n",
" if audio_source in item.GetDescription():\r\n",
" selected_source = item\r\n",
" break\r\n",
" else:\r\n",
" raise EngineError(\"Audio source not found: %r\"\r\n",
" % (audio_source))\r\n",
"\r\n",
" else:\r\n",
" raise EngineError(\"Invalid audio source qualifier: %r\"\r\n",
" % (audio_source))\r\n",
"\r\n",
" self._log.info(\"Selecting audio source: %r\"\r\n",
" % (selected_source.GetDescription(),))\r\n",
" self._recognizer.AudioInput = selected_source\r\n",
"\r\n",
"\r\n",
"#---------------------------------------------------------------------------\r\n",
"# Utility generator function for iterating over COM collections.\r\n",
"\r\n",
"def collection_iter(collection):\r\n",
" if not collection:\r\n",
" return\r\n",
" for index in xrange(0, collection.Count):\r\n",
" yield collection.Item(index)\r\n",
"\r\n",
"\r\n",
"#---------------------------------------------------------------------------\r\n",
"\r\n",
"class GrammarWrapper(object):\r\n",
"\r\n",
" def __init__(self, grammar, handle, context, engine):\r\n",
" self.grammar = grammar\r\n",
" self.handle = handle\r\n",
" self.engine = engine\r\n",
" self.context = context\r\n",
"\r\n",
" # Register callback functions which will handle recognizer events.\r\n",
" base = getevents(\"SAPI.SpSharedRecoContext\")\r\n",
" class ContextEvents(base): pass\r\n",
" c = ContextEvents(context)\r\n",
" c.OnPhraseStart = self.phrase_start_callback\r\n",
" c.OnRecognition = self.recognition_callback\r\n",
" if hasattr(grammar, \"process_recognition_other\"):\r\n",
" c.OnRecognitionForOtherContext = self.recognition_other_callback\r\n",
" if hasattr(grammar, \"process_recognition_failure\"):\r\n",
" c.OnFalseRecognition = self.recognition_failure_callback\r\n",
"\r\n",
" def phrase_start_callback(self, stream_number, stream_position):\r\n",
" window = Window.get_foreground()\r\n",
" self.grammar.process_begin(window.executable, window.title,\r\n",
" window.handle)\r\n",
"\r\n",
" def recognition_callback(self, StreamNumber, StreamPosition, RecognitionType, Result):\r\n",
" try:\r\n",
" newResult = Dispatch(Result)\r\n",
" phrase_info = newResult.PhraseInfo\r\n",
" rule_name = phrase_info.Rule.Name\r\n",
"\r\n",
" #---------------------------------------------------------------\r\n",
" # Build a list of rule names for each element.\r\n",
"\r\n",
" # First populate it with the top level rule name.\r\n",
" element = phrase_info.Rule\r\n",
" name = element.Name\r\n",
" start = element.FirstElement\r\n",
" count = element.NumberOfElements\r\n",
" rule_names = [name] * count\r\n",
"\r\n",
" # Walk the tree of child rules and put their names in the list.\r\n",
" stack = [collection_iter(phrase_info.Rule.Children)]\r\n",
" while stack:\r\n",
" try: element = stack[-1].next()\r\n",
" except StopIteration: stack.pop(); continue\r\n",
" name = element.Name\r\n",
" start = element.FirstElement\r\n",
" count = element.NumberOfElements\r\n",
" rule_names[start:start + count] = [name] * count\r\n",
" if element.Children:\r\n",
" stack.append(collection_iter(element.Children))\r\n",
"\r\n",
" #---------------------------------------------------------------\r\n",
" # Prepare the words and rule names for the element parsers.\r\n",
"\r\n",
" replacements = [False] * len(rule_names)\r\n",
" if phrase_info.Replacements:\r\n",
" for replacement in collection_iter(phrase_info.Replacements):\r\n",
" begin = replacement.FirstElement\r\n",
" end = begin + replacement.NumberOfElements\r\n",
" replacements[begin] = replacement.Text\r\n",
" for index in range(begin + 1, end):\r\n",
" replacements[index] = True\r\n",
"\r\n",
" results = []\r\n",
" rule_set = list(set(rule_names))\r\n",
" elements = phrase_info.Elements\r\n",
" for index in range(len(rule_names)):\r\n",
" element = elements.Item(index)\r\n",
" rule_id = rule_set.index(rule_names[index])\r\n",
" replacement = replacements[index]\r\n",
" info = [element.LexicalForm, rule_id,\r\n",
" element.DisplayText, element.DisplayAttributes,\r\n",
" replacement]\r\n",
" results.append(info)\r\n",
"\r\n",
" #---------------------------------------------------------------\r\n",
" # Attempt to parse the recognition.\r\n",
"\r\n",
" func = getattr(self.grammar, \"process_recognition\", None)\r\n",
" if func:\r\n",
" words = [r[2] for r in results]\r\n",
" if not func(words):\r\n",
" return\r\n",
"\r\n",
" s = State(results, rule_set, self.engine)\r\n",
" for r in self.grammar._rules:\r\n",
" if r.name != rule_name:\r\n",
" continue\r\n",
" s.initialize_decoding()\r\n",
" for result in r.decode(s):\r\n",
" if s.finished():\r\n",
" root = s.build_parse_tree()\r\n",
" r.process_recognition(root)\r\n",
" return\r\n",
"\r\n",
" except Exception, e:\r\n",
" Sapi5Engine._log.error(\"Grammar %s: exception: %s\"\r\n",
" % (self.grammar._name, e), exc_info=True)\r\n",
"\r\n",
" #-------------------------------------------------------------------\r\n",
" # If this point is reached, then the recognition was not\r\n",
" # processed successfully..\r\n",
"\r\n",
" self.engine._log.error(\"Grammar %s: failed to decode\"\r\n",
" \" recognition %r.\"\r\n",
" % (self.grammar._name,\r\n",
" [r[0] for r in results]))\r\n",
"\r\n",
" def recognition_other_callback(self, StreamNumber, StreamPosition):\r\n",
" func = getattr(self.grammar, \"process_recognition_other\", None)\r\n",
" if func:\r\n",
" # Note that SAPI 5.3 doesn't offer access to the actual\r\n",
" # recognition contents during a\r\n",
" # OnRecognitionForOtherContext event.\r\n",
" func(words=False)\r\n",
" return\r\n",
"\r\n",
" def recognition_failure_callback(self, StreamNumber, StreamPosition, Result):\r\n",
" func = getattr(self.grammar, \"process_recognition_failure\", None)\r\n",
" if func:\r\n",
" func()\r\n",
" return\r\n"
] | [
0,
0,
0,
0,
0,
0.013333333333333334,
0.013157894736842105,
0.013333333333333334,
0,
0,
0.014084507042253521,
0.015384615384615385,
0.013888888888888888,
0,
0,
0.014084507042253521,
0.02040816326530612,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0.014285714285714285,
0.018518518518518517,
0.02040816326530612,
0,
0.015873015873015872,
0.018867924528301886,
0.015873015873015872,
0.017241379310344827,
0.016666666666666666,
0.022222222222222223,
0.021739130434782608,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0.029411764705882353,
0.029411764705882353,
0,
0,
0,
0,
0,
0,
0.014285714285714285,
0.018518518518518517,
0.022222222222222223,
0,
0,
0,
0.029411764705882353,
0.029411764705882353,
0.029411764705882353,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0.16666666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02040816326530612,
0.03278688524590164,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012987012987012988,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0.012658227848101266,
0,
0,
0
] | 463 | 0.002437 | false |
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Playback action
============================================================================
The :class:`Playback` action mimics a sequence of recognitions. This is
for example useful for repeating a series of prerecorded or predefined
voice-commands.
This class could for example be used to reload with one single action::
action = Playback([
(["focus", "Natlink"], 1.0),
(["File"], 0.5),
(["Reload"], 0.0),
])
action.execute()
Mimic quirks
----------------------------------------------------------------------------
Some SR engine back-ends have confusing :meth:`engine.mimic` method
behavior. See the engine-specific mimic method documentation in sections
under :ref:`RefEngines` for more information.
Class reference
----------------------------------------------------------------------------
"""
from time import sleep
from .action_base import ActionBase, ActionError
from ..engines import get_engine
#---------------------------------------------------------------------------
class Playback(ActionBase):
    """Action that replays a series of recognitions by mimicking them."""

    def __init__(self, series, speed=1):
        """
        Create a playback action.

        :param series: sequence of ``(word_list, interval)`` 2-tuples; each
            *word_list* is mimicked in order and execution then pauses for
            *interval* seconds (before speed scaling).
        :param speed: factor by which playback is sped up; each interval is
            divided by this number.
        """
        ActionBase.__init__(self)
        self._series = tuple(series)
        self._speed = float(speed)
        self._str = str([words for words, _interval in self._series])

    @property
    def speed(self):
        """Factor to speed up playback."""
        return self._speed

    @speed.setter
    def speed(self, value):
        self._speed = float(value)

    def _execute(self, data=None):
        engine = get_engine()
        for phrase, pause in self._series:
            # Replay one recognition, then wait out the (scaled) interval.
            self._log.debug("Mimicking recognition: %r", phrase)
            try:
                engine.mimic(phrase)
                if pause and self._speed:
                    sleep(pause / self._speed)
            except Exception as e:
                raise ActionError("Playback failed: %s" % e)
| [
"#\n",
"# This file is part of Dragonfly.\n",
"# (c) Copyright 2007, 2008 by Christo Butcher\n",
"# Licensed under the LGPL.\n",
"#\n",
"# Dragonfly is free software: you can redistribute it and/or modify it\n",
"# under the terms of the GNU Lesser General Public License as published\n",
"# by the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# Dragonfly is distributed in the hope that it will be useful, but\n",
"# WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n",
"# Lesser General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU Lesser General Public\n",
"# License along with Dragonfly. If not, see\n",
"# <http://www.gnu.org/licenses/>.\n",
"#\n",
"\n",
"\"\"\"\n",
"Playback action\n",
"============================================================================\n",
"\n",
"The :class:`Playback` action mimics a sequence of recognitions. This is\n",
"for example useful for repeating a series of prerecorded or predefined\n",
"voice-commands.\n",
"\n",
"This class could for example be used to reload with one single action::\n",
"\n",
" action = Playback([\n",
" ([\"focus\", \"Natlink\"], 1.0),\n",
" ([\"File\"], 0.5),\n",
" ([\"Reload\"], 0.0),\n",
" ])\n",
" action.execute()\n",
"\n",
"\n",
"Mimic quirks\n",
"----------------------------------------------------------------------------\n",
"\n",
"Some SR engine back-ends have confusing :meth:`engine.mimic` method\n",
"behavior. See the engine-specific mimic method documentation in sections\n",
"under :ref:`RefEngines` for more information.\n",
"\n",
"\n",
"Class reference\n",
"----------------------------------------------------------------------------\n",
"\n",
"\"\"\"\n",
"\n",
"from time import sleep\n",
"from .action_base import ActionBase, ActionError\n",
"from ..engines import get_engine\n",
"\n",
"\n",
"#---------------------------------------------------------------------------\n",
"\n",
"class Playback(ActionBase):\n",
" \"\"\" Playback a series of recognitions. \"\"\"\n",
"\n",
" def __init__(self, series, speed=1):\n",
" \"\"\"\n",
" Constructor arguments:\n",
" - *series* (sequence of 2-tuples) --\n",
" the recognitions to playback. Each element must be a\n",
" 2-tuple of the form *([\"words\", \"two\", \"mimic\"], interval)*,\n",
" where *interval* is a float giving the number of seconds to\n",
" pause after the given words are mimicked.\n",
" - *speed* (*float*) --\n",
" the factor by which to speed up playback. The intervals\n",
" after each mimic are divided by this number.\n",
"\n",
" \"\"\"\n",
" ActionBase.__init__(self)\n",
" self._series = tuple(series)\n",
" self._speed = float(speed)\n",
" self._str = str([w for w, i in self._series])\n",
"\n",
" def _get_speed(self):\n",
" return self._speed\n",
" def _set_speed(self, speed):\n",
" self._speed = float(speed)\n",
" speed = property(fget=_get_speed, fset=_set_speed,\n",
" doc=\"Factor to speed up playback.\")\n",
"\n",
" def _execute(self, data=None):\n",
" engine = get_engine()\n",
"\n",
" # Mimic the series of recognitions.\n",
" for words, interval in self._series:\n",
" self._log.debug(\"Mimicking recognition: %r\", words)\n",
" try:\n",
" engine.mimic(words)\n",
" if interval and self._speed:\n",
" sleep(interval / self._speed)\n",
" except Exception as e:\n",
" raise ActionError(\"Playback failed: %s\" % e)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0.018518518518518517,
0.024390243902439025,
0,
0,
0.012987012987012988,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 98 | 0.001163 | false |
"""
provides classes for obtaining information about how frequently individual concepts or categories
have been mentioned in news articles (if source == "news") or in social media (if source == "social")
"""
from eventregistry.Base import *
from eventregistry.ReturnInfo import *
class CountsBase(QueryParamsBase):
    """Shared base class for the counter queries defined in this module."""

    def _getPath(self):
        """Return the API path that all counter requests are sent to."""
        return "/api/v1/counters"
class GetCounts(CountsBase):
    def __init__(self,
                 uriOrUriList,
                 source = "news",
                 type = "concept",
                 dateStart = None,
                 dateEnd = None,
                 returnInfo = None):
        """
        Obtain information about how frequently a concept or category is
        mentioned in the articles on particular dates.
        By specifying source="custom" one can obtain counts for custom concepts,
        such as stocks, macroeconomic indicators, etc. The uri for these can be
        found using the EventRegistry.getCustomConceptUri() method.
        Usage example:
            q = GetCounts([er.getConceptUri("Obama"), er.getConceptUri("ebola")])
            ret = er.execQuery(q)

        @param uriOrUriList: concept/category uri or a list of uris
        @param source: input source information from which to compute top trends.
            Options: "news", "social", "custom", "geo" or "sentiment"
        @param type: what do the uris represent? "concept" or "category"
        @param dateStart: starting date from which to provide counts onwards
            (either None, datetime.date or "YYYY-MM-DD")
        @param dateEnd: ending date until which to provide counts
            (either None, datetime.date or "YYYY-MM-DD")
        @param returnInfo: what details should be included in the returned
            information; a fresh ReturnInfo() is used when None
        """
        CountsBase.__init__(self)
        self._setVal("action", "getCounts")
        self._setVal("source", source)
        self._setVal("type", type)
        # Build the default lazily: a ``returnInfo = ReturnInfo()`` default
        # would be a single instance shared by every call to __init__.
        if returnInfo is None:
            returnInfo = ReturnInfo()
        self._update(returnInfo.getParams())
        self._setVal("uri", uriOrUriList)
        # PEP 8: compare against None with ``is not``, not ``!=``.
        if dateStart is not None:
            self._setDateVal("dateStart", dateStart)
        if dateEnd is not None:
            self._setDateVal("dateEnd", dateEnd)
class GetCountsEx(CountsBase):
    def __init__(self,
                 uriOrUriList,
                 source = "news",
                 type = "concept",
                 dateStart = None,
                 dateEnd = None,
                 returnInfo = None):
        """
        Obtain information about how frequently a concept or category is
        mentioned in the articles on particular dates.
        Similar to GetCounts, but the output is more friendly for a larger set
        of provided uris/ids at once.
        Usage example:
            q = GetCountsEx(type = "category")
            q.queryById(range(10))  # return trends of first 10 categories
            ret = er.execQuery(q)

        @param uriOrUriList: concept/category uri or a list of uris
        @param source: input source information from which to compute top trends.
            Options: "news", "social"
        @param type: what do the uris represent? "concept" or "category"
        @param dateStart: starting date from which to provide counts onwards
            (either None, datetime.date or "YYYY-MM-DD")
        @param dateEnd: ending date until which to provide counts
            (either None, datetime.date or "YYYY-MM-DD")
        @param returnInfo: what details should be included in the returned
            information; a fresh ReturnInfo() is used when None
        """
        CountsBase.__init__(self)
        self._setVal("action", "getCountsEx")
        self._setVal("source", source)
        self._setVal("type", type)
        # Build the default lazily: a ``returnInfo = ReturnInfo()`` default
        # would be a single instance shared by every call to __init__.
        if returnInfo is None:
            returnInfo = ReturnInfo()
        self._update(returnInfo.getParams())
        self._setVal("uri", uriOrUriList)
        # PEP 8: compare against None with ``is not``, not ``!=``.
        if dateStart is not None:
            self._setDateVal("dateStart", dateStart)
        if dateEnd is not None:
            self._setDateVal("dateEnd", dateEnd)
| [
"\"\"\"\n",
"provides classes for obtaining information about how frequently individual concepts or categories\n",
"have been mentioned in news articles (if source == \"news\") of in social media (if source == \"social\")\n",
"\"\"\"\n",
"\n",
"from eventregistry.Base import *\n",
"from eventregistry.ReturnInfo import *\n",
"\n",
"\n",
"class CountsBase(QueryParamsBase):\n",
" def _getPath(self):\n",
" return \"/api/v1/counters\"\n",
"\n",
"\n",
"\n",
"class GetCounts(CountsBase):\n",
" def __init__(self,\n",
" uriOrUriList,\n",
" source = \"news\",\n",
" type = \"concept\",\n",
" dateStart = None,\n",
" dateEnd = None,\n",
" returnInfo = ReturnInfo()):\n",
" \"\"\"\n",
" obtain information about how frequently a concept or category is mentioned in the articles on particular dates\n",
" by specifying source=\"custom\" one can obtain counts for custom concepts, such as stocks, macroeconomic indicators, etc. The uri\n",
" for these can be found using EventRegistry.getCustomConceptUri() method.\n",
" Usage example:\n",
" q = GetCounts([er.getConceptUri(\"Obama\"), er.getConceptUri(\"ebola\")])\n",
" ret = er.execQuery(q)\n",
" Return object:\n",
" {\n",
" \"http://en.wikipedia.org/wiki/Barack_Obama\": [\n",
" {\n",
" \"count\": 1,\n",
" \"date\": \"2015-05-07\"\n",
" },\n",
" {\n",
" \"count\": 4,\n",
" \"date\": \"2015-05-08\"\n",
" },\n",
" ...\n",
" ],\n",
" \"http://en.wikipedia.org/wiki/Ebola_virus_disease\": [\n",
" {\n",
" \"count\": 0,\n",
" \"date\": \"2015-05-07\"\n",
" },\n",
" ...\n",
" ]\n",
" }\n",
"\n",
" @param uriOrUriList: concept/category uri or a list of uris\n",
" @param source: input source information from which to compute top trends. Options: \"news\", \"social\", \"custom\", \"geo\" or \"sentiment\"\n",
" @param type: what do the uris represent? \"concept\" or \"category\"\n",
" @param dateStart: starting date from which to provide counts onwards (either None, datetime.date or \"YYYY-MM-DD\")\n",
" @param dateEnd: ending date until which to provide counts (either None, datetime.date or \"YYYY-MM-DD\")\n",
" @param returnInfo: what details should be included in the returned information\n",
" \"\"\"\n",
" CountsBase.__init__(self)\n",
" self._setVal(\"action\", \"getCounts\")\n",
" self._setVal(\"source\", source)\n",
" self._setVal(\"type\", type)\n",
" self._update(returnInfo.getParams())\n",
" self._setVal(\"uri\", uriOrUriList)\n",
" if dateStart != None:\n",
" self._setDateVal(\"dateStart\", dateStart)\n",
" if dateEnd != None:\n",
" self._setDateVal(\"dateEnd\", dateEnd)\n",
"\n",
"\n",
"\n",
"class GetCountsEx(CountsBase):\n",
" def __init__(self,\n",
" uriOrUriList,\n",
" source = \"news\",\n",
" type = \"concept\",\n",
" dateStart = None,\n",
" dateEnd = None,\n",
" returnInfo = ReturnInfo()):\n",
" \"\"\"\n",
" obtain information about how frequently a concept or category is mentioned in the articles on particular dates\n",
" Similar to GetCounts, but the output is more friendly for a larger set of provided uris/ids at once\n",
" Usage example:\n",
" q = GetCountsEx(type = \"category\")\n",
" q.queryById(range(10)) # return trends of first 10 categories\n",
" ret = er.execQuery(q)\n",
" Return object:\n",
" {\n",
" \"categoryInfo\": [\n",
" {\n",
" \"id\": 0,\n",
" \"label\": \"Root\",\n",
" \"uri\": \"http://www.dmoz.org\"\n",
" },\n",
" {\n",
" \"id\": 1,\n",
" \"label\": \"Recreation\",\n",
" \"uri\": \"http://www.dmoz.org/Recreation\"\n",
" },\n",
" ...\n",
" ],\n",
" \"counts\": [\n",
" {\n",
" \"0\": 23, \"1\": 42, \"2\": 52, \"3\": 32, \"4\": 21, \"5\": 65, \"6\": 32, \"7\": 654, \"8\": 1, \"9\": 34,\n",
" \"date\": \"2015-05-07\"\n",
" },\n",
" ...\n",
" ]\n",
" }\n",
"\n",
" @param uriOrUriList: concept/category uri or a list of uris\n",
" @param source: input source information from which to compute top trends. Options: \"news\", \"social\"\n",
" @param type: what do the uris represent? \"concept\" or \"category\"\n",
" @param dateStart: starting date from which to provide counts onwards (either None, datetime.date or \"YYYY-MM-DD\")\n",
" @param dateEnd: ending date until which to provide counts (either None, datetime.date or \"YYYY-MM-DD\")\n",
" @param returnInfo: what details should be included in the returned information\n",
" \"\"\"\n",
" CountsBase.__init__(self)\n",
" self._setVal(\"action\", \"getCountsEx\")\n",
" self._setVal(\"source\", source)\n",
" self._setVal(\"type\", type)\n",
" self._update(returnInfo.getParams())\n",
" self._setVal(\"uri\", uriOrUriList)\n",
" if dateStart != None:\n",
" self._setDateVal(\"dateStart\", dateStart)\n",
" if dateEnd != None:\n",
" self._setDateVal(\"dateEnd\", dateEnd)\n"
] | [
0,
0.01020408163265306,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0.058823529411764705,
0.05714285714285714,
0.05714285714285714,
0.06060606060606061,
0.044444444444444446,
0,
0.008403361344537815,
0.007352941176470588,
0.012345679012345678,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007142857142857143,
0,
0.00819672131147541,
0.009009009009009009,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0.03571428571428571,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0.058823529411764705,
0.05714285714285714,
0.05714285714285714,
0.06060606060606061,
0.044444444444444446,
0,
0.008403361344537815,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008771929824561403,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0.00819672131147541,
0.009009009009009009,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0.03571428571428571,
0
] | 128 | 0.007201 | false |
import json
from bottle import route, get, post, put, delete, request, response, abort
from modules.utils.dal.common.dtos.metadataUtils import MetadataEncoder
from modules.utils.customEncoder import CustomEncoder
from modules.utils.dal.common import utils as dalUtils
from modules.utils.dal import dataProviderDto
# Module-level service DTO shared by every route below; it bundles the client
# metadata (metadataCli) and the CRUD API provider (apiProviderDto).
dataServiceDto = dataProviderDto.createDataServiceInstance()
# GET: api/datasource/crud/metadata
@get('/api/datasource/crud/metadata')
def get_metadata():
    """Return the client metadata describing the exposed entity sets as JSON."""
    try:
        response.content_type = "application/json; charset=utf-8"
        metadataCli = dataServiceDto.metadataCli
        return json.dumps(metadataCli, cls=MetadataEncoder, indent=2)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(500, 'Internal server error')
# GET: api/datasource/crud/:entitySetName?skip=20&top=10
@get('/api/datasource/crud/<entitySetName>')
def get_entityset(entitySetName):
    """Return entities from the named set; paging options come from the query string."""
    try:
        resultSerialResponse = dataServiceDto.apiProviderDto.handleGet(entitySetName, request.query)
        response.content_type = "application/json; charset=utf-8"
        return json.dumps(resultSerialResponse, cls=CustomEncoder, indent=2)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(400, 'Bad Request')
# GET: api/datasource/crud/single/:entitySetName?keys=key1:{key1}
@get('/api/datasource/crud/single/<entitySetName>')
def get_single_entityset(entitySetName):
    """Return a single entity identified by the key values in the query string."""
    try:
        resultSingleSerialData = dataServiceDto.apiProviderDto.handleGetSingle(entitySetName, request.query)
        response.content_type = "application/json; charset=utf-8"
        return json.dumps(resultSingleSerialData, cls=CustomEncoder, indent=2)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(400, 'Bad Request')
# GET: api/datasource/crud/many/:entitySetName?keys=key1:1,2,3,4;key2:4,5,6,7
@get('/api/datasource/crud/many/<entitySetName>')
def get_many_entityset(entitySetName):
    """Return multiple entities selected by the key lists in the query string."""
    try:
        resultSerialData = dataServiceDto.apiProviderDto.handleGetMany(entitySetName, request.query)
        response.content_type = "application/json; charset=utf-8"
        return json.dumps(resultSerialData, cls=CustomEncoder, indent=2)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(400, 'Bad Request')
# PUT: api/datasource/crud/:entitySetName?keys=key1:{key1}
@put('/api/datasource/crud/<entitySetName>')
def put_entityset(entitySetName):
    """Update the entity identified by the query-string keys with the JSON body."""
    try:
        resultSingleSerialData = dataServiceDto.apiProviderDto.handleUpdateEntity(entitySetName, request.query, request.json)
        response.content_type = "application/json; charset=utf-8"
        return json.dumps(resultSingleSerialData, cls=CustomEncoder)
    except dalUtils.StatusCodeError as err:
        # The data layer signalled an explicit HTTP status code.
        response.status = err.value
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(400, 'Bad Request')
# PATCH: api/datasource/crud/:entitySetName?keys=key1:{key1}
#@patch('/api/datasource/crud/<entitySetName>')
@route('/api/datasource/crud/<entitySetName>', method='PATCH')
def patch_entityset(entitySetName):
    """Partially update the entity identified by the query-string keys."""
    try:
        resultSingleSerialData = dataServiceDto.apiProviderDto.handleUpdateEntity(entitySetName, request.query, request.json)
        response.content_type = "application/json; charset=utf-8"
        return json.dumps(resultSingleSerialData, cls=CustomEncoder)
    except dalUtils.StatusCodeError as err:
        # The data layer signalled an explicit HTTP status code.
        response.status = err.value
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(400, 'Bad Request')
# POST: api/datasource/crud/:entitySetName
@post('/api/datasource/crud/<entitySetName>')
def post_entityset(entitySetName):
    """Insert a new entity into the named set from the JSON request body."""
    try:
        resultSingleSerialData = dataServiceDto.apiProviderDto.handleInsertEntity(entitySetName, request.json)
        response.content_type = "application/json; charset=utf-8"
        return json.dumps(resultSingleSerialData, cls=CustomEncoder)
    except dalUtils.StatusCodeError as err:
        # The data layer signalled an explicit HTTP status code.
        response.status = err.value
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(400, 'Bad Request')
# DELETE: api/datasource/crud/:entitySetName?keys=key1:{key1}
@delete('/api/datasource/crud/<entitySetName>')
def delete_entityset(entitySetName):
    """Delete the entity identified by the key values in the query string."""
    try:
        resultSingleSerialData = dataServiceDto.apiProviderDto.handleDeleteEntity(entitySetName, request.query)
        response.content_type = "application/json; charset=utf-8"
        return json.dumps(resultSingleSerialData, cls=CustomEncoder)
    except dalUtils.StatusCodeError as err:
        # The data layer signalled an explicit HTTP status code.
        response.status = err.value
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(400, 'Bad Request')
# PUT: api/datasource/crud/batch/:entitySetName
@put('/api/datasource/crud/batch/<entitySetName>')
def put_batch_entityset(entitySetName):
    """Update several entities at once from the JSON request body."""
    try:
        resultSingleSerialDataList = dataServiceDto.apiProviderDto.handleUpdateEntityBatch(entitySetName, request.json)
        response.content_type = "application/json; charset=utf-8"
        return json.dumps(resultSingleSerialDataList, cls=CustomEncoder)
    except dalUtils.StatusCodeError as err:
        # The data layer signalled an explicit HTTP status code.
        response.status = err.value
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(400, 'Bad Request')
# PATCH: api/datasource/crud/batch/:entitySetName
#@patch('/api/datasource/crud/batch/<entitySetName>')
@route('/api/datasource/crud/batch/<entitySetName>', method='PATCH')
def patch_batch_entityset(entitySetName):
    """Partially update several entities at once from the JSON request body."""
    try:
        resultSingleSerialDataList = dataServiceDto.apiProviderDto.handleUpdateEntityBatch(entitySetName, request.json)
        response.content_type = "application/json; charset=utf-8"
        return json.dumps(resultSingleSerialDataList, cls=CustomEncoder)
    except dalUtils.StatusCodeError as err:
        # The data layer signalled an explicit HTTP status code.
        response.status = err.value
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(400, 'Bad Request')
# POST: api/datasource/crud/batch/:entitySetName
@post('/api/datasource/crud/batch/<entitySetName>')
def post_batch_entityset(entitySetName):
    """Insert several entities at once from the JSON request body."""
    try:
        resultSingleSerialDataList = dataServiceDto.apiProviderDto.handleInsertEntityBatch(entitySetName, request.json)
        response.content_type = "application/json; charset=utf-8"
        return json.dumps(resultSingleSerialDataList, cls=CustomEncoder)
    except dalUtils.StatusCodeError as err:
        # The data layer signalled an explicit HTTP status code.
        response.status = err.value
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(400, 'Bad Request')
## DELETE: api/datasource/crud/batch/:entitySetName
#@delete('/api/datasource/crud/batch/<entitySetName>')
#def delete_batch_entityset(entitySetName):
# try:
# resultSingleSerialDataList = dataServiceDto.apiProviderDto.handleDeleteEntityBatch(entitySetName, request.json)
# response.content_type = "application/json; charset=utf-8"
# return json.dumps(resultSingleSerialDataList, cls=CustomEncoder)
# except dalUtils.StatusCodeError as err:
# response.status = err.value
# except:
# abort(400, 'Bad Request')
# DELETE: api/datasource/crud/batch/:entitySetName?keys=key1:1,2,3,4;key2:4,5,6,7
@delete('/api/datasource/crud/batch/<entitySetName>')
def delete_batch_entityset(entitySetName):
    """Delete several entities at once; keys are passed in the query string."""
    try:
        resultSerialData = dataServiceDto.apiProviderDto.handleDeleteEntityBatch(entitySetName, request.query)
        response.content_type = "application/json; charset=utf-8"
        return json.dumps(resultSerialData, cls=CustomEncoder)
    except dalUtils.StatusCodeError as err:
        # The data layer signalled an explicit HTTP status code.
        response.status = err.value
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt propagate.
        abort(400, 'Bad Request')
| [
"import json\n",
"from bottle import route, get, post, put, delete, request, response, abort\n",
"from modules.utils.dal.common.dtos.metadataUtils import MetadataEncoder\n",
"from modules.utils.customEncoder import CustomEncoder\n",
"from modules.utils.dal.common import utils as dalUtils\n",
"from modules.utils.dal import dataProviderDto\n",
"\n",
"dataServiceDto = dataProviderDto.createDataServiceInstance()\n",
"\n",
"# GET: api/datasource/crud/metadata\n",
"@get('/api/datasource/crud/metadata')\n",
"def get_metadata():\n",
" try:\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" metadataCli = dataServiceDto.metadataCli\n",
" return json.dumps(metadataCli, cls=MetadataEncoder, indent=2)\n",
" except:\n",
" abort(500, 'Internal server error')\n",
"\n",
"\n",
"# GET: api/datasource/crud/:entitySetName?skip=20&top=10\n",
"@get('/api/datasource/crud/<entitySetName>')\n",
"def get_entityset(entitySetName):\n",
" try:\n",
" resultSerialResponse = dataServiceDto.apiProviderDto.handleGet(entitySetName, request.query)\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" return json.dumps(resultSerialResponse, cls=CustomEncoder, indent=2)\n",
" except:\n",
" abort(400, 'Bad Request')\n",
"\n",
"\n",
"# GET: api/datasource/crud/single/:entitySetName?keys=key1:{key1}\n",
"@get('/api/datasource/crud/single/<entitySetName>')\n",
"def get_single_entityset(entitySetName):\n",
" try:\n",
" resultSingleSerialData = dataServiceDto.apiProviderDto.handleGetSingle(entitySetName, request.query)\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" return json.dumps(resultSingleSerialData, cls=CustomEncoder, indent=2)\n",
" except:\n",
" abort(400, 'Bad Request')\n",
"\n",
"\n",
"# GET: api/datasource/crud/many/:entitySetName?keys=key1:1,2,3,4;key2:4,5,6,7\n",
"@get('/api/datasource/crud/many/<entitySetName>')\n",
"def get_many_entityset(entitySetName):\n",
" try:\n",
" resultSerialData = dataServiceDto.apiProviderDto.handleGetMany(entitySetName, request.query)\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" return json.dumps(resultSerialData, cls=CustomEncoder, indent=2)\n",
" except:\n",
" abort(400, 'Bad Request')\n",
"\n",
"\n",
"# PUT: api/datasource/crud/:entitySetName?keys=key1:{key1}\n",
"@put('/api/datasource/crud/<entitySetName>')\n",
"def put_entityset(entitySetName):\n",
" try:\n",
" resultSingleSerialData = dataServiceDto.apiProviderDto.handleUpdateEntity(entitySetName, request.query, request.json)\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" return json.dumps(resultSingleSerialData, cls=CustomEncoder)\n",
" except dalUtils.StatusCodeError as err:\n",
" response.status = err.value\n",
" except:\n",
" abort(400, 'Bad Request')\n",
"\n",
"\n",
"# PATCH: api/datasource/crud/:entitySetName?keys=key1:{key1}\n",
"#@patch('/api/datasource/crud/<entitySetName>')\n",
"@route('/api/datasource/crud/<entitySetName>', method='PATCH')\n",
"def patch_entityset(entitySetName):\n",
" try:\n",
" resultSingleSerialData = dataServiceDto.apiProviderDto.handleUpdateEntity(entitySetName, request.query, request.json)\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" return json.dumps(resultSingleSerialData, cls=CustomEncoder)\n",
" except dalUtils.StatusCodeError as err:\n",
" response.status = err.value\n",
" except:\n",
" abort(400, 'Bad Request')\n",
"\n",
"\n",
"# POST: api/datasource/crud/:entitySetName\n",
"@post('/api/datasource/crud/<entitySetName>')\n",
"def post_entityset(entitySetName):\n",
" # test1 = json.loads(request.body.read())\n",
" try:\n",
" resultSingleSerialData = dataServiceDto.apiProviderDto.handleInsertEntity(entitySetName, request.json)\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" return json.dumps(resultSingleSerialData, cls=CustomEncoder)\n",
" except dalUtils.StatusCodeError as err:\n",
" response.status = err.value\n",
" except:\n",
" abort(400, 'Bad Request')\n",
"\n",
"\n",
"# DELETE: api/datasource/crud/:entitySetName?keys=key1:{key1}\n",
"@delete('/api/datasource/crud/<entitySetName>')\n",
"def delete_entityset(entitySetName):\n",
" try:\n",
" resultSingleSerialData = dataServiceDto.apiProviderDto.handleDeleteEntity(entitySetName, request.query)\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" return json.dumps(resultSingleSerialData, cls=CustomEncoder)\n",
" except dalUtils.StatusCodeError as err:\n",
" response.status = err.value\n",
" except:\n",
" abort(400, 'Bad Request')\n",
"\n",
"\n",
"# PUT: api/datasource/crud/batch/:entitySetName\n",
"@put('/api/datasource/crud/batch/<entitySetName>')\n",
"def put_batch_entityset(entitySetName):\n",
" try:\n",
" resultSingleSerialDataList = dataServiceDto.apiProviderDto.handleUpdateEntityBatch(entitySetName, request.json)\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" return json.dumps(resultSingleSerialDataList, cls=CustomEncoder)\n",
" except dalUtils.StatusCodeError as err:\n",
" response.status = err.value\n",
" except:\n",
" abort(400, 'Bad Request')\n",
"\n",
"# PATCH: api/datasource/crud/batch/:entitySetName\n",
"#@patch('/api/datasource/crud/batch/<entitySetName>')\n",
"@route('/api/datasource/crud/batch/<entitySetName>', method='PATCH')\n",
"def patch_batch_entityset(entitySetName):\n",
" try:\n",
" resultSingleSerialDataList = dataServiceDto.apiProviderDto.handleUpdateEntityBatch(entitySetName, request.json)\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" return json.dumps(resultSingleSerialDataList, cls=CustomEncoder)\n",
" except dalUtils.StatusCodeError as err:\n",
" response.status = err.value\n",
" except:\n",
" abort(400, 'Bad Request')\n",
"\n",
"\n",
"# POST: api/datasource/crud/batch/:entitySetName\n",
"@post('/api/datasource/crud/batch/<entitySetName>')\n",
"def post_batch_entityset(entitySetName):\n",
" try:\n",
" resultSingleSerialDataList = dataServiceDto.apiProviderDto.handleInsertEntityBatch(entitySetName, request.json)\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" return json.dumps(resultSingleSerialDataList, cls=CustomEncoder)\n",
" except dalUtils.StatusCodeError as err:\n",
" response.status = err.value\n",
" except:\n",
" abort(400, 'Bad Request')\n",
"\n",
"\n",
"## DELETE: api/datasource/crud/batch/:entitySetName\n",
"#@delete('/api/datasource/crud/batch/<entitySetName>')\n",
"#def delete_batch_entityset(entitySetName):\n",
"# try:\n",
"# resultSingleSerialDataList = dataServiceDto.apiProviderDto.handleDeleteEntityBatch(entitySetName, request.json)\n",
"# response.content_type = \"application/json; charset=utf-8\"\n",
"# return json.dumps(resultSingleSerialDataList, cls=CustomEncoder)\n",
"# except dalUtils.StatusCodeError as err:\n",
"# response.status = err.value\n",
"# except:\n",
"# abort(400, 'Bad Request')\n",
"\n",
"\n",
"# DELETE: api/datasource/crud/batch/:entitySetName?keys=key1:1,2,3,4;key2:4,5,6,7\n",
"@delete('/api/datasource/crud/batch/<entitySetName>')\n",
"def delete_batch_entityset(entitySetName):\n",
" try:\n",
" resultSerialData = dataServiceDto.apiProviderDto.handleDeleteEntityBatch(entitySetName, request.query)\n",
" response.content_type = \"application/json; charset=utf-8\"\n",
" return json.dumps(resultSerialData, cls=CustomEncoder)\n",
" except dalUtils.StatusCodeError as err:\n",
" response.status = err.value\n",
" except:\n",
" abort(400, 'Bad Request')\n",
"\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.009174311926605505,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.007936507936507936,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0.020833333333333332,
0,
0,
0,
0.007936507936507936,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.008928571428571428,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.008333333333333333,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0.018518518518518517,
0.014492753623188406,
0,
0,
0.008333333333333333,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0.008333333333333333,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0.019230769230769232,
0.01818181818181818,
0.022727272727272728,
0,
0.008264462809917356,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0.009009009009009009,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
1
] | 172 | 0.013125 | false |
# -*- coding: utf-8 -*-
from Components.config import config
from Screens.MessageBox import MessageBox
from simpleplayer import SimplePlayer
from youtubelink import YoutubeLink
class YoutubePlayer(SimplePlayer):
def __init__(self, session, playList, playIdx=0, playAll=False, listTitle=None, plType='local', title_inr=0, showPlaylist=True, showCover=False):
print "YoutubePlayer:"
SimplePlayer.__init__(self, session, playList, playIdx=playIdx, playAll=playAll, listTitle=listTitle, plType=plType, title_inr=title_inr, ltype='youtube', showPlaylist=showPlaylist, cover=showCover)
def getVideo(self):
print "getVideo:"
dhTitle = self.playList[self.playIdx][self.title_inr]
if len(self.playList[self.playIdx]) >= 6:
gid = self.playList[self.playIdx][5]
if gid in ('P', 'C'):
self.dataError('This isn\'t a video: '+dhTitle)
return
dhVideoId = self.playList[self.playIdx][self.title_inr+1]
imgurl = self.playList[self.playIdx][self.title_inr+2]
YoutubeLink(self.session).getLink(self.playStream, self.ytError, dhTitle, dhVideoId, imgurl)
def ytError(self, error):
msg = "Title: %s\n%s" % (self.playList[self.playIdx][self.title_inr], error)
self.dataError(msg) | [
"# -*- coding: utf-8 -*-\n",
"\n",
"from Components.config import config\n",
"from Screens.MessageBox import MessageBox\n",
"from simpleplayer import SimplePlayer\n",
"from youtubelink import YoutubeLink\n",
"\n",
"class YoutubePlayer(SimplePlayer):\n",
"\n",
"\tdef __init__(self, session, playList, playIdx=0, playAll=False, listTitle=None, plType='local', title_inr=0, showPlaylist=True, showCover=False):\n",
"\t\tprint \"YoutubePlayer:\"\n",
"\t\tSimplePlayer.__init__(self, session, playList, playIdx=playIdx, playAll=playAll, listTitle=listTitle, plType=plType, title_inr=title_inr, ltype='youtube', showPlaylist=showPlaylist, cover=showCover)\n",
"\n",
"\tdef getVideo(self):\n",
"\t\tprint \"getVideo:\"\n",
"\t\tdhTitle = self.playList[self.playIdx][self.title_inr]\n",
"\t\tif len(self.playList[self.playIdx]) >= 6:\n",
"\t\t\tgid = self.playList[self.playIdx][5]\n",
"\t\t\tif gid in ('P', 'C'):\n",
"\t\t\t\tself.dataError('This isn\\'t a video: '+dhTitle)\n",
"\t\t\t\treturn\n",
"\t\tdhVideoId = self.playList[self.playIdx][self.title_inr+1]\n",
"\t\timgurl = self.playList[self.playIdx][self.title_inr+2]\n",
"\t\tYoutubeLink(self.session).getLink(self.playStream, self.ytError, dhTitle, dhVideoId, imgurl)\n",
"\n",
"\tdef ytError(self, error):\n",
"\t\tmsg = \"Title: %s\\n%s\" % (self.playList[self.playIdx][self.title_inr], error)\n",
"\t\tself.dataError(msg)"
] | [
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0.013605442176870748,
0.04,
0.009950248756218905,
0,
0.047619047619047616,
0.05,
0.017857142857142856,
0.022727272727272728,
0.025,
0.04,
0.019230769230769232,
0.09090909090909091,
0.016666666666666666,
0.034482758620689655,
0.021052631578947368,
0,
0.037037037037037035,
0.012658227848101266,
0.09523809523809523
] | 28 | 0.022236 | false |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from __future__ import absolute_import, print_function
import sys
import unittest
import inspect
import types
def discover(pytestargs=None, hidestdio=False,
_pytest_main=None, _plugin=None, **_ignored):
suites = unittest.defaultTestLoader.discover(pytestargs[0], pytestargs[1])
#Print errors if not Python 2.7 because .errors wont exist
if ((sys.version_info[0] >= 3) and
unittest.defaultTestLoader.errors):
for error in unittest.defaultTestLoader.errors:
print(error)
root={}
for suite in suites._tests:
for cls in suite._tests:
try:
for test in cls._tests:
try:
#Find source, lineno from TestId and add them to test object
parts = test.id().split('.')
error_case, error_message = None, None
#Used loadTestsFromName(..) as an example of finding source of a function
#https://github.com/python/cpython/blob/master/Lib/unittest/loader.py
parts_copy = parts[:]
while parts_copy:
try:
module_name = '.'.join(parts_copy)
module = __import__(module_name)
break
except ImportError:
next_attribute = parts_copy.pop()
parts = parts[1:]
obj = module
for part in parts:
try:
parent, obj = obj, getattr(obj, part)
except AttributeError as e:
pass
if (isinstance(obj, types.FunctionType) or
((sys.version_info[0] < 3) and isinstance(obj, types.UnboundMethodType))):
#workaround for decorators on functions return the source of decorator and not the actual function
#We return the finename of our parent which should be the class, works even if class also has a decorator
filename = inspect.getsourcefile(parent)
setattr(test, 'source', filename)
_, lineno = inspect.getsourcelines(obj)
setattr(test, 'lineno', lineno)
except:
pass
except:
pass
return (
{},
suites,
) | [
"\n",
"# Copyright (c) Microsoft Corporation. All rights reserved.\n",
"# Licensed under the MIT License.\n",
"\n",
"from __future__ import absolute_import, print_function\n",
"\n",
"import sys\n",
"import unittest\n",
"import inspect\n",
"import types\n",
"\n",
"def discover(pytestargs=None, hidestdio=False,\n",
" _pytest_main=None, _plugin=None, **_ignored):\n",
" \n",
" suites = unittest.defaultTestLoader.discover(pytestargs[0], pytestargs[1])\n",
" \n",
" #Print errors if not Python 2.7 because .errors wont exist\n",
" if ((sys.version_info[0] >= 3) and \n",
" unittest.defaultTestLoader.errors):\n",
" for error in unittest.defaultTestLoader.errors:\n",
" print(error)\n",
"\n",
" root={}\n",
"\n",
" for suite in suites._tests:\n",
" for cls in suite._tests:\n",
" try:\n",
" for test in cls._tests:\n",
" try: \n",
" #Find source, lineno from TestId and add them to test object\n",
" parts = test.id().split('.')\n",
" error_case, error_message = None, None\n",
" \n",
" #Used loadTestsFromName(..) as an example of finding source of a function\n",
" #https://github.com/python/cpython/blob/master/Lib/unittest/loader.py\n",
"\n",
" parts_copy = parts[:]\n",
" while parts_copy:\n",
" try:\n",
" module_name = '.'.join(parts_copy)\n",
" module = __import__(module_name)\n",
" break\n",
" except ImportError:\n",
" next_attribute = parts_copy.pop()\n",
" \n",
" parts = parts[1:]\n",
" \n",
" obj = module\n",
" for part in parts:\n",
" try:\n",
" parent, obj = obj, getattr(obj, part)\n",
" except AttributeError as e:\n",
" pass\n",
"\n",
" if (isinstance(obj, types.FunctionType) or\n",
" ((sys.version_info[0] < 3) and isinstance(obj, types.UnboundMethodType))):\n",
" \n",
" #workaround for decorators on functions return the source of decorator and not the actual function\n",
" #We return the finename of our parent which should be the class, works even if class also has a decorator\n",
" filename = inspect.getsourcefile(parent)\n",
" setattr(test, 'source', filename)\n",
"\n",
" _, lineno = inspect.getsourcelines(obj)\n",
" setattr(test, 'lineno', lineno)\n",
" except:\n",
" pass\n",
" except:\n",
" pass\n",
" return (\n",
" {},\n",
" suites,\n",
" )"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085,
0,
0.2,
0,
0.2,
0.015873015873015872,
0.025,
0.022727272727272728,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0.034482758620689655,
0.023529411764705882,
0,
0,
0.047619047619047616,
0.02040816326530612,
0.02127659574468085,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0,
0,
0.019417475728155338,
0.04,
0.015748031496062992,
0.014925373134328358,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0.05,
0,
0,
0,
0,
0.07692307692307693
] | 72 | 0.014588 | false |
# -*- coding: utf-8 -*-
# Module: default
# Author: Yangqian
# Created on: 25.12.2015
# License: GPL v.3 https://www.gnu.org/copyleft/gpl.html
# Largely following the example at
# https://github.com/romanvm/plugin.video.example/blob/master/main.py
import xbmc,xbmcgui,urllib2,re,xbmcplugin
from urlparse import parse_qsl
import sys
import json
def post(url, data):
req = urllib2.Request(url)
#enable cookie
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
response = opener.open(req, data)
return response.read()
# Get the plugin url in plugin:// notation.
_url=sys.argv[0]
# Get the plugin handle as an integer number.
_handle=int(sys.argv[1])
def list_categories():
f = urllib2.urlopen('http://api.m.panda.tv/ajax_get_all_subcate?__version=1.0.5.1098&__plat=iOS')
obj = json.loads(f.read())
listing=[]
for game in obj['data']:
list_item = xbmcgui.ListItem(label=game['cname'], thumbnailImage=game['img'])
list_item.setProperty('fanart_image', game['img'])
url='{0}?action=room_list&game_id={1}'.format(_url, game['ename'])
is_folder=True
listing.append((url, list_item, is_folder))
xbmcplugin.addDirectoryItems(_handle,listing,len(listing))
#xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def room_list(game_id):
apiurl = "http://api.m.panda.tv/ajax_get_live_list_by_cate";
params = "__plat=iOS&__version=1.0.5.1098&cate={ename}&order=person_num&pageno=1&pagenum=100&status=2".format(ename=game_id)
returndata = post(apiurl, params);
obj = json.loads(returndata)
listing=[]
for room in obj['data']['items']:
list_item = xbmcgui.ListItem(label=room['name'], thumbnailImage=room['pictures']['img'])
list_item.setProperty('fanart_image', room['pictures']['img'])
url='{0}?action=play&room_id={1}'.format(_url, room['id'])
is_folder=False
listing.append((url, list_item, is_folder))
xbmcplugin.addDirectoryItems(_handle, listing, len(listing))
#xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def play_video(room_id):
"""
Play a video by the provided path.
:param path: str
:return: None
"""
f = urllib2.urlopen('http://www.panda.tv/api_room?roomid={room_id}'.format(room_id=room_id))
obj = json.loads(f.read())
path = 'http://pl3.live.panda.tv/live_panda/{video}.flv'.format(video=obj['data']['videoinfo']['room_key'])
play_item = xbmcgui.ListItem(path=path, thumbnailImage=obj['data']['hostinfo']['avatar'])
play_item.setInfo(type="Video", infoLabels={"Title":obj['data']['roominfo']['name']})
# Pass the item to the Kodi player.
xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)
# directly play the item.
xbmc.Player().play(path, play_item)
def router(paramstring):
"""
Router function that calls other functions
depending on the provided paramstring
:param paramstring:
:return:
"""
# Parse a URL-encoded paramstring to the dictionary of
# {<parameter>: <value>} elements
params = dict(parse_qsl(paramstring))
# Check the parameters passed to the plugin
if params:
if params['action'] == 'room_list':
# Display the list of videos in a provided category.
room_list(params['game_id'])
elif params['action'] == 'play':
# Play a video from a provided URL.
play_video(params['room_id'])
else:
# If the plugin is called from Kodi UI without any parameters,
# display the list of video categories
list_categories()
if __name__ == '__main__':
# Call the router function and pass the plugin call parameters to it.
# We use string slicing to trim the leading '?' from the plugin call paramstring
router(sys.argv[2][1:])
| [
"# -*- coding: utf-8 -*-\n",
"# Module: default\n",
"# Author: Yangqian\n",
"# Created on: 25.12.2015\n",
"# License: GPL v.3 https://www.gnu.org/copyleft/gpl.html\n",
"# Largely following the example at \n",
"# https://github.com/romanvm/plugin.video.example/blob/master/main.py\n",
"import xbmc,xbmcgui,urllib2,re,xbmcplugin\n",
"from urlparse import parse_qsl\n",
"import sys\n",
"import json\n",
"\n",
"def post(url, data):\n",
" req = urllib2.Request(url)\n",
" #enable cookie\n",
" opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())\n",
" response = opener.open(req, data)\n",
" return response.read()\n",
"\n",
"# Get the plugin url in plugin:// notation.\n",
"_url=sys.argv[0]\n",
"# Get the plugin handle as an integer number.\n",
"_handle=int(sys.argv[1])\n",
"\n",
"def list_categories():\n",
"\n",
" f = urllib2.urlopen('http://api.m.panda.tv/ajax_get_all_subcate?__version=1.0.5.1098&__plat=iOS')\n",
"\n",
" obj = json.loads(f.read())\n",
"\n",
" listing=[]\n",
" for game in obj['data']:\n",
" list_item = xbmcgui.ListItem(label=game['cname'], thumbnailImage=game['img'])\n",
" list_item.setProperty('fanart_image', game['img'])\n",
" url='{0}?action=room_list&game_id={1}'.format(_url, game['ename'])\n",
"\n",
" is_folder=True\n",
" listing.append((url, list_item, is_folder))\n",
"\n",
" xbmcplugin.addDirectoryItems(_handle,listing,len(listing))\n",
" #xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)\n",
" # Finish creating a virtual folder.\n",
" xbmcplugin.endOfDirectory(_handle)\n",
"\n",
"\n",
"def room_list(game_id):\n",
"\n",
" apiurl = \"http://api.m.panda.tv/ajax_get_live_list_by_cate\";\n",
" params = \"__plat=iOS&__version=1.0.5.1098&cate={ename}&order=person_num&pageno=1&pagenum=100&status=2\".format(ename=game_id)\n",
"\n",
" returndata = post(apiurl, params);\n",
"\n",
" obj = json.loads(returndata)\n",
"\n",
" listing=[]\n",
" for room in obj['data']['items']:\n",
" list_item = xbmcgui.ListItem(label=room['name'], thumbnailImage=room['pictures']['img'])\n",
" list_item.setProperty('fanart_image', room['pictures']['img'])\n",
" url='{0}?action=play&room_id={1}'.format(_url, room['id'])\n",
" is_folder=False\n",
" listing.append((url, list_item, is_folder))\n",
" xbmcplugin.addDirectoryItems(_handle, listing, len(listing))\n",
" #xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)\n",
" # Finish creating a virtual folder.\n",
" xbmcplugin.endOfDirectory(_handle)\n",
"\n",
"def play_video(room_id):\n",
" \"\"\"\n",
" Play a video by the provided path.\n",
" :param path: str\n",
" :return: None\n",
" \"\"\"\n",
" f = urllib2.urlopen('http://www.panda.tv/api_room?roomid={room_id}'.format(room_id=room_id))\n",
" obj = json.loads(f.read())\n",
" path = 'http://pl3.live.panda.tv/live_panda/{video}.flv'.format(video=obj['data']['videoinfo']['room_key'])\n",
" play_item = xbmcgui.ListItem(path=path, thumbnailImage=obj['data']['hostinfo']['avatar'])\n",
" play_item.setInfo(type=\"Video\", infoLabels={\"Title\":obj['data']['roominfo']['name']})\n",
" # Pass the item to the Kodi player.\n",
" xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)\n",
" # directly play the item.\n",
" xbmc.Player().play(path, play_item)\n",
"\n",
"def router(paramstring):\n",
" \"\"\"\n",
" Router function that calls other functions\n",
" depending on the provided paramstring\n",
" :param paramstring:\n",
" :return:\n",
" \"\"\"\n",
" # Parse a URL-encoded paramstring to the dictionary of\n",
" # {<parameter>: <value>} elements\n",
" params = dict(parse_qsl(paramstring))\n",
" # Check the parameters passed to the plugin\n",
" if params:\n",
" if params['action'] == 'room_list':\n",
" # Display the list of videos in a provided category.\n",
" room_list(params['game_id'])\n",
" elif params['action'] == 'play':\n",
" # Play a video from a provided URL.\n",
" play_video(params['room_id'])\n",
" else:\n",
" # If the plugin is called from Kodi UI without any parameters,\n",
" # display the list of video categories\n",
" list_categories()\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" # Call the router function and pass the plugin call parameters to it.\n",
" # We use string slicing to trim the leading '?' from the plugin call paramstring\n",
" router(sys.argv[2][1:])\n"
] | [
0,
0,
0,
0,
0,
0.027777777777777776,
0,
0.11904761904761904,
0,
0,
0,
0,
0.047619047619047616,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0.11764705882352941,
0,
0.04,
0,
0.043478260869565216,
0,
0.00980392156862745,
0,
0,
0,
0.06666666666666667,
0,
0.011627906976744186,
0,
0.013333333333333334,
0,
0.043478260869565216,
0,
0,
0.031746031746031744,
0.0125,
0,
0,
0,
0,
0,
0,
0.015384615384615385,
0.007751937984496124,
0,
0.02564102564102564,
0,
0,
0,
0.06666666666666667,
0,
0.010309278350515464,
0,
0.014925373134328358,
0.041666666666666664,
0,
0,
0.0125,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0.010309278350515464,
0,
0.008928571428571428,
0.010638297872340425,
0.022222222222222223,
0,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0
] | 110 | 0.008873 | false |
#
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Dictation container for the Kaldi engine.
"""
import itertools
from six import text_type
from ...grammar.elements_basic import Dictation as BaseDictation
from ...grammar.elements_basic import ElementBase, RuleRef, Alternative, ListRef, DictListRef, Repetition
from ...grammar.list import List, DictList
from ...grammar.rule_compound import CompoundRule
#---------------------------------------------------------------------------
# User dictation class -- elements capable of also recognizing user words.
class UserDictation(RuleRef):
"""
Imitates the standard Dictation element class, using individual chunks
of Dictation or the user's added terminology.
"""
# Partially copied from BaseDictation
def __init__(self, name=None, format=True, default=None):
RuleRef.__init__(self, rule=_user_dictation_sequence_rule, name=name, default=default)
self._format_words = format
self._string_methods = []
# Use BaseDictation methods
__repr__ = BaseDictation.__repr__
__getattr__ = BaseDictation.__getattr__
# Partially copied from BaseDictation
def value(self, node):
words = node.children[0].value()
if self._format_words:
return node.engine.DictationContainer(words, self._string_methods)
else:
return words
user_dictation_list = List('__kaldi_user_dictation_list', [])
user_dictation_dictlist = DictList('__kaldi_user_dictation_dictlist', {})
class _UserDictationSequenceRule(CompoundRule):
spec = "<__kaldi_user_dictation_sequence>"
extras = [
Repetition(Alternative([
BaseDictation(format=False),
ListRef('__kaldi_user_dictation_listref', user_dictation_list),
DictListRef('__kaldi_user_dictation_dictlistref', user_dictation_dictlist),
]), min=1, max=16, name="__kaldi_user_dictation_sequence"),
]
exported = False
def value(self, node):
# This method returns the value of the root Repetition: a list of values of
# Alternatives, each (dictation or listref or dictlistref) being a string.
chunks = node.children[0].value()
# Make sure each chunk is a list (of strings), then concat them together.
return list(itertools.chain.from_iterable([chunk] if not isinstance(chunk, list) else chunk for chunk in chunks))
_user_dictation_sequence_rule = _UserDictationSequenceRule()
#---------------------------------------------------------------------------
# Alternative dictation classes -- elements capable of default or alternative dictation.
class AlternativeDictation(BaseDictation):
alternative_default = True
def __init__(self, *args, **kwargs):
self.alternative = kwargs.pop('alternative', self.alternative_default)
BaseDictation.__init__(self, *args, **kwargs)
class DefaultDictation(BaseDictation):
alternative_default = False
def __init__(self, *args, **kwargs):
self.alternative = kwargs.pop('alternative', self.alternative_default)
BaseDictation.__init__(self, *args, **kwargs)
| [
"#\n",
"# This file is part of Dragonfly.\n",
"# (c) Copyright 2007, 2008 by Christo Butcher\n",
"# Licensed under the LGPL.\n",
"#\n",
"# Dragonfly is free software: you can redistribute it and/or modify it\n",
"# under the terms of the GNU Lesser General Public License as published\n",
"# by the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# Dragonfly is distributed in the hope that it will be useful, but\n",
"# WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n",
"# Lesser General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU Lesser General Public\n",
"# License along with Dragonfly. If not, see\n",
"# <http://www.gnu.org/licenses/>.\n",
"#\n",
"\n",
"\"\"\"\n",
"Dictation container for the Kaldi engine.\n",
"\n",
"\"\"\"\n",
"\n",
"import itertools\n",
"\n",
"from six import text_type\n",
"\n",
"from ...grammar.elements_basic import Dictation as BaseDictation\n",
"from ...grammar.elements_basic import ElementBase, RuleRef, Alternative, ListRef, DictListRef, Repetition\n",
"from ...grammar.list import List, DictList\n",
"from ...grammar.rule_compound import CompoundRule\n",
"\n",
"\n",
"#---------------------------------------------------------------------------\n",
"# User dictation class -- elements capable of also recognizing user words.\n",
"\n",
"class UserDictation(RuleRef):\n",
" \"\"\"\n",
" Imitates the standard Dictation element class, using individual chunks\n",
" of Dictation or the user's added terminology.\n",
" \"\"\"\n",
"\n",
" # Partially copied from BaseDictation\n",
" def __init__(self, name=None, format=True, default=None):\n",
" RuleRef.__init__(self, rule=_user_dictation_sequence_rule, name=name, default=default)\n",
" self._format_words = format\n",
" self._string_methods = []\n",
"\n",
" # Use BaseDictation methods\n",
" __repr__ = BaseDictation.__repr__\n",
" __getattr__ = BaseDictation.__getattr__\n",
"\n",
" # Partially copied from BaseDictation\n",
" def value(self, node):\n",
" words = node.children[0].value()\n",
" if self._format_words:\n",
" return node.engine.DictationContainer(words, self._string_methods)\n",
" else:\n",
" return words\n",
"\n",
"user_dictation_list = List('__kaldi_user_dictation_list', [])\n",
"user_dictation_dictlist = DictList('__kaldi_user_dictation_dictlist', {})\n",
"\n",
"class _UserDictationSequenceRule(CompoundRule):\n",
" spec = \"<__kaldi_user_dictation_sequence>\"\n",
" extras = [\n",
" Repetition(Alternative([\n",
" BaseDictation(format=False),\n",
" ListRef('__kaldi_user_dictation_listref', user_dictation_list),\n",
" DictListRef('__kaldi_user_dictation_dictlistref', user_dictation_dictlist),\n",
" ]), min=1, max=16, name=\"__kaldi_user_dictation_sequence\"),\n",
" ]\n",
" exported = False\n",
" def value(self, node):\n",
" # This method returns the value of the root Repetition: a list of values of\n",
" # Alternatives, each (dictation or listref or dictlistref) being a string.\n",
" chunks = node.children[0].value()\n",
" # Make sure each chunk is a list (of strings), then concat them together.\n",
" return list(itertools.chain.from_iterable([chunk] if not isinstance(chunk, list) else chunk for chunk in chunks))\n",
"\n",
"_user_dictation_sequence_rule = _UserDictationSequenceRule()\n",
"\n",
"\n",
"#---------------------------------------------------------------------------\n",
"# Alternative dictation classes -- elements capable of default or alternative dictation.\n",
"\n",
"class AlternativeDictation(BaseDictation):\n",
"\n",
" alternative_default = True\n",
"\n",
" def __init__(self, *args, **kwargs):\n",
" self.alternative = kwargs.pop('alternative', self.alternative_default)\n",
" BaseDictation.__init__(self, *args, **kwargs)\n",
"\n",
"class DefaultDictation(BaseDictation):\n",
"\n",
" alternative_default = False\n",
"\n",
" def __init__(self, *args, **kwargs):\n",
" self.alternative = kwargs.pop('alternative', self.alternative_default)\n",
" BaseDictation.__init__(self, *args, **kwargs)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009433962264150943,
0,
0,
0,
0,
0.012987012987012988,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.016129032258064516,
0,
0,
0.020833333333333332,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0.037037037037037035,
0.011904761904761904,
0.012048192771084338,
0,
0.012195121951219513,
0.00819672131147541,
0,
0.01639344262295082,
0,
0,
0.012987012987012988,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02564102564102564,
0,
0,
0,
0,
0,
0
] | 103 | 0.002222 | false |
# Copyright (C) 2015 Anisan, Kevin S. Graer
#
#
# This file is part of PseudoTV Live.
#
# PseudoTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import time
import threading
import xbmc
import xbmcgui
import xbmcaddon
import datetime
import random
import Globals
from Globals import *
rootDir = ADDON_PATH
if rootDir[-1] == ';':rootDir = rootDir[0:-1]
resDir = os.path.join(rootDir, 'resources')
skinsDir = os.path.join(resDir, 'skins')
addon_image_path = os.path.join( resDir, "skins", "Default", "media")
background = (os.path.join( addon_image_path, "Background"))
digits = os.path.join( addon_image_path, "Digits")
backMEDIA_LOC = xbmc.translatePath(os.path.join(MEDIA_LOC, 'pstvlBackground.png'))
cacheDir = xbmc.translatePath('special://profile/addon_data/script.twitXBMC/cache/')
if not os.path.exists(cacheDir): os.makedirs(cacheDir)
EXIT_SCRIPT = ( 9, 6, 10, 247, 275, 61467, 216, 257, 61448, )
CANCEL_DIALOG = EXIT_SCRIPT + ( 1, 2, 3, 4, 12, 122, 75, 7, 92, )
FLIP1=210
DIGIT1 = 211
DIGIT11 = 2111
DIGIT12 = 2112
DIGIT110 = 21110
DIGIT120 = 21120
DIGIT2 = 212
DIGIT21 = 2121
DIGIT22 = 2122
DIGIT210 = 21210
DIGIT220 = 21220
FLIP2=220
DIGIT3 = 221
DIGIT31 = 2211
DIGIT32 = 2212
DIGIT310 = 22110
DIGIT320 = 22120
DIGIT4 = 222
DIGIT41 = 2221
DIGIT42 = 2222
DIGIT410 = 22210
DIGIT420 = 22220
LABEL = 224
CLOCK = 200
class GUI( xbmcgui.WindowXMLDialog ):
class TimeCounter(threading.Thread):
def __init__(self, ui):
threading.Thread.__init__(self)
self.ui = ui
self.h1=0
self.h2=0
self.m1=0
self.m2=0
self.clockMode = REAL_SETTINGS.getSetting("ClockMode")
self.ui.getControl(100).setImage(backMEDIA_LOC)
def run(self):
print 'run'
i=0
while (not self.ui.terminate):
time.sleep(1)
dtn = datetime.datetime.now()
if self.clockMode == "1":
dte = dtn.strftime("%d.%m.%Y %H:%M:%S")
h = dtn.strftime("%H")
else:
dte = dtn.strftime("%d.%m.%Y %I:%M:%S")
h = dtn.strftime("%I")
h1=int(h[0])
h2=int(h[1])
m = dtn.strftime("%M")
m1=int(m[0])
m2=int(m[1])
if (self.h1!=h1)|(self.h2!=h2):
Flip = self.ui.Fliper(self.ui,1,self.h1,h1,self.h2,h2)
Flip.start()
self.h1=h1
self.h2=h2
if (self.m1!=m1)|(self.m2!=m2):
Flip = self.ui.Fliper(self.ui,2,self.m1,m1,self.m2,m2)
Flip.start()
self.m1=m1
self.m2=m2
# print background+"|"+str(i)+".png"
self.ui.getControl( LABEL ).setLabel(dte)
class Fliper(threading.Thread):
def __init__(self, ui,flip,old1,new1,old2,new2):
threading.Thread.__init__(self)
self.ui = ui
self.flip = flip
self.old1 = old1
self.new1 = new1
self.old2 = old2
self.new2 = new2
def run (self):
if (self.flip==1):
self.flip1()
else:
self.flip2()
def flip1(self):
print 'flip1'
i=1
print (os.path.join(background,"0.png"))
print (os.path.join(digits,str(self.new1)+"(1).png"))
self.ui.getControl( FLIP1 ).setImage(os.path.join(background,"0.png"))
self.ui.getControl( FLIP1 ).setVisible(1)
self.ui.getControl( DIGIT11 ).setImage(os.path.join(digits,str(self.new1)+"(1).png"))
self.ui.getControl( DIGIT12 ).setImage(os.path.join(digits,str(self.old1)+"(2).png"))
self.ui.getControl( DIGIT110 ).setImage(os.path.join(digits,str(self.old1)+"(1).png"))
self.ui.getControl( DIGIT21 ).setImage(os.path.join(digits,str(self.new2)+"(1).png"))
self.ui.getControl( DIGIT22 ).setImage(os.path.join(digits,str(self.old2)+"(2).png"))
self.ui.getControl( DIGIT210 ).setImage(os.path.join(digits,str(self.old2)+"(1).png"))
self.ui.getControl( DIGIT110 ).setHeight(40)
self.ui.getControl( DIGIT110 ).setPosition(15,24)
self.ui.getControl( DIGIT210 ).setHeight(40)
self.ui.getControl( DIGIT210 ).setPosition(65,24)
self.ui.getControl( DIGIT110 ).setVisible(1)
self.ui.getControl( DIGIT210 ).setVisible(1)
self.ui.getControl( DIGIT12 ).setVisible(1)
self.ui.getControl( DIGIT22 ).setVisible(1)
self.ui.getControl( DIGIT1 ).setVisible(0)
self.ui.getControl( DIGIT2 ).setVisible(0)
self.ui.getControl( DIGIT11 ).setVisible(1)
self.ui.getControl( DIGIT21 ).setVisible(1)
h=40
while (i<12):
time.sleep(0.01)
# print background+ "|"+ str(i)+".png"
self.ui.getControl( FLIP1 ).setImage(os.path.join(background,str(i)+".png"))
h=h-3
self.ui.getControl( DIGIT110 ).setPosition(15,24+(40-h))
self.ui.getControl( DIGIT110 ).setHeight(h)
self.ui.getControl( DIGIT210 ).setPosition(65,24+(40-h))
self.ui.getControl( DIGIT210 ).setHeight(h)
i = i +1
h=43
self.ui.getControl( DIGIT110 ).setVisible(0)
self.ui.getControl( DIGIT210 ).setVisible(0)
self.ui.getControl( DIGIT120 ).setHeight(3)
self.ui.getControl( DIGIT120 ).setImage(os.path.join(digits,str(self.new1)+"(2).png"))
self.ui.getControl( DIGIT220 ).setHeight(3)
self.ui.getControl( DIGIT220 ).setImage(os.path.join(digits,str(self.new2)+"(2).png"))
self.ui.getControl( DIGIT120 ).setVisible(1)
self.ui.getControl( DIGIT220 ).setVisible(1)
h=3
while (i<20):
time.sleep(0.01)
print background+ "|"+ str(i)+".png"
h=h+4
self.ui.getControl( FLIP1 ).setImage(os.path.join(background,str(i)+".png"))
self.ui.getControl( DIGIT120 ).setHeight(h)
self.ui.getControl( DIGIT220 ).setHeight(h)
i = i +1
self.ui.getControl( DIGIT1 ).setImage(os.path.join(digits,str(self.new1)+".png"))
self.ui.getControl( DIGIT2 ).setImage(os.path.join(digits,str(self.new2)+".png"))
self.ui.getControl( DIGIT1 ).setVisible(1)
self.ui.getControl( DIGIT2 ).setVisible(1)
self.ui.getControl( DIGIT11 ).setVisible(0)
self.ui.getControl( DIGIT12 ).setVisible(0)
self.ui.getControl( DIGIT120 ).setVisible(0)
self.ui.getControl( DIGIT21 ).setVisible(0)
self.ui.getControl( DIGIT22 ).setVisible(0)
self.ui.getControl( DIGIT220 ).setVisible(0)
self.ui.getControl( FLIP1 ).setVisible(0)
def flip2(self):
i=1
self.ui.getControl( FLIP2 ).setImage(os.path.join(background,"0.png"))
self.ui.getControl( FLIP2 ).setVisible(1)
self.ui.getControl( DIGIT31 ).setImage(os.path.join(digits,str(self.new1)+"(1).png"))
self.ui.getControl( DIGIT32 ).setImage(os.path.join(digits,str(self.old1)+"(2).png"))
self.ui.getControl( DIGIT310 ).setImage(os.path.join(digits,str(self.old1)+"(1).png"))
self.ui.getControl( DIGIT41 ).setImage(os.path.join(digits,str(self.new2)+"(1).png"))
self.ui.getControl( DIGIT42 ).setImage(os.path.join(digits,str(self.old2)+"(2).png"))
self.ui.getControl( DIGIT410 ).setImage(os.path.join(digits,str(self.old2)+"(1).png"))
self.ui.getControl( DIGIT310 ).setHeight(40)
self.ui.getControl( DIGIT310 ).setPosition(15,24)
self.ui.getControl( DIGIT410 ).setHeight(40)
self.ui.getControl( DIGIT410 ).setPosition(65,24)
self.ui.getControl( DIGIT310 ).setVisible(1)
self.ui.getControl( DIGIT410 ).setVisible(1)
self.ui.getControl( DIGIT32 ).setVisible(1)
self.ui.getControl( DIGIT42 ).setVisible(1)
self.ui.getControl( DIGIT3 ).setVisible(0)
self.ui.getControl( DIGIT4 ).setVisible(0)
self.ui.getControl( DIGIT31 ).setVisible(1)
self.ui.getControl( DIGIT41 ).setVisible(1)
h=40
while (i<12):
time.sleep(0.01)
print background+ "|"+ str(i)+".png"
self.ui.getControl( FLIP2 ).setImage(os.path.join(background,str(i)+".png"))
h=h-3
self.ui.getControl( DIGIT310 ).setPosition(15,24+(40-h))
self.ui.getControl( DIGIT310 ).setHeight(h)
self.ui.getControl( DIGIT410 ).setPosition(65,24+(40-h))
self.ui.getControl( DIGIT410 ).setHeight(h)
i = i +1
h=43
self.ui.getControl( DIGIT310 ).setVisible(0)
self.ui.getControl( DIGIT410 ).setVisible(0)
self.ui.getControl( DIGIT320 ).setHeight(3)
self.ui.getControl( DIGIT320 ).setImage(os.path.join(digits,str(self.new1)+"(2).png"))
self.ui.getControl( DIGIT420 ).setHeight(3)
self.ui.getControl( DIGIT420 ).setImage(os.path.join(digits,str(self.new2)+"(2).png"))
self.ui.getControl( DIGIT320 ).setVisible(1)
self.ui.getControl( DIGIT420 ).setVisible(1)
h=3
while (i<20):
time.sleep(0.01)
print background+ "|"+ str(i)+".png"
h=h+4
self.ui.getControl( FLIP2 ).setImage(os.path.join(background,str(i)+".png"))
self.ui.getControl( DIGIT320 ).setHeight(h)
self.ui.getControl( DIGIT420 ).setHeight(h)
i = i +1
self.ui.getControl( DIGIT3 ).setImage(os.path.join(digits,str(self.new1)+".png"))
self.ui.getControl( DIGIT4 ).setImage(os.path.join(digits,str(self.new2)+".png"))
self.ui.getControl( DIGIT3 ).setVisible(1)
self.ui.getControl( DIGIT4 ).setVisible(1)
self.ui.getControl( DIGIT31 ).setVisible(0)
self.ui.getControl( DIGIT32 ).setVisible(0)
self.ui.getControl( DIGIT320 ).setVisible(0)
self.ui.getControl( DIGIT41 ).setVisible(0)
self.ui.getControl( DIGIT42 ).setVisible(0)
self.ui.getControl( DIGIT420 ).setVisible(0)
self.ui.getControl( FLIP2 ).setVisible(0)
class MoveClock(threading.Thread):
def __init__(self, ui):
threading.Thread.__init__(self)
self.ui = ui
def run(self):
i =0
while (not self.ui.terminate):
time.sleep(1)
i=i+1
if (i>4):
i=0
x = random.randint(0,990)
y = random.randint(0,570)
self.ui.getControl( CLOCK ).setPosition(x,y)
def __init__( self, *args, **kwargs ):
self.terminate = False
pass
def onInit( self ):
counter = self.TimeCounter(self)
counter.start()
mover = self.MoveClock(self)
mover.start()
pass
def onClick( self, controlId ):
pass
def onFocus( self, controlId ):
pass
def onAction( self, action ):
# if ( action.getButtonCode() in CANCEL_DIALOG ):
self.terminate = True
self.close() | [
"# Copyright (C) 2015 Anisan, Kevin S. Graer\n",
"#\n",
"#\n",
"# This file is part of PseudoTV Live.\n",
"#\n",
"# PseudoTV is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# PseudoTV is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"import sys\n",
"import os\n",
"import time\n",
"import threading\n",
"import xbmc\n",
"import xbmcgui\n",
"import xbmcaddon\n",
"import datetime\n",
"import random\n",
"import Globals\n",
"from Globals import *\n",
"\n",
"rootDir = ADDON_PATH\n",
"if rootDir[-1] == ';':rootDir = rootDir[0:-1]\n",
"resDir = os.path.join(rootDir, 'resources')\n",
"skinsDir = os.path.join(resDir, 'skins')\n",
"addon_image_path = os.path.join( resDir, \"skins\", \"Default\", \"media\")\n",
"background = (os.path.join( addon_image_path, \"Background\"))\n",
"digits = os.path.join( addon_image_path, \"Digits\")\n",
"backMEDIA_LOC = xbmc.translatePath(os.path.join(MEDIA_LOC, 'pstvlBackground.png'))\n",
"\n",
"cacheDir = xbmc.translatePath('special://profile/addon_data/script.twitXBMC/cache/')\n",
"if not os.path.exists(cacheDir): os.makedirs(cacheDir)\n",
"\n",
"EXIT_SCRIPT = ( 9, 6, 10, 247, 275, 61467, 216, 257, 61448, )\n",
"CANCEL_DIALOG = EXIT_SCRIPT + ( 1, 2, 3, 4, 12, 122, 75, 7, 92, )\n",
"\n",
"\n",
"FLIP1=210\n",
"DIGIT1 = 211\n",
"DIGIT11 = 2111\n",
"DIGIT12 = 2112\n",
"DIGIT110 = 21110\n",
"DIGIT120 = 21120\n",
"DIGIT2 = 212\n",
"DIGIT21 = 2121\n",
"DIGIT22 = 2122\n",
"DIGIT210 = 21210\n",
"DIGIT220 = 21220\n",
"FLIP2=220\n",
"DIGIT3 = 221\n",
"DIGIT31 = 2211\n",
"DIGIT32 = 2212\n",
"DIGIT310 = 22110\n",
"DIGIT320 = 22120\n",
"DIGIT4 = 222\n",
"DIGIT41 = 2221\n",
"DIGIT42 = 2222\n",
"DIGIT410 = 22210\n",
"DIGIT420 = 22220\n",
"LABEL = 224\n",
"CLOCK = 200\n",
"\n",
"\n",
"class GUI( xbmcgui.WindowXMLDialog ):\n",
" class TimeCounter(threading.Thread):\n",
" def __init__(self, ui):\n",
" threading.Thread.__init__(self)\n",
" self.ui = ui\n",
" self.h1=0\n",
" self.h2=0\n",
" self.m1=0\n",
" self.m2=0\n",
" self.clockMode = REAL_SETTINGS.getSetting(\"ClockMode\")\n",
" self.ui.getControl(100).setImage(backMEDIA_LOC)\n",
" \n",
" def run(self):\n",
" print 'run'\n",
" i=0\n",
" while (not self.ui.terminate):\n",
" time.sleep(1)\n",
" dtn = datetime.datetime.now()\n",
" \n",
" if self.clockMode == \"1\":\n",
" dte = dtn.strftime(\"%d.%m.%Y %H:%M:%S\")\n",
" h = dtn.strftime(\"%H\")\n",
" else:\n",
" dte = dtn.strftime(\"%d.%m.%Y %I:%M:%S\")\n",
" h = dtn.strftime(\"%I\")\n",
" \n",
" h1=int(h[0])\n",
" h2=int(h[1])\n",
" m = dtn.strftime(\"%M\")\n",
" m1=int(m[0])\n",
" m2=int(m[1])\n",
" if (self.h1!=h1)|(self.h2!=h2):\n",
" Flip = self.ui.Fliper(self.ui,1,self.h1,h1,self.h2,h2)\n",
" Flip.start()\n",
" self.h1=h1\n",
" self.h2=h2\n",
" if (self.m1!=m1)|(self.m2!=m2):\n",
" Flip = self.ui.Fliper(self.ui,2,self.m1,m1,self.m2,m2)\n",
" Flip.start()\n",
" self.m1=m1\n",
" self.m2=m2\n",
" \n",
" # print background+\"|\"+str(i)+\".png\"\n",
" self.ui.getControl( LABEL ).setLabel(dte)\n",
" \n",
" class Fliper(threading.Thread):\n",
" def __init__(self, ui,flip,old1,new1,old2,new2):\n",
" threading.Thread.__init__(self)\n",
" self.ui = ui\n",
" self.flip = flip\n",
" self.old1 = old1 \n",
" self.new1 = new1\n",
" self.old2 = old2\n",
" self.new2 = new2\n",
" \n",
" \n",
" def run (self):\n",
" if (self.flip==1):\n",
" self.flip1()\n",
" else:\n",
" self.flip2()\n",
" \n",
" def flip1(self):\n",
" print 'flip1'\n",
" i=1\n",
" print (os.path.join(background,\"0.png\"))\n",
" print (os.path.join(digits,str(self.new1)+\"(1).png\"))\n",
" self.ui.getControl( FLIP1 ).setImage(os.path.join(background,\"0.png\"))\n",
" self.ui.getControl( FLIP1 ).setVisible(1)\n",
" self.ui.getControl( DIGIT11 ).setImage(os.path.join(digits,str(self.new1)+\"(1).png\"))\n",
" self.ui.getControl( DIGIT12 ).setImage(os.path.join(digits,str(self.old1)+\"(2).png\"))\n",
" self.ui.getControl( DIGIT110 ).setImage(os.path.join(digits,str(self.old1)+\"(1).png\"))\n",
" self.ui.getControl( DIGIT21 ).setImage(os.path.join(digits,str(self.new2)+\"(1).png\"))\n",
" self.ui.getControl( DIGIT22 ).setImage(os.path.join(digits,str(self.old2)+\"(2).png\"))\n",
" self.ui.getControl( DIGIT210 ).setImage(os.path.join(digits,str(self.old2)+\"(1).png\"))\n",
" self.ui.getControl( DIGIT110 ).setHeight(40)\n",
" self.ui.getControl( DIGIT110 ).setPosition(15,24)\n",
" self.ui.getControl( DIGIT210 ).setHeight(40)\n",
" self.ui.getControl( DIGIT210 ).setPosition(65,24)\n",
" self.ui.getControl( DIGIT110 ).setVisible(1)\n",
" self.ui.getControl( DIGIT210 ).setVisible(1)\n",
" self.ui.getControl( DIGIT12 ).setVisible(1)\n",
" self.ui.getControl( DIGIT22 ).setVisible(1)\n",
" self.ui.getControl( DIGIT1 ).setVisible(0)\n",
" self.ui.getControl( DIGIT2 ).setVisible(0)\n",
" self.ui.getControl( DIGIT11 ).setVisible(1)\n",
" self.ui.getControl( DIGIT21 ).setVisible(1)\n",
" h=40\n",
" while (i<12):\n",
" time.sleep(0.01)\n",
" # print background+ \"|\"+ str(i)+\".png\"\n",
" self.ui.getControl( FLIP1 ).setImage(os.path.join(background,str(i)+\".png\"))\n",
" h=h-3\n",
" self.ui.getControl( DIGIT110 ).setPosition(15,24+(40-h))\n",
" self.ui.getControl( DIGIT110 ).setHeight(h)\n",
" self.ui.getControl( DIGIT210 ).setPosition(65,24+(40-h))\n",
" self.ui.getControl( DIGIT210 ).setHeight(h)\n",
" i = i +1\n",
" h=43\n",
" self.ui.getControl( DIGIT110 ).setVisible(0)\n",
" self.ui.getControl( DIGIT210 ).setVisible(0)\n",
" self.ui.getControl( DIGIT120 ).setHeight(3)\n",
" self.ui.getControl( DIGIT120 ).setImage(os.path.join(digits,str(self.new1)+\"(2).png\"))\n",
" self.ui.getControl( DIGIT220 ).setHeight(3)\n",
" self.ui.getControl( DIGIT220 ).setImage(os.path.join(digits,str(self.new2)+\"(2).png\"))\n",
" self.ui.getControl( DIGIT120 ).setVisible(1)\n",
" self.ui.getControl( DIGIT220 ).setVisible(1)\n",
" h=3\n",
" while (i<20):\n",
" time.sleep(0.01)\n",
" print background+ \"|\"+ str(i)+\".png\"\n",
" h=h+4\n",
" self.ui.getControl( FLIP1 ).setImage(os.path.join(background,str(i)+\".png\"))\n",
" self.ui.getControl( DIGIT120 ).setHeight(h)\n",
" self.ui.getControl( DIGIT220 ).setHeight(h)\n",
" i = i +1\n",
" self.ui.getControl( DIGIT1 ).setImage(os.path.join(digits,str(self.new1)+\".png\"))\n",
" self.ui.getControl( DIGIT2 ).setImage(os.path.join(digits,str(self.new2)+\".png\"))\n",
" self.ui.getControl( DIGIT1 ).setVisible(1)\n",
" self.ui.getControl( DIGIT2 ).setVisible(1)\n",
" self.ui.getControl( DIGIT11 ).setVisible(0)\n",
" self.ui.getControl( DIGIT12 ).setVisible(0)\n",
" self.ui.getControl( DIGIT120 ).setVisible(0)\n",
" self.ui.getControl( DIGIT21 ).setVisible(0)\n",
" self.ui.getControl( DIGIT22 ).setVisible(0)\n",
" self.ui.getControl( DIGIT220 ).setVisible(0)\n",
" self.ui.getControl( FLIP1 ).setVisible(0)\n",
" \n",
" def flip2(self):\n",
" i=1\n",
" self.ui.getControl( FLIP2 ).setImage(os.path.join(background,\"0.png\"))\n",
" self.ui.getControl( FLIP2 ).setVisible(1)\n",
" self.ui.getControl( DIGIT31 ).setImage(os.path.join(digits,str(self.new1)+\"(1).png\"))\n",
" self.ui.getControl( DIGIT32 ).setImage(os.path.join(digits,str(self.old1)+\"(2).png\"))\n",
" self.ui.getControl( DIGIT310 ).setImage(os.path.join(digits,str(self.old1)+\"(1).png\"))\n",
" self.ui.getControl( DIGIT41 ).setImage(os.path.join(digits,str(self.new2)+\"(1).png\"))\n",
" self.ui.getControl( DIGIT42 ).setImage(os.path.join(digits,str(self.old2)+\"(2).png\"))\n",
" self.ui.getControl( DIGIT410 ).setImage(os.path.join(digits,str(self.old2)+\"(1).png\"))\n",
" self.ui.getControl( DIGIT310 ).setHeight(40)\n",
" self.ui.getControl( DIGIT310 ).setPosition(15,24)\n",
" self.ui.getControl( DIGIT410 ).setHeight(40)\n",
" self.ui.getControl( DIGIT410 ).setPosition(65,24)\n",
" self.ui.getControl( DIGIT310 ).setVisible(1)\n",
" self.ui.getControl( DIGIT410 ).setVisible(1)\n",
" self.ui.getControl( DIGIT32 ).setVisible(1)\n",
" self.ui.getControl( DIGIT42 ).setVisible(1)\n",
" self.ui.getControl( DIGIT3 ).setVisible(0)\n",
" self.ui.getControl( DIGIT4 ).setVisible(0)\n",
" self.ui.getControl( DIGIT31 ).setVisible(1)\n",
" self.ui.getControl( DIGIT41 ).setVisible(1)\n",
" h=40\n",
" while (i<12):\n",
" time.sleep(0.01)\n",
" print background+ \"|\"+ str(i)+\".png\"\n",
" self.ui.getControl( FLIP2 ).setImage(os.path.join(background,str(i)+\".png\"))\n",
" h=h-3\n",
" self.ui.getControl( DIGIT310 ).setPosition(15,24+(40-h))\n",
" self.ui.getControl( DIGIT310 ).setHeight(h)\n",
" self.ui.getControl( DIGIT410 ).setPosition(65,24+(40-h))\n",
" self.ui.getControl( DIGIT410 ).setHeight(h)\n",
" i = i +1\n",
" h=43\n",
" self.ui.getControl( DIGIT310 ).setVisible(0)\n",
" self.ui.getControl( DIGIT410 ).setVisible(0)\n",
" self.ui.getControl( DIGIT320 ).setHeight(3)\n",
" self.ui.getControl( DIGIT320 ).setImage(os.path.join(digits,str(self.new1)+\"(2).png\"))\n",
" self.ui.getControl( DIGIT420 ).setHeight(3)\n",
" self.ui.getControl( DIGIT420 ).setImage(os.path.join(digits,str(self.new2)+\"(2).png\"))\n",
" self.ui.getControl( DIGIT320 ).setVisible(1)\n",
" self.ui.getControl( DIGIT420 ).setVisible(1)\n",
" h=3\n",
" while (i<20):\n",
" time.sleep(0.01)\n",
" print background+ \"|\"+ str(i)+\".png\"\n",
" h=h+4\n",
" self.ui.getControl( FLIP2 ).setImage(os.path.join(background,str(i)+\".png\"))\n",
" self.ui.getControl( DIGIT320 ).setHeight(h)\n",
" self.ui.getControl( DIGIT420 ).setHeight(h)\n",
" i = i +1\n",
" self.ui.getControl( DIGIT3 ).setImage(os.path.join(digits,str(self.new1)+\".png\"))\n",
" self.ui.getControl( DIGIT4 ).setImage(os.path.join(digits,str(self.new2)+\".png\"))\n",
" self.ui.getControl( DIGIT3 ).setVisible(1)\n",
" self.ui.getControl( DIGIT4 ).setVisible(1)\n",
" self.ui.getControl( DIGIT31 ).setVisible(0)\n",
" self.ui.getControl( DIGIT32 ).setVisible(0)\n",
" self.ui.getControl( DIGIT320 ).setVisible(0)\n",
" self.ui.getControl( DIGIT41 ).setVisible(0)\n",
" self.ui.getControl( DIGIT42 ).setVisible(0)\n",
" self.ui.getControl( DIGIT420 ).setVisible(0)\n",
" self.ui.getControl( FLIP2 ).setVisible(0)\n",
" \n",
" class MoveClock(threading.Thread):\n",
" def __init__(self, ui):\n",
" threading.Thread.__init__(self)\n",
" self.ui = ui\n",
" \n",
" def run(self):\n",
" i =0\n",
" while (not self.ui.terminate):\n",
" time.sleep(1)\n",
" i=i+1\n",
" if (i>4):\n",
" i=0\n",
" x = random.randint(0,990)\n",
" y = random.randint(0,570)\n",
" self.ui.getControl( CLOCK ).setPosition(x,y)\n",
" \n",
" def __init__( self, *args, **kwargs ):\n",
" self.terminate = False\n",
" pass\n",
"\n",
"\n",
" def onInit( self ):\n",
" counter = self.TimeCounter(self)\n",
" counter.start()\n",
" mover = self.MoveClock(self)\n",
" mover.start()\n",
" pass\n",
" \n",
" \n",
" def onClick( self, controlId ):\n",
" pass\t\n",
" \n",
" def onFocus( self, controlId ):\n",
" pass\n",
" \n",
" def onAction( self, action ):\n",
" # if ( action.getButtonCode() in CANCEL_DIALOG ):\n",
" self.terminate = True\n",
" self.close()"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0.014285714285714285,
0.01639344262295082,
0.0196078431372549,
0.012048192771084338,
0,
0.011764705882352941,
0.01818181818181818,
0,
0.016129032258064516,
0.015151515151515152,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0.045454545454545456,
0.045454545454545456,
0.045454545454545456,
0.045454545454545456,
0,
0,
0.07692307692307693,
0,
0,
0.0625,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.034482758620689655,
0.034482758620689655,
0,
0.034482758620689655,
0.034482758620689655,
0.0625,
0.06666666666666667,
0,
0.03225806451612903,
0.03225806451612903,
0.0625,
0.06666666666666667,
0,
0.03225806451612903,
0.03225806451612903,
0.058823529411764705,
0,
0.034482758620689655,
0.07692307692307693,
0,
0.08771929824561403,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0.07692307692307693,
0.07692307692307693,
0.08333333333333333,
0.03225806451612903,
0,
0,
0,
0.1111111111111111,
0,
0,
0.0625,
0.03773584905660377,
0.030303030303030304,
0.04819277108433735,
0.037037037037037035,
0.04081632653061224,
0.04081632653061224,
0.04040404040404041,
0.04081632653061224,
0.04081632653061224,
0.04040404040404041,
0.03508771929824561,
0.04838709677419355,
0.03508771929824561,
0.04838709677419355,
0.03508771929824561,
0.03508771929824561,
0.03571428571428571,
0.03571428571428571,
0.03636363636363636,
0.03636363636363636,
0.03571428571428571,
0.03571428571428571,
0.058823529411764705,
0.038461538461538464,
0,
0,
0.043010752688172046,
0.045454545454545456,
0.0410958904109589,
0.03333333333333333,
0.0410958904109589,
0.03333333333333333,
0.04,
0.058823529411764705,
0.03508771929824561,
0.03508771929824561,
0.03571428571428571,
0.04040404040404041,
0.03571428571428571,
0.04040404040404041,
0.03508771929824561,
0.03508771929824561,
0.0625,
0.038461538461538464,
0,
0.03773584905660377,
0.045454545454545456,
0.043010752688172046,
0.03333333333333333,
0.03333333333333333,
0.04,
0.0425531914893617,
0.0425531914893617,
0.03636363636363636,
0.03636363636363636,
0.03571428571428571,
0.03571428571428571,
0.03508771929824561,
0.03571428571428571,
0.03571428571428571,
0.03508771929824561,
0.037037037037037035,
0.1111111111111111,
0,
0.0625,
0.04819277108433735,
0.037037037037037035,
0.04081632653061224,
0.04081632653061224,
0.04040404040404041,
0.04081632653061224,
0.04081632653061224,
0.04040404040404041,
0.03508771929824561,
0.04838709677419355,
0.03508771929824561,
0.04838709677419355,
0.03508771929824561,
0.03508771929824561,
0.03571428571428571,
0.03571428571428571,
0.03636363636363636,
0.03636363636363636,
0.03571428571428571,
0.03571428571428571,
0.058823529411764705,
0.038461538461538464,
0,
0.03773584905660377,
0.043010752688172046,
0.045454545454545456,
0.0410958904109589,
0.03333333333333333,
0.0410958904109589,
0.03333333333333333,
0.04,
0.058823529411764705,
0.03508771929824561,
0.03508771929824561,
0.03571428571428571,
0.04040404040404041,
0.03571428571428571,
0.04040404040404041,
0.03508771929824561,
0.03508771929824561,
0.0625,
0.038461538461538464,
0,
0.03773584905660377,
0.045454545454545456,
0.043010752688172046,
0.03333333333333333,
0.03333333333333333,
0.04,
0.0425531914893617,
0.0425531914893617,
0.03636363636363636,
0.03636363636363636,
0.03571428571428571,
0.03571428571428571,
0.03508771929824561,
0.03571428571428571,
0.03571428571428571,
0.03508771929824561,
0.037037037037037035,
0.09090909090909091,
0,
0,
0,
0,
0.07692307692307693,
0,
0.058823529411764705,
0,
0,
0.045454545454545456,
0.038461538461538464,
0.041666666666666664,
0.021739130434782608,
0.021739130434782608,
0.046153846153846156,
0.07692307692307693,
0.046511627906976744,
0,
0,
0,
0,
0.125,
0,
0,
0,
0,
0,
0.2,
0.25,
0.08333333333333333,
0.07142857142857142,
0.1111111111111111,
0.05555555555555555,
0,
0.2,
0.058823529411764705,
0,
0,
0.05
] | 302 | 0.028664 | false |
# -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
from Plugins.Extensions.MediaPortal.resources.keyboardext import VirtualKeyBoardExt
import Queue
import threading
from Plugins.Extensions.MediaPortal.resources.youtubeplayer import YoutubePlayer
from Plugins.Extensions.MediaPortal.resources.menuhelper import MenuHelper
from Components.ProgressBar import ProgressBar
try:
from Plugins.Extensions.MediaPortal.resources import cfscrape
except:
cfscrapeModule = False
else:
cfscrapeModule = True
try:
import requests
except:
requestsModule = False
else:
requestsModule = True
import urlparse
import thread
BASE_URL = "https://streamit.ws"
sit_cookies = CookieJar()
sit_ck = {}
sit_agent = ''
def sit_grabpage(pageurl, method='GET', postdata={}):
if requestsModule:
try:
s = requests.session()
url = urlparse.urlparse(pageurl)
headers = {'User-Agent': sit_agent}
if method == 'GET':
page = s.get(url.geturl(), cookies=sit_cookies, headers=headers)
elif method == 'POST':
page = s.post(url.geturl(), data=postdata, cookies=sit_cookies, headers=headers)
return page.content
except:
pass
class showstreamitGenre(MenuHelper):
	"""Genre selection screen for the streamit site.

	The site sits behind Cloudflare, so clearance tokens are fetched in
	a background thread before the (static) genre menu is built.
	"""
	# Static menu entries: (menu level, URL path relative to BASE_URL, label).
	base_menu = [
		(0, "/kino", 'Neu im Kino'),
		(0, "/film", 'Neueste Filme'),
		(0, "/film/?cat=1", 'Action'),
		(0, "/film/?cat=2", 'Adventure'),
		(0, "/film/?cat=3", 'Animation'),
		(0, "/film/?cat=4", 'Biography'),
		(0, "/film/?cat=5", 'Comedy'),
		(0, "/film/?cat=6", 'Crime'),
		(0, "/film/?cat=7", 'Documentary'),
		(0, "/film/?cat=8", 'Drama'),
		(0, "/film/?cat=9", 'Family'),
		(0, "/film/?cat=10", 'Fantasy'),
		(0, "/film/?cat=13", 'History'),
		(0, "/film/?cat=14", 'Horror'),
		(0, "/film/?cat=15", 'Music'),
		(0, "/film/?cat=17", 'Mystery'),
		(0, "/film/?cat=20", 'Romance'),
		(0, "/film/?cat=21", 'Sci-Fi'),
		(0, "/film/?cat=22", 'Sport'),
		(0, "/film/?cat=24", 'Thriller'),
		(0, "/film/?cat=25", 'War'),
		(0, "/film/?cat=26", 'Western'),
		]
	def __init__(self, session, m_level='main', m_path='/'):
		# m_level/m_path carry the MenuHelper navigation state.
		self.m_level = m_level
		self.m_path = m_path
		MenuHelper.__init__(self, session, 0, None, BASE_URL, "", self._defaultlistcenter, cookieJar=sit_cookies)
		self['title'] = Label("STREAMIT")
		self['ContentTitle'] = Label("Genres")
		self.param_search = ''
		self.search_token = None
		self.onLayoutFinish.append(self.mh_start)
	def mh_start(self):
		# Token fetching may block on network / a JS challenge, so it runs
		# off the UI thread; show a wait message meanwhile.
		thread.start_new_thread(self.get_tokens,("GetTokens",))
		self['name'].setText(_("Please wait..."))
	def get_tokens(self, threadName):
		"""Background thread: obtain or refresh Cloudflare cookies, then build the menu."""
		if requestsModule and cfscrapeModule:
			printl("Calling thread: %s" % threadName,self,'A')
			global sit_ck
			global sit_agent
			if sit_ck == {} or sit_agent == '':
				# First run in this session: solve the Cloudflare challenge.
				sit_ck, sit_agent = cfscrape.get_tokens(BASE_URL)
				requests.cookies.cookiejar_from_dict(sit_ck, cookiejar=sit_cookies)
			else:
				# Tokens cached: probe the site and re-solve only if
				# Cloudflare answers with its 503 challenge again.
				s = requests.session()
				url = urlparse.urlparse(BASE_URL)
				headers = {'user-agent': sit_agent}
				page = s.get(url.geturl(), cookies=sit_cookies, headers=headers)
				if page.status_code == 503 and page.headers.get("Server") == "cloudflare-nginx":
					sit_ck, sit_agent = cfscrape.get_tokens(BASE_URL)
					requests.cookies.cookiejar_from_dict(sit_ck, cookiejar=sit_cookies)
			# UI work must happen on the reactor (main) thread.
			reactor.callFromThread(self.mh_initMenu)
		else:
			reactor.callFromThread(self.mh_errorMenu)
	def mh_errorMenu(self):
		# Mandatory dependencies missing: inform the user and close the screen.
		message = self.session.open(MessageBoxExt, _("Mandatory depends python-requests and/or python-pyexecjs and nodejs are missing!"), MessageBoxExt.TYPE_ERROR)
		self.keyCancel()
	def mh_initMenu(self):
		self.mh_buildMenu(self.mh_baseUrl + self.m_path, agent=sit_agent)
	def mh_parseCategorys(self, data):
		# The menu is static; the downloaded page content is ignored.
		self.mh_genMenu2(self.base_menu)
	def mh_callGenreListScreen(self):
		genreurl = self.mh_baseUrl+self.mh_genreUrl[self.mh_menuLevel]
		self.session.open(streamitFilmListeScreen, genreurl, self.mh_genreTitle)
class streamitFilmListeScreen(MPScreen, ThumbsHelper):
	"""Paged film list for one genre.

	Page loads and cover/description updates are decoupled through
	queues (filmQ, picQ) guarded by events (eventL, eventP, eventH) so
	that rapid key presses only trigger one fetch at a time. The exact
	order of queue/event operations is load-bearing; do not reorder.
	"""
	def __init__(self, session, genreLink, genreName, series_img=None, last_series_tag='', season_data=None):
		self.genreLink = genreLink
		self.genreName = genreName
		self.seriesImg = series_img
		self.seasonData = season_data
		MPScreen.__init__(self, session, skin='MP_PluginDescr', widgets=('MP_widget_rating',))
		ThumbsHelper.__init__(self)
		self["hdpic"] = Pixmap()
		self['rating10'] = ProgressBar()
		self['rating0'] = Pixmap()
		self["hdpic"].hide()
		self["actions"] = ActionMap(["MP_Actions2", "MP_Actions"], {
			"ok" : self.keyOK,
			"cancel": self.keyCancel,
			"5" : self.keyShowThumb,
			"up" : self.keyUp,
			"down" : self.keyDown,
			"right" : self.keyRight,
			"left" : self.keyLeft,
			"upUp" : self.key_repeatedUp,
			"rightUp" : self.key_repeatedUp,
			"leftUp" : self.key_repeatedUp,
			"downUp" : self.key_repeatedUp,
			"upRepeated" : self.keyUpRepeated,
			"downRepeated" : self.keyDownRepeated,
			"rightRepeated" : self.keyRightRepeated,
			"leftRepeated" : self.keyLeftRepeated,
			"nextBouquet" : self.keyPageUp,
			"prevBouquet" : self.keyPageDown,
			"1" : self.key_1,
			"3" : self.key_3,
			"4" : self.key_4,
			"6" : self.key_6,
			"7" : self.key_7,
			"9" : self.key_9,
			"0": self.closeAll,
			"yellow" : self.keySort,
		}, -1)
		# Sort options parsed from the page; F3 label is shown only when present.
		self.sortFuncs = None
		self.sortOrderStrGenre = ""
		self['title'] = Label("STREAMIT")
		self['Page'] = Label(_("Page:"))
		self['F3'] = Label(_("Sort by..."))
		self['F3'].hide()
		self.timerStart = False
		self.seekTimerRun = False
		# eventL: a page load is in flight; eventP: a picture update is in
		# flight; eventH: description update (checked in loadPic).
		self.eventL = threading.Event()
		self.eventH = threading.Event()
		self.eventP = threading.Event()
		self.filmQ = Queue.Queue(0)
		self.hanQ = Queue.Queue(0)
		self.picQ = Queue.Queue(0)
		self.updateP = 0
		self.keyLocked = True
		self.filmListe = []
		# page == 0 means "pagination not initialised yet"; set on first parse.
		self.page = 0
		self.pages = 0;
		self.neueFilme = re.search('Neue Filme',self.genreName)
		self.sucheFilme = re.search('Videosuche',self.genreName)
		if 'HD Filme' in self.genreName:
			self.streamTag = 'streamhd'
		else:
			self.streamTag = 'stream'
		self.seriesTag = ''
		self.setGenreStrTitle()
		self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
		self['liste'] = self.ml
		self.onLayoutFinish.append(self.loadPage)
	def setGenreStrTitle(self):
		# Title shows the genre plus the active sort order, if any.
		if self.sortOrderStrGenre:
			sortOrder = ' (%s)' % self.sortOrderStrGenre
		else:
			sortOrder = ''
		self['ContentTitle'].setText("%s%s%s" % (self.seriesTag,self.genreName,sortOrder))
	def loadPage(self):
		# Build the page URL (search results are never paginated here).
		if not self.sucheFilme and self.page > 1:
			page = max(1,self.page)
			link = self.genreLink
			if not '?' in link:
				link += '?'
			else:
				link += '&'
			url = "%spage=%d" % (link, page)
		else:
			url = self.genreLink
		if self.page:
			self['page'].setText("%d / %d" % (self.page,self.pages))
		# Queue the request; start fetching only if no load is in flight.
		self.filmQ.put(url)
		if not self.eventL.is_set():
			self.eventL.set()
			self.loadPageQueued()
		else:
			self['name'].setText(_('Please wait...'))
			self['handlung'].setText("")
			self['coverArt'].hide()
	def loadPageQueued(self):
		self['name'].setText(_('Please wait...'))
		self['handlung'].setText("")
		self['coverArt'].hide()
		# Drain the queue and keep only the most recent URL.
		while not self.filmQ.empty():
			url = self.filmQ.get_nowait()
		data = sit_grabpage(url)
		self.loadPageData(data)
	def dataError(self, error):
		self.eventL.clear()
		printl(error,self,"E")
		self.filmListe.append((_("No movies found!"),"","","", 0, False))
		self.ml.setList(map(self.streamitFilmListEntry, self.filmListe))
	def loadPageData(self, data):
		"""Parse one listing page into (name, url, image, imdb, rating, hd) tuples."""
		self.getPostFuncs(data)
		self.filmListe = []
		l = len(data)
		a = 0
		# Walk the page thumb-by-thumb; a is the scan offset into data.
		while a < l:
			mg = re.search('<div class="post-thumb"(.*?)</div>\s+</li>', data[a:], re.S)
			if mg:
				a += mg.end()
				m = re.search('<a href="(.*?)".*?title="(.*?)">.*?<img.*?src="(.*?)".*?<div class="voting".*?style="width:(\d*)', mg.group(1), re.S)
				if m:
					url,name,imageurl,rating = m.groups()
					if 'hd_icon' in mg.group(1):
						hd = True
					else:
						hd = False
					# rating is a 0-100 width percentage; scale to IMDb 0-10.
					if not rating: rating = "0"
					imdb = "IMDb: %.1f / 10" % (float(rating) / 10)
					if not url.startswith('http'):
						url = BASE_URL + url
					if not imageurl.startswith('http'):
						imageurl = BASE_URL + imageurl
					self.filmListe.append((decodeHtml(name), url, imageurl, imdb, rating, hd))
			else:
				a = l
		if self.filmListe:
			# First successful parse: read total page count from pagination.
			if not self.pages:
				m = re.search('class=\'pagination\'.*?page=(\d+)\'>Last</a', data)
				if m:
					self.pages = int(m.group(1))
				else:
					self.pages = 1
				self.page = 1
				self['page'].setText("%d / %d" % (self.page,self.pages))
			self.keyLocked = False
			self.ml.setList(map(self.streamitFilmListEntry, self.filmListe))
			self.th_ThumbsQuery(self.filmListe, 0, 1, 2, None, None, self.page, self.pages, agent=sit_agent, cookies=sit_ck)
			self['liste'].moveToIndex(0)
			self.loadPicQueued()
		else:
			self.filmListe.append((_("No entries found!"),"","","", 0, False))
			self.ml.setList(map(self.streamitFilmListEntry, self.filmListe))
			if self.filmQ.empty():
				self.eventL.clear()
			else:
				self.loadPageQueued()
	def getPostFuncs(self, data):
		"""Extract the site's sort links ('postFuncs') and toggle the F3 label."""
		self.sortFuncs = []
		try:
			m = re.search('id="postFuncs">(.*?)<!-- /#postFuncs -->', data, re.S)
			if m:
				for m2 in re.finditer('href="(.*?)">(.*?)</a', m.group(1)):
					href, name = m2.groups()
					# Strip any page parameter so sorting restarts at page 1.
					href = re.sub('&page=\d+', '', href, 1)
					href = re.sub('\?page=\d+', '?', href, 1)
					self.sortFuncs.append((decodeHtml(name), decodeHtml(href)))
		except:
			pass
		if self.sortFuncs:
			self['F3'].show()
		else:
			self['F3'].hide()
	def loadPicQueued(self):
		self.picQ.put(None)
		if not self.eventP.is_set():
			self.eventP.set()
			self.loadPic()
	def loadPic(self):
		"""Update name/cover/rating widgets for the current selection."""
		if self.picQ.empty():
			self.eventP.clear()
			return
		# Skip while a previous cover/description update is still running;
		# showCoverExit re-enters loadPic when it finishes.
		if self.eventH.is_set() or self.updateP:
			print "Pict. or descr. update in progress"
			print "eventH: ",self.eventH.is_set()
			print "eventP: ",self.eventP.is_set()
			print "updateP: ",self.updateP
			return
		while not self.picQ.empty():
			self.picQ.get_nowait()
		streamName = self['liste'].getCurrent()[0][0]
		self['name'].setText(streamName)
		streamPic = self['liste'].getCurrent()[0][2]
		streamUrl = self['liste'].getCurrent()[0][1]
		self.updateP = 1
		CoverHelper(self['coverArt'], self.showCoverExit).getCover(streamPic, agent=sit_agent, cookieJar=sit_cookies)
		rate = self['liste'].getCurrent()[0][4]
		hd = self['liste'].getCurrent()[0][5]
		if hd:
			self['hdpic'].show()
		else:
			self['hdpic'].hide()
		# Rating is a 0-100 percentage; clamp defensively.
		rating = int(rate)
		if rating > 100:
			rating = 100
		self['rating10'].setValue(rating)
	def dataErrorP(self, error):
		printl(error,self,"E")
		self.ShowCoverNone()
	def showCoverExit(self):
		# Cover finished loading: resume any queued page load or pic update.
		self.updateP = 0;
		if not self.filmQ.empty():
			self.loadPageQueued()
		else:
			self.eventL.clear()
			self.loadPic()
	def keyOK(self):
		if self.keyLocked or self.eventL.is_set():
			return
		streamLink = self['liste'].getCurrent()[0][1]
		streamName = self['liste'].getCurrent()[0][0]
		imageLink = self['liste'].getCurrent()[0][2]
		self.session.open(streamitStreams, streamLink, streamName, imageLink, self.streamTag)
	def keyUpRepeated(self):
		if self.keyLocked:
			return
		self['coverArt'].hide()
		self['liste'].up()
	def keyDownRepeated(self):
		if self.keyLocked:
			return
		self['coverArt'].hide()
		self['liste'].down()
	def key_repeatedUp(self):
		# Key released after repeating: refresh the detail widgets once.
		if self.keyLocked:
			return
		self.loadPicQueued()
	def keyLeftRepeated(self):
		if self.keyLocked:
			return
		self['coverArt'].hide()
		self['liste'].pageUp()
	def keyRightRepeated(self):
		if self.keyLocked:
			return
		self['coverArt'].hide()
		self['liste'].pageDown()
	def keyPageDown(self):
		if self.seekTimerRun:
			self.seekTimerRun = False
		self.keyPageDownFast(1)
	def keyPageUp(self):
		if self.seekTimerRun:
			self.seekTimerRun = False
		self.keyPageUpFast(1)
	def keyPageUpFast(self,step):
		# Advance `step` pages, wrapping to page 1 past the end.
		if self.keyLocked:
			return
		oldpage = self.page
		if (self.page + step) <= self.pages:
			self.page += step
		else:
			self.page = 1
		if oldpage != self.page:
			self.loadPage()
	def keyPageDownFast(self,step):
		# Go back `step` pages, wrapping to the last page before page 1.
		if self.keyLocked:
			return
		oldpage = self.page
		if (self.page - step) >= 1:
			self.page -= step
		else:
			self.page = self.pages
		if oldpage != self.page:
			self.loadPage()
	def key_1(self):
		self.keyPageDownFast(2)
	def key_4(self):
		self.keyPageDownFast(5)
	def key_7(self):
		self.keyPageDownFast(10)
	def key_3(self):
		self.keyPageUpFast(2)
	def key_6(self):
		self.keyPageUpFast(5)
	def key_9(self):
		self.keyPageUpFast(10)
	def keySort(self):
		if not self.keyLocked and self.sortFuncs:
			self.handleSort()
	def handleSort(self):
		from Screens.ChoiceBox import ChoiceBox
		self.session.openWithCallback(self.cb_handleSort, ChoiceBox, title=_("Sort Selection"), list = self.sortFuncs)
	def cb_handleSort(self, answer):
		# answer is (label, href) from the ChoiceBox, or None on cancel.
		href = answer and answer[1]
		if href:
			self.genreLink = self.genreLink.split('?')[0] + href
			self.sortOrderStrGenre = answer[0]
			self.setGenreStrTitle()
			self.loadPage()
class streamitStreams(MPScreen):
	"""Stream (mirror) selection screen for a single film or episode.

	For series, streams are fetched via a POST with post_data/post_url;
	for films they are parsed from the film page itself.
	"""
	def __init__(self, session, filmUrl, filmName, imageLink, streamTag, post_data=None, post_url=None):
		self.filmUrl = filmUrl
		self.filmName = filmName
		self.imageUrl = imageLink
		self.stream_tag = streamTag
		self.postData = post_data
		self.postUrl = post_url
		MPScreen.__init__(self, session, skin='MP_PluginDescr')
		self["actions"] = ActionMap(["MP_Actions"], {
			"green" 	: self.keyTrailer,
			"ok" 	: self.keyOK,
			"0"		: self.closeAll,
			"cancel"	: self.keyCancel
		}, -1)
		self['title'] = Label("STREAMIT")
		self['ContentTitle'] = Label(_("Stream Selection"))
		self['name'] = Label(filmName)
		self.trailerId = None
		self.streamListe = []
		self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
		self['liste'] = self.ml
		self.keyLocked = True
		self.onLayoutFinish.append(self.loadPage)
	def loadPage(self):
		self.streamListe.append((_('Please wait...'),"","",""))
		self.ml.setList(map(self.streamitStreamListEntry, self.streamListe))
		# Series pass post_data; films parse streams from the page itself.
		seriesStreams = self.postData != None
		data = sit_grabpage(self.filmUrl)
		self.parseData(data, seriesStreams)
	def getSeriesStreams(self):
		data = sit_grabpage(self.postUrl, method='POST', postdata=self.postData)
		self.parseStreams(data)
	def parseStreams(self, data):
		"""Collect (hoster, url, part, quality/audio) entries from the mirror markup."""
		self.streamListe = []
		m = re.search('id="sel_qualideutsch">(.*?)</select>', data, re.S)
		if m:
			# Each quality button links to a block of mirror anchors.
			buttons = re.findall('id="(.*?)" class="mirrorbuttonsdeutsch">(.*?)</', m.group(1))
			for id,nm in buttons:
				m2 = re.search('class="mirrorsdeutsch"\sid="\w*%s"(.*?)></div></div>' % id, data, re.S)
				if m2:
					# Optional audio/language note ("Ton: ...").
					m3 = re.search('>Ton: <b>(.*?)</b', m2.group(1))
					if m3:
						ton = ', %s' % m3.group(1)
					else:
						ton = ''
					streams = re.findall('<a href="(.*?)".*?value="(.*?)"', m2.group(1).replace('\n', ''))
					for (isUrl,isStream) in streams:
						if isSupportedHoster(isStream, True):
							streamPart = ''
							isUrl = isUrl.replace('\n','')
							isUrl = isUrl.replace('\r','')
							self.streamListe.append((isStream,isUrl,streamPart,' (%s%s)' % (nm.strip(), ton.strip())))
						else:
							print "No supported hoster:"
		if self.streamListe:
			self.keyLocked = False
		else:
			self.streamListe.append(("No streams found!","","",""))
		self.ml.setList(map(self.streamitStreamListEntry, self.streamListe))
	def parseData(self, data, seriesStreams=False):
		"""Extract trailer id, description and cover, then parse the streams."""
		m = re.search('//www.youtube\.com/(embed|v|p)/(.*?)(\?|" |&)', data)
		if m:
			self.trailerId = m.group(2)
			self['F2'].setText('Trailer')
		else: self.trailerId = None
		desc = ''
		# Year and runtime, if present on the page.
		mdesc = re.search('<b>(Jahr:)</b>.*?">(.*?)</.*?<b>(Länge:)</b>.*?">(.*?)</', data, re.S)
		if mdesc:
			desc += mdesc.group(1) + mdesc.group(2) + ' ' + mdesc.group(3) + mdesc.group(4) + '\n\n'
		elif desc:
			desc += '\n'
		# Plot text between the two "cleaner" divs, HTML tags stripped.
		mdesc = re.search('<div id="cleaner"> </div><div id="cleaner"> </div>(.*?)<br><br>',data, re.S)
		if mdesc:
			desc += re.sub('<.*?>', '', mdesc.group(1).replace('\n',''), re.S).replace(' ','').strip()
		else:
			desc += "Keine weiteren Info's !"
		self['handlung'].setText(decodeHtml(desc))
		CoverHelper(self['coverArt']).getCover(self.imageUrl, agent=sit_agent, cookieJar=sit_cookies)
		if not seriesStreams:
			self.parseStreams(data)
		else:
			self.getSeriesStreams()
	def dataError(self, error):
		printl(error,self,"E")
		self.streamListe.append(("Data error!","","",""))
		self.ml.setList(map(self.streamitStreamListEntry, self.streamListe))
	def gotLink(self, stream_url):
		# Callback from the hoster resolver with the final playable URL.
		if stream_url:
			title = self.filmName + self['liste'].getCurrent()[0][2]
			self.session.open(SimplePlayer, [(title, stream_url, self.imageUrl)], cover=True, showPlaylist=False, ltype='streamit')
	def keyTrailer(self):
		if self.trailerId:
			self.session.open(
				YoutubePlayer,
				[(self.filmName+' - Trailer', self.trailerId, self.imageUrl)],
				playAll = False,
				showPlaylist=False,
				showCover=True
			)
	def keyOK(self):
		if self.keyLocked:
			return
		url = self['liste'].getCurrent()[0][1]
		if not url.startswith('http'):
			url = BASE_URL + url
		data = sit_grabpage(url)
		self.getUrl(data)
	def getUrl(self,data):
		"""Pull the hidden download link out of the mirror page and resolve it."""
		try:
			link = re.search('id="download" class="cd" style="display:none"><a href="(.*?)">', data).group(1)
			# Lower-case only the host part of the URL.
			us = urlparse.urlsplit(link)
			link = urlparse.urlunsplit(us[0:1]+(us[1].lower(),)+us[2:])
		except:
			# Deliberate dummy URL: check_link will report it as dead.
			link = "http://fuck.com"
get_stream_link(self.session).check_link(link, self.gotLink) | [
"# -*- coding: utf-8 -*-\n",
"from Plugins.Extensions.MediaPortal.plugin import _\n",
"from Plugins.Extensions.MediaPortal.resources.imports import *\n",
"from Plugins.Extensions.MediaPortal.resources.keyboardext import VirtualKeyBoardExt\n",
"import Queue\n",
"import threading\n",
"from Plugins.Extensions.MediaPortal.resources.youtubeplayer import YoutubePlayer\n",
"from Plugins.Extensions.MediaPortal.resources.menuhelper import MenuHelper\n",
"from Components.ProgressBar import ProgressBar\n",
"\n",
"try:\n",
"\tfrom Plugins.Extensions.MediaPortal.resources import cfscrape\n",
"except:\n",
"\tcfscrapeModule = False\n",
"else:\n",
"\tcfscrapeModule = True\n",
"\n",
"try:\n",
"\timport requests\n",
"except:\n",
"\trequestsModule = False\n",
"else:\n",
"\trequestsModule = True\n",
"\n",
"import urlparse\n",
"import thread\n",
"\n",
"BASE_URL = \"https://streamit.ws\"\n",
"sit_cookies = CookieJar()\n",
"sit_ck = {}\n",
"sit_agent = ''\n",
"\n",
"def sit_grabpage(pageurl, method='GET', postdata={}):\n",
"\tif requestsModule:\n",
"\t\ttry:\n",
"\t\t\ts = requests.session()\n",
"\t\t\turl = urlparse.urlparse(pageurl)\n",
"\t\t\theaders = {'User-Agent': sit_agent}\n",
"\t\t\tif method == 'GET':\n",
"\t\t\t\tpage = s.get(url.geturl(), cookies=sit_cookies, headers=headers)\n",
"\t\t\telif method == 'POST':\n",
"\t\t\t\tpage = s.post(url.geturl(), data=postdata, cookies=sit_cookies, headers=headers)\n",
"\t\t\treturn page.content\n",
"\t\texcept:\n",
"\t\t\tpass\n",
"\n",
"class showstreamitGenre(MenuHelper):\n",
"\n",
"\tbase_menu = [\n",
"\t\t(0, \"/kino\", 'Neu im Kino'),\n",
"\t\t(0, \"/film\", 'Neueste Filme'),\n",
"\t\t(0, \"/film/?cat=1\", 'Action'),\n",
"\t\t(0, \"/film/?cat=2\", 'Adventure'),\n",
"\t\t(0, \"/film/?cat=3\", 'Animation'),\n",
"\t\t(0, \"/film/?cat=4\", 'Biography'),\n",
"\t\t(0, \"/film/?cat=5\", 'Comedy'),\n",
"\t\t(0, \"/film/?cat=6\", 'Crime'),\n",
"\t\t(0, \"/film/?cat=7\", 'Documentary'),\n",
"\t\t(0, \"/film/?cat=8\", 'Drama'),\n",
"\t\t(0, \"/film/?cat=9\", 'Family'),\n",
"\t\t(0, \"/film/?cat=10\", 'Fantasy'),\n",
"\t\t(0, \"/film/?cat=13\", 'History'),\n",
"\t\t(0, \"/film/?cat=14\", 'Horror'),\n",
"\t\t(0, \"/film/?cat=15\", 'Music'),\n",
"\t\t(0, \"/film/?cat=17\", 'Mystery'),\n",
"\t\t(0, \"/film/?cat=20\", 'Romance'),\n",
"\t\t(0, \"/film/?cat=21\", 'Sci-Fi'),\n",
"\t\t(0, \"/film/?cat=22\", 'Sport'),\n",
"\t\t(0, \"/film/?cat=24\", 'Thriller'),\n",
"\t\t(0, \"/film/?cat=25\", 'War'),\n",
"\t\t(0, \"/film/?cat=26\", 'Western'),\n",
"\t\t]\n",
"\n",
"\tdef __init__(self, session, m_level='main', m_path='/'):\n",
"\t\tself.m_level = m_level\n",
"\t\tself.m_path = m_path\n",
"\t\tMenuHelper.__init__(self, session, 0, None, BASE_URL, \"\", self._defaultlistcenter, cookieJar=sit_cookies)\n",
"\n",
"\t\tself['title'] = Label(\"STREAMIT\")\n",
"\t\tself['ContentTitle'] = Label(\"Genres\")\n",
"\t\tself.param_search = ''\n",
"\t\tself.search_token = None\n",
"\n",
"\t\tself.onLayoutFinish.append(self.mh_start)\n",
"\n",
"\tdef mh_start(self):\n",
"\t\tthread.start_new_thread(self.get_tokens,(\"GetTokens\",))\n",
"\t\tself['name'].setText(_(\"Please wait...\"))\n",
"\n",
"\tdef get_tokens(self, threadName):\n",
"\t\tif requestsModule and cfscrapeModule:\n",
"\t\t\tprintl(\"Calling thread: %s\" % threadName,self,'A')\n",
"\t\t\tglobal sit_ck\n",
"\t\t\tglobal sit_agent\n",
"\t\t\tif sit_ck == {} or sit_agent == '':\n",
"\t\t\t\tsit_ck, sit_agent = cfscrape.get_tokens(BASE_URL)\n",
"\t\t\t\trequests.cookies.cookiejar_from_dict(sit_ck, cookiejar=sit_cookies)\n",
"\t\t\telse:\n",
"\t\t\t\ts = requests.session()\n",
"\t\t\t\turl = urlparse.urlparse(BASE_URL)\n",
"\t\t\t\theaders = {'user-agent': sit_agent}\n",
"\t\t\t\tpage = s.get(url.geturl(), cookies=sit_cookies, headers=headers)\n",
"\t\t\t\tif page.status_code == 503 and page.headers.get(\"Server\") == \"cloudflare-nginx\":\n",
"\t\t\t\t\tsit_ck, sit_agent = cfscrape.get_tokens(BASE_URL)\n",
"\t\t\t\t\trequests.cookies.cookiejar_from_dict(sit_ck, cookiejar=sit_cookies)\n",
"\t\t\treactor.callFromThread(self.mh_initMenu)\n",
"\t\telse:\n",
"\t\t\treactor.callFromThread(self.mh_errorMenu)\n",
"\n",
"\tdef mh_errorMenu(self):\n",
"\t\tmessage = self.session.open(MessageBoxExt, _(\"Mandatory depends python-requests and/or python-pyexecjs and nodejs are missing!\"), MessageBoxExt.TYPE_ERROR)\n",
"\t\tself.keyCancel()\n",
"\n",
"\tdef mh_initMenu(self):\n",
"\t\tself.mh_buildMenu(self.mh_baseUrl + self.m_path, agent=sit_agent)\n",
"\n",
"\tdef mh_parseCategorys(self, data):\n",
"\t\tself.mh_genMenu2(self.base_menu)\n",
"\n",
"\tdef mh_callGenreListScreen(self):\n",
"\t\tgenreurl = self.mh_baseUrl+self.mh_genreUrl[self.mh_menuLevel]\n",
"\t\tself.session.open(streamitFilmListeScreen, genreurl, self.mh_genreTitle)\n",
"\n",
"class streamitFilmListeScreen(MPScreen, ThumbsHelper):\n",
"\n",
"\tdef __init__(self, session, genreLink, genreName, series_img=None, last_series_tag='', season_data=None):\n",
"\t\tself.genreLink = genreLink\n",
"\t\tself.genreName = genreName\n",
"\t\tself.seriesImg = series_img\n",
"\t\tself.seasonData = season_data\n",
"\n",
"\t\tMPScreen.__init__(self, session, skin='MP_PluginDescr', widgets=('MP_widget_rating',))\n",
"\t\tThumbsHelper.__init__(self)\n",
"\n",
"\t\tself[\"hdpic\"] = Pixmap()\n",
"\t\tself['rating10'] = ProgressBar()\n",
"\t\tself['rating0'] = Pixmap()\n",
"\t\tself[\"hdpic\"].hide()\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions2\", \"MP_Actions\"], {\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"cancel\": self.keyCancel,\n",
"\t\t\t\"5\" : self.keyShowThumb,\n",
"\t\t\t\"up\" : self.keyUp,\n",
"\t\t\t\"down\" : self.keyDown,\n",
"\t\t\t\"right\" : self.keyRight,\n",
"\t\t\t\"left\" : self.keyLeft,\n",
"\t\t\t\"upUp\" : self.key_repeatedUp,\n",
"\t\t\t\"rightUp\" : self.key_repeatedUp,\n",
"\t\t\t\"leftUp\" : self.key_repeatedUp,\n",
"\t\t\t\"downUp\" : self.key_repeatedUp,\n",
"\t\t\t\"upRepeated\" : self.keyUpRepeated,\n",
"\t\t\t\"downRepeated\" : self.keyDownRepeated,\n",
"\t\t\t\"rightRepeated\" : self.keyRightRepeated,\n",
"\t\t\t\"leftRepeated\" : self.keyLeftRepeated,\n",
"\t\t\t\"nextBouquet\" : self.keyPageUp,\n",
"\t\t\t\"prevBouquet\" : self.keyPageDown,\n",
"\t\t\t\"1\" : self.key_1,\n",
"\t\t\t\"3\" : self.key_3,\n",
"\t\t\t\"4\" : self.key_4,\n",
"\t\t\t\"6\" : self.key_6,\n",
"\t\t\t\"7\" : self.key_7,\n",
"\t\t\t\"9\" : self.key_9,\n",
"\t\t\t\"0\": self.closeAll,\n",
"\t\t\t\"yellow\" : self.keySort,\n",
"\t\t}, -1)\n",
"\n",
"\t\tself.sortFuncs = None\n",
"\t\tself.sortOrderStrGenre = \"\"\n",
"\t\tself['title'] = Label(\"STREAMIT\")\n",
"\n",
"\t\tself['Page'] = Label(_(\"Page:\"))\n",
"\t\tself['F3'] = Label(_(\"Sort by...\"))\n",
"\t\tself['F3'].hide()\n",
"\n",
"\t\tself.timerStart = False\n",
"\t\tself.seekTimerRun = False\n",
"\t\tself.eventL = threading.Event()\n",
"\t\tself.eventH = threading.Event()\n",
"\t\tself.eventP = threading.Event()\n",
"\t\tself.filmQ = Queue.Queue(0)\n",
"\t\tself.hanQ = Queue.Queue(0)\n",
"\t\tself.picQ = Queue.Queue(0)\n",
"\t\tself.updateP = 0\n",
"\t\tself.keyLocked = True\n",
"\t\tself.filmListe = []\n",
"\t\tself.page = 0\n",
"\t\tself.pages = 0;\n",
"\t\tself.neueFilme = re.search('Neue Filme',self.genreName)\n",
"\t\tself.sucheFilme = re.search('Videosuche',self.genreName)\n",
"\t\tif 'HD Filme' in self.genreName:\n",
"\t\t\tself.streamTag = 'streamhd'\n",
"\t\telse:\n",
"\t\t\tself.streamTag = 'stream'\n",
"\t\tself.seriesTag = ''\n",
"\n",
"\t\tself.setGenreStrTitle()\n",
"\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tself.onLayoutFinish.append(self.loadPage)\n",
"\n",
"\tdef setGenreStrTitle(self):\n",
"\t\tif self.sortOrderStrGenre:\n",
"\t\t\tsortOrder = ' (%s)' % self.sortOrderStrGenre\n",
"\t\telse:\n",
"\t\t\tsortOrder = ''\n",
"\n",
"\t\tself['ContentTitle'].setText(\"%s%s%s\" % (self.seriesTag,self.genreName,sortOrder))\n",
"\n",
"\tdef loadPage(self):\n",
"\t\tif not self.sucheFilme and self.page > 1:\n",
"\t\t\tpage = max(1,self.page)\n",
"\t\t\tlink = self.genreLink\n",
"\t\t\tif not '?' in link:\n",
"\t\t\t\tlink += '?'\n",
"\t\t\telse:\n",
"\t\t\t\tlink += '&'\n",
"\t\t\turl = \"%spage=%d\" % (link, page)\n",
"\t\telse:\n",
"\t\t\turl = self.genreLink\n",
"\n",
"\t\tif self.page:\n",
"\t\t\tself['page'].setText(\"%d / %d\" % (self.page,self.pages))\n",
"\n",
"\t\tself.filmQ.put(url)\n",
"\t\tif not self.eventL.is_set():\n",
"\t\t\tself.eventL.set()\n",
"\t\t\tself.loadPageQueued()\n",
"\t\telse:\n",
"\t\t\tself['name'].setText(_('Please wait...'))\n",
"\t\t\tself['handlung'].setText(\"\")\n",
"\t\t\tself['coverArt'].hide()\n",
"\n",
"\tdef loadPageQueued(self):\n",
"\t\tself['name'].setText(_('Please wait...'))\n",
"\t\tself['handlung'].setText(\"\")\n",
"\t\tself['coverArt'].hide()\n",
"\t\twhile not self.filmQ.empty():\n",
"\t\t\turl = self.filmQ.get_nowait()\n",
"\t\tdata = sit_grabpage(url)\n",
"\t\tself.loadPageData(data)\n",
"\n",
"\tdef dataError(self, error):\n",
"\t\tself.eventL.clear()\n",
"\t\tprintl(error,self,\"E\")\n",
"\t\tself.filmListe.append((_(\"No movies found!\"),\"\",\"\",\"\", 0, False))\n",
"\t\tself.ml.setList(map(self.streamitFilmListEntry,\tself.filmListe))\n",
"\n",
"\tdef loadPageData(self, data):\n",
"\t\tself.getPostFuncs(data)\n",
"\t\tself.filmListe = []\n",
"\t\tl = len(data)\n",
"\t\ta = 0\n",
"\t\twhile a < l:\n",
"\t\t\tmg = re.search('<div class=\"post-thumb\"(.*?)</div>\\s+</li>', data[a:], re.S)\n",
"\t\t\tif mg:\n",
"\t\t\t\ta += mg.end()\n",
"\t\t\t\tm = re.search('<a href=\"(.*?)\".*?title=\"(.*?)\">.*?<img.*?src=\"(.*?)\".*?<div class=\"voting\".*?style=\"width:(\\d*)', mg.group(1), re.S)\n",
"\t\t\t\tif m:\n",
"\t\t\t\t\turl,name,imageurl,rating = m.groups()\n",
"\t\t\t\t\tif 'hd_icon' in mg.group(1):\n",
"\t\t\t\t\t\thd = True\n",
"\t\t\t\t\telse:\n",
"\t\t\t\t\t\thd = False\n",
"\n",
"\t\t\t\t\tif not rating: rating = \"0\"\n",
"\t\t\t\t\timdb = \"IMDb: %.1f / 10\" % (float(rating) / 10)\n",
"\t\t\t\t\tif not url.startswith('http'):\n",
"\t\t\t\t\t\turl = BASE_URL + url\n",
"\t\t\t\t\tif not imageurl.startswith('http'):\n",
"\t\t\t\t\t\timageurl = BASE_URL + imageurl\n",
"\t\t\t\t\tself.filmListe.append((decodeHtml(name), url, imageurl, imdb, rating, hd))\n",
"\t\t\telse:\n",
"\t\t\t\ta = l\n",
"\n",
"\t\tif self.filmListe:\n",
"\t\t\tif not self.pages:\n",
"\t\t\t\tm = re.search('class=\\'pagination\\'.*?page=(\\d+)\\'>Last</a', data)\n",
"\t\t\t\tif m:\n",
"\t\t\t\t\tself.pages = int(m.group(1))\n",
"\t\t\t\telse:\n",
"\t\t\t\t\tself.pages = 1\n",
"\n",
"\t\t\t\tself.page = 1\n",
"\t\t\t\tself['page'].setText(\"%d / %d\" % (self.page,self.pages))\n",
"\n",
"\t\t\tself.keyLocked = False\n",
"\t\t\tself.ml.setList(map(self.streamitFilmListEntry,\tself.filmListe))\n",
"\t\t\tself.th_ThumbsQuery(self.filmListe, 0, 1, 2, None, None, self.page, self.pages, agent=sit_agent, cookies=sit_ck)\n",
"\n",
"\t\t\tself['liste'].moveToIndex(0)\n",
"\t\t\tself.loadPicQueued()\n",
"\t\telse:\n",
"\t\t\tself.filmListe.append((_(\"No entries found!\"),\"\",\"\",\"\", 0, False))\n",
"\t\t\tself.ml.setList(map(self.streamitFilmListEntry,\tself.filmListe))\n",
"\t\t\tif self.filmQ.empty():\n",
"\t\t\t\tself.eventL.clear()\n",
"\t\t\telse:\n",
"\t\t\t\tself.loadPageQueued()\n",
"\n",
"\tdef getPostFuncs(self, data):\n",
"\t\tself.sortFuncs = []\n",
"\t\ttry:\n",
"\t\t\tm = re.search('id=\"postFuncs\">(.*?)<!-- /#postFuncs -->', data, re.S)\n",
"\t\t\tif m:\n",
"\t\t\t\tfor m2 in re.finditer('href=\"(.*?)\">(.*?)</a', m.group(1)):\n",
"\t\t\t\t\thref, name = m2.groups()\n",
"\t\t\t\t\thref = re.sub('&page=\\d+', '', href, 1)\n",
"\t\t\t\t\thref = re.sub('\\?page=\\d+', '?', href, 1)\n",
"\t\t\t\t\tself.sortFuncs.append((decodeHtml(name), decodeHtml(href)))\n",
"\t\texcept:\n",
"\t\t\tpass\n",
"\t\tif self.sortFuncs:\n",
"\t\t\tself['F3'].show()\n",
"\t\telse:\n",
"\t\t\tself['F3'].hide()\n",
"\n",
"\tdef loadPicQueued(self):\n",
"\t\tself.picQ.put(None)\n",
"\t\tif not self.eventP.is_set():\n",
"\t\t\tself.eventP.set()\n",
"\t\t\tself.loadPic()\n",
"\n",
"\tdef loadPic(self):\n",
"\t\tif self.picQ.empty():\n",
"\t\t\tself.eventP.clear()\n",
"\t\t\treturn\n",
"\n",
"\t\tif self.eventH.is_set() or self.updateP:\n",
"\t\t\tprint \"Pict. or descr. update in progress\"\n",
"\t\t\tprint \"eventH: \",self.eventH.is_set()\n",
"\t\t\tprint \"eventP: \",self.eventP.is_set()\n",
"\t\t\tprint \"updateP: \",self.updateP\n",
"\t\t\treturn\n",
"\n",
"\t\twhile not self.picQ.empty():\n",
"\t\t\tself.picQ.get_nowait()\n",
"\n",
"\t\tstreamName = self['liste'].getCurrent()[0][0]\n",
"\t\tself['name'].setText(streamName)\n",
"\t\tstreamPic = self['liste'].getCurrent()[0][2]\n",
"\t\tstreamUrl = self['liste'].getCurrent()[0][1]\n",
"\t\tself.updateP = 1\n",
"\t\tCoverHelper(self['coverArt'], self.showCoverExit).getCover(streamPic, agent=sit_agent, cookieJar=sit_cookies)\n",
"\t\trate = self['liste'].getCurrent()[0][4]\n",
"\t\thd = self['liste'].getCurrent()[0][5]\n",
"\t\tif hd:\n",
"\t\t\tself['hdpic'].show()\n",
"\t\telse:\n",
"\t\t\tself['hdpic'].hide()\n",
"\t\trating = int(rate)\n",
"\t\tif rating > 100:\n",
"\t\t\trating = 100\n",
"\t\tself['rating10'].setValue(rating)\n",
"\n",
"\tdef dataErrorP(self, error):\n",
"\t\tprintl(error,self,\"E\")\n",
"\t\tself.ShowCoverNone()\n",
"\n",
"\tdef showCoverExit(self):\n",
"\t\tself.updateP = 0;\n",
"\t\tif not self.filmQ.empty():\n",
"\t\t\tself.loadPageQueued()\n",
"\t\telse:\n",
"\t\t\tself.eventL.clear()\n",
"\t\t\tself.loadPic()\n",
"\n",
"\tdef keyOK(self):\n",
"\t\tif self.keyLocked or self.eventL.is_set():\n",
"\t\t\treturn\n",
"\n",
"\t\tstreamLink = self['liste'].getCurrent()[0][1]\n",
"\t\tstreamName = self['liste'].getCurrent()[0][0]\n",
"\t\timageLink = self['liste'].getCurrent()[0][2]\n",
"\t\tself.session.open(streamitStreams, streamLink, streamName, imageLink, self.streamTag)\n",
"\n",
"\tdef keyUpRepeated(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\tself['coverArt'].hide()\n",
"\t\tself['liste'].up()\n",
"\n",
"\tdef keyDownRepeated(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\tself['coverArt'].hide()\n",
"\t\tself['liste'].down()\n",
"\n",
"\tdef key_repeatedUp(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\tself.loadPicQueued()\n",
"\n",
"\tdef keyLeftRepeated(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\tself['coverArt'].hide()\n",
"\t\tself['liste'].pageUp()\n",
"\n",
"\tdef keyRightRepeated(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\tself['coverArt'].hide()\n",
"\t\tself['liste'].pageDown()\n",
"\n",
"\tdef keyPageDown(self):\n",
"\t\tif self.seekTimerRun:\n",
"\t\t\tself.seekTimerRun = False\n",
"\t\tself.keyPageDownFast(1)\n",
"\n",
"\tdef keyPageUp(self):\n",
"\t\tif self.seekTimerRun:\n",
"\t\t\tself.seekTimerRun = False\n",
"\t\tself.keyPageUpFast(1)\n",
"\n",
"\tdef keyPageUpFast(self,step):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\toldpage = self.page\n",
"\t\tif (self.page + step) <= self.pages:\n",
"\t\t\tself.page += step\n",
"\t\telse:\n",
"\t\t\tself.page = 1\n",
"\t\tif oldpage != self.page:\n",
"\t\t\tself.loadPage()\n",
"\n",
"\tdef keyPageDownFast(self,step):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\toldpage = self.page\n",
"\t\tif (self.page - step) >= 1:\n",
"\t\t\tself.page -= step\n",
"\t\telse:\n",
"\t\t\tself.page = self.pages\n",
"\t\tif oldpage != self.page:\n",
"\t\t\tself.loadPage()\n",
"\n",
"\tdef key_1(self):\n",
"\t\tself.keyPageDownFast(2)\n",
"\n",
"\tdef key_4(self):\n",
"\t\tself.keyPageDownFast(5)\n",
"\n",
"\tdef key_7(self):\n",
"\t\tself.keyPageDownFast(10)\n",
"\n",
"\tdef key_3(self):\n",
"\t\tself.keyPageUpFast(2)\n",
"\n",
"\tdef key_6(self):\n",
"\t\tself.keyPageUpFast(5)\n",
"\n",
"\tdef key_9(self):\n",
"\t\tself.keyPageUpFast(10)\n",
"\n",
"\tdef keySort(self):\n",
"\t\tif not self.keyLocked and self.sortFuncs:\n",
"\t\t\tself.handleSort()\n",
"\n",
"\tdef handleSort(self):\n",
"\t\tfrom Screens.ChoiceBox import ChoiceBox\n",
"\t\tself.session.openWithCallback(self.cb_handleSort, ChoiceBox, title=_(\"Sort Selection\"), list = self.sortFuncs)\n",
"\n",
"\tdef cb_handleSort(self, answer):\n",
"\t\thref = answer and answer[1]\n",
"\t\tif href:\n",
"\t\t\tself.genreLink = self.genreLink.split('?')[0] + href\n",
"\t\t\tself.sortOrderStrGenre = answer[0]\n",
"\t\t\tself.setGenreStrTitle()\n",
"\t\t\tself.loadPage()\n",
"\n",
"class streamitStreams(MPScreen):\n",
"\n",
"\tdef __init__(self, session, filmUrl, filmName, imageLink, streamTag, post_data=None, post_url=None):\n",
"\t\tself.filmUrl = filmUrl\n",
"\t\tself.filmName = filmName\n",
"\t\tself.imageUrl = imageLink\n",
"\t\tself.stream_tag = streamTag\n",
"\t\tself.postData = post_data\n",
"\t\tself.postUrl = post_url\n",
"\n",
"\t\tMPScreen.__init__(self, session, skin='MP_PluginDescr')\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"green\" \t: self.keyTrailer,\n",
"\t\t\t\"ok\" \t: self.keyOK,\n",
"\t\t\t\"0\"\t\t: self.closeAll,\n",
"\t\t\t\"cancel\"\t: self.keyCancel\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(\"STREAMIT\")\n",
"\t\tself['ContentTitle'] = Label(_(\"Stream Selection\"))\n",
"\n",
"\t\tself['name'] = Label(filmName)\n",
"\n",
"\t\tself.trailerId = None\n",
"\t\tself.streamListe = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\t\tself.keyLocked = True\n",
"\t\tself.onLayoutFinish.append(self.loadPage)\n",
"\n",
"\tdef loadPage(self):\n",
"\t\tself.streamListe.append((_('Please wait...'),\"\",\"\",\"\"))\n",
"\t\tself.ml.setList(map(self.streamitStreamListEntry, self.streamListe))\n",
"\t\tseriesStreams = self.postData != None\n",
"\t\tdata = sit_grabpage(self.filmUrl)\n",
"\t\tself.parseData(data, seriesStreams)\n",
"\n",
"\tdef getSeriesStreams(self):\n",
"\t\tdata = sit_grabpage(self.postUrl, method='POST', postdata=self.postData)\n",
"\t\tself.parseStreams(data)\n",
"\n",
"\tdef parseStreams(self, data):\n",
"\t\tself.streamListe = []\n",
"\t\tm = re.search('id=\"sel_qualideutsch\">(.*?)</select>', data, re.S)\n",
"\t\tif m:\n",
"\t\t\tbuttons = re.findall('id=\"(.*?)\" class=\"mirrorbuttonsdeutsch\">(.*?)</', m.group(1))\n",
"\t\t\tfor id,nm in buttons:\n",
"\t\t\t\tm2 = re.search('class=\"mirrorsdeutsch\"\\sid=\"\\w*%s\"(.*?)></div></div>' % id, data, re.S)\n",
"\t\t\t\tif m2:\n",
"\t\t\t\t\tm3 = re.search('>Ton: <b>(.*?)</b', m2.group(1))\n",
"\t\t\t\t\tif m3:\n",
"\t\t\t\t\t\tton = ', %s' % m3.group(1)\n",
"\t\t\t\t\telse:\n",
"\t\t\t\t\t\tton = ''\n",
"\t\t\t\t\tstreams = re.findall('<a href=\"(.*?)\".*?value=\"(.*?)\"', m2.group(1).replace('\\n', ''))\n",
"\t\t\t\t\tfor (isUrl,isStream) in streams:\n",
"\t\t\t\t\t\tif isSupportedHoster(isStream, True):\n",
"\t\t\t\t\t\t\tstreamPart = ''\n",
"\t\t\t\t\t\t\tisUrl = isUrl.replace('\\n','')\n",
"\t\t\t\t\t\t\tisUrl = isUrl.replace('\\r','')\n",
"\t\t\t\t\t\t\tself.streamListe.append((isStream,isUrl,streamPart,' (%s%s)' % (nm.strip(), ton.strip())))\n",
"\t\t\t\t\t\telse:\n",
"\t\t\t\t\t\t\tprint \"No supported hoster:\"\n",
"\n",
"\t\tif self.streamListe:\n",
"\t\t\tself.keyLocked = False\n",
"\t\telse:\n",
"\t\t\tself.streamListe.append((\"No streams found!\",\"\",\"\",\"\"))\n",
"\t\tself.ml.setList(map(self.streamitStreamListEntry, self.streamListe))\n",
"\n",
"\tdef parseData(self, data, seriesStreams=False):\n",
"\t\tm = re.search('//www.youtube\\.com/(embed|v|p)/(.*?)(\\?|\" |&)', data)\n",
"\t\tif m:\n",
"\t\t\tself.trailerId = m.group(2)\n",
"\t\t\tself['F2'].setText('Trailer')\n",
"\t\telse: self.trailerId = None\n",
"\n",
"\t\tdesc = ''\n",
"\t\tmdesc = re.search('<b>(Jahr:)</b>.*?\">(.*?)</.*?<b>(Länge:)</b>.*?\">(.*?)</', data, re.S)\n",
"\t\tif mdesc:\n",
"\t\t\tdesc += mdesc.group(1) + mdesc.group(2) + ' ' + mdesc.group(3) + mdesc.group(4) + '\\n\\n'\n",
"\t\telif desc:\n",
"\t\t\tdesc += '\\n'\n",
"\n",
"\t\tmdesc = re.search('<div id=\"cleaner\"> </div><div id=\"cleaner\"> </div>(.*?)<br><br>',data, re.S)\n",
"\t\tif mdesc:\n",
"\t\t\tdesc += re.sub('<.*?>', '', mdesc.group(1).replace('\\n',''), re.S).replace(' ','').strip()\n",
"\t\telse:\n",
"\t\t\tdesc += \"Keine weiteren Info's !\"\n",
"\n",
"\t\tself['handlung'].setText(decodeHtml(desc))\n",
"\t\tCoverHelper(self['coverArt']).getCover(self.imageUrl, agent=sit_agent, cookieJar=sit_cookies)\n",
"\n",
"\t\tif not seriesStreams:\n",
"\t\t\tself.parseStreams(data)\n",
"\t\telse:\n",
"\t\t\tself.getSeriesStreams()\n",
"\n",
"\tdef dataError(self, error):\n",
"\t\tprintl(error,self,\"E\")\n",
"\t\tself.streamListe.append((\"Data error!\",\"\",\"\",\"\"))\n",
"\t\tself.ml.setList(map(self.streamitStreamListEntry, self.streamListe))\n",
"\n",
"\tdef gotLink(self, stream_url):\n",
"\t\tif stream_url:\n",
"\t\t\ttitle = self.filmName + self['liste'].getCurrent()[0][2]\n",
"\t\t\tself.session.open(SimplePlayer, [(title, stream_url, self.imageUrl)], cover=True, showPlaylist=False, ltype='streamit')\n",
"\n",
"\tdef keyTrailer(self):\n",
"\t\tif self.trailerId:\n",
"\t\t\tself.session.open(\n",
"\t\t\t\tYoutubePlayer,\n",
"\t\t\t\t[(self.filmName+' - Trailer', self.trailerId, self.imageUrl)],\n",
"\t\t\t\tplayAll = False,\n",
"\t\t\t\tshowPlaylist=False,\n",
"\t\t\t\tshowCover=True\n",
"\t\t\t\t)\n",
"\n",
"\tdef keyOK(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\turl = self['liste'].getCurrent()[0][1]\n",
"\t\tif not url.startswith('http'):\n",
"\t\t\turl = BASE_URL + url\n",
"\t\tdata = sit_grabpage(url)\n",
"\t\tself.getUrl(data)\n",
"\n",
"\tdef getUrl(self,data):\n",
"\t\ttry:\n",
"\t\t\tlink = re.search('id=\"download\" class=\"cd\" style=\"display:none\"><a href=\"(.*?)\">', data).group(1)\n",
"\t\t\tus = urlparse.urlsplit(link)\n",
"\t\t\tlink = urlparse.urlunsplit(us[0:1]+(us[1].lower(),)+us[2:])\n",
"\t\texcept:\n",
"\t\t\tlink = \"http://fuck.com\"\n",
"\t\tget_stream_link(self.session).check_link(link, self.gotLink)"
] | [
0,
0,
0,
0.011904761904761904,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0.015873015873015872,
0.125,
0.041666666666666664,
0,
0.043478260869565216,
0,
0,
0.058823529411764705,
0.125,
0.041666666666666664,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018518518518518517,
0.05,
0.14285714285714285,
0.038461538461538464,
0.027777777777777776,
0.02564102564102564,
0.043478260869565216,
0.014492753623188406,
0.038461538461538464,
0.023529411764705882,
0.043478260869565216,
0.2,
0.125,
0,
0.02702702702702703,
0,
0.06666666666666667,
0.03225806451612903,
0.030303030303030304,
0.030303030303030304,
0.027777777777777776,
0.027777777777777776,
0.027777777777777776,
0.030303030303030304,
0.03125,
0.02631578947368421,
0.03125,
0.030303030303030304,
0.02857142857142857,
0.02857142857142857,
0.029411764705882353,
0.030303030303030304,
0.02857142857142857,
0.02857142857142857,
0.029411764705882353,
0.030303030303030304,
0.027777777777777776,
0.03225806451612903,
0.02857142857142857,
0.25,
0,
0.017241379310344827,
0.04,
0.043478260869565216,
0.018518518518518517,
0,
0.027777777777777776,
0.024390243902439025,
0.04,
0.037037037037037035,
0,
0.022727272727272728,
0,
0.047619047619047616,
0.034482758620689655,
0.022727272727272728,
0,
0.02857142857142857,
0.025,
0.05555555555555555,
0.058823529411764705,
0.05,
0.02564102564102564,
0.018518518518518517,
0.013888888888888888,
0.1111111111111111,
0.037037037037037035,
0.02631578947368421,
0.025,
0.014492753623188406,
0.023529411764705882,
0.01818181818181818,
0.0136986301369863,
0.022727272727272728,
0.125,
0.022222222222222223,
0,
0.04,
0.012658227848101266,
0.05263157894736842,
0,
0.041666666666666664,
0.014705882352941176,
0,
0.027777777777777776,
0.02857142857142857,
0,
0.02857142857142857,
0.015384615384615385,
0.013333333333333334,
0,
0.01818181818181818,
0,
0.018691588785046728,
0.034482758620689655,
0.034482758620689655,
0.03333333333333333,
0.03125,
0,
0.02247191011235955,
0.03333333333333333,
0,
0.037037037037037035,
0.02857142857142857,
0.034482758620689655,
0.043478260869565216,
0,
0.015873015873015872,
0.08,
0.034482758620689655,
0.07142857142857142,
0.09090909090909091,
0.07692307692307693,
0.07142857142857142,
0.07692307692307693,
0.06060606060606061,
0.05555555555555555,
0.05714285714285714,
0.05714285714285714,
0.05263157894736842,
0.047619047619047616,
0.045454545454545456,
0.047619047619047616,
0.05714285714285714,
0.05405405405405406,
0.09523809523809523,
0.09523809523809523,
0.09523809523809523,
0.09523809523809523,
0.09523809523809523,
0.09523809523809523,
0.043478260869565216,
0.07142857142857142,
0.1111111111111111,
0,
0.041666666666666664,
0.03333333333333333,
0.027777777777777776,
0,
0.02857142857142857,
0.02631578947368421,
0.05,
0,
0.038461538461538464,
0.03571428571428571,
0.029411764705882353,
0.029411764705882353,
0.029411764705882353,
0.03333333333333333,
0.034482758620689655,
0.034482758620689655,
0.05263157894736842,
0.041666666666666664,
0.045454545454545456,
0.0625,
0.1111111111111111,
0.034482758620689655,
0.03389830508474576,
0.02857142857142857,
0.03225806451612903,
0.125,
0.034482758620689655,
0.045454545454545456,
0,
0.038461538461538464,
0,
0.023809523809523808,
0.038461538461538464,
0,
0.022727272727272728,
0,
0.034482758620689655,
0.034482758620689655,
0.020833333333333332,
0.125,
0.05555555555555555,
0,
0.047058823529411764,
0,
0.047619047619047616,
0.022727272727272728,
0.07407407407407407,
0.04,
0.08695652173913043,
0.0625,
0.1111111111111111,
0.0625,
0.027777777777777776,
0.125,
0.041666666666666664,
0,
0.0625,
0.03333333333333333,
0,
0.045454545454545456,
0.03225806451612903,
0.047619047619047616,
0.04,
0.125,
0.022222222222222223,
0.03125,
0.037037037037037035,
0,
0.037037037037037035,
0.022727272727272728,
0.03225806451612903,
0.038461538461538464,
0.03125,
0.030303030303030304,
0.037037037037037035,
0.038461538461538464,
0,
0.034482758620689655,
0.045454545454545456,
0.12,
0.058823529411764705,
0.014925373134328358,
0,
0.03225806451612903,
0.038461538461538464,
0.045454545454545456,
0.125,
0.125,
0.06666666666666667,
0.025,
0.1,
0.05555555555555555,
0.021897810218978103,
0.1,
0.09302325581395349,
0.029411764705882353,
0.0625,
0.09090909090909091,
0.058823529411764705,
0,
0.06060606060606061,
0.018867924528301886,
0.027777777777777776,
0.037037037037037035,
0.024390243902439025,
0.02702702702702703,
0.0125,
0.1111111111111111,
0.1,
0,
0.047619047619047616,
0.045454545454545456,
0.028169014084507043,
0.1,
0.029411764705882353,
0.1,
0.05,
0,
0.05555555555555555,
0.03278688524590164,
0,
0.038461538461538464,
0.014705882352941176,
0.017241379310344827,
0,
0.03125,
0.041666666666666664,
0.125,
0.05714285714285714,
0.014705882352941176,
0.038461538461538464,
0.041666666666666664,
0.1111111111111111,
0.038461538461538464,
0,
0.03225806451612903,
0.045454545454545456,
0.14285714285714285,
0.0136986301369863,
0.1111111111111111,
0.015625,
0.03333333333333333,
0.044444444444444446,
0.06382978723404255,
0.015384615384615385,
0.2,
0.125,
0.047619047619047616,
0.047619047619047616,
0.125,
0.047619047619047616,
0,
0.038461538461538464,
0.045454545454545456,
0.03225806451612903,
0.047619047619047616,
0.05555555555555555,
0,
0.05,
0.041666666666666664,
0.043478260869565216,
0.1,
0,
0.023255813953488372,
0.021739130434782608,
0.04878048780487805,
0.04878048780487805,
0.058823529411764705,
0.1,
0,
0.03225806451612903,
0.038461538461538464,
0,
0.020833333333333332,
0.02857142857142857,
0.02127659574468085,
0.02127659574468085,
0.05263157894736842,
0.017857142857142856,
0.023809523809523808,
0.025,
0.1111111111111111,
0.041666666666666664,
0.125,
0.041666666666666664,
0.047619047619047616,
0.05263157894736842,
0.0625,
0.027777777777777776,
0,
0.03333333333333333,
0.12,
0.043478260869565216,
0,
0.038461538461538464,
0.1,
0.034482758620689655,
0.04,
0.125,
0.043478260869565216,
0.05555555555555555,
0,
0.05555555555555555,
0.022222222222222223,
0.1,
0,
0.020833333333333332,
0.020833333333333332,
0.02127659574468085,
0.022727272727272728,
0,
0.038461538461538464,
0.047619047619047616,
0.1,
0.038461538461538464,
0.047619047619047616,
0,
0.03571428571428571,
0.047619047619047616,
0.1,
0.038461538461538464,
0.043478260869565216,
0,
0.037037037037037035,
0.047619047619047616,
0.1,
0.043478260869565216,
0,
0.03571428571428571,
0.047619047619047616,
0.1,
0.038461538461538464,
0.04,
0,
0.034482758620689655,
0.047619047619047616,
0.1,
0.038461538461538464,
0.037037037037037035,
0,
0.041666666666666664,
0.041666666666666664,
0.034482758620689655,
0.038461538461538464,
0,
0.045454545454545456,
0.041666666666666664,
0.034482758620689655,
0.041666666666666664,
0,
0.06451612903225806,
0.047619047619047616,
0.1,
0.045454545454545456,
0.02564102564102564,
0.047619047619047616,
0.125,
0.058823529411764705,
0.037037037037037035,
0.05263157894736842,
0,
0.06060606060606061,
0.047619047619047616,
0.1,
0.045454545454545456,
0.03333333333333333,
0.047619047619047616,
0.125,
0.038461538461538464,
0.037037037037037035,
0.05263157894736842,
0,
0.05555555555555555,
0.038461538461538464,
0,
0.05555555555555555,
0.038461538461538464,
0,
0.05555555555555555,
0.037037037037037035,
0,
0.05555555555555555,
0.041666666666666664,
0,
0.05555555555555555,
0.041666666666666664,
0,
0.05555555555555555,
0.04,
0,
0.05,
0.022727272727272728,
0.047619047619047616,
0,
0.043478260869565216,
0.023809523809523808,
0.035398230088495575,
0,
0.029411764705882353,
0.03333333333333333,
0.09090909090909091,
0.017857142857142856,
0.02631578947368421,
0.037037037037037035,
0.05263157894736842,
0,
0.030303030303030304,
0,
0.0196078431372549,
0.04,
0.037037037037037035,
0.03571428571428571,
0.03333333333333333,
0.03571428571428571,
0.038461538461538464,
0,
0.017241379310344827,
0,
0.020833333333333332,
0.06451612903225806,
0.07692307692307693,
0.08,
0.06896551724137931,
0.1111111111111111,
0,
0.027777777777777776,
0.018518518518518517,
0,
0.030303030303030304,
0,
0.041666666666666664,
0.041666666666666664,
0.023809523809523808,
0.038461538461538464,
0.041666666666666664,
0.022727272727272728,
0,
0.047619047619047616,
0.06896551724137931,
0.014084507042253521,
0.05,
0.027777777777777776,
0.02631578947368421,
0,
0.034482758620689655,
0.013333333333333334,
0.038461538461538464,
0,
0.03225806451612903,
0.041666666666666664,
0.014705882352941176,
0.125,
0.022988505747126436,
0.08,
0.043478260869565216,
0.09090909090909091,
0.018518518518518517,
0.08333333333333333,
0.030303030303030304,
0.09090909090909091,
0.06666666666666667,
0.021739130434782608,
0.05263157894736842,
0.022727272727272728,
0.043478260869565216,
0.05263157894736842,
0.05263157894736842,
0.05102040816326531,
0.08333333333333333,
0.027777777777777776,
0,
0.043478260869565216,
0.038461538461538464,
0.125,
0.06779661016949153,
0.014084507042253521,
0,
0.02040816326530612,
0.04054054054054054,
0.125,
0.03225806451612903,
0.030303030303030304,
0.06666666666666667,
0,
0.08333333333333333,
0.020618556701030927,
0.08333333333333333,
0.021505376344086023,
0.07692307692307693,
0.0625,
0,
0.027777777777777776,
0.08333333333333333,
0.04040404040404041,
0.125,
0.02702702702702703,
0,
0.022222222222222223,
0.020833333333333332,
0,
0.041666666666666664,
0.037037037037037035,
0.125,
0.037037037037037035,
0,
0.034482758620689655,
0.12,
0.07692307692307693,
0.014084507042253521,
0,
0.03125,
0.058823529411764705,
0.016666666666666666,
0.016260162601626018,
0,
0.043478260869565216,
0.047619047619047616,
0.045454545454545456,
0.05263157894736842,
0.014925373134328358,
0.14285714285714285,
0.041666666666666664,
0.05263157894736842,
0.16666666666666666,
0,
0.05555555555555555,
0.047619047619047616,
0.1,
0.024390243902439025,
0.030303030303030304,
0.041666666666666664,
0.037037037037037035,
0.05,
0,
0.08333333333333333,
0.14285714285714285,
0.019801980198019802,
0.03125,
0.015873015873015872,
0.2,
0.03571428571428571,
0.03225806451612903
] | 609 | 0.041185 | false |
# Copyright (c) 2013 Torrent-TV.RU
# Writer (c) 2011, Welicobratov K.A., E-mail: 07pov23@gmail.com
# Edited (c) 2015, Vorotilin D.V., E-mail: dvor85@mail.ru
import sys
import defines
# append pydev remote debugger
if defines.DEBUG:
# Make pydev debugger works for auto reload.
# Note pydevd module need to be copied in XBMC\system\python\Lib\pysrc
#Add "sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '..')))" to
#d:\Program Files (x86)\Kodi\system\python\Lib\pysrc\_pydev_imps\_pydev_pluginbase.py
try:
import pysrc.pydevd as pydevd # with the addon script.module.pydevd, only use `import pydevd`
# stdoutToServer and stderrToServer redirect stdout and stderr to eclipse console
pydevd.settrace('localhost', stdoutToServer=True, stderrToServer=True)
except:
t, v, tb = sys.exc_info()
sys.stderr.write("Error: {0}:{1} | You must add org.python.pydev.debug.pysrc to your PYTHONPATH.".format(t, v))
import traceback
traceback.print_tb(tb)
del tb
sys.exit(0)
import mainform
from okdialog import OkDialog
def checkPort(params):
if not defines.checkPort(params):
dialog = OkDialog("okdialog.xml", defines.SKIN_PATH, defines.ADDON.getSetting('skin'))
dialog.setText("Порт %s закрыт. Для стабильной работы сервиса и трансляций, настоятельно рекомендуется его открыть." % defines.ADDON.getSetting('outport'))
dialog.doModal()
if __name__ == '__main__':
if not defines.ADDON.getSetting('skin'):
defines.ADDON.setSetting('skin', 'st.anger')
if defines.ADDON.getSetting("skin") == "default":
defines.ADDON.setSetting("skin", "st.anger")
if not defines.ADDON.getSetting("login"):
defines.ADDON.setSetting("login", "anonymous")
defines.ADDON.setSetting("password", "anonymous")
#thr = defines.MyThread(checkPort, defines.ADDON.getSetting("outport"))
#thr.start()
print defines.ADDON_PATH
print defines.SKIN_PATH
defines.MyThread(defines.Autostart, defines.AUTOSTART).start()
w = mainform.WMainForm("mainform.xml", defines.SKIN_PATH, defines.ADDON.getSetting('skin'))
w.doModal()
defines.showMessage('Close plugin')
del w
| [
"# Copyright (c) 2013 Torrent-TV.RU\r\n",
"# Writer (c) 2011, Welicobratov K.A., E-mail: 07pov23@gmail.com\r\n",
"# Edited (c) 2015, Vorotilin D.V., E-mail: dvor85@mail.ru\r\n",
"\r\n",
"import sys\r\n",
"import defines\r\n",
"\r\n",
"# append pydev remote debugger\r\n",
"if defines.DEBUG:\r\n",
" # Make pydev debugger works for auto reload.\r\n",
" # Note pydevd module need to be copied in XBMC\\system\\python\\Lib\\pysrc\r\n",
" #Add \"sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '..')))\" to \r\n",
" #d:\\Program Files (x86)\\Kodi\\system\\python\\Lib\\pysrc\\_pydev_imps\\_pydev_pluginbase.py\r\n",
" try:\r\n",
" import pysrc.pydevd as pydevd # with the addon script.module.pydevd, only use `import pydevd`\r\n",
" # stdoutToServer and stderrToServer redirect stdout and stderr to eclipse console\r\n",
" pydevd.settrace('localhost', stdoutToServer=True, stderrToServer=True)\r\n",
" except:\r\n",
" t, v, tb = sys.exc_info() \r\n",
" sys.stderr.write(\"Error: {0}:{1} | You must add org.python.pydev.debug.pysrc to your PYTHONPATH.\".format(t, v))\r\n",
" import traceback\r\n",
" traceback.print_tb(tb)\r\n",
" del tb\r\n",
" sys.exit(0)\r\n",
" \r\n",
"\r\n",
"import mainform \r\n",
"from okdialog import OkDialog\r\n",
"\r\n",
"def checkPort(params):\r\n",
" if not defines.checkPort(params):\r\n",
" dialog = OkDialog(\"okdialog.xml\", defines.SKIN_PATH, defines.ADDON.getSetting('skin'))\r\n",
" dialog.setText(\"Порт %s закрыт. Для стабильной работы сервиса и трансляций, настоятельно рекомендуется его открыть.\" % defines.ADDON.getSetting('outport'))\r\n",
" dialog.doModal()\r\n",
"\r\n",
"if __name__ == '__main__':\r\n",
" if not defines.ADDON.getSetting('skin'):\r\n",
" defines.ADDON.setSetting('skin', 'st.anger')\r\n",
" if defines.ADDON.getSetting(\"skin\") == \"default\":\r\n",
" defines.ADDON.setSetting(\"skin\", \"st.anger\")\r\n",
" if not defines.ADDON.getSetting(\"login\"):\r\n",
" defines.ADDON.setSetting(\"login\", \"anonymous\")\r\n",
" defines.ADDON.setSetting(\"password\", \"anonymous\")\r\n",
"\r\n",
" #thr = defines.MyThread(checkPort, defines.ADDON.getSetting(\"outport\"))\r\n",
" #thr.start()\r\n",
"\r\n",
" print defines.ADDON_PATH\r\n",
" print defines.SKIN_PATH\r\n",
" \r\n",
" defines.MyThread(defines.Autostart, defines.AUTOSTART).start()\r\n",
" \r\n",
" w = mainform.WMainForm(\"mainform.xml\", defines.SKIN_PATH, defines.ADDON.getSetting('skin'))\r\n",
" w.doModal()\r\n",
" defines.showMessage('Close plugin')\r\n",
" del w\r\n",
" "
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030927835051546393,
0.02197802197802198,
0,
0.009615384615384616,
0.01098901098901099,
0,
0.07692307692307693,
0.023255813953488372,
0.008264462809917356,
0,
0,
0,
0,
0.16666666666666666,
0,
0.05555555555555555,
0,
0,
0.041666666666666664,
0,
0.010416666666666666,
0.006060606060606061,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0.012987012987012988,
0.05555555555555555,
0,
0,
0,
0.16666666666666666,
0,
0.16666666666666666,
0.010309278350515464,
0,
0,
0,
0.5
] | 57 | 0.024741 | false |
#coding: UTF-8
import math
import time
import string
import sys,os
import os, io, sys, re, time, json, base64
import webbrowser
import urllib2
import msvcrt
import time
import ctypes
STD_INPUT_HANDLE = -10
STD_OUTPUT_HANDLE= -11
STD_ERROR_HANDLE = -12
FOREGROUND_BLACK = 0x0
FOREGROUND_BLUE = 0x01 # text color contains blue.
FOREGROUND_GREEN= 0x02 # text color contains green.
FOREGROUND_RED = 0x04 # text color contains red.
FOREGROUND_INTENSITY = 0x08 # text color is intensified.
BACKGROUND_BLUE = 0x10 # background color contains blue.
BACKGROUND_GREEN= 0x20 # background color contains green.
BACKGROUND_RED = 0x40 # background color contains red.
BACKGROUND_INTENSITY = 0x80 # background color is intensified.
class Color:
''''' See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winprog/winprog/windows_api_reference.asp
for information on Windows APIs.'''
std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
def set_cmd_color(self, color, handle=std_out_handle):
"""(color) -> bit
Example: set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY)
"""
bool = ctypes.windll.kernel32.SetConsoleTextAttribute(handle, color)
return bool
def reset_color(self):
self.set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE)
def print_red_text(self, print_text):
self.set_cmd_color(FOREGROUND_RED | FOREGROUND_INTENSITY)
print print_text
self.reset_color()
def print_green_text(self, print_text):
self.set_cmd_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY)
print print_text
self.reset_color()
def print_blue_text(self, print_text):
self.set_cmd_color(FOREGROUND_BLUE | FOREGROUND_INTENSITY)
print print_text
self.reset_color()
def print_red_text_with_blue_bg(self, print_text):
self.set_cmd_color(FOREGROUND_RED | FOREGROUND_INTENSITY| BACKGROUND_BLUE | BACKGROUND_INTENSITY)
print print_text
self.reset_color()
# http://hq.sinajs.cn/list=sh600000
# http://hq.sinajs.cn/list=sz000913
#var hq_str_sh601006="大秦铁路, 27.55, 27.25, 26.91, 27.55, 26.20, 26.91, 26.92,
# 22114263, 589824680, 4695, 26.91, 57590, 26.90, 14700, 26.89,
# 14300,6.88, 15100, 26.87, 3100, 26.92, 8900, 26.93, 14230,
# 26.94, 25150, 26.95, 15220, 26.96, 2008-01-11, 15:05:32";
#
# 0:”大秦铁路”,股票名字;
# 1:”27.55″,今日开盘价;
# 2:”27.25″,昨日收盘价;
# 3:”26.91″,当前价格;//时间结束后也就是收盘价了
# 4:”27.55″,今日最高价;
# 5:”26.20″,今日最低价;
# 6:”26.91″,竞买价,即“买一”报价;
# 7:”26.92″,竞卖价,即“卖一”报价;
# 8:”22114263″,成交的股票数,由于股票交易以一百股为基本单位,所以在使用时,通常把该值除以一百;
# 9:”589824680″,成交金额,单位为“元”,为了一目了然,通常以“万元”为成交金额的单位,所以通常把该值除以一万;
# 10:”4695″,“买一”申请4695股,即47手;
# 11:”26.91″,“买一”报价;
# 12:”57590″,“买二”
# 13:”26.90″,“买二”
# 14:”14700″,“买三”
# 15:”26.89″,“买三”
# 16:”14300″,“买四”
# 17:”26.88″,“买四”
# 18:”15100″,“买五”
# 19:”26.87″,“买五”
# 20:”3100″,“卖一”申报3100股,即31手;
# 21:”26.92″,“卖一”报价 (22, 23), (24, 25), (26,27), (28, 29)分别为“卖二”至“卖四的情况”
# 30:”2008-01-11″,日期;
# 31:”15:05:32″,时间;
import msvcrt
import time
def kbfunc():
x = msvcrt.kbhit()
if x:
ret = ord(msvcrt.getch())
else:
ret = 0
return ret
def calc_profit(buy,sell,number):
#==========相关费率
profit=[0]*4
yongjin=0.0006 #拥金
yinhuasui=0.001#印花税率
buy_fee=buy*number*yongjin
sell_fee=sell*number*(yongjin+yinhuasui)
souxufei=number*.0006
#计算 利润-利润率
profit[0]=(sell-buy)*number-buy_fee-sell_fee-2*souxufei
profit[1]=profit[0]/(buy*number)#利润率
profit[2]=sell*number-sell_fee-souxufei#真市值
return profit
#print "buy_f'e=",buy_fee
#print "sell_fee",sell_fee
#print "souxufei",souxufei*2
def querystock(stock_code,buy,number):
stock={}
exchange = "sz" if (int(stock_code) // 100000 == 3) else "sh"
query="http://hq.sinajs.cn/list="
qy=query+exchange+stock_code
stdout = urllib2.urlopen(qy)
html=stdout.read()
tempData = re.search('''(")(.+)(")''', html).group(2)
stockinfo = tempData.split(",")
sell=string.atof(stockinfo[3])
pf=calc_profit(buy,sell,number)#利润
stock['name']=stockinfo[0]
stock['yestoday']=string.atof(stockinfo[2])
stock['now']=string.atof(stockinfo[3])
stock['max']=string.atof(stockinfo[4])
stock['min']=string.atof(stockinfo[5])
stock['profit']=pf[0]
stock['profit_percent']=pf[1]*100
stock['cost']=buy*number
stock['time']=stockinfo[31]
stock['date']=stockinfo[30]
stock['range']=(stock['now']/stock['yestoday']-1)*100
stock['buy']=buy
stock['number']=number
stock['total']=stock['now']*number
stock['true_total']=pf[2]
return stock
#********** main **************
#==========设置买入=======
#buy=32
#number=3500
#stock='601688'
format_number=5 #文件数据行数
fb=open('stock.txt','r')
line=fb.readlines()
fb.close()
repeat=len(line)/format_number
times=2
stock_number=10
runing=1
res=['']*12
max=[0]*10
prev=[0]*10
stock=[{}]*stock_number #最多支持几支股票查询
up=u'↗'
down=u'↘'
no=u' '
flag=u''
clr=Color()
while runing:
for i in range(repeat):
up2=u' '
buy=string.atof(line[1+i*format_number])
number=string.atoi(line[2+i*format_number])
stock_code=line[0+i*format_number]
stock[i]=querystock(stock_code,buy,number)
max[i]=stock[i]['now']
if stock[i]['now']==stock[i]['max']:
flag=u'☆'*times
clr.set_cmd_color(0x1|BACKGROUND_INTENSITY)
else:
if prev[i]<stock[i]['now']:
flag=up*times
clr.set_cmd_color(FOREGROUND_BLACK|500)
else:
if prev[i]==stock[i]['now']:
flag=no*times
else:
flag=down*times
clr.set_cmd_color(FOREGROUND_GREEN)
#print stockname
if stock[i]['now']>stock[i]['yestoday']:
updown=u'↗'
else:
if stock[i]['now']<stock[i]['yestoday']:
updown=u'↘'
else:
updown=u'→'
prev[i]=stock[i]['now']
if stock[i]['profit']>0:
up2=up
else:
up2=down
print "%s %s %6.2f %5.2f%%"%(stock[i]['name'],stock[i]['time'],stock[i]['now'],stock[i]['range']),updown,"total=%6d profit=%8.2f%s %-4.2f%% "%(stock[i]['true_total'],stock[i]['profit'],up2,stock[i]['profit_percent']),flag
clr.reset_color()
r = kbfunc()
if r != 0:
break
time.sleep(3)
print '-'*50
print ' '*50
| [
"#coding: UTF-8\n",
"import math\n",
"import time\n",
"import string\n",
"import sys,os\n",
"import os, io, sys, re, time, json, base64\n",
"import webbrowser\n",
"import urllib2\n",
"\n",
"import msvcrt\n",
"import time\n",
"import ctypes\n",
"\n",
"\n",
"\n",
"STD_INPUT_HANDLE = -10 \n",
"STD_OUTPUT_HANDLE= -11 \n",
"STD_ERROR_HANDLE = -12 \n",
" \n",
"FOREGROUND_BLACK = 0x0 \n",
"FOREGROUND_BLUE = 0x01 # text color contains blue. \n",
"FOREGROUND_GREEN= 0x02 # text color contains green. \n",
"FOREGROUND_RED = 0x04 # text color contains red. \n",
"FOREGROUND_INTENSITY = 0x08 # text color is intensified. \n",
" \n",
"BACKGROUND_BLUE = 0x10 # background color contains blue. \n",
"BACKGROUND_GREEN= 0x20 # background color contains green. \n",
"BACKGROUND_RED = 0x40 # background color contains red. \n",
"BACKGROUND_INTENSITY = 0x80 # background color is intensified. \n",
" \n",
"class Color: \n",
" ''''' See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winprog/winprog/windows_api_reference.asp \n",
" for information on Windows APIs.''' \n",
" std_out_handle = ctypes.windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE) \n",
" \n",
" def set_cmd_color(self, color, handle=std_out_handle): \n",
" \"\"\"(color) -> bit \n",
" Example: set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY) \n",
" \"\"\" \n",
" bool = ctypes.windll.kernel32.SetConsoleTextAttribute(handle, color) \n",
" return bool \n",
" \n",
" def reset_color(self): \n",
" self.set_cmd_color(FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE) \n",
" \n",
" def print_red_text(self, print_text): \n",
" self.set_cmd_color(FOREGROUND_RED | FOREGROUND_INTENSITY) \n",
" print print_text \n",
" self.reset_color() \n",
" \n",
" def print_green_text(self, print_text): \n",
" self.set_cmd_color(FOREGROUND_GREEN | FOREGROUND_INTENSITY) \n",
" print print_text \n",
" self.reset_color() \n",
" \n",
" def print_blue_text(self, print_text): \n",
" self.set_cmd_color(FOREGROUND_BLUE | FOREGROUND_INTENSITY) \n",
" print print_text \n",
" self.reset_color() \n",
" \n",
" def print_red_text_with_blue_bg(self, print_text): \n",
" self.set_cmd_color(FOREGROUND_RED | FOREGROUND_INTENSITY| BACKGROUND_BLUE | BACKGROUND_INTENSITY) \n",
" print print_text \n",
" self.reset_color() \n",
" \n",
"\n",
" \n",
"# http://hq.sinajs.cn/list=sh600000\n",
"\n",
"# http://hq.sinajs.cn/list=sz000913\n",
"#var hq_str_sh601006=\"大秦铁路, 27.55, 27.25, 26.91, 27.55, 26.20, 26.91, 26.92,\n",
"# 22114263, 589824680, 4695, 26.91, 57590, 26.90, 14700, 26.89,\n",
"# 14300,6.88, 15100, 26.87, 3100, 26.92, 8900, 26.93, 14230,\n",
"# 26.94, 25150, 26.95, 15220, 26.96, 2008-01-11, 15:05:32\";\n",
"\n",
" \n",
"#\n",
"# 0:”大秦铁路”,股票名字;\n",
"# 1:”27.55″,今日开盘价;\n",
"# 2:”27.25″,昨日收盘价;\n",
"# 3:”26.91″,当前价格;//时间结束后也就是收盘价了\n",
"# 4:”27.55″,今日最高价;\n",
"# 5:”26.20″,今日最低价;\n",
"# 6:”26.91″,竞买价,即“买一”报价;\n",
"# 7:”26.92″,竞卖价,即“卖一”报价;\n",
"# 8:”22114263″,成交的股票数,由于股票交易以一百股为基本单位,所以在使用时,通常把该值除以一百;\n",
"# 9:”589824680″,成交金额,单位为“元”,为了一目了然,通常以“万元”为成交金额的单位,所以通常把该值除以一万;\n",
"# 10:”4695″,“买一”申请4695股,即47手;\n",
"# 11:”26.91″,“买一”报价;\n",
"# 12:”57590″,“买二”\n",
"# 13:”26.90″,“买二”\n",
"# 14:”14700″,“买三”\n",
"# 15:”26.89″,“买三”\n",
"# 16:”14300″,“买四”\n",
"# 17:”26.88″,“买四”\n",
"# 18:”15100″,“买五”\n",
"# 19:”26.87″,“买五”\n",
"# 20:”3100″,“卖一”申报3100股,即31手;\n",
"# 21:”26.92″,“卖一”报价 (22, 23), (24, 25), (26,27), (28, 29)分别为“卖二”至“卖四的情况”\n",
"# 30:”2008-01-11″,日期; \n",
"# 31:”15:05:32″,时间;\n",
"\n",
"import msvcrt \n",
"import time\n",
"\n",
"def kbfunc(): \n",
" x = msvcrt.kbhit()\n",
" if x: \n",
" ret = ord(msvcrt.getch()) \n",
" else: \n",
" ret = 0 \n",
" return ret\n",
"\n",
"def calc_profit(buy,sell,number):\n",
" #==========相关费率\n",
" profit=[0]*4\n",
" yongjin=0.0006 #拥金\n",
" yinhuasui=0.001#印花税率\n",
" \n",
" buy_fee=buy*number*yongjin\n",
" sell_fee=sell*number*(yongjin+yinhuasui)\n",
" souxufei=number*.0006\n",
"\n",
" #计算 利润-利润率\n",
" profit[0]=(sell-buy)*number-buy_fee-sell_fee-2*souxufei\n",
" profit[1]=profit[0]/(buy*number)#利润率\n",
" profit[2]=sell*number-sell_fee-souxufei#真市值\n",
" return profit\n",
"#print \"buy_f'e=\",buy_fee\n",
"#print \"sell_fee\",sell_fee\n",
"#print \"souxufei\",souxufei*2\n",
"\n",
"def querystock(stock_code,buy,number):\n",
" stock={}\n",
" exchange = \"sz\" if (int(stock_code) // 100000 == 3) else \"sh\"\n",
" query=\"http://hq.sinajs.cn/list=\"\n",
" qy=query+exchange+stock_code\n",
" stdout = urllib2.urlopen(qy)\n",
" html=stdout.read()\n",
" tempData = re.search('''(\")(.+)(\")''', html).group(2)\n",
" stockinfo = tempData.split(\",\")\n",
" sell=string.atof(stockinfo[3])\n",
" pf=calc_profit(buy,sell,number)#利润\n",
"\n",
" stock['name']=stockinfo[0]\n",
" stock['yestoday']=string.atof(stockinfo[2])\n",
" stock['now']=string.atof(stockinfo[3])\n",
" stock['max']=string.atof(stockinfo[4])\n",
" stock['min']=string.atof(stockinfo[5])\n",
" stock['profit']=pf[0]\n",
" stock['profit_percent']=pf[1]*100\n",
" stock['cost']=buy*number\n",
" stock['time']=stockinfo[31]\n",
" stock['date']=stockinfo[30]\n",
" stock['range']=(stock['now']/stock['yestoday']-1)*100\n",
" stock['buy']=buy\n",
" stock['number']=number\n",
" stock['total']=stock['now']*number\n",
" stock['true_total']=pf[2]\n",
" return stock\n",
"\n",
"\n",
"#********** main **************\n",
"\n",
"#==========设置买入=======\n",
"\n",
"#buy=32\n",
"#number=3500\n",
"#stock='601688'\n",
"format_number=5 #文件数据行数\n",
"fb=open('stock.txt','r')\n",
"line=fb.readlines()\n",
"fb.close()\n",
"\n",
"repeat=len(line)/format_number\n",
"times=2\n",
"stock_number=10\n",
"runing=1\n",
"res=['']*12\n",
"max=[0]*10\n",
"prev=[0]*10\n",
"stock=[{}]*stock_number #最多支持几支股票查询\n",
"up=u'↗'\n",
"down=u'↘'\n",
"no=u' '\n",
"flag=u''\n",
"clr=Color()\n",
"while runing:\n",
" for i in range(repeat):\n",
" up2=u' '\n",
" buy=string.atof(line[1+i*format_number])\n",
" number=string.atoi(line[2+i*format_number])\n",
" stock_code=line[0+i*format_number]\n",
" stock[i]=querystock(stock_code,buy,number)\n",
" max[i]=stock[i]['now']\n",
" if stock[i]['now']==stock[i]['max']:\n",
" flag=u'☆'*times\n",
" clr.set_cmd_color(0x1|BACKGROUND_INTENSITY)\n",
"\n",
" else:\n",
" if prev[i]<stock[i]['now']:\n",
" flag=up*times\n",
" clr.set_cmd_color(FOREGROUND_BLACK|500)\n",
" else:\n",
" if prev[i]==stock[i]['now']:\n",
" flag=no*times\n",
" else:\n",
" flag=down*times\n",
" clr.set_cmd_color(FOREGROUND_GREEN)\n",
" #print stockname\n",
" if stock[i]['now']>stock[i]['yestoday']:\n",
" updown=u'↗'\n",
" else:\n",
" if stock[i]['now']<stock[i]['yestoday']:\n",
" updown=u'↘'\n",
" else:\n",
" updown=u'→'\n",
"\n",
" prev[i]=stock[i]['now']\n",
" if stock[i]['profit']>0:\n",
"\t\t\tup2=up\n",
" else:\n",
"\t\t\tup2=down\n",
" print \"%s %s %6.2f %5.2f%%\"%(stock[i]['name'],stock[i]['time'],stock[i]['now'],stock[i]['range']),updown,\"total=%6d profit=%8.2f%s %-4.2f%% \"%(stock[i]['true_total'],stock[i]['profit'],up2,stock[i]['profit_percent']),flag\n",
" clr.reset_color()\n",
" r = kbfunc()\n",
" if r != 0:\n",
" break \n",
" time.sleep(3)\n",
" print '-'*50\n",
" print ' '*50\n",
"\n"
] | [
0.06666666666666667,
0,
0,
0,
0.14285714285714285,
0.023255813953488372,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08,
0.08,
0.04,
0.3333333333333333,
0.04,
0.03773584905660377,
0.05555555555555555,
0.0392156862745098,
0.03389830508474576,
0.3333333333333333,
0.03389830508474576,
0.05,
0.03508771929824561,
0.03076923076923077,
0.3333333333333333,
0.13333333333333333,
0.01639344262295082,
0.023809523809523808,
0.01282051282051282,
0.14285714285714285,
0.01639344262295082,
0.037037037037037035,
0.018518518518518517,
0.07142857142857142,
0.012658227848101266,
0.045454545454545456,
0.14285714285714285,
0.034482758620689655,
0.012195121951219513,
0.14285714285714285,
0.022727272727272728,
0.014705882352941176,
0.037037037037037035,
0.034482758620689655,
0.09090909090909091,
0.021739130434782608,
0.014285714285714285,
0.037037037037037035,
0.034482758620689655,
0.14285714285714285,
0.021739130434782608,
0.014492753623188406,
0.037037037037037035,
0.034482758620689655,
0.07692307692307693,
0.017543859649122806,
0.027777777777777776,
0.037037037037037035,
0.030303030303030304,
0.3333333333333333,
0,
0.14285714285714285,
0.027777777777777776,
0,
0,
0.012987012987012988,
0.011904761904761904,
0.012345679012345678,
0,
0,
0.5,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0.2,
0.08333333333333333,
0,
0.13333333333333333,
0.045454545454545456,
0.2,
0.06060606060606061,
0.2,
0.13333333333333333,
0.07142857142857142,
0,
0.08823529411764706,
0.05,
0.058823529411764705,
0.13043478260869565,
0.12,
0.2,
0.03225806451612903,
0.022222222222222223,
0.038461538461538464,
0,
0.06666666666666667,
0.016666666666666666,
0.07317073170731707,
0.0625,
0,
0.038461538461538464,
0.037037037037037035,
0.034482758620689655,
0,
0.07692307692307693,
0.07692307692307693,
0,
0.02631578947368421,
0.030303030303030304,
0,
0.043478260869565216,
0,
0,
0.02857142857142857,
0.1282051282051282,
0,
0.03225806451612903,
0.020833333333333332,
0.023255813953488372,
0.023255813953488372,
0.023255813953488372,
0.038461538461538464,
0.02631578947368421,
0.034482758620689655,
0.03125,
0.03125,
0.017241379310344827,
0.047619047619047616,
0.037037037037037035,
0.02564102564102564,
0.03333333333333333,
0,
0,
0,
0.02857142857142857,
0,
0.043478260869565216,
0,
0.125,
0.07692307692307693,
0.0625,
0.125,
0.08,
0.05,
0,
0,
0.03225806451612903,
0.125,
0.0625,
0.1111111111111111,
0.08333333333333333,
0.09090909090909091,
0.08333333333333333,
0.05405405405405406,
0.125,
0.1,
0.125,
0.1111111111111111,
0.08333333333333333,
0,
0,
0.058823529411764705,
0.02040816326530612,
0.019230769230769232,
0.023255813953488372,
0.058823529411764705,
0.03225806451612903,
0.022222222222222223,
0.03571428571428571,
0.017857142857142856,
0,
0,
0.025,
0.03333333333333333,
0.017857142857142856,
0,
0.022222222222222223,
0.029411764705882353,
0,
0.027777777777777776,
0,
0.04,
0.02040816326530612,
0.041666666666666664,
0,
0.018867924528301886,
0.03571428571428571,
0,
0.03571428571428571,
0,
0.03125,
0.030303030303030304,
0.4,
0.07142857142857142,
0.3333333333333333,
0.05652173913043478,
0,
0,
0,
0.06666666666666667,
0,
0,
0,
1
] | 232 | 0.051452 | false |
# Book Data Structure and algorithms using Python
# implement the Array ADT
import ctypes
class Array:
def __init__(self, size):
assert size > 0, 'Array size must be > 0'
self._size = size
PyArrayType = ctypes.py_object * size
self._elements = PyArrayType()
self.clear(None)
def __len__(self):
return self._size
def __getitem__(self, index):
assert index >= 0 and index < len(self), 'Array out of range'
return self._elements[index]
def __setitem__(self, index, value):
assert index >= 0 and index < len(self), 'Array out of range'
self._elements[index] = value
def clear(self, value):
for i in range(len(self)):
self._elements[i] = value
def __iter__(self):
return _ArrayIterator(self._elements)
# an iterator for the Array ADT
class _ArrayIterator:
def __init__(self, theArray):
self._arrayRef = theArray
self._curNdx = 0
def __iter__(self):
return self
def __next__(self):
if self._curNdx < len(self._arrayRef):
entry = self._arrayRef[self._curNdx]
self._curNdx += 1
return entry
else:
raise StopIteration
if __name__ == '__main__':
a = Array(3)
a[0] = 0
a[1] = 1
a[2] = 3
for i in a:
print(i)
print(len(a))
a.clear(5)
print(a[0])
print('bingo')
| [
"# Book Data Structure and algorithms using Python\n",
"# implement the Array ADT \n",
"import ctypes\n",
"\n",
"class Array:\n",
" \n",
" def __init__(self, size):\n",
" assert size > 0, 'Array size must be > 0'\n",
" self._size = size\n",
" PyArrayType = ctypes.py_object * size\n",
" self._elements = PyArrayType()\n",
" self.clear(None)\n",
"\n",
" def __len__(self):\n",
" return self._size\n",
"\n",
" def __getitem__(self, index):\n",
" assert index >= 0 and index < len(self), 'Array out of range'\n",
" return self._elements[index]\n",
"\n",
" def __setitem__(self, index, value):\n",
" assert index >= 0 and index < len(self), 'Array out of range'\n",
" self._elements[index] = value\n",
"\n",
" def clear(self, value):\n",
" for i in range(len(self)):\n",
" self._elements[i] = value\n",
"\n",
" def __iter__(self):\n",
" return _ArrayIterator(self._elements)\n",
"\n",
"# an iterator for the Array ADT\n",
"class _ArrayIterator:\n",
" def __init__(self, theArray):\n",
" self._arrayRef = theArray\n",
" self._curNdx = 0 \n",
" \n",
" def __iter__(self):\n",
" return self\n",
"\n",
" def __next__(self):\n",
" if self._curNdx < len(self._arrayRef):\n",
" entry = self._arrayRef[self._curNdx]\n",
" self._curNdx += 1\n",
" return entry\n",
" else:\n",
" raise StopIteration\n",
"\n",
"if __name__ == '__main__':\n",
" a = Array(3)\n",
" a[0] = 0\n",
" a[1] = 1\n",
" a[2] = 3\n",
" for i in a:\n",
" print(i)\n",
" print(len(a))\n",
" a.clear(5)\n",
" print(a[0])\n",
" print('bingo')\n"
] | [
0,
0.037037037037037035,
0,
0,
0.07692307692307693,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0.038461538461538464,
0.2,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 59 | 0.010761 | false |
"""
Make projects files for building Far Manager Encyclopedia in .CHM format
"""
# pythonized by techtonik // gmail.com
# modifications by Far Group
#
# IMPORTANT: must be albe to run under python 2.4
#
# keywords for HHK are generated from "<a name=>" and "<h1>"
# contents tree for HHC is generated from /html/index.html following links one level down.
# links in each file are followed only between <!-- HHC --> comments, for each "<h3>" a new "folder" is created,
# for each "<a href=>" a new topic with some additional logic that prevents following unwanted links (only width=40% links are followed under h3 sections)
# also, for "<h3>", text for the title is taken upto to the first comma
execfile("config.inc.py")
from os import makedirs, walk, listdir
from os.path import isdir, join, exists
from string import Template
import shutil
import logging
import re
import operator
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)-6s %(message)s")
logging.addLevelName("WARN", 30)
#: just a shortcut
log = logging.info
warn = logging.warn
def copytree(src, dst, symlinks=False, ignore=None):
if not exists(dst):
makedirs(dst)
for item in listdir(src):
s = join(src, item)
d = join(dst, item)
if isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
#end of copytree
def make_chm_lang(lang):
"""@param lang : either 'rus*' or 'eng*'"""
lang_code = lang[0:2]
log("------------------------------------")
log("preparing %s " % lang_code)
log("copying files")
copytree("%s/enc_%s" % (ROOT_DIR, lang), "%s/%s" % (DEST_CHM, lang_code))
chm_lang_dir = join(DEST_CHM, lang_code)
makedirs(join(chm_lang_dir, "html"))
# build empty directory tree
chm_meta_dir = join(chm_lang_dir, "meta")
chm_html_dir = join(chm_lang_dir, "html")
for root, dirs, files in walk(chm_meta_dir):
for d in dirs:
makedirs(join(root.replace(chm_meta_dir, chm_html_dir), d))
log("-- translating meta into html")
# filter files and replace "win32/.." links with calls to MSDN
link_match = re.compile(r'href[\s"\'=\/\.]*?win32\/(?P<funcname>[^"\']*?)(\.html)?[\'"].*?>(?P<linkend>.*?<\/a>)', re.I)
link_replace = Template(
'''href="JavaScript:link$id.Click()">\g<linkend>
<object id="link$id" type="application/x-oleobject" classid="clsid:adb880a6-d8ff-11cf-9377-00aa003b7a11">
<param name="Command" value="KLink">
<param name="DefaultTopic" value="">
<param name="Item1" value="">
<param name="Item2" value="\g<funcname>">
</object>''')
id = 0
for root, dirs, files in walk(chm_meta_dir):
for f in files:
infile = open(join(root, f))
outfile = open(join(root.replace(chm_meta_dir, chm_html_dir), f), "w")
for line in infile:
while link_match.search(line):
line = link_match.sub(link_replace.substitute(id=id), line)
id += 1
outfile.write(line)
infile.close()
outfile.close()
log("total %d win32 links" % id)
log("-- cleaning meta")
shutil.rmtree(chm_meta_dir)
log("-- creating CHM contents")
contents_filename = join(chm_lang_dir, "plugins%s.hhc" % lang[0])
match_h3 = re.compile("<h3>(?P<title>.*?)(</h3>|,)", re.I)
match_link_no_h3 = re.compile(r'<a.+?href\s*=\s*(?P<quote>[\'\"])(.+?)(?P=quote).*?>(.+?)</a>', re.I)
match_link_after_h3 = re.compile(r'.?width\=\"40\%\".?<a.+?href\s*=\s*(?P<quote>[\'\"])(.+?)(?P=quote).*?>(.+?)</a>', re.I)
cntnts = open(contents_filename, "w")
cntnts.write(
"""<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
<HTML>
<HEAD>
<meta http-equiv="Content-Type" Content="text/html; charset=Windows-1251">
<meta name="GENERATOR" content="Microsoft® HTML Help Workshop 4.1">
<!-- Sitemap 1.0 -->
</HEAD><BODY>
<UL>
<LI> <OBJECT type="text/sitemap">
<param name="Name" value="Programming Far Manager plugins - Encyclopedia for Developers">
<param name="Local" value="html/index.html">
</OBJECT>
<UL>
""")
in_hhc1 = 0
f1 = open(join(chm_html_dir, "index.html"))
log("Scanning %s" % f1.name)
for l1 in f1:
if (l1.find("HHC") != -1):
if (in_hhc1==0):
in_hhc1 = 1
continue
break
elif (in_hhc1==0):
continue
for rl in match_link_no_h3.findall(l1):
log(" New link: %s" % rl[2])
cntnts.write(
""" <LI> <OBJECT type="text/sitemap">
<param name="Name" value="%s">
<param name="Local" value="html/%s">
</OBJECT>
""" % (rl[2], rl[1]))
link_dir = rl[1]
link_dir = link_dir[:link_dir.find("/")]
in_hhc2 = 0
in_h3 = 0
in_link = 0
f2 = open(join(chm_html_dir, rl[1]))
log("Scanning %s" % f2.name)
for l2 in f2:
if (l2.find("HHC") != -1):
if (in_hhc2 == 0):
in_hhc2 = 1
continue
break
elif (in_hhc2 == 0):
continue
for rh in match_h3.findall(l2):
if (in_h3 == 1):
log(" Close section")
cntnts.write(" </UL>\n");
else:
log(" Open section")
cntnts.write(" <UL>\n");
in_h3 = 1
cntnts.write(
""" <LI> <OBJECT type="text/sitemap">
<param name="Name" value="%s">
</OBJECT>
<UL>
""" % (rh[0]))
log(" Open section: %s" % rh[0])
match_link = (match_link_no_h3, match_link_after_h3)[in_h3 == 1]
for rl in match_link.findall(l2):
if (in_h3 == 0 and in_link == 0):
log(" Open section")
cntnts.write(" <UL>\n");
in_link = 1
cntnts.write(
""" <LI> <OBJECT type="text/sitemap">
<param name="Name" value="%s">
<param name="Local" value="html/%s/%s">
</OBJECT>
""" % (rl[2], link_dir, rl[1]))
log(" New topic: %s" % rl[2])
if (in_h3 == 1):
log(" Close section")
cntnts.write(" </UL>\n")
if (in_h3 == 1 or in_link == 1):
log(" Close section")
cntnts.write(" </UL>\n")
cntnts.write(
"""
</UL>
</UL>
</BODY></HTML>
""")
log("-- creating CHM indexes")
# indexes are extracted from <h1> and <a name="">..</a>
# articles are not included in index
index_filename = join(chm_lang_dir, "plugins%s.hhk" % lang[0])
match_h1 = re.compile("<h1>(?P<title>.*?)</h1>", re.I)
match_aname = re.compile(r'<a.+?name\s*=\s*(?P<quote>[\'\"])(.+?)(?P=quote).*?>(.+?)</a>', re.I)
strip_re = re.compile(r'"|[/<>\'"]', re.I)
title_list = []
macro_list = []
for root, dirs, files in walk(chm_html_dir):
if root.startswith(join(chm_html_dir, "articles")):
continue
macro_flag = "macro" in root
for fn in files:
if not fn.endswith(".html") or fn in ["faq.html", "msdn.html"]:
continue
relflink = join(root[root.find("html"):], fn).replace('\\', '/')
f = open(join(root, fn))
print chr(8)+".",
for line in f:
if not macro_flag:
target_list = title_list
else:
target_list = macro_list
for rh in match_h1.findall(line):
target_list.append([relflink, strip_re.sub("", rh)])
for ra in match_aname.findall(line):
target_list.append([relflink+"#"+ra[1], strip_re.sub("", ra[2])])
f.close()
print
titles = [t[1] for t in title_list]
for ix, iv in enumerate(macro_list):
if iv[1] in titles:
macro_list[ix][1] = iv[1]+" (Macros)"
title_list.extend(macro_list)
title_list.sort(key=lambda x: x[1].lower())
# processing duplicates
for i,x in enumerate(title_list):
if i and x[1] == title_list[i-1][1]:
# non-anchored link from <h1> take precendence if filename is the same
if "#" in x[0]:
dup1 = x[0]
dup2 = title_list[i-1][0]
else:
dup1 = title_list[i-1][0]
dup2 = x[0]
if "#" not in dup1 or "#" in dup2 or not (dup1.startswith(dup2) and len(dup2) == dup1.find("#")):
warn("duplicate index : %s" % (x[1]))
warn(" %s" % (x[0]))
warn(" %s" % (title_list[i-1][0]))
else:
# remove anchored duplicate
if dup1 == x[0]:
del title_list[i]
else:
del title_list[i-1]
# fcreep: mark all macros with (Macros) explicitly
# fcreep: add distinguished meta labels to macros
idx = open(index_filename, "w")
idx.write(
"""<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
<HTML>
<HEAD>
<meta http-equiv="Content-Type" Content="text/html; charset=Windows-1251">
<meta name="GENERATOR" content="Microsoft® HTML Help Workshop 4.1">
<!-- Sitemap 1.0 -->
</HEAD><BODY>
<UL>
""")
for title in title_list:
idx.write(
""" <LI> <OBJECT type="text/sitemap">
<param name="Name" value="%s">
<param name="Local" value="%s">
</OBJECT>
""" % (title[1], title[0]))
idx.write(
"""</UL>
</BODY></HTML>
""")
# end def make_chm_lang(lang):
log("preparing CHM build")
log("-- cleaning build dir")
if isdir(DEST): shutil.rmtree(DEST)
makedirs(DEST)
logfile = logging.FileHandler(BUILD_CHM_LOG, "w")
logging.getLogger().addHandler(logfile)
log("-- making directory tree")
makedirs(DEST_CHM)
make_chm_lang("rus3.work")
#make_chm_lang("eng")
log("-- done. check build log at %s" % BUILD_CHM_LOG)
| [
"\"\"\"\n",
"Make projects files for building Far Manager Encyclopedia in .CHM format\n",
"\"\"\"\n",
"\n",
"# pythonized by techtonik // gmail.com\n",
"# modifications by Far Group\n",
"\n",
"#\n",
"# IMPORTANT: must be albe to run under python 2.4\n",
"#\n",
"\n",
"# keywords for HHK are generated from \"<a name=>\" and \"<h1>\"\n",
"\n",
"# contents tree for HHC is generated from /html/index.html following links one level down.\n",
"# links in each file are followed only between <!-- HHC --> comments, for each \"<h3>\" a new \"folder\" is created,\n",
"# for each \"<a href=>\" a new topic with some additional logic that prevents following unwanted links (only width=40% links are followed under h3 sections)\n",
"# also, for \"<h3>\", text for the title is taken upto to the first comma\n",
"\n",
"\n",
"execfile(\"config.inc.py\")\n",
"\n",
"from os import makedirs, walk, listdir\n",
"from os.path import isdir, join, exists\n",
"from string import Template\n",
"import shutil\n",
"import logging\n",
"import re\n",
"import operator\n",
"\n",
"logging.basicConfig(level=logging.DEBUG, format=\"%(asctime)s %(levelname)-6s %(message)s\")\n",
"logging.addLevelName(\"WARN\", 30)\n",
"\n",
"#: just a shortcut\n",
"log = logging.info\n",
"warn = logging.warn\n",
"\n",
"def copytree(src, dst, symlinks=False, ignore=None):\n",
" if not exists(dst):\n",
" makedirs(dst)\n",
" for item in listdir(src):\n",
" s = join(src, item)\n",
" d = join(dst, item)\n",
" if isdir(s):\n",
" shutil.copytree(s, d, symlinks, ignore)\n",
" else:\n",
" shutil.copy2(s, d)\n",
"#end of copytree\n",
"\n",
"def make_chm_lang(lang):\n",
" \"\"\"@param lang : either 'rus*' or 'eng*'\"\"\"\n",
" lang_code = lang[0:2]\n",
"\n",
" log(\"------------------------------------\")\n",
" log(\"preparing %s \" % lang_code)\n",
"\n",
" log(\"copying files\")\n",
" copytree(\"%s/enc_%s\" % (ROOT_DIR, lang), \"%s/%s\" % (DEST_CHM, lang_code))\n",
"\n",
" chm_lang_dir = join(DEST_CHM, lang_code)\n",
" makedirs(join(chm_lang_dir, \"html\"))\n",
"\n",
" # build empty directory tree\n",
" chm_meta_dir = join(chm_lang_dir, \"meta\")\n",
" chm_html_dir = join(chm_lang_dir, \"html\")\n",
" for root, dirs, files in walk(chm_meta_dir):\n",
" for d in dirs:\n",
" makedirs(join(root.replace(chm_meta_dir, chm_html_dir), d))\n",
"\n",
" log(\"-- translating meta into html\")\n",
" # filter files and replace \"win32/..\" links with calls to MSDN\n",
" link_match = re.compile(r'href[\\s\"\\'=\\/\\.]*?win32\\/(?P<funcname>[^\"\\']*?)(\\.html)?[\\'\"].*?>(?P<linkend>.*?<\\/a>)', re.I)\n",
" link_replace = Template(\n",
"'''href=\"JavaScript:link$id.Click()\">\\g<linkend>\n",
"<object id=\"link$id\" type=\"application/x-oleobject\" classid=\"clsid:adb880a6-d8ff-11cf-9377-00aa003b7a11\">\n",
"<param name=\"Command\" value=\"KLink\">\n",
"<param name=\"DefaultTopic\" value=\"\">\n",
"<param name=\"Item1\" value=\"\">\n",
"<param name=\"Item2\" value=\"\\g<funcname>\">\n",
"</object>''')\n",
" id = 0\n",
" for root, dirs, files in walk(chm_meta_dir):\n",
" for f in files:\n",
" infile = open(join(root, f))\n",
" outfile = open(join(root.replace(chm_meta_dir, chm_html_dir), f), \"w\")\n",
" for line in infile:\n",
" while link_match.search(line):\n",
" line = link_match.sub(link_replace.substitute(id=id), line)\n",
" id += 1\n",
" outfile.write(line)\n",
" infile.close()\n",
" outfile.close()\n",
" log(\"total %d win32 links\" % id)\n",
"\n",
" log(\"-- cleaning meta\")\n",
" shutil.rmtree(chm_meta_dir)\n",
"\n",
" log(\"-- creating CHM contents\")\n",
" contents_filename = join(chm_lang_dir, \"plugins%s.hhc\" % lang[0])\n",
" match_h3 = re.compile(\"<h3>(?P<title>.*?)(</h3>|,)\", re.I)\n",
" match_link_no_h3 = re.compile(r'<a.+?href\\s*=\\s*(?P<quote>[\\'\\\"])(.+?)(?P=quote).*?>(.+?)</a>', re.I)\n",
" match_link_after_h3 = re.compile(r'.?width\\=\\\"40\\%\\\".?<a.+?href\\s*=\\s*(?P<quote>[\\'\\\"])(.+?)(?P=quote).*?>(.+?)</a>', re.I)\n",
"\n",
" cntnts = open(contents_filename, \"w\")\n",
" cntnts.write(\n",
"\"\"\"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML//EN\">\n",
"<HTML>\n",
"<HEAD>\n",
"<meta http-equiv=\"Content-Type\" Content=\"text/html; charset=Windows-1251\">\n",
"<meta name=\"GENERATOR\" content=\"Microsoft® HTML Help Workshop 4.1\">\n",
"<!-- Sitemap 1.0 -->\n",
"</HEAD><BODY>\n",
"<UL>\n",
" <LI> <OBJECT type=\"text/sitemap\">\n",
" <param name=\"Name\" value=\"Programming Far Manager plugins - Encyclopedia for Developers\">\n",
" <param name=\"Local\" value=\"html/index.html\">\n",
" </OBJECT>\n",
" <UL>\n",
"\n",
"\"\"\")\n",
" in_hhc1 = 0\n",
" f1 = open(join(chm_html_dir, \"index.html\"))\n",
" log(\"Scanning %s\" % f1.name)\n",
" for l1 in f1:\n",
" if (l1.find(\"HHC\") != -1):\n",
" if (in_hhc1==0):\n",
" in_hhc1 = 1\n",
" continue\n",
" break\n",
" elif (in_hhc1==0):\n",
" continue\n",
" for rl in match_link_no_h3.findall(l1):\n",
" log(\" New link: %s\" % rl[2])\n",
" cntnts.write(\n",
"\"\"\" <LI> <OBJECT type=\"text/sitemap\">\n",
" <param name=\"Name\" value=\"%s\">\n",
" <param name=\"Local\" value=\"html/%s\">\n",
" </OBJECT>\n",
"\"\"\" % (rl[2], rl[1]))\n",
" link_dir = rl[1]\n",
" link_dir = link_dir[:link_dir.find(\"/\")]\n",
" in_hhc2 = 0\n",
" in_h3 = 0\n",
" in_link = 0\n",
" f2 = open(join(chm_html_dir, rl[1]))\n",
" log(\"Scanning %s\" % f2.name)\n",
" for l2 in f2:\n",
" if (l2.find(\"HHC\") != -1):\n",
" if (in_hhc2 == 0):\n",
" in_hhc2 = 1\n",
" continue\n",
" break\n",
" elif (in_hhc2 == 0):\n",
" continue\n",
"\n",
" for rh in match_h3.findall(l2):\n",
" if (in_h3 == 1):\n",
" log(\" Close section\")\n",
" cntnts.write(\" </UL>\\n\");\n",
" else:\n",
" log(\" Open section\")\n",
" cntnts.write(\" <UL>\\n\");\n",
"\n",
" in_h3 = 1\n",
" cntnts.write(\n",
"\"\"\" <LI> <OBJECT type=\"text/sitemap\">\n",
" <param name=\"Name\" value=\"%s\">\n",
" </OBJECT>\n",
" <UL>\n",
"\"\"\" % (rh[0]))\n",
" log(\" Open section: %s\" % rh[0])\n",
"\n",
" match_link = (match_link_no_h3, match_link_after_h3)[in_h3 == 1]\n",
" for rl in match_link.findall(l2):\n",
" if (in_h3 == 0 and in_link == 0):\n",
" log(\" Open section\")\n",
" cntnts.write(\" <UL>\\n\");\n",
" in_link = 1\n",
" cntnts.write(\n",
"\"\"\" <LI> <OBJECT type=\"text/sitemap\">\n",
" <param name=\"Name\" value=\"%s\">\n",
" <param name=\"Local\" value=\"html/%s/%s\">\n",
" </OBJECT>\n",
"\"\"\" % (rl[2], link_dir, rl[1]))\n",
" log(\" New topic: %s\" % rl[2])\n",
"\n",
" if (in_h3 == 1):\n",
" log(\" Close section\")\n",
" cntnts.write(\" </UL>\\n\")\n",
"\n",
" if (in_h3 == 1 or in_link == 1):\n",
" log(\" Close section\")\n",
" cntnts.write(\" </UL>\\n\")\n",
"\n",
" cntnts.write(\n",
"\"\"\"\n",
" </UL>\n",
"</UL>\n",
"</BODY></HTML>\n",
"\"\"\")\n",
"\n",
" log(\"-- creating CHM indexes\")\n",
" # indexes are extracted from <h1> and <a name=\"\">..</a>\n",
" # articles are not included in index\n",
"\n",
" index_filename = join(chm_lang_dir, \"plugins%s.hhk\" % lang[0])\n",
" match_h1 = re.compile(\"<h1>(?P<title>.*?)</h1>\", re.I)\n",
" match_aname = re.compile(r'<a.+?name\\s*=\\s*(?P<quote>[\\'\\\"])(.+?)(?P=quote).*?>(.+?)</a>', re.I)\n",
" strip_re = re.compile(r'"|[/<>\\'\"]', re.I)\n",
" title_list = []\n",
" macro_list = []\n",
" for root, dirs, files in walk(chm_html_dir):\n",
" if root.startswith(join(chm_html_dir, \"articles\")):\n",
" continue\n",
" macro_flag = \"macro\" in root\n",
" for fn in files:\n",
" if not fn.endswith(\".html\") or fn in [\"faq.html\", \"msdn.html\"]:\n",
" continue\n",
" relflink = join(root[root.find(\"html\"):], fn).replace('\\\\', '/')\n",
" f = open(join(root, fn))\n",
" print chr(8)+\".\",\n",
" for line in f:\n",
" if not macro_flag:\n",
" target_list = title_list\n",
" else:\n",
" target_list = macro_list\n",
" for rh in match_h1.findall(line):\n",
" target_list.append([relflink, strip_re.sub(\"\", rh)])\n",
" for ra in match_aname.findall(line):\n",
" target_list.append([relflink+\"#\"+ra[1], strip_re.sub(\"\", ra[2])])\n",
" f.close()\n",
" print\n",
"\n",
" titles = [t[1] for t in title_list]\n",
" for ix, iv in enumerate(macro_list):\n",
" if iv[1] in titles:\n",
" macro_list[ix][1] = iv[1]+\" (Macros)\"\n",
"\n",
" title_list.extend(macro_list)\n",
" title_list.sort(key=lambda x: x[1].lower())\n",
"\n",
" # processing duplicates\n",
" for i,x in enumerate(title_list):\n",
" if i and x[1] == title_list[i-1][1]:\n",
" # non-anchored link from <h1> take precendence if filename is the same\n",
" if \"#\" in x[0]:\n",
" dup1 = x[0]\n",
" dup2 = title_list[i-1][0]\n",
" else:\n",
" dup1 = title_list[i-1][0]\n",
" dup2 = x[0]\n",
" if \"#\" not in dup1 or \"#\" in dup2 or not (dup1.startswith(dup2) and len(dup2) == dup1.find(\"#\")):\n",
" warn(\"duplicate index : %s\" % (x[1]))\n",
" warn(\" %s\" % (x[0]))\n",
" warn(\" %s\" % (title_list[i-1][0]))\n",
" else:\n",
" # remove anchored duplicate\n",
" if dup1 == x[0]:\n",
" del title_list[i]\n",
" else:\n",
" del title_list[i-1]\n",
"\n",
" # fcreep: mark all macros with (Macros) explicitly\n",
" # fcreep: add distinguished meta labels to macros\n",
"\n",
" idx = open(index_filename, \"w\")\n",
" idx.write(\n",
"\"\"\"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML//EN\">\n",
"<HTML>\n",
"<HEAD>\n",
"<meta http-equiv=\"Content-Type\" Content=\"text/html; charset=Windows-1251\">\n",
"<meta name=\"GENERATOR\" content=\"Microsoft® HTML Help Workshop 4.1\">\n",
"<!-- Sitemap 1.0 -->\n",
"</HEAD><BODY>\n",
"<UL>\n",
"\"\"\")\n",
"\n",
" for title in title_list:\n",
" idx.write(\n",
"\"\"\" <LI> <OBJECT type=\"text/sitemap\">\n",
" <param name=\"Name\" value=\"%s\">\n",
" <param name=\"Local\" value=\"%s\">\n",
" </OBJECT>\n",
"\"\"\" % (title[1], title[0]))\n",
"\n",
" idx.write(\n",
"\"\"\"</UL>\n",
"</BODY></HTML>\n",
"\"\"\")\n",
"# end def make_chm_lang(lang):\n",
"\n",
"log(\"preparing CHM build\")\n",
"log(\"-- cleaning build dir\")\n",
"if isdir(DEST): shutil.rmtree(DEST)\n",
"makedirs(DEST)\n",
"logfile = logging.FileHandler(BUILD_CHM_LOG, \"w\")\n",
"logging.getLogger().addHandler(logfile)\n",
"\n",
"\n",
"log(\"-- making directory tree\")\n",
"makedirs(DEST_CHM)\n",
"\n",
"make_chm_lang(\"rus3.work\")\n",
"#make_chm_lang(\"eng\")\n",
"\n",
"log(\"-- done. check build log at %s\" % BUILD_CHM_LOG)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0.008849557522123894,
0.0064516129032258064,
0,
0,
0,
0,
0,
0.02564102564102564,
0.025,
0.03571428571428571,
0.07142857142857142,
0.06666666666666667,
0.1,
0.0625,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0.018867924528301886,
0.045454545454545456,
0,
0.03571428571428571,
0,
0,
0,
0.021739130434782608,
0,
0.04,
0.058823529411764705,
0,
0.04,
0.021739130434782608,
0.041666666666666664,
0,
0.021739130434782608,
0.02857142857142857,
0,
0.043478260869565216,
0.013157894736842105,
0,
0.023255813953488372,
0.02564102564102564,
0,
0.03225806451612903,
0.022727272727272728,
0.022727272727272728,
0.02127659574468085,
0,
0.015151515151515152,
0,
0.02564102564102564,
0.015384615384615385,
0.016260162601626018,
0.037037037037037035,
0.04081632653061224,
0.009433962264150943,
0,
0,
0,
0.023809523809523808,
0,
0.1111111111111111,
0.02127659574468085,
0,
0.05555555555555555,
0.012987012987012988,
0.038461538461538464,
0,
0.014285714285714285,
0.05555555555555555,
0,
0.047619047619047616,
0.045454545454545456,
0.02857142857142857,
0,
0.038461538461538464,
0.03333333333333333,
0,
0.029411764705882353,
0.014705882352941176,
0.01639344262295082,
0.019230769230769232,
0.015873015873015872,
0,
0.025,
0.0625,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0.07142857142857142,
0.021739130434782608,
0.03225806451612903,
0.0625,
0,
0.08695652173913043,
0,
0,
0.08333333333333333,
0.043478260869565216,
0.06666666666666667,
0,
0.027777777777777776,
0.05,
0.023255813953488372,
0,
0,
0,
0,
0.043478260869565216,
0.02127659574468085,
0.05555555555555555,
0.0625,
0.05555555555555555,
0.023255813953488372,
0.02857142857142857,
0.05,
0,
0.034482758620689655,
0,
0,
0.0625,
0,
0.05263157894736842,
0,
0,
0.037037037037037035,
0,
0.02127659574468085,
0.0625,
0,
0.023809523809523808,
0,
0.05,
0.041666666666666664,
0.02127659574468085,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0,
0.022727272727272728,
0,
0.023809523809523808,
0.045454545454545456,
0.041666666666666664,
0.018867924528301886,
0,
0,
0,
0,
0.022222222222222223,
0,
0.043478260869565216,
0,
0,
0,
0.02564102564102564,
0,
0,
0,
0.0625,
0.25,
0,
0,
0,
0,
0,
0.030303030303030304,
0.017241379310344827,
0.02564102564102564,
0,
0.015384615384615385,
0.017543859649122806,
0.020202020202020204,
0.02,
0.05555555555555555,
0.05555555555555555,
0.02127659574468085,
0,
0.06666666666666667,
0,
0,
0.014285714285714285,
0,
0.014084507042253521,
0.03225806451612903,
0.041666666666666664,
0.047619047619047616,
0,
0.02857142857142857,
0,
0.02857142857142857,
0,
0.015873015873015872,
0,
0.013157894736842105,
0.0625,
0.125,
0,
0.02631578947368421,
0.02564102564102564,
0,
0.022727272727272728,
0,
0.03125,
0.021739130434782608,
0,
0.038461538461538464,
0.05555555555555555,
0,
0.012987012987012988,
0.045454545454545456,
0,
0,
0.08333333333333333,
0,
0,
0.019230769230769232,
0,
0,
0,
0.08333333333333333,
0,
0,
0.03571428571428571,
0,
0.03333333333333333,
0,
0.018867924528301886,
0.019230769230769232,
0,
0.029411764705882353,
0.07692307692307693,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
0.02564102564102564,
0,
0,
0,
0,
0,
0.07692307692307693,
0.1111111111111111,
0,
0,
0,
0,
0.037037037037037035,
0,
0.027777777777777776,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0
] | 305 | 0.018451 | false |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import logging
import struct
import time
from shadowsocks import shell, eventloop, tcprelay, udprelay, asyncdns, common
import threading
import sys
import traceback
from socket import *
from configloader import load_config, get_config
class MainThread(threading.Thread):
def __init__(self, params):
super(MainThread, self).__init__()
self.params = params
def run(self):
ServerPool._loop(*self.params)
class ServerPool(object):
instance = None
def __init__(self):
shell.check_python()
self.config = shell.get_config(False)
self.dns_resolver = asyncdns.DNSResolver()
if not self.config.get('dns_ipv6', False):
asyncdns.IPV6_CONNECTION_SUPPORT = False
self.mgr = None #asyncmgr.ServerMgr()
self.tcp_servers_pool = {}
self.tcp_ipv6_servers_pool = {}
self.udp_servers_pool = {}
self.udp_ipv6_servers_pool = {}
self.stat_counter = {}
self.loop = eventloop.EventLoop()
self.thread = MainThread( (self.loop, self.dns_resolver, self.mgr) )
self.thread.start()
@staticmethod
def get_instance():
if ServerPool.instance is None:
ServerPool.instance = ServerPool()
return ServerPool.instance
def stop(self):
self.loop.stop()
@staticmethod
def _loop(loop, dns_resolver, mgr):
try:
if mgr is not None:
mgr.add_to_loop(loop)
dns_resolver.add_to_loop(loop)
loop.run()
except (KeyboardInterrupt, IOError, OSError) as e:
logging.error(e)
traceback.print_exc()
os.exit(0)
except Exception as e:
logging.error(e)
traceback.print_exc()
def server_is_run(self, port):
port = int(port)
ret = 0
if port in self.tcp_servers_pool:
ret = 1
if port in self.tcp_ipv6_servers_pool:
ret |= 2
return ret
def server_run_status(self, port):
if 'server' in self.config:
if port not in self.tcp_servers_pool:
return False
if 'server_ipv6' in self.config:
if port not in self.tcp_ipv6_servers_pool:
return False
return True
def new_server(self, port, user_config):
ret = True
port = int(port)
ipv6_ok = False
if 'server_ipv6' in self.config:
if port in self.tcp_ipv6_servers_pool:
logging.info("server already at %s:%d" % (self.config['server_ipv6'], port))
return 'this port server is already running'
else:
a_config = self.config.copy()
a_config.update(user_config)
if len(a_config['server_ipv6']) > 2 and a_config['server_ipv6'][0] == "[" and a_config['server_ipv6'][-1] == "]":
a_config['server_ipv6'] = a_config['server_ipv6'][1:-1]
a_config['server'] = a_config['server_ipv6']
a_config['server_port'] = port
a_config['max_connect'] = 128
a_config['method'] = common.to_str(a_config['method'])
try:
logging.info("starting server at [%s]:%d" % (common.to_str(a_config['server']), port))
tcp_server = tcprelay.TCPRelay(a_config, self.dns_resolver, False, stat_counter=self.stat_counter)
tcp_server.add_to_loop(self.loop)
self.tcp_ipv6_servers_pool.update({port: tcp_server})
udp_server = udprelay.UDPRelay(a_config, self.dns_resolver, False, stat_counter=self.stat_counter)
udp_server.add_to_loop(self.loop)
self.udp_ipv6_servers_pool.update({port: udp_server})
if common.to_str(a_config['server_ipv6']) == "::":
ipv6_ok = True
except Exception as e:
logging.warn("IPV6 %s " % (e,))
if 'server' in self.config:
if port in self.tcp_servers_pool:
logging.info("server already at %s:%d" % (common.to_str(self.config['server']), port))
return 'this port server is already running'
else:
a_config = self.config.copy()
a_config.update(user_config)
a_config['server_port'] = port
a_config['max_connect'] = 128
a_config['method'] = common.to_str(a_config['method'])
try:
logging.info("starting server at %s:%d" % (common.to_str(a_config['server']), port))
tcp_server = tcprelay.TCPRelay(a_config, self.dns_resolver, False)
tcp_server.add_to_loop(self.loop)
self.tcp_servers_pool.update({port: tcp_server})
udp_server = udprelay.UDPRelay(a_config, self.dns_resolver, False)
udp_server.add_to_loop(self.loop)
self.udp_servers_pool.update({port: udp_server})
except Exception as e:
if not ipv6_ok:
logging.warn("IPV4 %s " % (e,))
return True
def del_server(self, port):
port = int(port)
logging.info("del server at %d" % port)
try:
udpsock = socket(AF_INET, SOCK_DGRAM)
udpsock.sendto('%s:%s:0:0' % (get_config().MANAGE_PASS, port), (get_config().MANAGE_BIND_IP, get_config().MANAGE_PORT))
udpsock.close()
except Exception as e:
logging.warn(e)
return True
def cb_del_server(self, port):
port = int(port)
if port not in self.tcp_servers_pool:
logging.info("stopped server at %s:%d already stop" % (self.config['server'], port))
else:
logging.info("stopped server at %s:%d" % (self.config['server'], port))
try:
self.tcp_servers_pool[port].close(True)
del self.tcp_servers_pool[port]
except Exception as e:
logging.warn(e)
try:
self.udp_servers_pool[port].close(True)
del self.udp_servers_pool[port]
except Exception as e:
logging.warn(e)
if 'server_ipv6' in self.config:
if port not in self.tcp_ipv6_servers_pool:
logging.info("stopped server at [%s]:%d already stop" % (self.config['server_ipv6'], port))
else:
logging.info("stopped server at [%s]:%d" % (self.config['server_ipv6'], port))
try:
self.tcp_ipv6_servers_pool[port].close(True)
del self.tcp_ipv6_servers_pool[port]
except Exception as e:
logging.warn(e)
try:
self.udp_ipv6_servers_pool[port].close(True)
del self.udp_ipv6_servers_pool[port]
except Exception as e:
logging.warn(e)
return True
def update_mu_users(self, port, users):
port = int(port)
if port in self.tcp_servers_pool:
try:
self.tcp_servers_pool[port].update_users(users)
except Exception as e:
logging.warn(e)
try:
self.udp_servers_pool[port].update_users(users)
except Exception as e:
logging.warn(e)
if port in self.tcp_ipv6_servers_pool:
try:
self.tcp_ipv6_servers_pool[port].update_users(users)
except Exception as e:
logging.warn(e)
try:
self.udp_ipv6_servers_pool[port].update_users(users)
except Exception as e:
logging.warn(e)
def get_server_transfer(self, port):
port = int(port)
uid = struct.pack('<I', port)
ret = [0, 0]
if port in self.tcp_servers_pool:
ret[0], ret[1] = self.tcp_servers_pool[port].get_ud()
if port in self.udp_servers_pool:
u, d = self.udp_servers_pool[port].get_ud()
ret[0] += u
ret[1] += d
if port in self.tcp_ipv6_servers_pool:
u, d = self.tcp_ipv6_servers_pool[port].get_ud()
ret[0] += u
ret[1] += d
if port in self.udp_ipv6_servers_pool:
u, d = self.udp_ipv6_servers_pool[port].get_ud()
ret[0] += u
ret[1] += d
return ret
def get_server_mu_transfer(self, server):
return server.get_users_ud()
def update_mu_transfer(self, user_dict, u, d):
for uid in u:
port = struct.unpack('<I', uid)[0]
if port not in user_dict:
user_dict[port] = [0, 0]
user_dict[port][0] += u[uid]
for uid in d:
port = struct.unpack('<I', uid)[0]
if port not in user_dict:
user_dict[port] = [0, 0]
user_dict[port][1] += d[uid]
def get_servers_transfer(self):
servers = self.tcp_servers_pool.copy()
servers.update(self.tcp_ipv6_servers_pool)
servers.update(self.udp_servers_pool)
servers.update(self.udp_ipv6_servers_pool)
ret = {}
for port in servers.keys():
ret[port] = self.get_server_transfer(port)
for port in self.tcp_servers_pool:
u, d = self.get_server_mu_transfer(self.tcp_servers_pool[port])
self.update_mu_transfer(ret, u, d)
for port in self.tcp_ipv6_servers_pool:
u, d = self.get_server_mu_transfer(self.tcp_ipv6_servers_pool[port])
self.update_mu_transfer(ret, u, d)
for port in self.udp_servers_pool:
u, d = self.get_server_mu_transfer(self.udp_servers_pool[port])
self.update_mu_transfer(ret, u, d)
for port in self.udp_ipv6_servers_pool:
u, d = self.get_server_mu_transfer(self.udp_ipv6_servers_pool[port])
self.update_mu_transfer(ret, u, d)
return ret
| [
"#!/usr/bin/env python3\n",
"# -*- coding: utf-8 -*-\n",
"\n",
"# Copyright (c) 2014 clowwindy\n",
"#\n",
"# Permission is hereby granted, free of charge, to any person obtaining a copy\n",
"# of this software and associated documentation files (the \"Software\"), to deal\n",
"# in the Software without restriction, including without limitation the rights\n",
"# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n",
"# copies of the Software, and to permit persons to whom the Software is\n",
"# furnished to do so, subject to the following conditions:\n",
"#\n",
"# The above copyright notice and this permission notice shall be included in\n",
"# all copies or substantial portions of the Software.\n",
"#\n",
"# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n",
"# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n",
"# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n",
"# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n",
"# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n",
"# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n",
"# SOFTWARE.\n",
"\n",
"import os\n",
"import logging\n",
"import struct\n",
"import time\n",
"from shadowsocks import shell, eventloop, tcprelay, udprelay, asyncdns, common\n",
"import threading\n",
"import sys\n",
"import traceback\n",
"from socket import *\n",
"from configloader import load_config, get_config\n",
"\n",
"class MainThread(threading.Thread):\n",
"\tdef __init__(self, params):\n",
"\t\tsuper(MainThread, self).__init__()\n",
"\t\tself.params = params\n",
"\n",
"\tdef run(self):\n",
"\t\tServerPool._loop(*self.params)\n",
"\n",
"class ServerPool(object):\n",
"\n",
"\tinstance = None\n",
"\n",
"\tdef __init__(self):\n",
"\t\tshell.check_python()\n",
"\t\tself.config = shell.get_config(False)\n",
"\t\tself.dns_resolver = asyncdns.DNSResolver()\n",
"\t\tif not self.config.get('dns_ipv6', False):\n",
"\t\t\tasyncdns.IPV6_CONNECTION_SUPPORT = False\n",
"\n",
"\t\tself.mgr = None #asyncmgr.ServerMgr()\n",
"\n",
"\t\tself.tcp_servers_pool = {}\n",
"\t\tself.tcp_ipv6_servers_pool = {}\n",
"\t\tself.udp_servers_pool = {}\n",
"\t\tself.udp_ipv6_servers_pool = {}\n",
"\t\tself.stat_counter = {}\n",
"\n",
"\t\tself.loop = eventloop.EventLoop()\n",
"\t\tself.thread = MainThread( (self.loop, self.dns_resolver, self.mgr) )\n",
"\t\tself.thread.start()\n",
"\n",
"\t@staticmethod\n",
"\tdef get_instance():\n",
"\t\tif ServerPool.instance is None:\n",
"\t\t\tServerPool.instance = ServerPool()\n",
"\t\treturn ServerPool.instance\n",
"\n",
"\tdef stop(self):\n",
"\t\tself.loop.stop()\n",
"\n",
"\t@staticmethod\n",
"\tdef _loop(loop, dns_resolver, mgr):\n",
"\t\ttry:\n",
"\t\t\tif mgr is not None:\n",
"\t\t\t\tmgr.add_to_loop(loop)\n",
"\t\t\tdns_resolver.add_to_loop(loop)\n",
"\t\t\tloop.run()\n",
"\t\texcept (KeyboardInterrupt, IOError, OSError) as e:\n",
"\t\t\tlogging.error(e)\n",
"\t\t\ttraceback.print_exc()\n",
"\t\t\tos.exit(0)\n",
"\t\texcept Exception as e:\n",
"\t\t\tlogging.error(e)\n",
"\t\t\ttraceback.print_exc()\n",
"\n",
"\tdef server_is_run(self, port):\n",
"\t\tport = int(port)\n",
"\t\tret = 0\n",
"\t\tif port in self.tcp_servers_pool:\n",
"\t\t\tret = 1\n",
"\t\tif port in self.tcp_ipv6_servers_pool:\n",
"\t\t\tret |= 2\n",
"\t\treturn ret\n",
"\n",
"\tdef server_run_status(self, port):\n",
"\t\tif 'server' in self.config:\n",
"\t\t\tif port not in self.tcp_servers_pool:\n",
"\t\t\t\treturn False\n",
"\t\tif 'server_ipv6' in self.config:\n",
"\t\t\tif port not in self.tcp_ipv6_servers_pool:\n",
"\t\t\t\treturn False\n",
"\t\treturn True\n",
"\n",
"\tdef new_server(self, port, user_config):\n",
"\t\tret = True\n",
"\t\tport = int(port)\n",
"\t\tipv6_ok = False\n",
"\n",
"\t\tif 'server_ipv6' in self.config:\n",
"\t\t\tif port in self.tcp_ipv6_servers_pool:\n",
"\t\t\t\tlogging.info(\"server already at %s:%d\" % (self.config['server_ipv6'], port))\n",
"\t\t\t\treturn 'this port server is already running'\n",
"\t\t\telse:\n",
"\t\t\t\ta_config = self.config.copy()\n",
"\t\t\t\ta_config.update(user_config)\n",
"\t\t\t\tif len(a_config['server_ipv6']) > 2 and a_config['server_ipv6'][0] == \"[\" and a_config['server_ipv6'][-1] == \"]\":\n",
"\t\t\t\t\ta_config['server_ipv6'] = a_config['server_ipv6'][1:-1]\n",
"\t\t\t\ta_config['server'] = a_config['server_ipv6']\n",
"\t\t\t\ta_config['server_port'] = port\n",
"\t\t\t\ta_config['max_connect'] = 128\n",
"\t\t\t\ta_config['method'] = common.to_str(a_config['method'])\n",
"\t\t\t\ttry:\n",
"\t\t\t\t\tlogging.info(\"starting server at [%s]:%d\" % (common.to_str(a_config['server']), port))\n",
"\n",
"\t\t\t\t\ttcp_server = tcprelay.TCPRelay(a_config, self.dns_resolver, False, stat_counter=self.stat_counter)\n",
"\t\t\t\t\ttcp_server.add_to_loop(self.loop)\n",
"\t\t\t\t\tself.tcp_ipv6_servers_pool.update({port: tcp_server})\n",
"\n",
"\t\t\t\t\tudp_server = udprelay.UDPRelay(a_config, self.dns_resolver, False, stat_counter=self.stat_counter)\n",
"\t\t\t\t\tudp_server.add_to_loop(self.loop)\n",
"\t\t\t\t\tself.udp_ipv6_servers_pool.update({port: udp_server})\n",
"\n",
"\t\t\t\t\tif common.to_str(a_config['server_ipv6']) == \"::\":\n",
"\t\t\t\t\t\tipv6_ok = True\n",
"\t\t\t\texcept Exception as e:\n",
"\t\t\t\t\tlogging.warn(\"IPV6 %s \" % (e,))\n",
"\n",
"\t\tif 'server' in self.config:\n",
"\t\t\tif port in self.tcp_servers_pool:\n",
"\t\t\t\tlogging.info(\"server already at %s:%d\" % (common.to_str(self.config['server']), port))\n",
"\t\t\t\treturn 'this port server is already running'\n",
"\t\t\telse:\n",
"\t\t\t\ta_config = self.config.copy()\n",
"\t\t\t\ta_config.update(user_config)\n",
"\t\t\t\ta_config['server_port'] = port\n",
"\t\t\t\ta_config['max_connect'] = 128\n",
"\t\t\t\ta_config['method'] = common.to_str(a_config['method'])\n",
"\t\t\t\ttry:\n",
"\t\t\t\t\tlogging.info(\"starting server at %s:%d\" % (common.to_str(a_config['server']), port))\n",
"\n",
"\t\t\t\t\ttcp_server = tcprelay.TCPRelay(a_config, self.dns_resolver, False)\n",
"\t\t\t\t\ttcp_server.add_to_loop(self.loop)\n",
"\t\t\t\t\tself.tcp_servers_pool.update({port: tcp_server})\n",
"\n",
"\t\t\t\t\tudp_server = udprelay.UDPRelay(a_config, self.dns_resolver, False)\n",
"\t\t\t\t\tudp_server.add_to_loop(self.loop)\n",
"\t\t\t\t\tself.udp_servers_pool.update({port: udp_server})\n",
"\n",
"\t\t\t\texcept Exception as e:\n",
"\t\t\t\t\tif not ipv6_ok:\n",
"\t\t\t\t\t\tlogging.warn(\"IPV4 %s \" % (e,))\n",
"\n",
"\t\treturn True\n",
"\n",
"\tdef del_server(self, port):\n",
"\t\tport = int(port)\n",
"\t\tlogging.info(\"del server at %d\" % port)\n",
"\t\ttry:\n",
"\t\t\tudpsock = socket(AF_INET, SOCK_DGRAM)\n",
"\t\t\tudpsock.sendto('%s:%s:0:0' % (get_config().MANAGE_PASS, port), (get_config().MANAGE_BIND_IP, get_config().MANAGE_PORT))\n",
"\t\t\tudpsock.close()\n",
"\t\texcept Exception as e:\n",
"\t\t\tlogging.warn(e)\n",
"\t\treturn True\n",
"\n",
"\tdef cb_del_server(self, port):\n",
"\t\tport = int(port)\n",
"\n",
"\t\tif port not in self.tcp_servers_pool:\n",
"\t\t\tlogging.info(\"stopped server at %s:%d already stop\" % (self.config['server'], port))\n",
"\t\telse:\n",
"\t\t\tlogging.info(\"stopped server at %s:%d\" % (self.config['server'], port))\n",
"\t\t\ttry:\n",
"\t\t\t\tself.tcp_servers_pool[port].close(True)\n",
"\t\t\t\tdel self.tcp_servers_pool[port]\n",
"\t\t\texcept Exception as e:\n",
"\t\t\t\tlogging.warn(e)\n",
"\t\t\ttry:\n",
"\t\t\t\tself.udp_servers_pool[port].close(True)\n",
"\t\t\t\tdel self.udp_servers_pool[port]\n",
"\t\t\texcept Exception as e:\n",
"\t\t\t\tlogging.warn(e)\n",
"\n",
"\t\tif 'server_ipv6' in self.config:\n",
"\t\t\tif port not in self.tcp_ipv6_servers_pool:\n",
"\t\t\t\tlogging.info(\"stopped server at [%s]:%d already stop\" % (self.config['server_ipv6'], port))\n",
"\t\t\telse:\n",
"\t\t\t\tlogging.info(\"stopped server at [%s]:%d\" % (self.config['server_ipv6'], port))\n",
"\t\t\t\ttry:\n",
"\t\t\t\t\tself.tcp_ipv6_servers_pool[port].close(True)\n",
"\t\t\t\t\tdel self.tcp_ipv6_servers_pool[port]\n",
"\t\t\t\texcept Exception as e:\n",
"\t\t\t\t\tlogging.warn(e)\n",
"\t\t\t\ttry:\n",
"\t\t\t\t\tself.udp_ipv6_servers_pool[port].close(True)\n",
"\t\t\t\t\tdel self.udp_ipv6_servers_pool[port]\n",
"\t\t\t\texcept Exception as e:\n",
"\t\t\t\t\tlogging.warn(e)\n",
"\n",
"\t\treturn True\n",
"\n",
"\tdef update_mu_users(self, port, users):\n",
"\t\tport = int(port)\n",
"\t\tif port in self.tcp_servers_pool:\n",
"\t\t\ttry:\n",
"\t\t\t\tself.tcp_servers_pool[port].update_users(users)\n",
"\t\t\texcept Exception as e:\n",
"\t\t\t\tlogging.warn(e)\n",
"\t\t\ttry:\n",
"\t\t\t\tself.udp_servers_pool[port].update_users(users)\n",
"\t\t\texcept Exception as e:\n",
"\t\t\t\tlogging.warn(e)\n",
"\t\tif port in self.tcp_ipv6_servers_pool:\n",
"\t\t\ttry:\n",
"\t\t\t\tself.tcp_ipv6_servers_pool[port].update_users(users)\n",
"\t\t\texcept Exception as e:\n",
"\t\t\t\tlogging.warn(e)\n",
"\t\t\ttry:\n",
"\t\t\t\tself.udp_ipv6_servers_pool[port].update_users(users)\n",
"\t\t\texcept Exception as e:\n",
"\t\t\t\tlogging.warn(e)\n",
"\n",
"\tdef get_server_transfer(self, port):\n",
"\t\tport = int(port)\n",
"\t\tuid = struct.pack('<I', port)\n",
"\t\tret = [0, 0]\n",
"\t\tif port in self.tcp_servers_pool:\n",
"\t\t\tret[0], ret[1] = self.tcp_servers_pool[port].get_ud()\n",
"\t\tif port in self.udp_servers_pool:\n",
"\t\t\tu, d = self.udp_servers_pool[port].get_ud()\n",
"\t\t\tret[0] += u\n",
"\t\t\tret[1] += d\n",
"\t\tif port in self.tcp_ipv6_servers_pool:\n",
"\t\t\tu, d = self.tcp_ipv6_servers_pool[port].get_ud()\n",
"\t\t\tret[0] += u\n",
"\t\t\tret[1] += d\n",
"\t\tif port in self.udp_ipv6_servers_pool:\n",
"\t\t\tu, d = self.udp_ipv6_servers_pool[port].get_ud()\n",
"\t\t\tret[0] += u\n",
"\t\t\tret[1] += d\n",
"\t\treturn ret\n",
"\n",
"\tdef get_server_mu_transfer(self, server):\n",
"\t\treturn server.get_users_ud()\n",
"\n",
"\tdef update_mu_transfer(self, user_dict, u, d):\n",
"\t\tfor uid in u:\n",
"\t\t\tport = struct.unpack('<I', uid)[0]\n",
"\t\t\tif port not in user_dict:\n",
"\t\t\t\tuser_dict[port] = [0, 0]\n",
"\t\t\tuser_dict[port][0] += u[uid]\n",
"\t\tfor uid in d:\n",
"\t\t\tport = struct.unpack('<I', uid)[0]\n",
"\t\t\tif port not in user_dict:\n",
"\t\t\t\tuser_dict[port] = [0, 0]\n",
"\t\t\tuser_dict[port][1] += d[uid]\n",
"\n",
"\tdef get_servers_transfer(self):\n",
"\t\tservers = self.tcp_servers_pool.copy()\n",
"\t\tservers.update(self.tcp_ipv6_servers_pool)\n",
"\t\tservers.update(self.udp_servers_pool)\n",
"\t\tservers.update(self.udp_ipv6_servers_pool)\n",
"\t\tret = {}\n",
"\t\tfor port in servers.keys():\n",
"\t\t\tret[port] = self.get_server_transfer(port)\n",
"\t\tfor port in self.tcp_servers_pool:\n",
"\t\t\tu, d = self.get_server_mu_transfer(self.tcp_servers_pool[port])\n",
"\t\t\tself.update_mu_transfer(ret, u, d)\n",
"\t\tfor port in self.tcp_ipv6_servers_pool:\n",
"\t\t\tu, d = self.get_server_mu_transfer(self.tcp_ipv6_servers_pool[port])\n",
"\t\t\tself.update_mu_transfer(ret, u, d)\n",
"\t\tfor port in self.udp_servers_pool:\n",
"\t\t\tu, d = self.get_server_mu_transfer(self.udp_servers_pool[port])\n",
"\t\t\tself.update_mu_transfer(ret, u, d)\n",
"\t\tfor port in self.udp_ipv6_servers_pool:\n",
"\t\t\tu, d = self.get_server_mu_transfer(self.udp_ipv6_servers_pool[port])\n",
"\t\t\tself.update_mu_transfer(ret, u, d)\n",
"\t\treturn ret\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776,
0.034482758620689655,
0.02702702702702703,
0.043478260869565216,
0,
0.0625,
0.030303030303030304,
0,
0.038461538461538464,
0,
0.058823529411764705,
0,
0.047619047619047616,
0.043478260869565216,
0.025,
0.022222222222222223,
0.022222222222222223,
0.022727272727272728,
0,
0.075,
0,
0.034482758620689655,
0.029411764705882353,
0.034482758620689655,
0.029411764705882353,
0.04,
0,
0.027777777777777776,
0.04225352112676056,
0.045454545454545456,
0,
0.06666666666666667,
0.047619047619047616,
0.029411764705882353,
0.02631578947368421,
0.034482758620689655,
0,
0.058823529411764705,
0.05263157894736842,
0,
0.06666666666666667,
0.02702702702702703,
0.14285714285714285,
0.043478260869565216,
0.038461538461538464,
0.029411764705882353,
0.07142857142857142,
0.018867924528301886,
0.05,
0.04,
0.07142857142857142,
0.04,
0.05,
0.04,
0,
0.03125,
0.05263157894736842,
0.1,
0.027777777777777776,
0.09090909090909091,
0.024390243902439025,
0.08333333333333333,
0.07692307692307693,
0,
0.027777777777777776,
0.03333333333333333,
0.024390243902439025,
0.058823529411764705,
0.02857142857142857,
0.021739130434782608,
0.058823529411764705,
0.07142857142857142,
0,
0.023809523809523808,
0.07692307692307693,
0.05263157894736842,
0.05555555555555555,
0,
0.02857142857142857,
0.023809523809523808,
0.024691358024691357,
0.02040816326530612,
0.1111111111111111,
0.029411764705882353,
0.030303030303030304,
0.01694915254237288,
0.01639344262295082,
0.02040816326530612,
0.02857142857142857,
0.029411764705882353,
0.01694915254237288,
0.1111111111111111,
0.021739130434782608,
0,
0.019230769230769232,
0.02564102564102564,
0.01694915254237288,
0,
0.019230769230769232,
0.02564102564102564,
0.01694915254237288,
0,
0.017857142857142856,
0.047619047619047616,
0.037037037037037035,
0.02702702702702703,
0,
0.03333333333333333,
0.02702702702702703,
0.02197802197802198,
0.02040816326530612,
0.1111111111111111,
0.029411764705882353,
0.030303030303030304,
0.02857142857142857,
0.029411764705882353,
0.01694915254237288,
0.1111111111111111,
0.022222222222222223,
0,
0.013888888888888888,
0.02564102564102564,
0.018518518518518517,
0,
0.013888888888888888,
0.02564102564102564,
0.018518518518518517,
0,
0.037037037037037035,
0.047619047619047616,
0.02631578947368421,
0,
0.07142857142857142,
0,
0.034482758620689655,
0.05263157894736842,
0.023809523809523808,
0.14285714285714285,
0.024390243902439025,
0.016260162601626018,
0.05263157894736842,
0.04,
0.05263157894736842,
0.07142857142857142,
0,
0.03125,
0.05263157894736842,
0,
0.025,
0.022727272727272728,
0.125,
0.013333333333333334,
0.125,
0.022727272727272728,
0.027777777777777776,
0.038461538461538464,
0.05,
0.125,
0.022727272727272728,
0.027777777777777776,
0.038461538461538464,
0.05,
0,
0.02857142857142857,
0.021739130434782608,
0.020833333333333332,
0.1111111111111111,
0.024096385542168676,
0.1111111111111111,
0.02,
0.023809523809523808,
0.037037037037037035,
0.047619047619047616,
0.1111111111111111,
0.02,
0.023809523809523808,
0.037037037037037035,
0.047619047619047616,
0,
0.07142857142857142,
0,
0.024390243902439025,
0.05263157894736842,
0.027777777777777776,
0.125,
0.019230769230769232,
0.038461538461538464,
0.05,
0.125,
0.019230769230769232,
0.038461538461538464,
0.05,
0.024390243902439025,
0.125,
0.017543859649122806,
0.038461538461538464,
0.05,
0.125,
0.017543859649122806,
0.038461538461538464,
0.05,
0,
0.02631578947368421,
0.05263157894736842,
0.03125,
0.06666666666666667,
0.027777777777777776,
0.017543859649122806,
0.027777777777777776,
0.02127659574468085,
0.06666666666666667,
0.06666666666666667,
0.024390243902439025,
0.019230769230769232,
0.06666666666666667,
0.06666666666666667,
0.024390243902439025,
0.019230769230769232,
0.06666666666666667,
0.06666666666666667,
0.07692307692307693,
0,
0.023255813953488372,
0.03225806451612903,
0,
0.020833333333333332,
0.0625,
0.02631578947368421,
0.034482758620689655,
0.034482758620689655,
0.03125,
0.0625,
0.02631578947368421,
0.034482758620689655,
0.034482758620689655,
0.03125,
0,
0.030303030303030304,
0.024390243902439025,
0.022222222222222223,
0.025,
0.022222222222222223,
0.09090909090909091,
0.03333333333333333,
0.021739130434782608,
0.02702702702702703,
0.014925373134328358,
0.02631578947368421,
0.023809523809523808,
0.013888888888888888,
0.02631578947368421,
0.02702702702702703,
0.014925373134328358,
0.02631578947368421,
0.023809523809523808,
0.013888888888888888,
0.02631578947368421,
0.07692307692307693,
1
] | 293 | 0.035899 | false |
# -*- coding: utf_8 -*-
from pyspec import *
from stack import *
class StackBehavior(object):
"""スタックの動作仕様."""
@context(group=0)
def New_stack(self):
"""新しいスタック."""
self.stack = Stack()
@spec(group=0)
def should_empty(self):
"""空でなければならない."""
About(self.stack).should_be_empty()
@spec(group=0)
def should_not_be_empty_after_push(self):
"""push()後は空ではない"""
self.stack.push(37)
About(self.stack).should_not_be_empty()
@context(group=1)
def A_stack_with_one_item(self):
"""要素をひとつ持つスタック."""
self.stack = Stack()
self.stack.push("one item")
@spec(group=1)
def should_return_top_when_top_method_called(self):
"""top()メソッドを呼ぶと、先頭の要素を返す."""
About(self.stack.top()).should_equal("one item")
@spec(group=1)
def should_not_be_empty(self):
"""空ではない."""
print "コンソールに出した文字列が表示されます"
About(self.stack).should_not_be_empty()
if __name__ == "__main__":
run_test()
| [
"# -*- coding: utf_8 -*-\n",
"\n",
"from pyspec import *\n",
"from stack import *\n",
"\n",
"class StackBehavior(object):\n",
"\t\"\"\"スタックの動作仕様.\"\"\"\n",
"\t@context(group=0)\n",
"\tdef New_stack(self):\n",
"\t\t\"\"\"新しいスタック.\"\"\"\n",
"\t\tself.stack = Stack()\n",
"\n",
"\t@spec(group=0)\n",
"\tdef should_empty(self):\n",
"\t\t\"\"\"空でなければならない.\"\"\"\n",
"\t\tAbout(self.stack).should_be_empty()\n",
"\n",
"\t@spec(group=0)\n",
"\tdef should_not_be_empty_after_push(self):\n",
"\t\t\"\"\"push()後は空ではない\"\"\"\n",
"\t\tself.stack.push(37)\n",
"\t\tAbout(self.stack).should_not_be_empty()\n",
"\n",
"\t@context(group=1)\n",
"\tdef A_stack_with_one_item(self):\n",
"\t\t\"\"\"要素をひとつ持つスタック.\"\"\"\n",
"\t\tself.stack = Stack()\n",
"\t\tself.stack.push(\"one item\")\n",
"\n",
"\t@spec(group=1)\n",
"\tdef should_return_top_when_top_method_called(self):\n",
"\t\t\"\"\"top()メソッドを呼ぶと、先頭の要素を返す.\"\"\"\n",
"\t\tAbout(self.stack.top()).should_equal(\"one item\")\n",
"\n",
"\t@spec(group=1)\n",
"\tdef should_not_be_empty(self):\n",
"\t\t\"\"\"空ではない.\"\"\"\n",
"\t\tprint \"コンソールに出した文字列が表示されます\"\n",
"\t\tAbout(self.stack).should_not_be_empty()\n",
"\n",
"if __name__ == \"__main__\":\n",
" run_test()\n"
] | [
0,
0,
0,
0,
0,
0.034482758620689655,
0.05555555555555555,
0.05263157894736842,
0.045454545454545456,
0.058823529411764705,
0.043478260869565216,
0,
0.0625,
0.04,
0.05,
0.02631578947368421,
0,
0.0625,
0.023255813953488372,
0.045454545454545456,
0.045454545454545456,
0.023809523809523808,
0,
0.05263157894736842,
0.029411764705882353,
0.045454545454545456,
0.043478260869565216,
0.03333333333333333,
0,
0.0625,
0.018867924528301886,
0.03125,
0.0196078431372549,
0,
0.0625,
0.03125,
0.06666666666666667,
0.03333333333333333,
0.023809523809523808,
0,
0.037037037037037035,
0.06666666666666667
] | 42 | 0.031607 | false |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System.Core")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import QCAlgorithm
from QuantConnect.Data.UniverseSelection import *
from datetime import datetime
### <summary>
### Demonstration of how to define a universe as a combination of use the coarse fundamental data and fine fundamental data
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="coarse universes" />
### <meta name="tag" content="regression test" />
class CoarseFineFundamentalRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2014,4,1) #Set Start Date
self.SetEndDate(2014,4,30) #Set End Date
self.SetCash(50000) #Set Strategy Cash
self.UniverseSettings.Resolution = Resolution.Daily
# this add universe method accepts two parameters:
# - coarse selection function: accepts an IEnumerable<CoarseFundamental> and returns an IEnumerable<Symbol>
# - fine selection function: accepts an IEnumerable<FineFundamental> and returns an IEnumerable<Symbol>
self.AddUniverse(self.CoarseSelectionFunction, self.FineSelectionFunction)
self.changes = None
self.numberOfSymbolsFine = 2
# return a list of three fixed symbol objects
def CoarseSelectionFunction(self, coarse):
tickers = [ "GOOG", "BAC", "SPY" ]
if self.Time < datetime(2014, 4, 5):
tickers = [ "AAPL", "AIG", "IBM" ]
return [ Symbol.Create(x, SecurityType.Equity, Market.USA) for x in tickers ]
# sort the data by P/E ratio and take the top 'NumberOfSymbolsFine'
def FineSelectionFunction(self, fine):
# sort descending by P/E ratio
sortedByPeRatio = sorted(fine, key=lambda x: x.ValuationRatios.PERatio, reverse=True)
# take the top entries from our sorted collection
return [ x.Symbol for x in sortedByPeRatio[:self.numberOfSymbolsFine] ]
def OnData(self, data):
# if we have no changes, do nothing
if self.changes == None: return
# liquidate removed securities
for security in self.changes.RemovedSecurities:
if security.Invested:
self.Liquidate(security.Symbol)
self.Debug("Liquidated Stock: " + str(security.Symbol.Value))
# we want 50% allocation in each security in our universe
for security in self.changes.AddedSecurities:
self.SetHoldings(security.Symbol, 0.5)
self.Debug("Purchased Stock: " + str(security.Symbol.Value))
self.changes = None
# this event fires whenever we have changes to our universe
def OnSecuritiesChanged(self, changes):
self.changes = changes | [
"# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.\n",
"# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"from clr import AddReference\n",
"AddReference(\"System.Core\")\n",
"AddReference(\"QuantConnect.Common\")\n",
"AddReference(\"QuantConnect.Algorithm\")\n",
"\n",
"from System import *\n",
"from QuantConnect import *\n",
"from QuantConnect.Algorithm import QCAlgorithm\n",
"from QuantConnect.Data.UniverseSelection import *\n",
"from datetime import datetime\n",
"\n",
"### <summary>\n",
"### Demonstration of how to define a universe as a combination of use the coarse fundamental data and fine fundamental data\n",
"### </summary>\n",
"### <meta name=\"tag\" content=\"using data\" />\n",
"### <meta name=\"tag\" content=\"universes\" />\n",
"### <meta name=\"tag\" content=\"coarse universes\" />\n",
"### <meta name=\"tag\" content=\"regression test\" />\n",
"class CoarseFineFundamentalRegressionAlgorithm(QCAlgorithm):\n",
"\n",
" def Initialize(self):\n",
" self.SetStartDate(2014,4,1) #Set Start Date\n",
" self.SetEndDate(2014,4,30) #Set End Date\n",
" self.SetCash(50000) #Set Strategy Cash\n",
"\n",
" self.UniverseSettings.Resolution = Resolution.Daily\n",
"\n",
" # this add universe method accepts two parameters:\n",
" # - coarse selection function: accepts an IEnumerable<CoarseFundamental> and returns an IEnumerable<Symbol>\n",
" # - fine selection function: accepts an IEnumerable<FineFundamental> and returns an IEnumerable<Symbol>\n",
" self.AddUniverse(self.CoarseSelectionFunction, self.FineSelectionFunction)\n",
"\n",
" self.changes = None\n",
" self.numberOfSymbolsFine = 2\n",
"\n",
" # return a list of three fixed symbol objects\n",
" def CoarseSelectionFunction(self, coarse): \n",
" tickers = [ \"GOOG\", \"BAC\", \"SPY\" ]\n",
"\n",
" if self.Time < datetime(2014, 4, 5):\n",
" tickers = [ \"AAPL\", \"AIG\", \"IBM\" ]\n",
" \n",
" return [ Symbol.Create(x, SecurityType.Equity, Market.USA) for x in tickers ]\n",
" \n",
"\n",
" # sort the data by P/E ratio and take the top 'NumberOfSymbolsFine'\n",
" def FineSelectionFunction(self, fine):\n",
" # sort descending by P/E ratio\n",
" sortedByPeRatio = sorted(fine, key=lambda x: x.ValuationRatios.PERatio, reverse=True)\n",
"\n",
" # take the top entries from our sorted collection\n",
" return [ x.Symbol for x in sortedByPeRatio[:self.numberOfSymbolsFine] ]\n",
"\n",
" def OnData(self, data):\n",
" # if we have no changes, do nothing\n",
" if self.changes == None: return\n",
"\n",
" # liquidate removed securities\n",
" for security in self.changes.RemovedSecurities:\n",
" if security.Invested:\n",
" self.Liquidate(security.Symbol)\n",
" self.Debug(\"Liquidated Stock: \" + str(security.Symbol.Value))\n",
"\n",
" # we want 50% allocation in each security in our universe\n",
" for security in self.changes.AddedSecurities:\n",
" self.SetHoldings(security.Symbol, 0.5)\n",
" self.Debug(\"Purchased Stock: \" + str(security.Symbol.Value))\n",
"\n",
" self.changes = None\n",
"\n",
"\n",
" # this event fires whenever we have changes to our universe\n",
" def OnSecuritiesChanged(self, changes):\n",
" self.changes = changes"
] | [
0,
0.012345679012345678,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.037037037037037035,
0.02127659574468085,
0.02,
0.03333333333333333,
0,
0.07142857142857142,
0.016129032258064516,
0.06666666666666667,
0.022222222222222223,
0.022727272727272728,
0.0196078431372549,
0.02,
0.01639344262295082,
0,
0,
0.05454545454545454,
0.05660377358490566,
0.017241379310344827,
0,
0,
0,
0,
0.008620689655172414,
0.008928571428571428,
0.012048192771084338,
0,
0,
0,
0,
0,
0.01818181818181818,
0.046511627906976744,
0,
0,
0.0425531914893617,
0.1111111111111111,
0.03488372093023256,
0.1111111111111111,
0,
0.013888888888888888,
0.023255813953488372,
0,
0.010638297872340425,
0,
0,
0.025,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015625,
0.022727272727272728,
0.03333333333333333
] | 87 | 0.01328 | false |
# -*- coding: utf-8 -*-
__author__ = "Konstantin Klementiev", "Roman Chernikov"
__date__ = "20 Sep 2016"
import os
import sys
#import pickle
import numpy as np
from scipy import optimize
from scipy import special
import inspect
from .. import raycing
from . import myopencl as mcl
from .sources_beams import Beam, allArguments
from .physconsts import E0, C, M0, EV2ERG, K2B, SIE0,\
SIM0, FINE_STR, PI, PI2, SQ2, SQ3, SQPI, E2W, CHeVcm, CH, CHBAR
try:
import pyopencl as cl # analysis:ignore
isOpenCL = True
os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'
except ImportError:
isOpenCL = False
# _DEBUG replaced with raycing._VERBOSITY_
class BendingMagnet(object):
u"""
Bending magnet source. The computation is reasonably fast and thus a GPU
is not required and is not implemented.
"""
def __init__(self, bl=None, name='BM', center=(0, 0, 0),
nrays=raycing.nrays,
eE=3.0, eI=0.5, eEspread=0., eSigmaX=None, eSigmaZ=None,
eEpsilonX=1., eEpsilonZ=0.01, betaX=9., betaZ=2.,
B0=1., rho=None, filamentBeam=False, uniformRayDensity=False,
eMin=5000., eMax=15000., eN=51, distE='eV',
xPrimeMax=0.5, zPrimeMax=0.5, nx=25, nz=25, pitch=0, yaw=0):
u"""
*bl*: instance of :class:`~xrt.backends.raycing.BeamLine`
Container for beamline elements. Sourcess are added to its
`sources` list.
*name*: str
User-specified name, can be used for diagnostics output.
*center*: tuple of 3 floats
3D point in global system.
*nrays*: int
The number of rays sampled in one iteration.
*eE*: float
Electron beam energy (GeV).
*eI*: float
Electron beam current (A).
*eEspread*: float
Energy spread relative to the beam energy.
*eSigmaX*, *eSigmaZ*: float
rms horizontal and vertical electron beam sizes (µm).
Alternatively, betatron functions can be specified instead of the
electron beam sizes.
*eEpsilonX*, *eEpsilonZ*: float
Horizontal and vertical electron beam emittance (nm rad).
*betaX*, *betaZ*: float
Betatron function (m). Alternatively, beam size can be specified.
*B0*: float
Magnetic field (T). Alternatively, specify *rho*.
*rho*: float
Curvature radius (m). Alternatively, specify *B0*.
*eMin*, *eMax*: float
Minimum and maximum photon energy (eV).
*eN*: int
Number of photon energy intervals, used only in the test suit,
not required in ray tracing
*distE*: 'eV' or 'BW'
The resulted flux density is per 1 eV or 0.1% bandwidth. For ray
tracing 'eV' is used.
*xPrimeMax*, *zPrimeMax*: float
Horizontal and vertical acceptance (mrad).
*nx*, *nz*: int
Number of intervals in the horizontal and vertical directions,
used only in the test suit, not required in ray tracing.
*filamentBeam*: bool
If True the source generates coherent monochromatic wavefronts.
Required for the wave propagation calculations.
*pitch*, *yaw*: float
rotation angles around x and z axis. Useful for canted sources and
declined electron beams.
"""
self.Ee = eE
self.gamma = self.Ee * 1e9 * EV2ERG / (M0 * C**2)
if isinstance(self, Wiggler):
self.B = K2B * self.K / self.L0
self.ro = M0 * C**2 * self.gamma / self.B / E0 / 1e6
self.X0 = 0.5 * self.K * self.L0 / self.gamma / PI
self.isMPW = True
else:
self.Np = 0.5
self.B = B0
self.ro = rho
if self.ro:
if not self.B:
self.B = M0 * C**2 * self.gamma / self.ro / E0 / 1e6
elif self.B:
self.ro = M0 * C**2 * self.gamma / self.B / E0 / 1e6
self.isMPW = False
self.bl = bl
if bl is not None:
if self not in bl.sources:
bl.sources.append(self)
self.ordinalNum = len(bl.sources)
raycing.set_name(self, name)
# if name in [None, 'None', '']:
# self.name = '{0}{1}'.format(self.__class__.__name__,
# self.ordinalNum)
# else:
# self.name = name
self.center = center # 3D point in global system
self.nrays = np.long(nrays)
self.dx = eSigmaX * 1e-3 if eSigmaX else None
self.dz = eSigmaZ * 1e-3 if eSigmaZ else None
self.eEpsilonX = eEpsilonX
self.eEpsilonZ = eEpsilonZ
self.I0 = eI
self.eEspread = eEspread
self.eMin = float(eMin)
self.eMax = float(eMax)
if bl is not None:
if self.bl.flowSource != 'Qook':
bl.oesDict[self.name] = [self, 0]
xPrimeMax = raycing.auto_units_angle(xPrimeMax) * 1e3 if\
isinstance(xPrimeMax, raycing.basestring) else xPrimeMax
zPrimeMax = raycing.auto_units_angle(zPrimeMax) * 1e3 if\
isinstance(zPrimeMax, raycing.basestring) else zPrimeMax
self.xPrimeMax = xPrimeMax * 1e-3 if xPrimeMax else None
self.zPrimeMax = zPrimeMax * 1e-3 if zPrimeMax else None
self.betaX = betaX
self.betaZ = betaZ
self.eN = eN + 1
self.nx = 2*nx + 1
self.nz = 2*nz + 1
self.xs = np.linspace(-self.xPrimeMax, self.xPrimeMax, self.nx)
self.zs = np.linspace(-self.zPrimeMax, self.zPrimeMax, self.nz)
self.energies = np.linspace(eMin, eMax, self.eN)
self.distE = distE
self.mode = 1
self.uniformRayDensity = uniformRayDensity
self.filamentBeam = filamentBeam
self.pitch = raycing.auto_units_angle(pitch)
self.yaw = raycing.auto_units_angle(yaw)
if (self.dx is None) and (self.betaX is not None):
self.dx = np.sqrt(self.eEpsilonX * self.betaX * 0.001)
elif (self.dx is None) and (self.betaX is None):
print("Set either eSigmaX or betaX!")
if (self.dz is None) and (self.betaZ is not None):
self.dz = np.sqrt(self.eEpsilonZ * self.betaZ * 0.001)
elif (self.dz is None) and (self.betaZ is None):
print("Set either eSigmaZ or betaZ!")
dxprime, dzprime = None, None
if dxprime:
self.dxprime = dxprime
else:
self.dxprime = 1e-6 * self.eEpsilonX /\
self.dx if self.dx > 0 else 0. # [rad]
if dzprime:
self.dzprime = dzprime
else:
self.dzprime = 1e-6 * self.eEpsilonZ /\
self.dz if self.dx > 0 else 0. # [rad]
self.gamma2 = self.gamma**2
"""" K2B: Conversion of Deflection parameter to magnetic field [T]
for the period in [mm]"""
# self.c_E = 0.0075 * HPLANCK * C * self.gamma**3 / PI / EV2ERG
# self.c_3 = 40. * PI * E0 * EV2ERG * self.I0 /\
# (np.sqrt(3) * HPLANCK * HPLANCK * C * self.gamma2) * \
# 200. * EV2ERG / (np.sqrt(3) * HPLANCK * C * self.gamma2)
mE = self.eN
mTheta = self.nx
mPsi = self.nz
if self.isMPW: # xPrimeMaxAutoReduce
xPrimeMaxTmp = self.K / self.gamma
if self.xPrimeMax > xPrimeMaxTmp:
print("Reducing xPrimeMax from {0} down to {1} mrad".format(
self.xPrimeMax * 1e3, xPrimeMaxTmp * 1e3))
self.xPrimeMax = xPrimeMaxTmp
self.Theta_min = float(-self.xPrimeMax)
self.Psi_min = float(-self.zPrimeMax)
self.Theta_max = float(self.xPrimeMax)
self.Psi_max = float(self.zPrimeMax)
self.E_min = float(np.min(self.energies))
self.E_max = float(np.max(self.energies))
self.dE = (self.E_max-self.E_min) / float(mE-1)
self.dTheta = (self.Theta_max-self.Theta_min) / float(mTheta-1)
self.dPsi = (self.Psi_max-self.Psi_min) / float(mPsi-1)
"""Trying to find real maximum of the flux density"""
E0fit = 0.5 * (self.E_max+self.E_min)
precalc = True
rMax = self.nrays
if precalc:
rE = np.random.uniform(self.E_min, self.E_max, rMax)
rTheta = np.random.uniform(0., self.Theta_max, rMax)
rPsi = np.random.uniform(0., self.Psi_max, rMax)
DistI = self.build_I_map(rE, rTheta, rPsi)[0]
f_max = np.amax(DistI)
a_max = np.argmax(DistI)
NZ = np.ceil(np.max(rPsi[np.where(DistI > 0)[0]]) / self.dPsi) *\
self.dPsi
self.zPrimeMax = min(self.zPrimeMax, NZ)
self.Psi_max = float(self.zPrimeMax)
initial_x = [(rE[a_max]-E0fit) * 1e-5,
rTheta[a_max] * 1e3, rPsi[a_max] * 1e3]
else:
xE, xTheta, xPsi = np.mgrid[
self.E_min:self.E_max + 0.5*self.dE:self.dE,
self.Theta_min:self.Theta_max + 0.5*self.dTheta:self.dTheta,
self.Psi_min:self.Psi_max + 0.5*self.dPsi:self.dPsi]
DistI = self.build_I_map(xE, xTheta, xPsi)[0]
f_max = np.amax(DistI)
initial_x = [
(self.E_min + 0.6 * mE * self.dE - E0fit) * 1e-5,
(self.Theta_min + 0.6 * mTheta * self.dTheta) * 1e3,
(self.Psi_min + 0.6 * self.dPsi * mPsi) * 1e3]
bounds_x = [
((self.E_min - E0fit) * 1e-5, (self.E_max - E0fit) * 1e-5),
(0, self.Theta_max * 1e3),
(0, self.Psi_max * 1e3)]
def int_fun(x):
return -1. * (self.build_I_map(x[0] * 1e5 + E0fit,
x[1] * 1e-3,
x[2] * 1e-3)[0]) / f_max
res = optimize.fmin_slsqp(int_fun, initial_x,
bounds=bounds_x,
acc=1e-12,
iter=1000,
epsilon=1.e-8,
full_output=1,
iprint=0)
self.Imax = max(-1 * int_fun(res[0]) * f_max, f_max)
if self.filamentBeam:
self.nrepmax = np.floor(rMax / len(np.where(
self.Imax * np.random.rand(rMax) < DistI)[0]))
"""Preparing to calculate the total flux integral"""
self.xzE = 4 * (self.E_max-self.E_min) * self.Theta_max * self.Psi_max
self.fluxConst = self.Imax * self.xzE
def prefix_save_name(self):
return '3-BM-xrt'
    def build_I_map(self, dde, ddtheta, ddpsi):
        """Return (flux density, s-amplitude, p-amplitude) of bending-magnet
        radiation at photon energy *dde* (eV) and observation angles
        *ddtheta* (horizontal) and *ddpsi* (vertical), in rad.

        Inputs may be scalars or equally shaped arrays; the three returned
        values have the broadcast shape of the inputs.
        """
        # divisions by w_cr and dde can produce inf/nan; those entries are
        # masked with np.where(np.isfinite(...)) below, so silence numpy here
        np.seterr(invalid='ignore')
        np.seterr(divide='ignore')
        gamma = self.gamma
        if self.eEspread > 0:
            # sample the relative electron energy spread: one gamma per ray
            if np.array(dde).shape:
                if dde.shape[0] > 1:
                    gamma += np.random.normal(0, gamma*self.eEspread,
                                              dde.shape)
            gamma2 = gamma**2
        else:
            gamma2 = self.gamma2
        # critical frequency of the magnet field self.B
        w_cr = 1.5 * gamma2 * self.B * SIE0 / SIM0
        if self.isMPW:  # multipole wiggler: field varies along the period
            # arccos argument > 1 gives nan; masked on the next line
            w_cr *= np.sin(np.arccos(ddtheta * gamma / self.K))
        w_cr = np.where(np.isfinite(w_cr), w_cr, 0.)
        gammapsi = gamma * ddpsi
        gamma2psi2p1 = gammapsi**2 + 1
        eta = 0.5 * dde * E2W / w_cr * gamma2psi2p1**1.5
        # common prefactor of both polarization amplitudes
        ampSP = -0.5j * SQ3 / PI * gamma * dde * E2W / w_cr * gamma2psi2p1
        # modified Bessel functions K_{2/3} (sigma) and K_{1/3} (pi)
        ampS = ampSP * special.kv(2./3., eta)
        ampP = 1j * gammapsi * ampSP * special.kv(1./3., eta) /\
            np.sqrt(gamma2psi2p1)
        ampS = np.where(np.isfinite(ampS), ampS, 0.)
        ampP = np.where(np.isfinite(ampP), ampP, 0.)
        # normalization: per 0.1% bandwidth ('BW') or per eV
        bwFact = 0.001 if self.distE == 'BW' else 1./dde
        Amp2Flux = FINE_STR * bwFact * self.I0 / SIE0 * 2 * self.Np
        np.seterr(invalid='warn')
        np.seterr(divide='warn')
        return (Amp2Flux * (np.abs(ampS)**2 + np.abs(ampP)**2),
                np.sqrt(Amp2Flux) * ampS,
                np.sqrt(Amp2Flux) * ampP)
def intensities_on_mesh(self, energy='auto', theta='auto', psi='auto'):
if isinstance(energy, str):
energy = np.mgrid[self.E_min:self.E_max + 0.5*self.dE:self.dE]
if isinstance(theta, str):
theta = np.mgrid[
self.Theta_min:self.Theta_max + 0.5*self.dTheta:self.dTheta]
if isinstance(psi, str):
psi = np.mgrid[self.Psi_min:self.Psi_max + 0.5*self.dPsi:self.dPsi]
xE, xTheta, xPsi = np.meshgrid(energy, theta, psi, indexing='ij')
self.Itotal, ampS, ampP = self.build_I_map(xE, xTheta, xPsi)
self.Is = (ampS * np.conj(ampS)).real
self.Ip = (ampP * np.conj(ampP)).real
self.Isp = ampS * np.conj(ampP)
s0 = self.Is + self.Ip
with np.errstate(divide='ignore'):
Pol1 = np.where(s0, (self.Is - self.Ip) / s0, s0)
Pol3 = np.where(s0, 2. * self.Isp / s0, s0)
return (self.Itotal, Pol1, self.Is*0., Pol3)
def shine(self, toGlobal=True, withAmplitudes=True, fixedEnergy=False,
accuBeam=None):
u"""
Returns the source beam. If *toGlobal* is True, the output is in
the global system. If *withAmplitudes* is True, the resulted beam
contains arrays Es and Ep with the *s* and *p* components of the
electric field.
.. Returned values: beamGlobal
"""
if self.bl is not None:
try:
self.bl._alignE = float(self.bl.alignE)
except ValueError:
self.bl._alignE = 0.5 * (self.eMin + self.eMax)
if self.uniformRayDensity:
withAmplitudes = True
bo = None
length = 0
seeded = np.long(0)
seededI = 0.
np.seterr(invalid='warn')
np.seterr(divide='warn')
mcRays = np.long(self.nrays * 1.2) if not self.uniformRayDensity else\
self.nrays
if self.filamentBeam:
if accuBeam is None:
rE = np.random.random_sample() *\
float(self.E_max - self.E_min) + self.E_min
if self.isMPW:
sigma_r2 = 2 * (CHeVcm/rE*10*self.L0*self.Np) / PI2**2
sourceSIGMAx = self.dx
sourceSIGMAz = self.dz
rTheta0 = np.random.random_sample() *\
(self.Theta_max - self.Theta_min) + self.Theta_min
ryNp = 0.5 * self.L0 *\
(np.arccos(rTheta0 * self.gamma / self.K) / PI) +\
0.5 * self.L0 *\
np.random.random_integers(0, int(2*self.Np - 1))
rY = ryNp - 0.5*self.L0*self.Np
if (ryNp - 0.25*self.L0 <= 0):
rY += self.L0*self.Np
rX = self.X0 * np.sin(PI2 * rY / self.L0) +\
sourceSIGMAx * np.random.standard_normal()
rY -= 0.25 * self.L0
rZ = sourceSIGMAz * np.random.standard_normal()
else:
rZ = self.dz * np.random.standard_normal()
rTheta0 = np.random.random_sample() *\
(self.Theta_max - self.Theta_min) + self.Theta_min
R1 = self.dx * np.random.standard_normal() +\
self.ro * 1000.
rX = -R1 * np.cos(rTheta0) + self.ro*1000.
rY = R1 * np.sin(rTheta0)
dtheta = self.dxprime * np.random.standard_normal()
dpsi = self.dzprime * np.random.standard_normal()
else:
rE = accuBeam.E[0]
rX = accuBeam.x[0]
rY = accuBeam.y[0]
rZ = accuBeam.z[0]
dtheta = accuBeam.filamentDtheta
dpsi = accuBeam.filamentDpsi
if fixedEnergy:
rE = fixedEnergy
nrep = 0
rep_condition = True
# while length < self.nrays:
while rep_condition:
"""Preparing 4 columns of random numbers
0: Energy
1: Theta / horizontal
2: Psi / vertical
3: Monte-Carlo discriminator"""
rnd_r = np.random.rand(mcRays, 4)
seeded += mcRays
if self.filamentBeam:
# print(self.Theta_min, rTheta0 - 1. / self.gamma)
rThetaMin = np.max((self.Theta_min, rTheta0 - 1. / self.gamma))
rThetaMax = np.min((self.Theta_max, rTheta0 + 1. / self.gamma))
rTheta = (rnd_r[:, 1]) * (rThetaMax - rThetaMin) +\
rThetaMin
rE *= np.ones(mcRays)
else:
rE = rnd_r[:, 0] * float(self.E_max - self.E_min) +\
self.E_min
rTheta = (rnd_r[:, 1]) * (self.Theta_max - self.Theta_min) +\
self.Theta_min
rPsi = rnd_r[:, 2] * (self.Psi_max - self.Psi_min) +\
self.Psi_min
Intensity, mJss, mJpp = self.build_I_map(rE, rTheta, rPsi)
if self.uniformRayDensity:
seededI += self.nrays * self.xzE
else:
seededI += Intensity.sum() * self.xzE
if self.uniformRayDensity:
I_pass = slice(None)
npassed = mcRays
else:
I_pass =\
np.where(self.Imax * rnd_r[:, 3] < Intensity)[0]
npassed = len(I_pass)
if npassed == 0:
print('No good rays in this seed!'
' {0} of {1} rays in total so far...'.format(
length, self.nrays))
continue
bot = Beam(npassed, withAmplitudes=withAmplitudes)
bot.state[:] = 1 # good
bot.E[:] = rE[I_pass]
Theta0 = rTheta[I_pass]
Psi0 = rPsi[I_pass]
if not self.filamentBeam:
if self.dxprime > 0:
dtheta = np.random.normal(0, self.dxprime, npassed)
else:
dtheta = 0
if not self.isMPW:
dtheta += np.random.normal(0, 1/self.gamma, npassed)
if self.dzprime > 0:
dpsi = np.random.normal(0, self.dzprime, npassed)
else:
dpsi = 0
bot.a[:] = np.tan(Theta0 + dtheta)
bot.c[:] = np.tan(Psi0 + dpsi)
intensS = (mJss[I_pass] * np.conj(mJss[I_pass])).real
intensP = (mJpp[I_pass] * np.conj(mJpp[I_pass])).real
if self.uniformRayDensity:
sSP = 1.
else:
sSP = intensS + intensP
# as by Walker and by Ellaume; SPECTRA's value is two times
# smaller:
if self.isMPW:
sigma_r2 = 2 * (CHeVcm/bot.E*10 * self.L0*self.Np) / PI2**2
bot.sourceSIGMAx = np.sqrt(self.dx**2 + sigma_r2)
bot.sourceSIGMAz = np.sqrt(self.dz**2 + sigma_r2)
if self.filamentBeam:
bot.z[:] = rZ
bot.x[:] = rX
bot.y[:] = rY
else:
bot.y[:] = ((np.arccos(Theta0*self.gamma/self.K) / PI) +
np.random.randint(
-int(self.Np), int(self.Np), npassed) -
0.5) * 0.5 * self.L0
bot.x[:] = self.X0 * np.sin(PI2 * bot.y / self.L0) +\
np.random.normal(0., bot.sourceSIGMAx, npassed)
bot.z[:] = np.random.normal(0., bot.sourceSIGMAz, npassed)
bot.Jsp[:] = np.zeros(npassed)
else:
if self.filamentBeam:
bot.z[:] = rZ
bot.x[:] = rX
bot.y[:] = rY
else:
if self.dz > 0:
bot.z[:] = np.random.normal(0., self.dz, npassed)
if self.dx > 0:
R1 = np.random.normal(self.ro*1e3, self.dx, npassed)
else:
R1 = self.ro * 1e3
bot.x[:] = -R1 * np.cos(Theta0) + self.ro*1000.
bot.y[:] = R1 * np.sin(Theta0)
bot.Jsp[:] = np.array(
np.where(sSP,
mJss[I_pass] * np.conj(mJpp[I_pass]) / sSP,
sSP), dtype=complex)
bot.Jss[:] = np.where(sSP, intensS / sSP, sSP)
bot.Jpp[:] = np.where(sSP, intensP / sSP, sSP)
if withAmplitudes:
bot.Es[:] = mJss[I_pass]
bot.Ep[:] = mJpp[I_pass]
if bo is None:
bo = bot
else:
bo.concatenate(bot)
length = len(bo.a)
if raycing._VERBOSITY_ > 0:
print("{0} rays of {1}".format(length, self.nrays))
try:
if self.bl is not None:
if self.bl.flowSource == 'Qook' and\
self.bl.statusSignal is not None:
ptg = (self.bl.statusSignal[1] +
float(length) / float(self.nrays)) /\
self.bl.statusSignal[2]
self.bl.statusSignal[0].emit(
(ptg, self.bl.statusSignal[3]))
except:
pass
if self.filamentBeam:
nrep += 1
rep_condition = nrep < self.nrepmax
else:
rep_condition = length < self.nrays
if self.uniformRayDensity:
rep_condition = False
if raycing._VERBOSITY_ > 0:
sys.stdout.flush()
if length >= self.nrays:
bo.accepted = length * self.fluxConst
bo.acceptedE = bo.E.sum() * self.fluxConst * SIE0
bo.seeded = seeded
bo.seededI = seededI
if length > self.nrays and not self.filamentBeam:
bo.filter_by_index(slice(0, np.long(self.nrays)))
if self.filamentBeam:
bo.filamentDtheta = dtheta
bo.filamentDpsi = dpsi
norm = np.sqrt(bo.a**2 + 1.0 + bo.c**2)
bo.a /= norm
bo.b /= norm
bo.c /= norm
if self.pitch or self.yaw:
raycing.rotate_beam(bo, pitch=self.pitch, yaw=self.yaw)
if toGlobal: # in global coordinate system:
raycing.virgin_local_to_global(self.bl, bo, self.center)
raycing.append_to_flow(self.shine, [bo],
inspect.currentframe())
return bo
class Wiggler(BendingMagnet):
    u"""
    Wiggler source. The computation is reasonably fast and thus a GPU
    is not required and is not implemented.
    """
    hiddenParams = ['B0', 'rho']

    def __init__(self, *args, **kwargs):
        u"""Parameters are the same as in BendingMagnet except *B0* and *rho*
        which are not required and additionally:

        .. warning::
            If you change *K* outside of the constructor, invoke
            ``your_wiggler_instance.reset()``.

        *K*: float
            Deflection parameter

        *period*: float
            period length in mm.

        *n*: int
            Number of periods.
        """
        self.K = kwargs.pop('K', 8.446)
        self.L0 = kwargs.pop('period', 50)
        self.Np = kwargs.pop('n', 40)
        name = kwargs.pop('name', 'wiggler')
        kwargs['name'] = name
        super(Wiggler, self).__init__(*args, **kwargs)
        self.reset()

    def prefix_save_name(self):
        """File-name prefix used when saving results of this source."""
        return '2-Wiggler-xrt'

    def reset(self):
        """Needed for changing *K* after instantiation."""
        self.B = K2B * self.K / self.L0  # peak field from K and period
        self.ro = M0 * C**2 * self.gamma / self.B / E0 / 1e6
        self.X0 = 0.5 * self.K * self.L0 / self.gamma / PI

    def power_vs_K(self, energy, theta, psi, Ks):
        u"""
        Calculates *power curve* -- total power in W at given K values (*Ks*).
        The power is calculated through the aperture defined by *theta* and
        *psi* opening angles within the *energy* range.

        Returns a 1D array corresponding to *Ks*.
        """
        try:
            dtheta, dpsi, dE = \
                theta[1] - theta[0], psi[1] - psi[0], energy[1] - energy[0]
        except TypeError:
            # scalar inputs: unit integration weights
            dtheta, dpsi, dE = 1, 1, 1
        tmpK = self.K
        powers = []
        for iK, K in enumerate(Ks):
            if raycing._VERBOSITY_ > 10:
                print("K={0}, {1} of {2}".format(K, iK+1, len(Ks)))
            self.K = K
            self.reset()
            I0 = self.intensities_on_mesh(energy, theta, psi)[0]
            if self.distE == 'BW':
                I0 *= 1e3
            else:  # 'eV'
                I0 *= energy[:, np.newaxis, np.newaxis]
            power = I0.sum() * dtheta * dpsi * dE * EV2ERG * 1e-7  # [W]
            powers.append(power)
        self.K = tmpK
        # BUG FIX: re-derive B, ro and X0 for the restored K. Previously the
        # derived attributes kept the values of the last scanned K (compare
        # with Undulator.power_vs_K / tuning_curves, which reset here).
        self.reset()
        return np.array(powers)
class Undulator(object):
u"""
Undulator source. The computation is volumnous and thus requires a GPU.
"""
def __init__(self, bl=None, name='und', center=(0, 0, 0),
nrays=raycing.nrays,
eE=6.0, eI=0.1, eEspread=0., eSigmaX=None, eSigmaZ=None,
eEpsilonX=1., eEpsilonZ=0.01, betaX=20., betaZ=5.,
period=50, n=50, K=10., Kx=0, Ky=0., phaseDeg=0,
taper=None, R0=None, targetE=None, customField=None,
eMin=5000., eMax=15000., eN=51, distE='eV',
xPrimeMax=0.5, zPrimeMax=0.5, nx=25, nz=25,
xPrimeMaxAutoReduce=True, zPrimeMaxAutoReduce=True,
gp=1e-2, gIntervals=1, nRK=30,
uniformRayDensity=False, filamentBeam=False,
targetOpenCL=raycing.targetOpenCL,
precisionOpenCL=raycing.precisionOpenCL,
pitch=0, yaw=0):
u"""
.. warning::
If you change any undulator parameter outside of the constructor,
invoke ``your_undulator_instance.reset()``.
*bl*: instance of :class:`~xrt.backends.raycing.BeamLine`
Container for beamline elements. Sourcess are added to its
`sources` list.
*name*: str
User-specified name, can be used for diagnostics output.
*center*: tuple of 3 floats
3D point in global system.
*nrays*: int
The number of rays sampled in one iteration.
*eE*: float
Electron beam energy (GeV).
*eI*: float
Electron beam current (A).
*eEspread*: float
Energy spread relative to the beam energy, rms.
*eSigmaX*, *eSigmaZ*: float
rms horizontal and vertical electron beam sizes (µm).
Alternatively, betatron functions can be specified instead of the
electron beam sizes.
*eEpsilonX*, *eEpsilonZ*: float
Horizontal and vertical electron beam emittance (nm rad).
*betaX*, *betaZ*:
Betatron function (m). Alternatively, beam size can be specified.
*period*, *n*:
Magnetic period (mm) length and number of periods.
*K*, *Kx*, *Ky*: float
Deflection parameter for the vertical field or for an elliptical
undulator.
*phaseDeg*: float
Phase difference between horizontal and vertical magnetic arrays.
Used in the elliptical case where it should be equal to 90 or -90.
*taper*: tuple(dgap(mm), gap(mm))
Linear variation in undulator gap. None if tapering is not used.
Tapering should be used only with pyopencl.
*R0*: float
Distance center-to-screen for the near field calculations (mm).
If None, the far field approximation (i.e. "usual" calculations) is
used. Near field calculations should be used only with pyopencl.
Here, a GPU can be much faster than a CPU.
*targetE*: a tuple (Energy, harmonic{, isElliptical})
Can be given for automatic calculation of the deflection parameter.
If isElliptical is not given, it is assumed as False (as planar).
*customField*: float or str or tuple(fileName, kwargs)
If given, adds a constant longitudinal field or a table of field
samples given as an Excel file or as a column text file. If given
as a tuple or list, the 2nd member is a key word dictionary for
reading Excel by :meth:`pandas.read_excel()` or reading text file
by :meth:`numpy.loadtxt()`, e.g. ``dict(skiprows=4)`` for skipping
the file header. The file must contain the columns with
longitudinal coordinate in mm, B_hor, B_ver {, B_long}, all in T.
*nRK*: int
Size of the Runge-Kutta integration grid per each interval between
Gauss-Legendre integration nodes (only valid if customField is not
None).
*eMin*, *eMax*: float
Minimum and maximum photon energy (eV). Used as band width for flux
calculation.
*eN*: int
Number of photon energy intervals, used only in the test suit, not
required in ray tracing.
*distE*: 'eV' or 'BW'
The resulted flux density is per 1 eV or 0.1% bandwidth. For ray
tracing 'eV' is used.
*xPrimeMax*, *zPrimeMax*:
Horizontal and vertical acceptance (mrad).
.. note::
The Monte Carlo sampling of the rays having their density
proportional to the beam intensity can be extremely inefficient
for sharply peaked distributions, like the undulator angular
density distribution. It is therefore very important to
restrict the sampled angular acceptance down to very small
angles. Use this source only with reasonably small *xPrimeMax*
and *zPrimeMax*!
.. warning::
If you change these parameters outside of the constructor,
interpret them in *rad*; in the constructor they are given in
*mrad*. This awkwardness is kept for version compatibility.
*nx*, *nz*: int
Number of intervals in the horizontal and vertical directions,
used only in the test suit, not required in ray tracing.
*xPrimeMaxAutoReduce*, *zPrimeMaxAutoReduce*: bool
Whether to reduce too large angular ranges down to the feasible
values in order to improve efficiency. It is highly recommended to
keep them True.
*gp*: float
Defines the precision of the Gauss integration.
*gIntervals*: int
Integral of motion is divided by gIntervals to reduce the order of
Gauss-Legendre quadrature. Default value of 1 is usually enough for
a conventional undulator. For extreme cases (wigglers, near field,
wide angles) this value can be set to the order of few hundreds to
achieve the convergence of the integral. Large values can
significantly increase the calculation time and RAM consumption
especially if OpenCL is not used.
*uniformRayDensity*: bool
If True, the radiation is sampled uniformly but with varying
amplitudes, otherwise with the density proportional to intensity
and with constant amplitudes. Required as True for wave propagation
calculations. False is usual for ray-tracing.
*filamentBeam*: bool
If True the source generates coherent monochromatic wavefronts.
Required as True for the wave propagation calculations in partially
coherent regime.
*targetOpenCL*: None, str, 2-tuple or tuple of 2-tuples
assigns the device(s) for OpenCL accelerated calculations. Accepts
the following values:
1) a tuple (iPlatform, iDevice) of indices in the
lists cl.get_platforms() and platform.get_devices(), see the
section :ref:`calculations_on_GPU`. None, if pyopencl is not
wanted. Ignored if pyopencl is not installed.
2) a tuple of tuples ((iP1, iD1),..,(iPn, iDn)) to assign specific
devices from one or multiple platforms.
3) int iPlatform - assigns all devices found at the given platform.
4) 'GPU' - lets the program scan the system and select all found
GPUs.
5) 'CPU' - similar to 'GPU'. If one CPU exists in multiple
platforms the program tries to select the vendor-specific driver.
6) 'other' - similar to 'GPU', used for Intel PHI and other OpenCL-
capable accelerator boards.
7) 'all' - lets the program scan the system and assign all found
devices. Not recommended, since the performance will be limited by
the slowest device.
8) 'auto' - lets the program scan the system and make an assignment
according to the priority list: 'GPU', 'other', 'CPU' or None if no
devices were found. Used by default.
.. warning::
A good graphics or dedicated accelerator card is highly
recommended! Special cases as wigglers by the undulator code,
near field, wide angles and tapering are hardly doable on CPU.
.. note::
Consider the :ref:`warnings and tips <usage_GPU_warnings>` on
using xrt with GPUs.
*precisionOpenCL*: 'float32' or 'float64', only for GPU.
Single precision (float32) should be enough in most cases. The
calculations with doube precision are much slower. Double precision
may be unavailable on your system.
*pitch*, *yaw*: float
rotation angles around x and z axis. Useful for canted sources.
"""
self.bl = bl
if bl is not None:
if self not in bl.sources:
bl.sources.append(self)
self.ordinalNum = len(bl.sources)
raycing.set_name(self, name)
# if name in [None, 'None', '']:
# self.name = '{0}{1}'.format(self.__class__.__name__,
# self.ordinalNum)
# else:
# self.name = name
self.center = center # 3D point in global system
self.nrays = np.long(nrays)
self.gp = gp
self.dx = eSigmaX * 1e-3 if eSigmaX else None
self.dz = eSigmaZ * 1e-3 if eSigmaZ else None
self.eEpsilonX = eEpsilonX * 1e-6
self.eEpsilonZ = eEpsilonZ * 1e-6
self.Ee = float(eE)
self.eEspread = eEspread
self.I0 = float(eI)
self.eMin = float(eMin)
self.eMax = float(eMax)
if bl is not None:
if self.bl.flowSource != 'Qook':
bl.oesDict[self.name] = [self, 0]
xPrimeMax = raycing.auto_units_angle(xPrimeMax) * 1e3 if\
isinstance(xPrimeMax, raycing.basestring) else xPrimeMax
zPrimeMax = raycing.auto_units_angle(zPrimeMax) * 1e3 if\
isinstance(zPrimeMax, raycing.basestring) else zPrimeMax
self.xPrimeMax = xPrimeMax * 1e-3 # if xPrimeMax else None
self.zPrimeMax = zPrimeMax * 1e-3 # if zPrimeMax else None
self.betaX = betaX * 1e3
self.betaZ = betaZ * 1e3
self.eN = eN + 1
self.nx = 2*nx + 1
self.nz = 2*nz + 1
self.xs = np.linspace(-self.xPrimeMax, self.xPrimeMax, self.nx)
self.zs = np.linspace(-self.zPrimeMax, self.zPrimeMax, self.nz)
self.energies = np.linspace(eMin, eMax, self.eN)
self.distE = distE
self.uniformRayDensity = uniformRayDensity
self.filamentBeam = filamentBeam
self.pitch = raycing.auto_units_angle(pitch)
self.yaw = raycing.auto_units_angle(yaw)
self.gIntervals = gIntervals
self.L0 = period
self.R0 = R0 if R0 is None else R0 + self.L0*0.25
self.nRK = nRK
self.trajectory = None
fullLength = False # NOTE maybe a future input parameter
self.full = fullLength
if fullLength:
# self.filamentBeam = True
self.theta0 = 0
self.psi0 = 0
self.cl_ctx = None
if (self.R0 is not None):
precisionOpenCL = 'float64'
if targetOpenCL is not None:
if not isOpenCL:
print("pyopencl is not available!")
else:
self.ucl = mcl.XRT_CL(
r'undulator.cl', targetOpenCL, precisionOpenCL)
if self.ucl.lastTargetOpenCL is not None:
self.cl_precisionF = self.ucl.cl_precisionF
self.cl_precisionC = self.ucl.cl_precisionC
self.cl_queue = self.ucl.cl_queue
self.cl_ctx = self.ucl.cl_ctx
self.cl_program = self.ucl.cl_program
self.cl_mf = self.ucl.cl_mf
self.cl_is_blocking = self.ucl.cl_is_blocking
# self.mode = 1
if (self.dx is None) and (self.betaX is not None):
self.dx = np.sqrt(self.eEpsilonX*self.betaX)
elif (self.dx is None) and (self.betaX is None):
print("Set either dx or betaX!")
if (self.dz is None) and (self.betaZ is not None):
self.dz = np.sqrt(self.eEpsilonZ*self.betaZ)
elif (self.dz is None) and (self.betaZ is None):
print("Set either dz or betaZ!")
dxprime, dzprime = None, None
if dxprime:
self.dxprime = dxprime
else:
self.dxprime = self.eEpsilonX / self.dx if self.dx > 0\
else 0. # [rad]
if dzprime:
self.dzprime = dzprime
else:
self.dzprime = self.eEpsilonZ / self.dz if self.dz > 0\
else 0. # [rad]
if raycing._VERBOSITY_ > 10:
print('dx = {0} mm'.format(self.dx))
print('dz = {0} mm'.format(self.dz))
print('dxprime = {0} rad'.format(self.dxprime))
print('dzprime = {0} rad'.format(self.dzprime))
self.gamma = self.Ee * 1e9 * EV2ERG / (M0 * C**2)
self.gamma2 = self.gamma**2
if targetE is not None:
K = np.sqrt(targetE[1] * 8 * PI * C * 10 * self.gamma2 /
period / targetE[0] / E2W - 2)
if raycing._VERBOSITY_ > 10:
print("K = {0}".format(K))
if np.isnan(K):
raise ValueError("Cannot calculate K, try to increase the "
"undulator harmonic number")
if len(targetE) > 2:
isElliptical = targetE[2]
if isElliptical:
Kx = Ky = K / 2**0.5
if raycing._VERBOSITY_ > 10:
print("Kx = Ky = {0}".format(Kx))
self.Kx = Kx
self.Ky = Ky
self.K = K
phaseDeg = np.degrees(raycing.auto_units_angle(phaseDeg)) if\
isinstance(phaseDeg, raycing.basestring) else phaseDeg
self.phase = np.radians(phaseDeg)
self.Np = n
if taper is not None:
self.taper = taper[0] / self.Np / self.L0 / taper[1]
self.gap = taper[1]
else:
self.taper = None
if self.Kx == 0 and self.Ky == 0:
self.Ky = self.K
self._initialK = self.K
self.B0x = K2B * self.Kx / self.L0
self.B0y = K2B * self.Ky / self.L0
self.customField = customField
if customField is not None:
self.gIntervals *= 2
if isinstance(customField, (tuple, list)):
fname = customField[0]
kwargs = customField[1]
elif isinstance(customField, (float, int)):
fname = None
self.customFieldData = customField
if customField > 0:
betaL = 2 * M0*C**2*self.gamma / customField / E0 / 1e6
print("Larmor betatron function = {0} m".format(betaL))
else:
fname = customField
kwargs = {}
if fname:
self.customFieldData = self.read_custom_field(fname, kwargs)
else:
self.customFieldData = None
self.xPrimeMaxAutoReduce = xPrimeMaxAutoReduce
if xPrimeMaxAutoReduce:
xPrimeMaxTmp = self.Ky / self.gamma
if self.xPrimeMax > xPrimeMaxTmp:
print("Reducing xPrimeMax from {0} down to {1} mrad".format(
self.xPrimeMax * 1e3, xPrimeMaxTmp * 1e3))
self.xPrimeMax = xPrimeMaxTmp
self.zPrimeMaxAutoReduce = zPrimeMaxAutoReduce
if zPrimeMaxAutoReduce:
K0 = self.Kx if self.Kx > 0 else 1.
zPrimeMaxTmp = K0 / self.gamma
if self.zPrimeMax > zPrimeMaxTmp:
print("Reducing zPrimeMax from {0} down to {1} mrad".format(
self.zPrimeMax * 1e3, zPrimeMaxTmp * 1e3))
self.zPrimeMax = zPrimeMaxTmp
self.reset()
def read_custom_field(self, fname, kwargs={}):
def my_sin(x, a, k, ph, c):
return a * np.cos(k * x + ph) + c
from scipy.optimize import curve_fit
if fname.endswith('.xls') or fname.endswith('.xlsx'):
import pandas
kwargs['engine'] = "openpyxl"
try:
data = pandas.read_excel(fname, **kwargs).values
except ValueError as e:
print(e)
if 'openpyxl' in str(e):
print('install it as `pip install openpyxl`')
raise e
else:
data = np.loadtxt(fname)
datalen4 = data.shape[0] // 10
minBx = data[datalen4:-datalen4, 1].min()
maxBx = data[datalen4:-datalen4, 1].max()
minBy = data[datalen4:-datalen4, 2].min()
maxBy = data[datalen4:-datalen4, 2].max()
p0 = [(maxBx-minBx)/2., PI2/self.L0, 0.3, 1e-4]
poptx, pcovx = curve_fit(my_sin,
data[datalen4:-datalen4, 0],
data[datalen4:-datalen4, 1],
p0=p0)
if poptx[0] < 0:
poptx[0] *= -1
poptx[2] += PI
p0 = [(maxBy-minBy)/2., PI2/self.L0, 0.3, 1e-4]
popty, pcovy = curve_fit(my_sin,
data[datalen4:-datalen4, 0],
data[datalen4:-datalen4, 2],
p0=p0)
if popty[0] < 0:
popty[0] *= -1
popty[2] += PI
print(poptx)
print(popty)
B0x = poptx[0]
B0y = popty[0]
Kx = B0x * self.L0 / K2B
Ky = B0y * self.L0 / K2B
lambdaUx = PI2 / poptx[1]
lambdaUy = PI2 / popty[1]
phase = poptx[2] - popty[2]
phaseDeg = phase / PI * 180
print("field data in {0}:".format(fname))
print("B0x={0:.3f}T, B0y={1:.3f}T".format(B0x, B0y))
print("Kx={0:.3f}, Ky={1:.3f}".format(Kx, Ky))
print(u"λ_Ux={0:.3f}mm, λ_Uy={1:.3f}mm".format(lambdaUx, lambdaUy))
print(u"phase difference = {0:.3f}deg".format(phaseDeg))
return data
def magnetic_field(self, z): # 'z' in radians
if isinstance(self.customField, (float, int)):
Bx = self.B0x * np.sin(z + self.phase)
By = self.B0y * np.sin(z)
Bz = self.customFieldData * np.ones_like(z)
else:
dataz = self.customFieldData[:, 0] / self.L0 * PI2
Bx = np.interp(z, dataz, self.customFieldData[:, 1])
By = np.interp(z, dataz, self.customFieldData[:, 2])
if self.customFieldData.shape[1] > 3:
Bz = np.interp(z, dataz, self.customFieldData[:, 3])
else:
Bz = np.zeros_like(Bx)
return (Bx, By, Bz)
    def reset(self):
        """This method must be invoked after any changes in the undulator
        parameters."""
        if self._initialK != self.K:  # self.K was modified externally
            self.Ky = self.K
            self._initialK = self.K
        # fundamental photon frequency (converted by E2W) for the current K's
        self.wu = PI * (0.01 * C) / self.L0 / 1e-3 / self.gamma2 * \
            (2*self.gamma2 - 1 - 0.5*self.Kx**2 - 0.5*self.Ky**2) / E2W
        # wnu = 2 * PI * (0.01 * C) / self.L0 / 1e-3 / E2W
        # energy of the first harmonic
        self.E1 = 2*self.wu*self.gamma2 / (1 + 0.5*self.Kx**2 + 0.5*self.Ky**2)
        if raycing._VERBOSITY_ > 10:
            print("E1 = {0}".format(self.E1))
            print("E3 = {0}".format(3*self.E1))
            print("B0 = {0}".format(self.Ky / 0.09336 / self.L0))
            if self.taper is not None:
                print("dB/dx/B = {0}".format(
                    -PI * self.gap * self.taper / self.L0 * 1e3))
        # default mesh sizes (set in __init__)
        mE = self.eN
        mTheta = self.nx
        mPsi = self.nz

        if not self.xPrimeMax:
            print("No Theta range specified, using default 1 mrad")
            self.xPrimeMax = 1e-3

        self.Theta_min = -float(self.xPrimeMax)
        self.Theta_max = float(self.xPrimeMax)
        self.Psi_min = -float(self.zPrimeMax)
        self.Psi_max = float(self.zPrimeMax)
        self.energies = np.linspace(self.eMin, self.eMax, self.eN)
        self.E_min = float(np.min(self.energies))
        self.E_max = float(np.max(self.energies))

        self.dE = (self.E_max - self.E_min) / float(mE - 1)
        self.dTheta = (self.Theta_max - self.Theta_min) / float(mTheta - 1)
        self.dPsi = (self.Psi_max - self.Psi_min) / float(mPsi - 1)

        """Adjusting the number of points for Gauss integration"""
        # self.gp = 1
        # Increase the quadrature order geometrically (1.5**m) until two
        # successive evaluations of the intensity at the mesh corner agree
        # to within self.gp (relative). Energy spread is temporarily zeroed
        # so that the convergence test is deterministic.
        quad_int_error = self.gp * 10.
        self.quadm = 0
        tmpeEspread = self.eEspread
        self.eEspread = 0
        mstart = 5
        m = mstart
        while quad_int_error >= self.gp:
            m += 1
            self.quadm = int(1.5**m)
            if self.cl_ctx is not None:
                # OpenCL path needs array arguments
                # sE = np.linspace(self.E_min, self.E_max, self.eN)
                sE = self.E_max * np.ones(1)
                sTheta_max = self.Theta_max * np.ones(1)
                sPsi_max = self.Psi_max * np.ones(1)
                In = self.build_I_map(sE, sTheta_max, sPsi_max)[0][0]
            else:
                In = self.build_I_map(
                    self.E_max, self.Theta_max, self.Psi_max)[0]
            if m == mstart+1:
                # first sample: nothing to compare against yet
                I2 = In
                continue
            else:
                I1 = I2
                I2 = In
            quad_int_error = np.abs((I2 - I1)/I2)
            if raycing._VERBOSITY_ > 10:
                # NOTE(review): self.ag_n is set elsewhere (not in this
                # view) -- verify it exists whenever _VERBOSITY_ > 10
                print("G = {0}".format(
                    [self.gIntervals, self.quadm, quad_int_error, I2,
                     2-self.ag_n.sum()]))
            if self.quadm > 400:
                # order too high: split the integral into more intervals
                # and restart the order search
                self.gIntervals *= 2
                m = mstart
                quad_int_error = self.gp * 10.
                if self.gIntervals > 100:
                    break
                continue
        """end of Adjusting the number of points for Gauss integration"""
        self.eEspread = tmpeEspread
        if raycing._VERBOSITY_ > 10:
            print("Done with Gaussian optimization, {0} points will be used"
                  " in {1} interval{2}".format(
                      self.quadm, self.gIntervals,
                      's' if self.gIntervals > 1 else ''))

        if self.filamentBeam:
            # estimate Imax and the expected number of repetitions by
            # rejection sampling over the whole acceptance
            rMax = self.nrays
            rE = np.random.uniform(self.E_min, self.E_max, rMax)
            rTheta = np.random.uniform(self.Theta_min, self.Theta_max, rMax)
            rPsi = np.random.uniform(self.Psi_min, self.Psi_max, rMax)
            tmpEspread = self.eEspread
            self.eEspread = 0
            DistI = self.build_I_map(rE, rTheta, rPsi)[0]
            self.Imax = np.max(DistI) * 1.2
            self.nrepmax = np.floor(rMax / len(np.where(
                self.Imax * np.random.rand(rMax) < DistI)[0]))
            self.eEspread = tmpEspread
        else:
            self.Imax = 0.
        """Preparing to calculate the total flux integral"""
        self.xzE = (self.E_max - self.E_min) *\
            (self.Theta_max - self.Theta_min) *\
            (self.Psi_max - self.Psi_min)
        self.fluxConst = self.Imax * self.xzE
def prefix_save_name(self):
if self.Kx > 0:
return '4-elu-xrt'
else:
return '1-und-xrt'
def tuning_curves(self, energy, theta, psi, harmonics, Ks):
"""Calculates *tuning curves* -- maximum flux of given *harmomonics* at
given K values (*Ks*). The flux is calculated through the aperture
defined by *theta* and *psi* opening angles.
Returns two 2D arrays: energy positions and flux values. The rows
correspond to *Ks*, the colums correspond to *harmomonics*.
"""
try:
dtheta, dpsi = theta[1] - theta[0], psi[1] - psi[0]
except TypeError:
dtheta, dpsi = 1, 1
tunesE, tunesF = [], []
tmpKy = self.Ky
for iK, K in enumerate(Ks):
if raycing._VERBOSITY_ > 10:
print("K={0}, {1} of {2}".format(K, iK+1, len(Ks)))
self.Ky = K
self.reset()
I0 = self.intensities_on_mesh(energy, theta, psi, harmonics)[0]
flux = I0.sum(axis=(1, 2)) * dtheta * dpsi
argm = np.argmax(flux, axis=0)
fluxm = np.max(flux, axis=0)
tunesE.append(energy[argm] / 1000.)
tunesF.append(fluxm)
self.Ky = tmpKy
self.reset()
return np.array(tunesE).T, np.array(tunesF).T
def power_vs_K(self, energy, theta, psi, harmonics, Ks):
"""Calculates *power curve* -- total power in W for all *harmomonics*
at given K values (*Ks*). The power is calculated through the aperture
defined by *theta* and *psi* opening angles within the *energy* range.
The result of this numerical integration depends on the used angular
and energy meshes; you should check convergence. Internally, electron
beam energy spread is also sampled by adding another dimension to the
intensity array and making it 5-dimensional. You therefore may want to
set energy spread to zero, it doesn’t affect the resulting power
anyway.
Returns a 1D array corresponding to *Ks*.
"""
try:
dtheta, dpsi, dE = \
theta[1] - theta[0], psi[1] - psi[0], energy[1] - energy[0]
except TypeError:
dtheta, dpsi, dE = 1, 1, 1
tmpKy = self.Ky
powers = []
for iK, K in enumerate(Ks):
if raycing._VERBOSITY_ > 10:
print("K={0}, {1} of {2}".format(K, iK+1, len(Ks)))
self.Ky = K
self.reset()
I0 = self.intensities_on_mesh(energy, theta, psi, harmonics)[0]
if self.distE == 'BW':
I0 *= 1e3
else: # 'eV'
I0 *= energy[:, np.newaxis, np.newaxis, np.newaxis]
power = I0.sum() * dtheta * dpsi * dE * EV2ERG * 1e-7 # [W]
powers.append(power)
self.Ky = tmpKy
self.reset()
return np.array(powers)
    def multi_electron_stack(self, energy='auto', theta='auto', psi='auto',
                             harmonic=None, withElectronDivergence=True):
        """Returns Es and Ep in the shape (energy, theta, psi, [harmonic]).
        Along the 0th axis (energy) are stored "macro-electrons" that emit at
        the photon energy given by *energy* (constant or variable) onto the
        angular mesh given by *theta* and *psi*. The transverse field from each
        macro-electron gets individual random angular offsets dtheta and dpsi
        within the emittance distribution if *withElectronDivergence* is True
        and an individual random shift to gamma within the energy spread.
        The parameter self.filamentBeam is irrelevant for this method."""
        if isinstance(energy, str):  # i.e. if 'auto'
            energy = np.mgrid[self.E_min:self.E_max + 0.5*self.dE:self.dE]
        # number of macro-electrons = number of energy samples (1 if scalar)
        nmacroe = 1 if len(np.array(energy).shape) == 0 else len(energy)
        if isinstance(theta, str):
            theta = np.mgrid[
                self.Theta_min:self.Theta_max + 0.5*self.dTheta:self.dTheta]
        if isinstance(psi, str):
            psi = np.mgrid[self.Psi_min:self.Psi_max + 0.5*self.dPsi:self.dPsi]
        if harmonic is None:
            xH = None
            tomesh = energy, theta, psi
        else:
            tomesh = energy, theta, psi, harmonic
        mesh = np.meshgrid(*tomesh, indexing='ij')
        if withElectronDivergence and self.dxprime > 0:
            # one horizontal angular offset per macro-electron, broadcast
            # over the remaining mesh axes
            dthe = np.random.normal(0, self.dxprime, nmacroe)
            if harmonic is None:
                mesh[1][:, ...] += dthe[:, np.newaxis, np.newaxis]
            else:
                mesh[1][:, ...] += dthe[:, np.newaxis, np.newaxis, np.newaxis]
        if withElectronDivergence and self.dzprime > 0:
            # same for the vertical angle
            dpsi = np.random.normal(0, self.dzprime, nmacroe)
            if harmonic is None:
                mesh[2][:, ...] += dpsi[:, np.newaxis, np.newaxis]
            else:
                mesh[2][:, ...] += dpsi[:, np.newaxis, np.newaxis, np.newaxis]
        if self.eEspread > 0:
            # one gamma shift per macro-electron, expanded to the mesh shape
            spr = np.random.normal(0, self.eEspread, nmacroe) * self.gamma
            dgamma = np.zeros_like(mesh[0])
            if harmonic is None:
                dgamma[:, ...] = spr[:, np.newaxis, np.newaxis]
            else:
                dgamma[:, ...] = spr[:, np.newaxis, np.newaxis, np.newaxis]
            xdGamma = dgamma.ravel()
        else:
            xdGamma = 0
        xE, xTheta, xPsi = mesh[0].ravel(), mesh[1].ravel(), mesh[2].ravel()
        if harmonic is not None:
            xH = mesh[3].ravel()
        if harmonic is None:
            sh = nmacroe, len(theta), len(psi)
        else:
            sh = nmacroe, len(theta), len(psi), len(harmonic)
        # flat evaluation, then reshape the amplitudes back onto the mesh
        res = self.build_I_map(xE, xTheta, xPsi, xH, xdGamma)
        Es = res[1].reshape(sh)
        Ep = res[2].reshape(sh)
        return Es, Ep
def intensities_on_mesh(self, energy='auto', theta='auto', psi='auto',
harmonic=None):
"""Returns the Stokes parameters in the shape (energy, theta, psi,
[harmonic]), with *theta* being the horizontal mesh angles and *psi*
the vertical mesh angles. Each one of the input parameters is a 1D
array of an individually selectable length.
.. note::
We do not provide any internal mesh optimization, as mesh functions
are not our core objectives. In particular, the angular meshes must
be wider than the electron beam divergences in order to convolve the
field distribution with the electron distribution. A warning will be
printed (new in version 1.3.4) if the requested meshes are too
narrow.
"""
if isinstance(energy, str): # i.e. if 'auto'
energy = np.mgrid[self.E_min:self.E_max + 0.5*self.dE:self.dE]
if isinstance(theta, str):
theta = np.mgrid[
self.Theta_min:self.Theta_max + 0.5*self.dTheta:self.dTheta]
if isinstance(psi, str):
psi = np.mgrid[self.Psi_min:self.Psi_max + 0.5*self.dPsi:self.dPsi]
tomesh = [energy, theta, psi]
if harmonic is not None:
tomesh.append(harmonic)
iharmonic = len(tomesh)-1
else:
iharmonic = None
if self.eEspread > 0:
spr = np.linspace(-3.5, 3.5, 36)
dgamma = self.gamma * spr * self.eEspread
wspr = np.exp(-0.5 * spr**2)
wspr /= wspr.sum()
tomesh.append(dgamma)
ispread = len(tomesh)-1
else:
ispread = None
mesh = np.meshgrid(*tomesh, indexing='ij')
xE, xTheta, xPsi = mesh[0].ravel(), mesh[1].ravel(), mesh[2].ravel()
sh = [len(energy), len(theta), len(psi)]
if iharmonic:
xH = mesh[iharmonic].ravel()
sh.append(len(harmonic))
else:
xH = None
if ispread:
xG = mesh[ispread].ravel()
sh.append(len(dgamma))
else:
xG = None
res = self.build_I_map(xE, xTheta, xPsi, xH, xG)
Es = res[1].reshape(sh)
Ep = res[2].reshape(sh)
if ispread:
if iharmonic:
ws = wspr[np.newaxis, np.newaxis, np.newaxis, np.newaxis, :]
else:
ws = wspr[np.newaxis, np.newaxis, np.newaxis, :]
Is = ((Es*np.conj(Es)).real * ws).sum(axis=ispread)
Ip = ((Ep*np.conj(Ep)).real * ws).sum(axis=ispread)
Isp = (Es*np.conj(Ep) * ws).sum(axis=ispread)
else:
Is = (Es*np.conj(Es)).real
Ip = (Ep*np.conj(Ep)).real
Isp = Es*np.conj(Ep)
self.Is = Is.astype(float)
self.Ip = Ip.astype(float)
self.Isp = Isp.astype(complex)
s0 = self.Is + self.Ip
s1 = self.Is - self.Ip
s2 = 2. * np.real(self.Isp)
s3 = -2. * np.imag(self.Isp)
if (self.dxprime > 0 or self.dzprime > 0) and \
len(theta) > 1 and len(psi) > 1:
from scipy.ndimage.filters import gaussian_filter
Sx = self.dxprime / (theta[1] - theta[0])
Sz = self.dzprime / (psi[1] - psi[0])
# print(self.dxprime, theta[-1] - theta[0], Sx, len(theta))
# print(self.dzprime, psi[-1] - psi[0], Sz, len(psi))
if Sx > len(theta)//4: # ±2σ
print("************* Warning ***********************")
print("Your theta mesh is too narrow!")
print("It must be wider than the electron beam width")
print("*********************************************")
if self.xPrimeMax < theta.max():
print("************* Warning ****************************")
print("Your xPrimeMax is too small!")
print("It must be bigger than theta.max()")
if self.xPrimeMaxAutoReduce:
print("You probably need to set xPrimeMaxAutoReduce=False")
print("**************************************************")
if Sz > len(psi)//4: # ±2σ
print("************* Warning ************************")
print("Your psi mesh is too narrow!")
print("It must be wider than the electron beam height")
print("**********************************************")
if self.zPrimeMax < psi.max():
print("************* Warning ****************************")
print("Your zPrimeMax is too small!")
print("It must be bigger than psi.max()")
if self.zPrimeMaxAutoReduce:
print("You probably need to set zPrimeMaxAutoReduce=False")
print("**************************************************")
for ie, ee in enumerate(energy):
if harmonic is None:
s0[ie, :, :] = gaussian_filter(s0[ie, :, :], [Sx, Sz])
s1[ie, :, :] = gaussian_filter(s1[ie, :, :], [Sx, Sz])
s2[ie, :, :] = gaussian_filter(s2[ie, :, :], [Sx, Sz])
s3[ie, :, :] = gaussian_filter(s3[ie, :, :], [Sx, Sz])
else:
for ih, hh in enumerate(harmonic):
s0[ie, :, :, ih] = gaussian_filter(
s0[ie, :, :, ih], [Sx, Sz])
s1[ie, :, :, ih] = gaussian_filter(
s1[ie, :, :, ih], [Sx, Sz])
s2[ie, :, :, ih] = gaussian_filter(
s2[ie, :, :, ih], [Sx, Sz])
s3[ie, :, :, ih] = gaussian_filter(
s3[ie, :, :, ih], [Sx, Sz])
with np.errstate(divide='ignore'):
return (s0,
np.where(s0, s1 / s0, s0),
np.where(s0, s2 / s0, s0),
np.where(s0, s3 / s0, s0))
    def _sp(self, dim, x, ww1, w, wu, gamma, ddphi, ddpsi):
        """Integrand of the undulator field integral at longitudinal phase *x*
        (the Gauss-Legendre abscissas), returned as a 2-tuple of the s- and
        p-polarization components.

        *dim* is the number of array dimensions of the ray arrays (0, 1 or 3);
        it selects how the per-ray quantities are broadcast against the phase
        grid *x*. NOTE(review): dim == 2 is not handled and would raise
        NameError on ww1S — presumably callers never pass it; confirm.
        """
        lengamma = 1 if len(np.array(gamma).shape) == 0 else len(gamma)
        gS = gamma
        if dim == 0:
            # scalar rays: no broadcasting needed
            ww1S = ww1
            wS, wuS = w, wu
            ddphiS = ddphi
            ddpsiS = ddpsi
        elif dim == 1:
            # 1D ray arrays: add a trailing axis for the phase grid
            ww1S = ww1[:, np.newaxis]
            wS = w[:, np.newaxis]
            wuS = wu[:, np.newaxis]
            ddphiS = ddphi[:, np.newaxis]
            ddpsiS = ddpsi[:, np.newaxis]
            if lengamma > 1:
                gS = gamma[:, np.newaxis]
        elif dim == 3:
            ww1S = ww1[:, :, :, np.newaxis]
            wS, wuS = w[:, :, :, np.newaxis], wu[:, :, :, np.newaxis]
            ddphiS = ddphi[:, :, :, np.newaxis]
            ddpsiS = ddpsi[:, :, :, np.newaxis]
            if lengamma > 1:
                gS = gamma[:, :, :, np.newaxis]
        taperC = 1
        alphaS = 0
        sinx = np.sin(x)
        cosx = np.cos(x)
        sin2x = 2*sinx*cosx  # double-angle identity, cheaper than sin(2x)
        if self.taper is not None:
            # tapered undulator: field amplitude varies linearly along x
            alphaS = self.taper * C * 10 / E2W
            taperC = 1 - alphaS * x / wuS
            ucos = ww1S * x +\
                wS / gS / wuS *\
                (-self.Ky * ddphiS * (sinx + alphaS / wuS *
                                      (1 - cosx - x * sinx)) +
                 self.Kx * ddpsiS * np.sin(x + self.phase) +
                 0.125 / gS *
                 (self.Kx**2 * np.sin(2 * (x + self.phase)) +
                  self.Ky**2 * (sin2x - 2 * alphaS / wuS *
                                (x**2 + cosx**2 + x * sin2x))))
        elif self.R0 is not None:
            # near-field observation at finite distance R0: the phase is the
            # full optical path from trajectory point to observation point
            betam = 1 - (1 + 0.5 * self.Kx**2 + 0.5 * self.Ky**2) / 2. / gS**2
            WR0 = self.R0 / 10 / C * E2W
            ddphiS = -ddphiS  # NOTE(review): sign flip only here — presumably
            # a coordinate convention of the near-field formula; confirm
            drx = WR0 * np.tan(ddphiS) - self.Ky / wuS / gS * sinx
            dry = WR0 * np.tan(ddpsiS) + self.Kx / wuS / gS * np.sin(
                x + self.phase)
            drz = WR0 * np.cos(np.sqrt(ddphiS**2+ddpsiS**2)) -\
                betam * x / wuS + 0.125 / wuS / gS**2 *\
                (self.Ky**2 * sin2x +
                 self.Kx**2 * np.sin(2 * (x + self.phase)))
            ucos = wS * (x / wuS + np.sqrt(drx**2 + dry**2 + drz**2))
        else:
            # far-field, untapered undulator phase
            ucos = ww1S * x + wS / gS / wuS *\
                (-self.Ky * ddphiS * sinx +
                 self.Kx * ddpsiS * np.sin(x + self.phase) +
                 0.125 / gS * (self.Ky**2 * sin2x +
                               self.Kx**2 * np.sin(2. * (x + self.phase))))
        # observation direction (small-angle expansion) and electron velocity
        # / acceleration components in units of c
        nz = 1 - 0.5*(ddphiS**2 + ddpsiS**2)
        betax = taperC * self.Ky / gS * cosx
        betay = -self.Kx / gS * np.cos(x + self.phase)
        betaz = 1 - 0.5*(1./gS**2 + betax**2 + betay**2)
        betaPx = -wuS * self.Ky / gS * (alphaS * cosx + taperC * sinx)
        betaPy = wuS * self.Kx / gS * np.sin(x + self.phase)
        betaPz = 0.5 * wuS / gS**2 *\
            (self.Ky**2 * taperC * (alphaS*cosx**2 + taperC * sin2x) +
             self.Kx**2 * np.sin(2. * (x + self.phase)))
        krel = 1. - ddphiS*betax - ddpsiS*betay - nz*betaz  # 1 - n·beta
        eucos = np.exp(1j * ucos) / krel**2  # phase factor / (1 - n·beta)^2
        bnx = betax - ddphiS
        bny = betay - ddpsiS
        bnz = betaz - nz
        primexy = betaPx*bny - betaPy*bnx
        return ((nz*(betaPx*bnz - betaPz*bnx) + ddpsiS*primexy) * eucos,
                (nz*(betaPy*bnz - betaPz*bny) - ddphiS*primexy) * eucos)
def build_I_map(self, w, ddtheta, ddpsi, harmonic=None, dg=None):
useCL = False
if isinstance(w, np.ndarray):
if w.shape[0] > 32:
useCL = True
if (self.cl_ctx is None) or not useCL:
return self._build_I_map_conv(w, ddtheta, ddpsi, harmonic, dg)
elif self.customField is not None:
return self._build_I_map_custom(w, ddtheta, ddpsi, harmonic, dg)
else:
return self._build_I_map_CL(w, ddtheta, ddpsi, harmonic, dg)
    def _build_I_map_conv(self, w, ddtheta, ddpsi, harmonic, dgamma=None):
        """Numpy (non-OpenCL) backend of :meth:`build_I_map`: computes flux
        and the s-/p-field amplitudes at photon energies *w* and angles
        *ddtheta*, *ddpsi* by Gauss-Legendre quadrature of :meth:`_sp`.
        Returns a 3-tuple (flux, Es, Ep).
        """
#        np.seterr(invalid='ignore')
#        np.seterr(divide='ignore')
        NRAYS = 1 if len(np.array(w).shape) == 0 else len(w)
        gamma = self.gamma
        if self.eEspread > 0:
            if dgamma is not None:
                # externally supplied per-ray gamma offsets
                gamma += dgamma
            else:
                # sample the energy spread here; one sample for the whole
                # bunch if filamentBeam, else one per ray
                sz = 1 if self.filamentBeam else NRAYS
                gamma += gamma * self.eEspread * np.random.normal(size=sz)
        gamma = gamma * np.ones(NRAYS)
        gamma2 = gamma**2
        # wu: undulator fundamental (angular) frequency factor
        wu = PI * C * 10 / self.L0 / gamma2 * np.ones_like(w) *\
            (2*gamma2 - 1 - 0.5*self.Kx**2 - 0.5*self.Ky**2) / E2W
        # ww1: photon energy normalized to the on-axis fundamental
        ww1 = w * ((1. + 0.5*self.Kx**2 + 0.5*self.Ky**2) +
                   gamma2 * (ddtheta**2 + ddpsi**2)) / (2. * gamma2 * wu)
        tg_n, ag_n = np.polynomial.legendre.leggauss(self.quadm)
        self.tg_n, self.ag_n = tg_n, ag_n  # kept for introspection/reuse
        if (self.taper is not None) or (self.R0 is not None):
            # tapered or near-field: integrate over all Np periods explicitly
            AB = 1. / PI2 / wu
            dstep = 2 * PI / float(self.gIntervals)
            dI = np.arange(0.5 * dstep - PI * self.Np, PI * self.Np, dstep)
        else:
            # ideal far field: one period with the analytic sin(N pi w)/sin(pi w)
            # multi-period interference factor folded into AB
            AB = 1. / PI2 / wu * np.sin(PI * self.Np * ww1) / np.sin(PI * ww1)
            dstep = 2 * PI / float(self.gIntervals)
            dI = np.arange(-PI + 0.5 * dstep, PI, dstep)
        # composite Gauss-Legendre grid and weights over the subintervals
        tg = (dI[:, None] + 0.5*dstep*tg_n).ravel()  # + PI/2
        ag = (dI[:, None]*0 + ag_n).ravel()
#        Bsr = np.zeros_like(w, dtype='complex')
#        Bpr = np.zeros_like(w, dtype='complex')
        dim = len(np.array(w).shape)
        sp3res = self._sp(dim, tg, ww1, w, wu, gamma, ddtheta, ddpsi)
        # weighted sum over the quadrature axis -> field amplitudes
        Bsr = np.sum(ag * sp3res[0], axis=dim)
        Bpr = np.sum(ag * sp3res[1], axis=dim)
        bwFact = 0.001 if self.distE == 'BW' else 1./w
        Amp2Flux = FINE_STR * bwFact * self.I0 / SIE0
        if harmonic is not None:
            # keep only the contribution within +/-0.5 of the given harmonic
            Bsr[ww1 > harmonic+0.5] = 0
            Bpr[ww1 > harmonic+0.5] = 0
            Bsr[ww1 < harmonic-0.5] = 0
            Bpr[ww1 < harmonic-0.5] = 0
#        np.seterr(invalid='warn')
#        np.seterr(divide='warn')
        return (Amp2Flux * AB**2 * 0.25 * dstep**2 *
                (np.abs(Bsr)**2 + np.abs(Bpr)**2),
                np.sqrt(Amp2Flux) * AB * Bsr * 0.5 * dstep,
                np.sqrt(Amp2Flux) * AB * Bpr * 0.5 * dstep)
    def _build_I_map_custom(self, w, ddtheta, ddpsi, harmonic, dgamma=None):
        """OpenCL backend of :meth:`build_I_map` for a user-defined magnetic
        field: first integrates the electron trajectory through the custom
        field on a Runge-Kutta grid, then evaluates the radiation integral on
        the GPU. Returns a 3-tuple (flux, Es, Ep).
        """
#        time1 = time.time()
        NRAYS = 1 if len(np.array(w).shape) == 0 else len(w)
        gamma = self.gamma
        if self.eEspread > 0:
            if dgamma is not None:
                gamma += dgamma
            else:
                # one spread sample for the whole bunch if filamentBeam,
                # else one per ray
                sz = 1 if self.filamentBeam else NRAYS
                gamma += gamma * self.eEspread * np.random.normal(size=sz)
        gamma = gamma * np.ones(NRAYS, dtype=self.cl_precisionF)
        gamma2 = gamma**2
        # undulator fundamental frequency factor
        wu = PI * C * 10 / self.L0 / gamma2 *\
            (2*gamma2 - 1 - 0.5*self.Kx**2 - 0.5*self.Ky**2) / E2W
        # photon energy normalized to the on-axis fundamental
        ww1 = w * ((1. + 0.5 * self.Kx**2 + 0.5 * self.Ky**2) +
                   gamma2 * (ddtheta * ddtheta + ddpsi * ddpsi)) /\
            (2. * gamma2 * wu)
        scalarArgs = []  # R0
        R0 = self.R0 if self.R0 is not None else 0
        Np = np.int32(self.Np)
        tg_n, ag_n = np.polynomial.legendre.leggauss(self.quadm)
        self.tg_n, self.ag_n = tg_n, ag_n  # kept for introspection/reuse
        ab = 1. / PI2 / wu
        dstep = 2 * PI / float(self.gIntervals)
        dI = np.arange(0.5 * dstep - PI * Np, PI * Np, dstep)
        # prepend one extra grid point with zero weight as the trajectory
        # integration start
        tg = np.array([-PI*Np + PI/2.])
        ag = [0]
        tg = self.cl_precisionF(
            np.concatenate((tg, (dI[:, None]+0.5*dstep*tg_n).ravel() + PI/2.)))
        ag = self.cl_precisionF(np.concatenate(
            (ag, (dI[:, None]*0+ag_n).ravel())))
        nwt = self.nRK
        # Runge-Kutta sub-grid: 2*nwt equidistant points between each pair of
        # Gauss points
        wtGrid = []
        for itg in range(len(tg) - 1):
            # NOTE(review): tmppr/tmpstp are unused; the same linspace is
            # rebuilt just below
            tmppr, tmpstp = np.linspace(tg[itg],
                                        tg[itg+1],
                                        2*nwt,
                                        endpoint=False, retstep=True)
            wtGrid.extend(np.linspace(tg[itg],
                                      tg[itg+1],
                                      2*nwt,
                                      endpoint=False))
        wtGrid.append(tg[-1])
#        print("Custom magnetic field: Bx={0}. By={1}, Bz={2}".format(
#            self.B0x, self.B0y, self.B0z))
        Bx, By, Bz = self.magnetic_field(wtGrid)
        if self.filamentBeam:
            # filament beam: all rays share one electron, so the trajectory
            # is computed once and passed to the radiation kernel
            emcg = self.L0 * SIE0 / SIM0 / C / 10. / gamma[0] / PI2
            scalarArgsTraj = [np.int32(len(tg)),  # jend
                              np.int32(nwt),
                              self.cl_precisionF(emcg),
                              self.cl_precisionF(gamma[0])]
            nonSlicedROArgs = [tg,  # Gauss-Legendre grid
                               self.cl_precisionF(Bx),  # Mangetic field
                               self.cl_precisionF(By),  # components on the
                               self.cl_precisionF(Bz)]  # Runge-Kutta grid
            nonSlicedRWArgs = [np.zeros_like(tg),  # beta.x
                               np.zeros_like(tg),  # beta.y
                               np.zeros_like(tg),  # beta.z average
                               np.zeros_like(tg),  # traj.x
                               np.zeros_like(tg),  # traj.y
                               np.zeros_like(tg)]  # traj.z
            clKernel = 'get_trajectory'
            betax, betay, betazav, trajx, trajy, trajz = self.ucl.run_parallel(
                clKernel, scalarArgsTraj, None, nonSlicedROArgs,
                None, nonSlicedRWArgs, 1)
            self.beta = [betax, betay]
            # trajectory converted from phase units to mm; end points dropped
            self.trajectory = [trajx[1:-1] * self.L0 / PI2,
                               trajy[1:-1] * self.L0 / PI2,
                               trajz[1:-1] * self.L0 / PI2]
            # average fundamental from the computed average beta.z
            wuAv = PI2 * C * 10. * betazav[-1] / self.L0 / E2W
            scalarArgsTest = [np.int32(len(tg)),
                              np.int32(nwt),
                              self.cl_precisionF(emcg),
                              self.cl_precisionF(gamma[0]**2),
                              self.cl_precisionF(wuAv),
                              self.cl_precisionF(self.L0),
                              self.cl_precisionF(R0)]
            slicedROArgs = [self.cl_precisionF(w),  # Energy
                            self.cl_precisionF(ddtheta),  # Theta
                            self.cl_precisionF(ddpsi)]  # Psi
            nonSlicedROArgs = [tg,  # Gauss-Legendre grid
                               ag,  # Gauss-Legendre weights
                               self.cl_precisionF(Bx),  # Mangetic field
                               self.cl_precisionF(By),  # components on the
                               self.cl_precisionF(Bz),  # Runge-Kutta grid
                               self.cl_precisionF(betax),  # Components of the
                               self.cl_precisionF(betay),  # velosity and
                               self.cl_precisionF(trajx),  # trajectory of the
                               self.cl_precisionF(trajy),  # electron on the
                               self.cl_precisionF(trajz)]  # Gauss grid
            slicedRWArgs = [np.zeros(NRAYS, dtype=self.cl_precisionC),  # Is
                            np.zeros(NRAYS, dtype=self.cl_precisionC)]  # Ip
            clKernel = 'undulator_custom_filament'
            Is_local, Ip_local = self.ucl.run_parallel(
                clKernel, scalarArgsTest, slicedROArgs, nonSlicedROArgs,
                slicedRWArgs, None, NRAYS)
        else:
            # thick beam: the kernel integrates the trajectory per ray
            scalarArgs.extend([np.int32(len(tg)),  # jend
                               np.int32(nwt),
                               self.cl_precisionF(self.L0)])
            slicedROArgs = [self.cl_precisionF(gamma),  # gamma
                            self.cl_precisionF(w),  # Energy
                            self.cl_precisionF(ddtheta),  # Theta
                            self.cl_precisionF(ddpsi)]  # Psi
            nonSlicedROArgs = [tg,  # Gauss-Legendre grid
                               ag,  # Gauss-Legendre weights
                               self.cl_precisionF(Bx),  # Mangetic field
                               self.cl_precisionF(By),  # components on the
                               self.cl_precisionF(Bz)]  # Runge-Kutta grid
            slicedRWArgs = [np.zeros(NRAYS, dtype=self.cl_precisionC),  # Is
                            np.zeros(NRAYS, dtype=self.cl_precisionC)]  # Ip
            clKernel = 'undulator_custom'
            Is_local, Ip_local = self.ucl.run_parallel(
                clKernel, scalarArgs, slicedROArgs, nonSlicedROArgs,
                slicedRWArgs, None, NRAYS)
        bwFact = 0.001 if self.distE == 'BW' else 1./w
        Amp2Flux = FINE_STR * bwFact * self.I0 / SIE0
        if harmonic is not None:
            # keep only the contribution within +/-0.5 of the given harmonic
            Is_local[ww1 > harmonic+0.5] = 0
            Ip_local[ww1 > harmonic+0.5] = 0
            Is_local[ww1 < harmonic-0.5] = 0
            Ip_local[ww1 < harmonic-0.5] = 0
        return (Amp2Flux * ab**2 * 0.25 * dstep**2 *
                (np.abs(Is_local)**2 + np.abs(Ip_local)**2),
                np.sqrt(Amp2Flux) * Is_local * ab * 0.5 * dstep,
                np.sqrt(Amp2Flux) * Ip_local * ab * 0.5 * dstep)
    def _build_I_map_CL(self, w, ddtheta, ddpsi, harmonic, dgamma=None):
        """OpenCL backend of :meth:`build_I_map` for the analytic undulator
        field: selects among the 'undulator', 'undulator_taper',
        'undulator_nf' (near field) and '*_full' kernels and evaluates the
        radiation integral on the GPU. Returns a 3-tuple (flux, Es, Ep).
        """
#        time1 = time.time()
        NRAYS = 1 if len(np.array(w).shape) == 0 else len(w)
        gamma = self.gamma
        if self.eEspread > 0:
            if dgamma is not None:
                gamma += dgamma
            else:
                # one spread sample for the whole bunch if filamentBeam,
                # else one per ray
                sz = 1 if self.filamentBeam else NRAYS
                gamma += gamma * self.eEspread * np.random.normal(size=sz)
        gamma = gamma * np.ones(NRAYS, dtype=self.cl_precisionF)
        gamma2 = gamma**2
        # undulator fundamental frequency factor
        wu = PI * C * 10 / self.L0 / gamma2 *\
            (2*gamma2 - 1 - 0.5*self.Kx**2 - 0.5*self.Ky**2) / E2W
        # photon energy normalized to the on-axis fundamental
        ww1 = w * ((1. + 0.5 * self.Kx**2 + 0.5 * self.Ky**2) +
                   gamma2 * (ddtheta * ddtheta + ddpsi * ddpsi)) /\
            (2. * gamma2 * wu)
        # the leading scalar arguments depend on the selected kernel below
        scalarArgs = [self.cl_precisionF(0.)]
        if self.R0 is not None:
            scalarArgs = [self.cl_precisionF(self.R0),  # R0
                          self.cl_precisionF(self.L0)]
        elif self.taper:
            scalarArgs = [self.cl_precisionF(self.taper)]
        Np = np.int32(self.Np)
        tg_n, ag_n = np.polynomial.legendre.leggauss(self.quadm)
        self.tg_n, self.ag_n = tg_n, ag_n  # kept for introspection/reuse
        dstep = 2 * PI / float(self.gIntervals)
        if (self.taper is not None) or (self.R0 is not None) or self.full:
            # integrate over all Np periods explicitly
            ab = 1. / PI2 / wu
            dI = np.arange(0.5 * dstep - PI * Np, PI * Np, dstep)
        else:
            # ideal far field: one period with the analytic multi-period
            # interference factor folded into ab
            ab = 1. / PI2 / wu * np.sin(PI * Np * ww1) / np.sin(PI * ww1)
            dI = np.arange(-PI + 0.5*dstep, PI, dstep)
        extra = PI/2*0  # NOTE(review): deliberately zero; presumably a kept
        # switch for a PI/2 phase offset — confirm before removing
        tg = self.cl_precisionF((dI[:, None]+0.5*dstep*tg_n).ravel()) + extra
        ag = self.cl_precisionF((dI[:, None]*0+ag_n).ravel())
        scalarArgs.extend([self.cl_precisionF(self.Kx),  # Kx
                           self.cl_precisionF(self.Ky),  # Ky
                           self.cl_precisionF(self.phase),  # phase
                           np.int32(len(tg))])  # jend
        slicedROArgs = [self.cl_precisionF(gamma),  # gamma
                        self.cl_precisionF(wu),  # Eund
                        self.cl_precisionF(w),  # Energy
                        self.cl_precisionF(ww1),  # Energy/Eund(0)
                        self.cl_precisionF(ddtheta),  # Theta
                        self.cl_precisionF(ddpsi)]  # Psi
        if self.full:
            # '*_full' kernels additionally need the electron angular offsets
            if isinstance(self.theta0, np.ndarray):
                slicedROArgs.extend([self.cl_precisionF(self.theta0),
                                     self.cl_precisionF(self.psi0)])
            else:
                slicedROArgs.extend([self.cl_precisionF(
                                     self.theta0*np.ones_like(w)),
                                     self.cl_precisionF(
                                     self.psi0*np.ones_like(w))])
        nonSlicedROArgs = [tg,  # Gauss-Legendre grid
                           ag]  # Gauss-Legendre weights
        slicedRWArgs = [np.zeros(NRAYS, dtype=self.cl_precisionC),  # Is
                        np.zeros(NRAYS, dtype=self.cl_precisionC)]  # Ip
        # kernel selection: taper takes precedence, then near-field, then full
        if self.taper is not None:
            clKernel = 'undulator_taper'
        elif self.R0 is not None:
            clKernel = 'undulator_nf'
            if self.full:
                clKernel = 'undulator_nf_full'
        elif self.full:
            clKernel = 'undulator_full'
        else:
            clKernel = 'undulator'
        Is_local, Ip_local = self.ucl.run_parallel(
            clKernel, scalarArgs, slicedROArgs, nonSlicedROArgs,
            slicedRWArgs, dimension=NRAYS)
        bwFact = 0.001 if self.distE == 'BW' else 1./w
        Amp2Flux = FINE_STR * bwFact * self.I0 / SIE0
        if harmonic is not None:
            # keep only the contribution within +/-0.5 of the given harmonic
            Is_local[ww1 > harmonic+0.5] = 0
            Ip_local[ww1 > harmonic+0.5] = 0
            Is_local[ww1 < harmonic-0.5] = 0
            Ip_local[ww1 < harmonic-0.5] = 0
#        print("Build_I_Map completed in {0} s".format(time.time() - time1))
        return (Amp2Flux * ab**2 * 0.25 * dstep**2 *
                (np.abs(Is_local)**2 + np.abs(Ip_local)**2),
                np.sqrt(Amp2Flux) * Is_local * ab * 0.5 * dstep,
                np.sqrt(Amp2Flux) * Ip_local * ab * 0.5 * dstep)
# def _reportNaN(self, x, strName):
# nanSum = np.isnan(x).sum()
# if nanSum > 0:
# print("{0} NaN rays in {1}!".format(nanSum, strName))
    def real_photon_source_sizes(
            self, energy='auto', theta='auto', psi='auto', method='rms'):
        """Returns energy dependent arrays: flux, (dx')², (dz')², dx², dz².
        Depending on *distE* being 'eV' or 'BW', the flux is either in ph/s or
        in ph/s/0.1%BW, being integrated over the specified theta and psi
        ranges. The squared angular and linear photon source sizes are
        variances, i.e. squared sigmas. The latter two (linear sizes) are in
        mm**2. The linear sizes are obtained from the 2D Fourier transform of
        the angular field distribution (far field <-> source plane).
        """
        if isinstance(energy, str):  # i.e. if 'auto'
            energy = np.mgrid[self.E_min:self.E_max + 0.5*self.dE:self.dE]
        if isinstance(theta, str):
            theta = np.mgrid[
                self.Theta_min:self.Theta_max + 0.5*self.dTheta:self.dTheta]
        if isinstance(psi, str):
            psi = np.mgrid[self.Psi_min:self.Psi_max + 0.5*self.dPsi:self.dPsi]
        tomesh = [energy, theta, psi]
        sh = [len(energy), len(theta), len(psi)]
        if self.eEspread > 0:
            # Gauss-weighted sampling of the energy spread over +/-3 sigma
            # (coarser than in intensities_on_mesh)
            spr = np.linspace(-3, 3, 13)
            dgamma = self.gamma * spr * self.eEspread
            wspr = np.exp(-0.5 * spr**2)
            wspr /= wspr.sum()
            tomesh.append(dgamma)
            sh.append(len(dgamma))
        mesh = np.meshgrid(*tomesh, indexing='ij')
        xE, xTheta, xPsi = mesh[0].ravel(), mesh[1].ravel(), mesh[2].ravel()
        xG = mesh[3].ravel() if self.eEspread > 0 else None
        res = self.build_I_map(xE, xTheta, xPsi, dg=xG)
        Es = res[1].reshape(sh)
        Ep = res[2].reshape(sh)
        if self.eEspread > 0:
            # weight-sum the intensities over the spread axis (axis 3)
            ws = wspr[np.newaxis, np.newaxis, np.newaxis, :]
            Is = ((Es*np.conj(Es)).real * ws).sum(axis=3)
            Ip = ((Ep*np.conj(Ep)).real * ws).sum(axis=3)
        else:
            Is = (Es*np.conj(Es)).real
            Ip = (Ep*np.conj(Ep)).real
        dtheta, dpsi = theta[1] - theta[0], psi[1] - psi[0]
        I0 = (Is.astype(float) + Ip.astype(float))
        flux = I0.sum(axis=(1, 2)) * dtheta * dpsi
        # angular sizes from the angular intensity distribution
        theta2, psi2 = self._get_2D_sizes(
            I0, flux, theta, psi, dtheta, dpsi, method)
        # linear sizes: FT of the field over the angular axes; the Fourier
        # conjugate variables are converted to mm via the wavenumber k below
        EsFT = np.fft.fftshift(np.fft.fft2(Es), axes=(1, 2)) * dtheta * dpsi
        EpFT = np.fft.fftshift(np.fft.fft2(Ep), axes=(1, 2)) * dtheta * dpsi
        thetaFT = np.fft.fftshift(np.fft.fftfreq(len(theta), d=dtheta))
        psiFT = np.fft.fftshift(np.fft.fftfreq(len(psi), d=dpsi))
        dthetaFT, dpsiFT = thetaFT[1] - thetaFT[0], psiFT[1] - psiFT[0]
        if self.eEspread > 0:
            ws = wspr[np.newaxis, np.newaxis, np.newaxis, :]
            IsFT = ((EsFT*np.conj(EsFT)).real * ws).sum(axis=3)
            IpFT = ((EpFT*np.conj(EpFT)).real * ws).sum(axis=3)
        else:
            IsFT = (EsFT*np.conj(EsFT)).real
            IpFT = (EpFT*np.conj(EpFT)).real
        I0FT = (IsFT.astype(float) + IpFT.astype(float))
        fluxFT = I0FT.sum(axis=(1, 2)) * dthetaFT * dpsiFT
        # flux equals fluxFT (Parseval), check it:
#        print(flux)
#        print(fluxFT)
        k = energy / CH * 1e7  # in 1/mm
        dx2, dz2 = self._get_2D_sizes(
            I0FT, fluxFT, thetaFT, psiFT, dthetaFT, dpsiFT, method, k)
        return flux, theta2, psi2, dx2, dz2
def _get_2D_sizes(
self, I0, flux, theta, psi, dtheta, dpsi, method, k=None):
if method == 'rms':
theta2 = (I0 * (theta[np.newaxis, :, np.newaxis])**2).sum(
axis=(1, 2)) * dtheta * dpsi / flux
psi2 = (I0 * (psi[np.newaxis, np.newaxis, :])**2).sum(
axis=(1, 2)) * dtheta * dpsi / flux
elif isinstance(method, float): # 0 < method < 1
theta2 = self._get_1D_size(I0, flux, theta, dtheta, 1, method)
psi2 = self._get_1D_size(I0, flux, psi, dpsi, 2, method)
else:
raise ValueError('unknown method!')
if k is not None:
theta2 *= k**(-2)
psi2 *= k**(-2)
return theta2, psi2
    def _get_1D_size(self, I0, flux, ang, dang, axis, method):
        """Squared size of the distribution *I0* along *axis* (1 = theta,
        2 = psi) at the fractional flux level *method* (0 < method < 1),
        one value per energy.

        Takes a half cut through the center of the angular mesh (assumes the
        distribution peaks at the mesh center — TODO confirm), accumulates
        flux radially as 2*pi*Integral(I*ang*dang) (an axisymmetric
        approximation, presumably) and linearly interpolates ang**2 at the
        requested flux fraction.
        """
        ang2 = np.zeros(I0.shape[0])
        if axis == 1:
            # half cut along theta at the central psi
            angCutI0 = I0[:, I0.shape[1]//2:, I0.shape[2]//2].squeeze()
        elif axis == 2:
            # half cut along psi at the central theta
            angCutI0 = I0[:, I0.shape[1]//2, I0.shape[2]//2:].squeeze()
        angCumFlux = (angCutI0*ang[np.newaxis, len(ang)//2:]).cumsum(axis=1)\
            * 2*np.pi * dang
        for ie, ee in enumerate(flux):
            try:
                argBorder = np.argwhere(angCumFlux[ie, :] > ee*method)[0][0]
            except IndexError:
                # the requested flux level was never reached on this mesh
                ang2[ie] = 0
                continue
            # linear interpolation of ang**2 between the two bracketing
            # cumulative-flux samples
            r2a = ang[len(ang)//2+argBorder-1]**2
            va = angCumFlux[ie, argBorder-1]
            r2b = ang[len(ang)//2+argBorder]**2
            vb = angCumFlux[ie, argBorder]
            r2m = (ee*method - va) * (r2b-r2a) / (vb-va) + r2a
            ang2[ie] = r2m
        return ang2
def tanaka_kitamura_Qa2(self, x, eps=1e-6):
"""Squared Q_a function from Tanaka and Kitamura J. Synchrotron Rad. 16
(2009) 380–386, Eq(17). The argument is normalized energy spread by
Eq(13)."""
ret = np.ones_like(x, dtype=float)
xarr = np.array(x)
# ret[x <= eps] = 1 # ret already holds ones
y = SQ2 * xarr[xarr > eps]
y2 = y**2
ret[x > eps] = y2 / (np.exp(-y2) + SQPI*y*special.erf(y) - 1)
return ret
def get_sigma_r02(self, E): # linear size
"""Squared sigma_{r0} as by Walker and by Ellaume and
Tanaka and Kitamura J. Synchrotron Rad. 16 (2009) 380–386 (see the
text after Eq(23))"""
return 2 * CHeVcm/E*10 * self.L0*self.Np / PI2**2
def get_sigmaP_r02(self, E): # angular size
"""Squared sigmaP_{r0}"""
return CHeVcm/E*10 / (2 * self.L0*self.Np)
    def get_sigma_r2(self, E, onlyOddHarmonics=True, with0eSpread=False):
        """Squared sigma_{r} as by
        Tanaka and Kitamura J. Synchrotron Rad. 16 (2009) 380–386
        that also depends on energy spread. Without energy spread (or with
        *with0eSpread*) this reduces to the diffraction-limited
        :meth:`get_sigma_r02`."""
        sigma_r02 = self.get_sigma_r02(E)
        if self.eEspread == 0 or with0eSpread:
            return sigma_r02
        harmonic = np.floor_divide(E, self.E1)
#        harmonic[harmonic < 1] = 1
        if onlyOddHarmonics:
            # map even harmonic numbers to the preceding odd one; odd numbers
            # are unchanged (h%2 - 1 is 0 for odd, -1 for even).
            # NOTE(review): harmonic == 0 (E < E1) becomes -1 here —
            # presumably E is expected above the fundamental; confirm.
            harmonic += harmonic % 2 - 1
        eEspread_norm = PI2 * harmonic * self.Np * self.eEspread
        Qa2 = self.tanaka_kitamura_Qa2(eEspread_norm/4.)  # note 1/4
        # the 2/3 power of Q_a² is specific to the *linear* size broadening
        return sigma_r02 * Qa2**(2/3.)
    def get_sigmaP_r2(self, E, onlyOddHarmonics=True, with0eSpread=False):
        """Squared sigmaP_{r} as by
        Tanaka and Kitamura J. Synchrotron Rad. 16 (2009) 380–386
        that also depends on energy spread. Without energy spread (or with
        *with0eSpread*) this reduces to the diffraction-limited
        :meth:`get_sigmaP_r02`."""
        sigmaP_r02 = self.get_sigmaP_r02(E)
        if self.eEspread == 0 or with0eSpread:
            return sigmaP_r02
        harmonic = np.floor_divide(E, self.E1)
#        harmonic[harmonic < 1] = 1
        if onlyOddHarmonics:
            # map even harmonic numbers to the preceding odd one; odd numbers
            # are unchanged (h%2 - 1 is 0 for odd, -1 for even)
            harmonic += harmonic % 2 - 1
        eEspread_norm = PI2 * harmonic * self.Np * self.eEspread
        # the angular size scales linearly with Q_a² and uses the full
        # normalized spread (no 1/4, unlike get_sigma_r2)
        Qa2 = self.tanaka_kitamura_Qa2(eEspread_norm)
        return sigmaP_r02 * Qa2
def get_SIGMA(self, E, onlyOddHarmonics=True, with0eSpread=False):
"""Calculates total linear source size, also including the effect of
electron beam energy spread. Uses Tanaka and Kitamura, J. Synchrotron
Rad. 16 (2009) 380–6.
*E* can be a value or an array. Returns a 2-tuple with x and y sizes.
"""
sigma_r2 = self.get_sigma_r2(E, onlyOddHarmonics, with0eSpread)
return ((self.dx**2 + sigma_r2)**0.5,
(self.dz**2 + sigma_r2)**0.5)
def get_SIGMAP(self, E, onlyOddHarmonics=True, with0eSpread=False):
"""Calculates total angular source size, also including the effect of
electron beam energy spread. Uses Tanaka and Kitamura, J. Synchrotron
Rad. 16 (2009) 380–6.
*E* can be a value or an array. Returns a 2-tuple with x and y sizes.
"""
sigmaP_r2 = self.get_sigmaP_r2(E, onlyOddHarmonics, with0eSpread)
return ((self.dxprime**2 + sigmaP_r2)**0.5,
(self.dzprime**2 + sigmaP_r2)**0.5)
def shine(self, toGlobal=True, withAmplitudes=True, fixedEnergy=False,
wave=None, accuBeam=None):
u"""
Returns the source beam. If *toGlobal* is True, the output is in
the global system. If *withAmplitudes* is True, the resulted beam
contains arrays Es and Ep with the *s* and *p* components of the
electric field.
*fixedEnergy* is either None or a value in eV. If *fixedEnergy* is
specified, the energy band is not 0.1%BW relative to *fixedEnergy*, as
probably expected but is given by (eMax - eMin) of the constructor.
*wave* and *accuBeam* are used in wave diffraction. *wave* is a Beam
object and determines the positions of the wave samples. It must be
obtained by a previous `prepare_wave` run. *accuBeam* is only needed
with *several* repeats of diffraction integrals when the parameters of
the filament beam must be preserved for all the repeats.
.. Returned values: beamGlobal
"""
if self.bl is not None:
try:
self.bl._alignE = float(self.bl.alignE)
except ValueError:
self.bl._alignE = 0.5 * (self.eMin + self.eMax)
if wave is not None:
if not hasattr(wave, 'rDiffr'):
raise ValueError("If you want to use a `wave`, run a" +
" `prepare_wave` before shine!")
self.uniformRayDensity = True
mcRays = len(wave.a)
else:
mcRays = self.nrays
if self.uniformRayDensity:
withAmplitudes = True
if not self.uniformRayDensity:
if raycing._VERBOSITY_ > 0:
print("Rays generation")
bo = None
length = 0
seeded = np.long(0)
seededI = 0.
np.seterr(invalid='warn')
np.seterr(divide='warn')
if self.filamentBeam:
if accuBeam is None:
rsE = np.random.random_sample() * \
float(self.E_max - self.E_min) + self.E_min
rX = self.dx * np.random.standard_normal()
rZ = self.dz * np.random.standard_normal()
dtheta = self.dxprime * np.random.standard_normal()
dpsi = self.dzprime * np.random.standard_normal()
else:
rsE = accuBeam.E[0]
rX = accuBeam.filamentDX
rZ = accuBeam.filamentDZ
dtheta = accuBeam.filamentDtheta
dpsi = accuBeam.filamentDpsi
seeded = accuBeam.seeded
seededI = accuBeam.seededI
if self.full:
if self.filamentBeam:
self.theta0 = dtheta
self.psi0 = dpsi
else:
self.theta0 = np.random.normal(0, self.dxprime, mcRays)
self.psi0 = np.random.normal(0, self.dzprime, mcRays)
if fixedEnergy:
rsE = fixedEnergy
if (self.E_max-self.E_min) > fixedEnergy*1.1e-3:
print("Warning: the bandwidth seems too big. "
"Specify it by giving eMin and eMax in the constructor.")
nrep = 0
rep_condition = True
# while length < self.nrays:
while rep_condition:
seeded += mcRays
# start_time = time.time()
if self.filamentBeam or fixedEnergy:
rE = rsE * np.ones(mcRays)
else:
rndg = np.random.rand(mcRays)
rE = rndg * float(self.E_max - self.E_min) + self.E_min
if wave is not None:
self.xzE = (self.E_max - self.E_min)
if self.filamentBeam:
shiftX = rX
shiftZ = rZ
else:
shiftX = np.random.normal(
0, self.dx, mcRays) if self.dx > 0 else 0
shiftZ = np.random.normal(
0, self.dz, mcRays) if self.dz > 0 else 0
x = wave.xDiffr + shiftX
y = wave.yDiffr
z = wave.zDiffr + shiftZ
rDiffr = (x**2 + y**2 + z**2)**0.5
rTheta = x / rDiffr
rPsi = z / rDiffr
if self.filamentBeam:
rTheta += dtheta
rPsi += dpsi
else:
if self.dxprime > 0:
rTheta += np.random.normal(0, self.dxprime, mcRays)
if self.dzprime > 0:
rPsi += np.random.normal(0, self.dzprime, mcRays)
else:
rndg = np.random.rand(mcRays)
rTheta = rndg * (self.Theta_max - self.Theta_min) +\
self.Theta_min
rndg = np.random.rand(mcRays)
rPsi = rndg * (self.Psi_max - self.Psi_min) + self.Psi_min
Intensity, mJs, mJp = self.build_I_map(rE, rTheta, rPsi)
if self.uniformRayDensity:
seededI += mcRays * self.xzE
else:
seededI += Intensity.sum() * self.xzE
tmp_max = np.max(Intensity)
if tmp_max > self.Imax:
self.Imax = tmp_max
self.fluxConst = self.Imax * self.xzE
if raycing._VERBOSITY_ > 10:
imax = np.argmax(Intensity)
print(self.Imax, imax, rE[imax], rTheta[imax], rPsi[imax])
if self.uniformRayDensity:
I_pass = slice(None)
npassed = mcRays
else:
rndg = np.random.rand(mcRays)
I_pass = np.where(self.Imax * rndg < Intensity)[0]
npassed = len(I_pass)
if npassed == 0:
if raycing._VERBOSITY_ > 0:
print('No good rays in this seed!', length, 'of',
self.nrays, 'rays in total so far...')
print(self.Imax, self.E_min, self.E_max,
self.Theta_min, self.Theta_max,
self.Psi_min, self.Psi_max)
continue
if wave is not None:
bot = wave
else:
bot = Beam(npassed, withAmplitudes=withAmplitudes)
bot.state[:] = 1 # good
bot.E[:] = rE[I_pass]
if self.filamentBeam:
dxR = rX
dzR = rZ
# sigma_r2 = self.get_sigma_r2(bot.E)
# dxR += np.random.normal(0, sigma_r2**0.5, npassed)
# dzR += np.random.normal(0, sigma_r2**0.5, npassed)
else:
if self.full:
bot.sourceSIGMAx = self.dx
bot.sourceSIGMAz = self.dz
dxR = np.random.normal(0, bot.sourceSIGMAx, npassed)
dzR = np.random.normal(0, bot.sourceSIGMAz, npassed)
else:
bot.sourceSIGMAx, bot.sourceSIGMAz = self.get_SIGMA(
bot.E, onlyOddHarmonics=False)
dxR = np.random.normal(0, bot.sourceSIGMAx, npassed)
dzR = np.random.normal(0, bot.sourceSIGMAz, npassed)
if wave is not None:
wave.rDiffr = ((wave.xDiffr - dxR)**2 + wave.yDiffr**2 +
(wave.zDiffr - dzR)**2)**0.5
wave.path[:] = 0
wave.a[:] = (wave.xDiffr - dxR) / wave.rDiffr
wave.b[:] = wave.yDiffr / wave.rDiffr
wave.c[:] = (wave.zDiffr - dzR) / wave.rDiffr
else:
bot.x[:] = dxR
bot.z[:] = dzR
bot.a[:] = rTheta[I_pass]
bot.c[:] = rPsi[I_pass]
if not self.full:
if self.filamentBeam:
bot.a[:] += dtheta
bot.c[:] += dpsi
else:
if self.dxprime > 0:
bot.a[:] += np.random.normal(
0, self.dxprime, npassed)
if self.dzprime > 0:
bot.c[:] += np.random.normal(
0, self.dzprime, npassed)
mJs = mJs[I_pass]
mJp = mJp[I_pass]
if wave is not None:
area = wave.areaNormal if hasattr(wave, 'areaNormal') else\
wave.area
norm = area**0.5 / wave.rDiffr
mJs *= norm
mJp *= norm
mJs2 = (mJs * np.conj(mJs)).real
mJp2 = (mJp * np.conj(mJp)).real
if self.uniformRayDensity:
sSP = 1.
else:
sSP = mJs2 + mJp2
bot.Jsp[:] = np.where(sSP, mJs * np.conj(mJp) / sSP, 0)
bot.Jss[:] = np.where(sSP, mJs2 / sSP, 0)
bot.Jpp[:] = np.where(sSP, mJp2 / sSP, 0)
if withAmplitudes:
if self.uniformRayDensity:
bot.Es[:] = mJs
bot.Ep[:] = mJp
else:
bot.Es[:] = mJs / mJs2**0.5
bot.Ep[:] = mJp / mJp2**0.5
if bo is None:
bo = bot
else:
bo.concatenate(bot)
length = len(bo.a)
if not self.uniformRayDensity:
if raycing._VERBOSITY_ > 0:
print("{0} rays of {1}".format(length, self.nrays))
try:
if self.bl is not None:
if self.bl.flowSource == 'Qook' and\
self.bl.statusSignal is not None:
ptg = (self.bl.statusSignal[1] +
float(length) / float(self.nrays)) /\
self.bl.statusSignal[2]
self.bl.statusSignal[0].emit(
(ptg, self.bl.statusSignal[3]))
except:
pass
if self.filamentBeam:
nrep += 1
rep_condition = nrep < self.nrepmax
else:
rep_condition = length < self.nrays
if self.uniformRayDensity:
rep_condition = False
bo.accepted = length * self.fluxConst
bo.acceptedE = bo.E.sum() * self.fluxConst * SIE0
bo.seeded = seeded
bo.seededI = seededI
if raycing._VERBOSITY_ > 0:
sys.stdout.flush()
if length > self.nrays and not self.filamentBeam and wave is None:
bo.filter_by_index(slice(0, self.nrays))
if self.filamentBeam:
bo.filamentDtheta = dtheta
bo.filamentDpsi = dpsi
bo.filamentDX = rX
bo.filamentDZ = rZ
norm = (bo.a**2 + bo.b**2 + bo.c**2)**0.5
bo.a /= norm
bo.b /= norm
bo.c /= norm
# if raycing._VERBOSITY_ > 10:
# self._reportNaN(bo.Jss, 'Jss')
# self._reportNaN(bo.Jpp, 'Jpp')
# self._reportNaN(bo.Jsp, 'Jsp')
# self._reportNaN(bo.E, 'E')
# self._reportNaN(bo.x, 'x')
# self._reportNaN(bo.y, 'y')
# self._reportNaN(bo.z, 'z')
# self._reportNaN(bo.a, 'a')
# self._reportNaN(bo.b, 'b')
# self._reportNaN(bo.c, 'c')
if self.pitch or self.yaw:
raycing.rotate_beam(bo, pitch=self.pitch, yaw=self.yaw)
bor = Beam(copyFrom=bo)
if wave is not None:
bor.x[:] = dxR
bor.y[:] = 0
bor.z[:] = dzR
bor.path[:] = 0
mPh = np.exp(1e7j * wave.E/CHBAR * wave.rDiffr)
wave.Es *= mPh
wave.Ep *= mPh
if toGlobal: # in global coordinate system:
raycing.virgin_local_to_global(self.bl, bor, self.center)
bor.parentId = self.name
raycing.append_to_flow(self.shine, [bor],
inspect.currentframe())
return bor
| [
"# -*- coding: utf-8 -*-\n",
"__author__ = \"Konstantin Klementiev\", \"Roman Chernikov\"\n",
"__date__ = \"20 Sep 2016\"\n",
"import os\n",
"import sys\n",
"#import pickle\n",
"import numpy as np\n",
"from scipy import optimize\n",
"from scipy import special\n",
"import inspect\n",
"\n",
"from .. import raycing\n",
"from . import myopencl as mcl\n",
"from .sources_beams import Beam, allArguments\n",
"from .physconsts import E0, C, M0, EV2ERG, K2B, SIE0,\\\n",
" SIM0, FINE_STR, PI, PI2, SQ2, SQ3, SQPI, E2W, CHeVcm, CH, CHBAR\n",
"\n",
"try:\n",
" import pyopencl as cl # analysis:ignore\n",
" isOpenCL = True\n",
" os.environ['PYOPENCL_COMPILER_OUTPUT'] = '1'\n",
"except ImportError:\n",
" isOpenCL = False\n",
"\n",
"# _DEBUG replaced with raycing._VERBOSITY_\n",
"\n",
"\n",
"class BendingMagnet(object):\n",
" u\"\"\"\n",
" Bending magnet source. The computation is reasonably fast and thus a GPU\n",
" is not required and is not implemented.\n",
" \"\"\"\n",
" def __init__(self, bl=None, name='BM', center=(0, 0, 0),\n",
" nrays=raycing.nrays,\n",
" eE=3.0, eI=0.5, eEspread=0., eSigmaX=None, eSigmaZ=None,\n",
" eEpsilonX=1., eEpsilonZ=0.01, betaX=9., betaZ=2.,\n",
" B0=1., rho=None, filamentBeam=False, uniformRayDensity=False,\n",
" eMin=5000., eMax=15000., eN=51, distE='eV',\n",
" xPrimeMax=0.5, zPrimeMax=0.5, nx=25, nz=25, pitch=0, yaw=0):\n",
" u\"\"\"\n",
" *bl*: instance of :class:`~xrt.backends.raycing.BeamLine`\n",
" Container for beamline elements. Sourcess are added to its\n",
" `sources` list.\n",
"\n",
" *name*: str\n",
" User-specified name, can be used for diagnostics output.\n",
"\n",
" *center*: tuple of 3 floats\n",
" 3D point in global system.\n",
"\n",
" *nrays*: int\n",
" The number of rays sampled in one iteration.\n",
"\n",
" *eE*: float\n",
" Electron beam energy (GeV).\n",
"\n",
" *eI*: float\n",
" Electron beam current (A).\n",
"\n",
" *eEspread*: float\n",
" Energy spread relative to the beam energy.\n",
"\n",
" *eSigmaX*, *eSigmaZ*: float\n",
" rms horizontal and vertical electron beam sizes (µm).\n",
" Alternatively, betatron functions can be specified instead of the\n",
" electron beam sizes.\n",
"\n",
" *eEpsilonX*, *eEpsilonZ*: float\n",
" Horizontal and vertical electron beam emittance (nm rad).\n",
"\n",
" *betaX*, *betaZ*: float\n",
" Betatron function (m). Alternatively, beam size can be specified.\n",
"\n",
" *B0*: float\n",
" Magnetic field (T). Alternatively, specify *rho*.\n",
"\n",
" *rho*: float\n",
" Curvature radius (m). Alternatively, specify *B0*.\n",
"\n",
" *eMin*, *eMax*: float\n",
" Minimum and maximum photon energy (eV).\n",
"\n",
" *eN*: int\n",
" Number of photon energy intervals, used only in the test suit,\n",
" not required in ray tracing\n",
"\n",
" *distE*: 'eV' or 'BW'\n",
" The resulted flux density is per 1 eV or 0.1% bandwidth. For ray\n",
" tracing 'eV' is used.\n",
"\n",
" *xPrimeMax*, *zPrimeMax*: float\n",
" Horizontal and vertical acceptance (mrad).\n",
"\n",
" *nx*, *nz*: int\n",
" Number of intervals in the horizontal and vertical directions,\n",
" used only in the test suit, not required in ray tracing.\n",
"\n",
" *filamentBeam*: bool\n",
" If True the source generates coherent monochromatic wavefronts.\n",
" Required for the wave propagation calculations.\n",
"\n",
" *pitch*, *yaw*: float\n",
" rotation angles around x and z axis. Useful for canted sources and\n",
" declined electron beams.\n",
"\n",
"\n",
" \"\"\"\n",
" self.Ee = eE\n",
" self.gamma = self.Ee * 1e9 * EV2ERG / (M0 * C**2)\n",
" if isinstance(self, Wiggler):\n",
" self.B = K2B * self.K / self.L0\n",
" self.ro = M0 * C**2 * self.gamma / self.B / E0 / 1e6\n",
" self.X0 = 0.5 * self.K * self.L0 / self.gamma / PI\n",
" self.isMPW = True\n",
" else:\n",
" self.Np = 0.5\n",
" self.B = B0\n",
" self.ro = rho\n",
" if self.ro:\n",
" if not self.B:\n",
" self.B = M0 * C**2 * self.gamma / self.ro / E0 / 1e6\n",
" elif self.B:\n",
" self.ro = M0 * C**2 * self.gamma / self.B / E0 / 1e6\n",
" self.isMPW = False\n",
"\n",
" self.bl = bl\n",
" if bl is not None:\n",
" if self not in bl.sources:\n",
" bl.sources.append(self)\n",
" self.ordinalNum = len(bl.sources)\n",
" raycing.set_name(self, name)\n",
"# if name in [None, 'None', '']:\n",
"# self.name = '{0}{1}'.format(self.__class__.__name__,\n",
"# self.ordinalNum)\n",
"# else:\n",
"# self.name = name\n",
"\n",
" self.center = center # 3D point in global system\n",
" self.nrays = np.long(nrays)\n",
" self.dx = eSigmaX * 1e-3 if eSigmaX else None\n",
" self.dz = eSigmaZ * 1e-3 if eSigmaZ else None\n",
" self.eEpsilonX = eEpsilonX\n",
" self.eEpsilonZ = eEpsilonZ\n",
" self.I0 = eI\n",
" self.eEspread = eEspread\n",
" self.eMin = float(eMin)\n",
" self.eMax = float(eMax)\n",
"\n",
" if bl is not None:\n",
" if self.bl.flowSource != 'Qook':\n",
" bl.oesDict[self.name] = [self, 0]\n",
"\n",
" xPrimeMax = raycing.auto_units_angle(xPrimeMax) * 1e3 if\\\n",
" isinstance(xPrimeMax, raycing.basestring) else xPrimeMax\n",
" zPrimeMax = raycing.auto_units_angle(zPrimeMax) * 1e3 if\\\n",
" isinstance(zPrimeMax, raycing.basestring) else zPrimeMax\n",
" self.xPrimeMax = xPrimeMax * 1e-3 if xPrimeMax else None\n",
" self.zPrimeMax = zPrimeMax * 1e-3 if zPrimeMax else None\n",
" self.betaX = betaX\n",
" self.betaZ = betaZ\n",
" self.eN = eN + 1\n",
" self.nx = 2*nx + 1\n",
" self.nz = 2*nz + 1\n",
" self.xs = np.linspace(-self.xPrimeMax, self.xPrimeMax, self.nx)\n",
" self.zs = np.linspace(-self.zPrimeMax, self.zPrimeMax, self.nz)\n",
" self.energies = np.linspace(eMin, eMax, self.eN)\n",
" self.distE = distE\n",
" self.mode = 1\n",
" self.uniformRayDensity = uniformRayDensity\n",
" self.filamentBeam = filamentBeam\n",
" self.pitch = raycing.auto_units_angle(pitch)\n",
" self.yaw = raycing.auto_units_angle(yaw)\n",
"\n",
" if (self.dx is None) and (self.betaX is not None):\n",
" self.dx = np.sqrt(self.eEpsilonX * self.betaX * 0.001)\n",
" elif (self.dx is None) and (self.betaX is None):\n",
" print(\"Set either eSigmaX or betaX!\")\n",
" if (self.dz is None) and (self.betaZ is not None):\n",
" self.dz = np.sqrt(self.eEpsilonZ * self.betaZ * 0.001)\n",
" elif (self.dz is None) and (self.betaZ is None):\n",
" print(\"Set either eSigmaZ or betaZ!\")\n",
"\n",
" dxprime, dzprime = None, None\n",
" if dxprime:\n",
" self.dxprime = dxprime\n",
" else:\n",
" self.dxprime = 1e-6 * self.eEpsilonX /\\\n",
" self.dx if self.dx > 0 else 0. # [rad]\n",
" if dzprime:\n",
" self.dzprime = dzprime\n",
" else:\n",
" self.dzprime = 1e-6 * self.eEpsilonZ /\\\n",
" self.dz if self.dx > 0 else 0. # [rad]\n",
"\n",
" self.gamma2 = self.gamma**2\n",
" \"\"\"\" K2B: Conversion of Deflection parameter to magnetic field [T]\n",
" for the period in [mm]\"\"\"\n",
" # self.c_E = 0.0075 * HPLANCK * C * self.gamma**3 / PI / EV2ERG\n",
" # self.c_3 = 40. * PI * E0 * EV2ERG * self.I0 /\\\n",
" # (np.sqrt(3) * HPLANCK * HPLANCK * C * self.gamma2) * \\\n",
" # 200. * EV2ERG / (np.sqrt(3) * HPLANCK * C * self.gamma2)\n",
"\n",
" mE = self.eN\n",
" mTheta = self.nx\n",
" mPsi = self.nz\n",
"\n",
" if self.isMPW: # xPrimeMaxAutoReduce\n",
" xPrimeMaxTmp = self.K / self.gamma\n",
" if self.xPrimeMax > xPrimeMaxTmp:\n",
" print(\"Reducing xPrimeMax from {0} down to {1} mrad\".format(\n",
" self.xPrimeMax * 1e3, xPrimeMaxTmp * 1e3))\n",
" self.xPrimeMax = xPrimeMaxTmp\n",
"\n",
" self.Theta_min = float(-self.xPrimeMax)\n",
" self.Psi_min = float(-self.zPrimeMax)\n",
" self.Theta_max = float(self.xPrimeMax)\n",
" self.Psi_max = float(self.zPrimeMax)\n",
" self.E_min = float(np.min(self.energies))\n",
" self.E_max = float(np.max(self.energies))\n",
"\n",
" self.dE = (self.E_max-self.E_min) / float(mE-1)\n",
" self.dTheta = (self.Theta_max-self.Theta_min) / float(mTheta-1)\n",
" self.dPsi = (self.Psi_max-self.Psi_min) / float(mPsi-1)\n",
"\n",
" \"\"\"Trying to find real maximum of the flux density\"\"\"\n",
" E0fit = 0.5 * (self.E_max+self.E_min)\n",
"\n",
" precalc = True\n",
" rMax = self.nrays\n",
" if precalc:\n",
" rE = np.random.uniform(self.E_min, self.E_max, rMax)\n",
" rTheta = np.random.uniform(0., self.Theta_max, rMax)\n",
" rPsi = np.random.uniform(0., self.Psi_max, rMax)\n",
" DistI = self.build_I_map(rE, rTheta, rPsi)[0]\n",
" f_max = np.amax(DistI)\n",
" a_max = np.argmax(DistI)\n",
" NZ = np.ceil(np.max(rPsi[np.where(DistI > 0)[0]]) / self.dPsi) *\\\n",
" self.dPsi\n",
" self.zPrimeMax = min(self.zPrimeMax, NZ)\n",
" self.Psi_max = float(self.zPrimeMax)\n",
" initial_x = [(rE[a_max]-E0fit) * 1e-5,\n",
" rTheta[a_max] * 1e3, rPsi[a_max] * 1e3]\n",
" else:\n",
" xE, xTheta, xPsi = np.mgrid[\n",
" self.E_min:self.E_max + 0.5*self.dE:self.dE,\n",
" self.Theta_min:self.Theta_max + 0.5*self.dTheta:self.dTheta,\n",
" self.Psi_min:self.Psi_max + 0.5*self.dPsi:self.dPsi]\n",
" DistI = self.build_I_map(xE, xTheta, xPsi)[0]\n",
" f_max = np.amax(DistI)\n",
" initial_x = [\n",
" (self.E_min + 0.6 * mE * self.dE - E0fit) * 1e-5,\n",
" (self.Theta_min + 0.6 * mTheta * self.dTheta) * 1e3,\n",
" (self.Psi_min + 0.6 * self.dPsi * mPsi) * 1e3]\n",
"\n",
" bounds_x = [\n",
" ((self.E_min - E0fit) * 1e-5, (self.E_max - E0fit) * 1e-5),\n",
" (0, self.Theta_max * 1e3),\n",
" (0, self.Psi_max * 1e3)]\n",
"\n",
" def int_fun(x):\n",
" return -1. * (self.build_I_map(x[0] * 1e5 + E0fit,\n",
" x[1] * 1e-3,\n",
" x[2] * 1e-3)[0]) / f_max\n",
" res = optimize.fmin_slsqp(int_fun, initial_x,\n",
" bounds=bounds_x,\n",
" acc=1e-12,\n",
" iter=1000,\n",
" epsilon=1.e-8,\n",
" full_output=1,\n",
" iprint=0)\n",
" self.Imax = max(-1 * int_fun(res[0]) * f_max, f_max)\n",
"\n",
" if self.filamentBeam:\n",
" self.nrepmax = np.floor(rMax / len(np.where(\n",
" self.Imax * np.random.rand(rMax) < DistI)[0]))\n",
"\n",
" \"\"\"Preparing to calculate the total flux integral\"\"\"\n",
" self.xzE = 4 * (self.E_max-self.E_min) * self.Theta_max * self.Psi_max\n",
" self.fluxConst = self.Imax * self.xzE\n",
"\n",
" def prefix_save_name(self):\n",
" return '3-BM-xrt'\n",
"\n",
" def build_I_map(self, dde, ddtheta, ddpsi):\n",
" np.seterr(invalid='ignore')\n",
" np.seterr(divide='ignore')\n",
" gamma = self.gamma\n",
" if self.eEspread > 0:\n",
" if np.array(dde).shape:\n",
" if dde.shape[0] > 1:\n",
" gamma += np.random.normal(0, gamma*self.eEspread,\n",
" dde.shape)\n",
" gamma2 = gamma**2\n",
" else:\n",
" gamma2 = self.gamma2\n",
"\n",
" w_cr = 1.5 * gamma2 * self.B * SIE0 / SIM0\n",
" if self.isMPW:\n",
" w_cr *= np.sin(np.arccos(ddtheta * gamma / self.K))\n",
" w_cr = np.where(np.isfinite(w_cr), w_cr, 0.)\n",
"\n",
" gammapsi = gamma * ddpsi\n",
" gamma2psi2p1 = gammapsi**2 + 1\n",
" eta = 0.5 * dde * E2W / w_cr * gamma2psi2p1**1.5\n",
"\n",
" ampSP = -0.5j * SQ3 / PI * gamma * dde * E2W / w_cr * gamma2psi2p1\n",
" ampS = ampSP * special.kv(2./3., eta)\n",
" ampP = 1j * gammapsi * ampSP * special.kv(1./3., eta) /\\\n",
" np.sqrt(gamma2psi2p1)\n",
"\n",
" ampS = np.where(np.isfinite(ampS), ampS, 0.)\n",
" ampP = np.where(np.isfinite(ampP), ampP, 0.)\n",
"\n",
" bwFact = 0.001 if self.distE == 'BW' else 1./dde\n",
" Amp2Flux = FINE_STR * bwFact * self.I0 / SIE0 * 2 * self.Np\n",
"\n",
" np.seterr(invalid='warn')\n",
" np.seterr(divide='warn')\n",
"\n",
" return (Amp2Flux * (np.abs(ampS)**2 + np.abs(ampP)**2),\n",
" np.sqrt(Amp2Flux) * ampS,\n",
" np.sqrt(Amp2Flux) * ampP)\n",
"\n",
" def intensities_on_mesh(self, energy='auto', theta='auto', psi='auto'):\n",
" if isinstance(energy, str):\n",
" energy = np.mgrid[self.E_min:self.E_max + 0.5*self.dE:self.dE]\n",
" if isinstance(theta, str):\n",
" theta = np.mgrid[\n",
" self.Theta_min:self.Theta_max + 0.5*self.dTheta:self.dTheta]\n",
" if isinstance(psi, str):\n",
" psi = np.mgrid[self.Psi_min:self.Psi_max + 0.5*self.dPsi:self.dPsi]\n",
" xE, xTheta, xPsi = np.meshgrid(energy, theta, psi, indexing='ij')\n",
" self.Itotal, ampS, ampP = self.build_I_map(xE, xTheta, xPsi)\n",
" self.Is = (ampS * np.conj(ampS)).real\n",
" self.Ip = (ampP * np.conj(ampP)).real\n",
" self.Isp = ampS * np.conj(ampP)\n",
" s0 = self.Is + self.Ip\n",
" with np.errstate(divide='ignore'):\n",
" Pol1 = np.where(s0, (self.Is - self.Ip) / s0, s0)\n",
" Pol3 = np.where(s0, 2. * self.Isp / s0, s0)\n",
" return (self.Itotal, Pol1, self.Is*0., Pol3)\n",
"\n",
" def shine(self, toGlobal=True, withAmplitudes=True, fixedEnergy=False,\n",
" accuBeam=None):\n",
" u\"\"\"\n",
" Returns the source beam. If *toGlobal* is True, the output is in\n",
" the global system. If *withAmplitudes* is True, the resulted beam\n",
" contains arrays Es and Ep with the *s* and *p* components of the\n",
" electric field.\n",
"\n",
"\n",
" .. Returned values: beamGlobal\n",
" \"\"\"\n",
" if self.bl is not None:\n",
" try:\n",
" self.bl._alignE = float(self.bl.alignE)\n",
" except ValueError:\n",
" self.bl._alignE = 0.5 * (self.eMin + self.eMax)\n",
"\n",
" if self.uniformRayDensity:\n",
" withAmplitudes = True\n",
"\n",
" bo = None\n",
" length = 0\n",
" seeded = np.long(0)\n",
" seededI = 0.\n",
" np.seterr(invalid='warn')\n",
" np.seterr(divide='warn')\n",
" mcRays = np.long(self.nrays * 1.2) if not self.uniformRayDensity else\\\n",
" self.nrays\n",
" if self.filamentBeam:\n",
" if accuBeam is None:\n",
" rE = np.random.random_sample() *\\\n",
" float(self.E_max - self.E_min) + self.E_min\n",
" if self.isMPW:\n",
" sigma_r2 = 2 * (CHeVcm/rE*10*self.L0*self.Np) / PI2**2\n",
" sourceSIGMAx = self.dx\n",
" sourceSIGMAz = self.dz\n",
" rTheta0 = np.random.random_sample() *\\\n",
" (self.Theta_max - self.Theta_min) + self.Theta_min\n",
" ryNp = 0.5 * self.L0 *\\\n",
" (np.arccos(rTheta0 * self.gamma / self.K) / PI) +\\\n",
" 0.5 * self.L0 *\\\n",
" np.random.random_integers(0, int(2*self.Np - 1))\n",
" rY = ryNp - 0.5*self.L0*self.Np\n",
" if (ryNp - 0.25*self.L0 <= 0):\n",
" rY += self.L0*self.Np\n",
" rX = self.X0 * np.sin(PI2 * rY / self.L0) +\\\n",
" sourceSIGMAx * np.random.standard_normal()\n",
" rY -= 0.25 * self.L0\n",
" rZ = sourceSIGMAz * np.random.standard_normal()\n",
" else:\n",
" rZ = self.dz * np.random.standard_normal()\n",
" rTheta0 = np.random.random_sample() *\\\n",
" (self.Theta_max - self.Theta_min) + self.Theta_min\n",
" R1 = self.dx * np.random.standard_normal() +\\\n",
" self.ro * 1000.\n",
" rX = -R1 * np.cos(rTheta0) + self.ro*1000.\n",
" rY = R1 * np.sin(rTheta0)\n",
" dtheta = self.dxprime * np.random.standard_normal()\n",
" dpsi = self.dzprime * np.random.standard_normal()\n",
" else:\n",
" rE = accuBeam.E[0]\n",
" rX = accuBeam.x[0]\n",
" rY = accuBeam.y[0]\n",
" rZ = accuBeam.z[0]\n",
" dtheta = accuBeam.filamentDtheta\n",
" dpsi = accuBeam.filamentDpsi\n",
" if fixedEnergy:\n",
" rE = fixedEnergy\n",
"\n",
" nrep = 0\n",
" rep_condition = True\n",
"# while length < self.nrays:\n",
" while rep_condition:\n",
" \"\"\"Preparing 4 columns of random numbers\n",
" 0: Energy\n",
" 1: Theta / horizontal\n",
" 2: Psi / vertical\n",
" 3: Monte-Carlo discriminator\"\"\"\n",
" rnd_r = np.random.rand(mcRays, 4)\n",
" seeded += mcRays\n",
" if self.filamentBeam:\n",
"# print(self.Theta_min, rTheta0 - 1. / self.gamma)\n",
" rThetaMin = np.max((self.Theta_min, rTheta0 - 1. / self.gamma))\n",
" rThetaMax = np.min((self.Theta_max, rTheta0 + 1. / self.gamma))\n",
" rTheta = (rnd_r[:, 1]) * (rThetaMax - rThetaMin) +\\\n",
" rThetaMin\n",
" rE *= np.ones(mcRays)\n",
" else:\n",
" rE = rnd_r[:, 0] * float(self.E_max - self.E_min) +\\\n",
" self.E_min\n",
" rTheta = (rnd_r[:, 1]) * (self.Theta_max - self.Theta_min) +\\\n",
" self.Theta_min\n",
" rPsi = rnd_r[:, 2] * (self.Psi_max - self.Psi_min) +\\\n",
" self.Psi_min\n",
" Intensity, mJss, mJpp = self.build_I_map(rE, rTheta, rPsi)\n",
"\n",
" if self.uniformRayDensity:\n",
" seededI += self.nrays * self.xzE\n",
" else:\n",
" seededI += Intensity.sum() * self.xzE\n",
"\n",
" if self.uniformRayDensity:\n",
" I_pass = slice(None)\n",
" npassed = mcRays\n",
" else:\n",
" I_pass =\\\n",
" np.where(self.Imax * rnd_r[:, 3] < Intensity)[0]\n",
" npassed = len(I_pass)\n",
" if npassed == 0:\n",
" print('No good rays in this seed!'\n",
" ' {0} of {1} rays in total so far...'.format(\n",
" length, self.nrays))\n",
" continue\n",
"\n",
" bot = Beam(npassed, withAmplitudes=withAmplitudes)\n",
" bot.state[:] = 1 # good\n",
"\n",
" bot.E[:] = rE[I_pass]\n",
"\n",
" Theta0 = rTheta[I_pass]\n",
" Psi0 = rPsi[I_pass]\n",
"\n",
" if not self.filamentBeam:\n",
" if self.dxprime > 0:\n",
" dtheta = np.random.normal(0, self.dxprime, npassed)\n",
" else:\n",
" dtheta = 0\n",
" if not self.isMPW:\n",
" dtheta += np.random.normal(0, 1/self.gamma, npassed)\n",
"\n",
" if self.dzprime > 0:\n",
" dpsi = np.random.normal(0, self.dzprime, npassed)\n",
" else:\n",
" dpsi = 0\n",
"\n",
" bot.a[:] = np.tan(Theta0 + dtheta)\n",
" bot.c[:] = np.tan(Psi0 + dpsi)\n",
"\n",
" intensS = (mJss[I_pass] * np.conj(mJss[I_pass])).real\n",
" intensP = (mJpp[I_pass] * np.conj(mJpp[I_pass])).real\n",
" if self.uniformRayDensity:\n",
" sSP = 1.\n",
" else:\n",
" sSP = intensS + intensP\n",
" # as by Walker and by Ellaume; SPECTRA's value is two times\n",
" # smaller:\n",
"\n",
" if self.isMPW:\n",
" sigma_r2 = 2 * (CHeVcm/bot.E*10 * self.L0*self.Np) / PI2**2\n",
" bot.sourceSIGMAx = np.sqrt(self.dx**2 + sigma_r2)\n",
" bot.sourceSIGMAz = np.sqrt(self.dz**2 + sigma_r2)\n",
" if self.filamentBeam:\n",
" bot.z[:] = rZ\n",
" bot.x[:] = rX\n",
" bot.y[:] = rY\n",
" else:\n",
" bot.y[:] = ((np.arccos(Theta0*self.gamma/self.K) / PI) +\n",
" np.random.randint(\n",
" -int(self.Np), int(self.Np), npassed) -\n",
" 0.5) * 0.5 * self.L0\n",
" bot.x[:] = self.X0 * np.sin(PI2 * bot.y / self.L0) +\\\n",
" np.random.normal(0., bot.sourceSIGMAx, npassed)\n",
" bot.z[:] = np.random.normal(0., bot.sourceSIGMAz, npassed)\n",
" bot.Jsp[:] = np.zeros(npassed)\n",
" else:\n",
" if self.filamentBeam:\n",
" bot.z[:] = rZ\n",
" bot.x[:] = rX\n",
" bot.y[:] = rY\n",
" else:\n",
" if self.dz > 0:\n",
" bot.z[:] = np.random.normal(0., self.dz, npassed)\n",
" if self.dx > 0:\n",
" R1 = np.random.normal(self.ro*1e3, self.dx, npassed)\n",
" else:\n",
" R1 = self.ro * 1e3\n",
" bot.x[:] = -R1 * np.cos(Theta0) + self.ro*1000.\n",
" bot.y[:] = R1 * np.sin(Theta0)\n",
"\n",
" bot.Jsp[:] = np.array(\n",
" np.where(sSP,\n",
" mJss[I_pass] * np.conj(mJpp[I_pass]) / sSP,\n",
" sSP), dtype=complex)\n",
"\n",
" bot.Jss[:] = np.where(sSP, intensS / sSP, sSP)\n",
" bot.Jpp[:] = np.where(sSP, intensP / sSP, sSP)\n",
"\n",
" if withAmplitudes:\n",
" bot.Es[:] = mJss[I_pass]\n",
" bot.Ep[:] = mJpp[I_pass]\n",
"\n",
" if bo is None:\n",
" bo = bot\n",
" else:\n",
" bo.concatenate(bot)\n",
" length = len(bo.a)\n",
" if raycing._VERBOSITY_ > 0:\n",
" print(\"{0} rays of {1}\".format(length, self.nrays))\n",
" try:\n",
" if self.bl is not None:\n",
" if self.bl.flowSource == 'Qook' and\\\n",
" self.bl.statusSignal is not None:\n",
" ptg = (self.bl.statusSignal[1] +\n",
" float(length) / float(self.nrays)) /\\\n",
" self.bl.statusSignal[2]\n",
" self.bl.statusSignal[0].emit(\n",
" (ptg, self.bl.statusSignal[3]))\n",
" except:\n",
" pass\n",
" if self.filamentBeam:\n",
" nrep += 1\n",
" rep_condition = nrep < self.nrepmax\n",
" else:\n",
" rep_condition = length < self.nrays\n",
" if self.uniformRayDensity:\n",
" rep_condition = False\n",
" if raycing._VERBOSITY_ > 0:\n",
" sys.stdout.flush()\n",
"\n",
" if length >= self.nrays:\n",
" bo.accepted = length * self.fluxConst\n",
" bo.acceptedE = bo.E.sum() * self.fluxConst * SIE0\n",
" bo.seeded = seeded\n",
" bo.seededI = seededI\n",
" if length > self.nrays and not self.filamentBeam:\n",
" bo.filter_by_index(slice(0, np.long(self.nrays)))\n",
" if self.filamentBeam:\n",
" bo.filamentDtheta = dtheta\n",
" bo.filamentDpsi = dpsi\n",
" norm = np.sqrt(bo.a**2 + 1.0 + bo.c**2)\n",
" bo.a /= norm\n",
" bo.b /= norm\n",
" bo.c /= norm\n",
" if self.pitch or self.yaw:\n",
" raycing.rotate_beam(bo, pitch=self.pitch, yaw=self.yaw)\n",
" if toGlobal: # in global coordinate system:\n",
" raycing.virgin_local_to_global(self.bl, bo, self.center)\n",
" raycing.append_to_flow(self.shine, [bo],\n",
" inspect.currentframe())\n",
" return bo\n",
"\n",
"\n",
"class Wiggler(BendingMagnet):\n",
" u\"\"\"\n",
" Wiggler source. The computation is reasonably fast and thus a GPU\n",
" is not required and is not implemented.\n",
" \"\"\"\n",
"\n",
" hiddenParams = ['B0', 'rho']\n",
"\n",
" def __init__(self, *args, **kwargs):\n",
" u\"\"\"Parameters are the same as in BendingMagnet except *B0* and *rho*\n",
" which are not required and additionally:\n",
"\n",
" .. warning::\n",
" If you change *K* outside of the constructor, invoke\n",
" ``your_wiggler_instance.reset()``.\n",
"\n",
" *K*: float\n",
" Deflection parameter\n",
"\n",
" *period*: float\n",
" period length in mm.\n",
"\n",
" *n*: int\n",
" Number of periods.\n",
"\n",
"\n",
" \"\"\"\n",
" self.K = kwargs.pop('K', 8.446)\n",
" self.L0 = kwargs.pop('period', 50)\n",
" self.Np = kwargs.pop('n', 40)\n",
" name = kwargs.pop('name', 'wiggler')\n",
" kwargs['name'] = name\n",
" super(Wiggler, self).__init__(*args, **kwargs)\n",
" self.reset()\n",
"\n",
" def prefix_save_name(self):\n",
" return '2-Wiggler-xrt'\n",
"\n",
" def reset(self):\n",
" \"\"\"Needed for changing *K* after instantiation.\"\"\"\n",
" self.B = K2B * self.K / self.L0\n",
" self.ro = M0 * C**2 * self.gamma / self.B / E0 / 1e6\n",
" self.X0 = 0.5 * self.K * self.L0 / self.gamma / PI\n",
"\n",
" def power_vs_K(self, energy, theta, psi, Ks):\n",
" u\"\"\"\n",
" Calculates *power curve* -- total power in W at given K values (*Ks*).\n",
" The power is calculated through the aperture defined by *theta* and\n",
" *psi* opening angles within the *energy* range.\n",
"\n",
" Returns a 1D array corresponding to *Ks*.\n",
" \"\"\"\n",
" try:\n",
" dtheta, dpsi, dE = \\\n",
" theta[1] - theta[0], psi[1] - psi[0], energy[1] - energy[0]\n",
" except TypeError:\n",
" dtheta, dpsi, dE = 1, 1, 1\n",
" tmpK = self.K\n",
" powers = []\n",
" for iK, K in enumerate(Ks):\n",
" if raycing._VERBOSITY_ > 10:\n",
" print(\"K={0}, {1} of {2}\".format(K, iK+1, len(Ks)))\n",
" self.K = K\n",
" self.reset()\n",
" I0 = self.intensities_on_mesh(energy, theta, psi)[0]\n",
" if self.distE == 'BW':\n",
" I0 *= 1e3\n",
" else: # 'eV'\n",
" I0 *= energy[:, np.newaxis, np.newaxis]\n",
" power = I0.sum() * dtheta * dpsi * dE * EV2ERG * 1e-7 # [W]\n",
" powers.append(power)\n",
" self.K = tmpK\n",
" return np.array(powers)\n",
"\n",
"\n",
"class Undulator(object):\n",
" u\"\"\"\n",
" Undulator source. The computation is volumnous and thus requires a GPU.\n",
" \"\"\"\n",
" def __init__(self, bl=None, name='und', center=(0, 0, 0),\n",
" nrays=raycing.nrays,\n",
" eE=6.0, eI=0.1, eEspread=0., eSigmaX=None, eSigmaZ=None,\n",
" eEpsilonX=1., eEpsilonZ=0.01, betaX=20., betaZ=5.,\n",
" period=50, n=50, K=10., Kx=0, Ky=0., phaseDeg=0,\n",
" taper=None, R0=None, targetE=None, customField=None,\n",
" eMin=5000., eMax=15000., eN=51, distE='eV',\n",
" xPrimeMax=0.5, zPrimeMax=0.5, nx=25, nz=25,\n",
" xPrimeMaxAutoReduce=True, zPrimeMaxAutoReduce=True,\n",
" gp=1e-2, gIntervals=1, nRK=30,\n",
" uniformRayDensity=False, filamentBeam=False,\n",
" targetOpenCL=raycing.targetOpenCL,\n",
" precisionOpenCL=raycing.precisionOpenCL,\n",
" pitch=0, yaw=0):\n",
" u\"\"\"\n",
" .. warning::\n",
" If you change any undulator parameter outside of the constructor,\n",
" invoke ``your_undulator_instance.reset()``.\n",
"\n",
" *bl*: instance of :class:`~xrt.backends.raycing.BeamLine`\n",
" Container for beamline elements. Sourcess are added to its\n",
" `sources` list.\n",
"\n",
" *name*: str\n",
" User-specified name, can be used for diagnostics output.\n",
"\n",
" *center*: tuple of 3 floats\n",
" 3D point in global system.\n",
"\n",
" *nrays*: int\n",
" The number of rays sampled in one iteration.\n",
"\n",
" *eE*: float\n",
" Electron beam energy (GeV).\n",
"\n",
" *eI*: float\n",
" Electron beam current (A).\n",
"\n",
" *eEspread*: float\n",
" Energy spread relative to the beam energy, rms.\n",
"\n",
" *eSigmaX*, *eSigmaZ*: float\n",
" rms horizontal and vertical electron beam sizes (µm).\n",
" Alternatively, betatron functions can be specified instead of the\n",
" electron beam sizes.\n",
"\n",
" *eEpsilonX*, *eEpsilonZ*: float\n",
" Horizontal and vertical electron beam emittance (nm rad).\n",
"\n",
" *betaX*, *betaZ*:\n",
" Betatron function (m). Alternatively, beam size can be specified.\n",
"\n",
" *period*, *n*:\n",
" Magnetic period (mm) length and number of periods.\n",
"\n",
" *K*, *Kx*, *Ky*: float\n",
" Deflection parameter for the vertical field or for an elliptical\n",
" undulator.\n",
"\n",
" *phaseDeg*: float\n",
" Phase difference between horizontal and vertical magnetic arrays.\n",
" Used in the elliptical case where it should be equal to 90 or -90.\n",
"\n",
" *taper*: tuple(dgap(mm), gap(mm))\n",
" Linear variation in undulator gap. None if tapering is not used.\n",
" Tapering should be used only with pyopencl.\n",
"\n",
" *R0*: float\n",
" Distance center-to-screen for the near field calculations (mm).\n",
" If None, the far field approximation (i.e. \"usual\" calculations) is\n",
" used. Near field calculations should be used only with pyopencl.\n",
" Here, a GPU can be much faster than a CPU.\n",
"\n",
" *targetE*: a tuple (Energy, harmonic{, isElliptical})\n",
" Can be given for automatic calculation of the deflection parameter.\n",
" If isElliptical is not given, it is assumed as False (as planar).\n",
"\n",
" *customField*: float or str or tuple(fileName, kwargs)\n",
" If given, adds a constant longitudinal field or a table of field\n",
" samples given as an Excel file or as a column text file. If given\n",
" as a tuple or list, the 2nd member is a key word dictionary for\n",
" reading Excel by :meth:`pandas.read_excel()` or reading text file\n",
" by :meth:`numpy.loadtxt()`, e.g. ``dict(skiprows=4)`` for skipping\n",
" the file header. The file must contain the columns with\n",
" longitudinal coordinate in mm, B_hor, B_ver {, B_long}, all in T.\n",
"\n",
" *nRK*: int\n",
" Size of the Runge-Kutta integration grid per each interval between\n",
" Gauss-Legendre integration nodes (only valid if customField is not\n",
" None).\n",
"\n",
" *eMin*, *eMax*: float\n",
" Minimum and maximum photon energy (eV). Used as band width for flux\n",
" calculation.\n",
"\n",
" *eN*: int\n",
" Number of photon energy intervals, used only in the test suit, not\n",
" required in ray tracing.\n",
"\n",
" *distE*: 'eV' or 'BW'\n",
" The resulted flux density is per 1 eV or 0.1% bandwidth. For ray\n",
" tracing 'eV' is used.\n",
"\n",
" *xPrimeMax*, *zPrimeMax*:\n",
" Horizontal and vertical acceptance (mrad).\n",
"\n",
" .. note::\n",
" The Monte Carlo sampling of the rays having their density\n",
" proportional to the beam intensity can be extremely inefficient\n",
" for sharply peaked distributions, like the undulator angular\n",
" density distribution. It is therefore very important to\n",
" restrict the sampled angular acceptance down to very small\n",
" angles. Use this source only with reasonably small *xPrimeMax*\n",
" and *zPrimeMax*!\n",
"\n",
" .. warning::\n",
" If you change these parameters outside of the constructor,\n",
" interpret them in *rad*; in the constructor they are given in\n",
" *mrad*. This awkwardness is kept for version compatibility.\n",
"\n",
" *nx*, *nz*: int\n",
" Number of intervals in the horizontal and vertical directions,\n",
" used only in the test suit, not required in ray tracing.\n",
"\n",
" *xPrimeMaxAutoReduce*, *zPrimeMaxAutoReduce*: bool\n",
" Whether to reduce too large angular ranges down to the feasible\n",
" values in order to improve efficiency. It is highly recommended to\n",
" keep them True.\n",
"\n",
" *gp*: float\n",
" Defines the precision of the Gauss integration.\n",
"\n",
" *gIntervals*: int\n",
" Integral of motion is divided by gIntervals to reduce the order of\n",
" Gauss-Legendre quadrature. Default value of 1 is usually enough for\n",
" a conventional undulator. For extreme cases (wigglers, near field,\n",
" wide angles) this value can be set to the order of few hundreds to\n",
" achieve the convergence of the integral. Large values can\n",
" significantly increase the calculation time and RAM consumption\n",
" especially if OpenCL is not used.\n",
"\n",
" *uniformRayDensity*: bool\n",
" If True, the radiation is sampled uniformly but with varying\n",
" amplitudes, otherwise with the density proportional to intensity\n",
" and with constant amplitudes. Required as True for wave propagation\n",
" calculations. False is usual for ray-tracing.\n",
"\n",
" *filamentBeam*: bool\n",
" If True the source generates coherent monochromatic wavefronts.\n",
" Required as True for the wave propagation calculations in partially\n",
" coherent regime.\n",
"\n",
" *targetOpenCL*: None, str, 2-tuple or tuple of 2-tuples\n",
" assigns the device(s) for OpenCL accelerated calculations. Accepts\n",
" the following values:\n",
" 1) a tuple (iPlatform, iDevice) of indices in the\n",
" lists cl.get_platforms() and platform.get_devices(), see the\n",
" section :ref:`calculations_on_GPU`. None, if pyopencl is not\n",
" wanted. Ignored if pyopencl is not installed.\n",
" 2) a tuple of tuples ((iP1, iD1),..,(iPn, iDn)) to assign specific\n",
" devices from one or multiple platforms.\n",
" 3) int iPlatform - assigns all devices found at the given platform.\n",
" 4) 'GPU' - lets the program scan the system and select all found\n",
" GPUs.\n",
" 5) 'CPU' - similar to 'GPU'. If one CPU exists in multiple\n",
" platforms the program tries to select the vendor-specific driver.\n",
" 6) 'other' - similar to 'GPU', used for Intel PHI and other OpenCL-\n",
" capable accelerator boards.\n",
" 7) 'all' - lets the program scan the system and assign all found\n",
" devices. Not recommended, since the performance will be limited by\n",
" the slowest device.\n",
" 8) 'auto' - lets the program scan the system and make an assignment\n",
" according to the priority list: 'GPU', 'other', 'CPU' or None if no\n",
" devices were found. Used by default.\n",
"\n",
" .. warning::\n",
" A good graphics or dedicated accelerator card is highly\n",
" recommended! Special cases as wigglers by the undulator code,\n",
" near field, wide angles and tapering are hardly doable on CPU.\n",
"\n",
" .. note::\n",
" Consider the :ref:`warnings and tips <usage_GPU_warnings>` on\n",
" using xrt with GPUs.\n",
"\n",
" *precisionOpenCL*: 'float32' or 'float64', only for GPU.\n",
" Single precision (float32) should be enough in most cases. The\n",
" calculations with doube precision are much slower. Double precision\n",
" may be unavailable on your system.\n",
"\n",
" *pitch*, *yaw*: float\n",
" rotation angles around x and z axis. Useful for canted sources.\n",
"\n",
"\n",
" \"\"\"\n",
" self.bl = bl\n",
" if bl is not None:\n",
" if self not in bl.sources:\n",
" bl.sources.append(self)\n",
" self.ordinalNum = len(bl.sources)\n",
" raycing.set_name(self, name)\n",
"# if name in [None, 'None', '']:\n",
"# self.name = '{0}{1}'.format(self.__class__.__name__,\n",
"# self.ordinalNum)\n",
"# else:\n",
"# self.name = name\n",
"\n",
" self.center = center # 3D point in global system\n",
" self.nrays = np.long(nrays)\n",
" self.gp = gp\n",
" self.dx = eSigmaX * 1e-3 if eSigmaX else None\n",
" self.dz = eSigmaZ * 1e-3 if eSigmaZ else None\n",
" self.eEpsilonX = eEpsilonX * 1e-6\n",
" self.eEpsilonZ = eEpsilonZ * 1e-6\n",
" self.Ee = float(eE)\n",
" self.eEspread = eEspread\n",
" self.I0 = float(eI)\n",
" self.eMin = float(eMin)\n",
" self.eMax = float(eMax)\n",
" if bl is not None:\n",
" if self.bl.flowSource != 'Qook':\n",
" bl.oesDict[self.name] = [self, 0]\n",
" xPrimeMax = raycing.auto_units_angle(xPrimeMax) * 1e3 if\\\n",
" isinstance(xPrimeMax, raycing.basestring) else xPrimeMax\n",
" zPrimeMax = raycing.auto_units_angle(zPrimeMax) * 1e3 if\\\n",
" isinstance(zPrimeMax, raycing.basestring) else zPrimeMax\n",
" self.xPrimeMax = xPrimeMax * 1e-3 # if xPrimeMax else None\n",
" self.zPrimeMax = zPrimeMax * 1e-3 # if zPrimeMax else None\n",
" self.betaX = betaX * 1e3\n",
" self.betaZ = betaZ * 1e3\n",
" self.eN = eN + 1\n",
" self.nx = 2*nx + 1\n",
" self.nz = 2*nz + 1\n",
" self.xs = np.linspace(-self.xPrimeMax, self.xPrimeMax, self.nx)\n",
" self.zs = np.linspace(-self.zPrimeMax, self.zPrimeMax, self.nz)\n",
" self.energies = np.linspace(eMin, eMax, self.eN)\n",
" self.distE = distE\n",
" self.uniformRayDensity = uniformRayDensity\n",
" self.filamentBeam = filamentBeam\n",
" self.pitch = raycing.auto_units_angle(pitch)\n",
" self.yaw = raycing.auto_units_angle(yaw)\n",
" self.gIntervals = gIntervals\n",
" self.L0 = period\n",
" self.R0 = R0 if R0 is None else R0 + self.L0*0.25\n",
" self.nRK = nRK\n",
" self.trajectory = None\n",
" fullLength = False # NOTE maybe a future input parameter\n",
" self.full = fullLength\n",
" if fullLength:\n",
" # self.filamentBeam = True\n",
" self.theta0 = 0\n",
" self.psi0 = 0\n",
"\n",
" self.cl_ctx = None\n",
" if (self.R0 is not None):\n",
" precisionOpenCL = 'float64'\n",
" if targetOpenCL is not None:\n",
" if not isOpenCL:\n",
" print(\"pyopencl is not available!\")\n",
" else:\n",
" self.ucl = mcl.XRT_CL(\n",
" r'undulator.cl', targetOpenCL, precisionOpenCL)\n",
" if self.ucl.lastTargetOpenCL is not None:\n",
" self.cl_precisionF = self.ucl.cl_precisionF\n",
" self.cl_precisionC = self.ucl.cl_precisionC\n",
" self.cl_queue = self.ucl.cl_queue\n",
" self.cl_ctx = self.ucl.cl_ctx\n",
" self.cl_program = self.ucl.cl_program\n",
" self.cl_mf = self.ucl.cl_mf\n",
" self.cl_is_blocking = self.ucl.cl_is_blocking\n",
"\n",
"# self.mode = 1\n",
"\n",
" if (self.dx is None) and (self.betaX is not None):\n",
" self.dx = np.sqrt(self.eEpsilonX*self.betaX)\n",
" elif (self.dx is None) and (self.betaX is None):\n",
" print(\"Set either dx or betaX!\")\n",
" if (self.dz is None) and (self.betaZ is not None):\n",
" self.dz = np.sqrt(self.eEpsilonZ*self.betaZ)\n",
" elif (self.dz is None) and (self.betaZ is None):\n",
" print(\"Set either dz or betaZ!\")\n",
" dxprime, dzprime = None, None\n",
" if dxprime:\n",
" self.dxprime = dxprime\n",
" else:\n",
" self.dxprime = self.eEpsilonX / self.dx if self.dx > 0\\\n",
" else 0. # [rad]\n",
" if dzprime:\n",
" self.dzprime = dzprime\n",
" else:\n",
" self.dzprime = self.eEpsilonZ / self.dz if self.dz > 0\\\n",
" else 0. # [rad]\n",
" if raycing._VERBOSITY_ > 10:\n",
" print('dx = {0} mm'.format(self.dx))\n",
" print('dz = {0} mm'.format(self.dz))\n",
" print('dxprime = {0} rad'.format(self.dxprime))\n",
" print('dzprime = {0} rad'.format(self.dzprime))\n",
" self.gamma = self.Ee * 1e9 * EV2ERG / (M0 * C**2)\n",
" self.gamma2 = self.gamma**2\n",
"\n",
" if targetE is not None:\n",
" K = np.sqrt(targetE[1] * 8 * PI * C * 10 * self.gamma2 /\n",
" period / targetE[0] / E2W - 2)\n",
" if raycing._VERBOSITY_ > 10:\n",
" print(\"K = {0}\".format(K))\n",
" if np.isnan(K):\n",
" raise ValueError(\"Cannot calculate K, try to increase the \"\n",
" \"undulator harmonic number\")\n",
" if len(targetE) > 2:\n",
" isElliptical = targetE[2]\n",
" if isElliptical:\n",
" Kx = Ky = K / 2**0.5\n",
" if raycing._VERBOSITY_ > 10:\n",
" print(\"Kx = Ky = {0}\".format(Kx))\n",
"\n",
" self.Kx = Kx\n",
" self.Ky = Ky\n",
" self.K = K\n",
" phaseDeg = np.degrees(raycing.auto_units_angle(phaseDeg)) if\\\n",
" isinstance(phaseDeg, raycing.basestring) else phaseDeg\n",
" self.phase = np.radians(phaseDeg)\n",
"\n",
" self.Np = n\n",
"\n",
" if taper is not None:\n",
" self.taper = taper[0] / self.Np / self.L0 / taper[1]\n",
" self.gap = taper[1]\n",
" else:\n",
" self.taper = None\n",
" if self.Kx == 0 and self.Ky == 0:\n",
" self.Ky = self.K\n",
" self._initialK = self.K\n",
"\n",
" self.B0x = K2B * self.Kx / self.L0\n",
" self.B0y = K2B * self.Ky / self.L0\n",
" self.customField = customField\n",
"\n",
" if customField is not None:\n",
" self.gIntervals *= 2\n",
" if isinstance(customField, (tuple, list)):\n",
" fname = customField[0]\n",
" kwargs = customField[1]\n",
" elif isinstance(customField, (float, int)):\n",
" fname = None\n",
" self.customFieldData = customField\n",
" if customField > 0:\n",
" betaL = 2 * M0*C**2*self.gamma / customField / E0 / 1e6\n",
" print(\"Larmor betatron function = {0} m\".format(betaL))\n",
" else:\n",
" fname = customField\n",
" kwargs = {}\n",
" if fname:\n",
" self.customFieldData = self.read_custom_field(fname, kwargs)\n",
" else:\n",
" self.customFieldData = None\n",
"\n",
" self.xPrimeMaxAutoReduce = xPrimeMaxAutoReduce\n",
" if xPrimeMaxAutoReduce:\n",
" xPrimeMaxTmp = self.Ky / self.gamma\n",
" if self.xPrimeMax > xPrimeMaxTmp:\n",
" print(\"Reducing xPrimeMax from {0} down to {1} mrad\".format(\n",
" self.xPrimeMax * 1e3, xPrimeMaxTmp * 1e3))\n",
" self.xPrimeMax = xPrimeMaxTmp\n",
" self.zPrimeMaxAutoReduce = zPrimeMaxAutoReduce\n",
" if zPrimeMaxAutoReduce:\n",
" K0 = self.Kx if self.Kx > 0 else 1.\n",
" zPrimeMaxTmp = K0 / self.gamma\n",
" if self.zPrimeMax > zPrimeMaxTmp:\n",
" print(\"Reducing zPrimeMax from {0} down to {1} mrad\".format(\n",
" self.zPrimeMax * 1e3, zPrimeMaxTmp * 1e3))\n",
" self.zPrimeMax = zPrimeMaxTmp\n",
"\n",
" self.reset()\n",
"\n",
" def read_custom_field(self, fname, kwargs={}):\n",
" def my_sin(x, a, k, ph, c):\n",
" return a * np.cos(k * x + ph) + c\n",
" from scipy.optimize import curve_fit\n",
"\n",
" if fname.endswith('.xls') or fname.endswith('.xlsx'):\n",
" import pandas\n",
" kwargs = dict(kwargs, engine=\"openpyxl\")\n",
" try:\n",
" data = pandas.read_excel(fname, **kwargs).values\n",
" except ValueError as e:\n",
" print(e)\n",
" if 'openpyxl' in str(e):\n",
" print('install it as `pip install openpyxl`')\n",
" raise e\n",
" else:\n",
" data = np.loadtxt(fname)\n",
"\n",
" datalen4 = data.shape[0] // 10\n",
" minBx = data[datalen4:-datalen4, 1].min()\n",
" maxBx = data[datalen4:-datalen4, 1].max()\n",
" minBy = data[datalen4:-datalen4, 2].min()\n",
" maxBy = data[datalen4:-datalen4, 2].max()\n",
" p0 = [(maxBx-minBx)/2., PI2/self.L0, 0.3, 1e-4]\n",
" poptx, pcovx = curve_fit(my_sin,\n",
" data[datalen4:-datalen4, 0],\n",
" data[datalen4:-datalen4, 1],\n",
" p0=p0)\n",
" if poptx[0] < 0:\n",
" poptx[0] *= -1\n",
" poptx[2] += PI\n",
" p0 = [(maxBy-minBy)/2., PI2/self.L0, 0.3, 1e-4]\n",
" popty, pcovy = curve_fit(my_sin,\n",
" data[datalen4:-datalen4, 0],\n",
" data[datalen4:-datalen4, 2],\n",
" p0=p0)\n",
" if popty[0] < 0:\n",
" popty[0] *= -1\n",
" popty[2] += PI\n",
" print(poptx)\n",
" print(popty)\n",
" B0x = poptx[0]\n",
" B0y = popty[0]\n",
" Kx = B0x * self.L0 / K2B\n",
" Ky = B0y * self.L0 / K2B\n",
" lambdaUx = PI2 / poptx[1]\n",
" lambdaUy = PI2 / popty[1]\n",
" phase = poptx[2] - popty[2]\n",
" phaseDeg = phase / PI * 180\n",
" print(\"field data in {0}:\".format(fname))\n",
" print(\"B0x={0:.3f}T, B0y={1:.3f}T\".format(B0x, B0y))\n",
" print(\"Kx={0:.3f}, Ky={1:.3f}\".format(Kx, Ky))\n",
" print(u\"λ_Ux={0:.3f}mm, λ_Uy={1:.3f}mm\".format(lambdaUx, lambdaUy))\n",
" print(u\"phase difference = {0:.3f}deg\".format(phaseDeg))\n",
" return data\n",
"\n",
" def magnetic_field(self, z): # 'z' in radians\n",
" if isinstance(self.customField, (float, int)):\n",
" Bx = self.B0x * np.sin(z + self.phase)\n",
" By = self.B0y * np.sin(z)\n",
" Bz = self.customFieldData * np.ones_like(z)\n",
" else:\n",
" dataz = self.customFieldData[:, 0] / self.L0 * PI2\n",
" Bx = np.interp(z, dataz, self.customFieldData[:, 1])\n",
" By = np.interp(z, dataz, self.customFieldData[:, 2])\n",
" if self.customFieldData.shape[1] > 3:\n",
" Bz = np.interp(z, dataz, self.customFieldData[:, 3])\n",
" else:\n",
" Bz = np.zeros_like(Bx)\n",
" return (Bx, By, Bz)\n",
"\n",
" def reset(self):\n",
" \"\"\"This method must be invoked after any changes in the undulator\n",
" parameters.\"\"\"\n",
" if self._initialK != self.K: # self.K was modified externally\n",
" self.Ky = self.K\n",
" self._initialK = self.K\n",
"\n",
" self.wu = PI * (0.01 * C) / self.L0 / 1e-3 / self.gamma2 * \\\n",
" (2*self.gamma2 - 1 - 0.5*self.Kx**2 - 0.5*self.Ky**2) / E2W\n",
" # wnu = 2 * PI * (0.01 * C) / self.L0 / 1e-3 / E2W\n",
" self.E1 = 2*self.wu*self.gamma2 / (1 + 0.5*self.Kx**2 + 0.5*self.Ky**2)\n",
" if raycing._VERBOSITY_ > 10:\n",
" print(\"E1 = {0}\".format(self.E1))\n",
" print(\"E3 = {0}\".format(3*self.E1))\n",
" print(\"B0 = {0}\".format(self.Ky / 0.09336 / self.L0))\n",
" if self.taper is not None:\n",
" print(\"dB/dx/B = {0}\".format(\n",
" -PI * self.gap * self.taper / self.L0 * 1e3))\n",
" mE = self.eN\n",
" mTheta = self.nx\n",
" mPsi = self.nz\n",
"\n",
" if not self.xPrimeMax:\n",
" print(\"No Theta range specified, using default 1 mrad\")\n",
" self.xPrimeMax = 1e-3\n",
"\n",
" self.Theta_min = -float(self.xPrimeMax)\n",
" self.Theta_max = float(self.xPrimeMax)\n",
" self.Psi_min = -float(self.zPrimeMax)\n",
" self.Psi_max = float(self.zPrimeMax)\n",
"\n",
" self.energies = np.linspace(self.eMin, self.eMax, self.eN)\n",
" self.E_min = float(np.min(self.energies))\n",
" self.E_max = float(np.max(self.energies))\n",
"\n",
" self.dE = (self.E_max - self.E_min) / float(mE - 1)\n",
" self.dTheta = (self.Theta_max - self.Theta_min) / float(mTheta - 1)\n",
" self.dPsi = (self.Psi_max - self.Psi_min) / float(mPsi - 1)\n",
"\n",
" \"\"\"Adjusting the number of points for Gauss integration\"\"\"\n",
" # self.gp = 1\n",
" quad_int_error = self.gp * 10.\n",
" self.quadm = 0\n",
" tmpeEspread = self.eEspread\n",
" self.eEspread = 0\n",
" mstart = 5\n",
" m = mstart\n",
" while quad_int_error >= self.gp:\n",
" m += 1\n",
" self.quadm = int(1.5**m)\n",
" if self.cl_ctx is not None:\n",
" # sE = np.linspace(self.E_min, self.E_max, self.eN)\n",
" sE = self.E_max * np.ones(1)\n",
" sTheta_max = self.Theta_max * np.ones(1)\n",
" sPsi_max = self.Psi_max * np.ones(1)\n",
" In = self.build_I_map(sE, sTheta_max, sPsi_max)[0][0]\n",
" else:\n",
" In = self.build_I_map(\n",
" self.E_max, self.Theta_max, self.Psi_max)[0]\n",
" if m == mstart+1:\n",
" I2 = In\n",
" continue\n",
" else:\n",
" I1 = I2\n",
" I2 = In\n",
" quad_int_error = np.abs((I2 - I1)/I2)\n",
" if raycing._VERBOSITY_ > 10:\n",
" print(\"G = {0}\".format(\n",
" [self.gIntervals, self.quadm, quad_int_error, I2,\n",
" 2-self.ag_n.sum()]))\n",
" if self.quadm > 400:\n",
" self.gIntervals *= 2\n",
" m = mstart\n",
" quad_int_error = self.gp * 10.\n",
" if self.gIntervals > 100:\n",
" break\n",
" continue\n",
" \"\"\"end of Adjusting the number of points for Gauss integration\"\"\"\n",
" self.eEspread = tmpeEspread\n",
" if raycing._VERBOSITY_ > 10:\n",
" print(\"Done with Gaussian optimization, {0} points will be used\"\n",
" \" in {1} interval{2}\".format(\n",
" self.quadm, self.gIntervals,\n",
" 's' if self.gIntervals > 1 else ''))\n",
"\n",
" if self.filamentBeam:\n",
" rMax = self.nrays\n",
" rE = np.random.uniform(self.E_min, self.E_max, rMax)\n",
" rTheta = np.random.uniform(self.Theta_min, self.Theta_max, rMax)\n",
" rPsi = np.random.uniform(self.Psi_min, self.Psi_max, rMax)\n",
" tmpEspread = self.eEspread\n",
" self.eEspread = 0\n",
" DistI = self.build_I_map(rE, rTheta, rPsi)[0]\n",
" self.Imax = np.max(DistI) * 1.2\n",
" self.nrepmax = np.floor(rMax / len(np.where(\n",
" self.Imax * np.random.rand(rMax) < DistI)[0]))\n",
" self.eEspread = tmpEspread\n",
" else:\n",
" self.Imax = 0.\n",
" \"\"\"Preparing to calculate the total flux integral\"\"\"\n",
" self.xzE = (self.E_max - self.E_min) *\\\n",
" (self.Theta_max - self.Theta_min) *\\\n",
" (self.Psi_max - self.Psi_min)\n",
" self.fluxConst = self.Imax * self.xzE\n",
"\n",
" def prefix_save_name(self):\n",
" if self.Kx > 0:\n",
" return '4-elu-xrt'\n",
" else:\n",
" return '1-und-xrt'\n",
"\n",
" def tuning_curves(self, energy, theta, psi, harmonics, Ks):\n",
" \"\"\"Calculates *tuning curves* -- maximum flux of given *harmonics* at\n",
" given K values (*Ks*). The flux is calculated through the aperture\n",
" defined by *theta* and *psi* opening angles.\n",
"\n",
" Returns two 2D arrays: energy positions and flux values. The rows\n",
" correspond to *Ks*, the columns correspond to *harmonics*.\n",
" \"\"\"\n",
" try:\n",
" dtheta, dpsi = theta[1] - theta[0], psi[1] - psi[0]\n",
" except TypeError:\n",
" dtheta, dpsi = 1, 1\n",
" tunesE, tunesF = [], []\n",
" tmpKy = self.Ky\n",
" for iK, K in enumerate(Ks):\n",
" if raycing._VERBOSITY_ > 10:\n",
" print(\"K={0}, {1} of {2}\".format(K, iK+1, len(Ks)))\n",
" self.Ky = K\n",
" self.reset()\n",
" I0 = self.intensities_on_mesh(energy, theta, psi, harmonics)[0]\n",
" flux = I0.sum(axis=(1, 2)) * dtheta * dpsi\n",
" argm = np.argmax(flux, axis=0)\n",
" fluxm = np.max(flux, axis=0)\n",
" tunesE.append(energy[argm] / 1000.)\n",
" tunesF.append(fluxm)\n",
" self.Ky = tmpKy\n",
" self.reset()\n",
" return np.array(tunesE).T, np.array(tunesF).T\n",
"\n",
" def power_vs_K(self, energy, theta, psi, harmonics, Ks):\n",
" \"\"\"Calculates *power curve* -- total power in W for all *harmonics*\n",
" at given K values (*Ks*). The power is calculated through the aperture\n",
" defined by *theta* and *psi* opening angles within the *energy* range.\n",
"\n",
" The result of this numerical integration depends on the used angular\n",
" and energy meshes; you should check convergence. Internally, electron\n",
" beam energy spread is also sampled by adding another dimension to the\n",
" intensity array and making it 5-dimensional. You therefore may want to\n",
" set energy spread to zero, it doesn’t affect the resulting power\n",
" anyway.\n",
"\n",
" Returns a 1D array corresponding to *Ks*.\n",
" \"\"\"\n",
" try:\n",
" dtheta, dpsi, dE = \\\n",
" theta[1] - theta[0], psi[1] - psi[0], energy[1] - energy[0]\n",
" except TypeError:\n",
" dtheta, dpsi, dE = 1, 1, 1\n",
" tmpKy = self.Ky\n",
" powers = []\n",
" for iK, K in enumerate(Ks):\n",
" if raycing._VERBOSITY_ > 10:\n",
" print(\"K={0}, {1} of {2}\".format(K, iK+1, len(Ks)))\n",
" self.Ky = K\n",
" self.reset()\n",
" I0 = self.intensities_on_mesh(energy, theta, psi, harmonics)[0]\n",
" if self.distE == 'BW':\n",
" I0 *= 1e3\n",
" else: # 'eV'\n",
" I0 *= energy[:, np.newaxis, np.newaxis, np.newaxis]\n",
" power = I0.sum() * dtheta * dpsi * dE * EV2ERG * 1e-7 # [W]\n",
" powers.append(power)\n",
" self.Ky = tmpKy\n",
" self.reset()\n",
" return np.array(powers)\n",
"\n",
" def multi_electron_stack(self, energy='auto', theta='auto', psi='auto',\n",
" harmonic=None, withElectronDivergence=True):\n",
" \"\"\"Returns Es and Ep in the shape (energy, theta, psi, [harmonic]).\n",
" Along the 0th axis (energy) are stored \"macro-electrons\" that emit at\n",
" the photon energy given by *energy* (constant or variable) onto the\n",
" angular mesh given by *theta* and *psi*. The transverse field from each\n",
" macro-electron gets individual random angular offsets dtheta and dpsi\n",
" within the emittance distribution if *withElectronDivergence* is True\n",
" and an individual random shift to gamma within the energy spread.\n",
" The parameter self.filamentBeam is irrelevant for this method.\"\"\"\n",
" if isinstance(energy, str): # i.e. if 'auto'\n",
" energy = np.mgrid[self.E_min:self.E_max + 0.5*self.dE:self.dE]\n",
" nmacroe = 1 if len(np.array(energy).shape) == 0 else len(energy)\n",
"\n",
" if isinstance(theta, str):\n",
" theta = np.mgrid[\n",
" self.Theta_min:self.Theta_max + 0.5*self.dTheta:self.dTheta]\n",
"\n",
" if isinstance(psi, str):\n",
" psi = np.mgrid[self.Psi_min:self.Psi_max + 0.5*self.dPsi:self.dPsi]\n",
"\n",
" if harmonic is None:\n",
" xH = None\n",
" tomesh = energy, theta, psi\n",
" else:\n",
" tomesh = energy, theta, psi, harmonic\n",
" mesh = np.meshgrid(*tomesh, indexing='ij')\n",
" if withElectronDivergence and self.dxprime > 0:\n",
" dthe = np.random.normal(0, self.dxprime, nmacroe)\n",
" if harmonic is None:\n",
" mesh[1][:, ...] += dthe[:, np.newaxis, np.newaxis]\n",
" else:\n",
" mesh[1][:, ...] += dthe[:, np.newaxis, np.newaxis, np.newaxis]\n",
" if withElectronDivergence and self.dzprime > 0:\n",
" dpsi = np.random.normal(0, self.dzprime, nmacroe)\n",
" if harmonic is None:\n",
" mesh[2][:, ...] += dpsi[:, np.newaxis, np.newaxis]\n",
" else:\n",
" mesh[2][:, ...] += dpsi[:, np.newaxis, np.newaxis, np.newaxis]\n",
"\n",
" if self.eEspread > 0:\n",
" spr = np.random.normal(0, self.eEspread, nmacroe) * self.gamma\n",
" dgamma = np.zeros_like(mesh[0])\n",
" if harmonic is None:\n",
" dgamma[:, ...] = spr[:, np.newaxis, np.newaxis]\n",
" else:\n",
" dgamma[:, ...] = spr[:, np.newaxis, np.newaxis, np.newaxis]\n",
" xdGamma = dgamma.ravel()\n",
" else:\n",
" xdGamma = 0\n",
"\n",
" xE, xTheta, xPsi = mesh[0].ravel(), mesh[1].ravel(), mesh[2].ravel()\n",
" if harmonic is not None:\n",
" xH = mesh[3].ravel()\n",
"\n",
" if harmonic is None:\n",
" sh = nmacroe, len(theta), len(psi)\n",
" else:\n",
" sh = nmacroe, len(theta), len(psi), len(harmonic)\n",
" res = self.build_I_map(xE, xTheta, xPsi, xH, xdGamma)\n",
" Es = res[1].reshape(sh)\n",
" Ep = res[2].reshape(sh)\n",
" return Es, Ep\n",
"\n",
" def intensities_on_mesh(self, energy='auto', theta='auto', psi='auto',\n",
" harmonic=None):\n",
" \"\"\"Returns the Stokes parameters in the shape (energy, theta, psi,\n",
" [harmonic]), with *theta* being the horizontal mesh angles and *psi*\n",
" the vertical mesh angles. Each one of the input parameters is a 1D\n",
" array of an individually selectable length.\n",
"\n",
" .. note::\n",
" We do not provide any internal mesh optimization, as mesh functions\n",
" are not our core objectives. In particular, the angular meshes must\n",
" be wider than the electron beam divergences in order to convolve the\n",
" field distribution with the electron distribution. A warning will be\n",
" printed (new in version 1.3.4) if the requested meshes are too\n",
" narrow.\n",
"\n",
" \"\"\"\n",
" if isinstance(energy, str): # i.e. if 'auto'\n",
" energy = np.mgrid[self.E_min:self.E_max + 0.5*self.dE:self.dE]\n",
"\n",
" if isinstance(theta, str):\n",
" theta = np.mgrid[\n",
" self.Theta_min:self.Theta_max + 0.5*self.dTheta:self.dTheta]\n",
"\n",
" if isinstance(psi, str):\n",
" psi = np.mgrid[self.Psi_min:self.Psi_max + 0.5*self.dPsi:self.dPsi]\n",
"\n",
" tomesh = [energy, theta, psi]\n",
" if harmonic is not None:\n",
" tomesh.append(harmonic)\n",
" iharmonic = len(tomesh)-1\n",
" else:\n",
" iharmonic = None\n",
" if self.eEspread > 0:\n",
" spr = np.linspace(-3.5, 3.5, 36)\n",
" dgamma = self.gamma * spr * self.eEspread\n",
" wspr = np.exp(-0.5 * spr**2)\n",
" wspr /= wspr.sum()\n",
" tomesh.append(dgamma)\n",
" ispread = len(tomesh)-1\n",
" else:\n",
" ispread = None\n",
"\n",
" mesh = np.meshgrid(*tomesh, indexing='ij')\n",
" xE, xTheta, xPsi = mesh[0].ravel(), mesh[1].ravel(), mesh[2].ravel()\n",
" sh = [len(energy), len(theta), len(psi)]\n",
" if iharmonic:\n",
" xH = mesh[iharmonic].ravel()\n",
" sh.append(len(harmonic))\n",
" else:\n",
" xH = None\n",
" if ispread:\n",
" xG = mesh[ispread].ravel()\n",
" sh.append(len(dgamma))\n",
" else:\n",
" xG = None\n",
"\n",
" res = self.build_I_map(xE, xTheta, xPsi, xH, xG)\n",
" Es = res[1].reshape(sh)\n",
" Ep = res[2].reshape(sh)\n",
" if ispread:\n",
" if iharmonic:\n",
" ws = wspr[np.newaxis, np.newaxis, np.newaxis, np.newaxis, :]\n",
" else:\n",
" ws = wspr[np.newaxis, np.newaxis, np.newaxis, :]\n",
" Is = ((Es*np.conj(Es)).real * ws).sum(axis=ispread)\n",
" Ip = ((Ep*np.conj(Ep)).real * ws).sum(axis=ispread)\n",
" Isp = (Es*np.conj(Ep) * ws).sum(axis=ispread)\n",
" else:\n",
" Is = (Es*np.conj(Es)).real\n",
" Ip = (Ep*np.conj(Ep)).real\n",
" Isp = Es*np.conj(Ep)\n",
" self.Is = Is.astype(float)\n",
" self.Ip = Ip.astype(float)\n",
" self.Isp = Isp.astype(complex)\n",
"\n",
" s0 = self.Is + self.Ip\n",
" s1 = self.Is - self.Ip\n",
" s2 = 2. * np.real(self.Isp)\n",
" s3 = -2. * np.imag(self.Isp)\n",
"\n",
" if (self.dxprime > 0 or self.dzprime > 0) and \\\n",
" len(theta) > 1 and len(psi) > 1:\n",
" from scipy.ndimage import gaussian_filter\n",
" Sx = self.dxprime / (theta[1] - theta[0])\n",
" Sz = self.dzprime / (psi[1] - psi[0])\n",
"# print(self.dxprime, theta[-1] - theta[0], Sx, len(theta))\n",
"# print(self.dzprime, psi[-1] - psi[0], Sz, len(psi))\n",
" if Sx > len(theta)//4: # ±2σ\n",
" print(\"************* Warning ***********************\")\n",
" print(\"Your theta mesh is too narrow!\")\n",
" print(\"It must be wider than the electron beam width\")\n",
" print(\"*********************************************\")\n",
" if self.xPrimeMax < theta.max():\n",
" print(\"************* Warning ****************************\")\n",
" print(\"Your xPrimeMax is too small!\")\n",
" print(\"It must be bigger than theta.max()\")\n",
" if self.xPrimeMaxAutoReduce:\n",
" print(\"You probably need to set xPrimeMaxAutoReduce=False\")\n",
" print(\"**************************************************\")\n",
" if Sz > len(psi)//4: # ±2σ\n",
" print(\"************* Warning ************************\")\n",
" print(\"Your psi mesh is too narrow!\")\n",
" print(\"It must be wider than the electron beam height\")\n",
" print(\"**********************************************\")\n",
" if self.zPrimeMax < psi.max():\n",
" print(\"************* Warning ****************************\")\n",
" print(\"Your zPrimeMax is too small!\")\n",
" print(\"It must be bigger than psi.max()\")\n",
" if self.zPrimeMaxAutoReduce:\n",
" print(\"You probably need to set zPrimeMaxAutoReduce=False\")\n",
" print(\"**************************************************\")\n",
" for ie, ee in enumerate(energy):\n",
" if harmonic is None:\n",
" s0[ie, :, :] = gaussian_filter(s0[ie, :, :], [Sx, Sz])\n",
" s1[ie, :, :] = gaussian_filter(s1[ie, :, :], [Sx, Sz])\n",
" s2[ie, :, :] = gaussian_filter(s2[ie, :, :], [Sx, Sz])\n",
" s3[ie, :, :] = gaussian_filter(s3[ie, :, :], [Sx, Sz])\n",
" else:\n",
" for ih, hh in enumerate(harmonic):\n",
" s0[ie, :, :, ih] = gaussian_filter(\n",
" s0[ie, :, :, ih], [Sx, Sz])\n",
" s1[ie, :, :, ih] = gaussian_filter(\n",
" s1[ie, :, :, ih], [Sx, Sz])\n",
" s2[ie, :, :, ih] = gaussian_filter(\n",
" s2[ie, :, :, ih], [Sx, Sz])\n",
" s3[ie, :, :, ih] = gaussian_filter(\n",
" s3[ie, :, :, ih], [Sx, Sz])\n",
"\n",
" with np.errstate(divide='ignore'):\n",
" return (s0,\n",
" np.where(s0, s1 / s0, s0),\n",
" np.where(s0, s2 / s0, s0),\n",
" np.where(s0, s3 / s0, s0))\n",
"\n",
" def _sp(self, dim, x, ww1, w, wu, gamma, ddphi, ddpsi):\n",
" lengamma = 1 if len(np.array(gamma).shape) == 0 else len(gamma)\n",
" gS = gamma\n",
" if dim == 0:\n",
" ww1S = ww1\n",
" wS, wuS = w, wu\n",
" ddphiS = ddphi\n",
" ddpsiS = ddpsi\n",
" elif dim == 1:\n",
" ww1S = ww1[:, np.newaxis]\n",
" wS = w[:, np.newaxis]\n",
" wuS = wu[:, np.newaxis]\n",
" ddphiS = ddphi[:, np.newaxis]\n",
" ddpsiS = ddpsi[:, np.newaxis]\n",
" if lengamma > 1:\n",
" gS = gamma[:, np.newaxis]\n",
" elif dim == 3:\n",
" ww1S = ww1[:, :, :, np.newaxis]\n",
" wS, wuS = w[:, :, :, np.newaxis], wu[:, :, :, np.newaxis]\n",
" ddphiS = ddphi[:, :, :, np.newaxis]\n",
" ddpsiS = ddpsi[:, :, :, np.newaxis]\n",
" if lengamma > 1:\n",
" gS = gamma[:, :, :, np.newaxis]\n",
" taperC = 1\n",
" alphaS = 0\n",
" sinx = np.sin(x)\n",
" cosx = np.cos(x)\n",
" sin2x = 2*sinx*cosx\n",
" if self.taper is not None:\n",
" alphaS = self.taper * C * 10 / E2W\n",
" taperC = 1 - alphaS * x / wuS\n",
" ucos = ww1S * x +\\\n",
" wS / gS / wuS *\\\n",
" (-self.Ky * ddphiS * (sinx + alphaS / wuS *\n",
" (1 - cosx - x * sinx)) +\n",
" self.Kx * ddpsiS * np.sin(x + self.phase) +\n",
" 0.125 / gS *\n",
" (self.Kx**2 * np.sin(2 * (x + self.phase)) +\n",
" self.Ky**2 * (sin2x - 2 * alphaS / wuS *\n",
" (x**2 + cosx**2 + x * sin2x))))\n",
" elif self.R0 is not None:\n",
" betam = 1 - (1 + 0.5 * self.Kx**2 + 0.5 * self.Ky**2) / 2. / gS**2\n",
" WR0 = self.R0 / 10 / C * E2W\n",
" ddphiS = -ddphiS\n",
" drx = WR0 * np.tan(ddphiS) - self.Ky / wuS / gS * sinx\n",
" dry = WR0 * np.tan(ddpsiS) + self.Kx / wuS / gS * np.sin(\n",
" x + self.phase)\n",
" drz = WR0 * np.cos(np.sqrt(ddphiS**2+ddpsiS**2)) -\\\n",
" betam * x / wuS + 0.125 / wuS / gS**2 *\\\n",
" (self.Ky**2 * sin2x +\n",
" self.Kx**2 * np.sin(2 * (x + self.phase)))\n",
" ucos = wS * (x / wuS + np.sqrt(drx**2 + dry**2 + drz**2))\n",
" else:\n",
" ucos = ww1S * x + wS / gS / wuS *\\\n",
" (-self.Ky * ddphiS * sinx +\n",
" self.Kx * ddpsiS * np.sin(x + self.phase) +\n",
" 0.125 / gS * (self.Ky**2 * sin2x +\n",
" self.Kx**2 * np.sin(2. * (x + self.phase))))\n",
"\n",
" nz = 1 - 0.5*(ddphiS**2 + ddpsiS**2)\n",
" betax = taperC * self.Ky / gS * cosx\n",
" betay = -self.Kx / gS * np.cos(x + self.phase)\n",
" betaz = 1 - 0.5*(1./gS**2 + betax**2 + betay**2)\n",
"\n",
" betaPx = -wuS * self.Ky / gS * (alphaS * cosx + taperC * sinx)\n",
" betaPy = wuS * self.Kx / gS * np.sin(x + self.phase)\n",
" betaPz = 0.5 * wuS / gS**2 *\\\n",
" (self.Ky**2 * taperC * (alphaS*cosx**2 + taperC * sin2x) +\n",
" self.Kx**2 * np.sin(2. * (x + self.phase)))\n",
" krel = 1. - ddphiS*betax - ddpsiS*betay - nz*betaz\n",
" eucos = np.exp(1j * ucos) / krel**2\n",
"\n",
" bnx = betax - ddphiS\n",
" bny = betay - ddpsiS\n",
" bnz = betaz - nz\n",
" primexy = betaPx*bny - betaPy*bnx\n",
"\n",
" return ((nz*(betaPx*bnz - betaPz*bnx) + ddpsiS*primexy) * eucos,\n",
" (nz*(betaPy*bnz - betaPz*bny) - ddphiS*primexy) * eucos)\n",
"\n",
" def build_I_map(self, w, ddtheta, ddpsi, harmonic=None, dg=None):\n",
" useCL = False\n",
" if isinstance(w, np.ndarray):\n",
" if w.shape[0] > 32:\n",
" useCL = True\n",
" if (self.cl_ctx is None) or not useCL:\n",
" return self._build_I_map_conv(w, ddtheta, ddpsi, harmonic, dg)\n",
" elif self.customField is not None:\n",
" return self._build_I_map_custom(w, ddtheta, ddpsi, harmonic, dg)\n",
" else:\n",
" return self._build_I_map_CL(w, ddtheta, ddpsi, harmonic, dg)\n",
"\n",
" def _build_I_map_conv(self, w, ddtheta, ddpsi, harmonic, dgamma=None):\n",
" # np.seterr(invalid='ignore')\n",
" # np.seterr(divide='ignore')\n",
" NRAYS = 1 if len(np.array(w).shape) == 0 else len(w)\n",
" gamma = self.gamma\n",
" if self.eEspread > 0:\n",
" if dgamma is not None:\n",
" gamma += dgamma\n",
" else:\n",
" sz = 1 if self.filamentBeam else NRAYS\n",
" gamma += gamma * self.eEspread * np.random.normal(size=sz)\n",
" gamma = gamma * np.ones(NRAYS)\n",
" gamma2 = gamma**2\n",
"\n",
" wu = PI * C * 10 / self.L0 / gamma2 * np.ones_like(w) *\\\n",
" (2*gamma2 - 1 - 0.5*self.Kx**2 - 0.5*self.Ky**2) / E2W\n",
" ww1 = w * ((1. + 0.5*self.Kx**2 + 0.5*self.Ky**2) +\n",
" gamma2 * (ddtheta**2 + ddpsi**2)) / (2. * gamma2 * wu)\n",
" tg_n, ag_n = np.polynomial.legendre.leggauss(self.quadm)\n",
" self.tg_n, self.ag_n = tg_n, ag_n\n",
"\n",
" if (self.taper is not None) or (self.R0 is not None):\n",
" AB = 1. / PI2 / wu\n",
" dstep = 2 * PI / float(self.gIntervals)\n",
" dI = np.arange(0.5 * dstep - PI * self.Np, PI * self.Np, dstep)\n",
" else:\n",
" AB = 1. / PI2 / wu * np.sin(PI * self.Np * ww1) / np.sin(PI * ww1)\n",
" dstep = 2 * PI / float(self.gIntervals)\n",
" dI = np.arange(-PI + 0.5 * dstep, PI, dstep)\n",
"\n",
" tg = (dI[:, None] + 0.5*dstep*tg_n).ravel() # + PI/2\n",
" ag = (dI[:, None]*0 + ag_n).ravel()\n",
" # Bsr = np.zeros_like(w, dtype='complex')\n",
" # Bpr = np.zeros_like(w, dtype='complex')\n",
" dim = len(np.array(w).shape)\n",
" sp3res = self._sp(dim, tg, ww1, w, wu, gamma, ddtheta, ddpsi)\n",
" Bsr = np.sum(ag * sp3res[0], axis=dim)\n",
" Bpr = np.sum(ag * sp3res[1], axis=dim)\n",
"\n",
" bwFact = 0.001 if self.distE == 'BW' else 1./w\n",
" Amp2Flux = FINE_STR * bwFact * self.I0 / SIE0\n",
"\n",
" if harmonic is not None:\n",
" Bsr[ww1 > harmonic+0.5] = 0\n",
" Bpr[ww1 > harmonic+0.5] = 0\n",
" Bsr[ww1 < harmonic-0.5] = 0\n",
" Bpr[ww1 < harmonic-0.5] = 0\n",
"\n",
" # np.seterr(invalid='warn')\n",
" # np.seterr(divide='warn')\n",
" return (Amp2Flux * AB**2 * 0.25 * dstep**2 *\n",
" (np.abs(Bsr)**2 + np.abs(Bpr)**2),\n",
" np.sqrt(Amp2Flux) * AB * Bsr * 0.5 * dstep,\n",
" np.sqrt(Amp2Flux) * AB * Bpr * 0.5 * dstep)\n",
"\n",
" def _build_I_map_custom(self, w, ddtheta, ddpsi, harmonic, dgamma=None):\n",
" # time1 = time.time()\n",
" NRAYS = 1 if len(np.array(w).shape) == 0 else len(w)\n",
" gamma = self.gamma\n",
" if self.eEspread > 0:\n",
" if dgamma is not None:\n",
" gamma += dgamma\n",
" else:\n",
" sz = 1 if self.filamentBeam else NRAYS\n",
" gamma += gamma * self.eEspread * np.random.normal(size=sz)\n",
" gamma = gamma * np.ones(NRAYS, dtype=self.cl_precisionF)\n",
" gamma2 = gamma**2\n",
"\n",
" wu = PI * C * 10 / self.L0 / gamma2 *\\\n",
" (2*gamma2 - 1 - 0.5*self.Kx**2 - 0.5*self.Ky**2) / E2W\n",
" ww1 = w * ((1. + 0.5 * self.Kx**2 + 0.5 * self.Ky**2) +\n",
" gamma2 * (ddtheta * ddtheta + ddpsi * ddpsi)) /\\\n",
" (2. * gamma2 * wu)\n",
" scalarArgs = [] # R0\n",
" R0 = self.R0 if self.R0 is not None else 0\n",
"\n",
" Np = np.int32(self.Np)\n",
"\n",
" tg_n, ag_n = np.polynomial.legendre.leggauss(self.quadm)\n",
" self.tg_n, self.ag_n = tg_n, ag_n\n",
"\n",
" ab = 1. / PI2 / wu\n",
" dstep = 2 * PI / float(self.gIntervals)\n",
" dI = np.arange(0.5 * dstep - PI * Np, PI * Np, dstep)\n",
"\n",
" tg = np.array([-PI*Np + PI/2.])\n",
" ag = [0]\n",
" tg = self.cl_precisionF(\n",
" np.concatenate((tg, (dI[:, None]+0.5*dstep*tg_n).ravel() + PI/2.)))\n",
" ag = self.cl_precisionF(np.concatenate(\n",
" (ag, (dI[:, None]*0+ag_n).ravel())))\n",
"\n",
" nwt = self.nRK\n",
" wtGrid = []\n",
" for itg in range(len(tg) - 1):\n",
" tmppr, tmpstp = np.linspace(tg[itg],\n",
" tg[itg+1],\n",
" 2*nwt,\n",
" endpoint=False, retstep=True)\n",
" wtGrid.extend(np.linspace(tg[itg],\n",
" tg[itg+1],\n",
" 2*nwt,\n",
" endpoint=False))\n",
" wtGrid.append(tg[-1])\n",
"\n",
" # print(\"Custom magnetic field: Bx={0}. By={1}, Bz={2}\".format(\n",
" # self.B0x, self.B0y, self.B0z))\n",
" Bx, By, Bz = self.magnetic_field(wtGrid)\n",
"\n",
" if self.filamentBeam:\n",
" emcg = self.L0 * SIE0 / SIM0 / C / 10. / gamma[0] / PI2\n",
" scalarArgsTraj = [np.int32(len(tg)), # jend\n",
" np.int32(nwt),\n",
" self.cl_precisionF(emcg),\n",
" self.cl_precisionF(gamma[0])]\n",
"\n",
" nonSlicedROArgs = [tg, # Gauss-Legendre grid\n",
" self.cl_precisionF(Bx), # Magnetic field\n",
" self.cl_precisionF(By), # components on the\n",
" self.cl_precisionF(Bz)] # Runge-Kutta grid\n",
"\n",
" nonSlicedRWArgs = [np.zeros_like(tg), # beta.x\n",
" np.zeros_like(tg), # beta.y\n",
" np.zeros_like(tg), # beta.z average\n",
" np.zeros_like(tg), # traj.x\n",
" np.zeros_like(tg), # traj.y\n",
" np.zeros_like(tg)] # traj.z\n",
"\n",
" clKernel = 'get_trajectory'\n",
"\n",
" betax, betay, betazav, trajx, trajy, trajz = self.ucl.run_parallel(\n",
" clKernel, scalarArgsTraj, None, nonSlicedROArgs,\n",
" None, nonSlicedRWArgs, 1)\n",
" self.beta = [betax, betay]\n",
" self.trajectory = [trajx[1:-1] * self.L0 / PI2,\n",
" trajy[1:-1] * self.L0 / PI2,\n",
" trajz[1:-1] * self.L0 / PI2]\n",
" wuAv = PI2 * C * 10. * betazav[-1] / self.L0 / E2W\n",
"\n",
" scalarArgsTest = [np.int32(len(tg)),\n",
" np.int32(nwt),\n",
" self.cl_precisionF(emcg),\n",
" self.cl_precisionF(gamma[0]**2),\n",
" self.cl_precisionF(wuAv),\n",
" self.cl_precisionF(self.L0),\n",
" self.cl_precisionF(R0)]\n",
"\n",
" slicedROArgs = [self.cl_precisionF(w), # Energy\n",
" self.cl_precisionF(ddtheta), # Theta\n",
" self.cl_precisionF(ddpsi)] # Psi\n",
"\n",
" nonSlicedROArgs = [tg, # Gauss-Legendre grid\n",
" ag, # Gauss-Legendre weights\n",
" self.cl_precisionF(Bx), # Magnetic field\n",
" self.cl_precisionF(By), # components on the\n",
" self.cl_precisionF(Bz), # Runge-Kutta grid\n",
" self.cl_precisionF(betax), # Components of the\n",
" self.cl_precisionF(betay), # velocity and\n",
" self.cl_precisionF(trajx), # trajectory of the\n",
" self.cl_precisionF(trajy), # electron on the\n",
" self.cl_precisionF(trajz)] # Gauss grid\n",
"\n",
" slicedRWArgs = [np.zeros(NRAYS, dtype=self.cl_precisionC), # Is\n",
" np.zeros(NRAYS, dtype=self.cl_precisionC)] # Ip\n",
"\n",
" clKernel = 'undulator_custom_filament'\n",
"\n",
" Is_local, Ip_local = self.ucl.run_parallel(\n",
" clKernel, scalarArgsTest, slicedROArgs, nonSlicedROArgs,\n",
" slicedRWArgs, None, NRAYS)\n",
" else:\n",
" scalarArgs.extend([np.int32(len(tg)), # jend\n",
" np.int32(nwt),\n",
" self.cl_precisionF(self.L0)])\n",
"\n",
" slicedROArgs = [self.cl_precisionF(gamma), # gamma\n",
" self.cl_precisionF(w), # Energy\n",
" self.cl_precisionF(ddtheta), # Theta\n",
" self.cl_precisionF(ddpsi)] # Psi\n",
"\n",
" nonSlicedROArgs = [tg, # Gauss-Legendre grid\n",
" ag, # Gauss-Legendre weights\n",
" self.cl_precisionF(Bx), # Magnetic field\n",
" self.cl_precisionF(By), # components on the\n",
" self.cl_precisionF(Bz)] # Runge-Kutta grid\n",
"\n",
" slicedRWArgs = [np.zeros(NRAYS, dtype=self.cl_precisionC), # Is\n",
" np.zeros(NRAYS, dtype=self.cl_precisionC)] # Ip\n",
"\n",
" clKernel = 'undulator_custom'\n",
"\n",
" Is_local, Ip_local = self.ucl.run_parallel(\n",
" clKernel, scalarArgs, slicedROArgs, nonSlicedROArgs,\n",
" slicedRWArgs, None, NRAYS)\n",
"\n",
" bwFact = 0.001 if self.distE == 'BW' else 1./w\n",
" Amp2Flux = FINE_STR * bwFact * self.I0 / SIE0\n",
"\n",
" if harmonic is not None:\n",
" Is_local[ww1 > harmonic+0.5] = 0\n",
" Ip_local[ww1 > harmonic+0.5] = 0\n",
" Is_local[ww1 < harmonic-0.5] = 0\n",
" Ip_local[ww1 < harmonic-0.5] = 0\n",
"\n",
" return (Amp2Flux * ab**2 * 0.25 * dstep**2 *\n",
" (np.abs(Is_local)**2 + np.abs(Ip_local)**2),\n",
" np.sqrt(Amp2Flux) * Is_local * ab * 0.5 * dstep,\n",
" np.sqrt(Amp2Flux) * Ip_local * ab * 0.5 * dstep)\n",
"\n",
" def _build_I_map_CL(self, w, ddtheta, ddpsi, harmonic, dgamma=None):\n",
" # time1 = time.time()\n",
" NRAYS = 1 if len(np.array(w).shape) == 0 else len(w)\n",
" gamma = self.gamma\n",
" if self.eEspread > 0:\n",
" if dgamma is not None:\n",
" gamma += dgamma\n",
" else:\n",
" sz = 1 if self.filamentBeam else NRAYS\n",
" gamma += gamma * self.eEspread * np.random.normal(size=sz)\n",
" gamma = gamma * np.ones(NRAYS, dtype=self.cl_precisionF)\n",
" gamma2 = gamma**2\n",
"\n",
" wu = PI * C * 10 / self.L0 / gamma2 *\\\n",
" (2*gamma2 - 1 - 0.5*self.Kx**2 - 0.5*self.Ky**2) / E2W\n",
" ww1 = w * ((1. + 0.5 * self.Kx**2 + 0.5 * self.Ky**2) +\n",
" gamma2 * (ddtheta * ddtheta + ddpsi * ddpsi)) /\\\n",
" (2. * gamma2 * wu)\n",
" scalarArgs = [self.cl_precisionF(0.)]\n",
"\n",
" if self.R0 is not None:\n",
" scalarArgs = [self.cl_precisionF(self.R0), # R0\n",
" self.cl_precisionF(self.L0)]\n",
" elif self.taper:\n",
" scalarArgs = [self.cl_precisionF(self.taper)]\n",
"\n",
" Np = np.int32(self.Np)\n",
"\n",
" tg_n, ag_n = np.polynomial.legendre.leggauss(self.quadm)\n",
" self.tg_n, self.ag_n = tg_n, ag_n\n",
"\n",
" dstep = 2 * PI / float(self.gIntervals)\n",
" if (self.taper is not None) or (self.R0 is not None) or self.full:\n",
" ab = 1. / PI2 / wu\n",
" dI = np.arange(0.5 * dstep - PI * Np, PI * Np, dstep)\n",
" else:\n",
" ab = 1. / PI2 / wu * np.sin(PI * Np * ww1) / np.sin(PI * ww1)\n",
" dI = np.arange(-PI + 0.5*dstep, PI, dstep)\n",
"\n",
" extra = PI/2*0\n",
" tg = self.cl_precisionF((dI[:, None]+0.5*dstep*tg_n).ravel()) + extra\n",
" ag = self.cl_precisionF((dI[:, None]*0+ag_n).ravel())\n",
"\n",
" scalarArgs.extend([self.cl_precisionF(self.Kx), # Kx\n",
" self.cl_precisionF(self.Ky), # Ky\n",
" self.cl_precisionF(self.phase), # phase\n",
" np.int32(len(tg))]) # jend\n",
"\n",
" slicedROArgs = [self.cl_precisionF(gamma), # gamma\n",
" self.cl_precisionF(wu), # Eund\n",
" self.cl_precisionF(w), # Energy\n",
" self.cl_precisionF(ww1), # Energy/Eund(0)\n",
" self.cl_precisionF(ddtheta), # Theta\n",
" self.cl_precisionF(ddpsi)] # Psi\n",
" if self.full:\n",
" if isinstance(self.theta0, np.ndarray):\n",
" slicedROArgs.extend([self.cl_precisionF(self.theta0),\n",
" self.cl_precisionF(self.psi0)])\n",
" else:\n",
" slicedROArgs.extend([self.cl_precisionF(\n",
" self.theta0*np.ones_like(w)),\n",
" self.cl_precisionF(\n",
" self.psi0*np.ones_like(w))])\n",
"\n",
" nonSlicedROArgs = [tg, # Gauss-Legendre grid\n",
" ag] # Gauss-Legendre weights\n",
"\n",
" slicedRWArgs = [np.zeros(NRAYS, dtype=self.cl_precisionC), # Is\n",
" np.zeros(NRAYS, dtype=self.cl_precisionC)] # Ip\n",
"\n",
" if self.taper is not None:\n",
" clKernel = 'undulator_taper'\n",
" elif self.R0 is not None:\n",
" clKernel = 'undulator_nf'\n",
" if self.full:\n",
" clKernel = 'undulator_nf_full'\n",
" elif self.full:\n",
" clKernel = 'undulator_full'\n",
" else:\n",
" clKernel = 'undulator'\n",
"\n",
" Is_local, Ip_local = self.ucl.run_parallel(\n",
" clKernel, scalarArgs, slicedROArgs, nonSlicedROArgs,\n",
" slicedRWArgs, dimension=NRAYS)\n",
"\n",
" bwFact = 0.001 if self.distE == 'BW' else 1./w\n",
" Amp2Flux = FINE_STR * bwFact * self.I0 / SIE0\n",
"\n",
" if harmonic is not None:\n",
" Is_local[ww1 > harmonic+0.5] = 0\n",
" Ip_local[ww1 > harmonic+0.5] = 0\n",
" Is_local[ww1 < harmonic-0.5] = 0\n",
" Ip_local[ww1 < harmonic-0.5] = 0\n",
"\n",
" # print(\"Build_I_Map completed in {0} s\".format(time.time() - time1))\n",
" return (Amp2Flux * ab**2 * 0.25 * dstep**2 *\n",
" (np.abs(Is_local)**2 + np.abs(Ip_local)**2),\n",
" np.sqrt(Amp2Flux) * Is_local * ab * 0.5 * dstep,\n",
" np.sqrt(Amp2Flux) * Ip_local * ab * 0.5 * dstep)\n",
"\n",
"# def _reportNaN(self, x, strName):\n",
"# nanSum = np.isnan(x).sum()\n",
"# if nanSum > 0:\n",
"# print(\"{0} NaN rays in {1}!\".format(nanSum, strName))\n",
"\n",
" def real_photon_source_sizes(\n",
" self, energy='auto', theta='auto', psi='auto', method='rms'):\n",
" \"\"\"Returns energy dependent arrays: flux, (dx')², (dz')², dx², dz².\n",
" Depending on *distE* being 'eV' or 'BW', the flux is either in ph/s or\n",
" in ph/s/0.1%BW, being integrated over the specified theta and psi\n",
" ranges. The squared angular and linear photon source sizes are\n",
" variances, i.e. squared sigmas. The latter two (linear sizes) are in\n",
" mm**2.\n",
" \"\"\"\n",
" if isinstance(energy, str): # i.e. if 'auto'\n",
" energy = np.mgrid[self.E_min:self.E_max + 0.5*self.dE:self.dE]\n",
"\n",
" if isinstance(theta, str):\n",
" theta = np.mgrid[\n",
" self.Theta_min:self.Theta_max + 0.5*self.dTheta:self.dTheta]\n",
"\n",
" if isinstance(psi, str):\n",
" psi = np.mgrid[self.Psi_min:self.Psi_max + 0.5*self.dPsi:self.dPsi]\n",
"\n",
" tomesh = [energy, theta, psi]\n",
" sh = [len(energy), len(theta), len(psi)]\n",
" if self.eEspread > 0:\n",
" spr = np.linspace(-3, 3, 13)\n",
" dgamma = self.gamma * spr * self.eEspread\n",
" wspr = np.exp(-0.5 * spr**2)\n",
" wspr /= wspr.sum()\n",
" tomesh.append(dgamma)\n",
" sh.append(len(dgamma))\n",
"\n",
" mesh = np.meshgrid(*tomesh, indexing='ij')\n",
" xE, xTheta, xPsi = mesh[0].ravel(), mesh[1].ravel(), mesh[2].ravel()\n",
" xG = mesh[3].ravel() if self.eEspread > 0 else None\n",
"\n",
" res = self.build_I_map(xE, xTheta, xPsi, dg=xG)\n",
" Es = res[1].reshape(sh)\n",
" Ep = res[2].reshape(sh)\n",
" if self.eEspread > 0:\n",
" ws = wspr[np.newaxis, np.newaxis, np.newaxis, :]\n",
" Is = ((Es*np.conj(Es)).real * ws).sum(axis=3)\n",
" Ip = ((Ep*np.conj(Ep)).real * ws).sum(axis=3)\n",
" else:\n",
" Is = (Es*np.conj(Es)).real\n",
" Ip = (Ep*np.conj(Ep)).real\n",
" dtheta, dpsi = theta[1] - theta[0], psi[1] - psi[0]\n",
" I0 = (Is.astype(float) + Ip.astype(float))\n",
" flux = I0.sum(axis=(1, 2)) * dtheta * dpsi\n",
" theta2, psi2 = self._get_2D_sizes(\n",
" I0, flux, theta, psi, dtheta, dpsi, method)\n",
"\n",
" EsFT = np.fft.fftshift(np.fft.fft2(Es), axes=(1, 2)) * dtheta * dpsi\n",
" EpFT = np.fft.fftshift(np.fft.fft2(Ep), axes=(1, 2)) * dtheta * dpsi\n",
" thetaFT = np.fft.fftshift(np.fft.fftfreq(len(theta), d=dtheta))\n",
" psiFT = np.fft.fftshift(np.fft.fftfreq(len(psi), d=dpsi))\n",
" dthetaFT, dpsiFT = thetaFT[1] - thetaFT[0], psiFT[1] - psiFT[0]\n",
" if self.eEspread > 0:\n",
" ws = wspr[np.newaxis, np.newaxis, np.newaxis, :]\n",
" IsFT = ((EsFT*np.conj(EsFT)).real * ws).sum(axis=3)\n",
" IpFT = ((EpFT*np.conj(EpFT)).real * ws).sum(axis=3)\n",
" else:\n",
" IsFT = (EsFT*np.conj(EsFT)).real\n",
" IpFT = (EpFT*np.conj(EpFT)).real\n",
" I0FT = (IsFT.astype(float) + IpFT.astype(float))\n",
" fluxFT = I0FT.sum(axis=(1, 2)) * dthetaFT * dpsiFT\n",
" # flux equals fluxFT, check it:\n",
"# print(flux)\n",
"# print(fluxFT)\n",
" k = energy / CH * 1e7 # in 1/mm\n",
" dx2, dz2 = self._get_2D_sizes(\n",
" I0FT, fluxFT, thetaFT, psiFT, dthetaFT, dpsiFT, method, k)\n",
"\n",
" return flux, theta2, psi2, dx2, dz2\n",
"\n",
" def _get_2D_sizes(\n",
" self, I0, flux, theta, psi, dtheta, dpsi, method, k=None):\n",
" if method == 'rms':\n",
" theta2 = (I0 * (theta[np.newaxis, :, np.newaxis])**2).sum(\n",
" axis=(1, 2)) * dtheta * dpsi / flux\n",
" psi2 = (I0 * (psi[np.newaxis, np.newaxis, :])**2).sum(\n",
" axis=(1, 2)) * dtheta * dpsi / flux\n",
" elif isinstance(method, float): # 0 < method < 1\n",
" theta2 = self._get_1D_size(I0, flux, theta, dtheta, 1, method)\n",
" psi2 = self._get_1D_size(I0, flux, psi, dpsi, 2, method)\n",
" else:\n",
" raise ValueError('unknown method!')\n",
" if k is not None:\n",
" theta2 *= k**(-2)\n",
" psi2 *= k**(-2)\n",
" return theta2, psi2\n",
"\n",
" def _get_1D_size(self, I0, flux, ang, dang, axis, method):\n",
" ang2 = np.zeros(I0.shape[0])\n",
" if axis == 1:\n",
" angCutI0 = I0[:, I0.shape[1]//2:, I0.shape[2]//2].squeeze()\n",
" elif axis == 2:\n",
" angCutI0 = I0[:, I0.shape[1]//2, I0.shape[2]//2:].squeeze()\n",
" angCumFlux = (angCutI0*ang[np.newaxis, len(ang)//2:]).cumsum(axis=1)\\\n",
" * 2*np.pi * dang\n",
" for ie, ee in enumerate(flux):\n",
" try:\n",
" argBorder = np.argwhere(angCumFlux[ie, :] > ee*method)[0][0]\n",
" except IndexError:\n",
" ang2[ie] = 0\n",
" continue\n",
" r2a = ang[len(ang)//2+argBorder-1]**2\n",
" va = angCumFlux[ie, argBorder-1]\n",
" r2b = ang[len(ang)//2+argBorder]**2\n",
" vb = angCumFlux[ie, argBorder]\n",
" r2m = (ee*method - va) * (r2b-r2a) / (vb-va) + r2a\n",
" ang2[ie] = r2m\n",
" return ang2\n",
"\n",
" def tanaka_kitamura_Qa2(self, x, eps=1e-6):\n",
" \"\"\"Squared Q_a function from Tanaka and Kitamura J. Synchrotron Rad. 16\n",
" (2009) 380–386, Eq(17). The argument is normalized energy spread by\n",
" Eq(13).\"\"\"\n",
" ret = np.ones_like(x, dtype=float)\n",
" xarr = np.array(x)\n",
"# ret[x <= eps] = 1 # ret already holds ones\n",
" y = SQ2 * xarr[xarr > eps]\n",
" y2 = y**2\n",
" ret[x > eps] = y2 / (np.exp(-y2) + SQPI*y*special.erf(y) - 1)\n",
" return ret\n",
"\n",
" def get_sigma_r02(self, E): # linear size\n",
" \"\"\"Squared sigma_{r0} as by Walker and by Ellaume and\n",
" Tanaka and Kitamura J. Synchrotron Rad. 16 (2009) 380–386 (see the\n",
" text after Eq(23))\"\"\"\n",
" return 2 * CHeVcm/E*10 * self.L0*self.Np / PI2**2\n",
"\n",
" def get_sigmaP_r02(self, E): # angular size\n",
" \"\"\"Squared sigmaP_{r0}\"\"\"\n",
" return CHeVcm/E*10 / (2 * self.L0*self.Np)\n",
"\n",
" def get_sigma_r2(self, E, onlyOddHarmonics=True, with0eSpread=False):\n",
" \"\"\"Squared sigma_{r} as by\n",
" Tanaka and Kitamura J. Synchrotron Rad. 16 (2009) 380–386\n",
" that also depends on energy spread.\"\"\"\n",
" sigma_r02 = self.get_sigma_r02(E)\n",
" if self.eEspread == 0 or with0eSpread:\n",
" return sigma_r02\n",
" harmonic = np.floor_divide(E, self.E1)\n",
"# harmonic[harmonic < 1] = 1\n",
" if onlyOddHarmonics:\n",
" harmonic += harmonic % 2 - 1\n",
" eEspread_norm = PI2 * harmonic * self.Np * self.eEspread\n",
" Qa2 = self.tanaka_kitamura_Qa2(eEspread_norm/4.) # note 1/4\n",
" return sigma_r02 * Qa2**(2/3.)\n",
"\n",
" def get_sigmaP_r2(self, E, onlyOddHarmonics=True, with0eSpread=False):\n",
" \"\"\"Squared sigmaP_{r} as by\n",
" Tanaka and Kitamura J. Synchrotron Rad. 16 (2009) 380–386\n",
" that also depends on energy spread.\"\"\"\n",
" sigmaP_r02 = self.get_sigmaP_r02(E)\n",
" if self.eEspread == 0 or with0eSpread:\n",
" return sigmaP_r02\n",
" harmonic = np.floor_divide(E, self.E1)\n",
"# harmonic[harmonic < 1] = 1\n",
" if onlyOddHarmonics:\n",
" harmonic += harmonic % 2 - 1\n",
" eEspread_norm = PI2 * harmonic * self.Np * self.eEspread\n",
" Qa2 = self.tanaka_kitamura_Qa2(eEspread_norm)\n",
" return sigmaP_r02 * Qa2\n",
"\n",
" def get_SIGMA(self, E, onlyOddHarmonics=True, with0eSpread=False):\n",
" \"\"\"Calculates total linear source size, also including the effect of\n",
" electron beam energy spread. Uses Tanaka and Kitamura, J. Synchrotron\n",
" Rad. 16 (2009) 380–6.\n",
"\n",
" *E* can be a value or an array. Returns a 2-tuple with x and y sizes.\n",
" \"\"\"\n",
" sigma_r2 = self.get_sigma_r2(E, onlyOddHarmonics, with0eSpread)\n",
" return ((self.dx**2 + sigma_r2)**0.5,\n",
" (self.dz**2 + sigma_r2)**0.5)\n",
"\n",
" def get_SIGMAP(self, E, onlyOddHarmonics=True, with0eSpread=False):\n",
" \"\"\"Calculates total angular source size, also including the effect of\n",
" electron beam energy spread. Uses Tanaka and Kitamura, J. Synchrotron\n",
" Rad. 16 (2009) 380–6.\n",
"\n",
" *E* can be a value or an array. Returns a 2-tuple with x and y sizes.\n",
" \"\"\"\n",
" sigmaP_r2 = self.get_sigmaP_r2(E, onlyOddHarmonics, with0eSpread)\n",
" return ((self.dxprime**2 + sigmaP_r2)**0.5,\n",
" (self.dzprime**2 + sigmaP_r2)**0.5)\n",
"\n",
" def shine(self, toGlobal=True, withAmplitudes=True, fixedEnergy=False,\n",
" wave=None, accuBeam=None):\n",
" u\"\"\"\n",
" Returns the source beam. If *toGlobal* is True, the output is in\n",
" the global system. If *withAmplitudes* is True, the resulted beam\n",
" contains arrays Es and Ep with the *s* and *p* components of the\n",
" electric field.\n",
"\n",
" *fixedEnergy* is either None or a value in eV. If *fixedEnergy* is\n",
" specified, the energy band is not 0.1%BW relative to *fixedEnergy*, as\n",
" probably expected but is given by (eMax - eMin) of the constructor.\n",
"\n",
" *wave* and *accuBeam* are used in wave diffraction. *wave* is a Beam\n",
" object and determines the positions of the wave samples. It must be\n",
" obtained by a previous `prepare_wave` run. *accuBeam* is only needed\n",
" with *several* repeats of diffraction integrals when the parameters of\n",
" the filament beam must be preserved for all the repeats.\n",
"\n",
"\n",
" .. Returned values: beamGlobal\n",
" \"\"\"\n",
" if self.bl is not None:\n",
" try:\n",
" self.bl._alignE = float(self.bl.alignE)\n",
" except ValueError:\n",
" self.bl._alignE = 0.5 * (self.eMin + self.eMax)\n",
"\n",
" if wave is not None:\n",
" if not hasattr(wave, 'rDiffr'):\n",
" raise ValueError(\"If you want to use a `wave`, run a\" +\n",
" \" `prepare_wave` before shine!\")\n",
" self.uniformRayDensity = True\n",
" mcRays = len(wave.a)\n",
" else:\n",
" mcRays = self.nrays\n",
"\n",
" if self.uniformRayDensity:\n",
" withAmplitudes = True\n",
" if not self.uniformRayDensity:\n",
" if raycing._VERBOSITY_ > 0:\n",
" print(\"Rays generation\")\n",
" bo = None\n",
" length = 0\n",
" seeded = np.long(0)\n",
" seededI = 0.\n",
" np.seterr(invalid='warn')\n",
" np.seterr(divide='warn')\n",
" if self.filamentBeam:\n",
" if accuBeam is None:\n",
" rsE = np.random.random_sample() * \\\n",
" float(self.E_max - self.E_min) + self.E_min\n",
" rX = self.dx * np.random.standard_normal()\n",
" rZ = self.dz * np.random.standard_normal()\n",
" dtheta = self.dxprime * np.random.standard_normal()\n",
" dpsi = self.dzprime * np.random.standard_normal()\n",
" else:\n",
" rsE = accuBeam.E[0]\n",
" rX = accuBeam.filamentDX\n",
" rZ = accuBeam.filamentDZ\n",
" dtheta = accuBeam.filamentDtheta\n",
" dpsi = accuBeam.filamentDpsi\n",
" seeded = accuBeam.seeded\n",
" seededI = accuBeam.seededI\n",
" if self.full:\n",
" if self.filamentBeam:\n",
" self.theta0 = dtheta\n",
" self.psi0 = dpsi\n",
" else:\n",
" self.theta0 = np.random.normal(0, self.dxprime, mcRays)\n",
" self.psi0 = np.random.normal(0, self.dzprime, mcRays)\n",
"\n",
" if fixedEnergy:\n",
" rsE = fixedEnergy\n",
" if (self.E_max-self.E_min) > fixedEnergy*1.1e-3:\n",
" print(\"Warning: the bandwidth seems too big. \"\n",
" \"Specify it by giving eMin and eMax in the constructor.\")\n",
" nrep = 0\n",
" rep_condition = True\n",
"# while length < self.nrays:\n",
" while rep_condition:\n",
" seeded += mcRays\n",
" # start_time = time.time()\n",
" if self.filamentBeam or fixedEnergy:\n",
" rE = rsE * np.ones(mcRays)\n",
" else:\n",
" rndg = np.random.rand(mcRays)\n",
" rE = rndg * float(self.E_max - self.E_min) + self.E_min\n",
"\n",
" if wave is not None:\n",
" self.xzE = (self.E_max - self.E_min)\n",
" if self.filamentBeam:\n",
" shiftX = rX\n",
" shiftZ = rZ\n",
" else:\n",
" shiftX = np.random.normal(\n",
" 0, self.dx, mcRays) if self.dx > 0 else 0\n",
" shiftZ = np.random.normal(\n",
" 0, self.dz, mcRays) if self.dz > 0 else 0\n",
" x = wave.xDiffr + shiftX\n",
" y = wave.yDiffr\n",
" z = wave.zDiffr + shiftZ\n",
" rDiffr = (x**2 + y**2 + z**2)**0.5\n",
" rTheta = x / rDiffr\n",
" rPsi = z / rDiffr\n",
" if self.filamentBeam:\n",
" rTheta += dtheta\n",
" rPsi += dpsi\n",
" else:\n",
" if self.dxprime > 0:\n",
" rTheta += np.random.normal(0, self.dxprime, mcRays)\n",
" if self.dzprime > 0:\n",
" rPsi += np.random.normal(0, self.dzprime, mcRays)\n",
" else:\n",
" rndg = np.random.rand(mcRays)\n",
" rTheta = rndg * (self.Theta_max - self.Theta_min) +\\\n",
" self.Theta_min\n",
" rndg = np.random.rand(mcRays)\n",
" rPsi = rndg * (self.Psi_max - self.Psi_min) + self.Psi_min\n",
"\n",
" Intensity, mJs, mJp = self.build_I_map(rE, rTheta, rPsi)\n",
"\n",
" if self.uniformRayDensity:\n",
" seededI += mcRays * self.xzE\n",
" else:\n",
" seededI += Intensity.sum() * self.xzE\n",
" tmp_max = np.max(Intensity)\n",
" if tmp_max > self.Imax:\n",
" self.Imax = tmp_max\n",
" self.fluxConst = self.Imax * self.xzE\n",
" if raycing._VERBOSITY_ > 10:\n",
" imax = np.argmax(Intensity)\n",
" print(self.Imax, imax, rE[imax], rTheta[imax], rPsi[imax])\n",
" if self.uniformRayDensity:\n",
" I_pass = slice(None)\n",
" npassed = mcRays\n",
" else:\n",
" rndg = np.random.rand(mcRays)\n",
" I_pass = np.where(self.Imax * rndg < Intensity)[0]\n",
" npassed = len(I_pass)\n",
" if npassed == 0:\n",
" if raycing._VERBOSITY_ > 0:\n",
" print('No good rays in this seed!', length, 'of',\n",
" self.nrays, 'rays in total so far...')\n",
" print(self.Imax, self.E_min, self.E_max,\n",
" self.Theta_min, self.Theta_max,\n",
" self.Psi_min, self.Psi_max)\n",
" continue\n",
"\n",
" if wave is not None:\n",
" bot = wave\n",
" else:\n",
" bot = Beam(npassed, withAmplitudes=withAmplitudes)\n",
" bot.state[:] = 1 # good\n",
" bot.E[:] = rE[I_pass]\n",
"\n",
" if self.filamentBeam:\n",
" dxR = rX\n",
" dzR = rZ\n",
"# sigma_r2 = self.get_sigma_r2(bot.E)\n",
"# dxR += np.random.normal(0, sigma_r2**0.5, npassed)\n",
"# dzR += np.random.normal(0, sigma_r2**0.5, npassed)\n",
" else:\n",
" if self.full:\n",
" bot.sourceSIGMAx = self.dx\n",
" bot.sourceSIGMAz = self.dz\n",
" dxR = np.random.normal(0, bot.sourceSIGMAx, npassed)\n",
" dzR = np.random.normal(0, bot.sourceSIGMAz, npassed)\n",
" else:\n",
" bot.sourceSIGMAx, bot.sourceSIGMAz = self.get_SIGMA(\n",
" bot.E, onlyOddHarmonics=False)\n",
" dxR = np.random.normal(0, bot.sourceSIGMAx, npassed)\n",
" dzR = np.random.normal(0, bot.sourceSIGMAz, npassed)\n",
"\n",
" if wave is not None:\n",
" wave.rDiffr = ((wave.xDiffr - dxR)**2 + wave.yDiffr**2 +\n",
" (wave.zDiffr - dzR)**2)**0.5\n",
" wave.path[:] = 0\n",
" wave.a[:] = (wave.xDiffr - dxR) / wave.rDiffr\n",
" wave.b[:] = wave.yDiffr / wave.rDiffr\n",
" wave.c[:] = (wave.zDiffr - dzR) / wave.rDiffr\n",
" else:\n",
" bot.x[:] = dxR\n",
" bot.z[:] = dzR\n",
" bot.a[:] = rTheta[I_pass]\n",
" bot.c[:] = rPsi[I_pass]\n",
"\n",
" if not self.full:\n",
" if self.filamentBeam:\n",
" bot.a[:] += dtheta\n",
" bot.c[:] += dpsi\n",
" else:\n",
" if self.dxprime > 0:\n",
" bot.a[:] += np.random.normal(\n",
" 0, self.dxprime, npassed)\n",
" if self.dzprime > 0:\n",
" bot.c[:] += np.random.normal(\n",
" 0, self.dzprime, npassed)\n",
"\n",
" mJs = mJs[I_pass]\n",
" mJp = mJp[I_pass]\n",
" if wave is not None:\n",
" area = wave.areaNormal if hasattr(wave, 'areaNormal') else\\\n",
" wave.area\n",
" norm = area**0.5 / wave.rDiffr\n",
" mJs *= norm\n",
" mJp *= norm\n",
" mJs2 = (mJs * np.conj(mJs)).real\n",
" mJp2 = (mJp * np.conj(mJp)).real\n",
"\n",
" if self.uniformRayDensity:\n",
" sSP = 1.\n",
" else:\n",
" sSP = mJs2 + mJp2\n",
" bot.Jsp[:] = np.where(sSP, mJs * np.conj(mJp) / sSP, 0)\n",
" bot.Jss[:] = np.where(sSP, mJs2 / sSP, 0)\n",
" bot.Jpp[:] = np.where(sSP, mJp2 / sSP, 0)\n",
"\n",
" if withAmplitudes:\n",
" if self.uniformRayDensity:\n",
" bot.Es[:] = mJs\n",
" bot.Ep[:] = mJp\n",
" else:\n",
" bot.Es[:] = mJs / mJs2**0.5\n",
" bot.Ep[:] = mJp / mJp2**0.5\n",
"\n",
" if bo is None:\n",
" bo = bot\n",
" else:\n",
" bo.concatenate(bot)\n",
" length = len(bo.a)\n",
" if not self.uniformRayDensity:\n",
" if raycing._VERBOSITY_ > 0:\n",
" print(\"{0} rays of {1}\".format(length, self.nrays))\n",
" try:\n",
" if self.bl is not None:\n",
" if self.bl.flowSource == 'Qook' and\\\n",
" self.bl.statusSignal is not None:\n",
" ptg = (self.bl.statusSignal[1] +\n",
" float(length) / float(self.nrays)) /\\\n",
" self.bl.statusSignal[2]\n",
" self.bl.statusSignal[0].emit(\n",
" (ptg, self.bl.statusSignal[3]))\n",
" except:\n",
" pass\n",
" if self.filamentBeam:\n",
" nrep += 1\n",
" rep_condition = nrep < self.nrepmax\n",
" else:\n",
" rep_condition = length < self.nrays\n",
" if self.uniformRayDensity:\n",
" rep_condition = False\n",
"\n",
" bo.accepted = length * self.fluxConst\n",
" bo.acceptedE = bo.E.sum() * self.fluxConst * SIE0\n",
" bo.seeded = seeded\n",
" bo.seededI = seededI\n",
" if raycing._VERBOSITY_ > 0:\n",
" sys.stdout.flush()\n",
"\n",
" if length > self.nrays and not self.filamentBeam and wave is None:\n",
" bo.filter_by_index(slice(0, self.nrays))\n",
" if self.filamentBeam:\n",
" bo.filamentDtheta = dtheta\n",
" bo.filamentDpsi = dpsi\n",
" bo.filamentDX = rX\n",
" bo.filamentDZ = rZ\n",
"\n",
" norm = (bo.a**2 + bo.b**2 + bo.c**2)**0.5\n",
" bo.a /= norm\n",
" bo.b /= norm\n",
" bo.c /= norm\n",
"\n",
"# if raycing._VERBOSITY_ > 10:\n",
"# self._reportNaN(bo.Jss, 'Jss')\n",
"# self._reportNaN(bo.Jpp, 'Jpp')\n",
"# self._reportNaN(bo.Jsp, 'Jsp')\n",
"# self._reportNaN(bo.E, 'E')\n",
"# self._reportNaN(bo.x, 'x')\n",
"# self._reportNaN(bo.y, 'y')\n",
"# self._reportNaN(bo.z, 'z')\n",
"# self._reportNaN(bo.a, 'a')\n",
"# self._reportNaN(bo.b, 'b')\n",
"# self._reportNaN(bo.c, 'c')\n",
" if self.pitch or self.yaw:\n",
" raycing.rotate_beam(bo, pitch=self.pitch, yaw=self.yaw)\n",
" bor = Beam(copyFrom=bo)\n",
" if wave is not None:\n",
" bor.x[:] = dxR\n",
" bor.y[:] = 0\n",
" bor.z[:] = dzR\n",
" bor.path[:] = 0\n",
" mPh = np.exp(1e7j * wave.E/CHBAR * wave.rDiffr)\n",
" wave.Es *= mPh\n",
" wave.Ep *= mPh\n",
"\n",
" if toGlobal: # in global coordinate system:\n",
" raycing.virgin_local_to_global(self.bl, bor, self.center)\n",
" bor.parentId = self.name\n",
" raycing.append_to_flow(self.shine, [bor],\n",
" inspect.currentframe())\n",
" return bor\n"
] | [
0,
0,
0,
0,
0,
0.06666666666666667,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015151515151515152,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 2,374 | 0.000067 | false |
# Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Standard library imports.
import abc
import ctypes
import datetime
import functools
import importlib
import re
import struct
import weakref

# Third-party imports.
import six

import mi
from mi import mi_error
import pbr.version
try:
import eventlet
from eventlet import patcher
from eventlet import tpool
# If eventlet is installed and the 'thread' module is patched, we'll make
# sure that other greenthreads will not be blocked while WMI operations
# are performed by using tpool.
# This behavior can be disabled by using the following flag.
EVENTLET_NONBLOCKING_MODE_ENABLED = patcher.is_monkey_patched('thread')
except ImportError:
eventlet = None
EVENTLET_NONBLOCKING_MODE_ENABLED = False
__all__ = ['__version__']
version_info = pbr.version.VersionInfo('PyMI')
try:
__version__ = version_info.version_string()
except AttributeError:
__version__ = None
def avoid_blocking_call(f):
# Performs blocking calls in a different thread using tpool.execute
# when called from a greenthread.
# Note that eventlet.getcurrent will always return a greenlet object.
# Still, in case of a greenthread, the parent greenlet will always be the
# hub loop greenlet.
def wrapper(*args, **kwargs):
if (EVENTLET_NONBLOCKING_MODE_ENABLED and
eventlet.getcurrent().parent):
return tpool.execute(f, *args, **kwargs)
else:
return f(*args, **kwargs)
return wrapper
def _get_eventlet_original(module_name):
if eventlet:
return eventlet.patcher.original(module_name)
else:
return importlib.import_module(module_name)
# Default operation timeout in seconds.
# In order to enable it, this value must be set.
DEFAULT_OPERATION_TIMEOUT = None
WBEM_E_PROVIDER_NOT_CAPABLE = 0x80041024
class x_wmi(Exception):
def __init__(self, info="", com_error=None):
self.info = info
self.com_error = com_error
def __str__(self):
return "<x_wmi: %s %s>" % (
self.info or "Unexpected COM Error",
self.com_error or "(no underlying exception)"
)
class x_wmi_timed_out(x_wmi):
pass
class com_error(Exception):
def __init__(self, hresult, strerror, excepinfo, argerror):
self.hresult = hresult
self.strerror = strerror
self.excepinfo = excepinfo
self.argerror = argerror
def unsigned_to_signed(unsigned):
signed, = struct.unpack("l", struct.pack("L", unsigned))
return signed
def mi_to_wmi_exception(func):
    """Decorator translating mi.error exceptions into x_wmi exceptions.

    Keeps compatibility with the wmi.py module: the MI error dict is
    wrapped in a pywin32-style com_error and re-raised as x_wmi, or
    x_wmi_timed_out for MI timeout errors.
    """
    import functools

    # functools.wraps keeps the decorated callable introspectable
    # (previously every wrapped method reported itself as 'func_wrapper').
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except mi.error as ex:
            d = ex.args[0]
            hresult = unsigned_to_signed(d.get("error_code", 0))
            err_msg = d.get("message") or ""
            com_ex = com_error(
                hresult, err_msg,
                (0, None, err_msg, None, None, hresult),
                None)
            if isinstance(ex, mi.timeouterror):
                raise x_wmi_timed_out(err_msg, com_ex)
            else:
                raise x_wmi(err_msg, com_ex)
    return func_wrapper
# Process-wide mi.Application singleton, created lazily by _get_app().
_app = None
def _get_app():
    # Lazily create and return the shared mi.Application instance.
    global _app
    if not _app:
        _app = mi.Application()
    return _app
class _Method(object):
    """Callable wrapper around a WMI/MI method of a target entity.

    Invoking the instance forwards to the connection's invoke_method;
    the call runs through tpool when made from a greenthread.
    """

    def __init__(self, conn, target, method_name):
        self._conn = conn
        self._target = target
        self._method_name = method_name

        # Fetched eagerly so __str__ can list parameter names and so a
        # missing method raises mi.error here (relied upon by
        # _BaseEntity.__getattr__'s fallback logic).
        self._params = self._conn._get_method_params(target, method_name)

    @avoid_blocking_call
    @mi_to_wmi_exception
    def __call__(self, *args, **kwargs):
        return self._conn.invoke_method(
            self._target, self._method_name, *args, **kwargs)

    def __str__(self):
        try:
            args = []
            # an MI object's length is the number of attributes the object it
            # represents has.
            for i in range(len(self._params)):
                key, value_type, value = self._params.get_element(i)
                args.append(key)

            obj_string = '<function %s (%s)>' % (
                self._method_name, ', '.join(args))

            return obj_string

        except Exception:
            # Bug fix: this used super(_BaseEntity, self), but _Method is
            # not a _BaseEntity subclass, so the fallback itself raised
            # TypeError instead of returning the default representation.
            return super(_Method, self).__str__()

    def __repr__(self):
        return self.__str__()
class _Path(object):
"""
Provides a SWbemObjectPath replacement.
"""
def __init__(self, item):
self._item = item
def __str__(self):
return self.Path
@property
def Authority(self):
raise NotImplementedError()
@property
def Class(self):
return self._item.get_class_name()
@property
def DisplayName(self):
return self.Path()
@property
def IsClass(self):
return isinstance(self._item, _Class)
@property
def IsSingleton(self):
raise NotImplementedError()
@property
def Keys(self):
raise NotImplementedError()
@property
def Locale(self):
raise NotImplementedError()
@property
def Namespace(self):
return self._item.get_namespace()
@property
def ParentNamespace(self):
raise NotImplementedError()
@property
def Path(self):
return self._item.get_path()
@property
def RelPath(self):
path = self._item.get_path()
return path[path.find(':') + 1:]
@property
def Security_(self):
raise NotImplementedError()
@property
def Server(self):
return self._item.get_server_name()
@six.add_metaclass(abc.ABCMeta)
class _BaseEntity(object):
    """Shared behavior of the WMI-like wrappers (_Instance and _Class).

    Subclasses expose the underlying MI object and its class identity;
    this base provides rendering plus property/method resolution via
    __getattr__.
    """
    # When True, MI reference properties are expanded into full WMI
    # objects instead of plain path strings (see __getattr__).
    _convert_references = False
    @abc.abstractmethod
    def get_wrapped_object(self):
        """Return the underlying MI object."""
        pass
    @abc.abstractmethod
    def get_class_name(self):
        """Return the MI class name of the wrapped object."""
        pass
    @abc.abstractmethod
    def get_class(self):
        """Return the _Class wrapper for this entity's class."""
        pass
    def __str__(self):
        """Render the entity in a MOF-like 'instance of ...' format."""
        try:
            obj = self.get_wrapped_object()
            obj_string = 'instance of %s\n{' % obj.get_class_name()
            # an MI object's length is the number of attributes the object it
            # represents has.
            for i in range(len(obj)):
                key, value_type, value = obj.get_element(i)
                if value_type == mi.MI_STRING:
                    value = '"%s"' % value
                obj_string += "\n\t%s = %s;" % (key, value)
            obj_string += '\n};'
            return obj_string
        except Exception:
            # Fall back to the default representation when the MI object
            # cannot be inspected.
            return super(_BaseEntity, self).__str__()
    def __repr__(self):
        try:
            obj = self.get_wrapped_object()
            return '<pymi_object: %s>' % obj.get_path()
        except Exception:
            return super(_BaseEntity, self).__repr__()
    @mi_to_wmi_exception
    def __getattr__(self, name):
        """Resolve *name* as an MI property, falling back to a method.

        Raises AttributeError when neither a property nor a method with
        this name exists on the class.
        """
        try:
            # If the class is an association class, certain of its properties
            # are references which contain the paths to the associated objects.
            # The WMI module translates automatically into WMI objects those
            # class properties that are references. To maintain the
            # compatibility with the WMI module, those class properties that
            # are references are translated into objects.
            obj = self.get_wrapped_object()
            return self._conn._wrap_element(
                *obj.get_element(name),
                convert_references=self._convert_references)
        except mi.error:
            # Not a property: try to resolve it as a method. _Method's
            # constructor raises mi.error when the method is missing.
            try:
                return _Method(self._conn, self, name)
            except mi.error as err:
                if err.args[0].get('mi_result') == (
                        mi_error.MI_RESULT_METHOD_NOT_FOUND):
                    err_msg = ("'%(cls_name)s' has no attribute "
                               "'%(attr_name)s'.")
                    raise AttributeError(
                        err_msg % dict(cls_name=self.get_class_name(),
                                       attr_name=name))
                else:
                    raise
    @mi_to_wmi_exception
    def path(self):
        """Return a _Path (SWbemObjectPath replacement) for this entity."""
        return _Path(self.get_wrapped_object())
class _Instance(_BaseEntity):
    """Wraps an MI instance, mimicking a wmi.py WMI object."""
    # Instances expand reference properties into full objects.
    _convert_references = True
    def __init__(self, conn, instance, use_conn_weak_ref=False):
        # object.__setattr__ must be used here: __setattr__ below
        # forwards normal attribute assignment to the MI instance's
        # properties. A weak connection ref is used by event watchers so
        # queued events do not keep the connection alive.
        if use_conn_weak_ref:
            object.__setattr__(self, "_conn_ref", weakref.ref(conn))
        else:
            object.__setattr__(self, "_conn_ref", conn)
        object.__setattr__(self, "_instance", instance)
        object.__setattr__(self, "_cls_name", None)
    @property
    def _conn(self):
        # Dereference the connection, transparently handling weak refs.
        if isinstance(self._conn_ref, weakref.ref):
            return self._conn_ref()
        else:
            return self._conn_ref
    def get_wrapped_object(self):
        return self._instance
    def get_class_name(self):
        # Cached after the first lookup (via object.__setattr__, see above).
        if not self._cls_name:
            object.__setattr__(self, '_cls_name',
                               self._instance.get_class_name())
        return self._cls_name
    def get_class(self):
        class_name = self.get_class_name()
        return self._conn.get_class(class_name)
    @mi_to_wmi_exception
    def __setattr__(self, name, value):
        """Assign a WMI property, converting the value to its MI type."""
        _, el_type, _ = self._instance.get_element(name)
        self._instance[six.text_type(name)] = self._conn._unwrap_element(
            el_type, value)
    @mi_to_wmi_exception
    def associators(self, wmi_association_class=u"", wmi_result_class=u"",
                    operation_options=None):
        """Return instances associated with this one, optionally filtered
        by association class and/or result class."""
        return self._conn.get_associators(
            self, wmi_association_class, wmi_result_class,
            operation_options)
    @mi_to_wmi_exception
    def path_(self):
        # Full object path string (wmi.py compatibility).
        return self._instance.get_path()
    @mi_to_wmi_exception
    def GetText_(self, text_format):
        # text_format is accepted for wmi.py compatibility but not used.
        return self._conn.serialize_instance(self)
    @mi_to_wmi_exception
    def put(self, operation_options=None):
        """Persist the instance: create it when it has no path yet,
        otherwise update the existing object."""
        if not self._instance.get_path():
            self._conn.create_instance(self, operation_options)
        else:
            self._conn.modify_instance(self, operation_options)
    @mi_to_wmi_exception
    def Delete_(self, operation_options=None):
        self._conn.delete_instance(self, operation_options)
    @mi_to_wmi_exception
    def set(self, **kwargs):
        """Set multiple WMI properties at once."""
        for k, v in kwargs.items():
            self.__setattr__(k, v)
class _Class(_BaseEntity):
    """Wraps an MI class; calling it queries the class's instances."""

    def __init__(self, conn, class_name, cls):
        self._conn = conn
        self.class_name = six.text_type(class_name)
        self._cls = cls

    def get_wrapped_object(self):
        return self._cls

    def get_class_name(self):
        return self.class_name

    def get_class(self):
        return self

    @mi_to_wmi_exception
    def __call__(self, *args, **kwargs):
        """Query instances of this class.

        Accepts at most one positional argument: a list of field names
        to select ("*" when omitted). Keyword arguments become equality
        conditions in the WHERE clause; the optional 'operation_options'
        keyword is forwarded to the query.
        """
        operation_options = kwargs.pop("operation_options", None)

        fields = ""
        for i, v in enumerate(args):
            if i > 0:
                raise ValueError('Invalid argument')
            if not isinstance(v, list):
                raise ValueError('Invalid argument')
            # TODO: sanitize input
            fields = ", ".join(v)
        if not fields:
            fields = "*"

        # TODO: sanitize input
        # Renamed from 'filter' to avoid shadowing the builtin.
        where_filter = " and ".join(
            "%(k)s = '%(v)s'" % {'k': k, 'v': v} for k, v in kwargs.items())
        if where_filter:
            where = " where %s" % where_filter
        else:
            where = ""

        wql = (u"select %(fields)s from %(class_name)s%(where)s" %
               {"fields": fields,
                "class_name": self.class_name,
                "where": where})
        return self._conn.query(
            wql, operation_options=operation_options)

    @mi_to_wmi_exception
    def new(self):
        """Create a new, not yet persisted, instance of this class."""
        return self._conn.new_instance_from_class(self)

    @mi_to_wmi_exception
    def watch_for(self, raw_wql=None, notification_type="operation",
                  wmi_class=None, delay_secs=1, fields=None, **where_clause):
        """Subscribe to events; only raw_wql is currently honored.

        The remaining parameters are kept for wmi.py signature
        compatibility. 'fields' now defaults to None instead of a
        shared mutable list.
        """
        return _EventWatcher(self._conn, six.text_type(raw_wql))
class _EventWatcher(object):
    """Consumer for a WMI event subscription.

    Events are delivered asynchronously by the MI callback
    (_indication_result) and buffered in a queue; calling the watcher
    blocks until an event is available or the timeout expires.
    """
    def __init__(self, conn, wql):
        # Use the unpatched threading module so the MI callback thread
        # can signal these Events even under eventlet monkey-patching.
        native_threading = _get_eventlet_original('threading')
        self._conn = conn
        self._events_queue = []
        self._error = None
        self._event = native_threading.Event()
        self._operation = conn.subscribe(
            wql, self._indication_result, self.close)
        self._operation_finished = native_threading.Event()
    def _process_events(self):
        # Raise a pending subscription error, or pop the oldest queued
        # event (returns None when the queue is empty).
        if self._error:
            err = self._error
            self._error = None
            raise x_wmi(info=err[1])
        if self._events_queue:
            return self._events_queue.pop(0)
    @avoid_blocking_call
    def __call__(self, timeout_ms=-1):
        """Wait for and return the next event.

        Raises x_wmi_timed_out when timeout_ms elapses without an event,
        or x_wmi("No more events") once the subscription is exhausted.
        """
        while True:
            try:
                event = self._process_events()
                if event:
                    return event
                timeout = timeout_ms / 1000.0 if timeout_ms else None
                if not self._event.wait(timeout):
                    raise x_wmi_timed_out()
                self._event.clear()
            finally:
                # NOTE(review): this finally clause also runs on the
                # successful return path; it raises only once the
                # underlying operation reports no further results.
                if (not self._operation or
                        not self._operation.has_more_results()):
                    self.close()
                    raise x_wmi("No more events")
    def _indication_result(self, instance, bookmark, machine_id, more_results,
                           result_code, error_string, error_details):
        """MI callback: queue the event (with its previous instance when
        available) and wake up any waiter."""
        if not more_results:
            self._operation_finished.set()
        if instance:
            # use_conn_weak_ref=True: queued events must not keep the
            # connection alive (see _Instance._conn).
            event = _Instance(self._conn,
                              instance[u"TargetInstance"].clone(),
                              use_conn_weak_ref=True)
            try:
                previous_inst = _Instance(
                    self._conn, instance[u'PreviousInstance'].clone(),
                    use_conn_weak_ref=True)
                object.__setattr__(event, 'previous', previous_inst)
            except (mi.error, AttributeError):
                # The 'PreviousInstance' attribute may be missing, for
                # example in case of a creation event or simply
                # because this field was not requested.
                pass
            self._events_queue.append(event)
        if error_details:
            self._error = (
                result_code, error_string,
                _Instance(
                    self._conn, error_details.clone(),
                    use_conn_weak_ref=True))
        self._event.set()
    @avoid_blocking_call
    def _wait_for_operation_cancel(self):
        # Blocking wait; runs through tpool when called from a greenthread.
        self._operation_finished.wait()
    def close(self):
        """Cancel the subscription and release all resources."""
        if self._operation:
            self._operation.cancel()
            # Those operations are asynchronous. We'll need to wait for the
            # subscription to be canceled before deallocating objects,
            # otherwise MI can crash when receiving further events. We rely on
            # the fact that an event will be emitted once the subscription is
            # canceled.
            self._wait_for_operation_cancel()
            self._operation.close()
        self._event.set()
        self._operation = None
        self._timeout_ms = None
        self._events_queue = []
        self._conn = None
    def __del__(self):
        self.close()
class _Connection(object):
    """A WMI namespace connection backed by an MI session.

    Mirrors the wmi.py connection object: attribute access resolves WMI
    classes (see __getattr__), queries and associator lookups return
    _Instance wrappers. Blocking MI calls are decorated with
    avoid_blocking_call so greenthreads are not stalled.
    """

    def __init__(self, computer_name=".", ns="root/cimv2", locale_name=None,
                 protocol=mi.PROTOCOL_WMIDCOM, cache_classes=True,
                 operation_timeout=None, user="", password="",
                 user_cert_thumbprint="", auth_type="", transport=None):
        self._ns = six.text_type(ns)
        self._app = _get_app()
        self._protocol = six.text_type(protocol)
        self._computer_name = six.text_type(computer_name)
        self._transport = transport
        self._locale_name = locale_name
        self._op_timeout = operation_timeout or DEFAULT_OPERATION_TIMEOUT
        self._user = user
        self._password = password
        self._auth_type = auth_type
        self._cert_thumbprint = user_cert_thumbprint
        self._set_destination_options()
        self._session = self._app.create_session(
            computer_name=self._computer_name,
            protocol=self._protocol,
            destination_options=self._destination_options)
        # Per-connection caches for classes and method parameter
        # templates; disabled when cache_classes is False.
        self._cache_classes = cache_classes
        self._class_cache = {}
        self._method_params_cache = {}
        self._notify_on_close = []

    def _set_destination_options(self):
        """Build the MI destination options (locale, timeout, transport,
        credentials) from the constructor arguments."""
        self._destination_options = self._app.create_destination_options()
        if self._locale_name:
            self._destination_options.set_ui_locale(
                locale_name=six.text_type(self._locale_name))
        if self._op_timeout is not None:
            timeout = datetime.timedelta(0, self._op_timeout, 0)
            self._destination_options.set_timeout(timeout)
        if self._transport:
            self._destination_options.set_transport(self._transport)
        if self._user or self._cert_thumbprint:
            user, domain = self._get_username_and_domain()
            self._destination_options.add_credentials(
                self._auth_type, domain, user, self._password,
                self._cert_thumbprint)

    def _get_username_and_domain(self):
        """Split self._user ('domain\\user' or 'user@domain') into
        (user, domain); domain is "" when not specified."""
        username = self._user.replace("/", "\\")
        if "\\" in username:
            domain, user = username.split("\\")
        elif "@" in username:
            user, domain = username.split("@")
        else:
            user, domain = username, ""
        return user, domain

    def _close(self):
        # Notify dependents (e.g. event watchers registered via
        # subscribe) before tearing down the session.
        for callback in self._notify_on_close:
            callback()
        self._notify_on_close = []
        self._session = None
        self._app = None

    @mi_to_wmi_exception
    def __del__(self):
        self._close()

    @mi_to_wmi_exception
    def __getattr__(self, name):
        """Expose WMI classes as attributes, e.g. conn.Win32_Process."""
        return self.get_class(six.text_type(name))

    def _get_instances(self, op):
        # Drain an MI operation, cloning each instance so it remains
        # valid after the operation is closed.
        instances = []
        inst = op.get_next_instance()
        while inst is not None:
            instances.append(_Instance(self, inst.clone()))
            inst = op.get_next_instance()
        return instances

    def _get_mi_operation_options(self, operation_options=None):
        """Translate an options dict into MI operation options.

        Supported keys: 'operation_timeout' (seconds) and
        'custom_options' (list of dicts with 'name', 'value_type',
        'value' and an optional 'must_comply' flag). Returns None when
        no options were requested.
        """
        if not operation_options:
            return
        mi_op_options = self._app.create_operation_options()
        if operation_options.get('operation_timeout') is not None:
            operation_timeout = operation_options['operation_timeout']
            timeout = datetime.timedelta(0, operation_timeout, 0)
            mi_op_options.set_timeout(timeout)
        for option in operation_options.get('custom_options', []):
            # The value_type must be a MI type, such as MI_Array.
            # The value object will then be 'casted' to that type.
            option_value = self._unwrap_element(option['value_type'],
                                               option['value'])
            mi_op_options.set_custom_option(
                name=six.text_type(option['name']),
                value_type=option['value_type'],
                value=option_value,
                must_comply=option.get('must_comply', True))
        return mi_op_options

    @mi_to_wmi_exception
    @avoid_blocking_call
    def query(self, wql, operation_options=None):
        """Run a WQL query and return the matching _Instance objects."""
        # Escape backslashes so object paths survive WQL parsing.
        wql = wql.replace("\\", "\\\\")
        operation_options = self._get_mi_operation_options(
            operation_options=operation_options)
        with self._session.exec_query(
                ns=self._ns, query=six.text_type(wql),
                operation_options=operation_options) as q:
            return self._get_instances(q)

    @mi_to_wmi_exception
    @avoid_blocking_call
    def get_associators(self, instance, wmi_association_class=u"",
                        wmi_result_class=u"",
                        operation_options=None):
        """Return objects associated with *instance*, optionally
        filtered by association class and/or result class."""
        operation_options = self._get_mi_operation_options(
            operation_options=operation_options)
        with self._session.get_associators(
                ns=self._ns, instance=instance._instance,
                assoc_class=six.text_type(wmi_association_class),
                result_class=six.text_type(wmi_result_class),
                operation_options=operation_options) as q:
            return self._get_instances(q)

    def _get_method_params(self, target, method_name):
        """Return an MI parameter object for the given method.

        A clone is always handed out so callers can fill in values
        freely; the pristine template is cached per (class, method)
        when class caching is enabled.

        Bug fix: the previous branching could return None when the
        template was not found in the cache (or caching was disabled);
        the creation path now runs whenever no cached clone was made.
        """
        params = None
        class_name = None
        if self._cache_classes:
            class_name = target.get_class_name()
            params = self._method_params_cache.get((class_name, method_name))
            if params is not None:
                # Cache hit: hand out a clone of the stored template.
                params = params.clone()
        if params is None:
            # Cache miss (or caching disabled): build the parameter
            # template from the class definition.
            mi_class = target.get_class().get_wrapped_object()
            params = self._app.create_method_params(
                mi_class, six.text_type(method_name))
            if self._cache_classes:
                # Store the pristine template, return a clone of it.
                self._method_params_cache[(class_name, method_name)] = params
                params = params.clone()
        return params

    @mi_to_wmi_exception
    def invoke_method(self, target, method_name, *args, **kwargs):
        """Invoke a WMI method on *target*, returning its output values.

        Positional and keyword arguments are converted to the MI types
        declared by the method; the optional 'operation_options' keyword
        is honored. Output parameters are returned as a tuple sorted by
        parameter name.
        """
        mi_target = target.get_wrapped_object()
        params = self._get_method_params(target, method_name)
        operation_options = self._get_mi_operation_options(
            operation_options=kwargs.pop('operation_options', None))
        for i, v in enumerate(args):
            _, el_type, _ = params.get_element(i)
            params[i] = self._unwrap_element(el_type, v)
        for k, v in kwargs.items():
            _, el_type, _ = params.get_element(k)
            params[k] = self._unwrap_element(el_type, v)
        if not params:
            params = None
        with self._session.invoke_method(
                mi_target, six.text_type(method_name), params,
                operation_options) as op:
            result_values = []
            r = op.get_next_instance()
            elements = []
            for i in six.moves.range(0, len(r)):
                elements.append(r.get_element(i))
            # Sort the output params by name before returning their values.
            # The WINRM and WMIDCOM protocols behave differently in how
            # returned elements are ordered. This hack aligns with the WMIDCOM
            # behaviour to retain compatibility with the wmi.py module.
            for element in sorted(elements, key=lambda element: element[0]):
                # Workaround to avoid including the return value if the method
                # returns void, as there's no direct way to determine it.
                # This won't work if the method is expected to return a
                # boolean value!!
                if element != ('ReturnValue', mi.MI_BOOLEAN, True):
                    result_values.append(self._wrap_element(*element))
            return tuple(result_values)

    @mi_to_wmi_exception
    @avoid_blocking_call
    def new_instance_from_class(self, cls):
        """Create a fresh, unsaved _Instance of the given _Class."""
        return _Instance(
            self, self._app.create_instance_from_class(
                cls.class_name, cls.get_wrapped_object()))

    @mi_to_wmi_exception
    def serialize_instance(self, instance):
        """Serialize an _Instance to its textual representation."""
        with self._app.create_serializer() as s:
            return s.serialize_instance(instance._instance)

    @mi_to_wmi_exception
    def get_class(self, class_name):
        """Return a _Class wrapper (cached when enabled), or None when
        the class does not exist in this namespace."""
        cls = None
        if self._cache_classes:
            cls = self._class_cache.get(class_name)
        if cls is None:
            cls = self._get_mi_class(class_name)
            if self._cache_classes and cls:
                self._class_cache[class_name] = cls
        if cls is not None:
            return _Class(self, class_name, cls)

    @avoid_blocking_call
    def _get_mi_class(self, class_name):
        # Clone so the class object outlives the MI operation.
        with self._session.get_class(
                ns=self._ns, class_name=class_name) as op:
            cls = op.get_next_class()
            cls = cls.clone() if cls is not None else cls
        return cls

    @mi_to_wmi_exception
    @avoid_blocking_call
    def get_instance(self, class_name, key):
        """Fetch one instance identified by *key* (a dict of key
        properties), or None when it does not exist."""
        c = self.get_class(class_name)
        key_instance = self.new_instance_from_class(c)
        for k, v in key.items():
            key_instance._instance[six.text_type(k)] = v
        with self._session.get_instance(
                self._ns, key_instance._instance) as op:
            instance = op.get_next_instance()
            if instance:
                return _Instance(self, instance.clone())

    @mi_to_wmi_exception
    @avoid_blocking_call
    def create_instance(self, instance, operation_options=None):
        """Persist a new instance."""
        operation_options = self._get_mi_operation_options(
            operation_options=operation_options)
        self._session.create_instance(self._ns, instance._instance,
                                      operation_options)

    @mi_to_wmi_exception
    @avoid_blocking_call
    def modify_instance(self, instance, operation_options=None):
        """Update an existing instance."""
        operation_options = self._get_mi_operation_options(
            operation_options=operation_options)
        self._session.modify_instance(self._ns, instance._instance,
                                      operation_options)

    @mi_to_wmi_exception
    @avoid_blocking_call
    def _delete_instance(self, session, instance, operation_options=None):
        # Deletion helper taking an explicit session (see delete_instance).
        operation_options = self._get_mi_operation_options(
            operation_options=operation_options)
        session.delete_instance(self._ns, instance._instance,
                                operation_options)

    def delete_instance(self, instance, operation_options=None):
        """Delete an instance, retrying over WinRM when the WMIDCOM
        provider reports WBEM_E_PROVIDER_NOT_CAPABLE."""
        try:
            self._delete_instance(self._session, instance,
                                  operation_options)
        except x_wmi as exc:
            # Deleting an instance using WMIDCOM can fail with
            # WBEM_E_PROVIDER_NOT_CAPABLE.
            # One affected WMI class is root/cimv2:WT_Host, there may
            # be others as well.
            err = ctypes.c_uint(exc.com_error.hresult).value
            if (err == WBEM_E_PROVIDER_NOT_CAPABLE and
                    self._protocol != mi.PROTOCOL_WINRM):
                tmp_session = self._app.create_session(
                    computer_name=self._computer_name,
                    protocol=mi.PROTOCOL_WINRM,
                    destination_options=self._destination_options)
                self._delete_instance(tmp_session, instance,
                                      operation_options)
            else:
                raise

    @mi_to_wmi_exception
    def subscribe(self, query, indication_result_callback, close_callback):
        """Start an event subscription; close_callback is invoked when
        this connection is closed."""
        op = self._session.subscribe(
            self._ns, six.text_type(query), indication_result_callback)
        self._notify_on_close.append(close_callback)
        return op

    @mi_to_wmi_exception
    def watch_for(self, raw_wql=None, notification_type="operation",
                  wmi_class=None, delay_secs=1, fields=None, **where_clause):
        """Return an _EventWatcher for *raw_wql*.

        The other parameters are kept for wmi.py signature compatibility
        ('fields' now defaults to None instead of a shared mutable list).
        """
        return _EventWatcher(self, six.text_type(raw_wql))

    def _wrap_element(self, name, el_type, value, convert_references=False):
        """Convert an MI element value into its Python/wmi.py form."""
        if isinstance(value, mi.Instance):
            if el_type == mi.MI_INSTANCE:
                return _Instance(self, value.clone())
            elif el_type == mi.MI_REFERENCE:
                if convert_references:
                    # Reload the object to populate all properties
                    return WMI(value.get_path(),
                               locale_name=self._locale_name,
                               operation_timeout=self._op_timeout,
                               user=self._user,
                               password=self._password,
                               user_cert_thumbprint=self._cert_thumbprint,
                               auth_type=self._auth_type,
                               transport=self._transport,
                               protocol=self._protocol)
                return value.get_path()
            else:
                raise Exception(
                    "Unsupported instance element type: %s" % el_type)
        if isinstance(value, (tuple, list)):
            if el_type == mi.MI_REFERENCEA:
                return tuple([i.get_path() for i in value])
            elif el_type == mi.MI_INSTANCEA:
                return tuple([_Instance(self, i.clone()) for i in value])
            else:
                return tuple(value)
        else:
            return value

    def _unwrap_element(self, el_type, value):
        """Convert a Python value into the MI form expected for el_type.

        Reference paths are resolved to live objects; booleans accept
        common string spellings; arrays are unwrapped element-wise.
        """
        if value is not None:
            if el_type == mi.MI_REFERENCE:
                instance = WMI(value,
                               locale_name=self._locale_name,
                               operation_timeout=self._op_timeout,
                               user=self._user,
                               password=self._password,
                               user_cert_thumbprint=self._cert_thumbprint,
                               auth_type=self._auth_type,
                               transport=self._transport,
                               protocol=self._protocol)
                if instance is None:
                    raise Exception("Reference not found: %s" % value)
                return instance._instance
            elif el_type == mi.MI_INSTANCE:
                return value._instance
            elif el_type == mi.MI_BOOLEAN:
                if isinstance(value, (str, six.text_type)):
                    return value.lower() in ['true', 'yes', '1']
                else:
                    return value
            elif el_type & mi.MI_ARRAY:
                unwrapped = []
                for item in value:
                    unwrapped.append(
                        self._unwrap_element(el_type ^ mi.MI_ARRAY, item))
                return tuple(unwrapped)
            else:
                return value
def _parse_moniker(moniker):
PROTOCOL = "winmgmts:"
computer_name = '.'
namespace = None
path = None
class_name = None
key = None
m = re.match("(?:" + PROTOCOL + r")?//([^/]+)/([^:]*)(?::(.*))?", moniker)
if m:
computer_name, namespace, path = m.groups()
if path:
m = re.match("([^.]+).(.*)", path)
if m:
key = {}
class_name, kvs = m.groups()
for kv in kvs.split(","):
m = re.match("([^=]+)=\"(.*)\"", kv)
if not m:
m = re.match("([^=]+)=(.*)", kv)
name, value = m.groups()
# TODO: improve unescaping
key[name] = value.replace("//", "\\")
else:
class_name = path
else:
namespace = moniker
return (computer_name, namespace, class_name, key)
@mi_to_wmi_exception
def WMI(moniker="root/cimv2", privileges=None, locale_name=None, computer="",
        user="", password="", user_cert_thumbprint="",
        auth_type=mi.MI_AUTH_TYPE_DEFAULT, operation_timeout=None,
        transport=None, protocol=mi.PROTOCOL_WMIDCOM):
    """Entry point mimicking wmi.WMI.

    Returns a _Connection when the moniker names only a namespace, or
    the single _Instance identified by the moniker's class/key segment.
    'privileges' is accepted for wmi.py compatibility and not used here.
    """
    computer_name, ns, class_name, key = _parse_moniker(
        moniker.replace("\\", "/"))
    if computer_name == '.':
        # Allow the 'computer' argument to override the default host.
        computer_name = computer or '.'
    conn = _Connection(computer_name=computer_name, ns=ns,
                       locale_name=locale_name,
                       operation_timeout=operation_timeout,
                       user=user, password=password,
                       user_cert_thumbprint=user_cert_thumbprint,
                       auth_type=auth_type,
                       transport=transport,
                       protocol=protocol)
    if not class_name:
        # Perform a simple operation to ensure the connection works.
        # This is needed for compatibility with the WMI module.
        # No name mangling applies at module level, so this resolves
        # through _Connection.__getattr__ as a class lookup.
        conn.__provider
        return conn
    else:
        return conn.get_instance(class_name, key)
| [
"# Copyright 2015 Cloudbase Solutions Srl\n",
"# All Rights Reserved.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n",
"# not use this file except in compliance with the License. You may obtain\n",
"# a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n",
"# License for the specific language governing permissions and limitations\n",
"# under the License.\n",
"\n",
"import abc\n",
"import ctypes\n",
"import datetime\n",
"import importlib\n",
"import re\n",
"import six\n",
"import struct\n",
"import weakref\n",
"\n",
"import mi\n",
"from mi import mi_error\n",
"import pbr.version\n",
"\n",
"try:\n",
" import eventlet\n",
" from eventlet import patcher\n",
" from eventlet import tpool\n",
" # If eventlet is installed and the 'thread' module is patched, we'll make\n",
" # sure that other greenthreads will not be blocked while WMI operations\n",
" # are performed by using tpool.\n",
" # This behavior can be disabled by using the following flag.\n",
" EVENTLET_NONBLOCKING_MODE_ENABLED = patcher.is_monkey_patched('thread')\n",
"except ImportError:\n",
" eventlet = None\n",
" EVENTLET_NONBLOCKING_MODE_ENABLED = False\n",
"\n",
"__all__ = ['__version__']\n",
"\n",
"version_info = pbr.version.VersionInfo('PyMI')\n",
"try:\n",
" __version__ = version_info.version_string()\n",
"except AttributeError:\n",
" __version__ = None\n",
"\n",
"\n",
"def avoid_blocking_call(f):\n",
" # Performs blocking calls in a different thread using tpool.execute\n",
" # when called from a greenthread.\n",
" # Note that eventlet.getcurrent will always return a greenlet object.\n",
" # Still, in case of a greenthread, the parent greenlet will always be the\n",
" # hub loop greenlet.\n",
" def wrapper(*args, **kwargs):\n",
" if (EVENTLET_NONBLOCKING_MODE_ENABLED and\n",
" eventlet.getcurrent().parent):\n",
" return tpool.execute(f, *args, **kwargs)\n",
" else:\n",
" return f(*args, **kwargs)\n",
" return wrapper\n",
"\n",
"\n",
"def _get_eventlet_original(module_name):\n",
" if eventlet:\n",
" return eventlet.patcher.original(module_name)\n",
" else:\n",
" return importlib.import_module(module_name)\n",
"\n",
"\n",
"# Default operation timeout in seconds.\n",
"# In order to enable it, this value must be set.\n",
"DEFAULT_OPERATION_TIMEOUT = None\n",
"\n",
"WBEM_E_PROVIDER_NOT_CAPABLE = 0x80041024\n",
"\n",
"\n",
"class x_wmi(Exception):\n",
" def __init__(self, info=\"\", com_error=None):\n",
" self.info = info\n",
" self.com_error = com_error\n",
"\n",
" def __str__(self):\n",
" return \"<x_wmi: %s %s>\" % (\n",
" self.info or \"Unexpected COM Error\",\n",
" self.com_error or \"(no underlying exception)\"\n",
" )\n",
"\n",
"\n",
"class x_wmi_timed_out(x_wmi):\n",
" pass\n",
"\n",
"\n",
"class com_error(Exception):\n",
" def __init__(self, hresult, strerror, excepinfo, argerror):\n",
" self.hresult = hresult\n",
" self.strerror = strerror\n",
" self.excepinfo = excepinfo\n",
" self.argerror = argerror\n",
"\n",
"\n",
"def unsigned_to_signed(unsigned):\n",
" signed, = struct.unpack(\"l\", struct.pack(\"L\", unsigned))\n",
" return signed\n",
"\n",
"\n",
"def mi_to_wmi_exception(func):\n",
" def func_wrapper(*args, **kwargs):\n",
" try:\n",
" return func(*args, **kwargs)\n",
" except mi.error as ex:\n",
" d = ex.args[0]\n",
" hresult = unsigned_to_signed(d.get(\"error_code\", 0))\n",
" err_msg = d.get(\"message\") or \"\"\n",
" com_ex = com_error(\n",
" hresult, err_msg,\n",
" (0, None, err_msg, None, None, hresult),\n",
" None)\n",
"\n",
" if(isinstance(ex, mi.timeouterror)):\n",
" raise x_wmi_timed_out(err_msg, com_ex)\n",
" else:\n",
" raise x_wmi(err_msg, com_ex)\n",
" return func_wrapper\n",
"\n",
"_app = None\n",
"\n",
"\n",
"def _get_app():\n",
" global _app\n",
" if not _app:\n",
" _app = mi.Application()\n",
" return _app\n",
"\n",
"\n",
"class _Method(object):\n",
" def __init__(self, conn, target, method_name):\n",
" self._conn = conn\n",
" self._target = target\n",
" self._method_name = method_name\n",
"\n",
" self._params = self._conn._get_method_params(target, method_name)\n",
"\n",
" @avoid_blocking_call\n",
" @mi_to_wmi_exception\n",
" def __call__(self, *args, **kwargs):\n",
" return self._conn.invoke_method(\n",
" self._target, self._method_name, *args, **kwargs)\n",
"\n",
" def __str__(self):\n",
" try:\n",
" args = []\n",
" # an MI object's length is the number of attributes the object it\n",
" # represents has.\n",
" for i in range(len(self._params)):\n",
" key, value_type, value = self._params.get_element(i)\n",
" args.append(key)\n",
"\n",
" obj_string = '<function %s (%s)>' % (\n",
" self._method_name, ', '.join(args))\n",
"\n",
" return obj_string\n",
"\n",
" except Exception:\n",
" return super(_BaseEntity, self).__str__()\n",
"\n",
" def __repr__(self):\n",
" return self.__str__()\n",
"\n",
"\n",
"class _Path(object):\n",
" \"\"\"\n",
" Provides a SWbemObjectPath replacement.\n",
" \"\"\"\n",
" def __init__(self, item):\n",
" self._item = item\n",
"\n",
" def __str__(self):\n",
" return self.Path\n",
"\n",
" @property\n",
" def Authority(self):\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" def Class(self):\n",
" return self._item.get_class_name()\n",
"\n",
" @property\n",
" def DisplayName(self):\n",
" return self.Path()\n",
"\n",
" @property\n",
" def IsClass(self):\n",
" return isinstance(self._item, _Class)\n",
"\n",
" @property\n",
" def IsSingleton(self):\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" def Keys(self):\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" def Locale(self):\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" def Namespace(self):\n",
" return self._item.get_namespace()\n",
"\n",
" @property\n",
" def ParentNamespace(self):\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" def Path(self):\n",
" return self._item.get_path()\n",
"\n",
" @property\n",
" def RelPath(self):\n",
" path = self._item.get_path()\n",
" return path[path.find(':') + 1:]\n",
"\n",
" @property\n",
" def Security_(self):\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" def Server(self):\n",
" return self._item.get_server_name()\n",
"\n",
"\n",
"@six.add_metaclass(abc.ABCMeta)\n",
"class _BaseEntity(object):\n",
" _convert_references = False\n",
"\n",
" @abc.abstractmethod\n",
" def get_wrapped_object(self):\n",
" pass\n",
"\n",
" @abc.abstractmethod\n",
" def get_class_name(self):\n",
" pass\n",
"\n",
" @abc.abstractmethod\n",
" def get_class(self):\n",
" pass\n",
"\n",
" def __str__(self):\n",
" try:\n",
" obj = self.get_wrapped_object()\n",
"\n",
" obj_string = 'instance of %s\\n{' % obj.get_class_name()\n",
"\n",
" # an MI object's length is the number of attributes the object it\n",
" # represents has.\n",
" for i in range(len(obj)):\n",
" key, value_type, value = obj.get_element(i)\n",
" if value_type == mi.MI_STRING:\n",
" value = '\"%s\"' % value\n",
" obj_string += \"\\n\\t%s = %s;\" % (key, value)\n",
"\n",
" obj_string += '\\n};'\n",
"\n",
" return obj_string\n",
" except Exception:\n",
" return super(_BaseEntity, self).__str__()\n",
"\n",
" def __repr__(self):\n",
" try:\n",
" obj = self.get_wrapped_object()\n",
" return '<pymi_object: %s>' % obj.get_path()\n",
" except Exception:\n",
" return super(_BaseEntity, self).__repr__()\n",
"\n",
" @mi_to_wmi_exception\n",
" def __getattr__(self, name):\n",
" try:\n",
" # If the class is an association class, certain of its properties\n",
" # are references which contain the paths to the associated objecs.\n",
" # The WMI module translates automatically into WMI objects those\n",
" # class properties that are references. To maintain the\n",
" # compatibility with the WMI module, those class properties that\n",
" # are references are translated into objects.\n",
" obj = self.get_wrapped_object()\n",
" return self._conn._wrap_element(\n",
" *obj.get_element(name),\n",
" convert_references=self._convert_references)\n",
" except mi.error:\n",
" try:\n",
" return _Method(self._conn, self, name)\n",
" except mi.error as err:\n",
" if err.args[0].get('mi_result') == (\n",
" mi_error.MI_RESULT_METHOD_NOT_FOUND):\n",
" err_msg = (\"'%(cls_name)s' has no attribute \"\n",
" \"'%(attr_name)s'.\")\n",
" raise AttributeError(\n",
" err_msg % dict(cls_name=self.get_class_name(),\n",
" attr_name=name))\n",
" else:\n",
" raise\n",
"\n",
" @mi_to_wmi_exception\n",
" def path(self):\n",
" return _Path(self.get_wrapped_object())\n",
"\n",
"\n",
"class _Instance(_BaseEntity):\n",
" _convert_references = True\n",
"\n",
" def __init__(self, conn, instance, use_conn_weak_ref=False):\n",
" if use_conn_weak_ref:\n",
" object.__setattr__(self, \"_conn_ref\", weakref.ref(conn))\n",
" else:\n",
" object.__setattr__(self, \"_conn_ref\", conn)\n",
" object.__setattr__(self, \"_instance\", instance)\n",
" object.__setattr__(self, \"_cls_name\", None)\n",
"\n",
" @property\n",
" def _conn(self):\n",
" if isinstance(self._conn_ref, weakref.ref):\n",
" return self._conn_ref()\n",
" else:\n",
" return self._conn_ref\n",
"\n",
" def get_wrapped_object(self):\n",
" return self._instance\n",
"\n",
" def get_class_name(self):\n",
" if not self._cls_name:\n",
" object.__setattr__(self, '_cls_name',\n",
" self._instance.get_class_name())\n",
" return self._cls_name\n",
"\n",
" def get_class(self):\n",
" class_name = self.get_class_name()\n",
" return self._conn.get_class(class_name)\n",
"\n",
" @mi_to_wmi_exception\n",
" def __setattr__(self, name, value):\n",
" _, el_type, _ = self._instance.get_element(name)\n",
" self._instance[six.text_type(name)] = self._conn._unwrap_element(\n",
" el_type, value)\n",
"\n",
" @mi_to_wmi_exception\n",
" def associators(self, wmi_association_class=u\"\", wmi_result_class=u\"\",\n",
" operation_options=None):\n",
" return self._conn.get_associators(\n",
" self, wmi_association_class, wmi_result_class,\n",
" operation_options)\n",
"\n",
" @mi_to_wmi_exception\n",
" def path_(self):\n",
" return self._instance.get_path()\n",
"\n",
" @mi_to_wmi_exception\n",
" def GetText_(self, text_format):\n",
" return self._conn.serialize_instance(self)\n",
"\n",
" @mi_to_wmi_exception\n",
" def put(self, operation_options=None):\n",
" if not self._instance.get_path():\n",
" self._conn.create_instance(self, operation_options)\n",
" else:\n",
" self._conn.modify_instance(self, operation_options)\n",
"\n",
" @mi_to_wmi_exception\n",
" def Delete_(self, operation_options=None):\n",
" self._conn.delete_instance(self, operation_options)\n",
"\n",
" @mi_to_wmi_exception\n",
" def set(self, **kwargs):\n",
" for k, v in kwargs.items():\n",
" self.__setattr__(k, v)\n",
"\n",
"\n",
"class _Class(_BaseEntity):\n",
" def __init__(self, conn, class_name, cls):\n",
" self._conn = conn\n",
" self.class_name = six.text_type(class_name)\n",
" self._cls = cls\n",
"\n",
" def get_wrapped_object(self):\n",
" return self._cls\n",
"\n",
" def get_class_name(self):\n",
" return self.class_name\n",
"\n",
" def get_class(self):\n",
" return self\n",
"\n",
" @mi_to_wmi_exception\n",
" def __call__(self, *argc, **argv):\n",
" operation_options = argv.pop(\"operation_options\", None)\n",
"\n",
" fields = \"\"\n",
" for i, v in enumerate(argc):\n",
" if i > 0:\n",
" raise ValueError('Invalid argument')\n",
" if not isinstance(v, list):\n",
" raise ValueError('Invalid argument')\n",
" # TODO: sanitize input\n",
" fields = \", \".join(v)\n",
" if not fields:\n",
" fields = \"*\"\n",
"\n",
" # TODO: sanitize input\n",
" filter = \" and \".join(\n",
" \"%(k)s = '%(v)s'\" % {'k': k, 'v': v} for k, v in argv.items())\n",
" if filter:\n",
" where = \" where %s\" % filter\n",
" else:\n",
" where = \"\"\n",
"\n",
" wql = (u\"select %(fields)s from %(class_name)s%(where)s\" %\n",
" {\"fields\": fields,\n",
" \"class_name\": self.class_name,\n",
" \"where\": where})\n",
" return self._conn.query(\n",
" wql, operation_options=operation_options)\n",
"\n",
" @mi_to_wmi_exception\n",
" def new(self):\n",
" return self._conn.new_instance_from_class(self)\n",
"\n",
" @mi_to_wmi_exception\n",
" def watch_for(self, raw_wql=None, notification_type=\"operation\",\n",
" wmi_class=None, delay_secs=1, fields=[], **where_clause):\n",
" return _EventWatcher(self._conn, six.text_type(raw_wql))\n",
"\n",
"\n",
"class _EventWatcher(object):\n",
" def __init__(self, conn, wql):\n",
" native_threading = _get_eventlet_original('threading')\n",
"\n",
" self._conn = conn\n",
" self._events_queue = []\n",
" self._error = None\n",
" self._event = native_threading.Event()\n",
" self._operation = conn.subscribe(\n",
" wql, self._indication_result, self.close)\n",
" self._operation_finished = native_threading.Event()\n",
"\n",
" def _process_events(self):\n",
" if self._error:\n",
" err = self._error\n",
" self._error = None\n",
" raise x_wmi(info=err[1])\n",
" if self._events_queue:\n",
" return self._events_queue.pop(0)\n",
"\n",
" @avoid_blocking_call\n",
" def __call__(self, timeout_ms=-1):\n",
" while True:\n",
" try:\n",
" event = self._process_events()\n",
" if event:\n",
" return event\n",
"\n",
" timeout = timeout_ms / 1000.0 if timeout_ms else None\n",
" if not self._event.wait(timeout):\n",
" raise x_wmi_timed_out()\n",
" self._event.clear()\n",
" finally:\n",
" if (not self._operation or\n",
" not self._operation.has_more_results()):\n",
" self.close()\n",
" raise x_wmi(\"No more events\")\n",
"\n",
" def _indication_result(self, instance, bookmark, machine_id, more_results,\n",
" result_code, error_string, error_details):\n",
" if not more_results:\n",
" self._operation_finished.set()\n",
"\n",
" if instance:\n",
" event = _Instance(self._conn,\n",
" instance[u\"TargetInstance\"].clone(),\n",
" use_conn_weak_ref=True)\n",
" try:\n",
" previous_inst = _Instance(\n",
" self._conn, instance[u'PreviousInstance'].clone(),\n",
" use_conn_weak_ref=True)\n",
" object.__setattr__(event, 'previous', previous_inst)\n",
" except (mi.error, AttributeError):\n",
" # The 'PreviousInstance' attribute may be missing, for\n",
" # example in case of a creation event or simply\n",
" # because this field was not requested.\n",
" pass\n",
"\n",
" self._events_queue.append(event)\n",
" if error_details:\n",
" self._error = (\n",
" result_code, error_string,\n",
" _Instance(\n",
" self._conn, error_details.clone(),\n",
" use_conn_weak_ref=True))\n",
" self._event.set()\n",
"\n",
" @avoid_blocking_call\n",
" def _wait_for_operation_cancel(self):\n",
" self._operation_finished.wait()\n",
"\n",
" def close(self):\n",
" if self._operation:\n",
" self._operation.cancel()\n",
" # Those operations are asynchronous. We'll need to wait for the\n",
" # subscription to be canceled before deallocating objects,\n",
" # otherwise MI can crash when receiving further events. We rely on\n",
" # the fact that an event will be emitted once the subscription is\n",
" # canceled.\n",
" self._wait_for_operation_cancel()\n",
"\n",
" self._operation.close()\n",
"\n",
" self._event.set()\n",
" self._operation = None\n",
" self._timeout_ms = None\n",
" self._events_queue = []\n",
" self._conn = None\n",
"\n",
" def __del__(self):\n",
" self.close()\n",
"\n",
"\n",
"class _Connection(object):\n",
" def __init__(self, computer_name=\".\", ns=\"root/cimv2\", locale_name=None,\n",
" protocol=mi.PROTOCOL_WMIDCOM, cache_classes=True,\n",
" operation_timeout=None, user=\"\", password=\"\",\n",
" user_cert_thumbprint=\"\", auth_type=\"\", transport=None):\n",
" self._ns = six.text_type(ns)\n",
" self._app = _get_app()\n",
" self._protocol = six.text_type(protocol)\n",
" self._computer_name = six.text_type(computer_name)\n",
" self._transport = transport\n",
"\n",
" self._locale_name = locale_name\n",
" self._op_timeout = operation_timeout or DEFAULT_OPERATION_TIMEOUT\n",
"\n",
" self._user = user\n",
" self._password = password\n",
" self._auth_type = auth_type\n",
" self._cert_thumbprint = user_cert_thumbprint\n",
"\n",
" self._set_destination_options()\n",
" self._session = self._app.create_session(\n",
" computer_name=self._computer_name,\n",
" protocol=self._protocol,\n",
" destination_options=self._destination_options)\n",
" self._cache_classes = cache_classes\n",
" self._class_cache = {}\n",
" self._method_params_cache = {}\n",
" self._notify_on_close = []\n",
"\n",
" def _set_destination_options(self):\n",
" self._destination_options = self._app.create_destination_options()\n",
"\n",
" if self._locale_name:\n",
" self._destination_options.set_ui_locale(\n",
" locale_name=six.text_type(self._locale_name))\n",
"\n",
" if self._op_timeout is not None:\n",
" timeout = datetime.timedelta(0, self._op_timeout, 0)\n",
" self._destination_options.set_timeout(timeout)\n",
"\n",
" if self._transport:\n",
" self._destination_options.set_transport(self._transport)\n",
"\n",
" if self._user or self._cert_thumbprint:\n",
" user, domain = self._get_username_and_domain()\n",
" self._destination_options.add_credentials(\n",
" self._auth_type, domain, user, self._password,\n",
" self._cert_thumbprint)\n",
"\n",
" def _get_username_and_domain(self):\n",
" username = self._user.replace(\"/\", \"\\\\\")\n",
" if \"\\\\\" in username:\n",
" domain, user = username.split(\"\\\\\")\n",
" elif \"@\" in username:\n",
" user, domain = username.split(\"@\")\n",
" else:\n",
" user, domain = username, \"\"\n",
" return user, domain\n",
"\n",
" def _close(self):\n",
" for callback in self._notify_on_close:\n",
" callback()\n",
" self._notify_on_close = []\n",
" self._session = None\n",
" self._app = None\n",
"\n",
" @mi_to_wmi_exception\n",
" def __del__(self):\n",
" self._close()\n",
"\n",
" @mi_to_wmi_exception\n",
" def __getattr__(self, name):\n",
" return self.get_class(six.text_type(name))\n",
"\n",
" def _get_instances(self, op):\n",
" l = []\n",
" i = op.get_next_instance()\n",
" while i is not None:\n",
" l.append(_Instance(self, i.clone()))\n",
" i = op.get_next_instance()\n",
" return l\n",
"\n",
" def _get_mi_operation_options(self, operation_options=None):\n",
" if not operation_options:\n",
" return\n",
"\n",
" mi_op_options = self._app.create_operation_options()\n",
"\n",
" if operation_options.get('operation_timeout') is not None:\n",
" operation_timeout = operation_options['operation_timeout']\n",
" timeout = datetime.timedelta(0, operation_timeout, 0)\n",
" mi_op_options.set_timeout(timeout)\n",
"\n",
" for option in operation_options.get('custom_options', []):\n",
" # The value_type must be a MI type, such as MI_Array.\n",
" # The value object will then be 'casted' to that type.\n",
" option_value = self._unwrap_element(option['value_type'],\n",
" option['value'])\n",
" mi_op_options.set_custom_option(\n",
" name=six.text_type(option['name']),\n",
" value_type=option['value_type'],\n",
" value=option_value,\n",
" must_comply=option.get('must_comply', True))\n",
" return mi_op_options\n",
"\n",
" @mi_to_wmi_exception\n",
" @avoid_blocking_call\n",
" def query(self, wql, operation_options=None):\n",
" wql = wql.replace(\"\\\\\", \"\\\\\\\\\")\n",
" operation_options = self._get_mi_operation_options(\n",
" operation_options=operation_options)\n",
"\n",
" with self._session.exec_query(\n",
" ns=self._ns, query=six.text_type(wql),\n",
" operation_options=operation_options) as q:\n",
" return self._get_instances(q)\n",
"\n",
" @mi_to_wmi_exception\n",
" @avoid_blocking_call\n",
" def get_associators(self, instance, wmi_association_class=u\"\",\n",
" wmi_result_class=u\"\",\n",
" operation_options=None):\n",
" operation_options = self._get_mi_operation_options(\n",
" operation_options=operation_options)\n",
" with self._session.get_associators(\n",
" ns=self._ns, instance=instance._instance,\n",
" assoc_class=six.text_type(wmi_association_class),\n",
" result_class=six.text_type(wmi_result_class),\n",
" operation_options=operation_options) as q:\n",
" return self._get_instances(q)\n",
"\n",
" def _get_method_params(self, target, method_name):\n",
" params = None\n",
" class_name = None\n",
" if self._cache_classes:\n",
" class_name = target.get_class_name()\n",
" params = self._method_params_cache.get((class_name, method_name))\n",
"\n",
" if params is not None:\n",
" params = params.clone()\n",
" else:\n",
" mi_class = target.get_class().get_wrapped_object()\n",
" params = self._app.create_method_params(\n",
" mi_class, six.text_type(method_name))\n",
" if self._cache_classes:\n",
" self._method_params_cache[(class_name, method_name)] = params\n",
" params = params.clone()\n",
" return params\n",
"\n",
" @mi_to_wmi_exception\n",
" def invoke_method(self, target, method_name, *args, **kwargs):\n",
" mi_target = target.get_wrapped_object()\n",
" params = self._get_method_params(target, method_name)\n",
" operation_options = self._get_mi_operation_options(\n",
" operation_options=kwargs.pop('operation_options', None))\n",
"\n",
" for i, v in enumerate(args):\n",
" _, el_type, _ = params.get_element(i)\n",
" params[i] = self._unwrap_element(el_type, v)\n",
" for k, v in kwargs.items():\n",
" _, el_type, _ = params.get_element(k)\n",
" params[k] = self._unwrap_element(el_type, v)\n",
"\n",
" if not params:\n",
" params = None\n",
"\n",
" with self._session.invoke_method(\n",
" mi_target, six.text_type(method_name), params,\n",
" operation_options) as op:\n",
" l = []\n",
" r = op.get_next_instance()\n",
" elements = []\n",
" for i in six.moves.range(0, len(r)):\n",
" elements.append(r.get_element(i))\n",
"\n",
" # Sort the output params by name before returning their values.\n",
" # The WINRM and WMIDCOM protocols behave differently in how\n",
" # returned elements are ordered. This hack aligns with the WMIDCOM\n",
" # behaviour to retain compatibility with the wmi.py module.\n",
" for element in sorted(elements, key=lambda element: element[0]):\n",
" # Workaround to avoid including the return value if the method\n",
" # returns void, as there's no direct way to determine it.\n",
" # This won't work if the method is expected to return a\n",
" # boolean value!!\n",
" if element != ('ReturnValue', mi.MI_BOOLEAN, True):\n",
" l.append(self._wrap_element(*element))\n",
" return tuple(l)\n",
"\n",
" @mi_to_wmi_exception\n",
" @avoid_blocking_call\n",
" def new_instance_from_class(self, cls):\n",
" return _Instance(\n",
" self, self._app.create_instance_from_class(\n",
" cls.class_name, cls.get_wrapped_object()))\n",
"\n",
" @mi_to_wmi_exception\n",
" def serialize_instance(self, instance):\n",
" with self._app.create_serializer() as s:\n",
" return s.serialize_instance(instance._instance)\n",
"\n",
" @mi_to_wmi_exception\n",
" def get_class(self, class_name):\n",
" cls = None\n",
" if self._cache_classes:\n",
" cls = self._class_cache.get(class_name)\n",
"\n",
" if cls is None:\n",
" cls = self._get_mi_class(class_name)\n",
" if self._cache_classes and cls:\n",
" self._class_cache[class_name] = cls\n",
"\n",
" if cls is not None:\n",
" return _Class(self, class_name, cls)\n",
"\n",
" @avoid_blocking_call\n",
" def _get_mi_class(self, class_name):\n",
" with self._session.get_class(\n",
" ns=self._ns, class_name=class_name) as op:\n",
" cls = op.get_next_class()\n",
" cls = cls.clone() if cls is not None else cls\n",
" return cls\n",
"\n",
" @mi_to_wmi_exception\n",
" @avoid_blocking_call\n",
" def get_instance(self, class_name, key):\n",
" c = self.get_class(class_name)\n",
" key_instance = self.new_instance_from_class(c)\n",
" for k, v in key.items():\n",
" key_instance._instance[six.text_type(k)] = v\n",
" with self._session.get_instance(\n",
" self._ns, key_instance._instance) as op:\n",
" instance = op.get_next_instance()\n",
" if instance:\n",
" return _Instance(self, instance.clone())\n",
"\n",
" @mi_to_wmi_exception\n",
" @avoid_blocking_call\n",
" def create_instance(self, instance, operation_options=None):\n",
" operation_options = self._get_mi_operation_options(\n",
" operation_options=operation_options)\n",
" self._session.create_instance(self._ns, instance._instance,\n",
" operation_options)\n",
"\n",
" @mi_to_wmi_exception\n",
" @avoid_blocking_call\n",
" def modify_instance(self, instance, operation_options=None):\n",
" operation_options = self._get_mi_operation_options(\n",
" operation_options=operation_options)\n",
" self._session.modify_instance(self._ns, instance._instance,\n",
" operation_options)\n",
"\n",
" @mi_to_wmi_exception\n",
" @avoid_blocking_call\n",
" def _delete_instance(self, session, instance, operation_options=None):\n",
" operation_options = self._get_mi_operation_options(\n",
" operation_options=operation_options)\n",
" session.delete_instance(self._ns, instance._instance,\n",
" operation_options)\n",
"\n",
" def delete_instance(self, instance, operation_options=None):\n",
" try:\n",
" self._delete_instance(self._session, instance,\n",
" operation_options)\n",
" except x_wmi as exc:\n",
" # Deleting an instance using WMIDCOM can fail with\n",
" # WBEM_E_PROVIDER_NOT_CAPABLE.\n",
" # One affected WMI class is root/cimv2:WT_Host, there may\n",
" # be others as well.\n",
" err = ctypes.c_uint(exc.com_error.hresult).value\n",
" if (err == WBEM_E_PROVIDER_NOT_CAPABLE and\n",
" self._protocol != mi.PROTOCOL_WINRM):\n",
" tmp_session = self._app.create_session(\n",
" computer_name=self._computer_name,\n",
" protocol=mi.PROTOCOL_WINRM,\n",
" destination_options=self._destination_options)\n",
" self._delete_instance(tmp_session, instance,\n",
" operation_options)\n",
" else:\n",
" raise\n",
"\n",
" @mi_to_wmi_exception\n",
" def subscribe(self, query, indication_result_callback, close_callback):\n",
" op = self._session.subscribe(\n",
" self._ns, six.text_type(query), indication_result_callback)\n",
" self._notify_on_close.append(close_callback)\n",
" return op\n",
"\n",
" @mi_to_wmi_exception\n",
" def watch_for(self, raw_wql=None, notification_type=\"operation\",\n",
" wmi_class=None, delay_secs=1, fields=[], **where_clause):\n",
" return _EventWatcher(self, six.text_type(raw_wql))\n",
"\n",
" def _wrap_element(self, name, el_type, value, convert_references=False):\n",
" if isinstance(value, mi.Instance):\n",
" if el_type == mi.MI_INSTANCE:\n",
" return _Instance(self, value.clone())\n",
" elif el_type == mi.MI_REFERENCE:\n",
" if convert_references:\n",
" # Reload the object to populate all properties\n",
" return WMI(value.get_path(),\n",
" locale_name=self._locale_name,\n",
" operation_timeout=self._op_timeout,\n",
" user=self._user,\n",
" password=self._password,\n",
" user_cert_thumbprint=self._cert_thumbprint,\n",
" auth_type=self._auth_type,\n",
" transport=self._transport,\n",
" protocol=self._protocol)\n",
" return value.get_path()\n",
" else:\n",
" raise Exception(\n",
" \"Unsupported instance element type: %s\" % el_type)\n",
" if isinstance(value, (tuple, list)):\n",
" if el_type == mi.MI_REFERENCEA:\n",
" return tuple([i.get_path() for i in value])\n",
" elif el_type == mi.MI_INSTANCEA:\n",
" return tuple([_Instance(self, i.clone()) for i in value])\n",
" else:\n",
" return tuple(value)\n",
" else:\n",
" return value\n",
"\n",
" def _unwrap_element(self, el_type, value):\n",
" if value is not None:\n",
" if el_type == mi.MI_REFERENCE:\n",
" instance = WMI(value,\n",
" locale_name=self._locale_name,\n",
" operation_timeout=self._op_timeout,\n",
" user=self._user,\n",
" password=self._password,\n",
" user_cert_thumbprint=self._cert_thumbprint,\n",
" auth_type=self._auth_type,\n",
" transport=self._transport,\n",
" protocol=self._protocol)\n",
" if instance is None:\n",
" raise Exception(\"Reference not found: %s\" % value)\n",
" return instance._instance\n",
" elif el_type == mi.MI_INSTANCE:\n",
" return value._instance\n",
" elif el_type == mi.MI_BOOLEAN:\n",
" if isinstance(value, (str, six.text_type)):\n",
" return value.lower() in ['true', 'yes', '1']\n",
" else:\n",
" return value\n",
" elif el_type & mi.MI_ARRAY:\n",
" l = []\n",
" for item in value:\n",
" l.append(self._unwrap_element(el_type ^ mi.MI_ARRAY,\n",
" item))\n",
" return tuple(l)\n",
" else:\n",
" return value\n",
"\n",
"\n",
"def _parse_moniker(moniker):\n",
" PROTOCOL = \"winmgmts:\"\n",
" computer_name = '.'\n",
" namespace = None\n",
" path = None\n",
" class_name = None\n",
" key = None\n",
" m = re.match(\"(?:\" + PROTOCOL + r\")?//([^/]+)/([^:]*)(?::(.*))?\", moniker)\n",
" if m:\n",
" computer_name, namespace, path = m.groups()\n",
" if path:\n",
" m = re.match(\"([^.]+).(.*)\", path)\n",
" if m:\n",
" key = {}\n",
" class_name, kvs = m.groups()\n",
" for kv in kvs.split(\",\"):\n",
" m = re.match(\"([^=]+)=\\\"(.*)\\\"\", kv)\n",
" if not m:\n",
" m = re.match(\"([^=]+)=(.*)\", kv)\n",
" name, value = m.groups()\n",
" # TODO: improve unescaping\n",
" key[name] = value.replace(\"//\", \"\\\\\")\n",
" else:\n",
" class_name = path\n",
" else:\n",
" namespace = moniker\n",
" return (computer_name, namespace, class_name, key)\n",
"\n",
"\n",
"@mi_to_wmi_exception\n",
"def WMI(moniker=\"root/cimv2\", privileges=None, locale_name=None, computer=\"\",\n",
" user=\"\", password=\"\", user_cert_thumbprint=\"\",\n",
" auth_type=mi.MI_AUTH_TYPE_DEFAULT, operation_timeout=None,\n",
" transport=None, protocol=mi.PROTOCOL_WMIDCOM):\n",
" computer_name, ns, class_name, key = _parse_moniker(\n",
" moniker.replace(\"\\\\\", \"/\"))\n",
" if computer_name == '.':\n",
" computer_name = computer or '.'\n",
" conn = _Connection(computer_name=computer_name, ns=ns,\n",
" locale_name=locale_name,\n",
" operation_timeout=operation_timeout,\n",
" user=user, password=password,\n",
" user_cert_thumbprint=user_cert_thumbprint,\n",
" auth_type=auth_type,\n",
" transport=transport,\n",
" protocol=protocol)\n",
" if not class_name:\n",
" # Perform a simple operation to ensure the connection works.\n",
" # This is needed for compatibility with the WMI module.\n",
" conn.__provider\n",
" return conn\n",
" else:\n",
" return conn.get_instance(class_name, key)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 935 | 0.000263 | false |
# -*- coding: UTF-8 -*-
# GUI Application automation and testing library
# Copyright (C) 2015 Intel Corporation
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
from __future__ import unicode_literals
from __future__ import print_function
import time
from pywinauto import application
#from pywinauto import tests
def SakuraTest():
app = application.Application()
app.start_(r"C:\Program Files\sakura\sakura.exe")
mainwin = app[u'無題sakura']
# menu's from this application are not recovered well
# but even with Japanese Regional settings they are not
# rendered correctly by windows!
# so using keys to select a menu item
# open some dialog
mainwin.TypeKeys("%OC")
dlg = app[u'共通設定']
app[u'共通設定'][u"フリーカーソル"].Click()
dlg.MSDOS.Click()
dlg.Cancel.Click()
# quit the application
mainwin.TypeKeys("%FX")
def Main():
start = time.time()
SakuraTest()
print("Total time taken:", time.time() - start)
if __name__ == "__main__":
Main() | [
"# -*- coding: UTF-8 -*-\n",
"# GUI Application automation and testing library\n",
"# Copyright (C) 2015 Intel Corporation\n",
"# Copyright (C) 2006 Mark Mc Mahon\n",
"#\n",
"# This library is free software; you can redistribute it and/or \n",
"# modify it under the terms of the GNU Lesser General Public License \n",
"# as published by the Free Software Foundation; either version 2.1 \n",
"# of the License, or (at your option) any later version.\n",
"#\n",
"# This library is distributed in the hope that it will be useful, \n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of \n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. \n",
"# See the GNU Lesser General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU Lesser General Public \n",
"# License along with this library; if not, write to the \n",
"# Free Software Foundation, Inc.,\n",
"# 59 Temple Place,\n",
"# Suite 330, \n",
"# Boston, MA 02111-1307 USA \n",
"from __future__ import unicode_literals\n",
"from __future__ import print_function\n",
"\n",
"import time\n",
"\n",
"from pywinauto import application\n",
"\n",
"#from pywinauto import tests\n",
"\n",
"\n",
"def SakuraTest():\n",
"\n",
"\tapp = application.Application()\n",
"\tapp.start_(r\"C:\\Program Files\\sakura\\sakura.exe\")\n",
"\n",
"\tmainwin = app[u'無題sakura']\n",
"\n",
"\t# menu's from this application are not recovered well\n",
"\t# but even with Japanese Regional settings they are not\n",
"\t# rendered correctly by windows!\n",
"\t# so using keys to select a menu item\n",
"\n",
"\t# open some dialog\n",
"\tmainwin.TypeKeys(\"%OC\")\n",
"\n",
"\tdlg = app[u'共通設定']\n",
"\n",
"\tapp[u'共通設定'][u\"フリーカーソル\"].Click()\n",
"\n",
"\tdlg.MSDOS.Click()\n",
"\n",
"\tdlg.Cancel.Click()\n",
"\n",
"\t# quit the application\n",
"\tmainwin.TypeKeys(\"%FX\")\n",
"\n",
"\n",
"def Main():\n",
"\tstart = time.time()\n",
"\n",
"\tSakuraTest()\n",
"\n",
"\tprint(\"Total time taken:\", time.time() - start)\n",
"\n",
"if __name__ == \"__main__\":\n",
"\tMain()"
] | [
0,
0,
0,
0,
0,
0.015384615384615385,
0.014285714285714285,
0.014705882352941176,
0,
0,
0.014925373134328358,
0.015151515151515152,
0.017857142857142856,
0,
0,
0.014705882352941176,
0.017543859649122806,
0,
0,
0.058823529411764705,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0.030303030303030304,
0.0196078431372549,
0,
0.03571428571428571,
0,
0.01818181818181818,
0.017543859649122806,
0.029411764705882353,
0.02564102564102564,
0,
0.05,
0.04,
0,
0.05,
0,
0.029411764705882353,
0,
0.05263157894736842,
0,
0.05,
0,
0.041666666666666664,
0.04,
0,
0,
0,
0.047619047619047616,
0,
0.07142857142857142,
0,
0.02040816326530612,
0,
0.037037037037037035,
0.2857142857142857
] | 67 | 0.018529 | false |
# -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
class extremetubeGenreScreen(MPScreen):
def __init__(self, session):
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultGenreScreenCover.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultGenreScreenCover.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel" : self.keyCancel,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft
}, -1)
self['title'] = Label("ExtremeTube.com")
self['ContentTitle'] = Label("Genre:")
self.keyLocked = True
self.suchString = ''
self.genreliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.layoutFinished)
def layoutFinished(self):
self.keyLocked = True
url = "http://www.extremetube.com/video-categories"
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.genreData).addErrback(self.dataError)
def genreData(self, data):
parse = re.search('class="title-general">\s{0,1}Categories</h1>(.*?)footer', data, re.S)
Cats = re.findall('href="(.*?)".*?img"\ssrc="(.*?)".*?title="(.*?)"', parse.group(1), re.S)
if Cats:
for (Url, Image, Title) in Cats:
if Title != "High Definition Videos":
Url = "http://www.extremetube.com" + Url.replace('?fromPage=categories', '') + '?page='
self.genreliste.append((Title, Url, Image))
self.genreliste.sort()
self.genreliste.insert(0, ("Longest", "http://www.extremetube.com/videos?o=lg", None))
self.genreliste.insert(0, ("Highest Rated", "http://www.extremetube.com/videos?o=tr", None))
self.genreliste.insert(0, ("Most Popular", "http://www.extremetube.com/videos?o=mv", None))
self.genreliste.insert(0, ("Being Watched", "http://www.extremetube.com/videos?o=bw", None))
self.genreliste.insert(0, ("Recently Added", "http://www.extremetube.com/videos?o=mr", None))
self.genreliste.insert(0, ("--- Search ---", "callSuchen", None))
self.ml.setList(map(self._defaultlistcenter, self.genreliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.showInfos()
def showInfos(self):
Image = self['liste'].getCurrent()[0][2]
CoverHelper(self['coverArt']).getCover(Image)
def keyOK(self):
if self.keyLocked:
return
Name = self['liste'].getCurrent()[0][0]
if Name == "--- Search ---":
self.suchen()
else:
Link = self['liste'].getCurrent()[0][1]
self.session.open(extremetubeFilmScreen, Link, Name)
def SuchenCallback(self, callback = None, entry = None):
if callback is not None and len(callback):
Name = self['liste'].getCurrent()[0][0]
self.suchString = callback.replace(' ', '+')
Link = 'http://www.extremetube.com/videos?search=%s' % (self.suchString)
self.session.open(extremetubeFilmScreen, Link, Name)
class extremetubeFilmScreen(MPScreen, ThumbsHelper):
def __init__(self, session, Link, Name):
self.Link = Link
self.Name = Name
self.plugin_path = mp_globals.pluginPath
self.skin_path = mp_globals.pluginPath + mp_globals.skinsPath
path = "%s/%s/defaultListWideScreen.xml" % (self.skin_path, config.mediaportal.skin.value)
if not fileExists(path):
path = self.skin_path + mp_globals.skinFallback + "/defaultListWideScreen.xml"
with open(path, "r") as f:
self.skin = f.read()
f.close()
MPScreen.__init__(self, session)
ThumbsHelper.__init__(self)
self["actions"] = ActionMap(["MP_Actions"], {
"ok" : self.keyOK,
"0" : self.closeAll,
"cancel" : self.keyCancel,
"5" : self.keyShowThumb,
"up" : self.keyUp,
"down" : self.keyDown,
"right" : self.keyRight,
"left" : self.keyLeft,
"nextBouquet" : self.keyPageUp,
"prevBouquet" : self.keyPageDown,
"green" : self.keyPageNumber
}, -1)
self['title'] = Label("ExtremeTube.com")
self['ContentTitle'] = Label("Genre: %s" % self.Name)
self['F2'] = Label(_("Page"))
self['Page'] = Label(_("Page:"))
self.keyLocked = True
self.page = 1
self.filmliste = []
self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
self['liste'] = self.ml
self.onLayoutFinish.append(self.loadPage)
def loadPage(self):
self.keyLocked = True
self['name'].setText(_('Please wait...'))
self.filmliste = []
if re.match('.*?\?', self.Link):
delimiter = '&'
else:
delimiter = '?'
url = "%s%sformat=json&page=%s" % (self.Link, delimiter, str(self.page))
getPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadData).addErrback(self.dataError)
def loadData(self, data):
self.getLastPage(data, '', 'lastPage":(.*?),"')
Movies = re.findall('"id".*?real_times_viewed":(.*?),".*?specialchars_title":"(.*?)","duration":"(.*?)","video_link":"(.*?)","thumb_url":"(.*?)"', data, re.S)
if Movies:
for (Views, Title, Runtime, Url, Image) in Movies:
self.filmliste.append((decodeHtml(Title), Url.replace('\/','/'), Image.replace('\/','/'), Runtime, Views.replace('"','')))
self.ml.setList(map(self._defaultlistleft, self.filmliste))
self.ml.moveToIndex(0)
self.keyLocked = False
self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1)
self.showInfos()
def showInfos(self):
title = self['liste'].getCurrent()[0][0]
pic = self['liste'].getCurrent()[0][2]
runtime = self['liste'].getCurrent()[0][3]
views = self['liste'].getCurrent()[0][4]
self['name'].setText(title)
self['handlung'].setText("Runtime: %s\nViews: %s" % (runtime, views))
CoverHelper(self['coverArt']).getCover(pic)
def keyOK(self):
if self.keyLocked:
return
Link = self['liste'].getCurrent()[0][1]
self.keyLocked = True
getPage(Link, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.getVideoPage).addErrback(self.dataError)
def getVideoPage(self, data):
videoPage = re.findall("quality_\d+p=(.*?)&", data, re.S)
if videoPage:
self.keyLocked = False
url = urllib.unquote(videoPage[-1])
Title = self['liste'].getCurrent()[0][0]
self.session.open(SimplePlayer, [(Title, url)], showPlaylist=False, ltype='extremetube') | [
"# -*- coding: utf-8 -*-\n",
"from Plugins.Extensions.MediaPortal.plugin import _\n",
"from Plugins.Extensions.MediaPortal.resources.imports import *\n",
"\n",
"class extremetubeGenreScreen(MPScreen):\n",
"\n",
"\tdef __init__(self, session):\n",
"\t\tself.plugin_path = mp_globals.pluginPath\n",
"\t\tself.skin_path = mp_globals.pluginPath + mp_globals.skinsPath\n",
"\n",
"\t\tpath = \"%s/%s/defaultGenreScreenCover.xml\" % (self.skin_path, config.mediaportal.skin.value)\n",
"\t\tif not fileExists(path):\n",
"\t\t\tpath = self.skin_path + mp_globals.skinFallback + \"/defaultGenreScreenCover.xml\"\n",
"\n",
"\t\twith open(path, \"r\") as f:\n",
"\t\t\tself.skin = f.read()\n",
"\t\t\tf.close()\n",
"\r\t\tMPScreen.__init__(self, session)\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"0\" : self.closeAll,\n",
"\t\t\t\"cancel\" : self.keyCancel,\n",
"\t\t\t\"up\" : self.keyUp,\n",
"\t\t\t\"down\" : self.keyDown,\n",
"\t\t\t\"right\" : self.keyRight,\n",
"\t\t\t\"left\" : self.keyLeft\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(\"ExtremeTube.com\")\n",
"\t\tself['ContentTitle'] = Label(\"Genre:\")\n",
"\n",
"\t\tself.keyLocked = True\n",
"\t\tself.suchString = ''\n",
"\n",
"\t\tself.genreliste = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tself.onLayoutFinish.append(self.layoutFinished)\n",
"\n",
"\tdef layoutFinished(self):\n",
"\t\tself.keyLocked = True\n",
"\t\turl = \"http://www.extremetube.com/video-categories\"\n",
"\t\tgetPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.genreData).addErrback(self.dataError)\n",
"\n",
"\tdef genreData(self, data):\n",
"\t\tparse = re.search('class=\"title-general\">\\s{0,1}Categories</h1>(.*?)footer', data, re.S)\n",
"\t\tCats = re.findall('href=\"(.*?)\".*?img\"\\ssrc=\"(.*?)\".*?title=\"(.*?)\"', parse.group(1), re.S)\n",
"\t\tif Cats:\n",
"\t\t\tfor (Url, Image, Title) in Cats:\n",
"\t\t\t\tif Title != \"High Definition Videos\":\n",
"\t\t\t\t\tUrl = \"http://www.extremetube.com\" + Url.replace('?fromPage=categories', '') + '?page='\n",
"\t\t\t\t\tself.genreliste.append((Title, Url, Image))\n",
"\t\t\tself.genreliste.sort()\n",
"\t\t\tself.genreliste.insert(0, (\"Longest\", \"http://www.extremetube.com/videos?o=lg\", None))\n",
"\t\t\tself.genreliste.insert(0, (\"Highest Rated\", \"http://www.extremetube.com/videos?o=tr\", None))\n",
"\t\t\tself.genreliste.insert(0, (\"Most Popular\", \"http://www.extremetube.com/videos?o=mv\", None))\n",
"\t\t\tself.genreliste.insert(0, (\"Being Watched\", \"http://www.extremetube.com/videos?o=bw\", None))\n",
"\t\t\tself.genreliste.insert(0, (\"Recently Added\", \"http://www.extremetube.com/videos?o=mr\", None))\n",
"\t\t\tself.genreliste.insert(0, (\"--- Search ---\", \"callSuchen\", None))\n",
"\t\t\tself.ml.setList(map(self._defaultlistcenter, self.genreliste))\n",
"\t\t\tself.ml.moveToIndex(0)\n",
"\t\t\tself.keyLocked = False\n",
"\t\t\tself.showInfos()\n",
"\n",
"\tdef showInfos(self):\n",
"\t\tImage = self['liste'].getCurrent()[0][2]\n",
"\t\tCoverHelper(self['coverArt']).getCover(Image)\n",
"\n",
"\tdef keyOK(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\tName = self['liste'].getCurrent()[0][0]\n",
"\t\tif Name == \"--- Search ---\":\n",
"\t\t\tself.suchen()\n",
"\n",
"\t\telse:\n",
"\t\t\tLink = self['liste'].getCurrent()[0][1]\n",
"\t\t\tself.session.open(extremetubeFilmScreen, Link, Name)\n",
"\n",
"\tdef SuchenCallback(self, callback = None, entry = None):\n",
"\t\tif callback is not None and len(callback):\n",
"\t\t\tName = self['liste'].getCurrent()[0][0]\n",
"\t\t\tself.suchString = callback.replace(' ', '+')\n",
"\t\t\tLink = 'http://www.extremetube.com/videos?search=%s' % (self.suchString)\n",
"\t\t\tself.session.open(extremetubeFilmScreen, Link, Name)\n",
"\n",
"class extremetubeFilmScreen(MPScreen, ThumbsHelper):\n",
"\n",
"\tdef __init__(self, session, Link, Name):\n",
"\t\tself.Link = Link\n",
"\t\tself.Name = Name\n",
"\t\tself.plugin_path = mp_globals.pluginPath\n",
"\t\tself.skin_path = mp_globals.pluginPath + mp_globals.skinsPath\n",
"\n",
"\t\tpath = \"%s/%s/defaultListWideScreen.xml\" % (self.skin_path, config.mediaportal.skin.value)\n",
"\t\tif not fileExists(path):\n",
"\t\t\tpath = self.skin_path + mp_globals.skinFallback + \"/defaultListWideScreen.xml\"\n",
"\n",
"\t\twith open(path, \"r\") as f:\n",
"\t\t\tself.skin = f.read()\n",
"\t\t\tf.close()\n",
"\r\t\tMPScreen.__init__(self, session)\n",
"\t\tThumbsHelper.__init__(self)\n",
"\n",
"\t\tself[\"actions\"] = ActionMap([\"MP_Actions\"], {\n",
"\t\t\t\"ok\" : self.keyOK,\n",
"\t\t\t\"0\" : self.closeAll,\n",
"\t\t\t\"cancel\" : self.keyCancel,\n",
"\t\t\t\"5\" : self.keyShowThumb,\n",
"\t\t\t\"up\" : self.keyUp,\n",
"\t\t\t\"down\" : self.keyDown,\n",
"\t\t\t\"right\" : self.keyRight,\n",
"\t\t\t\"left\" : self.keyLeft,\n",
"\t\t\t\"nextBouquet\" : self.keyPageUp,\n",
"\t\t\t\"prevBouquet\" : self.keyPageDown,\n",
"\t\t\t\"green\" : self.keyPageNumber\n",
"\t\t}, -1)\n",
"\n",
"\t\tself['title'] = Label(\"ExtremeTube.com\")\n",
"\t\tself['ContentTitle'] = Label(\"Genre: %s\" % self.Name)\n",
"\t\tself['F2'] = Label(_(\"Page\"))\n",
"\n",
"\t\tself['Page'] = Label(_(\"Page:\"))\n",
"\n",
"\t\tself.keyLocked = True\n",
"\t\tself.page = 1\n",
"\n",
"\t\tself.filmliste = []\n",
"\t\tself.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)\n",
"\t\tself['liste'] = self.ml\n",
"\n",
"\t\tself.onLayoutFinish.append(self.loadPage)\n",
"\n",
"\tdef loadPage(self):\n",
"\t\tself.keyLocked = True\n",
"\t\tself['name'].setText(_('Please wait...'))\n",
"\t\tself.filmliste = []\n",
"\t\tif re.match('.*?\\?', self.Link):\n",
"\t\t\tdelimiter = '&'\n",
"\t\telse:\n",
"\t\t\tdelimiter = '?'\n",
"\t\turl = \"%s%sformat=json&page=%s\" % (self.Link, delimiter, str(self.page))\n",
"\t\tgetPage(url, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.loadData).addErrback(self.dataError)\n",
"\n",
"\tdef loadData(self, data):\n",
"\t\tself.getLastPage(data, '', 'lastPage\":(.*?),\"')\n",
"\t\tMovies = re.findall('\"id\".*?real_times_viewed\":(.*?),\".*?specialchars_title\":\"(.*?)\",\"duration\":\"(.*?)\",\"video_link\":\"(.*?)\",\"thumb_url\":\"(.*?)\"', data, re.S)\n",
"\t\tif Movies:\n",
"\t\t\tfor (Views, Title, Runtime, Url, Image) in Movies:\n",
"\t\t\t\tself.filmliste.append((decodeHtml(Title), Url.replace('\\/','/'), Image.replace('\\/','/'), Runtime, Views.replace('\"','')))\n",
"\t\t\tself.ml.setList(map(self._defaultlistleft, self.filmliste))\n",
"\t\t\tself.ml.moveToIndex(0)\n",
"\t\t\tself.keyLocked = False\n",
"\t\t\tself.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1)\n",
"\t\t\tself.showInfos()\n",
"\n",
"\tdef showInfos(self):\n",
"\t\ttitle = self['liste'].getCurrent()[0][0]\n",
"\t\tpic = self['liste'].getCurrent()[0][2]\n",
"\t\truntime = self['liste'].getCurrent()[0][3]\n",
"\t\tviews = self['liste'].getCurrent()[0][4]\n",
"\t\tself['name'].setText(title)\n",
"\t\tself['handlung'].setText(\"Runtime: %s\\nViews: %s\" % (runtime, views))\n",
"\t\tCoverHelper(self['coverArt']).getCover(pic)\n",
"\n",
"\tdef keyOK(self):\n",
"\t\tif self.keyLocked:\n",
"\t\t\treturn\n",
"\t\tLink = self['liste'].getCurrent()[0][1]\n",
"\t\tself.keyLocked = True\n",
"\t\tgetPage(Link, headers={'Content-Type':'application/x-www-form-urlencoded'}).addCallback(self.getVideoPage).addErrback(self.dataError)\n",
"\n",
"\tdef getVideoPage(self, data):\n",
"\t\tvideoPage = re.findall(\"quality_\\d+p=(.*?)&\", data, re.S)\n",
"\t\tif videoPage:\n",
"\t\t\t\tself.keyLocked = False\n",
"\t\t\t\turl = urllib.unquote(videoPage[-1])\n",
"\t\t\t\tTitle = self['liste'].getCurrent()[0][0]\n",
"\t\t\t\tself.session.open(SimplePlayer, [(Title, url)], showPlaylist=False, ltype='extremetube')"
] | [
0,
0,
0,
0,
0.025,
0,
0.03333333333333333,
0.023255813953488372,
0.015625,
0,
0.021052631578947368,
0.037037037037037035,
0.023809523809523808,
0,
0.034482758620689655,
0.041666666666666664,
0.07692307692307693,
0,
0,
0.041666666666666664,
0.09090909090909091,
0.08333333333333333,
0.06666666666666667,
0.09090909090909091,
0.07692307692307693,
0.07142857142857142,
0.08,
0.1111111111111111,
0,
0.023255813953488372,
0.024390243902439025,
0,
0.041666666666666664,
0.043478260869565216,
0,
0.043478260869565216,
0.023809523809523808,
0.038461538461538464,
0,
0.02,
0,
0.037037037037037035,
0.041666666666666664,
0.018518518518518517,
0.022727272727272728,
0,
0.03571428571428571,
0.03296703296703297,
0.031914893617021274,
0.09090909090909091,
0.027777777777777776,
0.023809523809523808,
0.021505376344086023,
0.02040816326530612,
0.038461538461538464,
0.022222222222222223,
0.020833333333333332,
0.021052631578947368,
0.020833333333333332,
0.020618556701030927,
0.014492753623188406,
0.015151515151515152,
0.038461538461538464,
0.038461538461538464,
0.05,
0,
0.045454545454545456,
0.023255813953488372,
0.020833333333333332,
0,
0.05555555555555555,
0.047619047619047616,
0.1,
0.023809523809523808,
0.03225806451612903,
0.058823529411764705,
0,
0.125,
0.023255813953488372,
0.017857142857142856,
0,
0.08620689655172414,
0.022222222222222223,
0.023255813953488372,
0.020833333333333332,
0.013157894736842105,
0.017857142857142856,
0,
0.018867924528301886,
0,
0.023809523809523808,
0.05263157894736842,
0.05263157894736842,
0.023255813953488372,
0.015625,
0,
0.021505376344086023,
0.037037037037037035,
0.024390243902439025,
0,
0.034482758620689655,
0.041666666666666664,
0.07692307692307693,
0,
0.03333333333333333,
0,
0.020833333333333332,
0.09090909090909091,
0.08333333333333333,
0.06666666666666667,
0.07142857142857142,
0.09090909090909091,
0.07692307692307693,
0.07142857142857142,
0.07692307692307693,
0.05714285714285714,
0.05405405405405406,
0.0625,
0.1111111111111111,
0,
0.023255813953488372,
0.017857142857142856,
0.03125,
0,
0.02857142857142857,
0,
0.041666666666666664,
0.0625,
0,
0.045454545454545456,
0.023809523809523808,
0.038461538461538464,
0,
0.022727272727272728,
0,
0.047619047619047616,
0.041666666666666664,
0.022727272727272728,
0.045454545454545456,
0.05714285714285714,
0.05263157894736842,
0.125,
0.05263157894736842,
0.013333333333333334,
0.022900763358778626,
0,
0.037037037037037035,
0.02,
0.012422360248447204,
0.07692307692307693,
0.018518518518518517,
0.05511811023622047,
0.015873015873015872,
0.038461538461538464,
0.038461538461538464,
0.02127659574468085,
0.05,
0,
0.045454545454545456,
0.023255813953488372,
0.024390243902439025,
0.022222222222222223,
0.023255813953488372,
0.03333333333333333,
0.013888888888888888,
0.021739130434782608,
0,
0.05555555555555555,
0.047619047619047616,
0.1,
0.023809523809523808,
0.041666666666666664,
0.022058823529411766,
0,
0.03225806451612903,
0.03125,
0.0625,
0.07407407407407407,
0.025,
0.022222222222222223,
0.03260869565217391
] | 181 | 0.033899 | false |
# -*- coding: utf-8 -*-
# ProjectEuler/src/python/problem422.py
#
# Sequence of points on a hyperbola
# =================================
# Published on Sunday, 7th April 2013, 07:00 am
#
# Let H be the hyperbola defined by the equation 12x2 + 7xy - 12y2 = 625. Next,
# define X as the point (7, 1). It can be seen that X is in H. Now we define a
# sequence of points in H, {Pi : i 1}, as: P1 = (13, 61/4). P2 = (-43/6,
# -4). For i > 2, Pi is the unique point in H that is different from Pi-1 and
# such that line PiPi-1 is parallel to line Pi-2X. It can be shown that Pi is
# well-defined, and that its coordinates are always rational. You are given
# that P3 = (-19/2, -229/24), P4 = (1267/144, -37/12) and P7 =
# (17194218091/143327232, 274748766781/1719926784). Find Pn for n = 1114 in the
# following format:If Pn = (a/b, c/d) where the fractions are in lowest terms
# and the denominators are positive, then the answer is (a + b + c + d) mod 1
# 000 000 007. For n = 7, the answer would have been: 806236837.
import projecteuler as pe
def main():
pass
if __name__ == "__main__":
main()
| [
"# -*- coding: utf-8 -*-\n",
"# ProjectEuler/src/python/problem422.py\n",
"#\n",
"# Sequence of points on a hyperbola\n",
"# =================================\n",
"# Published on Sunday, 7th April 2013, 07:00 am\n",
"#\n",
"# Let H be the hyperbola defined by the equation 12x2 + 7xy - 12y2 = 625. Next,\n",
"# define X as the point (7, 1). It can be seen that X is in H. Now we define a\n",
"# sequence of points in H, {Pi : i 1}, as: P1 = (13, 61/4). P2 = (-43/6,\n",
"# -4). For i > 2, Pi is the unique point in H that is different from Pi-1 and\n",
"# such that line PiPi-1 is parallel to line Pi-2X. It can be shown that Pi is\n",
"# well-defined, and that its coordinates are always rational. You are given\n",
"# that P3 = (-19/2, -229/24), P4 = (1267/144, -37/12) and P7 =\n",
"# (17194218091/143327232, 274748766781/1719926784). Find Pn for n = 1114 in the\n",
"# following format:If Pn = (a/b, c/d) where the fractions are in lowest terms\n",
"# and the denominators are positive, then the answer is (a + b + c + d) mod 1\n",
"# 000 000 007. For n = 7, the answer would have been: 806236837.\n",
"\n",
"import projecteuler as pe\n",
"\n",
"def main():\n",
" pass\n",
"\n",
"if __name__ == \"__main__\":\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0.037037037037037035,
0
] | 26 | 0.00463 | false |
# ==============================================================================
# provision.py
# This is API library for Exosite's One-Platform provisioning interface.
# ==============================================================================
#
# Warning: pyonep version 0.8.0 introduces breaking change to the
# provisioning interface. See README.md for details.
#
# Copyright (c) 2014, Exosite LLC
# All rights reserved.
#
import urllib
import logging
import sys
from pyonep import onephttp
from .exceptions import ProvisionException
if sys.version_info < (3, 0):
urlencode = urllib.urlencode
else:
urlencode = urllib.parse.urlencode
PROVISION_BASE = '/provision'
PROVISION_ACTIVATE = PROVISION_BASE + '/activate'
PROVISION_DOWNLOAD = PROVISION_BASE + '/download'
PROVISION_MANAGE = PROVISION_BASE + '/manage'
PROVISION_MANAGE_MODEL = PROVISION_MANAGE + '/model/'
PROVISION_MANAGE_CONTENT = PROVISION_MANAGE + '/content/'
PROVISION_REGISTER = PROVISION_BASE + '/register'
log = logging.getLogger(__name__)
# Only log errors for stderr, don't log anything else
h = logging.StreamHandler()
h.setLevel(logging.ERROR)
log.addHandler(h)
class ProvisionResponse:
"""A basic class for working with responses from the provisioning API."""
def __init__(self, body, response):
self.body = body
self.response = response
self.isok = self.response.status_code < 400
def status(self):
return self.response.status_code
def reason(self):
return self.response.reason
def __repr__(self):
return self.body
def __str__(self):
return "Status: {0}, Reason: {1}, Body: {2}".format(
self.response.status_code,
self.response.reason,
self.body)
class Provision(object):
"""A connection manager for dealing with the provisioning API, given as set of options.
Args:
host: A string for the hostname of the provisioning server. Defaults to m2.exosite.com.
port: A string for the server port. Defaults to '80'.
manage_by_cik: A boolean noting if devices managed by CIKs or tokens. Defaults to True.
verbose: Unused.
httptimeout: An integer number of seconds before timing out. Defaults to 5.
https: Whether or not to use HTTPS. Defaults to False.
reuseconnection: Whether or not to reuse connections. Defaults to False.
raise_api_exceptions: Whether or not to raise 4XX and 5XX responses as errors. Defaults to
False.
curldebug: Whether or not to log requests as equivalent curl commands at the debug level.
Defaults to False.
manage_by_sharecode: When provisioning default device setups via template, whether or not
the template is referenced by shared code or by resource ID. Defaults to False.
"""
def __init__(self,
host='m2.exosite.com',
port='80',
manage_by_cik=True,
verbose=False,
httptimeout=5,
https=False,
reuseconnection=False,
raise_api_exceptions=False,
curldebug=False,
manage_by_sharecode=False):
# backward compatibility
protocol = 'https://'
if host.startswith(protocol):
host = host[len(protocol):]
self._manage_by_cik = manage_by_cik
self._manage_by_sharecode = manage_by_sharecode
self._verbose = verbose
self._onephttp = onephttp.OneP_Request(host + ':' + str(port),
https=https,
httptimeout=int(httptimeout),
reuseconnection=reuseconnection,
log=log,
curldebug=curldebug)
self._raise_api_exceptions = raise_api_exceptions
def _filter_options(self, aliases=True, comments=True, historical=True):
"""Converts a set of boolean-valued options into the relevant HTTP values."""
options = []
if not aliases:
options.append('noaliases')
if not comments:
options.append('nocomments')
if not historical:
options.append('nohistorical')
return options
def _request(self, path, key, data, method, key_is_cik, extra_headers={}):
"""Generically shared HTTP request method.
Args:
path: The API endpoint to interact with.
key: A string for the key used by the device for the API. Either a CIK or token.
data: A string for the pre-encoded data to be sent with this request.
method: A string denoting the HTTP verb to use for the request (e.g. 'GET', 'POST')
key_is_cik: Whether or not the device key used is a CIK or token.
extra_headers: A dictionary of extra headers to include with the request.
Returns:
A ProvisionResponse containing the result of the HTTP request.
"""
if method == 'GET':
if len(data) > 0:
url = path + '?' + data
else:
url = path
body = None
else:
url = path
body = data
headers = {}
if key_is_cik:
headers['X-Exosite-CIK'] = key
else:
headers['X-Exosite-Token'] = key
if method == 'POST':
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8'
headers['Accept'] = 'text/plain, text/csv, application/x-www-form-urlencoded'
headers.update(extra_headers)
body, response = self._onephttp.request(method,
url,
body,
headers)
pr = ProvisionResponse(body, response)
if self._raise_api_exceptions and not pr.isok:
raise ProvisionException(pr)
return pr
def close(self):
"""Closes any open connections.
This should only need to be called if `reuseconnection` is set to True. Once closed,
the connection may be reopened by making another API call.
"""
self.onephttp.close()
def content_create(self, key, model, contentid, meta, protected=False):
"""Creates a content entity bucket with the given `contentid`.
This method maps to
https://github.com/exosite/docs/tree/master/provision#post---create-content-entity.
Args:
key: The CIK or Token for the device
model:
contentid: The ID used to name the entity bucket
meta:
protected: Whether or not this is restricted to certain device serial numbers only.
"""
params = {'id': contentid, 'meta': meta}
if protected is not False:
params['protected'] = 'true'
data = urlencode(params)
path = PROVISION_MANAGE_CONTENT + model + '/'
return self._request(path,
key, data, 'POST', self._manage_by_cik)
def content_download(self, cik, vendor, model, contentid):
"""(Speculation) Fetches content information for a given vendor, model, and ID as chunks.
This method might map to:
https://github.com/exosite/docs/tree/master/provision#get---get-content-blob-1,
but seems to be missing serial number.
Args:
cik: The CIK for the device
vendor: The name of the vendor
model:
contentid: The ID used to name the entity bucket
"""
data = urlencode({'vendor': vendor,
'model': model,
'id': contentid})
headers = {"Accept": "*"}
return self._request(PROVISION_DOWNLOAD,
cik, data, 'GET', True, headers)
def content_info(self, key, model, contentid, vendor=None):
"""(Speculation) Fetches content information for a given vendor, model, and ID.
This method might map to:
https://github.com/exosite/docs/tree/master/provision#get---get-content-info-1,
but seems to be missing serial number.
Args:
key: The CIK or Token for the device
model:
contentid: The ID used to name the entity bucket
vendor: The name of the vendor
"""
if not vendor: # if no vendor name, key should be the owner one
path = PROVISION_MANAGE_CONTENT + model + '/' + contentid
return self._request(path, key, '', 'GET', self._manage_by_cik)
else: # if provide vendor name, key can be the device one
data = urlencode({'vendor': vendor,
'model': model,
'id': contentid,
'info': 'true'})
return self._request(PROVISION_DOWNLOAD,
key, data, 'GET', self._manage_by_cik)
def content_list(self, key, model):
"""Returns the list of content IDs for a given model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#get---list-content-ids
Args:
key: The CIK or Token for the device
model:
"""
path = PROVISION_MANAGE_CONTENT + model + '/'
return self._request(path, key, '', 'GET', self._manage_by_cik)
def content_remove(self, key, model, contentid):
"""Deletes the information for the given contentid under the given model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#delete---delete-content
Args:
key: The CIK or Token for the device
model:
"""
path = PROVISION_MANAGE_CONTENT + model + '/' + contentid
return self._request(path, key, '', 'DELETE', self._manage_by_cik)
def content_upload(self, key, model, contentid, data, mimetype):
"""Store the given data as a result of a query for content id given the model.
This method maps to
https://github.com/exosite/docs/tree/master/provision#post---upload-content
Args:
key: The CIK or Token for the device
model:
contentid: The ID used to name the entity bucket
data: The data blob to save
mimetype: The Content-Type to use when serving the blob later
"""
headers = {"Content-Type": mimetype}
path = PROVISION_MANAGE_CONTENT + model + '/' + contentid
return self._request(path, key, data, 'POST', self._manage_by_cik, headers)
def model_create(self, key, model, sharecode,
aliases=True, comments=True, historical=True):
options = self._filter_options(aliases, comments, historical)
if self._manage_by_sharecode:
data = urlencode({'model': model,
'code': sharecode,
'options[]': options}, doseq=True)
else:
data = urlencode({'model': model,
'rid': sharecode,
'options[]': options}, doseq=True)
return self._request(PROVISION_MANAGE_MODEL,
key, data, 'POST', self._manage_by_cik)
def model_info(self, key, model):
return self._request(PROVISION_MANAGE_MODEL + model,
key, '', 'GET', self._manage_by_cik)
def model_list(self, key):
return self._request(PROVISION_MANAGE_MODEL,
key, '', 'GET', self._manage_by_cik)
def model_remove(self, key, model):
data = urlencode({'delete': 'true',
'model': model,
'confirm': 'true'})
path = PROVISION_MANAGE_MODEL + model
return self._request(path, key, data, 'DELETE', self._manage_by_cik)
def model_update(self, key, model, clonerid,
aliases=True, comments=True, historical=True):
options = self._filter_options(aliases, comments, historical)
data = urlencode({'rid': clonerid,
'options[]': options}, doseq=True)
path = PROVISION_MANAGE_MODEL + model
return self._request(path, key, data, 'PUT', self._manage_by_cik)
def serialnumber_activate(self, model, serialnumber, vendor):
data = urlencode({'vendor': vendor,
'model': model,
'sn': serialnumber})
return self._request(PROVISION_ACTIVATE,
'', data, 'POST', self._manage_by_cik)
def serialnumber_add(self, key, model, sn):
data = urlencode({'add': 'true',
'sn': sn})
path = PROVISION_MANAGE_MODEL + model + '/'
return self._request(path, key, data, 'POST', self._manage_by_cik)
def serialnumber_add_batch(self, key, model, sns=[]):
data = urlencode({'add': 'true',
'sn[]': sns}, doseq=True)
path = PROVISION_MANAGE_MODEL + model + '/'
return self._request(path, key, data, 'POST', self._manage_by_cik)
def serialnumber_disable(self, key, model, serialnumber):
data = urlencode({'disable': 'true'})
path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber
return self._request(path, key, data, 'POST', self._manage_by_cik)
def serialnumber_enable(self, key, model, serialnumber, owner):
data = urlencode({'enable': 'true', 'owner': owner})
path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber
return self._request(path, key, data, 'POST', self._manage_by_cik)
def serialnumber_info(self, key, model, serialnumber, actvtn_log=False):
data = 'show=log' if actvtn_log else ''
path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber
return self._request(path, key, data, 'GET', self._manage_by_cik)
def serialnumber_list(self, key, model, offset=0, limit=1000):
data = urlencode({'offset': offset, 'limit': limit})
path = PROVISION_MANAGE_MODEL + model + '/'
return self._request(path, key, data, 'GET', self._manage_by_cik)
def serialnumber_reenable(self, key, model, serialnumber):
data = urlencode({'enable': 'true'})
path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber
return self._request(path, key, data, 'POST', self._manage_by_cik)
def serialnumber_remap(self, key, model, serialnumber, oldsn):
data = urlencode({'enable': 'true', 'oldsn': oldsn})
path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber
return self._request(path, key, data, 'POST', self._manage_by_cik)
def serialnumber_remove(self, key, model, serialnumber):
path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber
return self._request(path, key, '', 'DELETE', self._manage_by_cik)
def serialnumber_remove_batch(self, key, model, sns):
path = PROVISION_MANAGE_MODEL + model + '/'
data = urlencode({'remove': 'true', 'sn[]': sns}, doseq=True)
return self._request(path, key, data, 'POST', self._manage_by_cik)
def vendor_register(self, key, vendor):
data = urlencode({'vendor': vendor})
return self._request(PROVISION_REGISTER,
key, data, 'POST', self._manage_by_cik)
def vendor_show(self, key):
return self._request(PROVISION_REGISTER, key, '', 'GET', False)
def vendor_unregister(self, key, vendor):
    """Remove a registered vendor name.

    Args:
        key: CIK or token authorizing the call.
        vendor: Vendor name to unregister.

    Returns:
        ProvisionResponse for the POST.
    """
    # Fix: body lines were dedented to column 0 (invalid syntax); re-indented.
    data = urlencode({'delete': 'true', 'vendor': vendor})
    return self._request(PROVISION_REGISTER,
                         key, data, 'POST', False)
| [
"# ==============================================================================\n",
"# provision.py\n",
"# This is API library for Exosite's One-Platform provisioning interface.\n",
"# ==============================================================================\n",
"#\n",
"# Warning: pyonep version 0.8.0 introduces breaking change to the\n",
"# provisioning interface. See README.md for details.\n",
"#\n",
"# Copyright (c) 2014, Exosite LLC\n",
"# All rights reserved.\n",
"#\n",
"\n",
"import urllib\n",
"import logging\n",
"import sys\n",
"from pyonep import onephttp\n",
"from .exceptions import ProvisionException\n",
"\n",
"if sys.version_info < (3, 0):\n",
" urlencode = urllib.urlencode\n",
"else:\n",
" urlencode = urllib.parse.urlencode\n",
"\n",
"PROVISION_BASE = '/provision'\n",
"PROVISION_ACTIVATE = PROVISION_BASE + '/activate'\n",
"PROVISION_DOWNLOAD = PROVISION_BASE + '/download'\n",
"PROVISION_MANAGE = PROVISION_BASE + '/manage'\n",
"PROVISION_MANAGE_MODEL = PROVISION_MANAGE + '/model/'\n",
"PROVISION_MANAGE_CONTENT = PROVISION_MANAGE + '/content/'\n",
"PROVISION_REGISTER = PROVISION_BASE + '/register'\n",
"\n",
"log = logging.getLogger(__name__)\n",
"\n",
"# Only log errors for stderr, don't log anything else\n",
"h = logging.StreamHandler()\n",
"h.setLevel(logging.ERROR)\n",
"log.addHandler(h)\n",
"\n",
"\n",
"class ProvisionResponse:\n",
" \"\"\"A basic class for working with responses from the provisioning API.\"\"\"\n",
"\n",
" def __init__(self, body, response):\n",
" self.body = body\n",
" self.response = response\n",
" self.isok = self.response.status_code < 400\n",
"\n",
" def status(self):\n",
" return self.response.status_code\n",
"\n",
" def reason(self):\n",
" return self.response.reason\n",
"\n",
" def __repr__(self):\n",
" return self.body\n",
"\n",
" def __str__(self):\n",
" return \"Status: {0}, Reason: {1}, Body: {2}\".format(\n",
" self.response.status_code,\n",
" self.response.reason,\n",
" self.body)\n",
"\n",
"\n",
"class Provision(object):\n",
" \"\"\"A connection manager for dealing with the provisioning API, given as set of options.\n",
" \n",
" Args:\n",
" host: A string for the hostname of the provisioning server. Defaults to m2.exosite.com.\n",
" port: A string for the server port. Defaults to '80'.\n",
" manage_by_cik: A boolean noting if devices managed by CIKs or tokens. Defaults to True.\n",
" verbose: Unused.\n",
" httptimeout: An integer number of seconds before timing out. Defaults to 5.\n",
" https: Whether or not to use HTTPS. Defaults to False.\n",
" reuseconnection: Whether or not to reuse connections. Defaults to False.\n",
" raise_api_exceptions: Whether or not to raise 4XX and 5XX responses as errors. Defaults to\n",
" False.\n",
" curldebug: Whether or not to log requests as equivalent curl commands at the debug level.\n",
" Defaults to False.\n",
" manage_by_sharecode: When provisioning default device setups via template, whether or not\n",
" the template is referenced by shared code or by resource ID. Defaults to False.\n",
" \"\"\"\n",
"\n",
" def __init__(self,\n",
" host='m2.exosite.com',\n",
" port='80',\n",
" manage_by_cik=True,\n",
" verbose=False,\n",
" httptimeout=5,\n",
" https=False,\n",
" reuseconnection=False,\n",
" raise_api_exceptions=False,\n",
" curldebug=False,\n",
" manage_by_sharecode=False):\n",
" # backward compatibility\n",
" protocol = 'https://'\n",
" if host.startswith(protocol):\n",
" host = host[len(protocol):]\n",
" self._manage_by_cik = manage_by_cik\n",
" self._manage_by_sharecode = manage_by_sharecode\n",
" self._verbose = verbose\n",
" self._onephttp = onephttp.OneP_Request(host + ':' + str(port),\n",
" https=https,\n",
" httptimeout=int(httptimeout),\n",
" reuseconnection=reuseconnection,\n",
" log=log,\n",
" curldebug=curldebug)\n",
" self._raise_api_exceptions = raise_api_exceptions\n",
"\n",
" def _filter_options(self, aliases=True, comments=True, historical=True):\n",
" \"\"\"Converts a set of boolean-valued options into the relevant HTTP values.\"\"\"\n",
" options = []\n",
" if not aliases:\n",
" options.append('noaliases')\n",
" if not comments:\n",
" options.append('nocomments')\n",
" if not historical:\n",
" options.append('nohistorical')\n",
" return options\n",
"\n",
" def _request(self, path, key, data, method, key_is_cik, extra_headers={}):\n",
" \"\"\"Generically shared HTTP request method.\n",
" \n",
" Args:\n",
" path: The API endpoint to interact with.\n",
" key: A string for the key used by the device for the API. Either a CIK or token.\n",
" data: A string for the pre-encoded data to be sent with this request.\n",
" method: A string denoting the HTTP verb to use for the request (e.g. 'GET', 'POST')\n",
" key_is_cik: Whether or not the device key used is a CIK or token.\n",
" extra_headers: A dictionary of extra headers to include with the request. \n",
" \n",
" Returns:\n",
" A ProvisionResponse containing the result of the HTTP request.\n",
" \"\"\"\n",
" if method == 'GET':\n",
" if len(data) > 0:\n",
" url = path + '?' + data\n",
" else:\n",
" url = path\n",
" body = None\n",
" else:\n",
" url = path\n",
" body = data\n",
"\n",
" headers = {}\n",
" if key_is_cik:\n",
" headers['X-Exosite-CIK'] = key\n",
" else:\n",
" headers['X-Exosite-Token'] = key\n",
" if method == 'POST':\n",
" headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=utf-8'\n",
" headers['Accept'] = 'text/plain, text/csv, application/x-www-form-urlencoded'\n",
" headers.update(extra_headers)\n",
"\n",
" body, response = self._onephttp.request(method,\n",
" url,\n",
" body,\n",
" headers)\n",
"\n",
" pr = ProvisionResponse(body, response)\n",
" if self._raise_api_exceptions and not pr.isok:\n",
" raise ProvisionException(pr)\n",
" return pr\n",
"\n",
" def close(self):\n",
" \"\"\"Closes any open connections. \n",
" \n",
" This should only need to be called if `reuseconnection` is set to True. Once closed,\n",
" the connection may be reopened by making another API call.\n",
" \"\"\"\n",
" self.onephttp.close()\n",
"\n",
" def content_create(self, key, model, contentid, meta, protected=False):\n",
" \"\"\"Creates a content entity bucket with the given `contentid`.\n",
" \n",
" This method maps to\n",
" https://github.com/exosite/docs/tree/master/provision#post---create-content-entity.\n",
" \n",
" Args:\n",
" key: The CIK or Token for the device\n",
" model: \n",
" contentid: The ID used to name the entity bucket\n",
" meta:\n",
" protected: Whether or not this is restricted to certain device serial numbers only.\n",
" \"\"\"\n",
" params = {'id': contentid, 'meta': meta}\n",
" if protected is not False:\n",
" params['protected'] = 'true'\n",
" data = urlencode(params)\n",
" path = PROVISION_MANAGE_CONTENT + model + '/'\n",
" return self._request(path,\n",
" key, data, 'POST', self._manage_by_cik)\n",
"\n",
" def content_download(self, cik, vendor, model, contentid):\n",
" \"\"\"(Speculation) Fetches content information for a given vendor, model, and ID as chunks.\n",
" \n",
" This method might map to:\n",
" https://github.com/exosite/docs/tree/master/provision#get---get-content-blob-1,\n",
" but seems to be missing serial number.\n",
" \n",
" Args:\n",
" cik: The CIK for the device\n",
" vendor: The name of the vendor\n",
" model: \n",
" contentid: The ID used to name the entity bucket\n",
" \"\"\"\n",
" data = urlencode({'vendor': vendor,\n",
" 'model': model,\n",
" 'id': contentid})\n",
" headers = {\"Accept\": \"*\"}\n",
" return self._request(PROVISION_DOWNLOAD,\n",
" cik, data, 'GET', True, headers)\n",
"\n",
" def content_info(self, key, model, contentid, vendor=None):\n",
" \"\"\"(Speculation) Fetches content information for a given vendor, model, and ID.\n",
" \n",
" This method might map to:\n",
" https://github.com/exosite/docs/tree/master/provision#get---get-content-info-1,\n",
" but seems to be missing serial number.\n",
" \n",
" Args:\n",
" key: The CIK or Token for the device\n",
" model: \n",
" contentid: The ID used to name the entity bucket\n",
" vendor: The name of the vendor\n",
" \"\"\"\n",
" if not vendor: # if no vendor name, key should be the owner one\n",
" path = PROVISION_MANAGE_CONTENT + model + '/' + contentid\n",
" return self._request(path, key, '', 'GET', self._manage_by_cik)\n",
" else: # if provide vendor name, key can be the device one\n",
" data = urlencode({'vendor': vendor,\n",
" 'model': model,\n",
" 'id': contentid,\n",
" 'info': 'true'})\n",
" return self._request(PROVISION_DOWNLOAD,\n",
" key, data, 'GET', self._manage_by_cik)\n",
"\n",
" def content_list(self, key, model):\n",
" \"\"\"Returns the list of content IDs for a given model.\n",
" \n",
" This method maps to\n",
" https://github.com/exosite/docs/tree/master/provision#get---list-content-ids\n",
" \n",
" Args:\n",
" key: The CIK or Token for the device\n",
" model: \n",
" \"\"\"\n",
" path = PROVISION_MANAGE_CONTENT + model + '/'\n",
" return self._request(path, key, '', 'GET', self._manage_by_cik)\n",
"\n",
" def content_remove(self, key, model, contentid):\n",
" \"\"\"Deletes the information for the given contentid under the given model.\n",
" \n",
" This method maps to\n",
" https://github.com/exosite/docs/tree/master/provision#delete---delete-content\n",
" \n",
" Args:\n",
" key: The CIK or Token for the device\n",
" model: \n",
" \"\"\"\n",
" path = PROVISION_MANAGE_CONTENT + model + '/' + contentid\n",
" return self._request(path, key, '', 'DELETE', self._manage_by_cik)\n",
"\n",
" def content_upload(self, key, model, contentid, data, mimetype):\n",
" \"\"\"Store the given data as a result of a query for content id given the model.\n",
" \n",
" This method maps to\n",
" https://github.com/exosite/docs/tree/master/provision#post---upload-content\n",
" \n",
" Args:\n",
" key: The CIK or Token for the device\n",
" model: \n",
" contentid: The ID used to name the entity bucket\n",
" data: The data blob to save\n",
" mimetype: The Content-Type to use when serving the blob later\n",
" \"\"\"\n",
" headers = {\"Content-Type\": mimetype}\n",
" path = PROVISION_MANAGE_CONTENT + model + '/' + contentid\n",
" return self._request(path, key, data, 'POST', self._manage_by_cik, headers)\n",
"\n",
" def model_create(self, key, model, sharecode,\n",
" aliases=True, comments=True, historical=True):\n",
" options = self._filter_options(aliases, comments, historical)\n",
" if self._manage_by_sharecode:\n",
" data = urlencode({'model': model,\n",
" 'code': sharecode,\n",
" 'options[]': options}, doseq=True)\n",
" else:\n",
" data = urlencode({'model': model,\n",
" 'rid': sharecode,\n",
" 'options[]': options}, doseq=True)\n",
" return self._request(PROVISION_MANAGE_MODEL,\n",
" key, data, 'POST', self._manage_by_cik)\n",
"\n",
" def model_info(self, key, model):\n",
" return self._request(PROVISION_MANAGE_MODEL + model,\n",
" key, '', 'GET', self._manage_by_cik)\n",
"\n",
" def model_list(self, key):\n",
" return self._request(PROVISION_MANAGE_MODEL,\n",
" key, '', 'GET', self._manage_by_cik)\n",
"\n",
" def model_remove(self, key, model):\n",
" data = urlencode({'delete': 'true',\n",
" 'model': model,\n",
" 'confirm': 'true'})\n",
" path = PROVISION_MANAGE_MODEL + model\n",
" return self._request(path, key, data, 'DELETE', self._manage_by_cik)\n",
"\n",
" def model_update(self, key, model, clonerid,\n",
" aliases=True, comments=True, historical=True):\n",
" options = self._filter_options(aliases, comments, historical)\n",
" data = urlencode({'rid': clonerid,\n",
" 'options[]': options}, doseq=True)\n",
" path = PROVISION_MANAGE_MODEL + model\n",
" return self._request(path, key, data, 'PUT', self._manage_by_cik)\n",
"\n",
" def serialnumber_activate(self, model, serialnumber, vendor):\n",
" data = urlencode({'vendor': vendor,\n",
" 'model': model,\n",
" 'sn': serialnumber})\n",
" return self._request(PROVISION_ACTIVATE,\n",
" '', data, 'POST', self._manage_by_cik)\n",
"\n",
" def serialnumber_add(self, key, model, sn):\n",
" data = urlencode({'add': 'true',\n",
" 'sn': sn})\n",
" path = PROVISION_MANAGE_MODEL + model + '/'\n",
" return self._request(path, key, data, 'POST', self._manage_by_cik)\n",
"\n",
" def serialnumber_add_batch(self, key, model, sns=[]):\n",
" data = urlencode({'add': 'true',\n",
" 'sn[]': sns}, doseq=True)\n",
" path = PROVISION_MANAGE_MODEL + model + '/'\n",
" return self._request(path, key, data, 'POST', self._manage_by_cik)\n",
"\n",
" def serialnumber_disable(self, key, model, serialnumber):\n",
" data = urlencode({'disable': 'true'})\n",
" path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber\n",
" return self._request(path, key, data, 'POST', self._manage_by_cik)\n",
"\n",
" def serialnumber_enable(self, key, model, serialnumber, owner):\n",
" data = urlencode({'enable': 'true', 'owner': owner})\n",
" path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber\n",
" return self._request(path, key, data, 'POST', self._manage_by_cik)\n",
"\n",
" def serialnumber_info(self, key, model, serialnumber, actvtn_log=False):\n",
" data = 'show=log' if actvtn_log else ''\n",
" path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber\n",
" return self._request(path, key, data, 'GET', self._manage_by_cik)\n",
"\n",
" def serialnumber_list(self, key, model, offset=0, limit=1000):\n",
" data = urlencode({'offset': offset, 'limit': limit})\n",
" path = PROVISION_MANAGE_MODEL + model + '/'\n",
" return self._request(path, key, data, 'GET', self._manage_by_cik)\n",
"\n",
" def serialnumber_reenable(self, key, model, serialnumber):\n",
" data = urlencode({'enable': 'true'})\n",
" path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber\n",
" return self._request(path, key, data, 'POST', self._manage_by_cik)\n",
"\n",
" def serialnumber_remap(self, key, model, serialnumber, oldsn):\n",
" data = urlencode({'enable': 'true', 'oldsn': oldsn})\n",
" path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber\n",
" return self._request(path, key, data, 'POST', self._manage_by_cik)\n",
"\n",
" def serialnumber_remove(self, key, model, serialnumber):\n",
" path = PROVISION_MANAGE_MODEL + model + '/' + serialnumber\n",
" return self._request(path, key, '', 'DELETE', self._manage_by_cik)\n",
"\n",
" def serialnumber_remove_batch(self, key, model, sns):\n",
" path = PROVISION_MANAGE_MODEL + model + '/'\n",
" data = urlencode({'remove': 'true', 'sn[]': sns}, doseq=True)\n",
" return self._request(path, key, data, 'POST', self._manage_by_cik)\n",
"\n",
" def vendor_register(self, key, vendor):\n",
" data = urlencode({'vendor': vendor})\n",
" return self._request(PROVISION_REGISTER,\n",
" key, data, 'POST', self._manage_by_cik)\n",
"\n",
" def vendor_show(self, key):\n",
" return self._request(PROVISION_REGISTER, key, '', 'GET', False)\n",
"\n",
" def vendor_unregister(self, key, vendor):\n",
" data = urlencode({'delete': 'true', 'vendor': vendor})\n",
" return self._request(PROVISION_REGISTER,\n",
" key, data, 'POST', False)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0.2,
0,
0.010309278350515464,
0,
0.010309278350515464,
0,
0.011627906976744186,
0,
0.012195121951219513,
0.01,
0,
0.01020408163265306,
0,
0.01020408163265306,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.010638297872340425,
0.012195121951219513,
0.010416666666666666,
0,
0.022988505747126436,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0.1111111111111111,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.05,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0.1111111111111111,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0.1111111111111111,
0,
0,
0,
0.1111111111111111,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.05,
0,
0,
0,
0,
0,
0.012195121951219513,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.05,
0,
0,
0,
0,
0,
0.011494252873563218,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.05,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 386 | 0.006364 | false |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import random
import sys
import unicodedata
import my_conexion
sys.stdout.encoding  # NOTE(review): bare attribute read — a no-op; presumably meant to check/force stdout encoding
'UTF-8'  # NOTE(review): bare string literal — also a no-op statement with no effect
# PROCESAR MENSAJE DE USUARIO
def generarRespuesta(mensaje, STATES, username):
    """Build DiarioBot's reply to *mensaje* and update conversation state.

    The message is normalized with sinAcentos(), then matched against a
    battery of regular expressions: greetings, the user's own mood, losses
    of relatives/pets/friends, the mood of relatives/pets/friends, and
    work/school mentions.  Each match appends a canned reply to the answer
    and/or flips a flag in STATES.  A follow-up question is always appended
    based on the turn counter 'vez', one not-yet-discussed topic may be
    probed, and a farewell *replaces* the whole answer when the user says
    goodbye.

    Args:
        mensaje: Raw user message.
        STATES: Mutable flat dict of conversation state: 'vez' (turn
            counter, int) plus per-topic flags ('mama', 'papa', 'hermano',
            'mascota', 'amigo', 'trabajo', 'escuela').  Flags arrive as the
            *string* 'False' (presumably loaded from the DB — TODO confirm)
            and are set to boolean True here.  Updated in place.
        username: User identifier forwarded to my_conexion.cambiar_estado().

    Returns:
        The reply string for the user.
    """
    message_ = sinAcentos(mensaje)
    ans = ''
    vez = STATES['vez']
    print(message_)

    def _elegir(frases):
        # Uniform random pick; same distribution as the original
        # randint(1, n) / if-chain dispatch.
        return frases[random.randint(1, len(frases)) - 1]

    # ---- greeting --------------------------------------------------------
    SALUDO = re.compile('^(h|H)ola|HOLA|(Q|q)u(e|é)\stal|QU(É|E)\sTAL|((B|b)uenas)|BUENAS|(Q|q)u(e|é)\sonda|QU(E|É)\sONDA|(H|h)ello|HELLO|(H|h)i|HI|(Q|q)iubo|QUIUBO|(S|s)aludos|SALUDOS|(B|b)uenos\sd(i|í)as|BUENOS\sD(I|Í)AS($|.*)')
    if re.match(SALUDO, message_):
        ans = ans + _elegir([
            "\nHola, soy DiarioBot, me encantaria platicar contigo...",
            "\nHola, yo soy tu diario personal...",
            "\nHola, vamos a platicar...",
        ])

    # ---- user's own mood -------------------------------------------------
    # Bugfix: the original pattern had '(i\í)' (a literal "ií"), so
    # "feliz"/"felíz" could never match; restored the intended '(i|í)'
    # alternation used everywhere else in this function.
    SENTIMIENTO = re.compile('(.*|^)((E|e)stoy|(M|m)e\s(fue|(S|s)iento)|puse|sent(i|í))(.*|(M|m)uy)\s((F|f)el(i|í)z|(C|c)ontent(o|a)|(A|a)legre|(B|b)ien)($|.*)')
    if re.match(SENTIMIENTO, message_):
        ans = ans + _elegir([
            "\nMe alegra que te sientas así, espero todos los días te sientas de esa manera, ¿Que más pasó?...",
            "\nQue bueno que estes así, ¿Que mas pasó?...",
            "\nMuy bien, que siempre sea así, ¿Que mas pasó?...",
        ])
    SENTIMIENTO_1 = re.compile('(.*|^)((E|e)stoy|(M|m)e\s(fue|(S|s)iento)|puse|sent(i|í))(.*|(M|m)uy)\s((T|t)riste|(M|m)al|(I|i)nfel(i|í)z|(D|d)ecaid(o|a))($|.*)')
    if re.match(SENTIMIENTO_1, message_):
        ans = ans + _elegir([
            "\nNo me gusta que te sientas asi animo, Cuenta me más...",
            "\nQue mal en serio, Cuenta me más...",
            "\nNo te preocupes, se que estaras mejor mañana, Cuenta me más...",
        ])

    # ---- losses (relative / pet / best friend) ---------------------------
    # Two patterns per subject: "fallecio/murio/no tengo ... X" and
    # "mi X fallecio/murio".
    # Bugfix: the original compiled NO_HERMA but then matched against
    # NO_PAPA (copy/paste error), so a sibling's loss was never detected;
    # each subject now uses its own patterns.
    pesame = [
        "\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?...",
        "\nQue fuerte, lo siento ¿Quieres seguir platicando?...",
    ]
    perdidas = [
        ('mama', ['(.*|^)(fallecio|murio|no\stengo)(|\smi)\smam(a|á)($|.*)',
                  '(.*|^)mi\smam(a|á)\s(fallecio|murio)($|.*)']),
        ('papa', ['(.*|^)(falleci(o|ó)|muri(o|ó)|no\stengo)(|\smi)\spap(a|á)($|.*)',
                  '(.*|^)mi\spap(a|á)\s(fallecio|murio)($|.*)']),
        ('hermano', ['(.*|^)(fallecio|murio|no\stengo)(|\smi)\sherman(o|a)($|.*)',
                     '(.*|^)mi\sherman(o|a)\s(fallecio|murio)($|.*)']),
        ('mascota', ['(.*|^)(fallecio|murio|no\stengo)(|\smi)\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)($|.*)',
                     '(.*|^)mi\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)\s(fallecio|murio)($|.*)']),
        ('amigo', ['(.*|^)(fallecio|murio|no\stengo)(|\smi)\smejor amig(o|a)($|.*)',
                   '(.*|^)mi\smejor amig(o|a)\s(fallecio|murio)($|.*)']),
    ]
    for clave, patrones in perdidas:
        for patron in patrones:
            if re.match(patron, message_):
                my_conexion.cambiar_estado(clave, username)
                STATES[clave] = True
                ans = ans + _elegir(pesame)

    # ---- "I don't work / I don't study" ----------------------------------
    if re.match('(.*|^)((no\strabajo)|(no\stengo\strabajo))($|.*)', message_):
        my_conexion.cambiar_estado('trabajo', username)
        STATES['trabajo'] = True
    if re.match('(.*|^)no\sestudio($|.*)', message_):
        my_conexion.cambiar_estado('escuela', username)
        STATES['escuela'] = True

    # ---- mood of relatives / pets / friends ------------------------------
    # Per subject: (state key, positive pattern, positive replies,
    # negative pattern, negative replies).  Reply wording differs per
    # subject in the original, so the texts are kept verbatim.
    animos = [
        ('mama',
         '(.*|^)((M|m)i\s(mama|mamá|madre)\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(excelente|bien|alegre|animada|apasionada|cariñosa|contenta|encantada|euforica|exitada|feliz|satisfecha|orgullosa))($|.*)',
         ["\nOh Que bien que tu mamá este asi...",
          "\nMe alegra leer esto de tu mamá...",
          "\nMaravilloso, que todo siga asi para tu mamá..."],
         '(.*|^)((M|m)i\s(mama|mamá|madre)\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(abrumada|mal|mala|enferma|afligida|agotada|amargada|angustiada|apatica|arrepentida|asustada|aterrada|avergonzada|celosa|cansada|confundida|debil|decaida|decepcionada|deprimida|desanimada|desesperada|enojada|infeliz|herida|insegura|triste|tensa|molesta|irritada))($|.*)',
         ["\nmmm que mal que tu mama este asi...",
          "\nLo siento, que todo mejore para tu mamá...",
          "\nLeer esto de tu mama no me gusta, lo siento por ella..."]),
        ('papa',
         '(.*|^)((M|m)i\s(papa|papá|padre)\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(excelente|bien|alegre|animado|apasionado|cariñoso|contento|encantado|euforico|exitado|feliz|satisfecho|orgullos))($|.*)',
         ["\nOh Que bien que tu papá este así...",
          "\nMe alegra leer esto de tu papá...",
          "\nMaravilloso, que todo siga asi para tu padre..."],
         '(.*|^)((M|m)i\s(papa|papá|padre)\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(abrumado|mal|afligido|agotado|malo|enfermo|amargado|angustiado|apatico|arrepentido|asustado|aterrado|avergonzado|celoso|cansado|confundido|debil|decaido|decepcionado|deprimido|desanimado|desesperado|enojado|infeliz|herido|inseguro|triste|tenso|molesto|irritado))($|.*)',
         ["\nmmm que mal que tu papa este asi...",
          "\nLo siento, que todo mejore para tu papa...",
          "\nLeer esto de tu papa no me gusta, lo siento por ella..."]),
        ('hermano',
         '(.*|^)((M|m)i\s(herman(o|a))\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(excelente|bien|alegre|animado|apasionado|cariñoso|contento|encantado|euforico|exitado|feliz|satisfecho|orgullos))($|.*)',
         ["\nOh Que bien que tu hermano este asi...",
          "\nMe alegra leer esto de tu hermano...",
          "\nMaravilloso, que todo siga asi para tu hermano..."],
         '(.*|^)((M|m)i\s(herman(o|a))\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\s(abrumado|afligido|agotado|malo|enfermo|amargado|angustiado|apatico|arrepentido|asustado|aterrado|avergonzado|celoso|cansado|confundido|debil|decaido|decepcionado|deprimido|desanimado|desesperado|enojado|infeliz|herido|inseguro|triste|tenso|molesto|irritado))($|.*)',
         ["\nmmm que mal que tu hermano este asi...",
          "\nLo siento, que todo mejore para tu hermano...",
          "\nLeer esto de tu hermano no me gusta, lo siento por ella..."]),
        ('mascota',
         '(.*|^)((M|m)i(.*|s)\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)(.*|s)\s(es|esta|estan|son)(.*|algo|un poco|mucho|mucho muy|muy)\s(excelente(.*|s)|bien|alegre(.*|s)|animad(o|a)(.*|s)|apasionad(o|a)(.*|s)|cariños(o|a)(.*|s)|content(o|a)(.*|s)|encantad(o|a)(.*|s)|euforic(o|a)(.*|s)|exitad(o|a)(.*|s)|feliz|felices|satisfech(o|a)(.*|s)|orgullos(o|a)(.*|s)))($|.*)',
         ["\nOh Que bien que tu mascota este asi...",
          "\nMe alegra leer esto de tu mascota...",
          "\nMaravilloso, que todo siga asi para tu mascota..."],
         '(.*|^)((M|m)i(.*|s)\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)(.*|s)\s(es|esta|estan|son)(.*|algo|un poco|mucho|mucho muy|muy)\s(abrumad(o|a)(.*|s)|mal(o|a)|enferm(o|a)|afligid(o|a)(.*|s)|agotad(o|a)(.*|s)|amargad(o|a)(.*|s)|angustiad(o|a)(.*|s)|apatic(o|a)(.*|s)|arrepentid(o|a)(.*|s)|asustad(o|a)(.*|s)|aterrad(o|a)(.*|s)|avergonzad(o|a)(.*|s)|celos(o|a)(.*|s)|cansad(o|a)(.*|s)|confundid(o|a)(.*|s)|debil|debiles|decaid(o|a)(.*|s)|decepcionad(o|a)(.*|s)|deprimid(o|a)(.*|s)|desanimad(o|a)(.*|s)|desesperad(o|a)(.*|s)|enojad(o|a)(.*|s)|infeliz|infelices|herid(o|a)(.*|s)|insegur(o|a)(.*|s)|triste(.*|s)|tens(o|a)(.*|s)|molest(o|a)(.*|s)|irritad(o|a)(.*|s)))($|.*)',
         ["\nmmm que mal que tu mascota este asi...",
          "\nLo siento, que todo mejore para tu mascota...",
          "\nLeer esto de tu mascota no me gusta, lo siento por ella..."]),
        ('amigo',
         '(.*|^)((M|m)i(.*|s)\s(mejores amigos|mejor amigo|amig(o|a)(.*|s))\s(es|esta|son|estan)(.*|algo|un poco|mucho|mucho muy|muy)\s(excelente(.*|s)|bien|alegre|alegres|animad(o|a)|animad(o|a)s|apasionad(o|a)|apasionad(o|a)s|cariños(o|a)|cariños(o|a)s|content(o|a)|content(o|a)s|encantad(o|a)|encantad(o|a)s|euforic(o|a)|euforic(o|a)s|exitad(o|a)|exitad(o|a)s|feliz|felices|satisfech(a|o)|satisfech(o|a)s|orgullos(o|a)|orgullos(o|a)s))($|.*)',
         ["\nOh Que bien que tu amigo este asi...",
          "\nMe alegra leer esto de tu amigo...",
          "\nMaravilloso, que todo siga asi para tu amigo..."],
         '(.*|^)((M|m)i(.*|s)\s(mejores amigos|mejor amigo|amig(o|a)(.*|s))\s(es|esta|son|estan)(.*|algo|un poco|mucho|mucho muy|muy)\s(abrumad(o|a)(.*|s)|afligid(o|a)(.*|s)|agotad(o|a)(.*|s)|amargad(o|a)(.*|s)|angustiad(o|a)(.*|s)|apatic(o|a)(.*|s)|arrepentid(o|a)(.*|s)|asustad(o|a)(.*|s)|aterrad(o|a)(.*|s)|avergonzad(o|a)(.*|s)|celos(o|a)(.*|s)|cansad(o|a)(.*|s)|confundid(o|a)(.*|s)|debil|debiles|decaid(o|a)(.*|s)|decepcionad(o|a)(.*|s)|deprimid(o|a)(.*|s)|desanimad(o|a)(.*|s)|desesperad(o|a)(.*|s)|enojad(o|a)(.*|s)|infeliz|infelices|herid(o|a)(.*|s)|insegur(o|a)(.*|s)|triste(.*|s)|tens(o|a)(.*|s)|molest(o|a)(.*|s)|irritad(o|a)(.*|s)))($|.*)',
         ["\nmmm que mal que tu amigo este asi...",
          "\nLo siento, que todo mejore para tu amigo...",
          "\nLeer esto de tu amigo no me gusta, lo siento por ella..."]),
    ]
    for clave, patron_bien, frases_bien, patron_mal, frases_mal in animos:
        if re.match(patron_bien, message_):
            ans = ans + _elegir(frases_bien)
            STATES[clave] = True
        if re.match(patron_mal, message_):
            ans = ans + _elegir(frases_mal)
            STATES[clave] = True

    # ---- mentions of work / school (no canned reply, just mark topic) ----
    TRABAJO = re.compile('(.*|^)(((M|m)i|(E|e)l|(E|e)n\sel)\s(negocio|empleo|trabajo)\s(es|esta|son|estuvo))($|.*)')
    if re.match(TRABAJO, message_):
        STATES['trabajo'] = True
    ESCUELA = re.compile('(.*|^)(((M|m)i|(E|e)l|(E|e)n\sel)\s(escuela|universidad|prepa|preparatoria|secu|secundaria|primaria)\s(es|esta|son|estuvo))($|.*)')
    if re.match(ESCUELA, message_):
        STATES['escuela'] = True

    # ---- always append a follow-up question ------------------------------
    if vez > 0:
        ans = ans + _elegir([
            "\n ¿cómo te fue hoy?...",
            "\n ¿cómo estuvo tu dia de hoy?...",
            "\n ¿que hiciste el dia de hoy?...",
            "\n ¿algo interesante que hicieras hoy?...",
            "\n ¿que te paso el dia de hoy?...",
            "\n ¿Que mas hiciste en tu dia?...",
            "\n Y ¿Que mas?...",
            "\n ¿Algo mas que quieras platicarme?...",
            "\n ¿Que mas hiciste?...",
        ])
    if vez == 0:
        ans = ans + _elegir([
            "\n ¿cómo estas hoy?...",
            "\n ¿cómo te sientes el dia de hoy?...",
            "\n ¿cómo te sentiste hoy?...",
            "\n ¿que te paso el dia de hoy?...",
        ])

    # ---- probe the first topic not yet discussed -------------------------
    # A flag equal to the string 'False' means "never mentioned" (True is
    # assigned above once the topic comes up, so the comparison then fails).
    # Bugfix: the original read STATES[username]['mama'] here although every
    # other read/write in this function uses the flat STATES['mama'] form.
    if STATES['mama'] == 'False':
        print(vez)
        if vez > 2:  # mama is only probed after a few turns, unlike the rest
            ans = ans + "\n Cuéntame, ¿Como esta tu mamá?"
    elif STATES['papa'] == 'False':
        if vez > 0:
            ans = ans + "\n¿Cómo esta tu papá?"
    elif STATES['hermano'] == 'False':
        if vez > 0:
            ans = ans + "\n¿Que tal tu hermano?"
    elif STATES['mascota'] == 'False':
        if vez > 0:
            ans = ans + "\n¿Que tal tu mascota?"
    elif STATES['amigo'] == 'False':
        if vez > 0:
            ans = ans + "\n¿Como esta tu mejor amigo?"
    elif STATES['escuela'] == 'False':
        if vez > 0:
            ans = ans + "\n¿Como vas en la escuela?"
    elif STATES['trabajo'] == 'False':
        if vez > 0:
            ans = ans + "\n¿Como estubo el trabajo?"

    # ---- goodbye: the farewell replaces the whole answer -----------------
    ADIOS = re.compile('(H|h)asta\sluego|HASTA\sLUEGO|(A|a)di(o|ó)s|ADI(O|Ó)S|(N|n)os\svemos|NOS\sVEMOS|(C|c)hao|CHAO|(B|b)ye|BYE($)')
    if re.match(ADIOS, message_):
        vez = 100  # jump the counter so per-topic probes stop on later turns
        ans = _elegir([
            "\n Adios, fue un gusto platicar contigo.",
            "\n Adios, Me encanta platicar contigo.",
            "\n Adios, Te deseo suerte y que tus dias sean mejores.",
            "\n Adios, Espero que vuelvas a platicar conmigo.... ;)",
        ])

    vez = vez + 1
    STATES['vez'] = vez
    return ans
def sinAcentos(Mensaje):
    """Return *Mensaje* lower-cased with all diacritical marks removed.

    The text is decomposed with Unicode NFD so every accented letter
    becomes a base letter followed by combining marks (category 'Mn');
    the marks are dropped and the remainder is lower-cased, e.g.
    ``"Canción" -> "cancion"``.

    Fixes the original Python-2-only implementation, which called the
    ``unicode()`` builtin (undefined on Python 3) and then ``.decode()``
    on already-decoded text (an AttributeError on Python 3 and a
    spurious ascii round-trip on Python 2).
    """
    # Decompose accented characters into base letter + combining marks.
    descompuesta = unicodedata.normalize('NFD', str(Mensaje))
    # Drop the combining marks ('Mn' = nonspacing mark).
    sin_marcas = ''.join(c for c in descompuesta if unicodedata.category(c) != 'Mn')
    return sin_marcas.lower()
| [
"#!/usr/bin/env python\r\n",
"# -*- coding: utf-8 -*-\r\n",
"\r\n",
"\r\n",
"import os\r\n",
"import re \r\n",
"import random\r\n",
"import sys\r\n",
"import unicodedata\r\n",
"import my_conexion\r\n",
"sys.stdout.encoding \r\n",
"'UTF-8'\r\n",
"\r\n",
"\r\n",
"# PROCESAR MENSAJE DE USUARIO\r\n",
"\r\n",
"def generarRespuesta(mensaje, STATES, username):\r\n",
"\r\n",
" message_=sinAcentos(mensaje)\r\n",
" \r\n",
" ans=''\r\n",
" vez = STATES['vez']\r\n",
" print(message_)\r\n",
"\r\n",
" SALUDO = re.compile('^(h|H)ola|HOLA|(Q|q)u(e|é)\\stal|QU(É|E)\\sTAL|((B|b)uenas)|BUENAS|(Q|q)u(e|é)\\sonda|QU(E|É)\\sONDA|(H|h)ello|HELLO|(H|h)i|HI|(Q|q)iubo|QUIUBO|(S|s)aludos|SALUDOS|(B|b)uenos\\sd(i|í)as|BUENOS\\sD(I|Í)AS($|.*)');\r\n",
" SALUDO_MATCH = re.match(SALUDO,message_)\r\n",
" if SALUDO_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nHola, soy DiarioBot, me encantaria platicar contigo...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nHola, yo soy tu diario personal...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nHola, vamos a platicar...\"\r\n",
"\r\n",
"\r\n",
"\r\n",
" SENTIMIENTO = re.compile('(.*|^)((E|e)stoy|(M|m)e\\s(fue|(S|s)iento)|puse|sent(i|í))(.*|(M|m)uy)\\s((F|f)el(i\\í)z|(C|c)ontent(o|a)|(A|a)legre|(B|b)ien)($|.*)');\r\n",
" SENTIMIENTO_MATCH = re.match(SENTIMIENTO,message_)\r\n",
" if SENTIMIENTO_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nMe alegra que te sientas así, espero todos los días te sientas de esa manera, ¿Que más pasó?...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nQue bueno que estes así, ¿Que mas pasó?...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nMuy bien, que siempre sea así, ¿Que mas pasó?...\"\r\n",
" \r\n",
" SENTIMIENTO_1 = re.compile('(.*|^)((E|e)stoy|(M|m)e\\s(fue|(S|s)iento)|puse|sent(i|í))(.*|(M|m)uy)\\s((T|t)riste|(M|m)al|(I|i)nfel(i|í)z|(D|d)ecaid(o|a))($|.*)');\r\n",
" SENTIMIENTO_MATCH = re.match(SENTIMIENTO_1,message_)\r\n",
" if SENTIMIENTO_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nNo me gusta que te sientas asi animo, Cuenta me más...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nQue mal en serio, Cuenta me más...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nNo te preocupes, se que estaras mejor mañana, Cuenta me más...\"\r\n",
" \r\n",
" NO_MAMA = re.compile('(.*|^)(fallecio|murio|no\\stengo)(|\\smi)\\smam(a|á)($|.*)');\r\n",
" NO_MATCH = re.match(NO_MAMA,message_)\r\n",
" if NO_MATCH:\r\n",
" my_conexion.cambiar_estado('mama',username)\r\n",
"\tev = random.randint(1, 2)\r\n",
"\tif ev == 1:\r\n",
" STATES['mama'] = True\r\n",
" ans =ans + \"\\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?...\"\r\n",
"\tif ev == 2:\r\n",
" STATES['mama'] = True\r\n",
" ans =ans + \"\\nQue fuerte, lo siento ¿Quieres seguir platicando?...\"\r\n",
"\r\n",
" NO_MAMA = re.compile('(.*|^)mi\\smam(a|á)\\s(fallecio|murio)($|.*)');\r\n",
" NO_MATCH = re.match(NO_MAMA,message_)\r\n",
" if NO_MATCH:\r\n",
" my_conexion.cambiar_estado('mama',username)\r\n",
"\tev = random.randint(1, 2)\r\n",
"\tif ev == 1:\r\n",
" STATES['mama'] = True\r\n",
" ans =ans + \"\\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?...\"\r\n",
"\tif ev == 2:\r\n",
" STATES['mama'] = True\r\n",
" ans =ans + \"\\nQue fuerte, lo siento ¿Quieres seguir platicando?...\"\r\n",
"\r\n",
" NO_PAPA = re.compile('(.*|^)(falleci(o|ó)|muri(o|ó)|no\\stengo)(|\\smi)\\spap(a|á)($|.*)');\r\n",
" NO_MATCH = re.match(NO_PAPA,message_)\r\n",
" if NO_MATCH:\r\n",
" my_conexion.cambiar_estado('papa',username)\r\n",
"\tev = random.randint(1, 2)\r\n",
"\tif ev == 1:\r\n",
" STATES['papa'] = True\r\n",
" ans =ans + \"\\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?...\"\r\n",
"\tif ev == 2:\r\n",
" STATES['papa'] = True\r\n",
" ans =ans + \"\\nQue fuerte, lo siento ¿Quieres seguir platicando?...\"\r\n",
"\r\n",
" NO_PAPA = re.compile('(.*|^)mi\\spap(a|á)\\s(fallecio|murio)($|.*)');\r\n",
" NO_MATCH = re.match(NO_PAPA,message_)\r\n",
" if NO_MATCH:\r\n",
" my_conexion.cambiar_estado('papa',username)\r\n",
"\tev = random.randint(1, 2)\r\n",
"\tif ev == 1:\r\n",
" STATES['papa'] = True\r\n",
" ans =ans + \"\\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?...\"\r\n",
"\tif ev == 2:\r\n",
" STATES['papa'] = True\r\n",
" ans =ans + \"\\nQue fuerte, lo siento ¿Quieres seguir platicando?...\"\r\n",
" \r\n",
" NO_HERMA = re.compile('(.*|^)(fallecio|murio|no\\stengo)(|\\smi)\\sherman(o|a)($|.*)');\r\n",
" NO_MATCH = re.match(NO_PAPA,message_)\r\n",
" if NO_MATCH: \r\n",
" my_conexion.cambiar_estado('hermano',username)\r\n",
"\tev = random.randint(1, 2)\r\n",
"\tif ev == 1:\r\n",
" STATES['hermano'] = True\r\n",
" ans =ans + \"\\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?...\"\r\n",
"\tif ev == 2:\r\n",
" STATES['hermano'] = True\r\n",
" ans =ans + \"\\nQue fuerte, lo siento ¿Quieres seguir platicando?...\"\r\n",
"\r\n",
" NO_HERMA = re.compile('(.*|^)mi\\sherman(o|a)\\s(fallecio|murio)($|.*)');\r\n",
" NO_MATCH = re.match(NO_PAPA,message_)\r\n",
" if NO_MATCH:\r\n",
" my_conexion.cambiar_estado('hermano',username)\r\n",
"\tev = random.randint(1, 2)\r\n",
"\tif ev == 1:\r\n",
" STATES['hermano'] = True\r\n",
" ans =ans + \"\\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?...\"\r\n",
"\tif ev == 2:\r\n",
" STATES['hermano'] = True\r\n",
" ans =ans + \"\\nQue fuerte, lo siento ¿Quieres seguir platicando?...\"\r\n",
"\r\n",
" NO_MAS = re.compile('(.*|^)(fallecio|murio|no\\stengo)(|\\smi)\\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)($|.*)');\r\n",
" NO_MATCH = re.match(NO_MAS,message_)\r\n",
" if NO_MATCH:\r\n",
" my_conexion.cambiar_estado('mascota',username)\r\n",
"\tev = random.randint(1, 2)\r\n",
"\tif ev == 1:\r\n",
" STATES['mascota'] = True\r\n",
" ans =ans + \"\\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?...\"\r\n",
"\tif ev == 2:\r\n",
" STATES['mascota'] = True\r\n",
" ans =ans + \"\\nQue fuerte, lo siento ¿Quieres seguir platicando?...\"\r\n",
"\r\n",
" NO_MAS = re.compile('(.*|^)mi\\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)\\s(fallecio|murio)($|.*)');\r\n",
" NO_MATCH = re.match(NO_MAS,message_)\r\n",
" if NO_MATCH:\r\n",
" my_conexion.cambiar_estado('mascota',username)\r\n",
"\tev = random.randint(1, 2)\r\n",
"\tif ev == 1:\r\n",
" STATES['mascota'] = True\r\n",
" ans =ans + \"\\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?...\"\r\n",
"\tif ev == 2:\r\n",
" STATES['mascota'] = True\r\n",
" ans =ans + \"\\nQue fuerte, lo siento ¿Quieres seguir platicando?...\"\r\n",
"\r\n",
" NO_AMI = re.compile('(.*|^)(fallecio|murio|no\\stengo)(|\\smi)\\smejor amig(o|a)($|.*)');\r\n",
" NO_MATCH = re.match(NO_AMI,message_)\r\n",
" if NO_MATCH:\r\n",
" my_conexion.cambiar_estado('amigo',username)\r\n",
"\tev = random.randint(1, 2)\r\n",
"\tif ev == 1:\r\n",
" STATES['amigo'] = True\r\n",
" ans =ans + \"\\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?...\"\r\n",
"\tif ev == 2:\r\n",
" STATES['amigo'] = True\r\n",
" ans =ans + \"\\nQue fuerte, lo siento ¿Quieres seguir platicando?...\"\r\n",
"\r\n",
" NO_AMI = re.compile('(.*|^)mi\\smejor amig(o|a)\\s(fallecio|murio)($|.*)');\r\n",
" NO_MATCH = re.match(NO_AMI,message_)\r\n",
" if NO_MATCH:\r\n",
" my_conexion.cambiar_estado('amigo',username)\r\n",
"\tev = random.randint(1, 2)\r\n",
"\tif ev == 1:\r\n",
" STATES['amigo'] = True\r\n",
" ans =ans + \"\\nLo siento mucho, se que es algo demaciado doloroso, ¿Quieres seguir platicando?...\"\r\n",
"\tif ev == 2:\r\n",
" STATES['amigo'] = True\r\n",
" ans =ans + \"\\nQue fuerte, lo siento ¿Quieres seguir platicando?...\"\r\n",
"\r\n",
" NO_TRA = re.compile('(.*|^)((no\\strabajo)|(no\\stengo\\strabajo))($|.*)');\r\n",
" NO_MATCH = re.match(NO_TRA,message_)\r\n",
" if NO_MATCH:\r\n",
" my_conexion.cambiar_estado('trabajo',username)\r\n",
" STATES['trabajo'] = True\r\n",
"\r\n",
" NO_ESC = re.compile('(.*|^)no\\sestudio($|.*)');\r\n",
" NO_MATCH = re.match(NO_ESC,message_)\r\n",
" if NO_MATCH:\r\n",
" my_conexion.cambiar_estado('escuela',username)\r\n",
" STATES['escuela'] = True\r\n",
"\t\r\n",
" MAMA = re.compile('(.*|^)((M|m)i\\s(mama|mamá|madre)\\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\\s(excelente|bien|alegre|animada|apasionada|cariñosa|contenta|encantada|euforica|exitada|feliz|satisfecha|orgullosa))($|.*)');\r\n",
" MAMA_MATCH = re.match(MAMA,message_)\r\n",
" if MAMA_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nOh Que bien que tu mamá este asi...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nMe alegra leer esto de tu mamá...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nMaravilloso, que todo siga asi para tu mamá...\"\r\n",
"\tSTATES['mama'] = True\r\n",
"\r\n",
" MAMA = re.compile('(.*|^)((M|m)i\\s(mama|mamá|madre)\\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\\s(abrumada|mal|mala|enferma|afligida|agotada|amargada|angustiada|apatica|arrepentida|asustada|aterrada|avergonzada|celosa|cansada|confundida|debil|decaida|decepcionada|deprimida|desanimada|desesperada|enojada|infeliz|herida|insegura|triste|tensa|molesta|irritada))($|.*)');\r\n",
" MAMA_MATCH = re.match(MAMA,message_)\r\n",
" if MAMA_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nmmm que mal que tu mama este asi...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nLo siento, que todo mejore para tu mamá...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nLeer esto de tu mama no me gusta, lo siento por ella...\"\r\n",
"\tSTATES['mama'] = True\r\n",
"\r\n",
" PAPA = re.compile('(.*|^)((M|m)i\\s(papa|papá|padre)\\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\\s(excelente|bien|alegre|animado|apasionado|cariñoso|contento|encantado|euforico|exitado|feliz|satisfecho|orgullos))($|.*)');\r\n",
" PAPA_MATCH = re.match(PAPA,message_)\r\n",
" if PAPA_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nOh Que bien que tu papá este así...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nMe alegra leer esto de tu papá...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nMaravilloso, que todo siga asi para tu padre...\"\r\n",
"\tSTATES['papa'] = True\r\n",
"\r\n",
" PAPA = re.compile('(.*|^)((M|m)i\\s(papa|papá|padre)\\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\\s(abrumado|mal|afligido|agotado|malo|enfermo|amargado|angustiado|apatico|arrepentido|asustado|aterrado|avergonzado|celoso|cansado|confundido|debil|decaido|decepcionado|deprimido|desanimado|desesperado|enojado|infeliz|herido|inseguro|triste|tenso|molesto|irritado))($|.*)');\r\n",
" PAPA_MATCH = re.match(PAPA,message_)\r\n",
" if PAPA_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nmmm que mal que tu papa este asi...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nLo siento, que todo mejore para tu papa...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nLeer esto de tu papa no me gusta, lo siento por ella...\"\r\n",
"\tSTATES['papa'] = True\r\n",
"\r\n",
" HERMANO = re.compile('(.*|^)((M|m)i\\s(herman(o|a))\\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\\s(excelente|bien|alegre|animado|apasionado|cariñoso|contento|encantado|euforico|exitado|feliz|satisfecho|orgullos))($|.*)');\r\n",
" HERMANO_MATCH = re.match(HERMANO,message_)\r\n",
" if HERMANO_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nOh Que bien que tu hermano este asi...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nMe alegra leer esto de tu hermano...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nMaravilloso, que todo siga asi para tu hermano...\"\r\n",
"\tSTATES['hermano'] = True\r\n",
"\r\n",
" HERMANO = re.compile('(.*|^)((M|m)i\\s(herman(o|a))\\s(es|esta)(.*|algo|un poco|mucho|mucho muy|muy)\\s(abrumado|afligido|agotado|malo|enfermo|amargado|angustiado|apatico|arrepentido|asustado|aterrado|avergonzado|celoso|cansado|confundido|debil|decaido|decepcionado|deprimido|desanimado|desesperado|enojado|infeliz|herido|inseguro|triste|tenso|molesto|irritado))($|.*)');\r\n",
" HERMANO_MATCH = re.match(HERMANO,message_)\r\n",
" if HERMANO_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nmmm que mal que tu hermano este asi...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nLo siento, que todo mejore para tu hermano...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nLeer esto de tu hermano no me gusta, lo siento por ella...\"\r\n",
"\tSTATES['hermano'] = True\r\n",
"\r\n",
" MASCOTA = re.compile('(.*|^)((M|m)i(.*|s)\\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)(.*|s)\\s(es|esta|estan|son)(.*|algo|un poco|mucho|mucho muy|muy)\\s(excelente(.*|s)|bien|alegre(.*|s)|animad(o|a)(.*|s)|apasionad(o|a)(.*|s)|cariños(o|a)(.*|s)|content(o|a)(.*|s)|encantad(o|a)(.*|s)|euforic(o|a)(.*|s)|exitad(o|a)(.*|s)|feliz|felices|satisfech(o|a)(.*|s)|orgullos(o|a)(.*|s)))($|.*)');\r\n",
" MASCOTA_MATCH = re.match(MASCOTA,message_)\r\n",
" if MASCOTA_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nOh Que bien que tu mascota este asi...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nMe alegra leer esto de tu mascota...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nMaravilloso, que todo siga asi para tu mascota...\"\r\n",
"\tSTATES['mascota'] = True\r\n",
"\r\n",
" MASCOTA = re.compile('(.*|^)((M|m)i(.*|s)\\s(mascota|perro|gato|pajaro|pez|rana|tortuga|iguana)(.*|s)\\s(es|esta|estan|son)(.*|algo|un poco|mucho|mucho muy|muy)\\s(abrumad(o|a)(.*|s)|mal(o|a)|enferm(o|a)|afligid(o|a)(.*|s)|agotad(o|a)(.*|s)|amargad(o|a)(.*|s)|angustiad(o|a)(.*|s)|apatic(o|a)(.*|s)|arrepentid(o|a)(.*|s)|asustad(o|a)(.*|s)|aterrad(o|a)(.*|s)|avergonzad(o|a)(.*|s)|celos(o|a)(.*|s)|cansad(o|a)(.*|s)|confundid(o|a)(.*|s)|debil|debiles|decaid(o|a)(.*|s)|decepcionad(o|a)(.*|s)|deprimid(o|a)(.*|s)|desanimad(o|a)(.*|s)|desesperad(o|a)(.*|s)|enojad(o|a)(.*|s)|infeliz|infelices|herid(o|a)(.*|s)|insegur(o|a)(.*|s)|triste(.*|s)|tens(o|a)(.*|s)|molest(o|a)(.*|s)|irritad(o|a)(.*|s)))($|.*)');\r\n",
" MASCOTA_MATCH = re.match(MASCOTA,message_)\r\n",
" if MASCOTA_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nmmm que mal que tu mascota este asi...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nLo siento, que todo mejore para tu mascota...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nLeer esto de tu mascota no me gusta, lo siento por ella...\"\r\n",
"\tSTATES['mascota'] = True\r\n",
"\r\n",
" AMIGO = re.compile('(.*|^)((M|m)i(.*|s)\\s(mejores amigos|mejor amigo|amig(o|a)(.*|s))\\s(es|esta|son|estan)(.*|algo|un poco|mucho|mucho muy|muy)\\s(excelente(.*|s)|bien|alegre|alegres|animad(o|a)|animad(o|a)s|apasionad(o|a)|apasionad(o|a)s|cariños(o|a)|cariños(o|a)s|content(o|a)|content(o|a)s|encantad(o|a)|encantad(o|a)s|euforic(o|a)|euforic(o|a)s|exitad(o|a)|exitad(o|a)s|feliz|felices|satisfech(a|o)|satisfech(o|a)s|orgullos(o|a)|orgullos(o|a)s))($|.*)');\r\n",
" AMIGO_MATCH = re.match(AMIGO,message_)\r\n",
" if AMIGO_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nOh Que bien que tu amigo este asi...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nMe alegra leer esto de tu amigo...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nMaravilloso, que todo siga asi para tu amigo...\"\r\n",
"\tSTATES['amigo'] = True\r\n",
"\r\n",
" AMIGO = re.compile('(.*|^)((M|m)i(.*|s)\\s(mejores amigos|mejor amigo|amig(o|a)(.*|s))\\s(es|esta|son|estan)(.*|algo|un poco|mucho|mucho muy|muy)\\s(abrumad(o|a)(.*|s)|afligid(o|a)(.*|s)|agotad(o|a)(.*|s)|amargad(o|a)(.*|s)|angustiad(o|a)(.*|s)|apatic(o|a)(.*|s)|arrepentid(o|a)(.*|s)|asustad(o|a)(.*|s)|aterrad(o|a)(.*|s)|avergonzad(o|a)(.*|s)|celos(o|a)(.*|s)|cansad(o|a)(.*|s)|confundid(o|a)(.*|s)|debil|debiles|decaid(o|a)(.*|s)|decepcionad(o|a)(.*|s)|deprimid(o|a)(.*|s)|desanimad(o|a)(.*|s)|desesperad(o|a)(.*|s)|enojad(o|a)(.*|s)|infeliz|infelices|herid(o|a)(.*|s)|insegur(o|a)(.*|s)|triste(.*|s)|tens(o|a)(.*|s)|molest(o|a)(.*|s)|irritad(o|a)(.*|s)))($|.*)');\r\n",
" AMIGO_MATCH = re.match(AMIGO,message_)\r\n",
" if AMIGO_MATCH:\r\n",
"\tev = random.randint(1, 3)\r\n",
"\tif ev == 1:\r\n",
" \tans =ans + \"\\nmmm que mal que tu amigo este asi...\"\r\n",
"\tif ev == 2:\r\n",
" \tans =ans + \"\\nLo siento, que todo mejore para tu amigo...\"\r\n",
"\tif ev == 3:\r\n",
" \tans =ans + \"\\nLeer esto de tu amigo no me gusta, lo siento por ella...\"\r\n",
"\tSTATES['amigo'] = True\r\n",
"\r\n",
" TRABAJO = re.compile('(.*|^)(((M|m)i|(E|e)l|(E|e)n\\sel)\\s(negocio|empleo|trabajo)\\s(es|esta|son|estuvo))($|.*)');\r\n",
" TRABAJO_MATCH = re.match(TRABAJO,message_)\r\n",
" if TRABAJO_MATCH:\r\n",
"\tSTATES['trabajo'] = True\r\n",
"\r\n",
" ESCUELA = re.compile('(.*|^)(((M|m)i|(E|e)l|(E|e)n\\sel)\\s(escuela|universidad|prepa|preparatoria|secu|secundaria|primaria)\\s(es|esta|son|estuvo))($|.*)');\r\n",
" ESCUELA_MATCH = re.match(ESCUELA,message_)\r\n",
" if ESCUELA_MATCH:\r\n",
"\tSTATES['escuela'] = True\r\n",
"\r\n",
"#Conversaciones del Bot\r\n",
"\r\n",
" if vez > 0:\r\n",
" ev = random.randint(1, 9)\r\n",
" if ev == 1:\r\n",
" ans = ans + \"\\n ¿cómo te fue hoy?...\"\r\n",
" if ev == 2:\r\n",
" ans = ans + \"\\n ¿cómo estuvo tu dia de hoy?...\"\r\n",
" if ev == 3:\r\n",
" ans = ans + \"\\n ¿que hiciste el dia de hoy?...\"\r\n",
" if ev == 4:\r\n",
" ans = ans + \"\\n ¿algo interesante que hicieras hoy?...\"\r\n",
" if ev == 5:\r\n",
" ans = ans + \"\\n ¿que te paso el dia de hoy?...\"\r\n",
" if ev == 6:\r\n",
" ans = ans + \"\\n ¿Que mas hiciste en tu dia?...\"\r\n",
" if ev == 7:\r\n",
" ans = ans + \"\\n Y ¿Que mas?...\"\r\n",
" if ev == 8:\r\n",
" ans = ans + \"\\n ¿Algo mas que quieras platicarme?...\"\r\n",
" if ev == 9:\r\n",
" ans = ans + \"\\n ¿Que mas hiciste?...\"\r\n",
"\r\n",
" if vez == 0:\r\n",
" ev = random.randint(1, 4)\r\n",
" if ev == 1:\r\n",
" ans = ans + \"\\n ¿cómo estas hoy?...\"\r\n",
" if ev == 2:\r\n",
" ans = ans + \"\\n ¿cómo te sientes el dia de hoy?...\"\r\n",
" if ev == 3:\r\n",
" ans = ans + \"\\n ¿cómo te sentiste hoy?...\"\r\n",
" if ev == 4:\r\n",
" ans = ans + \"\\n ¿que te paso el dia de hoy?...\"\r\n",
"\t\r\n",
" # REVISAR ESTADO\r\n",
" if STATES[username]['mama']== 'False':\r\n",
" print(vez)\r\n",
" if vez > 2:\r\n",
" ans=ans + \"\\n Cuéntame, ¿Como esta tu mamá?\"\r\n",
" elif STATES['papa']== 'False':\r\n",
" if vez > 0:\r\n",
" ans=ans + \"\\n¿Cómo esta tu papá?\"\r\n",
" elif STATES['hermano']== 'False':\r\n",
" if vez > 0:\r\n",
" ans=ans + \"\\n¿Que tal tu hermano?\"\r\n",
" elif STATES['mascota']== 'False':\r\n",
" if vez > 0:\r\n",
" ans=ans + \"\\n¿Que tal tu mascota?\"\r\n",
" elif STATES['amigo']== 'False':\r\n",
" if vez > 0:\r\n",
" ans=ans + \"\\n¿Como esta tu mejor amigo?\"\r\n",
" elif STATES['escuela']== 'False':\r\n",
" if vez > 0:\r\n",
" ans=ans + \"\\n¿Como vas en la escuela?\"\r\n",
" elif STATES['trabajo']== 'False':\r\n",
" if vez > 0:\r\n",
" ans=ans + \"\\n¿Como estubo el trabajo?\"\r\n",
"\r\n",
"\r\n",
"\r\n",
"\r\n",
" ADIOS = re.compile('(H|h)asta\\sluego|HASTA\\sLUEGO|(A|a)di(o|ó)s|ADI(O|Ó)S|(N|n)os\\svemos|NOS\\sVEMOS|(C|c)hao|CHAO|(B|b)ye|BYE($)');\r\n",
" ADIOS_MATCH = re.match(ADIOS,message_)\r\n",
" if ADIOS_MATCH:\r\n",
" vez = 100\r\n",
"\tev = random.randint(1, 4)\r\n",
"\tif ev == 1:\r\n",
" \tans =\"\\n Adios, fue un gusto platicar contigo.\"\r\n",
"\tif ev == 2:\r\n",
" \tans =\"\\n Adios, Me encanta platicar contigo.\"\r\n",
" if ev == 3:\r\n",
" \tans =\"\\n Adios, Te deseo suerte y que tus dias sean mejores.\"\r\n",
" if ev == 4:\r\n",
" \tans =\"\\n Adios, Espero que vuelvas a platicar conmigo.... ;)\"\r\n",
"\r\n",
" \r\n",
" vez = vez + 1\r\n",
" STATES['vez'] = vez\r\n",
" \r\n",
" return ans\r\n",
"\r\n",
"def sinAcentos(Mensaje):\r\n",
" cadena= ''.join((c for c in unicodedata.normalize('NFD',unicode(Mensaje)) if unicodedata.category(c) != 'Mn'))\r\n",
" return cadena.decode().lower()\r\n"
] | [
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0.2,
0.08333333333333333,
0,
0,
0,
0.034334763948497854,
0.021739130434782608,
0,
0.07142857142857142,
0.07142857142857142,
0.05,
0.14285714285714285,
0.06666666666666667,
0.14285714285714285,
0.0784313725490196,
0,
0,
0,
0.036585365853658534,
0.017857142857142856,
0,
0.07142857142857142,
0.07142857142857142,
0.04132231404958678,
0.14285714285714285,
0.058823529411764705,
0.14285714285714285,
0.05405405405405406,
0.1,
0.024096385542168676,
0.017241379310344827,
0,
0.07142857142857142,
0.07142857142857142,
0.05,
0.14285714285714285,
0.06666666666666667,
0.14285714285714285,
0.056818181818181816,
0.16666666666666666,
0.05813953488372093,
0.023255813953488372,
0,
0.018867924528301886,
0.07142857142857142,
0.07142857142857142,
0.02857142857142857,
0.018018018018018018,
0.14285714285714285,
0.02857142857142857,
0.012345679012345678,
0,
0.0410958904109589,
0.023255813953488372,
0,
0.018867924528301886,
0.07142857142857142,
0.07142857142857142,
0.02857142857142857,
0.018018018018018018,
0.14285714285714285,
0.02857142857142857,
0.012345679012345678,
0,
0.05319148936170213,
0.023255813953488372,
0,
0.018867924528301886,
0.07142857142857142,
0.07142857142857142,
0.02857142857142857,
0.018018018018018018,
0.14285714285714285,
0.02857142857142857,
0.012345679012345678,
0,
0.0410958904109589,
0.023255813953488372,
0,
0.018867924528301886,
0.07142857142857142,
0.07142857142857142,
0.02857142857142857,
0.018018018018018018,
0.14285714285714285,
0.02857142857142857,
0.012345679012345678,
0.07142857142857142,
0.05555555555555555,
0.023255813953488372,
0.05263157894736842,
0.017857142857142856,
0.07142857142857142,
0.07142857142857142,
0.02631578947368421,
0.018018018018018018,
0.14285714285714285,
0.02631578947368421,
0.012345679012345678,
0,
0.03896103896103896,
0.023255813953488372,
0,
0.017857142857142856,
0.07142857142857142,
0.07142857142857142,
0.02631578947368421,
0.018018018018018018,
0.14285714285714285,
0.02631578947368421,
0.012345679012345678,
0,
0.0390625,
0.023809523809523808,
0,
0.017857142857142856,
0.07142857142857142,
0.07142857142857142,
0.02631578947368421,
0.018018018018018018,
0.14285714285714285,
0.02631578947368421,
0.012345679012345678,
0,
0.034782608695652174,
0.023809523809523808,
0,
0.017857142857142856,
0.07142857142857142,
0.07142857142857142,
0.02631578947368421,
0.018018018018018018,
0.14285714285714285,
0.02631578947368421,
0.012345679012345678,
0,
0.05434782608695652,
0.023809523809523808,
0,
0.018518518518518517,
0.07142857142857142,
0.07142857142857142,
0.027777777777777776,
0.018018018018018018,
0.14285714285714285,
0.027777777777777776,
0.012345679012345678,
0,
0.0379746835443038,
0.023809523809523808,
0,
0.018518518518518517,
0.07142857142857142,
0.07142857142857142,
0.027777777777777776,
0.018018018018018018,
0.14285714285714285,
0.027777777777777776,
0.012345679012345678,
0,
0.05128205128205128,
0.023809523809523808,
0,
0.017857142857142856,
0,
0,
0.03773584905660377,
0.023809523809523808,
0,
0.017857142857142856,
0,
1,
0.025974025974025976,
0.023809523809523808,
0,
0.07142857142857142,
0.07142857142857142,
0.06557377049180328,
0.14285714285714285,
0.06779661016949153,
0.14285714285714285,
0.05555555555555555,
0.08333333333333333,
0,
0.0158311345646438,
0.023809523809523808,
0,
0.07142857142857142,
0.07142857142857142,
0.06557377049180328,
0.14285714285714285,
0.058823529411764705,
0.14285714285714285,
0.04938271604938271,
0.08333333333333333,
0,
0.02608695652173913,
0.023809523809523808,
0,
0.07142857142857142,
0.07142857142857142,
0.06557377049180328,
0.14285714285714285,
0.06779661016949153,
0.14285714285714285,
0.0547945205479452,
0.08333333333333333,
0,
0.0158311345646438,
0.023809523809523808,
0,
0.07142857142857142,
0.07142857142857142,
0.06557377049180328,
0.14285714285714285,
0.058823529411764705,
0.14285714285714285,
0.04938271604938271,
0.08333333333333333,
0,
0.026200873362445413,
0.020833333333333332,
0,
0.07142857142857142,
0.07142857142857142,
0.0625,
0.14285714285714285,
0.06451612903225806,
0.14285714285714285,
0.05333333333333334,
0.07407407407407407,
0,
0.016042780748663103,
0.020833333333333332,
0,
0.07142857142857142,
0.07142857142857142,
0.0625,
0.14285714285714285,
0.056338028169014086,
0.14285714285714285,
0.05952380952380952,
0.07407407407407407,
0,
0.015,
0.020833333333333332,
0,
0.07142857142857142,
0.07142857142857142,
0.0625,
0.14285714285714285,
0.06451612903225806,
0.14285714285714285,
0.05333333333333334,
0.07407407407407407,
0,
0.0084985835694051,
0.020833333333333332,
0,
0.07142857142857142,
0.07142857142857142,
0.0625,
0.14285714285714285,
0.056338028169014086,
0.14285714285714285,
0.05952380952380952,
0.07407407407407407,
0,
0.012958963282937365,
0.022727272727272728,
0,
0.07142857142857142,
0.07142857142857142,
0.06451612903225806,
0.14285714285714285,
0.06666666666666667,
0.14285714285714285,
0.0547945205479452,
0.08,
0,
0.008955223880597015,
0.022727272727272728,
0,
0.07142857142857142,
0.07142857142857142,
0.06451612903225806,
0.14285714285714285,
0.057971014492753624,
0.14285714285714285,
0.06097560975609756,
0.08,
0,
0.05042016806722689,
0.020833333333333332,
0,
0.07407407407407407,
0,
0.0375,
0.020833333333333332,
0,
0.07407407407407407,
0,
0.04,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0.045454545454545456,
0.022727272727272728,
0,
0,
0.017241379310344827,
0.027777777777777776,
0,
0.02127659574468085,
0.02564102564102564,
0,
0.020833333333333332,
0.02564102564102564,
0,
0.020833333333333332,
0.02702702702702703,
0,
0.018518518518518517,
0.02564102564102564,
0,
0.019230769230769232,
0.02564102564102564,
0,
0.019230769230769232,
0,
0,
0,
0,
0.051094890510948905,
0.022727272727272728,
0,
0,
0.07142857142857142,
0.07142857142857142,
0.06896551724137931,
0.14285714285714285,
0.07142857142857142,
0,
0.05555555555555555,
0,
0.05555555555555555,
0,
0.1111111111111111,
0.05263157894736842,
0,
0.1,
0,
0,
0.038461538461538464,
0.02586206896551724,
0
] | 405 | 0.045703 | false |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import httplib
import urllib
import json
import urllib2
import re
import os
class BaiduImage(object):
    """Crawler that pages through Baidu Image search results and saves
    every returned image into the local ./image directory.

    NOTE(review): Python 2 code (httplib/urllib2, print statements,
    ``except Exception,e`` syntax) -- will not run under Python 3.
    """
    def __init__(self):
        super(BaiduImage,self).__init__()
        # Runtime notice (Chinese): "Fetching images, CTRL+C to quit..."
        print u'图片获取中,CTRL+C 退出程序...'
        self.page = 60 # current result offset; the API pages in steps of 60
        # Make sure the output directory exists before any download starts.
        if not os.path.exists(r'./image'):
            os.mkdir(r'./image')

    def request(self):
        """Loop forever fetching successive result pages and handing each
        JSON payload's 'imgs' list to download(); only CTRL+C or an
        exception breaks the loop.
        """
        try:
            while 1:
                conn = httplib.HTTPConnection('image.baidu.com')
                # 'word' is the URL-encoded search query; rn=60 results per
                # page, pn=current offset.
                request_url ='/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=%E7%BE%8E%E5%A5%B3&cg=girl&rn=60&pn='+str(self.page)
                headers = {'User-Agent' :'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0','Content-type': 'test/html'}
                #body = urllib.urlencode({'tn':'resultjsonavatarnew','ie':'utf-8','word':'%E7%BE%8E%E5%A5%B3','cg':'girl','pn':self.page,'rn':'60'})
                conn.request('GET',request_url,headers = headers)
                r= conn.getresponse()
                #print r.status
                if r.status == 200:
                    data = r.read()
                    # Decode leniently: the payload may contain byte
                    # sequences that are not valid in the default codec.
                    data = unicode(data, errors='ignore')
                    decode = json.loads(data)
                    self.download(decode['imgs'])
                self.page += 60
        except Exception,e:
            print e
        finally:
            # NOTE(review): 'conn' is unbound here if HTTPConnection()
            # itself raised before the first assignment, which would mask
            # the original error with a NameError.
            conn.close()

    def download(self,data):
        """Fetch every image in *data* (a list of result dicts) and write
        it under ./image/, named after the last path component of its URL.
        """
        for d in data:
            #url = d['thumbURL']  thumbnail, size 200
            #url = d['hoverURL']  size 360
            url = d['objURL']
            # NOTE(review): rebinding 'data' here shadows the method
            # argument after the first iteration's download.
            data = urllib2.urlopen(url).read()
            pattern = re.compile(r'.*/(.*?)\.jpg',re.S)
            item = re.findall(pattern,url)
            FileName = str('image/')+item[0]+str('.jpg')
            with open(FileName,'wb') as f:
                f.write(data)
if __name__ == '__main__':
    # Script entry point: build the crawler and start paging immediately.
    crawler = BaiduImage()
    crawler.request()
"#!/usr/bin/python \n",
"# -*- coding:utf-8 -*- \n",
"import httplib \n",
"import urllib \n",
"import json \n",
"import urllib2 \n",
"import re \n",
"import os \n",
" \n",
"class BaiduImage(object): \n",
" def __init__(self): \n",
" super(BaiduImage,self).__init__() \n",
" print u'图片获取中,CTRL+C 退出程序...' \n",
" self.page = 60 #当前页数 \n",
" if not os.path.exists(r'./image'): \n",
" os.mkdir(r'./image') \n",
" \n",
" def request(self): \n",
" try: \n",
" while 1: \n",
" conn = httplib.HTTPConnection('image.baidu.com') \n",
" request_url ='/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=%E7%BE%8E%E5%A5%B3&cg=girl&rn=60&pn='+str(self.page) \n",
" headers = {'User-Agent' :'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0','Content-type': 'test/html'} \n",
" #body = urllib.urlencode({'tn':'resultjsonavatarnew','ie':'utf-8','word':'%E7%BE%8E%E5%A5%B3','cg':'girl','pn':self.page,'rn':'60'}) \n",
" conn.request('GET',request_url,headers = headers) \n",
" r= conn.getresponse() \n",
" #print r.status \n",
" if r.status == 200: \n",
" data = r.read() \n",
" \n",
" data = unicode(data, errors='ignore') \n",
" decode = json.loads(data) \n",
" self.download(decode['imgs']) \n",
" \n",
" self.page += 60 \n",
" except Exception,e: \n",
" print e \n",
" finally: \n",
" conn.close() \n",
" \n",
" def download(self,data): \n",
" \n",
" for d in data: \n",
" #url = d['thumbURL'] 缩略图 尺寸200 \n",
" #url = d['hoverURL'] 尺寸360 \n",
" url = d['objURL'] \n",
" data = urllib2.urlopen(url).read() \n",
" \n",
" pattern = re.compile(r'.*/(.*?)\\.jpg',re.S) \n",
" item = re.findall(pattern,url) \n",
" FileName = str('image/')+item[0]+str('.jpg') \n",
" \n",
" with open(FileName,'wb') as f: \n",
" f.write(data) \n",
" \n",
"if __name__ == '__main__': \n",
" bi = BaiduImage() \n",
" bi.request() "
] | [
0.05,
0.04,
0.058823529411764705,
0.0625,
0.07142857142857142,
0.058823529411764705,
0.08333333333333333,
0.08333333333333333,
0.3333333333333333,
0.07142857142857142,
0.038461538461538464,
0.045454545454545456,
0.025,
0.04,
0.022222222222222223,
0.01818181818181818,
0.14285714285714285,
0.04,
0.06666666666666667,
0.043478260869565216,
0.014925373134328358,
0.02127659574468085,
0.034013605442176874,
0.019867549668874173,
0.07352941176470588,
0.05,
0.058823529411764705,
0.02631578947368421,
0.02631578947368421,
0.043478260869565216,
0.016666666666666666,
0.020833333333333332,
0.019230769230769232,
0.06666666666666667,
0.029411764705882353,
0.06666666666666667,
0.045454545454545456,
0.05263157894736842,
0.037037037037037035,
0.06666666666666667,
0.06451612903225806,
0.14285714285714285,
0.037037037037037035,
0.041666666666666664,
0.0392156862745098,
0.03125,
0.02040816326530612,
0.06666666666666667,
0.034482758620689655,
0.044444444444444446,
0.01694915254237288,
0.06666666666666667,
0.044444444444444446,
0.03125,
0.14285714285714285,
0.1,
0.041666666666666664,
0.1111111111111111
] | 58 | 0.056252 | false |
#!/usr/bin/env python
import os
import csv, sys, json
def run_rnaseq_docker(basename_I,
                      host_dirname_I,
                      organism_I,
                      host_indexes_dir_I,
                      host_dirname_O,
                      paired_I='paired',
                      threads_I=2,trim3_I=3,
                      library_type_I='fr-firststrand',
                      index_type_I = '.gtf',
                      bowtie_options_I = '',
                      cufflinks_options_I = '',
                      ):
    '''Process RNA sequencing data inside a docker container.

    INPUT:
    basename_I = base name of the fastq files
    host_dirname_I = directory for .fastq files (remote storage location)
    organism_I = name of index
    host_indexes_dir_I = directory for indexes (remote storage location)
    host_dirname_O = location for output on the host
    paired_I = 'unpaired', 'paired', or 'mixed' end reads
    threads_I = number of processors to use
    trim3_I = number of bases to trim off each end
    library_type_I = library type string passed through to process_rnaseq
    index_type_I = string for index extention (e.g., '.gtf' or '.gff')
    bowtie_options_I, cufflinks_options_I = extra options passed through

    SIDE EFFECTS: shells out to docker (run/cp/rm); copies the .bam, .gff,
    .sam files and the output directory for basename_I to host_dirname_O.
    Returns None.

    EXAMPLE:
    basename_I = 140818_11_OxicEvo04EcoliGlcM9_Broth-4
    host_dirname_I = /media/proline/dmccloskey/Resequencing_RNA/fastq/140818_11_OxicEvo04EcoliGlcM9_Broth-4/
    organism_I = e_coli
    host_indexes_dir_I = /media/proline/dmccloskey/Resequencing_RNA/indexes/
    host_dirname_O = /media/proline/dmccloskey/Resequencing_RNA/fastq/140818_11_OxicEvo04EcoliGlcM9_Broth-4/
    '''
    # fixed mount points inside the dmccloskey/sequencing_utilities image
    docker_mount_1 = '/media/Sequencing/fastq/'
    docker_mount_2 = '/media/Sequencing/indexes/'
    user_output = '/home/user/Sequencing/output/'
    container_name = 'rnaseq'

    # build the python call that is executed inside the container
    # NOTE(review): all values are interpolated into a shell command without
    # quoting/escaping (os.system) -- only use with trusted input.
    rnaseq_cmd = ("process_rnaseq('%s','%s','%s','%s','%s',paired='%s',threads=%s,trim3=%s,library_type='%s',\
                  index_type='%s',bowtie_options_I='%s',cufflinks_options_I='%s');" %\
                  (basename_I, docker_mount_1,user_output,organism_I,docker_mount_2,paired_I,threads_I,trim3_I,library_type_I,
                  index_type_I,bowtie_options_I,cufflinks_options_I))
    python_cmd = ("from sequencing_utilities.rnaseq import process_rnaseq;%s" %(rnaseq_cmd))
    docker_run = ('docker run --name=%s -v %s:%s -v %s:%s -u=root dmccloskey/sequencing_utilities python3 -c "%s"' %(container_name,host_dirname_I,docker_mount_1,host_indexes_dir_I,docker_mount_2,python_cmd))
    os.system(docker_run)

    # copy the alignment/annotation results out of the container onto the host
    docker_cp = ("docker cp %s:%s%s.bam %s" %(container_name,user_output,basename_I,host_dirname_O))
    os.system(docker_cp)
    docker_cp = ("docker cp %s:%s%s.gff %s" %(container_name,user_output,basename_I,host_dirname_O))
    os.system(docker_cp)
    docker_cp = ("docker cp %s:%s%s.sam %s" %(container_name,user_output,basename_I,host_dirname_O))
    os.system(docker_cp)
    docker_cp = ("docker cp %s:%s%s/ %s" %(container_name,user_output,basename_I,host_dirname_O))
    os.system(docker_cp)

    # delete the container and the container content
    cmd = ('docker rm -v %s' %(container_name))
    os.system(cmd)
def run_rnaseq_docker_fromCsvOrFile(filename_csv_I = None,filename_list_I = None):
    '''Call run_rnaseq_docker on a list of basenames and directories.

    INPUT:
    filename_csv_I = optional path to a .csv file describing the runs;
                     when given, it overrides filename_list_I
    filename_list_I = [{basename_I:...,host_dirname_I:...,},...]
    '''
    # BUGFIX: the default used to be a shared mutable list ([]); use the
    # None sentinel and create a fresh list per call instead.
    if filename_list_I is None:
        filename_list_I = []
    if filename_csv_I:
        filename_list_I = read_csv(filename_csv_I)
    for row in filename_list_I:
        cmd = ("echo running rnaseq for basename %s" %(row['basename_I']))
        os.system(cmd)
        run_rnaseq_docker(row['basename_I'],
                    row['host_dirname_I'],
                    row['organism_I'],
                    row['host_indexes_dir_I'],
                    row['host_dirname_O'],
                    row['paired_I'],
                    row['threads_I'],
                    row['trim3_I'],
                    row['library_type_I'],
                    row['index_type_I'],
                    row['bowtie_options_I'],
                    row['cufflinks_options_I'])
def read_csv(filename):
    """Read table data from a csv file into a list of row dicts.

    Each row is keyed by the csv header line; all values are strings.
    Exits the process with an error message when the file is missing or
    malformed (preserves the original script's behaviour).
    """
    data_O = []
    try:
        with open(filename, 'r') as csvfile:
            reader = csv.DictReader(csvfile)
            try:
                # removed an unused `keys = reader.fieldnames` assignment
                for row in reader:
                    data_O.append(row)
            except csv.Error as e:
                sys.exit('file %s, line %d: %s' % (filename, reader.line_num, e))
    except IOError as e:
        sys.exit('%s does not exist' % e)
    return data_O
def main_singleFile():
    """Command line entry point: process one RNAseq run using docker.
    e.g. python3 run_rnaseq_docker.py '140818_11_OxicEvo04EcoliGlcM9_Broth-4' '/media/proline/dmccloskey/Resequencing_RNA/fastq/140818_11_OxicEvo04EcoliGlcM9_Broth-4/' 'e_coli' '/media/proline/dmccloskey/Resequencing_RNA/indexes/' '/home/douglas/Documents/Resequencing_RNA/output/' '/media/proline/dmccloskey/Resequencing_RNA/fastq/140818_11_OxicEvo04EcoliGlcM9_Broth-4/' 2 3
    """
    from argparse import ArgumentParser
    parser = ArgumentParser("process RNAseq data")
    # (name, help) pairs for every positional argument, in display order
    cli_arguments = (
        ("basename_I", "base name of the fastq files"),
        ("host_dirname_I", "directory for .fastq files"),
        ("organism_I", "name of index"),
        ("host_indexes_dir_I", "directory for indexes"),
        ("host_dirname_O", "location for output on the host"),
        ("paired_I", "unpaired, paired, or mixed end reads (i.e., 'unpaired', 'paired', 'mixed')"),
        ("threads_I", "number of processors to use"),
        ("trim3_I", "trim 3 bases off of each end"),
        ("library_type_I", "the library type"),
        ("index_type_I", "index file type (.gtf or .gff)"),
        ("bowtie_options_I", "additional command line arguments not explicitly provided"),
        ("cufflinks_options_I", "additional command line arguments not explicitly provided"),
    )
    for arg_name, arg_help in cli_arguments:
        parser.add_argument(arg_name, help=arg_help)
    args = parser.parse_args()
    run_rnaseq_docker(args.basename_I, args.host_dirname_I,
                      args.organism_I, args.host_indexes_dir_I,
                      args.host_dirname_O,
                      args.paired_I,
                      args.threads_I, args.trim3_I,
                      args.library_type_I,
                      args.index_type_I,
                      args.bowtie_options_I,
                      args.cufflinks_options_I)
def main_batchFile():
    """Command line entry point: process several RNAseq runs using docker.
    e.g. python3 run_rnaseq_docker.py '/media/proline/dmccloskey/Resequencing_RNA/rnaseq_files.csv' []
    """
    from argparse import ArgumentParser
    parser = ArgumentParser("process RNAseq data")
    parser.add_argument("filename_csv_I", help="""list of files and parameters in a .csv""")
    parser.add_argument("filename_list_I", help="""list of files and parameters e.g. [{basename_I:...,host_dirname_I:...,},...]""")
    parsed = parser.parse_args()
    # NOTE(review): argparse always yields filename_list_I as a string, not a
    # list; in practice only the csv path is usable from the command line.
    run_rnaseq_docker_fromCsvOrFile(parsed.filename_csv_I, parsed.filename_list_I)
# Script entry point: batch mode is active; single-file mode is kept for reference.
if __name__ == "__main__":
    #main_singleFile();
    main_batchFile();
"#!/usr/bin/env python\n",
"import os\n",
"import csv, sys, json\n",
"\n",
"def run_rnaseq_docker(basename_I,\n",
" host_dirname_I,\n",
" organism_I,\n",
" host_indexes_dir_I,\n",
" host_dirname_O,\n",
" paired_I='paired',\n",
" threads_I=2,trim3_I=3,\n",
" library_type_I='fr-firststrand',\n",
" index_type_I = '.gtf',\n",
" bowtie_options_I = '',\n",
" cufflinks_options_I = '',\n",
" ):\n",
" '''Process RNA sequencing data\n",
" INPUT:\n",
" basename_I = base name of the fastq files\n",
" host_dirname_I = directory for .fastq files\n",
" organism_I = name of index\n",
" host_indexes_dir_I = directory for indexes\n",
" local_dirname_I = location for temporary output\n",
" host_dirname_O = location for output on the host\n",
" index_type_I = string for index extention (e.g., '.gtf' or '.gff')\n",
"\n",
" EXAMPLE:\n",
" basename_I = 140818_11_OxicEvo04EcoliGlcM9_Broth-4\n",
" host_dirname_I = /media/proline/dmccloskey/Resequencing_RNA/fastq/140818_11_OxicEvo04EcoliGlcM9_Broth-4/ (remote storage location)\n",
" organism_I = e_coli\n",
" host_indexes_dir_I = /media/proline/dmccloskey/Resequencing_RNA/indexes/ (remote storage location)\n",
" local_dirname_I = /home/douglas/Documents/Resequencing_RNA/ (local host location)\n",
" host_dirname_O = /media/proline/dmccloskey/Resequencing_RNA/fastq/140818_11_OxicEvo04EcoliGlcM9_Broth-4/ (remote storage location)\n",
" '''\n",
" #1. create a container named rnaseq using sequencing utilities\n",
" #1. create a container named rnaseqdata using sequencing utilities\n",
" #2. mount the host file into rnaseqdata\n",
" #2. mount the rnaseqdata volumes into rnaseq\n",
" #3. run docker\n",
" #data_mount_1 = '/media/Sequencing/fastq/'\n",
" #data_mount_2 = '/media/Sequencing/indexes/'\n",
" #docker_mount_1 = '/home/user/Sequencing/fastq/'\n",
" #docker_mount_2 = '/home/user/Sequencing/indexes/'\n",
" #datacontainer_name = 'rnaseqdata';\n",
" docker_mount_1 = '/media/Sequencing/fastq/'\n",
" docker_mount_2 = '/media/Sequencing/indexes/'\n",
" user_output = '/home/user/Sequencing/output/'\n",
" container_name = 'rnaseq';\n",
" \n",
" #make the processing container\n",
" rnaseq_cmd = (\"process_rnaseq('%s','%s','%s','%s','%s',paired='%s',threads=%s,trim3=%s,library_type='%s',\\\n",
" index_type='%s',bowtie_options_I='%s',cufflinks_options_I='%s');\" %\\\n",
" (basename_I, docker_mount_1,user_output,organism_I,docker_mount_2,paired_I,threads_I,trim3_I,library_type_I,\n",
" index_type_I,bowtie_options_I,cufflinks_options_I));\n",
" python_cmd = (\"from sequencing_utilities.rnaseq import process_rnaseq;%s\" %(rnaseq_cmd));\n",
" docker_run = ('docker run --name=%s -v %s:%s -v %s:%s -u=root dmccloskey/sequencing_utilities python3 -c \"%s\"' %(container_name,host_dirname_I,docker_mount_1,host_indexes_dir_I,docker_mount_2,python_cmd));\n",
" os.system(docker_run);\n",
" ##make the data container (avoid permission errors)\n",
" #bash_cmd = ('cp -R %s %s && cp -R %s %s' %(data_mount_1,docker_mount_1,data_mount_2,docker_mount_2));\n",
" #docker_run = ('docker run --name=%s -v %s:%s -v %s:%s dmccloskey/sequencing_utilities bash -c \"%s\"' %(datacontainer_name,host_dirname_I,data_mount_1,host_indexes_dir_I,data_mount_2,bash_cmd));\n",
" #os.system(docker_run);\n",
" ##make the processing container\n",
" #rnaseq_cmd = (\"process_rnaseq('%s','%s','%s','%s','%s',paired='%s',threads=%s,trim3=%s);\" %(basename_I, docker_mount_1,user_output,organism_I,docker_mount_2,paired_I,threads_I,trim3_I));\n",
" #python_cmd = (\"from sequencing_utilities.rnaseq import process_rnaseq;%s\" %(rnaseq_cmd));\n",
" #docker_run = ('docker run --name=%s --volumes-from=%s dmccloskey/sequencing_utilities python3 -c \"%s\"' %(container_name,datacontainer_name,python_cmd));\n",
" #os.system(docker_run);\n",
" #copy the gff file out of the docker container into a host location\n",
" docker_cp = (\"docker cp %s:%s%s.bam %s\" %(container_name,user_output,basename_I,host_dirname_O));\n",
" os.system(docker_cp);\n",
" docker_cp = (\"docker cp %s:%s%s.gff %s\" %(container_name,user_output,basename_I,host_dirname_O));\n",
" os.system(docker_cp);\n",
" docker_cp = (\"docker cp %s:%s%s.sam %s\" %(container_name,user_output,basename_I,host_dirname_O));\n",
" os.system(docker_cp);\n",
" docker_cp = (\"docker cp %s:%s%s/ %s\" %(container_name,user_output,basename_I,host_dirname_O));\n",
" os.system(docker_cp);\n",
" #delete the container and the container content:\n",
" cmd = ('docker rm -v %s' %(container_name));\n",
" #cmd = ('docker rm -v %s' %(datacontainer_name));\n",
" os.system(cmd);\n",
" \n",
"def run_rnaseq_docker_fromCsvOrFile(filename_csv_I = None,filename_list_I = []):\n",
" '''Call run_rnaseq_docker on a list of basenames and directories\n",
" INPUT:\n",
" filename_list_I = [{basename_I:...,host_dirname_I:...,},...]\n",
" '''\n",
" if filename_csv_I:\n",
" filename_list_I = read_csv(filename_csv_I);\n",
" for row_cnt,row in enumerate(filename_list_I):\n",
" cmd = (\"echo running rnaseq for basename %s\" %(row['basename_I']));\n",
" os.system(cmd);\n",
" run_rnaseq_docker(row['basename_I'],\n",
" row['host_dirname_I'],\n",
" row['organism_I'],\n",
" row['host_indexes_dir_I'],\n",
" row['host_dirname_O'],\n",
" row['paired_I'],\n",
" row['threads_I'],\n",
" row['trim3_I'],\n",
" row['library_type_I'],\n",
" row['index_type_I'],\n",
" row['bowtie_options_I'],\n",
" row['cufflinks_options_I']);\n",
" \n",
"def read_csv(filename):\n",
" \"\"\"read table data from csv file\"\"\"\n",
" data_O = [];\n",
" try:\n",
" with open(filename, 'r') as csvfile:\n",
" reader = csv.DictReader(csvfile);\n",
" try:\n",
" keys = reader.fieldnames;\n",
" for row in reader:\n",
" data_O.append(row);\n",
" except csv.Error as e:\n",
" sys.exit('file %s, line %d: %s' % (filename, reader.line_num, e));\n",
" except IOError as e:\n",
" sys.exit('%s does not exist' % e);\n",
" return data_O;\n",
"\n",
"def main_singleFile():\n",
" \"\"\"process RNAseq data using docker\n",
" e.g. python3 run_rnaseq_docker.py '140818_11_OxicEvo04EcoliGlcM9_Broth-4' '/media/proline/dmccloskey/Resequencing_RNA/fastq/140818_11_OxicEvo04EcoliGlcM9_Broth-4/' 'e_coli' '/media/proline/dmccloskey/Resequencing_RNA/indexes/' '/home/douglas/Documents/Resequencing_RNA/output/' '/media/proline/dmccloskey/Resequencing_RNA/fastq/140818_11_OxicEvo04EcoliGlcM9_Broth-4/' 2 3\n",
" \"\"\"\n",
" from argparse import ArgumentParser\n",
" parser = ArgumentParser(\"process RNAseq data\")\n",
" parser.add_argument(\"basename_I\", help=\"\"\"base name of the fastq files\"\"\")\n",
" parser.add_argument(\"host_dirname_I\", help=\"\"\"directory for .fastq files\"\"\")\n",
" parser.add_argument(\"organism_I\", help=\"\"\"name of index\"\"\")\n",
" parser.add_argument(\"host_indexes_dir_I\", help=\"\"\"directory for indexes\"\"\")\n",
" parser.add_argument(\"host_dirname_O\", help=\"\"\"location for output on the host\"\"\")\n",
" parser.add_argument(\"paired_I\", help=\"\"\"unpaired, paired, or mixed end reads (i.e., 'unpaired', 'paired', 'mixed')\"\"\")\n",
" parser.add_argument(\"threads_I\", help=\"\"\"number of processors to use\"\"\")\n",
" parser.add_argument(\"trim3_I\", help=\"\"\"trim 3 bases off of each end\"\"\")\n",
" parser.add_argument(\"library_type_I\", help=\"\"\"the library type\"\"\")\n",
" parser.add_argument(\"index_type_I\", help=\"\"\"index file type (.gtf or .gff)\"\"\")\n",
" parser.add_argument(\"bowtie_options_I\", help=\"\"\"additional command line arguments not explicitly provided\"\"\")\n",
" parser.add_argument(\"cufflinks_options_I\", help=\"\"\"additional command line arguments not explicitly provided\"\"\")\n",
" args = parser.parse_args()\n",
" run_rnaseq_docker(args.basename_I,args.host_dirname_I,\n",
" args.organism_I,args.host_indexes_dir_I,\n",
" args.host_dirname_O,\n",
" args.paired_I,\n",
" args.threads_I,args.trim3_I,\n",
" args.library_type_I,\n",
" args.index_type_I,\n",
" args.bowtie_options_I,\n",
" args.cufflinks_options_I);\n",
"\n",
"def main_batchFile():\n",
" \"\"\"process RNAseq data using docker in batch\n",
" e.g. python3 run_rnaseq_docker.py '/media/proline/dmccloskey/Resequencing_RNA/rnaseq_files.csv' []\n",
" \"\"\"\n",
" from argparse import ArgumentParser\n",
" parser = ArgumentParser(\"process RNAseq data\")\n",
" parser.add_argument(\"filename_csv_I\", help=\"\"\"list of files and parameters in a .csv\"\"\")\n",
" parser.add_argument(\"filename_list_I\", help=\"\"\"list of files and parameters e.g. [{basename_I:...,host_dirname_I:...,},...]\"\"\")\n",
" args = parser.parse_args()\n",
" run_rnaseq_docker_fromCsvOrFile(args.filename_csv_I,args.filename_list_I);\n",
"\n",
"if __name__ == \"__main__\":\n",
" #main_singleFile();\n",
" main_batchFile();"
] | [
0,
0,
0.045454545454545456,
0,
0.029411764705882353,
0.041666666666666664,
0.05,
0.03571428571428571,
0.041666666666666664,
0.037037037037037035,
0.06451612903225806,
0.024390243902439025,
0.0967741935483871,
0.0967741935483871,
0.08823529411764706,
0.09090909090909091,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007407407407407408,
0,
0.009708737864077669,
0.011627906976744186,
0.007407407407407408,
0,
0.014925373134328358,
0.014084507042253521,
0.022727272727272728,
0.02040816326530612,
0.05263157894736842,
0.02127659574468085,
0.02040816326530612,
0.018867924528301886,
0.01818181818181818,
0.025,
0,
0,
0,
0.03225806451612903,
0.2,
0.02857142857142857,
0.009009009009009009,
0.012987012987012988,
0.07692307692307693,
0.04838709677419355,
0.031914893617021274,
0.0380952380952381,
0.037037037037037035,
0.017857142857142856,
0.018691588785046728,
0.010101010101010102,
0.03571428571428571,
0.027777777777777776,
0.010416666666666666,
0.021052631578947368,
0.012658227848101266,
0.03571428571428571,
0.013888888888888888,
0.058823529411764705,
0.038461538461538464,
0.058823529411764705,
0.038461538461538464,
0.058823529411764705,
0.038461538461538464,
0.06060606060606061,
0.038461538461538464,
0.018867924528301886,
0.04081632653061224,
0.018518518518518517,
0.05,
0.2,
0.08641975308641975,
0,
0,
0,
0,
0,
0.019230769230769232,
0.0196078431372549,
0.02631578947368421,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01818181818181818,
0.1,
0.041666666666666664,
0,
0.058823529411764705,
0,
0,
0.021739130434782608,
0,
0.023809523809523808,
0,
0.025,
0,
0.024096385542168676,
0,
0.023255813953488372,
0.05263157894736842,
0,
0.043478260869565216,
0,
0.0026595744680851063,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0.011627906976744186,
0.008130081300813009,
0,
0,
0,
0.012048192771084338,
0.008771929824561403,
0.008547008547008548,
0,
0.01694915254237288,
0.015873015873015872,
0,
0,
0.0196078431372549,
0,
0,
0,
0.02040816326530612,
0,
0.045454545454545456,
0,
0.009708737864077669,
0,
0,
0,
0.010752688172043012,
0.007575757575757576,
0,
0.02531645569620253,
0,
0.037037037037037035,
0.041666666666666664,
0.09523809523809523
] | 162 | 0.021488 | false |
from docker import Client
from json import JSONEncoder
from platform import node
# Generates image inventory data in the same format as the provider (used in unit tests to validate OMI provider)
# NOTE(review): this is Python 2 code (print statement) and talks to the local
# docker daemon through the Client imported above.

# divisor to convert byte counts to megabytes
NUMBYTESPERMB = 1048576

c = Client(base_url="unix://var/run/docker.sock")

imageDict = c.images()
tempDict = dict()

# Build one record per image, keyed by image id; the container-state counters
# start at zero and are filled in from the container list below.
for image in imageDict:
	result = dict()
	result["InstanceID"] = image["Id"]

	# last repo tag, e.g. "registry/repo:tag" -> ["registry", "repo", "tag"]
	name = image["RepoTags"][-1].replace("/", ":").split(":")
	result["Image"] = name[-2]
	result["ImageTag"] = name[-1]
	result["Repository"] = name[0] if len(name) == 3 else ""
	result["Computer"] = node()
	result["Running"] = 0
	result["Stopped"] = 0
	result["Failed"] = 0
	result["Paused"] = 0
	result["Total"] = 0
	result["ImageSize"] = str(image["Size"] / NUMBYTESPERMB) + " MB"
	result["VirtualSize"] = str(image["VirtualSize"] / NUMBYTESPERMB) + " MB"

	tempDict[image["Id"]] = result

containers = c.containers(quiet = True, all = True)

# Tally container states per image: running, else paused, else failed
# (non-zero exit code), else stopped.
for container in containers:
	inspect = c.inspect_container(container)

	if inspect["State"]["Running"]:
		tempDict[inspect["Image"]]["Running"] += 1
	else:
		if inspect["State"]["Paused"]:
			tempDict[inspect["Image"]]["Paused"] += 1
		else:
			if inspect["State"]["ExitCode"]:
			 tempDict[inspect["Image"]]["Failed"] += 1
			else:
			 tempDict[inspect["Image"]]["Stopped"] += 1

	tempDict[inspect["Image"]]["Total"] += 1

j = JSONEncoder()

# Emit one JSON object per line. NOTE(review): the `or` means untagged
# ("<none>") images are skipped only when they also have zero containers --
# confirm this is the intended filter.
for entry in tempDict.values():
	if entry["Total"] or (entry["Image"] != "<none>"):
	    print j.encode(entry)
"from docker import Client\r\n",
"from json import JSONEncoder\r\n",
"from platform import node\r\n",
"\r\n",
"# Generates image inventory data in the same format as the provider (used in unit tests to validate OMI provider)\r\n",
"\r\n",
"NUMBYTESPERMB = 1048576\r\n",
"\r\n",
"c = Client(base_url=\"unix://var/run/docker.sock\")\r\n",
"\r\n",
"imageDict = c.images()\r\n",
"tempDict = dict()\r\n",
"\r\n",
"for image in imageDict:\r\n",
"\tresult = dict()\r\n",
"\tresult[\"InstanceID\"] = image[\"Id\"]\r\n",
"\r\n",
"\tname = image[\"RepoTags\"][-1].replace(\"/\", \":\").split(\":\")\r\n",
"\tresult[\"Image\"] = name[-2]\r\n",
"\tresult[\"ImageTag\"] = name[-1]\r\n",
"\tresult[\"Repository\"] = name[0] if len(name) == 3 else \"\"\r\n",
"\tresult[\"Computer\"] = node()\r\n",
"\tresult[\"Running\"] = 0\r\n",
"\tresult[\"Stopped\"] = 0\r\n",
"\tresult[\"Failed\"] = 0\r\n",
"\tresult[\"Paused\"] = 0\r\n",
"\tresult[\"Total\"] = 0\r\n",
"\tresult[\"ImageSize\"] = str(image[\"Size\"] / NUMBYTESPERMB) + \" MB\"\r\n",
"\tresult[\"VirtualSize\"] = str(image[\"VirtualSize\"] / NUMBYTESPERMB) + \" MB\"\r\n",
"\r\n",
"\ttempDict[image[\"Id\"]] = result\r\n",
"\r\n",
"containers = c.containers(quiet = True, all = True)\r\n",
"\r\n",
"for container in containers:\r\n",
"\tinspect = c.inspect_container(container)\r\n",
"\r\n",
"\tif inspect[\"State\"][\"Running\"]:\r\n",
"\t\ttempDict[inspect[\"Image\"]][\"Running\"] += 1\r\n",
"\telse:\r\n",
"\t\tif inspect[\"State\"][\"Paused\"]:\r\n",
"\t\t\ttempDict[inspect[\"Image\"]][\"Paused\"] += 1\r\n",
"\t\telse:\r\n",
"\t\t\tif inspect[\"State\"][\"ExitCode\"]:\r\n",
"\t\t\t tempDict[inspect[\"Image\"]][\"Failed\"] += 1\r\n",
"\t\t\telse:\r\n",
"\t\t\t tempDict[inspect[\"Image\"]][\"Stopped\"] += 1\r\n",
"\r\n",
"\ttempDict[inspect[\"Image\"]][\"Total\"] += 1\r\n",
"\r\n",
"j = JSONEncoder()\r\n",
"\r\n",
"for entry in tempDict.values():\r\n",
"\tif entry[\"Total\"] or (entry[\"Image\"] != \"<none>\"):\r\n",
"\t print j.encode(entry)"
] | [
0,
0,
0,
0,
0.008695652173913044,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0.02702702702702703,
0,
0.016666666666666666,
0.034482758620689655,
0.03125,
0.01694915254237288,
0.03333333333333333,
0.041666666666666664,
0.041666666666666664,
0.043478260869565216,
0.043478260869565216,
0.045454545454545456,
0.014925373134328358,
0.013157894736842105,
0,
0.030303030303030304,
0,
0.07547169811320754,
0,
0,
0.023255813953488372,
0,
0.029411764705882353,
0.021739130434782608,
0.125,
0.029411764705882353,
0.021739130434782608,
0.1111111111111111,
0.02702702702702703,
0.061224489795918366,
0.1,
0.06,
0,
0.023255813953488372,
0,
0,
0,
0,
0.018867924528301886,
0.11538461538461539
] | 55 | 0.024382 | false |
from django.db import models
from django.utils.translation import ugettext as _
from django_countries.fields import CountryField
# Choices for ContactInfo.type: (stored value, translated display label).
CONTACT_TYPES = (
    ('phone', _('Phone')),
    ('email', _('E-Mail')),
    ('other', _('Other')),
)
class Category(models.Model):
    """A category used to classify directory entries."""

    class Meta:
        verbose_name_plural = "categories"

    name = models.CharField(verbose_name=_('Category name'), max_length=100)
    slug = models.SlugField(verbose_name=_('Slug'), max_length=100)

    def __str__(self):
        return self.name
class Directory(models.Model):
    """A service provider listed in the directory."""

    class Meta:
        verbose_name_plural = "directories"

    service_provider = models.CharField(verbose_name=_('Service provider name'), max_length=255)
    slug = models.SlugField(verbose_name=_('slug'), max_length=255)
    description = models.TextField(verbose_name=_('Description'), blank=True, null=True)
    service_category = models.ManyToManyField(Category, verbose_name=_('Categories'))
    link = models.URLField(verbose_name=_('Link'), blank=True, null=True)

    def __str__(self):
        return self.service_provider
class ContactInfo(models.Model):
    """One way of contacting a service provider (see CONTACT_TYPES)."""

    directory = models.ForeignKey(Directory, verbose_name=_('Service provider'), related_name='contacts')
    type = models.CharField(verbose_name=_('Type'), max_length=5, choices=CONTACT_TYPES)
    value = models.CharField(verbose_name=_('Value'), max_length=400)

    def __str__(self):
        return "%s %s" % (self.directory, self.type)
class Address(models.Model):
    """A postal address (with optional coordinates) of a service provider."""

    class Meta:
        verbose_name_plural = "addresses"

    # BUGFIX: verbose_name was a plain string ('Service provider') instead of
    # a translatable one; wrapped in _() for consistency with all other fields.
    directory = models.ForeignKey(Directory, verbose_name=_('Service provider'), related_name='addresses')
    street = models.CharField(_('Street address'), max_length=255)
    street2 = models.CharField(_('Second line'), max_length=255, blank=True, null=True)
    city = models.CharField(_('City'), max_length=100)
    postal_code = models.CharField(_('postal_code'), max_length=10, blank=True, null=True)
    country = CountryField(verbose_name=_('Country'))

    longitude = models.FloatField(_('Longitude'), blank=True, null=True)
    latitude = models.FloatField(_('Latitude'), blank=True, null=True)

    def __str__(self):
        # NOTE(review): street2/postal_code are nullable and render as the
        # string "None" here -- confirm that is acceptable for display.
        return "%s, %s, %s, %s" % (self.street, self.street2, self.city, self.postal_code)
| [
"from django.db import models\n",
"from django.utils.translation import ugettext as _\n",
"\n",
"from django_countries.fields import CountryField\n",
"\n",
"\n",
"CONTACT_TYPES = (\n",
" ('phone', _('Phone')),\n",
" ('email', _('E-Mail')),\n",
" ('other', _('Other')),\n",
")\n",
"\n",
"\n",
"class Category(models.Model):\n",
"\n",
" class Meta:\n",
" verbose_name_plural = \"categories\"\n",
"\n",
" name = models.CharField(_('Category name'), max_length=100)\n",
" slug = models.SlugField(_('Slug'), max_length=100)\n",
"\n",
" def __str__(self):\n",
" return self.name\n",
"\n",
"\n",
"class Directory(models.Model):\n",
"\n",
" class Meta:\n",
" verbose_name_plural = \"directories\"\n",
"\n",
" service_provider = models.CharField(_('Service provider name'), max_length=255)\n",
" slug = models.SlugField(_('slug'), max_length=255)\n",
" description = models.TextField(_('Description'), blank=True, null=True)\n",
" service_category = models.ManyToManyField(Category, verbose_name=_('Categories'))\n",
" link = models.URLField(_('Link'), blank=True, null=True)\n",
"\n",
" def __str__(self):\n",
" return self.service_provider\n",
"\n",
"\n",
"class ContactInfo(models.Model):\n",
"\n",
" directory = models.ForeignKey(Directory, verbose_name=_('Service provider'), related_name='contacts')\n",
" type = models.CharField(_('Type'), max_length=5, choices=CONTACT_TYPES)\n",
" value = models.CharField(_('Value'), max_length=400)\n",
"\n",
" def __str__(self):\n",
" return self.directory.__str__() + \" \" + self.type\n",
"\n",
"\n",
"class Address(models.Model):\n",
"\n",
" class Meta:\n",
" verbose_name_plural = \"addresses\"\n",
"\n",
" directory = models.ForeignKey(Directory, verbose_name=('Service provider'), related_name='addresses')\n",
" street = models.CharField(_('Street address'), max_length=255)\n",
" street2 = models.CharField(_('Second line'), max_length=255, blank=True, null=True)\n",
" city = models.CharField(_('City'), max_length=100)\n",
" postal_code = models.CharField(_('postal_code'), max_length=10, blank=True, null=True)\n",
" country = CountryField(verbose_name=_('Country'))\n",
"\n",
" longitude = models.FloatField(_('Longitude'), blank=True, null=True)\n",
" latitude = models.FloatField(_('Latitude'), blank=True, null=True)\n",
"\n",
" def __str__(self):\n",
" return \"%s, %s, %s, %s\" % (self.street, self.street2, self.city, self.postal_code)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0.009433962264150943,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009433962264150943,
0,
0.011363636363636364,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0.01098901098901099
] | 67 | 0.00113 | false |
from django.conf import settings
from django.core.cache import cache
from django.db.models import Count, F, Q
from django.shortcuts import render, redirect
from django.views.generic import TemplateView
from django.http import HttpResponse
from common.views import AbsTargetSelection
from common.definitions import FULL_AMINO_ACIDS, STRUCTURAL_RULES, STRUCTURAL_SWITCHES
from common.selection import Selection
Alignment = getattr(__import__(
'common.alignment_' + settings.SITE_NAME,
fromlist=['Alignment']
), 'Alignment')
from seqsign.sequence_signature import SequenceSignature, SignatureMatch
from alignment.functions import get_proteins_from_selection
from alignment.views import TargetSelectionGprotein, TargetSelectionArrestin
from construct.views import create_structural_rule_trees, ConstructMutation
from contactnetwork.models import InteractingResiduePair
from interaction.models import ResidueFragmentInteraction
from mutation.models import MutationExperiment
from mutational_landscape.models import NaturalMutations, CancerMutations, DiseaseMutations, PTMs, NHSPrescribings
from protein.models import ProteinSegment, Protein, ProteinFamily
from residue.models import Residue,ResidueNumberingScheme, ResiduePositionSet, ResidueSet
from collections import OrderedDict
import html
import re
import time
from io import BytesIO
import xlsxwriter
class TargetSelection(AbsTargetSelection):
    # Plain target-selection page; inherits all behaviour from AbsTargetSelection.
    pass
class ResidueTablesSelection(AbsTargetSelection):
    """Two-step selection page feeding the residue-number tables."""

    # Left panel
    step = 1
    number_of_steps = 2
    docs = 'generic_numbering.html'
    description = (
        'Select receptors to index by searching or browsing in the middle column. You can select entire'
        ' receptor families and/or individual receptors.\n\nSelected receptors will appear in the right column,'
        ' where you can edit the list.\n\nSelect which numbering schemes to use in the middle column.\n\nOnce you'
        ' have selected all your receptors, click the green button.'
    )

    # Middle section
    numbering_schemes = True

    # Buttons
    buttons = {
        'continue': {
            'label': 'Show residue numbers',
            'url': '/residue/residuetabledisplay',
            'color': 'success',
        }
    }
class ResidueGprotSelection(TargetSelectionGprotein):
    """Single-step selection page for G-protein residue tables."""

    # Left panel
    step = 1
    number_of_steps = 1
    docs = 'generic_numbering.html'
    description = (
        'Select receptors to index by searching or browsing in the middle column. You can select entire'
        ' receptor families and/or individual receptors.\n\nSelected receptors will appear in the right column,'
        ' where you can edit the list.\n\nOnce you'
        ' have selected all your receptors, click the green button.'
    )

    # Buttons
    buttons = {
        'continue': {
            'label': 'Show residue numbers',
            'url': '/residue/residuetabledisplay',
            'color': 'success',
        }
    }
# Preload the family tree rooted at slug "100_001" for the selection page.
# Best-effort: skipped silently when the lookup fails (e.g. database not
# migrated yet at import time).
try:
    if ProteinFamily.objects.filter(slug="100_001").exists():
        ppf = ProteinFamily.objects.get(slug="100_001")
        pfs = ProteinFamily.objects.filter(parent=ppf.id)
        ps = Protein.objects.filter(family=ppf)
        tree_indent_level = []
        action = 'expand'
        # remove the parent family (for all other families than the root of the tree, the parent should be shown)
        del ppf
except Exception:
    # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt are no
    # longer swallowed during module import
    pass
class ResidueArrestinSelection(TargetSelectionArrestin):
    """Single-step selection page for arrestin residue tables."""

    # Left panel
    step = 1
    number_of_steps = 1
    docs = 'generic_numbering.html'
    description = (
        'Select receptors to index by searching or browsing in the middle column. You can select entire'
        ' receptor families and/or individual receptors.\n\nSelected receptors will appear in the right column,'
        ' where you can edit the list.\n\nOnce you'
        ' have selected all your receptors, click the green button.'
    )

    # Buttons
    buttons = {
        'continue': {
            'label': 'Show residue numbers',
            'url': '/residue/residuetabledisplay',
            'color': 'success',
        }
    }
# Preload the family tree rooted at slug "200_000" for the selection page.
# Best-effort: skipped silently when the lookup fails (e.g. database not
# migrated yet at import time).
try:
    if ProteinFamily.objects.filter(slug="200_000").exists():
        ppf = ProteinFamily.objects.get(slug="200_000")
        pfs = ProteinFamily.objects.filter(parent=ppf.id)
        ps = Protein.objects.filter(family=ppf)
        tree_indent_level = []
        action = 'expand'
        # remove the parent family (for all other families than the root of the tree, the parent should be shown)
        del ppf
except Exception:
    # was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt are no
    # longer swallowed during module import
    pass
class ResidueTablesDisplay(TemplateView):
    """
    A class rendering the residue numbering table.

    Requires a target selection in the session; `get` redirects to the
    selection page when none is present.
    """
    template_name = 'residue_table.html'
@staticmethod
def checkOrigin(target_item):
if str(target_item).split('_')[0].startswith('arr'):
output = 'arrestins'
elif str(target_item).split('_')[0].startswith('gna'):
output = "gprot"
else:
output = "GPCR"
return output
    def get(self, request, *args, **kwargs):
        """Render the residue table, or redirect to the selection page when
        no targets have been selected yet."""
        # get the user selection from session
        simple_selection = self.request.session.get('selection', False)
        if simple_selection == False or not simple_selection.targets :
            return redirect("residuetableselect")
        # check rendering
        context = self.get_context_data(**kwargs)
        return render(request, self.template_name, context)
def get_context_data(self, **kwargs):
"""
Get the selection data (proteins and numbering schemes) and prepare it for display.
"""
context = super().get_context_data(**kwargs)
# get the user selection from session
simple_selection = self.request.session.get('selection', False)
origin_checked = False
# local protein list
proteins = []
# flatten the selection into individual proteins
for target in simple_selection.targets:
if target.type == 'protein':
proteins.append(target.item)
if origin_checked == False:
signalling_data = self.checkOrigin(target.item)
origin_checked = True
elif target.type == 'family':
# species filter
species_list = []
for species in simple_selection.species:
species_list.append(species.item)
# annotation filter
protein_source_list = []
for protein_source in simple_selection.annotation:
protein_source_list.append(protein_source.item)
if species_list:
family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
species__in=(species_list),
source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')
else:
family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')
for fp in family_proteins:
proteins.append(fp)
if origin_checked == False:
signalling_data = self.checkOrigin(fp)
origin_checked = True
longest_name = 0
species_list = {}
for protein in proteins:
if protein.species.common_name not in species_list:
if len(protein.species.common_name)>10 and len(protein.species.common_name.split())>1:
name = protein.species.common_name.split()[0][0]+". "+" ".join(protein.species.common_name.split()[1:])
if len(" ".join(protein.species.common_name.split()[1:]))>11:
name = protein.species.common_name.split()[0][0]+". "+" ".join(protein.species.common_name.split()[1:])[:8]+".."
else:
name = protein.species.common_name
species_list[protein.species.common_name] = name
else:
name = species_list[protein.species.common_name]
if len(re.sub('<[^>]*>', '', protein.name)+" "+name)>longest_name:
longest_name = len(re.sub('<[^>]*>', '', protein.name)+" "+name)
# get the selection from session
selection = Selection()
if simple_selection:
selection.importer(simple_selection)
# # extract numbering schemes and proteins
if signalling_data == 'GPCR':
numbering_schemes = [x.item for x in selection.numbering_schemes]
elif signalling_data == 'gprot':
numbering_schemes = ResidueNumberingScheme.objects.filter(slug='cgn')
elif signalling_data == 'arrestins':
numbering_schemes = ResidueNumberingScheme.objects.filter(slug='can')
# # get the helices (TMs only at first)
# segments = ProteinSegment.objects.filter(category='helix', proteinfamily='GPCR')
# Get all the segments
if signalling_data == 'GPCR':
segments = ProteinSegment.objects.filter(proteinfamily='GPCR')
elif signalling_data == 'gprot':
segments = ProteinSegment.objects.filter(proteinfamily='Alpha')
elif signalling_data == 'arrestins':
segments = ProteinSegment.objects.filter(proteinfamily='Arrestin')
if ResidueNumberingScheme.objects.get(slug=settings.DEFAULT_NUMBERING_SCHEME) in numbering_schemes:
default_scheme = ResidueNumberingScheme.objects.get(slug=settings.DEFAULT_NUMBERING_SCHEME)
else:
default_scheme = numbering_schemes[0]
# prepare the dictionary
# each segment has a dictionary of positions
# default_generic_number or first scheme on the list is the key
# value is a dictionary of other gn positions and residues from selected proteins
data = OrderedDict()
for segment in segments:
data[segment.slug] = OrderedDict()
residues = Residue.objects.filter(protein_segment=segment, protein_conformation__protein__in=proteins, generic_number__isnull=False).prefetch_related('protein_conformation__protein', 'protein_conformation__state', 'protein_segment',
'generic_number__scheme', 'display_generic_number__scheme', 'alternative_generic_numbers__scheme')
for scheme in numbering_schemes:
if scheme == default_scheme and scheme.slug == settings.DEFAULT_NUMBERING_SCHEME:
for pos in list(set([x.generic_number.label for x in residues if x.protein_segment == segment])):
data[segment.slug][pos] = {scheme.slug : pos, 'seq' : ['-']*len(proteins)}
elif scheme == default_scheme:
for pos in list(set([x.generic_number.label for x in residues if x.protein_segment == segment])):
data[segment.slug][pos] = {scheme.slug : pos, 'seq' : ['-']*len(proteins)}
for residue in residues:
alternatives = residue.alternative_generic_numbers.all()
#probably no alternatives for GProts and Arrestins (?)
pos = residue.generic_number
if len(alternatives) == 0:
data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)
else:
for alternative in alternatives:
scheme = alternative.scheme
if default_scheme.slug == settings.DEFAULT_NUMBERING_SCHEME:
pos = residue.generic_number
if scheme == pos.scheme:
data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)
else:
if scheme.slug not in data[segment.slug][pos.label].keys():
data[segment.slug][pos.label][scheme.slug] = alternative.label
if alternative.label not in data[segment.slug][pos.label][scheme.slug]:
data[segment.slug][pos.label][scheme.slug] += " "+alternative.label
data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)
else:
if scheme.slug not in data[segment.slug][pos.label].keys():
data[segment.slug][pos.label][scheme.slug] = alternative.label
if alternative.label not in data[segment.slug][pos.label][scheme.slug]:
data[segment.slug][pos.label][scheme.slug] += " "+alternative.label
data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)
# Preparing the dictionary of list of lists. Dealing with tripple nested dictionary in django templates is a nightmare
flattened_data = OrderedDict.fromkeys([x.slug for x in segments], [])
for s in iter(flattened_data):
flattened_data[s] = [[data[s][x][y.slug] if y.slug in data[s][x] else '-' for y in numbering_schemes ] + data[s][x]['seq'] for x in sorted(data[s]) ]
#Purging the empty segments
clean_dict = OrderedDict()
clean_segments = []
for s in iter(flattened_data):
if flattened_data[s] != []:
clean_dict[s] = flattened_data[s]
clean_segments.append(s)
if signalling_data == 'GPCR':
context['header'] = zip([x.short_name for x in numbering_schemes] + ["<b>"+x.name
.replace(' receptor','')
.replace('-adrenoceptor','')
.replace('Olfactory','OLF')
.replace('Short-wave-sensitive', 'SWS')
.replace('Medium-wave-sensitive', 'MWS')
.replace('Long-wave-sensitive', 'LWS')
+"</b><br /> "+species_list[x.species.common_name] for x in proteins], [x.name for x in numbering_schemes] + [x.name for x in proteins],[x.name for x in numbering_schemes] + [x.entry_name for x in proteins])
context['col_length'] = len(proteins)+len(numbering_schemes)+1
elif signalling_data == 'gprot':
context['header'] = zip(["Generic<br />residue<br />number"] + ["<b>"+x.family.name.replace('NA','<sub>α')+"</sub></b><br />"+species_list[x.species.common_name]+"" for x in proteins],[x.name for x in numbering_schemes] + [x.name for x in proteins],[x.name for x in numbering_schemes] + [x.entry_name for x in proteins])
context['col_length'] = len(proteins)+1
elif signalling_data == 'arrestins':
context['header'] = zip(["Generic<br />residue<br />number"] + ["<b>"+x.name.replace('Beta','β')+"</b><br />"+species_list[x.species.common_name]+"" for x in proteins], [x.name for x in numbering_schemes] + [x.name for x in proteins],[x.name for x in numbering_schemes] + [x.entry_name for x in proteins])
context['col_length'] = len(proteins)+1
context['segments'] = clean_segments
context['data'] = clean_dict
context['number_of_schemes'] = len(numbering_schemes)
context['longest_name'] = {'div' : longest_name*2, 'height': longest_name*2+80}
context['signalling'] = signalling_data
return context
class ResidueFunctionBrowser(TemplateView):
    """
    Per generic position summary of functional information.

    Aggregates, for every class A generic residue number, counts of ligand
    contacts, mutations, PTMs, contacts, sequence signatures and membrane
    placement, and exposes them as one row per position in the context.
    """
    template_name = 'residue_function_browser.html'

    def get_context_data (self, **kwargs):
        # setup caches
        # The heavy DB aggregation below is computed once and cached for a week
        # under this key; uncomment the override to force a refresh.
        cache_name = "RFB"
        rfb_panel = cache.get(cache_name)
        # rfb_panel = None
        if rfb_panel == None:
            rfb_panel = {}

            # Signatures
            rfb_panel["signatures"] = {}

            # Grab relevant segments
            segments = list(ProteinSegment.objects.filter(proteinfamily='GPCR'))

            # Grab High/Low CA GPCRs (class A)
            # Hand-picked receptor sets; CA presumably refers to constitutive
            # activity — TODO confirm with the curators.
            high_ca = ["5ht2c_human", "acm4_human", "drd1_human", "fpr1_human", "ghsr_human", "cnr1_human", "aa1r_human", "gpr6_human", "gpr17_human", "gpr87_human"]
            low_ca = ["agtr1_human", "ednrb_human", "gnrhr_human", "acthr_human", "v2r_human", "gp141_human", "gp182_human"]

            # Signature High vs Low CA
            high_ca_gpcrs = Protein.objects.filter(entry_name__in=high_ca).select_related('residue_numbering_scheme', 'species')
            low_ca_gpcrs = Protein.objects.filter(entry_name__in=low_ca).select_related('residue_numbering_scheme', 'species')

            signature = SequenceSignature()
            signature.setup_alignments(segments, high_ca_gpcrs, low_ca_gpcrs)
            signature.calculate_signature()
            rfb_panel["signatures"]["cah"] = signature.signature
            rfb_panel["signatures"]["cah_positions"] = signature.common_gn

            # Same signature with the sets swapped (low vs high).
            signature = SequenceSignature()
            signature.setup_alignments(segments, low_ca_gpcrs, high_ca_gpcrs)
            signature.calculate_signature()
            rfb_panel["signatures"]["cal"] = signature.signature
            rfb_panel["signatures"]["cal_positions"] = signature.common_gn

            # Grab Gi/Gs/Gq/GI12 GPCR sets (class A)
            human_class_a_gpcrs = Protein.objects.filter(species_id=1, sequence_type_id=1, family__slug__startswith='001').distinct().prefetch_related('proteingprotein_set', 'residue_numbering_scheme')
            gs = list(human_class_a_gpcrs.filter(proteingprotein__slug="100_001_001"))
            gio = list(human_class_a_gpcrs.filter(proteingprotein__slug="100_001_002"))
            gq = list(human_class_a_gpcrs.filter(proteingprotein__slug="100_001_003"))
            g12 = list(human_class_a_gpcrs.filter(proteingprotein__slug="100_001_004"))
            all = set(gs + gio + gq + g12)

            # Create sequence signatures for the G-protein sets
            # NOTE(review): the local lists gs/gio/gq/g12 are fetched by name
            # through locals() — keep the variable names in sync with this list.
            for gprotein in ["gs", "gio", "gq", "g12"]:
                # print("Processing " + gprotein)
                # Signature receptors specific for a G-protein vs all others
                signature = SequenceSignature()
                signature.setup_alignments(segments, locals()[gprotein], all.difference(locals()[gprotein]))
                signature.calculate_signature()
                rfb_panel["signatures"][gprotein] = signature.signature
                rfb_panel["signatures"][gprotein + "_positions"] = signature.common_gn

            # Add class A alignment features
            # (aligned against a single arbitrary member to obtain the
            # class-wide consensus positions)
            signature = SequenceSignature()
            signature.setup_alignments(segments, human_class_a_gpcrs, [list(human_class_a_gpcrs)[0]])
            signature.calculate_signature()
            rfb_panel["class_a_positions"] = signature.common_gn
            rfb_panel["class_a_aa"] = signature.aln_pos.consensus
            rfb_panel["class_a_prop"] = signature.features_consensus_pos

            # Add X-ray ligand contacts
            # Optionally include the curation with the following filter: structure_ligand_pair__annotated=True
            class_a_interactions = ResidueFragmentInteraction.objects.filter(
                structure_ligand_pair__structure__protein_conformation__protein__family__slug__startswith="001").exclude(interaction_type__type='hidden')\
                .values("rotamer__residue__generic_number__label").annotate(unique_receptors=Count("rotamer__residue__protein_conformation__protein__family_id", distinct=True))

            # Each of the maps below is {generic number label: count of unique
            # receptors with that annotation}.
            rfb_panel["ligand_binding"] = {entry["rotamer__residue__generic_number__label"] : entry["unique_receptors"] for entry in list(class_a_interactions)}

            # Add genetic variations
            all_nat_muts = NaturalMutations.objects.filter(protein__family__slug__startswith="001").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
            rfb_panel["natural_mutations"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_nat_muts)}

            # Add PTMs (all, plus one map per modification type)
            all_ptms = PTMs.objects.filter(protein__family__slug__startswith="001").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
            rfb_panel["ptms"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_ptms)}
            all_phos = PTMs.objects.filter(protein__family__slug__startswith="001").filter(modification="Phosphorylation").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
            rfb_panel["phos"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_phos)}
            all_palm = PTMs.objects.filter(protein__family__slug__startswith="001").filter(modification="Palmitoylation").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
            rfb_panel["palm"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_palm)}
            all_glyc = PTMs.objects.filter(protein__family__slug__startswith="001").filter(modification__endswith="Glycosylation").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
            rfb_panel["glyc"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_glyc)}
            all_ubiq = PTMs.objects.filter(protein__family__slug__startswith="001").filter(modification="Ubiquitylation").values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
            rfb_panel["ubiq"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_ubiq)}

            # Thermostabilizing
            all_thermo = ConstructMutation.objects.filter(construct__protein__family__slug__startswith="001", effects__slug='thermostabilising')\
                .values("residue__generic_number__label").annotate(unique_receptors=Count("construct__protein__family_id", distinct=True))
            rfb_panel["thermo_mutations"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_thermo)}

            # Class A ligand mutations >5 fold effect - count unique receptors
            all_ligand_mutations = MutationExperiment.objects.filter(Q(foldchange__gte = 5) | Q(foldchange__lte = -5), protein__family__slug__startswith="001")\
                .values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
            rfb_panel["ligand_mutations"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_ligand_mutations)}

            # Class A mutations with >30% increase/decrease basal activity
            all_basal_mutations = MutationExperiment.objects.filter(Q(opt_basal_activity__gte = 130) | Q(opt_basal_activity__lte = 70), protein__family__slug__startswith="001")\
                .values("residue__generic_number__label").annotate(unique_receptors=Count("protein__family_id", distinct=True))
            rfb_panel["basal_mutations"] = {entry["residue__generic_number__label"] : entry["unique_receptors"] for entry in list(all_basal_mutations)}

            # Intrasegment contacts
            # (pairs whose two residues are in DIFFERENT segments — see the ~Q)
            all_contacts = InteractingResiduePair.objects.filter(~Q(res1__protein_segment_id = F('res2__protein_segment_id')), referenced_structure__protein_conformation__protein__family__slug__startswith="001")\
                .values("res1__generic_number__label").annotate(unique_receptors=Count("referenced_structure__protein_conformation__protein__family_id", distinct=True))
            rfb_panel["intrasegment_contacts"] = {entry["res1__generic_number__label"] : entry["unique_receptors"] for entry in list(all_contacts)}

            # Active/Inactive contacts
            all_active_contacts = InteractingResiduePair.objects.filter(~Q(res2__generic_number__label = None), ~Q(res1__generic_number__label = None),\
                referenced_structure__state__slug = "active", referenced_structure__protein_conformation__protein__family__slug__startswith="001")\
                .values("res1__generic_number__label", "res2__generic_number__label")

            # OPTIMIZE
            # Group contact partners per position: {res1 label: set of res2 labels}.
            active_contacts = {}
            for entry in list(all_active_contacts):
                if entry["res1__generic_number__label"] not in active_contacts:
                    active_contacts[entry["res1__generic_number__label"]] = set()
                active_contacts[entry["res1__generic_number__label"]].update([entry["res2__generic_number__label"]])
            rfb_panel["active_contacts"] = active_contacts

            all_inactive_contacts = InteractingResiduePair.objects.filter(~Q(res2__generic_number__label = None), ~Q(res1__generic_number__label = None),\
                referenced_structure__state__slug = "inactive", referenced_structure__protein_conformation__protein__family__slug__startswith="001")\
                .values("res1__generic_number__label", "res2__generic_number__label")

            # OPTIMIZE
            inactive_contacts = {}
            for entry in list(all_inactive_contacts):
                if entry["res1__generic_number__label"] not in inactive_contacts:
                    inactive_contacts[entry["res1__generic_number__label"]] = set()
                inactive_contacts[entry["res1__generic_number__label"]].update([entry["res2__generic_number__label"]])
            rfb_panel["inactive_contacts"] = inactive_contacts

            cache.set(cache_name, rfb_panel, 3600*24*7) # cache a week

        # Everything below runs on every request (not cached).
        # Other rules
        # structural_rule_tree = create_structural_rule_trees(STRUCTURAL_RULES)

        ######## CREATE REFERENCE sets (or use structural rules)

        ## MICROSWITCHES
        ms_labels = [residue.label for residue in ResiduePositionSet.objects.get(name="State (micro-)switches").residue_position.all()]

        ## SODIUM POCKET
        sp_labels = [residue.label for residue in ResiduePositionSet.objects.get(name="Sodium ion pocket").residue_position.all()]

        ## ROTAMER SWITCHES
        rotamer_labels = []
        for entry in STRUCTURAL_SWITCHES["A"]:
            if entry["Rotamer Switch"] != "-":
                rotamer_labels.append(entry["AA1 Pos"])
                rotamer_labels.append(entry["AA2 Pos"])

        ## G PROTEIN INTERACTION POSITIONS
        # gprotein_labels = [residue.label for residue in ResiduePositionSet.objects.get(name="Signalling protein pocket").residue_position.all()]
        # Class A G-protein X-ray contacts
        # TODO: replace with automatically generated sets from X-rays stored in database
        # {generic number: set of receptor family slugs seen contacting G protein}
        # NOTE(review): many slug strings carry a leading space (" 001_...") —
        # only the set SIZE is used below, so this is harmless but worth fixing.
        gprotein_labels = {"1x60": {"001_006_001_001", " 001_006_001_002"},
                        "12x48": {"001_001_003_008", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
                        "12x49": {"001_001_003_008", " 001_006_001_002"},
                        "12x51": {"001_006_001_002"},
                        "2x37": {"001_006_001_001"},
                        "2x39": {"001_002_022_003"},
                        "2x40": {"001_006_001_001"},
                        "3x49": {"001_001_003_008", " 001_002_022_003"},
                        "3x50": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
                        "3x53": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
                        "3x54": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
                        "3x55": {"001_001_003_008", " 001_006_001_002"},
                        "3x56": {"001_006_001_002", " 001_009_001_001"},
                        "34x50": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
                        "34x51": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
                        "34x52": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002"},
                        "34x53": {"001_001_003_008", " 001_006_001_002"},
                        "34x54": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002"},
                        "34x55": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002", " 001_009_001_001"},
                        "34x57": {"001_001_001_002", " 001_002_022_003"},
                        "4x40": {"001_002_022_003"},
                        "5x61": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
                        "5x64": {"001_001_003_008", " 001_002_022_003", " 001_006_001_002"},
                        "5x65": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
                        "5x67": {"001_001_003_008"},
                        "5x68": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002"},
                        "5x69": {"001_001_001_002", " 001_001_003_008", " 001_006_001_001", " 001_006_001_002"},
                        "5x71": {"001_001_003_008", " 001_006_001_001", " 001_006_001_002"},
                        "5x72": {"001_001_003_008", " 001_006_001_002", " 001_009_001_001"},
                        "5x74": {"001_001_003_008"},
                        "6x23": {"001_002_022_003"},
                        "6x24": {"001_009_001_001"},
                        "6x25": {"001_002_022_003", " 001_006_001_001", " 001_009_001_001"},
                        "6x26": {"001_002_022_003", " 001_009_001_001"},
                        "6x28": {"001_009_001_001"},
                        "6x29": {"001_001_001_002", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
                        "6x32": {"001_001_001_002", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
                        "6x33": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
                        "6x36": {"001_001_001_002", " 001_001_003_008", " 001_002_022_003", " 001_006_001_002", " 001_009_001_001"},
                        "6x37": {"001_001_001_002", " 001_001_003_008", " 001_006_001_001", " 001_006_001_002"},
                        "7x56": {"001_001_001_002", " 001_006_001_001", " 001_006_001_002", " 001_009_001_001"},
                        "8x47": {"001_001_001_002", " 001_002_022_003", " 001_006_001_001", " 001_009_001_001"},
                        "8x48": {"001_002_022_003", " 001_006_001_002", " 001_009_001_001"},
                        "8x49": {"001_006_001_001", " 001_006_001_002"},
                        "8x51": {"001_006_001_002"},
                        "8x56": {"001_006_001_001"}}

        # TODO: replace with automatically generated sets from X-rays stored in database
        # Class A Arrestin X-ray contacts
        arrestin_labels = {"12x49": {"001_009_001_001"},
                        "2x37": {"001_009_001_001"},
                        "2x38": {"001_009_001_001"},
                        "2x39": {"001_009_001_001"},
                        "2x40": {"001_009_001_001"},
                        "2x43": {"001_009_001_001"},
                        "3x50": {"001_009_001_001"},
                        "3x54": {"001_009_001_001"},
                        "3x55": {"001_009_001_001"},
                        "3x56": {"001_009_001_001"},
                        "34x50": {"001_009_001_001"},
                        "34x51": {"001_009_001_001"},
                        "34x53": {"001_009_001_001"},
                        "34x54": {"001_009_001_001"},
                        "34x55": {"001_009_001_001"},
                        "34x56": {"001_009_001_001"},
                        "4x38": {"001_009_001_001"},
                        "5x61": {"001_009_001_001"},
                        "5x64": {"001_009_001_001"},
                        "5x68": {"001_009_001_001"},
                        "5x69": {"001_009_001_001"},
                        "5x71": {"001_009_001_001"},
                        "5x72": {"001_009_001_001"},
                        "6x24": {"001_009_001_001"},
                        "6x25": {"001_009_001_001"},
                        "6x26": {"001_009_001_001"},
                        "6x28": {"001_009_001_001"},
                        "6x29": {"001_009_001_001"},
                        "6x32": {"001_009_001_001"},
                        "6x33": {"001_009_001_001"},
                        "6x36": {"001_009_001_001"},
                        "6x37": {"001_009_001_001"},
                        "6x40": {"001_009_001_001"},
                        "8x47": {"001_009_001_001"},
                        "8x48": {"001_009_001_001"},
                        "8x49": {"001_009_001_001"},
                        "8x50": {"001_009_001_001"}}

        # Positions in center of membrane selected using 4BVN (ADRB1) together with OPM membrane positioning
        # Reference: ['1x44', '2x52', '3x36', '4x54', '5x46', '6x48', '7x43']
        mid_membrane_classA = {'TM1': 44,'TM2': 52,'TM3': 36,'TM4': 54,'TM5': 46, 'TM6': 48, 'TM7': 43}

        # NOTE: We might need to split this into B1 and B2 when adhesion X-rays are published
        # Positions in center of membrane selected using 5XEZ (GCGR) together with OPM membrane positioning
        # Reference: ['1x51', '2x58', '3x41', '4x54', '5x45', '6x49', '7x50']
        mid_membrane_classB = {'TM1': 51,'TM2': 58,'TM3': 41,'TM4': 54,'TM5': 45, 'TM6': 49, 'TM7': 50}

        # Positions in center of membrane selected using 4OR2 (mGLUR1) together with OPM membrane positioning
        # Reference: ['1x49', '2x48', '3x40', '4x41', '5x48', '6x48', '7.39x40']
        mid_membrane_classC = {'TM1': 49,'TM2': 48,'TM3': 40,'TM4': 41,'TM5': 48, 'TM6': 48, 'TM7': 40}

        # Positions in center of membrane selected using 6BD4 (FZD4) together with OPM membrane positioning
        # Reference: ['1x43', '2x53', '3x38', '4x53', '5x53', '6x43', '7x47']
        mid_membrane_classF = {'TM1': 43,'TM2': 53,'TM3': 38,'TM4': 53,'TM5': 53, 'TM6': 43, 'TM7': 47}

        # Positions within membrane layer selected using 4BVN together with OPM membrane positioning
        core_membrane_classA = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}
        # TODO: other classes
        # NOTE(review): B/C/F currently reuse the class A boundaries — placeholders.
        core_membrane_classB = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}
        core_membrane_classC = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}
        core_membrane_classF = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}

        # Residue oriented outward of bundle (based on inactive 4BVN and active 3SN6)
        outward_orientation = {
            'TM1' : [29, 30, 33, 34, 36, 37, 38, 40, 41, 44, 45, 48, 51, 52, 54, 55, 58],
            'TM2' : [38, 41, 45, 48, 52, 55, 56, 58, 59, 60, 62, 63, 66],
            'TM3' : [23, 24, 27, 31, 48, 51, 52, 55],
            'TM4' : [40, 41, 43, 44, 47, 48, 50, 51, 52, 54, 55, 58, 59, 62, 63, 81],
            'TM5' : [36, 37, 38, 40, 41, 42, 44, 45, 46, 48, 49, 52, 53, 55, 56, 57, 59, 60, 62, 63, 64, 66, 67, 68, 70, 71, 73, 74],
            'TM6' : [25, 28, 29, 31, 32, 34, 35, 38, 39, 42, 43, 45, 46, 49, 50, 53, 54, 56, 57, 60],
            'TM7' : [33, 34, 35, 37, 40, 41, 43, 44, 48, 51, 52, 54, 55]
        }

        ########

        # prepare context for output
        # One dict per position; segment-header rows carry only "position".
        context = {"signatures" : []}
        index = 0
        for h, segment in enumerate(rfb_panel["signatures"]["gs_positions"]["gpcrdba"]):
            segment_first = True
            for i, position in enumerate(rfb_panel["signatures"]["gs_positions"]["gpcrdba"][segment]):
                if len(position) <= 5:
                    # To filter segment headers with non-GN numbering
                    if segment_first:
                        context["signatures"].append({"position" : segment})
                        index += 1
                        segment_first = False

                    # Add data
                    context["signatures"].append({})
                    context["signatures"][index]["segment"] = segment
                    context["signatures"][index]["sort"] = index
                    context["signatures"][index]["position"] = position

                    # Normalized position in TM
                    partial_position = int(position.split('x')[1][:2])

                    # RESIDUE PLACEMENT
                    # Keys spelled 'membane_*' (sic) to match the template — do not rename.
                    context["signatures"][index]["membane_placement"] = "-"
                    context["signatures"][index]["membane_segment"] = "Extracellular"
                    context["signatures"][index]["residue_orientation"] = "-"
                    if segment in mid_membrane_classA: # TM helix
                        # parse position
                        context["signatures"][index]["membane_placement"] = partial_position - mid_membrane_classA[segment]

                        # negative is toward cytoplasm
                        if segment in ['TM1', 'TM3', 'TM5', 'TM7']: # downwards
                            context["signatures"][index]["membane_placement"] = -1 * context["signatures"][index]["membane_placement"]

                        # Segment selection
                        if partial_position >= core_membrane_classA[segment][0] and partial_position <= core_membrane_classA[segment][1]:
                            context["signatures"][index]["membane_segment"] = "Membrane"
                        elif segment in ['TM1', 'TM3', 'TM5', 'TM7']:
                            if partial_position > core_membrane_classA[segment][1]:
                                context["signatures"][index]["membane_segment"] = "Intracellular"
                        else:
                            if partial_position < core_membrane_classA[segment][0]:
                                context["signatures"][index]["membane_segment"] = "Intracellular"

                        # Orientation
                        if partial_position in outward_orientation[segment]:
                            context["signatures"][index]["residue_orientation"] = "Outward"
                        else:
                            # Inward only within the span of known outward positions.
                            if partial_position > min(outward_orientation[segment]) and partial_position < max(outward_orientation[segment]):
                                context["signatures"][index]["residue_orientation"] = "Inward"
                    # Intracellular segments
                    elif segment in ['ICL1', 'ICL2', 'ICL3', 'TM8', 'C-term']:
                        context["signatures"][index]["membane_segment"] = "Intracellular"

                    # COUNTS: all db results in a single loop
                    for key in ["ligand_binding", "natural_mutations", "thermo_mutations", "ligand_mutations", "basal_mutations", "intrasegment_contacts", "phos", "palm", "glyc", "ubiq" ]: # Add in future "gprotein_interface", "arrestin_interface"
                        context["signatures"][index][key] = 0
                        if position in rfb_panel[key]:
                            context["signatures"][index][key] = rfb_panel[key][position]

                    # G-protein interface
                    context["signatures"][index]["gprotein_interface"] = 0
                    if position in gprotein_labels:
                        context["signatures"][index]["gprotein_interface"] = len(gprotein_labels[position])

                    # Arrestin interface
                    context["signatures"][index]["arrestin_interface"] = 0
                    if position in arrestin_labels:
                        context["signatures"][index]["arrestin_interface"] = len(arrestin_labels[position])

                    # BINARY
                    # Microswitch
                    context["signatures"][index]["microswitch"] = position in ms_labels

                    # Sodium pocket
                    context["signatures"][index]["sodium"] = position in sp_labels

                    # Rotamer switch
                    context["signatures"][index]["rotamer_switch"] = position in rotamer_labels

                    # contacts
                    # State-specific contacts: partners present in one state but
                    # not the other.
                    context["signatures"][index]["active_contacts"] = 0
                    if position in rfb_panel["active_contacts"]:
                        if position in rfb_panel["inactive_contacts"]:
                            context["signatures"][index]["active_contacts"] = len(rfb_panel["active_contacts"][position].difference(rfb_panel["inactive_contacts"][position]))
                        else:
                            context["signatures"][index]["active_contacts"] = len(rfb_panel["active_contacts"][position])

                    context["signatures"][index]["inactive_contacts"] = 0
                    if position in rfb_panel["inactive_contacts"]:
                        if position in rfb_panel["active_contacts"]:
                            context["signatures"][index]["inactive_contacts"] = len(rfb_panel["inactive_contacts"][position].difference(rfb_panel["active_contacts"][position]))
                        else:
                            context["signatures"][index]["inactive_contacts"] = len(rfb_panel["inactive_contacts"][position])

                    # CLASS A sequence + property consensus
                    if position in rfb_panel["class_a_positions"]["gpcrdba"][segment]:
                        ca_index = list(rfb_panel["class_a_positions"]["gpcrdba"][segment]).index(position)

                        # Sequence consensus
                        context["signatures"][index]["class_a_aa"] = rfb_panel["class_a_aa"][segment][position][0]
                        context["signatures"][index]["class_a_aa_name"] = FULL_AMINO_ACIDS[rfb_panel["class_a_aa"][segment][position][0]]
                        if context["signatures"][index]["class_a_aa"] == '+':
                            context["signatures"][index]["class_a_aa_name"] += ": "+rfb_panel["class_a_aa"][segment][position][3]
                        context["signatures"][index]["class_a_aa_cons"] = rfb_panel["class_a_aa"][segment][position][2]

                        # Property consensus
                        context["signatures"][index]["class_a_symb"] = rfb_panel["class_a_prop"][segment][i][0]
                        context["signatures"][index]["class_a_prop"] = rfb_panel["class_a_prop"][segment][i][1]
                        context["signatures"][index]["class_a_prop_cons"] = rfb_panel["class_a_prop"][segment][i][2]

                    # SEQUENCE SIGNATURES
                    for signature_type in ["cah", "cal", "gs", "gio", "gq", "g12"]:
                        if position in rfb_panel["signatures"][signature_type + "_positions"]["gpcrdba"][segment]:
                            ca_index = list(rfb_panel["signatures"][signature_type + "_positions"]["gpcrdba"][segment]).index(position)
                            context["signatures"][index][signature_type + "_score"] = rfb_panel["signatures"][signature_type][segment][ca_index][2]
                            context["signatures"][index][signature_type + "_prop"] = rfb_panel["signatures"][signature_type][segment][ca_index][1]
                            context["signatures"][index][signature_type + "_symb"] = rfb_panel["signatures"][signature_type][segment][ca_index][0]

                    index += 1

        # Human Class A alignment - consensus/conservation
        return context
def render_residue_table_excel(request):
    """Export the residue numbering table for the session's selection as an
    Excel (.xlsx) download.

    Mirrors ResidueTablesDisplay.get_context_data: flattens the session
    selection into a protein list, builds a per-segment table of generic
    numbering positions vs. residues, then streams the table through
    xlsxwriter into an HttpResponse attachment named residue_table.xlsx.
    """
    # NOTE(review): if the session has no selection this is False and the
    # attribute accesses below will raise — presumably callers guard for that;
    # confirm against the URL routing.
    simple_selection = request.session.get('selection', False)
    origin_checked = False
    # local protein list
    proteins = []
    # flatten the selection into individual proteins
    for target in simple_selection.targets:
        if target.type == 'protein':
            proteins.append(target.item)
            # classify the selection (GPCR / gprot / arrestins) from the first item seen
            if origin_checked == False:
                signalling_data = ResidueTablesDisplay.checkOrigin(target.item)
                origin_checked = True
        elif target.type == 'family':
            # species filter
            species_list = []
            for species in simple_selection.species:
                species_list.append(species.item)
            # annotation filter
            protein_source_list = []
            for protein_source in simple_selection.annotation:
                protein_source_list.append(protein_source.item)
            # expand the family into member proteins (species-restricted when a filter is set)
            if species_list:
                family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
                    species__in=(species_list),
                    source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')
            else:
                family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
                    source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')
            for fp in family_proteins:
                proteins.append(fp)
                if origin_checked == False:
                    signalling_data = ResidueTablesDisplay.checkOrigin(fp)
                    origin_checked = True
    # NOTE(review): if the selection yields no proteins, signalling_data is never
    # assigned and the branches below raise NameError — confirm upstream
    # guarantees a non-empty selection.
    longest_name = 0
    # maps full species common name -> abbreviated display name
    species_list = {}
    for protein in proteins:
        if protein.species.common_name not in species_list:
            # abbreviate long multi-word species names ("Homo sapiens" -> "H. sapiens"),
            # truncating with ".." when the remainder is still longer than 11 chars
            if len(protein.species.common_name)>10 and len(protein.species.common_name.split())>1:
                name = protein.species.common_name.split()[0][0]+". "+" ".join(protein.species.common_name.split()[1:])
                if len(" ".join(protein.species.common_name.split()[1:]))>11:
                    name = protein.species.common_name.split()[0][0]+". "+" ".join(protein.species.common_name.split()[1:])[:8]+".."
            else:
                name = protein.species.common_name
            species_list[protein.species.common_name] = name
        else:
            name = species_list[protein.species.common_name]
        # track the longest "protein + species" label, with HTML tags stripped
        if len(re.sub('<[^>]*>', '', protein.name)+" "+name)>longest_name:
            longest_name = len(re.sub('<[^>]*>', '', protein.name)+" "+name)
    # get the selection from session
    selection = Selection()
    if simple_selection:
        selection.importer(simple_selection)
    # # extract numbering schemes and proteins
    if signalling_data == 'GPCR':
        numbering_schemes = [x.item for x in selection.numbering_schemes]
    elif signalling_data == 'gprot':
        numbering_schemes = ResidueNumberingScheme.objects.filter(slug='cgn')
    elif signalling_data == 'arrestins':
        numbering_schemes = ResidueNumberingScheme.objects.filter(slug='can')
    # Get all the segments
    if signalling_data == 'GPCR':
        segments = ProteinSegment.objects.filter(proteinfamily='GPCR')
    elif signalling_data == 'gprot':
        segments = ProteinSegment.objects.filter(proteinfamily='Alpha')
    elif signalling_data == 'arrestins':
        segments = ProteinSegment.objects.filter(proteinfamily='Arrestin')
    # prefer the site-wide default numbering scheme when it is among the selected ones
    if ResidueNumberingScheme.objects.get(slug=settings.DEFAULT_NUMBERING_SCHEME) in numbering_schemes:
        default_scheme = ResidueNumberingScheme.objects.get(slug=settings.DEFAULT_NUMBERING_SCHEME)
    else:
        default_scheme = numbering_schemes[0]
    # prepare the dictionary
    # each segment has a dictionary of positions
    # default_generic_number or first scheme on the list is the key
    # value is a dictionary of other gn positions and residues from selected proteins
    data = OrderedDict()
    for segment in segments:
        data[segment.slug] = OrderedDict()
        residues = Residue.objects.filter(protein_segment=segment, protein_conformation__protein__in=proteins, generic_number__isnull=False).prefetch_related('protein_conformation__protein', 'protein_conformation__state', 'protein_segment',
            'generic_number__scheme', 'display_generic_number__scheme', 'alternative_generic_numbers__scheme')
        # seed every generic position of the default scheme with '-' placeholders,
        # one slot per selected protein
        for scheme in numbering_schemes:
            if scheme == default_scheme and scheme.slug == settings.DEFAULT_NUMBERING_SCHEME:
                for pos in list(set([x.generic_number.label for x in residues if x.protein_segment == segment])):
                    data[segment.slug][pos] = {scheme.slug : pos, 'seq' : ['-']*len(proteins)}
            elif scheme == default_scheme:
                for pos in list(set([x.generic_number.label for x in residues if x.protein_segment == segment])):
                    data[segment.slug][pos] = {scheme.slug : pos, 'seq' : ['-']*len(proteins)}
        for residue in residues:
            alternatives = residue.alternative_generic_numbers.all()
            pos = residue.generic_number
            # no alternative numberings: place the residue string directly
            if len(alternatives) == 0:
                data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)
            # otherwise record the residue under each alternative scheme as well
            # (loop body is skipped when alternatives is empty, so no else needed)
            for alternative in alternatives:
                scheme = alternative.scheme
                if default_scheme.slug == settings.DEFAULT_NUMBERING_SCHEME:
                    pos = residue.generic_number
                    if scheme == pos.scheme:
                        data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)
                    else:
                        # accumulate (possibly several) alternative labels, space-separated
                        if scheme.slug not in data[segment.slug][pos.label].keys():
                            data[segment.slug][pos.label][scheme.slug] = alternative.label
                        if alternative.label not in data[segment.slug][pos.label][scheme.slug]:
                            data[segment.slug][pos.label][scheme.slug] += " "+alternative.label
                        data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)
                else:
                    if scheme.slug not in data[segment.slug][pos.label].keys():
                        data[segment.slug][pos.label][scheme.slug] = alternative.label
                    if alternative.label not in data[segment.slug][pos.label][scheme.slug]:
                        data[segment.slug][pos.label][scheme.slug] += " "+alternative.label
                    data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)
    # Preparing the dictionary of list of lists. Dealing with triple nested dictionary in django templates is a nightmare
    # (the shared [] default from fromkeys is harmless here: every key is reassigned below)
    flattened_data = OrderedDict.fromkeys([x.slug for x in segments], [])
    for s in iter(flattened_data):
        flattened_data[s] = [[data[s][x][y.slug] if y.slug in data[s][x] else "-" for y in numbering_schemes ]+data[s][x]['seq'] for x in sorted(data[s])]
    #Purging the empty segments
    clean_dict = OrderedDict()
    clean_segments = []
    for s in iter(flattened_data):
        if flattened_data[s] != []:
            clean_dict[s] = flattened_data[s]
            clean_segments.append(s)
    segments = clean_segments
    flattened_data = clean_dict
    # NOTE(review): 'header' is computed but never used below — candidate for removal
    header = [x.short_name for x in numbering_schemes] + [x.name+" "+species_list[x.species.common_name] for x in proteins]
    # Now excel time
    outstream = BytesIO()
    wb = xlsxwriter.Workbook(outstream, {'in_memory': True})
    sub = wb.add_format({'font_script': 2})  # subscript format for <sub> markup in names
    bold = wb.add_format({'bold': True})
    worksheet = wb.add_worksheet('Residue table')
    row_offset = 0
    #Header row
    #Numbering schemes
    for i, x in enumerate(numbering_schemes):
        worksheet.write(0, i, x.short_name)
    #Protein names
    col_offset = len(numbering_schemes)
    for i, x in enumerate(proteins):
        # names containing <sub> markup become rich strings; plain names are written as-is
        t = html_to_rich_format_string(x.name + " " + species_list[x.species.common_name], sub)
        if len(t) < 2:
            worksheet.write(0, col_offset + i, html.unescape(x.name + " " + species_list[x.species.common_name]))
        else:
            worksheet.write_rich_string(0, col_offset + i, *t)
    row_offset += 1
    # one bold segment-title row followed by that segment's data rows
    for segment in segments:
        worksheet.write(row_offset, 0, segment, bold)
        row_offset += 1
        for j, data_row in enumerate(flattened_data[segment]):
            worksheet.write_row(row_offset + j, 0, data_row)
        row_offset += len(flattened_data[segment])
    wb.close()
    outstream.seek(0)
    response = HttpResponse(
        outstream.read(),
        content_type="application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
    )
    response['Content-Disposition'] = "attachment; filename=residue_table.xlsx"
    return response
def html_to_rich_format_string(input_text, wb_format):
    """Translate ``<sub>`` markup into an xlsxwriter rich-string argument list.

    HTML entities in *input_text* are unescaped first.  Each
    ``text<sub>X</sub>`` pair contributes three entries — the leading text,
    *wb_format*, and the subscript text — while chunks without subscript
    markup pass through unchanged.
    """
    decoded = html.unescape(input_text)
    fragments = []
    for piece in decoded.split('</sub>'):
        if '<sub>' in piece:
            parts = piece.split('<sub>')
            fragments += [parts[0], wb_format, parts[1]]
        else:
            fragments.append(piece)
    return fragments
| [
"from django.conf import settings\n",
"from django.core.cache import cache\n",
"from django.db.models import Count, F, Q\n",
"from django.shortcuts import render, redirect\n",
"from django.views.generic import TemplateView\n",
"from django.http import HttpResponse\n",
"\n",
"\n",
"from common.views import AbsTargetSelection\n",
"from common.definitions import FULL_AMINO_ACIDS, STRUCTURAL_RULES, STRUCTURAL_SWITCHES\n",
"from common.selection import Selection\n",
"Alignment = getattr(__import__(\n",
" 'common.alignment_' + settings.SITE_NAME,\n",
" fromlist=['Alignment']\n",
" ), 'Alignment')\n",
"\n",
"from seqsign.sequence_signature import SequenceSignature, SignatureMatch\n",
"\n",
"from alignment.functions import get_proteins_from_selection\n",
"from alignment.views import TargetSelectionGprotein, TargetSelectionArrestin\n",
"from construct.views import create_structural_rule_trees, ConstructMutation\n",
"from contactnetwork.models import InteractingResiduePair\n",
"from interaction.models import ResidueFragmentInteraction\n",
"from mutation.models import MutationExperiment\n",
"from mutational_landscape.models import NaturalMutations, CancerMutations, DiseaseMutations, PTMs, NHSPrescribings\n",
"from protein.models import ProteinSegment, Protein, ProteinFamily\n",
"from residue.models import Residue,ResidueNumberingScheme, ResiduePositionSet, ResidueSet\n",
"\n",
"from collections import OrderedDict\n",
"\n",
"import html\n",
"import re\n",
"import time\n",
"from io import BytesIO\n",
"import xlsxwriter\n",
"\n",
"class TargetSelection(AbsTargetSelection):\n",
" pass\n",
"\n",
"class ResidueTablesSelection(AbsTargetSelection):\n",
"\n",
" # Left panel\n",
" step = 1\n",
" number_of_steps = 2\n",
" docs = 'generic_numbering.html'\n",
"\n",
" description = 'Select receptors to index by searching or browsing in the middle column. You can select entire' \\\n",
" + ' receptor families and/or individual receptors.\\n\\nSelected receptors will appear in the right column,' \\\n",
" + ' where you can edit the list.\\n\\nSelect which numbering schemes to use in the middle column.\\n\\nOnce you' \\\n",
" + ' have selected all your receptors, click the green button.'\n",
"\n",
"\n",
" # Middle section\n",
" numbering_schemes = True\n",
"\n",
"\n",
" # Buttons\n",
" buttons = {\n",
" 'continue' : {\n",
" 'label' : 'Show residue numbers',\n",
" 'url' : '/residue/residuetabledisplay',\n",
" 'color' : 'success',\n",
" }\n",
" }\n",
"\n",
"class ResidueGprotSelection(TargetSelectionGprotein):\n",
" # Left panel\n",
" step = 1\n",
" number_of_steps = 1\n",
" docs = 'generic_numbering.html'\n",
"\n",
" description = 'Select receptors to index by searching or browsing in the middle column. You can select entire' \\\n",
" + ' receptor families and/or individual receptors.\\n\\nSelected receptors will appear in the right column,' \\\n",
" + ' where you can edit the list.\\n\\nOnce you' \\\n",
" + ' have selected all your receptors, click the green button.'\n",
"\n",
" # Buttons\n",
" buttons = {\n",
" 'continue' : {\n",
" 'label' : 'Show residue numbers',\n",
" 'url' : '/residue/residuetabledisplay',\n",
" 'color' : 'success',\n",
" }\n",
" }\n",
" try:\n",
" if ProteinFamily.objects.filter(slug=\"100_001\").exists():\n",
" ppf = ProteinFamily.objects.get(slug=\"100_001\")\n",
" pfs = ProteinFamily.objects.filter(parent=ppf.id)\n",
" ps = Protein.objects.filter(family=ppf)\n",
"\n",
" tree_indent_level = []\n",
" action = 'expand'\n",
" # remove the parent family (for all other families than the root of the tree, the parent should be shown)\n",
" del ppf\n",
" except:\n",
" pass\n",
"\n",
"class ResidueArrestinSelection(TargetSelectionArrestin):\n",
" # Left panel\n",
" step = 1\n",
" number_of_steps = 1\n",
" docs = 'generic_numbering.html'\n",
"\n",
" description = 'Select receptors to index by searching or browsing in the middle column. You can select entire' \\\n",
" + ' receptor families and/or individual receptors.\\n\\nSelected receptors will appear in the right column,' \\\n",
" + ' where you can edit the list.\\n\\nOnce you' \\\n",
" + ' have selected all your receptors, click the green button.'\n",
"\n",
"\n",
" # Buttons\n",
" buttons = {\n",
" 'continue' : {\n",
" 'label' : 'Show residue numbers',\n",
" 'url' : '/residue/residuetabledisplay',\n",
" 'color' : 'success',\n",
" }\n",
" }\n",
" try:\n",
" if ProteinFamily.objects.filter(slug=\"200_000\").exists():\n",
" ppf = ProteinFamily.objects.get(slug=\"200_000\")\n",
" pfs = ProteinFamily.objects.filter(parent=ppf.id)\n",
" ps = Protein.objects.filter(family=ppf)\n",
"\n",
" tree_indent_level = []\n",
" action = 'expand'\n",
" # remove the parent family (for all other families than the root of the tree, the parent should be shown)\n",
" del ppf\n",
" except:\n",
" pass\n",
"\n",
"class ResidueTablesDisplay(TemplateView):\n",
" \"\"\"\n",
" A class rendering the residue numbering table.\n",
" \"\"\"\n",
" template_name = 'residue_table.html'\n",
"\n",
" @staticmethod\n",
" def checkOrigin(target_item):\n",
" if str(target_item).split('_')[0].startswith('arr'):\n",
" output = 'arrestins'\n",
" elif str(target_item).split('_')[0].startswith('gna'):\n",
" output = \"gprot\"\n",
" else:\n",
" output = \"GPCR\"\n",
" return output\n",
"\n",
" def get(self, request, *args, **kwargs):\n",
" # get the user selection from session\n",
" simple_selection = self.request.session.get('selection', False)\n",
" if simple_selection == False or not simple_selection.targets :\n",
" return redirect(\"residuetableselect\")\n",
"\n",
" # check rendering\n",
" context = self.get_context_data(**kwargs)\n",
" return render(request, self.template_name, context)\n",
"\n",
" def get_context_data(self, **kwargs):\n",
" \"\"\"\n",
" Get the selection data (proteins and numbering schemes) and prepare it for display.\n",
" \"\"\"\n",
" context = super().get_context_data(**kwargs)\n",
" # get the user selection from session\n",
" simple_selection = self.request.session.get('selection', False)\n",
" origin_checked = False\n",
" # local protein list\n",
" proteins = []\n",
" # flatten the selection into individual proteins\n",
" for target in simple_selection.targets:\n",
" if target.type == 'protein':\n",
" proteins.append(target.item)\n",
" if origin_checked == False:\n",
" signalling_data = self.checkOrigin(target.item)\n",
" origin_checked = True\n",
" elif target.type == 'family':\n",
" # species filter\n",
" species_list = []\n",
" for species in simple_selection.species:\n",
" species_list.append(species.item)\n",
"\n",
" # annotation filter\n",
" protein_source_list = []\n",
" for protein_source in simple_selection.annotation:\n",
" protein_source_list.append(protein_source.item)\n",
"\n",
" if species_list:\n",
" family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,\n",
" species__in=(species_list),\n",
" source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')\n",
" else:\n",
" family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,\n",
" source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')\n",
"\n",
" for fp in family_proteins:\n",
" proteins.append(fp)\n",
" if origin_checked == False:\n",
" signalling_data = self.checkOrigin(fp)\n",
" origin_checked = True\n",
"\n",
" longest_name = 0\n",
" species_list = {}\n",
" for protein in proteins:\n",
" if protein.species.common_name not in species_list:\n",
" if len(protein.species.common_name)>10 and len(protein.species.common_name.split())>1:\n",
" name = protein.species.common_name.split()[0][0]+\". \"+\" \".join(protein.species.common_name.split()[1:])\n",
" if len(\" \".join(protein.species.common_name.split()[1:]))>11:\n",
" name = protein.species.common_name.split()[0][0]+\". \"+\" \".join(protein.species.common_name.split()[1:])[:8]+\"..\"\n",
" else:\n",
" name = protein.species.common_name\n",
" species_list[protein.species.common_name] = name\n",
" else:\n",
" name = species_list[protein.species.common_name]\n",
"\n",
" if len(re.sub('<[^>]*>', '', protein.name)+\" \"+name)>longest_name:\n",
" longest_name = len(re.sub('<[^>]*>', '', protein.name)+\" \"+name)\n",
"\n",
" # get the selection from session\n",
" selection = Selection()\n",
" if simple_selection:\n",
" selection.importer(simple_selection)\n",
" # # extract numbering schemes and proteins\n",
" if signalling_data == 'GPCR':\n",
" numbering_schemes = [x.item for x in selection.numbering_schemes]\n",
" elif signalling_data == 'gprot':\n",
" numbering_schemes = ResidueNumberingScheme.objects.filter(slug='cgn')\n",
" elif signalling_data == 'arrestins':\n",
" numbering_schemes = ResidueNumberingScheme.objects.filter(slug='can')\n",
"\n",
" # # get the helices (TMs only at first)\n",
" # segments = ProteinSegment.objects.filter(category='helix', proteinfamily='GPCR')\n",
"\n",
" # Get all the segments\n",
" if signalling_data == 'GPCR':\n",
" segments = ProteinSegment.objects.filter(proteinfamily='GPCR')\n",
" elif signalling_data == 'gprot':\n",
" segments = ProteinSegment.objects.filter(proteinfamily='Alpha')\n",
" elif signalling_data == 'arrestins':\n",
" segments = ProteinSegment.objects.filter(proteinfamily='Arrestin')\n",
"\n",
" if ResidueNumberingScheme.objects.get(slug=settings.DEFAULT_NUMBERING_SCHEME) in numbering_schemes:\n",
" default_scheme = ResidueNumberingScheme.objects.get(slug=settings.DEFAULT_NUMBERING_SCHEME)\n",
" else:\n",
" default_scheme = numbering_schemes[0]\n",
"\n",
" # prepare the dictionary\n",
" # each segment has a dictionary of positions\n",
" # default_generic_number or first scheme on the list is the key\n",
" # value is a dictionary of other gn positions and residues from selected proteins\n",
" data = OrderedDict()\n",
" for segment in segments:\n",
" data[segment.slug] = OrderedDict()\n",
" residues = Residue.objects.filter(protein_segment=segment, protein_conformation__protein__in=proteins, generic_number__isnull=False).prefetch_related('protein_conformation__protein', 'protein_conformation__state', 'protein_segment',\n",
" 'generic_number__scheme', 'display_generic_number__scheme', 'alternative_generic_numbers__scheme')\n",
" for scheme in numbering_schemes:\n",
" if scheme == default_scheme and scheme.slug == settings.DEFAULT_NUMBERING_SCHEME:\n",
" for pos in list(set([x.generic_number.label for x in residues if x.protein_segment == segment])):\n",
" data[segment.slug][pos] = {scheme.slug : pos, 'seq' : ['-']*len(proteins)}\n",
" elif scheme == default_scheme:\n",
" for pos in list(set([x.generic_number.label for x in residues if x.protein_segment == segment])):\n",
" data[segment.slug][pos] = {scheme.slug : pos, 'seq' : ['-']*len(proteins)}\n",
"\n",
" for residue in residues:\n",
" alternatives = residue.alternative_generic_numbers.all()\n",
" #probably no alternatives for GProts and Arrestins (?)\n",
" pos = residue.generic_number\n",
" if len(alternatives) == 0:\n",
" data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)\n",
" else:\n",
" for alternative in alternatives:\n",
" scheme = alternative.scheme\n",
" if default_scheme.slug == settings.DEFAULT_NUMBERING_SCHEME:\n",
" pos = residue.generic_number\n",
" if scheme == pos.scheme:\n",
" data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)\n",
" else:\n",
" if scheme.slug not in data[segment.slug][pos.label].keys():\n",
" data[segment.slug][pos.label][scheme.slug] = alternative.label\n",
" if alternative.label not in data[segment.slug][pos.label][scheme.slug]:\n",
" data[segment.slug][pos.label][scheme.slug] += \" \"+alternative.label\n",
" data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)\n",
" else:\n",
" if scheme.slug not in data[segment.slug][pos.label].keys():\n",
" data[segment.slug][pos.label][scheme.slug] = alternative.label\n",
" if alternative.label not in data[segment.slug][pos.label][scheme.slug]:\n",
" data[segment.slug][pos.label][scheme.slug] += \" \"+alternative.label\n",
" data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)\n",
"\n",
" # Preparing the dictionary of list of lists. Dealing with tripple nested dictionary in django templates is a nightmare\n",
" flattened_data = OrderedDict.fromkeys([x.slug for x in segments], [])\n",
"\n",
" for s in iter(flattened_data):\n",
" flattened_data[s] = [[data[s][x][y.slug] if y.slug in data[s][x] else '-' for y in numbering_schemes ] + data[s][x]['seq'] for x in sorted(data[s]) ]\n",
"\n",
" #Purging the empty segments\n",
" clean_dict = OrderedDict()\n",
" clean_segments = []\n",
" for s in iter(flattened_data):\n",
" if flattened_data[s] != []:\n",
" clean_dict[s] = flattened_data[s]\n",
" clean_segments.append(s)\n",
"\n",
" if signalling_data == 'GPCR':\n",
" context['header'] = zip([x.short_name for x in numbering_schemes] + [\"<b>\"+x.name\n",
" .replace(' receptor','')\n",
" .replace('-adrenoceptor','')\n",
" .replace('Olfactory','OLF')\n",
" .replace('Short-wave-sensitive', 'SWS')\n",
" .replace('Medium-wave-sensitive', 'MWS')\n",
" .replace('Long-wave-sensitive', 'LWS')\n",
" +\"</b><br /> \"+species_list[x.species.common_name] for x in proteins], [x.name for x in numbering_schemes] + [x.name for x in proteins],[x.name for x in numbering_schemes] + [x.entry_name for x in proteins])\n",
" context['col_length'] = len(proteins)+len(numbering_schemes)+1\n",
" elif signalling_data == 'gprot':\n",
" context['header'] = zip([\"Generic<br />residue<br />number\"] + [\"<b>\"+x.family.name.replace('NA','<sub>α')+\"</sub></b><br />\"+species_list[x.species.common_name]+\"\" for x in proteins],[x.name for x in numbering_schemes] + [x.name for x in proteins],[x.name for x in numbering_schemes] + [x.entry_name for x in proteins])\n",
" context['col_length'] = len(proteins)+1\n",
" elif signalling_data == 'arrestins':\n",
" context['header'] = zip([\"Generic<br />residue<br />number\"] + [\"<b>\"+x.name.replace('Beta','β')+\"</b><br />\"+species_list[x.species.common_name]+\"\" for x in proteins], [x.name for x in numbering_schemes] + [x.name for x in proteins],[x.name for x in numbering_schemes] + [x.entry_name for x in proteins])\n",
" context['col_length'] = len(proteins)+1\n",
" context['segments'] = clean_segments\n",
" context['data'] = clean_dict\n",
" context['number_of_schemes'] = len(numbering_schemes)\n",
" context['longest_name'] = {'div' : longest_name*2, 'height': longest_name*2+80}\n",
" context['signalling'] = signalling_data\n",
"\n",
" return context\n",
"\n",
"class ResidueFunctionBrowser(TemplateView):\n",
" \"\"\"\n",
" Per generic position summary of functional information\n",
" \"\"\"\n",
" template_name = 'residue_function_browser.html'\n",
"\n",
" def get_context_data (self, **kwargs):\n",
" # setup caches\n",
" cache_name = \"RFB\"\n",
" rfb_panel = cache.get(cache_name)\n",
"# rfb_panel = None\n",
" if rfb_panel == None:\n",
" rfb_panel = {}\n",
"\n",
" # Signatures\n",
" rfb_panel[\"signatures\"] = {}\n",
"\n",
" # Grab relevant segments\n",
" segments = list(ProteinSegment.objects.filter(proteinfamily='GPCR'))\n",
"\n",
" # Grab High/Low CA GPCRs (class A)\n",
" high_ca = [\"5ht2c_human\", \"acm4_human\", \"drd1_human\", \"fpr1_human\", \"ghsr_human\", \"cnr1_human\", \"aa1r_human\", \"gpr6_human\", \"gpr17_human\", \"gpr87_human\"]\n",
" low_ca = [\"agtr1_human\", \"ednrb_human\", \"gnrhr_human\", \"acthr_human\", \"v2r_human\", \"gp141_human\", \"gp182_human\"]\n",
"\n",
" # Signature High vs Low CA\n",
" high_ca_gpcrs = Protein.objects.filter(entry_name__in=high_ca).select_related('residue_numbering_scheme', 'species')\n",
" low_ca_gpcrs = Protein.objects.filter(entry_name__in=low_ca).select_related('residue_numbering_scheme', 'species')\n",
"\n",
" signature = SequenceSignature()\n",
" signature.setup_alignments(segments, high_ca_gpcrs, low_ca_gpcrs)\n",
" signature.calculate_signature()\n",
" rfb_panel[\"signatures\"][\"cah\"] = signature.signature\n",
" rfb_panel[\"signatures\"][\"cah_positions\"] = signature.common_gn\n",
"\n",
" signature = SequenceSignature()\n",
" signature.setup_alignments(segments, low_ca_gpcrs, high_ca_gpcrs)\n",
" signature.calculate_signature()\n",
" rfb_panel[\"signatures\"][\"cal\"] = signature.signature\n",
" rfb_panel[\"signatures\"][\"cal_positions\"] = signature.common_gn\n",
"\n",
" # Grab Gi/Gs/Gq/GI12 GPCR sets (class A)\n",
" human_class_a_gpcrs = Protein.objects.filter(species_id=1, sequence_type_id=1, family__slug__startswith='001').distinct().prefetch_related('proteingprotein_set', 'residue_numbering_scheme')\n",
" gs = list(human_class_a_gpcrs.filter(proteingprotein__slug=\"100_001_001\"))\n",
" gio = list(human_class_a_gpcrs.filter(proteingprotein__slug=\"100_001_002\"))\n",
" gq = list(human_class_a_gpcrs.filter(proteingprotein__slug=\"100_001_003\"))\n",
" g12 = list(human_class_a_gpcrs.filter(proteingprotein__slug=\"100_001_004\"))\n",
" all = set(gs + gio + gq + g12)\n",
"\n",
" # Create sequence signatures for the G-protein sets\n",
" for gprotein in [\"gs\", \"gio\", \"gq\", \"g12\"]:\n",
"# print(\"Processing \" + gprotein)\n",
" # Signature receptors specific for a G-protein vs all others\n",
" signature = SequenceSignature()\n",
" signature.setup_alignments(segments, locals()[gprotein], all.difference(locals()[gprotein]))\n",
" signature.calculate_signature()\n",
" rfb_panel[\"signatures\"][gprotein] = signature.signature\n",
" rfb_panel[\"signatures\"][gprotein + \"_positions\"] = signature.common_gn\n",
"\n",
" # Add class A alignment features\n",
" signature = SequenceSignature()\n",
" signature.setup_alignments(segments, human_class_a_gpcrs, [list(human_class_a_gpcrs)[0]])\n",
" signature.calculate_signature()\n",
" rfb_panel[\"class_a_positions\"] = signature.common_gn\n",
" rfb_panel[\"class_a_aa\"] = signature.aln_pos.consensus\n",
" rfb_panel[\"class_a_prop\"] = signature.features_consensus_pos\n",
"\n",
" # Add X-ray ligand contacts\n",
" # Optionally include the curation with the following filter: structure_ligand_pair__annotated=True\n",
" class_a_interactions = ResidueFragmentInteraction.objects.filter(\n",
" structure_ligand_pair__structure__protein_conformation__protein__family__slug__startswith=\"001\").exclude(interaction_type__type='hidden')\\\n",
" .values(\"rotamer__residue__generic_number__label\").annotate(unique_receptors=Count(\"rotamer__residue__protein_conformation__protein__family_id\", distinct=True))\n",
"\n",
" rfb_panel[\"ligand_binding\"] = {entry[\"rotamer__residue__generic_number__label\"] : entry[\"unique_receptors\"] for entry in list(class_a_interactions)}\n",
"\n",
" # Add genetic variations\n",
" all_nat_muts = NaturalMutations.objects.filter(protein__family__slug__startswith=\"001\").values(\"residue__generic_number__label\").annotate(unique_receptors=Count(\"protein__family_id\", distinct=True))\n",
" rfb_panel[\"natural_mutations\"] = {entry[\"residue__generic_number__label\"] : entry[\"unique_receptors\"] for entry in list(all_nat_muts)}\n",
"\n",
" # Add PTMs\n",
" all_ptms = PTMs.objects.filter(protein__family__slug__startswith=\"001\").values(\"residue__generic_number__label\").annotate(unique_receptors=Count(\"protein__family_id\", distinct=True))\n",
" rfb_panel[\"ptms\"] = {entry[\"residue__generic_number__label\"] : entry[\"unique_receptors\"] for entry in list(all_ptms)}\n",
" all_phos = PTMs.objects.filter(protein__family__slug__startswith=\"001\").filter(modification=\"Phosphorylation\").values(\"residue__generic_number__label\").annotate(unique_receptors=Count(\"protein__family_id\", distinct=True))\n",
" rfb_panel[\"phos\"] = {entry[\"residue__generic_number__label\"] : entry[\"unique_receptors\"] for entry in list(all_phos)}\n",
" all_palm = PTMs.objects.filter(protein__family__slug__startswith=\"001\").filter(modification=\"Palmitoylation\").values(\"residue__generic_number__label\").annotate(unique_receptors=Count(\"protein__family_id\", distinct=True))\n",
" rfb_panel[\"palm\"] = {entry[\"residue__generic_number__label\"] : entry[\"unique_receptors\"] for entry in list(all_palm)}\n",
" all_glyc = PTMs.objects.filter(protein__family__slug__startswith=\"001\").filter(modification__endswith=\"Glycosylation\").values(\"residue__generic_number__label\").annotate(unique_receptors=Count(\"protein__family_id\", distinct=True))\n",
" rfb_panel[\"glyc\"] = {entry[\"residue__generic_number__label\"] : entry[\"unique_receptors\"] for entry in list(all_glyc)}\n",
" all_ubiq = PTMs.objects.filter(protein__family__slug__startswith=\"001\").filter(modification=\"Ubiquitylation\").values(\"residue__generic_number__label\").annotate(unique_receptors=Count(\"protein__family_id\", distinct=True))\n",
" rfb_panel[\"ubiq\"] = {entry[\"residue__generic_number__label\"] : entry[\"unique_receptors\"] for entry in list(all_ubiq)}\n",
"\n",
" # Thermostabilizing\n",
" all_thermo = ConstructMutation.objects.filter(construct__protein__family__slug__startswith=\"001\", effects__slug='thermostabilising')\\\n",
" .values(\"residue__generic_number__label\").annotate(unique_receptors=Count(\"construct__protein__family_id\", distinct=True))\n",
" rfb_panel[\"thermo_mutations\"] = {entry[\"residue__generic_number__label\"] : entry[\"unique_receptors\"] for entry in list(all_thermo)}\n",
"\n",
"\n",
" # Class A ligand mutations >5 fold effect - count unique receptors\n",
" all_ligand_mutations = MutationExperiment.objects.filter(Q(foldchange__gte = 5) | Q(foldchange__lte = -5), protein__family__slug__startswith=\"001\")\\\n",
" .values(\"residue__generic_number__label\").annotate(unique_receptors=Count(\"protein__family_id\", distinct=True))\n",
" rfb_panel[\"ligand_mutations\"] = {entry[\"residue__generic_number__label\"] : entry[\"unique_receptors\"] for entry in list(all_ligand_mutations)}\n",
"\n",
" # Class A mutations with >30% increase/decrease basal activity\n",
" all_basal_mutations = MutationExperiment.objects.filter(Q(opt_basal_activity__gte = 130) | Q(opt_basal_activity__lte = 70), protein__family__slug__startswith=\"001\")\\\n",
" .values(\"residue__generic_number__label\").annotate(unique_receptors=Count(\"protein__family_id\", distinct=True))\n",
" rfb_panel[\"basal_mutations\"] = {entry[\"residue__generic_number__label\"] : entry[\"unique_receptors\"] for entry in list(all_basal_mutations)}\n",
"\n",
" # Intrasegment contacts\n",
" all_contacts = InteractingResiduePair.objects.filter(~Q(res1__protein_segment_id = F('res2__protein_segment_id')), referenced_structure__protein_conformation__protein__family__slug__startswith=\"001\")\\\n",
" .values(\"res1__generic_number__label\").annotate(unique_receptors=Count(\"referenced_structure__protein_conformation__protein__family_id\", distinct=True))\n",
" rfb_panel[\"intrasegment_contacts\"] = {entry[\"res1__generic_number__label\"] : entry[\"unique_receptors\"] for entry in list(all_contacts)}\n",
"\n",
"\n",
" # Active/Inactive contacts\n",
" all_active_contacts = InteractingResiduePair.objects.filter(~Q(res2__generic_number__label = None), ~Q(res1__generic_number__label = None),\\\n",
" referenced_structure__state__slug = \"active\", referenced_structure__protein_conformation__protein__family__slug__startswith=\"001\")\\\n",
" .values(\"res1__generic_number__label\", \"res2__generic_number__label\")\n",
"\n",
" # OPTIMIZE\n",
" active_contacts = {}\n",
" for entry in list(all_active_contacts):\n",
" if entry[\"res1__generic_number__label\"] not in active_contacts:\n",
" active_contacts[entry[\"res1__generic_number__label\"]] = set()\n",
" active_contacts[entry[\"res1__generic_number__label\"]].update([entry[\"res2__generic_number__label\"]])\n",
" rfb_panel[\"active_contacts\"] = active_contacts\n",
"\n",
" all_inactive_contacts = InteractingResiduePair.objects.filter(~Q(res2__generic_number__label = None), ~Q(res1__generic_number__label = None),\\\n",
" referenced_structure__state__slug = \"inactive\", referenced_structure__protein_conformation__protein__family__slug__startswith=\"001\")\\\n",
" .values(\"res1__generic_number__label\", \"res2__generic_number__label\")\n",
"\n",
" # OPTIMIZE\n",
" inactive_contacts = {}\n",
" for entry in list(all_inactive_contacts):\n",
" if entry[\"res1__generic_number__label\"] not in inactive_contacts:\n",
" inactive_contacts[entry[\"res1__generic_number__label\"]] = set()\n",
" inactive_contacts[entry[\"res1__generic_number__label\"]].update([entry[\"res2__generic_number__label\"]])\n",
" rfb_panel[\"inactive_contacts\"] = inactive_contacts\n",
"\n",
" cache.set(cache_name, rfb_panel, 3600*24*7) # cache a week\n",
"\n",
" # Other rules\n",
"# structural_rule_tree = create_structural_rule_trees(STRUCTURAL_RULES)\n",
"\n",
" ######## CREATE REFERENCE sets (or use structural rules)\n",
"\n",
" ## MICROSWITCHES\n",
" ms_labels = [residue.label for residue in ResiduePositionSet.objects.get(name=\"State (micro-)switches\").residue_position.all()]\n",
"\n",
" ## SODIUM POCKET\n",
" sp_labels = [residue.label for residue in ResiduePositionSet.objects.get(name=\"Sodium ion pocket\").residue_position.all()]\n",
"\n",
" ## ROTAMER SWITCHES\n",
" rotamer_labels = []\n",
" for entry in STRUCTURAL_SWITCHES[\"A\"]:\n",
" if entry[\"Rotamer Switch\"] != \"-\":\n",
" rotamer_labels.append(entry[\"AA1 Pos\"])\n",
" rotamer_labels.append(entry[\"AA2 Pos\"])\n",
"\n",
"\n",
" ## G PROTEIN INTERACTION POSITIONS\n",
"# gprotein_labels = [residue.label for residue in ResiduePositionSet.objects.get(name=\"Signalling protein pocket\").residue_position.all()]\n",
" # Class A G-protein X-ray contacts\n",
" # TODO: replace with automatically generated sets from X-rays stored in database\n",
" gprotein_labels = {\"1x60\": {\"001_006_001_001\", \" 001_006_001_002\"},\n",
" \"12x48\": {\"001_001_003_008\", \" 001_006_001_001\", \" 001_006_001_002\", \" 001_009_001_001\"},\n",
" \"12x49\": {\"001_001_003_008\", \" 001_006_001_002\"},\n",
" \"12x51\": {\"001_006_001_002\"},\n",
" \"2x37\": {\"001_006_001_001\"},\n",
" \"2x39\": {\"001_002_022_003\"},\n",
" \"2x40\": {\"001_006_001_001\"},\n",
" \"3x49\": {\"001_001_003_008\", \" 001_002_022_003\"},\n",
" \"3x50\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_001\", \" 001_006_001_002\", \" 001_009_001_001\"},\n",
" \"3x53\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_001\", \" 001_006_001_002\"},\n",
" \"3x54\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_001\", \" 001_006_001_002\", \" 001_009_001_001\"},\n",
" \"3x55\": {\"001_001_003_008\", \" 001_006_001_002\"},\n",
" \"3x56\": {\"001_006_001_002\", \" 001_009_001_001\"},\n",
" \"34x50\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_001\", \" 001_006_001_002\"},\n",
" \"34x51\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_001\", \" 001_006_001_002\"},\n",
" \"34x52\": {\"001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_002\"},\n",
" \"34x53\": {\"001_001_003_008\", \" 001_006_001_002\"},\n",
" \"34x54\": {\"001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_002\"},\n",
" \"34x55\": {\"001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_002\", \" 001_009_001_001\"},\n",
" \"34x57\": {\"001_001_001_002\", \" 001_002_022_003\"},\n",
" \"4x40\": {\"001_002_022_003\"},\n",
" \"5x61\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_001\", \" 001_006_001_002\"},\n",
" \"5x64\": {\"001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_002\"},\n",
" \"5x65\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_001\", \" 001_006_001_002\"},\n",
" \"5x67\": {\"001_001_003_008\"},\n",
" \"5x68\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_001\", \" 001_006_001_002\"},\n",
" \"5x69\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_006_001_001\", \" 001_006_001_002\"},\n",
" \"5x71\": {\"001_001_003_008\", \" 001_006_001_001\", \" 001_006_001_002\"},\n",
" \"5x72\": {\"001_001_003_008\", \" 001_006_001_002\", \" 001_009_001_001\"},\n",
" \"5x74\": {\"001_001_003_008\"},\n",
" \"6x23\": {\"001_002_022_003\"},\n",
" \"6x24\": {\"001_009_001_001\"},\n",
" \"6x25\": {\"001_002_022_003\", \" 001_006_001_001\", \" 001_009_001_001\"},\n",
" \"6x26\": {\"001_002_022_003\", \" 001_009_001_001\"},\n",
" \"6x28\": {\"001_009_001_001\"},\n",
" \"6x29\": {\"001_001_001_002\", \" 001_006_001_001\", \" 001_006_001_002\", \" 001_009_001_001\"},\n",
" \"6x32\": {\"001_001_001_002\", \" 001_002_022_003\", \" 001_006_001_001\", \" 001_006_001_002\", \" 001_009_001_001\"},\n",
" \"6x33\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_001\", \" 001_006_001_002\", \" 001_009_001_001\"},\n",
" \"6x36\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_002_022_003\", \" 001_006_001_002\", \" 001_009_001_001\"},\n",
" \"6x37\": {\"001_001_001_002\", \" 001_001_003_008\", \" 001_006_001_001\", \" 001_006_001_002\"},\n",
" \"7x56\": {\"001_001_001_002\", \" 001_006_001_001\", \" 001_006_001_002\", \" 001_009_001_001\"},\n",
" \"8x47\": {\"001_001_001_002\", \" 001_002_022_003\", \" 001_006_001_001\", \" 001_009_001_001\"},\n",
" \"8x48\": {\"001_002_022_003\", \" 001_006_001_002\", \" 001_009_001_001\"},\n",
" \"8x49\": {\"001_006_001_001\", \" 001_006_001_002\"},\n",
" \"8x51\": {\"001_006_001_002\"},\n",
" \"8x56\": {\"001_006_001_001\"}}\n",
"\n",
" # TODO: replace with automatically generated sets from X-rays stored in database\n",
" # Class A Arrestin X-ray contacts\n",
" arrestin_labels = {\"12x49\": {\"001_009_001_001\"},\n",
" \"2x37\": {\"001_009_001_001\"},\n",
" \"2x38\": {\"001_009_001_001\"},\n",
" \"2x39\": {\"001_009_001_001\"},\n",
" \"2x40\": {\"001_009_001_001\"},\n",
" \"2x43\": {\"001_009_001_001\"},\n",
" \"3x50\": {\"001_009_001_001\"},\n",
" \"3x54\": {\"001_009_001_001\"},\n",
" \"3x55\": {\"001_009_001_001\"},\n",
" \"3x56\": {\"001_009_001_001\"},\n",
" \"34x50\": {\"001_009_001_001\"},\n",
" \"34x51\": {\"001_009_001_001\"},\n",
" \"34x53\": {\"001_009_001_001\"},\n",
" \"34x54\": {\"001_009_001_001\"},\n",
" \"34x55\": {\"001_009_001_001\"},\n",
" \"34x56\": {\"001_009_001_001\"},\n",
" \"4x38\": {\"001_009_001_001\"},\n",
" \"5x61\": {\"001_009_001_001\"},\n",
" \"5x64\": {\"001_009_001_001\"},\n",
" \"5x68\": {\"001_009_001_001\"},\n",
" \"5x69\": {\"001_009_001_001\"},\n",
" \"5x71\": {\"001_009_001_001\"},\n",
" \"5x72\": {\"001_009_001_001\"},\n",
" \"6x24\": {\"001_009_001_001\"},\n",
" \"6x25\": {\"001_009_001_001\"},\n",
" \"6x26\": {\"001_009_001_001\"},\n",
" \"6x28\": {\"001_009_001_001\"},\n",
" \"6x29\": {\"001_009_001_001\"},\n",
" \"6x32\": {\"001_009_001_001\"},\n",
" \"6x33\": {\"001_009_001_001\"},\n",
" \"6x36\": {\"001_009_001_001\"},\n",
" \"6x37\": {\"001_009_001_001\"},\n",
" \"6x40\": {\"001_009_001_001\"},\n",
" \"8x47\": {\"001_009_001_001\"},\n",
" \"8x48\": {\"001_009_001_001\"},\n",
" \"8x49\": {\"001_009_001_001\"},\n",
" \"8x50\": {\"001_009_001_001\"}}\n",
"\n",
" # Positions in center of membrane selected using 4BVN (ADRB1) together with OPM membrane positioning\n",
" # Reference: ['1x44', '2x52', '3x36', '4x54', '5x46', '6x48', '7x43']\n",
" mid_membrane_classA = {'TM1': 44,'TM2': 52,'TM3': 36,'TM4': 54,'TM5': 46, 'TM6': 48, 'TM7': 43}\n",
"\n",
" # NOTE: We might need to split this into B1 and B2 when adhesion X-rays are published\n",
" # Positions in center of membrane selected using 5XEZ (GCGR) together with OPM membrane positioning\n",
" # Reference: ['1x51', '2x58', '3x41', '4x54', '5x45', '6x49', '7x50']\n",
" mid_membrane_classB = {'TM1': 51,'TM2': 58,'TM3': 41,'TM4': 54,'TM5': 45, 'TM6': 49, 'TM7': 50}\n",
"\n",
" # Positions in center of membrane selected using 4OR2 (mGLUR1) together with OPM membrane positioning\n",
" # Reference: ['1x49', '2x48', '3x40', '4x41', '5x48', '6x48', '7.39x40']\n",
" mid_membrane_classC = {'TM1': 49,'TM2': 48,'TM3': 40,'TM4': 41,'TM5': 48, 'TM6': 48, 'TM7': 40}\n",
"\n",
" # Positions in center of membrane selected using 6BD4 (FZD4) together with OPM membrane positioning\n",
" # Reference: ['1x43', '2x53', '3x38', '4x53', '5x53', '6x43', '7x47']\n",
" mid_membrane_classF = {'TM1': 43,'TM2': 53,'TM3': 38,'TM4': 53,'TM5': 53, 'TM6': 43, 'TM7': 47}\n",
"\n",
" # Positions within membrane layer selected using 4BVN together with OPM membrane positioning\n",
" core_membrane_classA = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}\n",
" # TODO: other classes\n",
" core_membrane_classB = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}\n",
" core_membrane_classC = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}\n",
" core_membrane_classF = {'TM1': [33, 55],'TM2': [42,65],'TM3': [23,47],'TM4': [43,64],'TM5': [36,59], 'TM6': [37,60], 'TM7': [32,54]}\n",
"\n",
" # Residue oriented outward of bundle (based on inactive 4BVN and active 3SN6)\n",
" outward_orientation = {\n",
" 'TM1' : [29, 30, 33, 34, 36, 37, 38, 40, 41, 44, 45, 48, 51, 52, 54, 55, 58],\n",
" 'TM2' : [38, 41, 45, 48, 52, 55, 56, 58, 59, 60, 62, 63, 66],\n",
" 'TM3' : [23, 24, 27, 31, 48, 51, 52, 55],\n",
" 'TM4' : [40, 41, 43, 44, 47, 48, 50, 51, 52, 54, 55, 58, 59, 62, 63, 81],\n",
" 'TM5' : [36, 37, 38, 40, 41, 42, 44, 45, 46, 48, 49, 52, 53, 55, 56, 57, 59, 60, 62, 63, 64, 66, 67, 68, 70, 71, 73, 74],\n",
" 'TM6' : [25, 28, 29, 31, 32, 34, 35, 38, 39, 42, 43, 45, 46, 49, 50, 53, 54, 56, 57, 60],\n",
" 'TM7' : [33, 34, 35, 37, 40, 41, 43, 44, 48, 51, 52, 54, 55]\n",
" }\n",
"\n",
" ########\n",
"\n",
" # prepare context for output\n",
" context = {\"signatures\" : []}\n",
" index = 0\n",
" for h, segment in enumerate(rfb_panel[\"signatures\"][\"gs_positions\"][\"gpcrdba\"]):\n",
" segment_first = True\n",
" for i, position in enumerate(rfb_panel[\"signatures\"][\"gs_positions\"][\"gpcrdba\"][segment]):\n",
" if len(position) <= 5:\n",
" # To filter segment headers with non-GN numbering\n",
" if segment_first:\n",
" context[\"signatures\"].append({\"position\" : segment})\n",
" index += 1\n",
" segment_first = False\n",
"\n",
" # Add data\n",
" context[\"signatures\"].append({})\n",
" context[\"signatures\"][index][\"segment\"] = segment\n",
" context[\"signatures\"][index][\"sort\"] = index\n",
" context[\"signatures\"][index][\"position\"] = position\n",
"\n",
" # Normalized position in TM\n",
" partial_position = int(position.split('x')[1][:2])\n",
"\n",
" # RESIDUE PLACEMENT\n",
" context[\"signatures\"][index][\"membane_placement\"] = \"-\"\n",
" context[\"signatures\"][index][\"membane_segment\"] = \"Extracellular\"\n",
" context[\"signatures\"][index][\"residue_orientation\"] = \"-\"\n",
" if segment in mid_membrane_classA: # TM helix\n",
" # parse position\n",
" context[\"signatures\"][index][\"membane_placement\"] = partial_position - mid_membrane_classA[segment]\n",
"\n",
" # negative is toward cytoplasm\n",
" if segment in ['TM1', 'TM3', 'TM5', 'TM7']: # downwards\n",
" context[\"signatures\"][index][\"membane_placement\"] = -1 * context[\"signatures\"][index][\"membane_placement\"]\n",
"\n",
" # Segment selection\n",
" if partial_position >= core_membrane_classA[segment][0] and partial_position <= core_membrane_classA[segment][1]:\n",
" context[\"signatures\"][index][\"membane_segment\"] = \"Membrane\"\n",
" elif segment in ['TM1', 'TM3', 'TM5', 'TM7']:\n",
" if partial_position > core_membrane_classA[segment][1]:\n",
" context[\"signatures\"][index][\"membane_segment\"] = \"Intracellular\"\n",
" else:\n",
" if partial_position < core_membrane_classA[segment][0]:\n",
" context[\"signatures\"][index][\"membane_segment\"] = \"Intracellular\"\n",
"\n",
" # Orientation\n",
" if partial_position in outward_orientation[segment]:\n",
" context[\"signatures\"][index][\"residue_orientation\"] = \"Outward\"\n",
" else:\n",
" if partial_position > min(outward_orientation[segment]) and partial_position < max(outward_orientation[segment]):\n",
" context[\"signatures\"][index][\"residue_orientation\"] = \"Inward\"\n",
" # Intracellular segments\n",
" elif segment in ['ICL1', 'ICL2', 'ICL3', 'TM8', 'C-term']:\n",
" context[\"signatures\"][index][\"membane_segment\"] = \"Intracellular\"\n",
"\n",
" # COUNTS: all db results in a singe loop\n",
" for key in [\"ligand_binding\", \"natural_mutations\", \"thermo_mutations\", \"ligand_mutations\", \"basal_mutations\", \"intrasegment_contacts\", \"phos\", \"palm\", \"glyc\", \"ubiq\" ]: # Add in future \"gprotein_interface\", \"arrestin_interface\"\n",
" context[\"signatures\"][index][key] = 0\n",
" if position in rfb_panel[key]:\n",
" context[\"signatures\"][index][key] = rfb_panel[key][position]\n",
"\n",
" # G-protein interface\n",
" context[\"signatures\"][index][\"gprotein_interface\"] = 0\n",
" if position in gprotein_labels:\n",
" context[\"signatures\"][index][\"gprotein_interface\"] = len(gprotein_labels[position])\n",
"\n",
" # Arrestin interface\n",
" context[\"signatures\"][index][\"arrestin_interface\"] = 0\n",
" if position in arrestin_labels:\n",
" context[\"signatures\"][index][\"arrestin_interface\"] = len(arrestin_labels[position])\n",
"\n",
" # BINARY\n",
"\n",
" # Microswitch\n",
" context[\"signatures\"][index][\"microswitch\"] = position in ms_labels\n",
"\n",
" # Sodium pocket\n",
" context[\"signatures\"][index][\"sodium\"] = position in sp_labels\n",
"\n",
" # Rotamer switch\n",
" context[\"signatures\"][index][\"rotamer_switch\"] = position in rotamer_labels\n",
"\n",
" # contacts\n",
" context[\"signatures\"][index][\"active_contacts\"] = 0\n",
" if position in rfb_panel[\"active_contacts\"]:\n",
" if position in rfb_panel[\"inactive_contacts\"]:\n",
" context[\"signatures\"][index][\"active_contacts\"] = len(rfb_panel[\"active_contacts\"][position].difference(rfb_panel[\"inactive_contacts\"][position]))\n",
" else:\n",
" context[\"signatures\"][index][\"active_contacts\"] = len(rfb_panel[\"active_contacts\"][position])\n",
"\n",
" context[\"signatures\"][index][\"inactive_contacts\"] = 0\n",
" if position in rfb_panel[\"inactive_contacts\"]:\n",
" if position in rfb_panel[\"active_contacts\"]:\n",
" context[\"signatures\"][index][\"inactive_contacts\"] = len(rfb_panel[\"inactive_contacts\"][position].difference(rfb_panel[\"active_contacts\"][position]))\n",
" else:\n",
" context[\"signatures\"][index][\"inactive_contacts\"] = len(rfb_panel[\"inactive_contacts\"][position])\n",
"\n",
" # CLASS A sequence + property consensus\n",
" if position in rfb_panel[\"class_a_positions\"][\"gpcrdba\"][segment]:\n",
" ca_index = list(rfb_panel[\"class_a_positions\"][\"gpcrdba\"][segment]).index(position)\n",
"\n",
" # Sequence consensus\n",
" context[\"signatures\"][index][\"class_a_aa\"] = rfb_panel[\"class_a_aa\"][segment][position][0]\n",
" context[\"signatures\"][index][\"class_a_aa_name\"] = FULL_AMINO_ACIDS[rfb_panel[\"class_a_aa\"][segment][position][0]]\n",
" if context[\"signatures\"][index][\"class_a_aa\"] == '+':\n",
" context[\"signatures\"][index][\"class_a_aa_name\"] += \": \"+rfb_panel[\"class_a_aa\"][segment][position][3]\n",
" context[\"signatures\"][index][\"class_a_aa_cons\"] = rfb_panel[\"class_a_aa\"][segment][position][2]\n",
"\n",
" # Property consensus\n",
" context[\"signatures\"][index][\"class_a_symb\"] = rfb_panel[\"class_a_prop\"][segment][i][0]\n",
" context[\"signatures\"][index][\"class_a_prop\"] = rfb_panel[\"class_a_prop\"][segment][i][1]\n",
" context[\"signatures\"][index][\"class_a_prop_cons\"] = rfb_panel[\"class_a_prop\"][segment][i][2]\n",
"\n",
" # SEQUENCE SIGNATURES\n",
" for signature_type in [\"cah\", \"cal\", \"gs\", \"gio\", \"gq\", \"g12\"]:\n",
" if position in rfb_panel[\"signatures\"][signature_type + \"_positions\"][\"gpcrdba\"][segment]:\n",
" ca_index = list(rfb_panel[\"signatures\"][signature_type + \"_positions\"][\"gpcrdba\"][segment]).index(position)\n",
" context[\"signatures\"][index][signature_type + \"_score\"] = rfb_panel[\"signatures\"][signature_type][segment][ca_index][2]\n",
" context[\"signatures\"][index][signature_type + \"_prop\"] = rfb_panel[\"signatures\"][signature_type][segment][ca_index][1]\n",
" context[\"signatures\"][index][signature_type + \"_symb\"] = rfb_panel[\"signatures\"][signature_type][segment][ca_index][0]\n",
"\n",
" index += 1\n",
"\n",
" # Human Class A alignment - consensus/conservation\n",
" return context\n",
"\n",
"\n",
"def render_residue_table_excel(request):\n",
"\n",
" simple_selection = request.session.get('selection', False)\n",
" origin_checked = False\n",
" # local protein list\n",
" proteins = []\n",
"\n",
" # flatten the selection into individual proteins\n",
" for target in simple_selection.targets:\n",
" if target.type == 'protein':\n",
" proteins.append(target.item)\n",
" if origin_checked == False:\n",
" signalling_data = ResidueTablesDisplay.checkOrigin(target.item)\n",
" origin_checked = True\n",
" elif target.type == 'family':\n",
" # species filter\n",
" species_list = []\n",
" for species in simple_selection.species:\n",
" species_list.append(species.item)\n",
"\n",
" # annotation filter\n",
" protein_source_list = []\n",
" for protein_source in simple_selection.annotation:\n",
" protein_source_list.append(protein_source.item)\n",
"\n",
" if species_list:\n",
" family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,\n",
" species__in=(species_list),\n",
" source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')\n",
" else:\n",
" family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,\n",
" source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')\n",
"\n",
" for fp in family_proteins:\n",
" proteins.append(fp)\n",
" if origin_checked == False:\n",
" signalling_data = ResidueTablesDisplay.checkOrigin(fp)\n",
" origin_checked = True\n",
"\n",
" longest_name = 0\n",
" species_list = {}\n",
" for protein in proteins:\n",
" if protein.species.common_name not in species_list:\n",
" if len(protein.species.common_name)>10 and len(protein.species.common_name.split())>1:\n",
" name = protein.species.common_name.split()[0][0]+\". \"+\" \".join(protein.species.common_name.split()[1:])\n",
" if len(\" \".join(protein.species.common_name.split()[1:]))>11:\n",
" name = protein.species.common_name.split()[0][0]+\". \"+\" \".join(protein.species.common_name.split()[1:])[:8]+\"..\"\n",
" else:\n",
" name = protein.species.common_name\n",
" species_list[protein.species.common_name] = name\n",
" else:\n",
" name = species_list[protein.species.common_name]\n",
"\n",
" if len(re.sub('<[^>]*>', '', protein.name)+\" \"+name)>longest_name:\n",
" longest_name = len(re.sub('<[^>]*>', '', protein.name)+\" \"+name)\n",
"\n",
" # get the selection from session\n",
" selection = Selection()\n",
" if simple_selection:\n",
" selection.importer(simple_selection)\n",
" # # extract numbering schemes and proteins\n",
" if signalling_data == 'GPCR':\n",
" numbering_schemes = [x.item for x in selection.numbering_schemes]\n",
" elif signalling_data == 'gprot':\n",
" numbering_schemes = ResidueNumberingScheme.objects.filter(slug='cgn')\n",
" elif signalling_data == 'arrestins':\n",
" numbering_schemes = ResidueNumberingScheme.objects.filter(slug='can')\n",
"\n",
" # Get all the segments\n",
" if signalling_data == 'GPCR':\n",
" segments = ProteinSegment.objects.filter(proteinfamily='GPCR')\n",
" elif signalling_data == 'gprot':\n",
" segments = ProteinSegment.objects.filter(proteinfamily='Alpha')\n",
" elif signalling_data == 'arrestins':\n",
" segments = ProteinSegment.objects.filter(proteinfamily='Arrestin')\n",
"\n",
" if ResidueNumberingScheme.objects.get(slug=settings.DEFAULT_NUMBERING_SCHEME) in numbering_schemes:\n",
" default_scheme = ResidueNumberingScheme.objects.get(slug=settings.DEFAULT_NUMBERING_SCHEME)\n",
" else:\n",
" default_scheme = numbering_schemes[0]\n",
"\n",
" # prepare the dictionary\n",
" # each helix has a dictionary of positions\n",
" # default_generic_number or first scheme on the list is the key\n",
" # value is a dictionary of other gn positions and residues from selected proteins\n",
" data = OrderedDict()\n",
" for segment in segments:\n",
" data[segment.slug] = OrderedDict()\n",
" residues = Residue.objects.filter(protein_segment=segment, protein_conformation__protein__in=proteins, generic_number__isnull=False).prefetch_related('protein_conformation__protein', 'protein_conformation__state', 'protein_segment',\n",
" 'generic_number__scheme', 'display_generic_number__scheme', 'alternative_generic_numbers__scheme')\n",
" for scheme in numbering_schemes:\n",
" if scheme == default_scheme and scheme.slug == settings.DEFAULT_NUMBERING_SCHEME:\n",
" for pos in list(set([x.generic_number.label for x in residues if x.protein_segment == segment])):\n",
" data[segment.slug][pos] = {scheme.slug : pos, 'seq' : ['-']*len(proteins)}\n",
" elif scheme == default_scheme:\n",
" for pos in list(set([x.generic_number.label for x in residues if x.protein_segment == segment])):\n",
" data[segment.slug][pos] = {scheme.slug : pos, 'seq' : ['-']*len(proteins)}\n",
"\n",
" for residue in residues:\n",
" alternatives = residue.alternative_generic_numbers.all()\n",
" pos = residue.generic_number\n",
" if len(alternatives) == 0:\n",
" data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)\n",
" for alternative in alternatives:\n",
" scheme = alternative.scheme\n",
" if default_scheme.slug == settings.DEFAULT_NUMBERING_SCHEME:\n",
" pos = residue.generic_number\n",
" if scheme == pos.scheme:\n",
" data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)\n",
" else:\n",
" if scheme.slug not in data[segment.slug][pos.label].keys():\n",
" data[segment.slug][pos.label][scheme.slug] = alternative.label\n",
" if alternative.label not in data[segment.slug][pos.label][scheme.slug]:\n",
" data[segment.slug][pos.label][scheme.slug] += \" \"+alternative.label\n",
" data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)\n",
" else:\n",
" if scheme.slug not in data[segment.slug][pos.label].keys():\n",
" data[segment.slug][pos.label][scheme.slug] = alternative.label\n",
" if alternative.label not in data[segment.slug][pos.label][scheme.slug]:\n",
" data[segment.slug][pos.label][scheme.slug] += \" \"+alternative.label\n",
" data[segment.slug][pos.label]['seq'][proteins.index(residue.protein_conformation.protein)] = str(residue)\n",
"\n",
" # Preparing the dictionary of list of lists. Dealing with tripple nested dictionary in django templates is a nightmare\n",
" flattened_data = OrderedDict.fromkeys([x.slug for x in segments], [])\n",
"\n",
" for s in iter(flattened_data):\n",
" flattened_data[s] = [[data[s][x][y.slug] if y.slug in data[s][x] else \"-\" for y in numbering_schemes ]+data[s][x]['seq'] for x in sorted(data[s])]\n",
" #Purging the empty segments\n",
" clean_dict = OrderedDict()\n",
" clean_segments = []\n",
" for s in iter(flattened_data):\n",
" if flattened_data[s] != []:\n",
" clean_dict[s] = flattened_data[s]\n",
" clean_segments.append(s)\n",
"\n",
" segments = clean_segments\n",
" flattened_data = clean_dict\n",
"\n",
" header = [x.short_name for x in numbering_schemes] + [x.name+\" \"+species_list[x.species.common_name] for x in proteins]\n",
"\n",
"\n",
" # Now excel time\n",
"\n",
" outstream = BytesIO()\n",
" wb = xlsxwriter.Workbook(outstream, {'in_memory': True})\n",
" sub = wb.add_format({'font_script': 2})\n",
" bold = wb.add_format({'bold': True})\n",
"\n",
" worksheet = wb.add_worksheet('Residue table')\n",
"\n",
" row_offset = 0\n",
"\n",
" #Header row\n",
"\n",
" #Numbering schemes\n",
" for i, x in enumerate(numbering_schemes):\n",
" worksheet.write(0, i, x.short_name)\n",
"\n",
" #Protein names\n",
" col_offset = len(numbering_schemes)\n",
"\n",
" for i, x in enumerate(proteins):\n",
" t = html_to_rich_format_string(x.name + \" \" + species_list[x.species.common_name], sub)\n",
" if len(t) < 2:\n",
" worksheet.write(0, col_offset + i, html.unescape(x.name + \" \" + species_list[x.species.common_name]))\n",
" else:\n",
" worksheet.write_rich_string(0, col_offset + i, *t)\n",
" row_offset += 1\n",
"\n",
" for segment in segments:\n",
" worksheet.write(row_offset, 0, segment, bold)\n",
" row_offset += 1\n",
"\n",
" for j, data_row in enumerate(flattened_data[segment]):\n",
" worksheet.write_row(row_offset + j, 0, data_row)\n",
" row_offset += len(flattened_data[segment])\n",
"\n",
"\n",
" wb.close()\n",
" outstream.seek(0)\n",
" response = HttpResponse(\n",
" outstream.read(),\n",
" content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n",
" )\n",
" response['Content-Disposition'] = \"attachment; filename=residue_table.xlsx\"\n",
"\n",
" return response\n",
"\n",
"\n",
"def html_to_rich_format_string(input_text, wb_format):\n",
"\n",
" ucode_str = html.unescape(input_text)\n",
" tmp =[]\n",
" for chunk in ucode_str.split('</sub>'):\n",
" if '<sub>' not in chunk:\n",
" tmp.append(chunk)\n",
" continue\n",
" t = chunk.split('<sub>')\n",
" tmp.extend([t[0], wb_format, t[1]])\n",
"\n",
" return tmp\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011494252873563218,
0,
0,
0,
0,
0,
0,
0.0136986301369863,
0,
0.016666666666666666,
0.012987012987012988,
0.013157894736842105,
0.017543859649122806,
0.017241379310344827,
0.02127659574468085,
0.017391304347826087,
0.015151515151515152,
0.03333333333333333,
0,
0.027777777777777776,
0,
0.08333333333333333,
0.1,
0.08333333333333333,
0.043478260869565216,
0.05555555555555555,
0,
0.023255813953488372,
0,
0,
0.02,
0,
0,
0,
0,
0,
0,
0.008547008547008548,
0.008547008547008548,
0.008403361344537815,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0.07142857142857142,
0,
0.043478260869565216,
0.021739130434782608,
0.019230769230769232,
0.030303030303030304,
0,
0,
0,
0.018518518518518517,
0,
0,
0,
0,
0,
0.008547008547008548,
0.008547008547008548,
0,
0,
0,
0,
0,
0.043478260869565216,
0.021739130434782608,
0.019230769230769232,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00847457627118644,
0,
0.08333333333333333,
0,
0,
0.017543859649122806,
0,
0,
0,
0,
0,
0.008547008547008548,
0.008547008547008548,
0,
0,
0,
0,
0.07142857142857142,
0,
0.043478260869565216,
0.021739130434782608,
0.019230769230769232,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00847457627118644,
0,
0.08333333333333333,
0,
0,
0.023809523809523808,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.028169014084507043,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0.019230769230769232,
0.008928571428571428,
0,
0.009615384615384616,
0.017857142857142856,
0,
0,
0,
0.020833333333333332,
0,
0,
0,
0,
0,
0,
0,
0.028037383177570093,
0.0078125,
0.023255813953488372,
0.0070921985815602835,
0,
0,
0,
0,
0,
0,
0.024096385542168676,
0.011764705882352941,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0.012195121951219513,
0,
0.012195121951219513,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0.009615384615384616,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0.004081632653061225,
0.017391304347826087,
0,
0.01020408163265306,
0.00847457627118644,
0.030303030303030304,
0,
0.00847457627118644,
0.038834951456310676,
0,
0,
0,
0.014084507042253521,
0,
0,
0.007936507936507936,
0,
0,
0,
0.011764705882352941,
0,
0,
0.007246376811594203,
0,
0.010869565217391304,
0.010101010101010102,
0.009615384615384616,
0.009615384615384616,
0.007246376811594203,
0,
0.011363636363636364,
0.010526315789473684,
0.01,
0.01,
0.007462686567164179,
0,
0.007874015748031496,
0,
0,
0,
0.018518518518518517,
0,
0.027777777777777776,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0.05405405405405406,
0.04878048780487805,
0.05,
0.019230769230769232,
0.018867924528301886,
0.0196078431372549,
0.01818181818181818,
0,
0,
0.011799410029498525,
0,
0,
0.009287925696594427,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0.006024096385542169,
0.008,
0,
0,
0.007751937984496124,
0.007874015748031496,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0049504950495049506,
0.022727272727272728,
0.011363636363636364,
0.022727272727272728,
0.011363636363636364,
0,
0,
0,
0,
0.02040816326530612,
0,
0,
0.009174311926605505,
0,
0,
0.011494252873563218,
0,
0,
0,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0.0064516129032258064,
0.005649717514124294,
0,
0.012422360248447204,
0,
0,
0.004739336492890996,
0.013605442176870748,
0,
0,
0.005128205128205128,
0.015384615384615385,
0.004273504273504274,
0.015384615384615385,
0.004291845493562232,
0.015384615384615385,
0.004132231404958678,
0.015384615384615385,
0.004291845493562232,
0.015384615384615385,
0,
0,
0.00684931506849315,
0.013605442176870748,
0.013888888888888888,
0,
0,
0.012658227848101266,
0.031055900621118012,
0.014285714285714285,
0.012987012987012988,
0,
0,
0.028089887640449437,
0.014285714285714285,
0.013157894736842105,
0,
0,
0.014084507042253521,
0.011049723756906077,
0.013513513513513514,
0,
0,
0.02564102564102564,
0.0392156862745098,
0.02631578947368421,
0.022222222222222223,
0,
0,
0,
0,
0,
0.012195121951219513,
0.008547008547008548,
0,
0,
0.03870967741935484,
0.025974025974025976,
0.022222222222222223,
0,
0,
0,
0,
0.012195121951219513,
0.022727272727272728,
0.008403361344537815,
0,
0,
0.014084507042253521,
0,
0,
0,
0,
0.015384615384615385,
0,
0.04,
0.007352941176470588,
0,
0.04,
0.007633587786259542,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0.046511627906976744,
0.00684931506849315,
0,
0.011235955056179775,
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0.006369426751592357,
0.0072992700729927005,
0.006369426751592357,
0,
0,
0.007246376811594203,
0.007246376811594203,
0.01020408163265306,
0,
0.01020408163265306,
0.00847457627118644,
0,
0,
0.0072992700729927005,
0.010309278350515464,
0.0072992700729927005,
0,
0.0072992700729927005,
0.008547008547008548,
0.010309278350515464,
0.010309278350515464,
0,
0,
0,
0.010309278350515464,
0,
0,
0.008547008547008548,
0.0072992700729927005,
0.006369426751592357,
0.0072992700729927005,
0.008547008547008548,
0.008547008547008548,
0.008547008547008548,
0.010309278350515464,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0.017543859649122806,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009174311926605505,
0,
0.04807692307692308,
0,
0.010638297872340425,
0.009259259259259259,
0,
0.04807692307692308,
0,
0.00909090909090909,
0.012345679012345678,
0.04807692307692308,
0,
0.009259259259259259,
0,
0.04807692307692308,
0,
0.009900990099009901,
0.07801418439716312,
0,
0.07801418439716312,
0.07801418439716312,
0.07801418439716312,
0,
0.011627906976744186,
0,
0.022222222222222223,
0.013513513513513514,
0.018518518518518517,
0.023255813953488372,
0.014925373134328358,
0.0196078431372549,
0.0136986301369863,
0,
0,
0,
0,
0,
0.02631578947368421,
0,
0.011235955056179775,
0,
0.009708737864077669,
0,
0,
0,
0.012987012987012988,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0.015151515151515152,
0,
0.008064516129032258,
0,
0,
0.0125,
0.007407407407407408,
0,
0,
0.007246376811594203,
0.011235955056179775,
0,
0.011904761904761904,
0.01020408163265306,
0,
0.011904761904761904,
0.01020408163265306,
0,
0,
0,
0.010869565217391304,
0,
0.007042253521126761,
0.010526315789473684,
0,
0,
0.011111111111111112,
0,
0,
0.012096774193548387,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0.012048192771084338,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0.005714285714285714,
0,
0.00819672131147541,
0,
0,
0,
0,
0.005649717514124294,
0,
0.007936507936507936,
0,
0,
0.011494252873563218,
0.009259259259259259,
0,
0,
0.008695652173913044,
0.007246376811594203,
0,
0.007692307692307693,
0.008333333333333333,
0,
0,
0.008928571428571428,
0.008928571428571428,
0.008547008547008548,
0,
0,
0.011904761904761904,
0.008695652173913044,
0.007352941176470588,
0.006756756756756757,
0.006802721088435374,
0.006802721088435374,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01,
0.020833333333333332,
0.009259259259259259,
0,
0.01,
0.018518518518518517,
0,
0,
0,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0,
0.02912621359223301,
0.008064516129032258,
0.024390243902439025,
0.0072992700729927005,
0,
0,
0,
0,
0,
0,
0.012658227848101266,
0.012345679012345678,
0,
0,
0,
0,
0.02040816326530612,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0.01,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0.004149377593360996,
0.018018018018018018,
0,
0.010638297872340425,
0.008771929824561403,
0.031578947368421054,
0,
0.008771929824561403,
0.04040404040404041,
0,
0,
0,
0,
0,
0.00819672131147541,
0,
0,
0,
0,
0,
0.007692307692307693,
0,
0.011904761904761904,
0.01098901098901099,
0.010416666666666666,
0.010416666666666666,
0.007692307692307693,
0,
0,
0.011494252873563218,
0.010869565217391304,
0.010869565217391304,
0.007936507936507936,
0,
0.008130081300813009,
0,
0,
0,
0.012903225806451613,
0.03125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008064516129032258,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0.043478260869565216,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0.010416666666666666,
0,
0.008771929824561403,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0
] | 937 | 0.006236 | false |
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Machinecoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Specialized SipHash-2-4 implementations.
This implements SipHash-2-4 for 256-bit integers.
"""
def rotl64(n, b):
return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b
def siphash_round(v0, v1, v2, v3):
v0 = (v0 + v1) & ((1 << 64) - 1)
v1 = rotl64(v1, 13)
v1 ^= v0
v0 = rotl64(v0, 32)
v2 = (v2 + v3) & ((1 << 64) - 1)
v3 = rotl64(v3, 16)
v3 ^= v2
v0 = (v0 + v3) & ((1 << 64) - 1)
v3 = rotl64(v3, 21)
v3 ^= v0
v2 = (v2 + v1) & ((1 << 64) - 1)
v1 = rotl64(v1, 17)
v1 ^= v2
v2 = rotl64(v2, 32)
return (v0, v1, v2, v3)
def siphash256(k0, k1, h):
n0 = h & ((1 << 64) - 1)
n1 = (h >> 64) & ((1 << 64) - 1)
n2 = (h >> 128) & ((1 << 64) - 1)
n3 = (h >> 192) & ((1 << 64) - 1)
v0 = 0x736f6d6570736575 ^ k0
v1 = 0x646f72616e646f6d ^ k1
v2 = 0x6c7967656e657261 ^ k0
v3 = 0x7465646279746573 ^ k1 ^ n0
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0 ^= n0
v3 ^= n1
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0 ^= n1
v3 ^= n2
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0 ^= n2
v3 ^= n3
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0 ^= n3
v3 ^= 0x2000000000000000
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0 ^= 0x2000000000000000
v2 ^= 0xFF
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
return v0 ^ v1 ^ v2 ^ v3
| [
"#!/usr/bin/env python3\n",
"# Copyright (c) 2016-2018 The Machinecoin Core developers\n",
"# Distributed under the MIT software license, see the accompanying\n",
"# file COPYING or http://www.opensource.org/licenses/mit-license.php.\n",
"\"\"\"Specialized SipHash-2-4 implementations.\n",
"\n",
"This implements SipHash-2-4 for 256-bit integers.\n",
"\"\"\"\n",
"\n",
"def rotl64(n, b):\n",
" return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b\n",
"\n",
"def siphash_round(v0, v1, v2, v3):\n",
" v0 = (v0 + v1) & ((1 << 64) - 1)\n",
" v1 = rotl64(v1, 13)\n",
" v1 ^= v0\n",
" v0 = rotl64(v0, 32)\n",
" v2 = (v2 + v3) & ((1 << 64) - 1)\n",
" v3 = rotl64(v3, 16)\n",
" v3 ^= v2\n",
" v0 = (v0 + v3) & ((1 << 64) - 1)\n",
" v3 = rotl64(v3, 21)\n",
" v3 ^= v0\n",
" v2 = (v2 + v1) & ((1 << 64) - 1)\n",
" v1 = rotl64(v1, 17)\n",
" v1 ^= v2\n",
" v2 = rotl64(v2, 32)\n",
" return (v0, v1, v2, v3)\n",
"\n",
"def siphash256(k0, k1, h):\n",
" n0 = h & ((1 << 64) - 1)\n",
" n1 = (h >> 64) & ((1 << 64) - 1)\n",
" n2 = (h >> 128) & ((1 << 64) - 1)\n",
" n3 = (h >> 192) & ((1 << 64) - 1)\n",
" v0 = 0x736f6d6570736575 ^ k0\n",
" v1 = 0x646f72616e646f6d ^ k1\n",
" v2 = 0x6c7967656e657261 ^ k0\n",
" v3 = 0x7465646279746573 ^ k1 ^ n0\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0 ^= n0\n",
" v3 ^= n1\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0 ^= n1\n",
" v3 ^= n2\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0 ^= n2\n",
" v3 ^= n3\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0 ^= n3\n",
" v3 ^= 0x2000000000000000\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0 ^= 0x2000000000000000\n",
" v2 ^= 0xFF\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)\n",
" return v0 ^ v1 ^ v2 ^ v3\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 63 | 0.001923 | false |
# Copyright (c) 2011 Sam Rushing
"""ECC secp256k1 OpenSSL wrapper.
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
This file is modified from python-machinecoinlib.
"""
import ctypes
import ctypes.util
import hashlib
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
return ctypes.c_void_p (val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey():
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
raise ValueError("Could not derive public key from the supplied secret.")
ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx)
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def get_raw_ecdh_key(self, other_pubkey):
ecdh_keybuffer = ctypes.create_string_buffer(32)
r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
ssl.EC_KEY_get0_public_key(other_pubkey.k),
self.k, 0)
if r != 32:
raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
return ecdh_keybuffer.raw
def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)
def sign(self, hash, low_s = True):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
sig_size0.value = ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size0.value)
result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
assert mb_sig.raw[0] == 0x30
assert mb_sig.raw[1] == sig_size0.value - 2
total_size = mb_sig.raw[1]
assert mb_sig.raw[2] == 2
r_size = mb_sig.raw[3]
assert mb_sig.raw[4 + r_size] == 2
s_size = mb_sig.raw[5 + r_size]
s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big')
if (not low_s) or s_value <= SECP256K1_ORDER_HALF:
return mb_sig.raw[:sig_size0.value]
else:
low_s_value = SECP256K1_ORDER - s_value
low_s_bytes = (low_s_value).to_bytes(33, byteorder='big')
while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:
low_s_bytes = low_s_bytes[1:]
new_s_size = len(low_s_bytes)
new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big')
new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big')
return b'\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes
def verify(self, hash, sig):
"""Verify a DER signature"""
return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
| [
"# Copyright (c) 2011 Sam Rushing\n",
"\"\"\"ECC secp256k1 OpenSSL wrapper.\n",
"\n",
"WARNING: This module does not mlock() secrets; your private keys may end up on\n",
"disk in swap! Use with caution!\n",
"\n",
"This file is modified from python-machinecoinlib.\n",
"\"\"\"\n",
"\n",
"import ctypes\n",
"import ctypes.util\n",
"import hashlib\n",
"\n",
"ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library ('ssl') or 'libeay32')\n",
"\n",
"ssl.BN_new.restype = ctypes.c_void_p\n",
"ssl.BN_new.argtypes = []\n",
"\n",
"ssl.BN_bin2bn.restype = ctypes.c_void_p\n",
"ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]\n",
"\n",
"ssl.BN_CTX_free.restype = None\n",
"ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]\n",
"\n",
"ssl.BN_CTX_new.restype = ctypes.c_void_p\n",
"ssl.BN_CTX_new.argtypes = []\n",
"\n",
"ssl.ECDH_compute_key.restype = ctypes.c_int\n",
"ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]\n",
"\n",
"ssl.ECDSA_sign.restype = ctypes.c_int\n",
"ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n",
"\n",
"ssl.ECDSA_verify.restype = ctypes.c_int\n",
"ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]\n",
"\n",
"ssl.EC_KEY_free.restype = None\n",
"ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]\n",
"\n",
"ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p\n",
"ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]\n",
"\n",
"ssl.EC_KEY_get0_group.restype = ctypes.c_void_p\n",
"ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]\n",
"\n",
"ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p\n",
"ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]\n",
"\n",
"ssl.EC_KEY_set_private_key.restype = ctypes.c_int\n",
"ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]\n",
"\n",
"ssl.EC_KEY_set_conv_form.restype = None\n",
"ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]\n",
"\n",
"ssl.EC_KEY_set_public_key.restype = ctypes.c_int\n",
"ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]\n",
"\n",
"ssl.i2o_ECPublicKey.restype = ctypes.c_void_p\n",
"ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]\n",
"\n",
"ssl.EC_POINT_new.restype = ctypes.c_void_p\n",
"ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]\n",
"\n",
"ssl.EC_POINT_free.restype = None\n",
"ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]\n",
"\n",
"ssl.EC_POINT_mul.restype = ctypes.c_int\n",
"ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]\n",
"\n",
"# this specifies the curve used with ECDSA.\n",
"NID_secp256k1 = 714 # from openssl/obj_mac.h\n",
"\n",
"SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141\n",
"SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2\n",
"\n",
"# Thx to Sam Devlin for the ctypes magic 64-bit fix.\n",
"def _check_result(val, func, args):\n",
" if val == 0:\n",
" raise ValueError\n",
" else:\n",
" return ctypes.c_void_p (val)\n",
"\n",
"ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p\n",
"ssl.EC_KEY_new_by_curve_name.errcheck = _check_result\n",
"\n",
"class CECKey():\n",
" \"\"\"Wrapper around OpenSSL's EC_KEY\"\"\"\n",
"\n",
" POINT_CONVERSION_COMPRESSED = 2\n",
" POINT_CONVERSION_UNCOMPRESSED = 4\n",
"\n",
" def __init__(self):\n",
" self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)\n",
"\n",
" def __del__(self):\n",
" if ssl:\n",
" ssl.EC_KEY_free(self.k)\n",
" self.k = None\n",
"\n",
" def set_secretbytes(self, secret):\n",
" priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())\n",
" group = ssl.EC_KEY_get0_group(self.k)\n",
" pub_key = ssl.EC_POINT_new(group)\n",
" ctx = ssl.BN_CTX_new()\n",
" if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):\n",
" raise ValueError(\"Could not derive public key from the supplied secret.\")\n",
" ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx)\n",
" ssl.EC_KEY_set_private_key(self.k, priv_key)\n",
" ssl.EC_KEY_set_public_key(self.k, pub_key)\n",
" ssl.EC_POINT_free(pub_key)\n",
" ssl.BN_CTX_free(ctx)\n",
" return self.k\n",
"\n",
" def set_privkey(self, key):\n",
" self.mb = ctypes.create_string_buffer(key)\n",
" return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))\n",
"\n",
" def set_pubkey(self, key):\n",
" self.mb = ctypes.create_string_buffer(key)\n",
" return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))\n",
"\n",
" def get_privkey(self):\n",
" size = ssl.i2d_ECPrivateKey(self.k, 0)\n",
" mb_pri = ctypes.create_string_buffer(size)\n",
" ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))\n",
" return mb_pri.raw\n",
"\n",
" def get_pubkey(self):\n",
" size = ssl.i2o_ECPublicKey(self.k, 0)\n",
" mb = ctypes.create_string_buffer(size)\n",
" ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))\n",
" return mb.raw\n",
"\n",
" def get_raw_ecdh_key(self, other_pubkey):\n",
" ecdh_keybuffer = ctypes.create_string_buffer(32)\n",
" r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,\n",
" ssl.EC_KEY_get0_public_key(other_pubkey.k),\n",
" self.k, 0)\n",
" if r != 32:\n",
" raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')\n",
" return ecdh_keybuffer.raw\n",
"\n",
" def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):\n",
" # FIXME: be warned it's not clear what the kdf should be as a default\n",
" r = self.get_raw_ecdh_key(other_pubkey)\n",
" return kdf(r)\n",
"\n",
" def sign(self, hash, low_s = True):\n",
" # FIXME: need unit tests for below cases\n",
" if not isinstance(hash, bytes):\n",
" raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)\n",
" if len(hash) != 32:\n",
" raise ValueError('Hash must be exactly 32 bytes long')\n",
"\n",
" sig_size0 = ctypes.c_uint32()\n",
" sig_size0.value = ssl.ECDSA_size(self.k)\n",
" mb_sig = ctypes.create_string_buffer(sig_size0.value)\n",
" result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)\n",
" assert 1 == result\n",
" assert mb_sig.raw[0] == 0x30\n",
" assert mb_sig.raw[1] == sig_size0.value - 2\n",
" total_size = mb_sig.raw[1]\n",
" assert mb_sig.raw[2] == 2\n",
" r_size = mb_sig.raw[3]\n",
" assert mb_sig.raw[4 + r_size] == 2\n",
" s_size = mb_sig.raw[5 + r_size]\n",
" s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big')\n",
" if (not low_s) or s_value <= SECP256K1_ORDER_HALF:\n",
" return mb_sig.raw[:sig_size0.value]\n",
" else:\n",
" low_s_value = SECP256K1_ORDER - s_value\n",
" low_s_bytes = (low_s_value).to_bytes(33, byteorder='big')\n",
" while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:\n",
" low_s_bytes = low_s_bytes[1:]\n",
" new_s_size = len(low_s_bytes)\n",
" new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big')\n",
" new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big')\n",
" return b'\\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes\n",
"\n",
" def verify(self, hash, sig):\n",
" \"\"\"Verify a DER signature\"\"\"\n",
" return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1\n",
"\n",
" def set_compressed(self, compressed):\n",
" if compressed:\n",
" form = self.POINT_CONVERSION_COMPRESSED\n",
" else:\n",
" form = self.POINT_CONVERSION_UNCOMPRESSED\n",
" ssl.EC_KEY_set_conv_form(self.k, form)\n",
"\n",
"\n",
"class CPubKey(bytes):\n",
" \"\"\"An encapsulated public key\n",
"\n",
" Attributes:\n",
"\n",
" is_valid - Corresponds to CPubKey.IsValid()\n",
" is_fullyvalid - Corresponds to CPubKey.IsFullyValid()\n",
" is_compressed - Corresponds to CPubKey.IsCompressed()\n",
" \"\"\"\n",
"\n",
" def __new__(cls, buf, _cec_key=None):\n",
" self = super(CPubKey, cls).__new__(cls, buf)\n",
" if _cec_key is None:\n",
" _cec_key = CECKey()\n",
" self._cec_key = _cec_key\n",
" self.is_fullyvalid = _cec_key.set_pubkey(self) != 0\n",
" return self\n",
"\n",
" @property\n",
" def is_valid(self):\n",
" return len(self) > 0\n",
"\n",
" @property\n",
" def is_compressed(self):\n",
" return len(self) == 33\n",
"\n",
" def verify(self, hash, sig):\n",
" return self._cec_key.verify(hash, sig)\n",
"\n",
" def __str__(self):\n",
" return repr(self)\n",
"\n",
" def __repr__(self):\n",
" return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01282051282051282,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0.008130081300813009,
0,
0,
0.00819672131147541,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007633587786259542,
0,
0,
0.022222222222222223,
0,
0.011764705882352941,
0,
0,
0,
0.027777777777777776,
0,
0,
0,
0.02702702702702703,
0,
0.01818181818181818,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009345794392523364,
0,
0,
0,
0.009433962264150943,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0.05,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0.020618556701030927,
0.014084507042253521,
0.009433962264150943,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
1
] | 226 | 0.006238 | false |
#
#------------------------------------------------------------------------------
# Copyright (c) 2013-2014, Christian Therien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# data_format.py - This file is part of the PySptools package.
#
"""
convert2d, convert3d, normalize functions
"""
from __future__ import division
import numpy as np
def convert2d(M):
"""
Converts a 3D data cube (m x n x p) to a 2D matrix of points
where N = m*n.
Parameters:
M: `numpy array`
A HSI cube (m x n x p).
Returns: `numpy array`
2D data matrix (N x p)
"""
if M.ndim != 3:
raise RuntimeError('in formating.convert2d, M have {0} dimension(s), expected 3 dimensions'.format(M.ndim))
h, w, numBands = M.shape
return np.reshape(M, (w*h, numBands))
def convert3d(M, h, w, sigLast=True):
"""
Converts a 1D (N) or 2D matrix (p x N) or (N x p) to a 3D
data cube (m x n x p) where N = m * n
Parameters:
N: `numpy array`
1D (N) or 2D data matrix (p x N) or (N x p)
h: `integer`
Height axis length (or y axis) of the cube.
w: `integer`
Width axis length (or x axis) of the cube.
siglast: `True [default False]`
Determine if input N is (p x N) or (N x p).
Returns: `numpy array`
A 3D data cube (m x n x p)
"""
if M.ndim > 2:
raise RuntimeError('in formating.convert2d, M have {0} dimension(s), expected 1 or 2 dimensions'.format(M.ndim))
N = np.array(M)
if sigLast == False:
if N.ndim == 1:
return np.reshape(N, (h, w), order='F')
else:
numBands, n = N.shape
return np.reshape(N.transpose(), (h, w, numBands), order='F')
if sigLast == True:
if N.ndim == 1:
return np.reshape(N, (h, w))
else:
numBands, n = N.shape
return np.reshape(N.transpose(), (h, w, numBands), order='F')
def normalize(M):
"""
Normalizes M to be in range [0, 1].
Parameters:
M: `numpy array`
1D, 2D or 3D data.
Returns: `numpy array`
Normalized data.
"""
minVal = np.min(M)
maxVal = np.max(M)
Mn = M - minVal;
if maxVal == minVal:
return np.zeros(M.shape);
else:
return Mn / (maxVal-minVal)
| [
"#\r\n",
"#------------------------------------------------------------------------------\r\n",
"# Copyright (c) 2013-2014, Christian Therien\r\n",
"#\r\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n",
"# you may not use this file except in compliance with the License.\r\n",
"# You may obtain a copy of the License at\r\n",
"#\r\n",
"# http://www.apache.org/licenses/LICENSE-2.0\r\n",
"#\r\n",
"# Unless required by applicable law or agreed to in writing, software\r\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n",
"# See the License for the specific language governing permissions and\r\n",
"# limitations under the License.\r\n",
"#------------------------------------------------------------------------------\r\n",
"#\r\n",
"# data_format.py - This file is part of the PySptools package.\r\n",
"#\r\n",
"\r\n",
"\"\"\"\r\n",
"convert2d, convert3d, normalize functions\r\n",
"\"\"\"\r\n",
"\r\n",
"from __future__ import division\r\n",
"\r\n",
"import numpy as np\r\n",
"\r\n",
"\r\n",
"def convert2d(M):\r\n",
" \"\"\"\r\n",
" Converts a 3D data cube (m x n x p) to a 2D matrix of points\r\n",
" where N = m*n.\r\n",
"\r\n",
" Parameters:\r\n",
" M: `numpy array`\r\n",
" A HSI cube (m x n x p).\r\n",
"\r\n",
" Returns: `numpy array`\r\n",
" 2D data matrix (N x p)\r\n",
" \"\"\"\r\n",
"\r\n",
" if M.ndim != 3:\r\n",
" raise RuntimeError('in formating.convert2d, M have {0} dimension(s), expected 3 dimensions'.format(M.ndim))\r\n",
"\r\n",
" h, w, numBands = M.shape\r\n",
"\r\n",
" return np.reshape(M, (w*h, numBands))\r\n",
"\r\n",
"\r\n",
"def convert3d(M, h, w, sigLast=True):\r\n",
" \"\"\"\r\n",
" Converts a 1D (N) or 2D matrix (p x N) or (N x p) to a 3D\r\n",
" data cube (m x n x p) where N = m * n\r\n",
"\r\n",
" Parameters:\r\n",
" N: `numpy array`\r\n",
" 1D (N) or 2D data matrix (p x N) or (N x p)\r\n",
"\r\n",
" h: `integer`\r\n",
" Height axis length (or y axis) of the cube.\r\n",
"\r\n",
" w: `integer`\r\n",
" Width axis length (or x axis) of the cube.\r\n",
"\r\n",
" siglast: `True [default False]`\r\n",
" Determine if input N is (p x N) or (N x p).\r\n",
"\r\n",
" Returns: `numpy array`\r\n",
" A 3D data cube (m x n x p)\r\n",
" \"\"\"\r\n",
"\r\n",
" if M.ndim > 2:\r\n",
" raise RuntimeError('in formating.convert2d, M have {0} dimension(s), expected 1 or 2 dimensions'.format(M.ndim))\r\n",
"\r\n",
" N = np.array(M)\r\n",
"\r\n",
" if sigLast == False:\r\n",
" if N.ndim == 1:\r\n",
" return np.reshape(N, (h, w), order='F')\r\n",
" else:\r\n",
" numBands, n = N.shape\r\n",
" return np.reshape(N.transpose(), (h, w, numBands), order='F')\r\n",
"\r\n",
" if sigLast == True:\r\n",
" if N.ndim == 1:\r\n",
" return np.reshape(N, (h, w))\r\n",
" else:\r\n",
" numBands, n = N.shape\r\n",
" return np.reshape(N.transpose(), (h, w, numBands), order='F')\r\n",
"\r\n",
"\r\n",
"def normalize(M):\r\n",
" \"\"\"\r\n",
" Normalizes M to be in range [0, 1].\r\n",
"\r\n",
" Parameters:\r\n",
" M: `numpy array`\r\n",
" 1D, 2D or 3D data.\r\n",
"\r\n",
" Returns: `numpy array`\r\n",
" Normalized data.\r\n",
" \"\"\"\r\n",
"\r\n",
" minVal = np.min(M)\r\n",
" maxVal = np.max(M)\r\n",
"\r\n",
" Mn = M - minVal;\r\n",
"\r\n",
" if maxVal == minVal:\r\n",
" return np.zeros(M.shape);\r\n",
" else:\r\n",
" return Mn / (maxVal-minVal)\r\n"
] | [
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00847457627118644,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008130081300813009,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456,
0,
0,
0.02857142857142857,
0,
0
] | 113 | 0.001715 | false |
#coding:utf-8
""" FUDAN XUANKE justmao945@gmail.com """
import sys
import urllib
import urllib2
import cookielib
import Image
import cStringIO
import data
def _u(s):
"""
This function is a stupid one, but it really works!
convert 's' to utf8
"""
if not s: return s
s = s.strip()
if not s: return s
for c in ('utf8','gbk','big5','jp','kr'):
try: return s.decode(c).encode('gbk')
except: pass
return s
def init():
""" Init urllib2 support cookie and Data image """
_cookie = urllib2.HTTPCookieProcessor(cookielib.CookieJar())
urllib2.install_opener(urllib2.build_opener(_cookie, urllib2.HTTPHandler))
_data_image = Image.open('data.bmp')
if not _data_image:
sys.stderr.write('Can not open file: data.bmp\n');
return False
_data_images = []
for i in range(0, 80, 8):
_data_images.append(tuple(_data_image.crop((i,0,i+8,16)).getdata()))
return _data_images
def get_rand(data_images, img):
"""
Require PIL.
image: crop to 4 blocks, position: ( 4,4)->(12,20); (18,4)->(26,20); (32,4)->(40,20); (46,4)->(54,20);
data_image: crop to 10 blocks, Positon from (0,0) to (80,16)
"""
_image = Image.open(cStringIO.StringIO(img.read()))
_black = (0,0,0)
_res = [-1,-1,-1,-1]
for i in range(4, 47, 14):
_image_block = tuple(_image.crop((i,4,i+8,20)).getdata())
for j, data_image in enumerate(data_images):
_tmp = -1
for k in range(0, 8*16):
if data_image[k] == _black:
if _tmp < 0:
_tmp = k
elif _image_block[k] != _image_block[_tmp]:
break;
else: ## find 'j' is the number
_res[(i-4)/14] = j
return ''.join(str(i) for i in _res)
def login(data_images, user, passwd):
_XK_LOGIN='http://xk.fudan.edu.cn/xk/loginServlet'
_XK_LOGIN_IMAGE='http://xk.fudan.edu.cn/xk/image.do'
if not user:
sys.stderr.write('ERROR:user is empty!\n');
if not passwd:
sys.stderr.write('ERROR:passwd is empty!\n');
_img = urllib2.urlopen(_XK_LOGIN_IMAGE)
_postdata = urllib.urlencode({
'studentId':user,
'password':passwd,
'rand':get_rand(data_images, _img),
'Submit2':'Submit'
})
_req = urllib2.Request(url=_XK_LOGIN, data=_postdata)
_res = urllib2.urlopen(_req).read()
return _res.find('input.jsp')
def generate_course(data_images, selectionId):
_XK_SELECT='http://xk.fudan.edu.cn/xk/doSelectServlet'
_XK_SELECT_IMAGE='http://xk.fudan.edu.cn/xk/image.do'
_img = urllib2.urlopen(_XK_SELECT_IMAGE)
_postdata = urllib.urlencode({
'selectionId':selectionId,
'xklb':'ss',
'rand':get_rand(data_images, _img),
})
_req = urllib2.Request(url=_XK_SELECT, data=_postdata)
_res = urllib2.urlopen(_req).read(4096)
return _res
### main
if __name__ == '__main__':
data_images = init()
if not data_images:
sys.exit(1)
sys.stdout.write('Logining the system...Please wait a minute\n')
if login(data_images, data.USER, data.PASSWD) == -1:
sys.stderr.write('Can not login the system...\nPlease check your ID and Password in data.py\n')
sys.exit(1)
cut_msg = lambda msg: _u(msg[msg.find('(\"')+2 : msg.find('\")')].replace('\\n','\n'))
if type(data.COURSES) == str:
msg = generate_course(data_images, data.COURSES)
sys.stdout.write('Course: %s --> %s\n' % (data.COURSES, cut_msg(msg)) )
elif type(data.COURSES) == tuple:
for c in data.COURSES:
c = c.strip()
if c:
msg = generate_course(data_images, c)
sys.stdout.write('Course: %s --> %s\n' % (c, cut_msg(msg)) )
else:
sys.stderr.write('COURSES format wrong in data.py\n')
sys.exit(1)
sys.exit(0)
| [
"#coding:utf-8\r\n",
"\r\n",
"\"\"\" FUDAN XUANKE justmao945@gmail.com \"\"\"\r\n",
"\r\n",
"import sys\r\n",
"import urllib\r\n",
"import urllib2\r\n",
"import cookielib\r\n",
"import Image\r\n",
"import cStringIO\r\n",
"import data\r\n",
"\r\n",
"\r\n",
"def _u(s):\r\n",
" \"\"\"\r\n",
" This function is a stupid one, but it really works!\r\n",
" convert 's' to utf8\r\n",
" \"\"\"\r\n",
" if not s: return s\r\n",
" s = s.strip()\r\n",
" if not s: return s\r\n",
" for c in ('utf8','gbk','big5','jp','kr'):\r\n",
" try: return s.decode(c).encode('gbk')\r\n",
" except: pass\r\n",
" return s\r\n",
"\r\n",
"def init():\r\n",
" \"\"\" Init urllib2 support cookie and Data image \"\"\"\r\n",
" _cookie = urllib2.HTTPCookieProcessor(cookielib.CookieJar())\r\n",
" urllib2.install_opener(urllib2.build_opener(_cookie, urllib2.HTTPHandler))\r\n",
" \r\n",
" _data_image = Image.open('data.bmp')\r\n",
" if not _data_image:\r\n",
" sys.stderr.write('Can not open file: data.bmp\\n');\r\n",
" return False\r\n",
" _data_images = []\r\n",
" for i in range(0, 80, 8):\r\n",
" _data_images.append(tuple(_data_image.crop((i,0,i+8,16)).getdata()))\r\n",
" return _data_images\r\n",
"\r\n",
"\r\n",
"\r\n",
"def get_rand(data_images, img):\r\n",
" \"\"\" \r\n",
" Require PIL. \r\n",
" image: crop to 4 blocks, position: ( 4,4)->(12,20); (18,4)->(26,20); (32,4)->(40,20); (46,4)->(54,20);\r\n",
" data_image: crop to 10 blocks, Positon from (0,0) to (80,16)\r\n",
" \"\"\"\r\n",
" _image = Image.open(cStringIO.StringIO(img.read()))\r\n",
" _black = (0,0,0)\r\n",
" _res = [-1,-1,-1,-1]\r\n",
" for i in range(4, 47, 14):\r\n",
" _image_block = tuple(_image.crop((i,4,i+8,20)).getdata())\r\n",
" for j, data_image in enumerate(data_images):\r\n",
" _tmp = -1\r\n",
" for k in range(0, 8*16):\r\n",
" if data_image[k] == _black:\r\n",
" if _tmp < 0:\r\n",
" _tmp = k\r\n",
" elif _image_block[k] != _image_block[_tmp]:\r\n",
" break;\r\n",
" else: ## find 'j' is the number\r\n",
" _res[(i-4)/14] = j\r\n",
" \r\n",
" return ''.join(str(i) for i in _res)\r\n",
" \r\n",
"\r\n",
"def login(data_images, user, passwd):\r\n",
" _XK_LOGIN='http://xk.fudan.edu.cn/xk/loginServlet'\r\n",
" _XK_LOGIN_IMAGE='http://xk.fudan.edu.cn/xk/image.do'\r\n",
"\r\n",
" if not user:\r\n",
" sys.stderr.write('ERROR:user is empty!\\n');\r\n",
" if not passwd:\r\n",
" sys.stderr.write('ERROR:passwd is empty!\\n');\r\n",
"\r\n",
" _img = urllib2.urlopen(_XK_LOGIN_IMAGE)\r\n",
" \r\n",
" _postdata = urllib.urlencode({\r\n",
" 'studentId':user,\r\n",
" 'password':passwd,\r\n",
" 'rand':get_rand(data_images, _img),\r\n",
" 'Submit2':'Submit'\r\n",
" })\r\n",
" \r\n",
" _req = urllib2.Request(url=_XK_LOGIN, data=_postdata)\r\n",
" _res = urllib2.urlopen(_req).read()\r\n",
" return _res.find('input.jsp')\r\n",
"\r\n",
"\r\n",
"def generate_course(data_images, selectionId):\r\n",
" _XK_SELECT='http://xk.fudan.edu.cn/xk/doSelectServlet'\r\n",
" _XK_SELECT_IMAGE='http://xk.fudan.edu.cn/xk/image.do'\r\n",
" _img = urllib2.urlopen(_XK_SELECT_IMAGE)\r\n",
" \r\n",
" _postdata = urllib.urlencode({\r\n",
" 'selectionId':selectionId,\r\n",
" 'xklb':'ss',\r\n",
" 'rand':get_rand(data_images, _img),\r\n",
" })\r\n",
" _req = urllib2.Request(url=_XK_SELECT, data=_postdata)\r\n",
" _res = urllib2.urlopen(_req).read(4096)\r\n",
" return _res\r\n",
"\r\n",
"\r\n",
"### main\r\n",
"if __name__ == '__main__':\r\n",
" data_images = init()\r\n",
" if not data_images:\r\n",
" sys.exit(1)\r\n",
" \r\n",
" sys.stdout.write('Logining the system...Please wait a minute\\n')\r\n",
" if login(data_images, data.USER, data.PASSWD) == -1:\r\n",
" sys.stderr.write('Can not login the system...\\nPlease check your ID and Password in data.py\\n')\r\n",
" sys.exit(1)\r\n",
" \r\n",
" cut_msg = lambda msg: _u(msg[msg.find('(\\\"')+2 : msg.find('\\\")')].replace('\\\\n','\\n'))\r\n",
" if type(data.COURSES) == str:\r\n",
" msg = generate_course(data_images, data.COURSES)\r\n",
" \r\n",
" sys.stdout.write('Course: %s --> %s\\n' % (data.COURSES, cut_msg(msg)) )\r\n",
" elif type(data.COURSES) == tuple:\r\n",
" for c in data.COURSES:\r\n",
" c = c.strip()\r\n",
" if c:\r\n",
" msg = generate_course(data_images, c)\r\n",
" sys.stdout.write('Course: %s --> %s\\n' % (c, cut_msg(msg)) )\r\n",
" else:\r\n",
" sys.stderr.write('COURSES format wrong in data.py\\n')\r\n",
" sys.exit(1)\r\n",
"\r\n",
" sys.exit(0)\r\n",
"\r\n",
"\r\n"
] | [
0.06666666666666667,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.14285714285714285,
0,
0,
0,
0.09090909090909091,
0.058823529411764705,
0.09090909090909091,
0.1111111111111111,
0.023255813953488372,
0.1111111111111111,
0.08333333333333333,
0,
0.07692307692307693,
0.01694915254237288,
0.015625,
0.01282051282051282,
0.25,
0.025,
0.043478260869565216,
0.017857142857142856,
0,
0.047619047619047616,
0.034482758620689655,
0.04054054054054054,
0.043478260869565216,
0,
0,
0,
0.030303030303030304,
0.25,
0.058823529411764705,
0.009433962264150943,
0,
0,
0.01818181818181818,
0.15,
0.16666666666666666,
0.03333333333333333,
0.047619047619047616,
0,
0.058823529411764705,
0.03125,
0,
0.041666666666666664,
0,
0.01818181818181818,
0.05,
0.07692307692307693,
0,
0.16666666666666666,
0.025,
0.1,
0,
0,
0.037037037037037035,
0.03571428571428571,
0,
0.0625,
0.02040816326530612,
0.05555555555555555,
0.0196078431372549,
0,
0.023255813953488372,
0.25,
0.029411764705882353,
0.043478260869565216,
0.041666666666666664,
0.024390243902439025,
0.041666666666666664,
0,
0.25,
0.017543859649122806,
0.02564102564102564,
0.030303030303030304,
0,
0,
0,
0.034482758620689655,
0.03508771929824561,
0.022727272727272728,
0.25,
0.029411764705882353,
0.03125,
0.05555555555555555,
0.024390243902439025,
0,
0.017241379310344827,
0.023255813953488372,
0.06666666666666667,
0,
0,
0.1,
0,
0.041666666666666664,
0.043478260869565216,
0,
0.25,
0.014705882352941176,
0.017857142857142856,
0.009900990099009901,
0,
0.25,
0.05555555555555555,
0.030303030303030304,
0.01818181818181818,
0.14285714285714285,
0.02564102564102564,
0.02702702702702703,
0,
0.047619047619047616,
0.07692307692307693,
0,
0.014285714285714285,
0.1111111111111111,
0,
0,
0,
0.06666666666666667,
0,
0.5
] | 134 | 0.046109 | false |
#!python3
# -*- coding:utf-8 -*-
import os
import sys
import time
import ctypes
import shutil
import subprocess
IsPy3 = sys.version_info[0] >= 3
if IsPy3:
import winreg
else:
import codecs
import _winreg as winreg
BuildType = 'Release'
IsRebuild = True
Build = 'Rebuild'
Update = False
Copy = False
CleanAll = False
BuildTimeout = 30*60
MSBuild = None
IncrediBuild = None
UseMSBuild = True #默认用MSBuild编译,如果为False则用IncrediBuild编译
#不同项目只需修改下面5个变量
SlnFile = '../Cjson_lib.sln' #相对于本py脚本路径的相对路径
UpdateDir = [] #相对于本py脚本路径的相对路径,填空不更新
ExecBatList = [] #相对于本py脚本路径的相对路径,编译前调用的脚本,可填空,执行bat会先cd到bat目录再执行
MSBuildFirstProjects = [r'Cjson_lib'] #使用MSBuild需要工程文件在解决方案sln中的路径
# MSBuild首先编译的项目,填空不指定顺序
IncrediBuildFirstProjects = ['Cjson_lib'] #使用IncrediBuild只需工程名字
#IncrediBuild首先编译的项目,填空不指定顺序
class ConsoleColor():
'''This class defines the values of color for printing on console window'''
Black = 0
DarkBlue = 1
DarkGreen = 2
DarkCyan = 3
DarkRed = 4
DarkMagenta = 5
DarkYellow = 6
Gray = 7
DarkGray = 8
Blue = 9
Green = 10
Cyan = 11
Red = 12
Magenta = 13
Yellow = 14
White = 15
class Coord(ctypes.Structure):
_fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]
class SmallRect(ctypes.Structure):
_fields_ = [('Left', ctypes.c_short),
('Top', ctypes.c_short),
('Right', ctypes.c_short),
('Bottom', ctypes.c_short),
]
class ConsoleScreenBufferInfo(ctypes.Structure):
_fields_ = [('dwSize', Coord),
('dwCursorPosition', Coord),
('wAttributes', ctypes.c_uint),
('srWindow', SmallRect),
('dwMaximumWindowSize', Coord),
]
class Win32API():
'''Some native methods for python calling'''
StdOutputHandle = -11
ConsoleOutputHandle = None
DefaultColor = None
@staticmethod
def SetConsoleColor(color):
'''Change the text color on console window'''
if not Win32API.DefaultColor:
if not Win32API.ConsoleOutputHandle:
Win32API.ConsoleOutputHandle = ctypes.windll.kernel32.GetStdHandle(Win32API.StdOutputHandle)
bufferInfo = ConsoleScreenBufferInfo()
ctypes.windll.kernel32.GetConsoleScreenBufferInfo(Win32API.ConsoleOutputHandle, ctypes.byref(bufferInfo))
Win32API.DefaultColor = int(bufferInfo.wAttributes & 0xFF)
if IsPy3:
sys.stdout.flush() # need flush stdout in python 3
ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, color)
@staticmethod
def ResetConsoleColor():
'''Reset the default text color on console window'''
if IsPy3:
sys.stdout.flush() # need flush stdout in python 3
ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, Win32API.DefaultColor)
class Logger():
LogFile = '@AutomationLog.txt'
LineSep = '\n'
@staticmethod
def Write(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
if printToStdout:
isValidColor = (consoleColor >= ConsoleColor.Black and consoleColor <= ConsoleColor.White)
if isValidColor:
Win32API.SetConsoleColor(consoleColor)
try:
sys.stdout.write(log)
except UnicodeError as e:
Win32API.SetConsoleColor(ConsoleColor.Red)
isValidColor = True
sys.stdout.write(str(type(e)) + ' can\'t print the log!\n')
if isValidColor:
Win32API.ResetConsoleColor()
if not writeToFile:
return
if IsPy3:
logFile = open(Logger.LogFile, 'a+', encoding = 'utf-8')
else:
logFile = codecs.open(Logger.LogFile, 'a+', 'utf-8')
try:
logFile.write(log)
# logFile.flush() # need flush in python 3, otherwise log won't be saved
except Exception as ex:
logFile.close()
sys.stdout.write('can not write log with exception: {0} {1}'.format(type(ex), ex))
@staticmethod
def WriteLine(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
Logger.Write(log + Logger.LineSep, consoleColor, writeToFile, printToStdout)
@staticmethod
def Log(log, consoleColor = -1, writeToFile = True, printToStdout = True):
'''
consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen
if consoleColor == -1, use default color
'''
t = time.localtime()
log = '{0}-{1:02}-{2:02} {3:02}:{4:02}:{5:02} - {6}{7}'.format(t.tm_year, t.tm_mon, t.tm_mday,
t.tm_hour, t.tm_min, t.tm_sec, log, Logger.LineSep)
Logger.Write(log, consoleColor, writeToFile, printToStdout)
@staticmethod
def DeleteLog():
if os.path.exists(Logger.LogFile):
os.remove(Logger.LogFile)
def GetMSBuildPath():
cmd = 'call "%VS120COMNTOOLS%..\\..\\VC\\vcvarsall.bat" x86\nwhere msbuild'
ftemp = open('GetMSBuildPath.bat', 'wt')
ftemp.write(cmd)
ftemp.close()
p = subprocess.Popen('GetMSBuildPath.bat', stdout = subprocess.PIPE)
p.wait()
lines = p.stdout.read().decode().splitlines()
os.remove('GetMSBuildPath.bat')
for line in lines:
if 'MSBuild.exe' in line:
return line
def GetIncrediBuildPath():
try:
key=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\Classes\IncrediBuild.MonitorFile\shell\open\command')
value, typeId = winreg.QueryValueEx(key, '')
if value:
start = value.find('"')
end = value.find('"', start + 1)
path = value[start+1:end]
buildConsole = os.path.join(os.path.dirname(path), 'BuildConsole.exe')
return buildConsole
except FileNotFoundError as e:
Logger.WriteLine('can not find IncrediBuild', ConsoleColor.Red)
def UpdateCode():
# put git to path first
if not shutil.which('git.exe'):
Logger.Log('找不到git.exe. 请确认安装git时将git\bin目录路径加入到环境变量path中!!!\n, 跳过更新代码!!!', ConsoleColor.Yellow)
return false
oldDir = os.getcwd()
for dir in UpdateDir:
os.chdir(dir)
ret = os.system('git pull')
os.chdir(oldDir)
if ret != 0:
Logger.Log('update {0} failed'.format(dir), ConsoleColor.Yellow)
return false
return True
def BuildProject(cmd):
for i in range(6):
Logger.WriteLine(cmd, ConsoleColor.Cyan)
buildFailed = True
startTime = time.time()
p = subprocess.Popen(cmd) #IncrediBuild不能使用stdout=subprocess.PIPE,否则会导致p.wait()不返回,可能是IncrediBuild的bug
if IsPy3:
try:
buildFailed = p.wait(BuildTimeout)
except subprocess.TimeoutExpired as e:
Logger.Log('{0}'.format(e), ConsoleColor.Yellow)
p.kill()
else:
buildFailed = p.wait()
if not UseMSBuild:
#IncrediBuild的返回值不能说明编译是否成功,需要提取输出判断
fin = open('IncrediBuild.log')
for line in fin:
if line.startswith('=========='):
Logger.Write(line, ConsoleColor.Cyan, writeToFile = True if IsPy3 else False)
if IsPy3:
start = line.find('失败') + 3 #========== 生成: 成功 1 个,失败 0 个,最新 0 个,跳过 0 个 ==========
else:#为了兼容py2做的特殊处理,很恶心
start = 0
n2 = 0
while 1:
if line[start].isdigit():
n2 += 1
if n2 == 2:
break
start = line.find(' ', start)
start += 1
end = line.find(' ', start)
failCount = int(line[start:end])
buildFailed = failCount > 0
else:
Logger.Write(line, ConsoleColor.Red, writeToFile = True if IsPy3 else False, printToStdout = True if ' error ' in line else False)
fin.close()
costTime = time.time() - startTime
Logger.WriteLine('build cost time: {0:.1f}s\n'.format(costTime), ConsoleColor.Green)
if not buildFailed:
return True
return False
def BuildAllProjects():
buildSuccess = False
cmds = []
if UseMSBuild:
if IsRebuild:
if CleanAll:
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Debug'))
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Release'))
else:
cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType))
for project in MSBuildFirstProjects:
cmds.append('{0} {1} /t:{2} /p:Configuration={3} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, project, BuildType))
cmds.append('{0} {1} /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType))
else: #IncrediBuild
if IsRebuild:
if CleanAll:
cmds.append('"{0}" {1} /clean /cfg="{2}|Win32" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Debug'))
cmds.append('"{0}" {1} /clean /cfg="{2}|Win32" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Release'))
else:
cmds.append('"{0}" {1} /clean /cfg="{2}|Win32" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType))
for project in IncrediBuildFirstProjects:
cmds.append('"{0}" {1} /build /prj={2} /cfg="{3}|Win32" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, project, BuildType))
cmds.append('"{0}" {1} /build /cfg="{2}|Win32" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType))
for cmd in cmds:
buildSuccess = BuildProject(cmd)
if not buildSuccess:
break
return buildSuccess
def main():
if UseMSBuild:
if not os.path.exists(MSBuild):
Logger.Log('can not find msbuild.exe', ConsoleColor.Red)
return 1
else:
if not os.path.exists(IncrediBuild):
Logger.Log('can not find msbuild.exe', ConsoleColor.Red)
return 1
dir = os.path.dirname(__file__)
if dir:
oldDir = os.getcwd()
os.chdir(dir)
if Update:
if not UpdateCode():
return 1
Logger.Log('git update succeed', ConsoleColor.Green)
if Copy:
for bat in ExecBatList:
oldBatDir = os.getcwd()
batDir = os.path.dirname(bat)
batName = os.path.basename(bat)
if batDir:
os.chdir(batDir)
start = time.clock()
os.system(batName)
Logger.Log('run "{}" cost {:.1f} seconds'.format(batName, time.clock() - start), ConsoleColor.Green)
if batDir:
os.chdir(oldBatDir)
buildSuccess = BuildAllProjects()
if buildSuccess:
Logger.Log('build succeed', ConsoleColor.Green)
else:
Logger.Log('build failed', ConsoleColor.Red)
if dir:
os.chdir(oldDir)
return 0 if buildSuccess else 1
if __name__ == '__main__':
Logger.Log('run with argv ' + str(sys.argv), ConsoleColor.Green)
sys.argv = [x.lower() for x in sys.argv]
start_time = time.time()
if 'debug' in sys.argv:
BuildType = 'Debug'
if 'build' in sys.argv:
IsRebuild = False
Build = 'Build'
if 'update' in sys.argv:
Update = True
if 'copy' in sys.argv:
Copy = True
if 'clean' in sys.argv:
CleanAll = True
if 'incredibuild' in sys.argv:
UseMSBuild = False
if UseMSBuild:
MSBuild = GetMSBuildPath()
if not MSBuild:
Logger.Log('can not find MSBuild.exe', ConsoleColor.Red)
exit(1)
else:
IncrediBuild = GetIncrediBuildPath()
if not IncrediBuild:
Logger.Log('can not find BuildConsole.exe', ConsoleColor.Red)
exit(1)
cwd = os.getcwd()
Logger.WriteLine('current dir is: {0}, {1}: {2}'.format(cwd, Build, BuildType))
ret = main()
end_time = time.time()
cost_time = end_time-start_time
Logger.WriteLine('all build cost time: {0:.2f} seconds'.format(cost_time), ConsoleColor.Green)
exit(ret) | [
"#!python3\r\n",
"# -*- coding:utf-8 -*-\r\n",
"import os\r\n",
"import sys\r\n",
"import time\r\n",
"import ctypes\r\n",
"import shutil\r\n",
"import subprocess\r\n",
"IsPy3 = sys.version_info[0] >= 3\r\n",
"if IsPy3:\r\n",
" import winreg\r\n",
"else:\r\n",
" import codecs\r\n",
" import _winreg as winreg\r\n",
"\r\n",
"BuildType = 'Release'\r\n",
"IsRebuild = True\r\n",
"Build = 'Rebuild'\r\n",
"Update = False\r\n",
"Copy = False\r\n",
"CleanAll = False\r\n",
"BuildTimeout = 30*60\r\n",
"MSBuild = None\r\n",
"IncrediBuild = None\r\n",
"UseMSBuild = True #默认用MSBuild编译,如果为False则用IncrediBuild编译\r\n",
"\r\n",
"#不同项目只需修改下面5个变量\r\n",
"SlnFile = '../Cjson_lib.sln' #相对于本py脚本路径的相对路径\r\n",
"UpdateDir = [] #相对于本py脚本路径的相对路径,填空不更新\r\n",
"ExecBatList = [] #相对于本py脚本路径的相对路径,编译前调用的脚本,可填空,执行bat会先cd到bat目录再执行\r\n",
"MSBuildFirstProjects = [r'Cjson_lib'] #使用MSBuild需要工程文件在解决方案sln中的路径\r\n",
" # MSBuild首先编译的项目,填空不指定顺序\r\n",
"IncrediBuildFirstProjects = ['Cjson_lib'] #使用IncrediBuild只需工程名字\r\n",
" #IncrediBuild首先编译的项目,填空不指定顺序\r\n",
"\r\n",
"class ConsoleColor():\r\n",
" '''This class defines the values of color for printing on console window'''\r\n",
" Black = 0\r\n",
" DarkBlue = 1\r\n",
" DarkGreen = 2\r\n",
" DarkCyan = 3\r\n",
" DarkRed = 4\r\n",
" DarkMagenta = 5\r\n",
" DarkYellow = 6\r\n",
" Gray = 7\r\n",
" DarkGray = 8\r\n",
" Blue = 9\r\n",
" Green = 10\r\n",
" Cyan = 11\r\n",
" Red = 12\r\n",
" Magenta = 13\r\n",
" Yellow = 14\r\n",
" White = 15\r\n",
"\r\n",
"class Coord(ctypes.Structure):\r\n",
" _fields_ = [('X', ctypes.c_short), ('Y', ctypes.c_short)]\r\n",
"\r\n",
"class SmallRect(ctypes.Structure):\r\n",
" _fields_ = [('Left', ctypes.c_short),\r\n",
" ('Top', ctypes.c_short),\r\n",
" ('Right', ctypes.c_short),\r\n",
" ('Bottom', ctypes.c_short),\r\n",
" ]\r\n",
"\r\n",
"class ConsoleScreenBufferInfo(ctypes.Structure):\r\n",
" _fields_ = [('dwSize', Coord),\r\n",
" ('dwCursorPosition', Coord),\r\n",
" ('wAttributes', ctypes.c_uint),\r\n",
" ('srWindow', SmallRect),\r\n",
" ('dwMaximumWindowSize', Coord),\r\n",
" ]\r\n",
"\r\n",
"class Win32API():\r\n",
" '''Some native methods for python calling'''\r\n",
" StdOutputHandle = -11\r\n",
" ConsoleOutputHandle = None\r\n",
" DefaultColor = None\r\n",
"\r\n",
" @staticmethod\r\n",
" def SetConsoleColor(color):\r\n",
" '''Change the text color on console window'''\r\n",
" if not Win32API.DefaultColor:\r\n",
" if not Win32API.ConsoleOutputHandle:\r\n",
" Win32API.ConsoleOutputHandle = ctypes.windll.kernel32.GetStdHandle(Win32API.StdOutputHandle)\r\n",
" bufferInfo = ConsoleScreenBufferInfo()\r\n",
" ctypes.windll.kernel32.GetConsoleScreenBufferInfo(Win32API.ConsoleOutputHandle, ctypes.byref(bufferInfo))\r\n",
" Win32API.DefaultColor = int(bufferInfo.wAttributes & 0xFF)\r\n",
" if IsPy3:\r\n",
" sys.stdout.flush() # need flush stdout in python 3\r\n",
" ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, color)\r\n",
"\r\n",
" @staticmethod\r\n",
" def ResetConsoleColor():\r\n",
" '''Reset the default text color on console window'''\r\n",
" if IsPy3:\r\n",
" sys.stdout.flush() # need flush stdout in python 3\r\n",
" ctypes.windll.kernel32.SetConsoleTextAttribute(Win32API.ConsoleOutputHandle, Win32API.DefaultColor)\r\n",
"\r\n",
"class Logger():\r\n",
" LogFile = '@AutomationLog.txt'\r\n",
" LineSep = '\\n'\r\n",
" @staticmethod\r\n",
" def Write(log, consoleColor = -1, writeToFile = True, printToStdout = True):\r\n",
" '''\r\n",
" consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen\r\n",
" if consoleColor == -1, use default color\r\n",
" '''\r\n",
" if printToStdout:\r\n",
" isValidColor = (consoleColor >= ConsoleColor.Black and consoleColor <= ConsoleColor.White)\r\n",
" if isValidColor:\r\n",
" Win32API.SetConsoleColor(consoleColor)\r\n",
" try:\r\n",
" sys.stdout.write(log)\r\n",
" except UnicodeError as e:\r\n",
" Win32API.SetConsoleColor(ConsoleColor.Red)\r\n",
" isValidColor = True\r\n",
" sys.stdout.write(str(type(e)) + ' can\\'t print the log!\\n')\r\n",
" if isValidColor:\r\n",
" Win32API.ResetConsoleColor()\r\n",
" if not writeToFile:\r\n",
" return\r\n",
" if IsPy3:\r\n",
" logFile = open(Logger.LogFile, 'a+', encoding = 'utf-8')\r\n",
" else:\r\n",
" logFile = codecs.open(Logger.LogFile, 'a+', 'utf-8')\r\n",
" try:\r\n",
" logFile.write(log)\r\n",
" # logFile.flush() # need flush in python 3, otherwise log won't be saved\r\n",
" except Exception as ex:\r\n",
" logFile.close()\r\n",
" sys.stdout.write('can not write log with exception: {0} {1}'.format(type(ex), ex))\r\n",
"\r\n",
" @staticmethod\r\n",
" def WriteLine(log, consoleColor = -1, writeToFile = True, printToStdout = True):\r\n",
" '''\r\n",
" consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen\r\n",
" if consoleColor == -1, use default color\r\n",
" '''\r\n",
" Logger.Write(log + Logger.LineSep, consoleColor, writeToFile, printToStdout)\r\n",
"\r\n",
" @staticmethod\r\n",
" def Log(log, consoleColor = -1, writeToFile = True, printToStdout = True):\r\n",
" '''\r\n",
" consoleColor: value in class ConsoleColor, such as ConsoleColor.DarkGreen\r\n",
" if consoleColor == -1, use default color\r\n",
" '''\r\n",
" t = time.localtime()\r\n",
" log = '{0}-{1:02}-{2:02} {3:02}:{4:02}:{5:02} - {6}{7}'.format(t.tm_year, t.tm_mon, t.tm_mday,\r\n",
" t.tm_hour, t.tm_min, t.tm_sec, log, Logger.LineSep)\r\n",
" Logger.Write(log, consoleColor, writeToFile, printToStdout)\r\n",
"\r\n",
" @staticmethod\r\n",
" def DeleteLog():\r\n",
" if os.path.exists(Logger.LogFile):\r\n",
" os.remove(Logger.LogFile)\r\n",
"\r\n",
"\r\n",
"def GetMSBuildPath():\r\n",
" cmd = 'call \"%VS120COMNTOOLS%..\\\\..\\\\VC\\\\vcvarsall.bat\" x86\\nwhere msbuild'\r\n",
" ftemp = open('GetMSBuildPath.bat', 'wt')\r\n",
" ftemp.write(cmd)\r\n",
" ftemp.close()\r\n",
" p = subprocess.Popen('GetMSBuildPath.bat', stdout = subprocess.PIPE)\r\n",
" p.wait()\r\n",
" lines = p.stdout.read().decode().splitlines()\r\n",
" os.remove('GetMSBuildPath.bat')\r\n",
" for line in lines:\r\n",
" if 'MSBuild.exe' in line:\r\n",
" return line\r\n",
"\r\n",
"def GetIncrediBuildPath():\r\n",
" try:\r\n",
" key=winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\\Classes\\IncrediBuild.MonitorFile\\shell\\open\\command')\r\n",
" value, typeId = winreg.QueryValueEx(key, '')\r\n",
" if value:\r\n",
" start = value.find('\"')\r\n",
" end = value.find('\"', start + 1)\r\n",
" path = value[start+1:end]\r\n",
" buildConsole = os.path.join(os.path.dirname(path), 'BuildConsole.exe')\r\n",
" return buildConsole\r\n",
" except FileNotFoundError as e:\r\n",
" Logger.WriteLine('can not find IncrediBuild', ConsoleColor.Red)\r\n",
"\r\n",
"def UpdateCode():\r\n",
" # put git to path first\r\n",
" if not shutil.which('git.exe'):\r\n",
" Logger.Log('找不到git.exe. 请确认安装git时将git\\bin目录路径加入到环境变量path中!!!\\n, 跳过更新代码!!!', ConsoleColor.Yellow)\r\n",
" return false\r\n",
" oldDir = os.getcwd()\r\n",
" for dir in UpdateDir:\r\n",
" os.chdir(dir)\r\n",
" ret = os.system('git pull')\r\n",
" os.chdir(oldDir)\r\n",
" if ret != 0:\r\n",
" Logger.Log('update {0} failed'.format(dir), ConsoleColor.Yellow)\r\n",
" return false\r\n",
" return True\r\n",
"\r\n",
"def BuildProject(cmd):\r\n",
" for i in range(6):\r\n",
" Logger.WriteLine(cmd, ConsoleColor.Cyan)\r\n",
" buildFailed = True\r\n",
" startTime = time.time()\r\n",
" p = subprocess.Popen(cmd) #IncrediBuild不能使用stdout=subprocess.PIPE,否则会导致p.wait()不返回,可能是IncrediBuild的bug\r\n",
" if IsPy3:\r\n",
" try:\r\n",
" buildFailed = p.wait(BuildTimeout)\r\n",
" except subprocess.TimeoutExpired as e:\r\n",
" Logger.Log('{0}'.format(e), ConsoleColor.Yellow)\r\n",
" p.kill()\r\n",
" else:\r\n",
" buildFailed = p.wait()\r\n",
" if not UseMSBuild:\r\n",
" #IncrediBuild的返回值不能说明编译是否成功,需要提取输出判断\r\n",
" fin = open('IncrediBuild.log')\r\n",
" for line in fin:\r\n",
" if line.startswith('=========='):\r\n",
" Logger.Write(line, ConsoleColor.Cyan, writeToFile = True if IsPy3 else False)\r\n",
" if IsPy3:\r\n",
" start = line.find('失败') + 3 #========== 生成: 成功 1 个,失败 0 个,最新 0 个,跳过 0 个 ==========\r\n",
" else:#为了兼容py2做的特殊处理,很恶心\r\n",
" start = 0\r\n",
" n2 = 0\r\n",
" while 1:\r\n",
" if line[start].isdigit():\r\n",
" n2 += 1\r\n",
" if n2 == 2:\r\n",
" break\r\n",
" start = line.find(' ', start)\r\n",
" start += 1\r\n",
" end = line.find(' ', start)\r\n",
" failCount = int(line[start:end])\r\n",
" buildFailed = failCount > 0\r\n",
" else:\r\n",
" Logger.Write(line, ConsoleColor.Red, writeToFile = True if IsPy3 else False, printToStdout = True if ' error ' in line else False)\r\n",
" fin.close()\r\n",
" costTime = time.time() - startTime\r\n",
" Logger.WriteLine('build cost time: {0:.1f}s\\n'.format(costTime), ConsoleColor.Green)\r\n",
" if not buildFailed:\r\n",
" return True\r\n",
" return False\r\n",
"\r\n",
"def BuildAllProjects():\r\n",
" buildSuccess = False\r\n",
" cmds = []\r\n",
" if UseMSBuild:\r\n",
" if IsRebuild:\r\n",
" if CleanAll:\r\n",
" cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Debug'))\r\n",
" cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, 'Release'))\r\n",
" else:\r\n",
" cmds.append('{0} {1} /t:Clean /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType))\r\n",
" for project in MSBuildFirstProjects:\r\n",
" cmds.append('{0} {1} /t:{2} /p:Configuration={3} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, project, BuildType))\r\n",
" cmds.append('{0} {1} /p:Configuration={2} /nologo /maxcpucount /filelogger /consoleloggerparameters:ErrorsOnly'.format(MSBuild, SlnFile, BuildType))\r\n",
" else: #IncrediBuild\r\n",
" if IsRebuild:\r\n",
" if CleanAll:\r\n",
" cmds.append('\"{0}\" {1} /clean /cfg=\"{2}|Win32\" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Debug'))\r\n",
" cmds.append('\"{0}\" {1} /clean /cfg=\"{2}|Win32\" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, 'Release'))\r\n",
" else:\r\n",
" cmds.append('\"{0}\" {1} /clean /cfg=\"{2}|Win32\" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType))\r\n",
" for project in IncrediBuildFirstProjects:\r\n",
" cmds.append('\"{0}\" {1} /build /prj={2} /cfg=\"{3}|Win32\" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, project, BuildType))\r\n",
" cmds.append('\"{0}\" {1} /build /cfg=\"{2}|Win32\" /nologo /out=IncrediBuild.log'.format(IncrediBuild, SlnFile, BuildType))\r\n",
" for cmd in cmds:\r\n",
" buildSuccess = BuildProject(cmd)\r\n",
" if not buildSuccess:\r\n",
" break\r\n",
" return buildSuccess\r\n",
"\r\n",
"def main():\r\n",
" if UseMSBuild:\r\n",
" if not os.path.exists(MSBuild):\r\n",
" Logger.Log('can not find msbuild.exe', ConsoleColor.Red)\r\n",
" return 1\r\n",
" else:\r\n",
" if not os.path.exists(IncrediBuild):\r\n",
" Logger.Log('can not find msbuild.exe', ConsoleColor.Red)\r\n",
" return 1\r\n",
" dir = os.path.dirname(__file__)\r\n",
" if dir:\r\n",
" oldDir = os.getcwd()\r\n",
" os.chdir(dir)\r\n",
" if Update:\r\n",
" if not UpdateCode():\r\n",
" return 1\r\n",
" Logger.Log('git update succeed', ConsoleColor.Green)\r\n",
" if Copy:\r\n",
" for bat in ExecBatList:\r\n",
" oldBatDir = os.getcwd()\r\n",
" batDir = os.path.dirname(bat)\r\n",
" batName = os.path.basename(bat)\r\n",
" if batDir:\r\n",
" os.chdir(batDir)\r\n",
" start = time.clock()\r\n",
" os.system(batName)\r\n",
" Logger.Log('run \"{}\" cost {:.1f} seconds'.format(batName, time.clock() - start), ConsoleColor.Green)\r\n",
" if batDir:\r\n",
" os.chdir(oldBatDir)\r\n",
" buildSuccess = BuildAllProjects()\r\n",
" if buildSuccess:\r\n",
" Logger.Log('build succeed', ConsoleColor.Green)\r\n",
" else:\r\n",
" Logger.Log('build failed', ConsoleColor.Red)\r\n",
" if dir:\r\n",
" os.chdir(oldDir)\r\n",
" return 0 if buildSuccess else 1\r\n",
"\r\n",
"if __name__ == '__main__':\r\n",
" Logger.Log('run with argv ' + str(sys.argv), ConsoleColor.Green)\r\n",
" sys.argv = [x.lower() for x in sys.argv]\r\n",
" start_time = time.time()\r\n",
" if 'debug' in sys.argv:\r\n",
" BuildType = 'Debug'\r\n",
" if 'build' in sys.argv:\r\n",
" IsRebuild = False\r\n",
" Build = 'Build'\r\n",
" if 'update' in sys.argv:\r\n",
" Update = True\r\n",
" if 'copy' in sys.argv:\r\n",
" Copy = True\r\n",
" if 'clean' in sys.argv:\r\n",
" CleanAll = True\r\n",
" if 'incredibuild' in sys.argv:\r\n",
" UseMSBuild = False\r\n",
" if UseMSBuild:\r\n",
" MSBuild = GetMSBuildPath()\r\n",
" if not MSBuild:\r\n",
" Logger.Log('can not find MSBuild.exe', ConsoleColor.Red)\r\n",
" exit(1)\r\n",
" else:\r\n",
" IncrediBuild = GetIncrediBuildPath()\r\n",
" if not IncrediBuild:\r\n",
" Logger.Log('can not find BuildConsole.exe', ConsoleColor.Red)\r\n",
" exit(1)\r\n",
" cwd = os.getcwd()\r\n",
" Logger.WriteLine('current dir is: {0}, {1}: {2}'.format(cwd, Build, BuildType))\r\n",
" ret = main()\r\n",
" end_time = time.time()\r\n",
" cost_time = end_time-start_time\r\n",
" Logger.WriteLine('all build cost time: {0:.2f} seconds'.format(cost_time), ConsoleColor.Green)\r\n",
" exit(ret)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0.058823529411764705,
0.0425531914893617,
0.05128205128205128,
0.029850746268656716,
0.029411764705882353,
0.06451612903225806,
0.03076923076923077,
0.08571428571428572,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0.027777777777777776,
0,
0.024390243902439025,
0.023255813953488372,
0.022727272727272728,
0.058823529411764705,
0,
0.02,
0,
0.022222222222222223,
0.020833333333333332,
0.024390243902439025,
0.020833333333333332,
0.058823529411764705,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00909090909090909,
0,
0.008403361344537815,
0,
0,
0.015625,
0.010752688172043012,
0,
0,
0,
0,
0,
0.015625,
0.009174311926605505,
0,
0.058823529411764705,
0,
0,
0.05263157894736842,
0.08536585365853659,
0,
0.012048192771084338,
0,
0,
0,
0.009615384615384616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0.010416666666666666,
0,
0,
0.08139534883720931,
0,
0.012048192771084338,
0,
0,
0.011627906976744186,
0,
0,
0.075,
0,
0.012048192771084338,
0,
0,
0,
0.009615384615384616,
0.015384615384615385,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0.016666666666666666,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0.009433962264150943,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0.026785714285714284,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02,
0,
0,
0,
0.030303030303030304,
0,
0.01834862385321101,
0.044444444444444446,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03289473684210526,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0.04,
0,
0,
0,
0,
0,
0.005780346820809248,
0.005714285714285714,
0,
0.005714285714285714,
0,
0.00558659217877095,
0.006329113924050633,
0.08,
0,
0,
0.007407407407407408,
0.0072992700729927005,
0,
0.0072992700729927005,
0,
0.006622516556291391,
0.007751937984496124,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008771929824561403,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0.01,
0.07692307692307693
] | 343 | 0.006466 | false |
# -*- coding: utf-8 -*-
# Copyright 2014 SAP AG.
# SPDX-FileCopyrightText: 2013 SAP SE Srdjan Boskovic <srdjan.boskovic@sap.com>
#
# SPDX-License-Identifier: Apache-2.0
import sys
import unittest
from decimal import Decimal
import locale
from pyrfc import Connection, ExternalRuntimeError
from tests.config import (
CONNECTION_INFO,
RFC_MATH,
ABAP_to_python_date,
ABAP_to_python_time,
python_to_ABAP_date,
python_to_ABAP_time,
UNICODETEST,
BYTEARRAY_TEST,
BYTES_TEST,
)
from tests.abap_system import connection_info
locale.setlocale(locale.LC_ALL)
client = Connection(**CONNECTION_INFO)
def test_structure_rejects_non_dict():
try:
IMPORTSTRUCT = {"RFCINT1": "1"}
output = client.call("STFC_STRUCTURE", IMPORTSTRUCT=[IMPORTSTRUCT])
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "dictionary required for structure parameter, received"
if sys.version > "3.0":
assert ex.args[1] == "<class 'list'>"
else:
assert ex.args[1] == "<type 'list'>"
assert ex.args[2] == "IMPORTSTRUCT"
def test_table_rejects_non_list():
try:
IMPORTSTRUCT = {"RFCINT1": "1"}
output = client.call("STFC_STRUCTURE", RFCTABLE=IMPORTSTRUCT)
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "list required for table parameter, received"
if sys.version > "3.0":
assert ex.args[1] == "<class 'dict'>"
else:
assert ex.args[1] == "<type 'dict'>"
assert ex.args[2] == "RFCTABLE"
def test_basic_datatypes():
INPUTS = [
dict(
# Float
ZFLTP=0.123456789,
# Decimal
ZDEC=12345.67,
# Currency, Quantity
ZCURR=1234.56,
ZQUAN=12.3456,
ZQUAN_SIGN=-12.345,
),
dict(
# Float
ZFLTP=Decimal("0.123456789"),
# Decimal
ZDEC=Decimal("12345.67"),
# Currency, Quantity
ZCURR=Decimal("1234.56"),
ZQUAN=Decimal("12.3456"),
ZQUAN_SIGN=Decimal("-12.345"),
),
dict(
# Float
ZFLTP="0.123456789",
# Decimal
ZDEC="12345.67",
# Currency, Quantity
ZCURR="1234.56",
ZQUAN="12.3456",
ZQUAN_SIGN="-12.345",
),
]
for is_input in INPUTS:
result = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=is_input)["ES_OUTPUT"]
for k in is_input:
in_value = is_input[k]
out_value = result[k]
if k == "ZFLTP":
assert type(out_value) is float
else:
assert type(out_value) is Decimal
if type(in_value) != type(out_value):
assert str(in_value) == str(out_value)
else:
assert in_value == out_value
def test_string_unicode():
hello = u"Hällo SAP!"
result = client.call("STFC_CONNECTION", REQUTEXT=hello)["ECHOTEXT"]
assert result == hello
hello = u"01234" * 51
result = client.call("STFC_CONNECTION", REQUTEXT=hello)["ECHOTEXT"]
assert result == hello
unicode_test = [
"string",
"四周远处都能望见",
"\U0001F4AA",
"\u0001\uf4aa",
"a\xac\u1234\u20ac\U0001F4AA",
]
for s in unicode_test:
is_input = {"ZSHLP_MAT1": s}
result = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=is_input)["ES_OUTPUT"]
assert is_input["ZSHLP_MAT1"] == result["ZSHLP_MAT1"]
def test_date_output():
lm = client.call("BAPI_USER_GET_DETAIL", USERNAME="demo")["LASTMODIFIED"]
assert len(lm["MODDATE"]) > 0
assert len(lm["MODTIME"]) > 0
def test_min_max_positive():
IS_INPUT = {
# Float
"ZFLTP_MIN": RFC_MATH["FLOAT"]["POS"]["MIN"],
"ZFLTP_MAX": RFC_MATH["FLOAT"]["POS"]["MAX"],
# Decimal
"ZDECF16_MIN": RFC_MATH["DECF16"]["POS"]["MIN"],
"ZDECF16_MAX": RFC_MATH["DECF16"]["POS"]["MAX"],
"ZDECF34_MIN": RFC_MATH["DECF34"]["POS"]["MIN"],
"ZDECF34_MAX": RFC_MATH["DECF34"]["POS"]["MAX"],
}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
assert type(output["ZFLTP_MIN"]) is float
assert type(output["ZFLTP_MAX"]) is float
assert type(output["ZDECF16_MIN"]) is Decimal
assert type(output["ZDECF16_MAX"]) is Decimal
assert type(output["ZDECF34_MAX"]) is Decimal
assert type(output["ZDECF16_MIN"]) is Decimal
assert float(IS_INPUT["ZFLTP_MIN"]) == output["ZFLTP_MIN"]
assert float(IS_INPUT["ZFLTP_MAX"]) == output["ZFLTP_MAX"]
assert Decimal(IS_INPUT["ZDECF16_MIN"]) == output["ZDECF16_MIN"]
assert Decimal(IS_INPUT["ZDECF16_MAX"]) == output["ZDECF16_MAX"]
assert Decimal(IS_INPUT["ZDECF16_MIN"]) == output["ZDECF16_MIN"]
assert Decimal(IS_INPUT["ZDECF34_MAX"]) == output["ZDECF34_MAX"]
def test_min_max_negative():
IS_INPUT = {
# Float
"ZFLTP_MIN": RFC_MATH["FLOAT"]["NEG"]["MIN"],
"ZFLTP_MAX": RFC_MATH["FLOAT"]["NEG"]["MAX"],
# Decimal
"ZDECF16_MIN": RFC_MATH["DECF16"]["NEG"]["MIN"],
"ZDECF16_MAX": RFC_MATH["DECF16"]["NEG"]["MAX"],
"ZDECF34_MIN": RFC_MATH["DECF34"]["NEG"]["MIN"],
"ZDECF34_MAX": RFC_MATH["DECF34"]["NEG"]["MAX"],
}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
assert type(output["ZFLTP_MIN"]) is float
assert type(output["ZFLTP_MAX"]) is float
assert type(output["ZDECF16_MIN"]) is Decimal
assert type(output["ZDECF16_MAX"]) is Decimal
assert type(output["ZDECF16_MIN"]) is Decimal
assert type(output["ZDECF34_MAX"]) is Decimal
assert float(IS_INPUT["ZFLTP_MIN"]) == output["ZFLTP_MIN"]
assert float(IS_INPUT["ZFLTP_MAX"]) == output["ZFLTP_MAX"]
assert Decimal(IS_INPUT["ZDECF16_MIN"]) == output["ZDECF16_MIN"]
assert Decimal(IS_INPUT["ZDECF16_MAX"]) == output["ZDECF16_MAX"]
assert Decimal(IS_INPUT["ZDECF16_MIN"]) == output["ZDECF16_MIN"]
assert Decimal(IS_INPUT["ZDECF34_MAX"]) == output["ZDECF34_MAX"]
def test_bcd_floats_accept_floats():
IS_INPUT = {
# Float
"ZFLTP": 0.123456789,
# Decimal
"ZDEC": 12345.67,
"ZDECF16_MIN": 12345.67,
"ZDECF34_MIN": 12345.67,
# Currency, Quantity
"ZCURR": 1234.56,
"ZQUAN": 12.3456,
"ZQUAN_SIGN": -12.345,
}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
assert type(output["ZFLTP"]) is float
assert IS_INPUT["ZFLTP"] == output["ZFLTP"]
assert type(output["ZDEC"]) is Decimal
assert str(IS_INPUT["ZDEC"]) == str(output["ZDEC"])
assert IS_INPUT["ZDEC"] == float(output["ZDEC"])
assert type(output["ZDECF16_MIN"]) is Decimal
assert str(IS_INPUT["ZDECF16_MIN"]) == str(output["ZDECF16_MIN"])
assert IS_INPUT["ZDECF16_MIN"] == float(output["ZDECF16_MIN"])
assert type(output["ZDECF34_MIN"]) is Decimal
assert str(IS_INPUT["ZDECF34_MIN"]) == str(output["ZDECF34_MIN"])
assert IS_INPUT["ZDECF34_MIN"] == float(output["ZDECF34_MIN"])
assert type(output["ZCURR"]) is Decimal
assert str(IS_INPUT["ZCURR"]) == str(output["ZCURR"])
assert IS_INPUT["ZCURR"] == float(output["ZCURR"])
assert type(output["ZQUAN"]) is Decimal
assert str(IS_INPUT["ZQUAN"]) == str(output["ZQUAN"])
assert IS_INPUT["ZQUAN"] == float(output["ZQUAN"])
assert type(output["ZQUAN_SIGN"]) is Decimal
assert str(IS_INPUT["ZQUAN_SIGN"]) == str(output["ZQUAN_SIGN"])
assert IS_INPUT["ZQUAN_SIGN"] == float(output["ZQUAN_SIGN"])
def test_bcd_floats_accept_strings():
IS_INPUT = {
# Float
"ZFLTP": "0.123456789",
# Decimal
"ZDEC": "12345.67",
"ZDECF16_MIN": "12345.67",
"ZDECF34_MIN": "12345.67",
# Currency, Quantity
"ZCURR": "1234.56",
"ZQUAN": "12.3456",
"ZQUAN_SIGN": "-12.345",
}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
assert type(output["ZFLTP"]) is float
assert float(IS_INPUT["ZFLTP"]) == output["ZFLTP"]
assert type(output["ZDEC"]) is Decimal
assert IS_INPUT["ZDEC"] == str(output["ZDEC"])
assert type(output["ZDECF16_MIN"]) is Decimal
assert IS_INPUT["ZDECF16_MIN"] == str(output["ZDECF16_MIN"])
assert type(output["ZDECF34_MIN"]) is Decimal
assert IS_INPUT["ZDECF34_MIN"] == str(output["ZDECF34_MIN"])
assert type(output["ZCURR"]) is Decimal
assert IS_INPUT["ZCURR"] == str(output["ZCURR"])
assert type(output["ZQUAN"]) is Decimal
assert IS_INPUT["ZQUAN"] == str(output["ZQUAN"])
assert type(output["ZQUAN_SIGN"]) is Decimal
assert IS_INPUT["ZQUAN_SIGN"] == str(output["ZQUAN_SIGN"])
"""def test_bcd_floats_accept_strings_radix_comma():
locale.setlocale(locale.LC_ALL, "de_DE")
IS_INPUT = {
# Float
"ZFLTP": "0.123456789",
# Decimal
"ZDEC": "12345.67",
"ZDECF16_MIN": "12345.67",
"ZDECF34_MIN": "12345.67",
# Currency, Quantity
"ZCURR": "1234.56",
"ZQUAN": "12.3456",
"ZQUAN_SIGN": "-12.345",
}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
assert type(output["ZFLTP"]) is float
assert float(IS_INPUT["ZFLTP"]) == output["ZFLTP"]
assert type(output["ZDEC"]) is Decimal
assert IS_INPUT["ZDEC"] == str(output["ZDEC"])
assert type(output["ZDECF16_MIN"]) is Decimal
assert IS_INPUT["ZDECF16_MIN"] == str(output["ZDECF16_MIN"])
assert type(output["ZDECF34_MIN"]) is Decimal
assert IS_INPUT["ZDECF34_MIN"] == str(output["ZDECF34_MIN"])
assert type(output["ZCURR"]) is Decimal
assert IS_INPUT["ZCURR"] == str(output["ZCURR"])
assert type(output["ZQUAN"]) is Decimal
assert IS_INPUT["ZQUAN"] == str(output["ZQUAN"])
assert type(output["ZQUAN_SIGN"]) is Decimal
assert IS_INPUT["ZQUAN_SIGN"] == str(output["ZQUAN_SIGN"])
"""
def test_bcd_floats_accept_decimals():
IS_INPUT = {
# Float
"ZFLTP": Decimal("0.123456789"),
# Decimal
"ZDEC": Decimal("12345.67"),
"ZDECF16_MIN": Decimal("12345.67"),
"ZDECF34_MIN": Decimal("12345.67"),
# Currency, Quantity
"ZCURR": Decimal("1234.56"),
"ZQUAN": Decimal("12.3456"),
"ZQUAN_SIGN": Decimal("-12.345"),
}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
assert type(output["ZFLTP"]) is float
assert IS_INPUT["ZFLTP"] == Decimal(str(output["ZFLTP"]))
assert type(output["ZDEC"]) is Decimal
assert IS_INPUT["ZDEC"] == Decimal(str(output["ZDEC"]))
assert type(output["ZDECF16_MIN"]) is Decimal
assert IS_INPUT["ZDECF16_MIN"] == Decimal(str(output["ZDECF16_MIN"]))
assert type(output["ZDECF34_MIN"]) is Decimal
assert IS_INPUT["ZDECF34_MIN"] == Decimal(str(output["ZDECF34_MIN"]))
assert type(output["ZCURR"]) is Decimal
assert IS_INPUT["ZCURR"] == Decimal(str(output["ZCURR"]))
assert type(output["ZQUAN"]) is Decimal
assert IS_INPUT["ZQUAN"] == Decimal(str(output["ZQUAN"]))
assert type(output["ZQUAN_SIGN"]) is Decimal
assert IS_INPUT["ZQUAN_SIGN"] == Decimal(str(output["ZQUAN_SIGN"]))
def test_raw_types_accept_bytes():
ZRAW = BYTES_TEST
DIFF = b"\x00\x00\x00\x00"
IS_INPUT = {"ZRAW": ZRAW, "ZRAWSTRING": ZRAW}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
assert output["ZRAW"] == ZRAW + DIFF
assert output["ZRAWSTRING"] == ZRAW
assert type(output["ZRAW"]) is bytes
assert type(output["ZRAWSTRING"]) is bytes
def test_raw_types_accept_bytearray():
ZRAW = BYTEARRAY_TEST
DIFF = b"\x00\x00\x00\x00"
IS_INPUT = {"ZRAW": ZRAW, "ZRAWSTRING": ZRAW}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
assert output["ZRAW"] == ZRAW + DIFF
assert output["ZRAWSTRING"] == ZRAW
assert type(output["ZRAW"]) is bytes
assert type(output["ZRAWSTRING"]) is bytes
def test_date_time():
DATETIME_TEST = [
{"RFCDATE": "20161231", "RFCTIME": "123456"}, # good, correct date
{"RFCDATE": "", "RFCTIME": "123456"}, # good, empty date
{"RFCDATE": " ", "RFCTIME": "123456"}, # good, space date
{"RFCDATE": "20161231", "RFCTIME": ""}, # good, empty time
{"RFCDATE": "20161231", "RFCTIME": " "}, # good, space time
{"RFCDATE": "20161231", "RFCTIME": "000000"}, # good, zero time
{"RFCDATE": "2016123", "RFCTIME": "123456"}, # shorter date
{"RFCDATE": " ", "RFCTIME": "123456"}, # shorter empty date
{"RFCDATE": "201612311", "RFCTIME": "123456"}, # longer date
{"RFCDATE": " ", "RFCTIME": "123456"}, # longer empty date
{"RFCDATE": "20161232", "RFCTIME": "123456"}, # out of range date
{"RFCDATE": 20161231, "RFCTIME": "123456"}, # wrong date type
{"RFCDATE": "20161231", "RFCTIME": "12345"}, # shorter time
{"RFCDATE": "20161231", "RFCTIME": " "}, # shorter empty time
{"RFCDATE": "20161231", "RFCTIME": "1234566"}, # longer time
{"RFCDATE": "20161231", "RFCTIME": " "}, # longer empty time
{"RFCDATE": "20161231", "RFCTIME": "123466"}, # out of range time
{"RFCDATE": "20161231", "RFCTIME": 123456}, # wrong time type
]
counter = 0
for dt in DATETIME_TEST:
counter += 1
try:
result = client.call("STFC_STRUCTURE", IMPORTSTRUCT=dt)["ECHOSTRUCT"]
assert dt["RFCDATE"] == result["RFCDATE"]
if dt["RFCTIME"] == "":
assert "000000" == result["RFCTIME"]
else:
assert dt["RFCTIME"] == result["RFCTIME"]
except Exception as e:
assert type(e) is TypeError
if counter < 13:
assert e.args[0] == "date value required, received"
assert e.args[1] == dt["RFCDATE"]
assert e.args[3] == type(dt["RFCDATE"])
assert e.args[4] == "RFCDATE"
else:
assert e.args[0] == "time value required, received"
assert e.args[1] == dt["RFCTIME"]
assert e.args[3] == type(dt["RFCTIME"])
assert e.args[4] == "RFCTIME"
assert e.args[5] == "IMPORTSTRUCT"
def test_date_accepts_string():
TEST_DATE = u"20180625"
IMPORTSTRUCT = {"RFCDATE": TEST_DATE}
IMPORTTABLE = [IMPORTSTRUCT]
output = client.call(
"STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT, RFCTABLE=IMPORTTABLE
)
if sys.version > "3.0":
assert type(output["ECHOSTRUCT"]["RFCDATE"]) is str
assert type(output["RFCTABLE"][0]["RFCDATE"]) is str
else:
assert type(output["ECHOSTRUCT"]["RFCDATE"]) is unicode
assert type(output["RFCTABLE"][0]["RFCDATE"]) is unicode
assert output["ECHOSTRUCT"]["RFCDATE"] == TEST_DATE
assert output["RFCTABLE"][0]["RFCDATE"] == TEST_DATE
def test_date_accepts_date():
TEST_DATE = ABAP_to_python_date("20180625")
IMPORTSTRUCT = {"RFCDATE": TEST_DATE}
IMPORTTABLE = [IMPORTSTRUCT]
output = client.call(
"STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT, RFCTABLE=IMPORTTABLE
)
if sys.version > "3.0":
assert type(output["ECHOSTRUCT"]["RFCDATE"]) is str
assert type(output["RFCTABLE"][0]["RFCDATE"]) is str
else:
assert type(output["ECHOSTRUCT"]["RFCDATE"]) is unicode
assert type(output["RFCTABLE"][0]["RFCDATE"]) is unicode
assert output["ECHOSTRUCT"]["RFCDATE"] == python_to_ABAP_date(TEST_DATE)
assert output["RFCTABLE"][0]["RFCDATE"] == python_to_ABAP_date(TEST_DATE)
def test_time_accepts_string():
TEST_TIME = "123456"
IMPORTSTRUCT = {"RFCTIME": TEST_TIME}
IMPORTTABLE = [IMPORTSTRUCT]
output = client.call(
"STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT, RFCTABLE=IMPORTTABLE
)
if sys.version > "3.0":
assert type(output["ECHOSTRUCT"]["RFCTIME"]) is str
assert type(output["RFCTABLE"][0]["RFCTIME"]) is str
else:
assert type(output["ECHOSTRUCT"]["RFCTIME"]) is unicode
assert type(output["RFCTABLE"][0]["RFCTIME"]) is unicode
assert output["ECHOSTRUCT"]["RFCTIME"] == TEST_TIME
assert output["RFCTABLE"][0]["RFCTIME"] == TEST_TIME
def test_time_accepts_time():
TEST_TIME = ABAP_to_python_time("123456")
IMPORTSTRUCT = {"RFCTIME": TEST_TIME}
IMPORTTABLE = [IMPORTSTRUCT]
output = client.call(
"STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT, RFCTABLE=IMPORTTABLE
)
if sys.version > "3.0":
assert type(output["ECHOSTRUCT"]["RFCTIME"]) is str
assert type(output["RFCTABLE"][0]["RFCTIME"]) is str
else:
assert type(output["ECHOSTRUCT"]["RFCTIME"]) is unicode
assert type(output["RFCTABLE"][0]["RFCTIME"]) is unicode
assert output["ECHOSTRUCT"]["RFCTIME"] == python_to_ABAP_time(TEST_TIME)
assert output["RFCTABLE"][0]["RFCTIME"] == python_to_ABAP_time(TEST_TIME)
def test_error_int_rejects_string():
IMPORTSTRUCT = {"RFCINT1": "1"}
try:
output = client.call("STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT)
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "an integer required, received"
assert ex.args[1] == IMPORTSTRUCT["RFCINT1"]
assert ex.args[3] is str
assert ex.args[4] == "RFCINT1"
assert ex.args[5] == "IMPORTSTRUCT"
try:
output = client.call("STFC_STRUCTURE", RFCTABLE=[IMPORTSTRUCT])
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "an integer required, received"
assert ex.args[1] == IMPORTSTRUCT["RFCINT1"]
assert ex.args[3] is str
assert ex.args[4] == "RFCINT1"
assert ex.args[5] == "RFCTABLE"
def test_error_int_rejects_float():
IMPORTSTRUCT = {"RFCINT1": 1.0}
try:
output = client.call("STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT)
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "an integer required, received"
assert ex.args[1] == IMPORTSTRUCT["RFCINT1"]
assert ex.args[3] is float
assert ex.args[4] == "RFCINT1"
assert ex.args[5] == "IMPORTSTRUCT"
try:
output = client.call("STFC_STRUCTURE", RFCTABLE=[IMPORTSTRUCT])
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "an integer required, received"
assert ex.args[1] == IMPORTSTRUCT["RFCINT1"]
assert ex.args[3] is float
assert ex.args[4] == "RFCINT1"
assert ex.args[5] == "RFCTABLE"
def test_error_string_rejects_None():
IMPORTSTRUCT = {"RFCCHAR4": None}
try:
output = client.call("STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT)
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "an string is required, received"
assert ex.args[1] == IMPORTSTRUCT["RFCCHAR4"]
assert ex.args[3] == type(None)
assert ex.args[4] == "RFCCHAR4"
assert ex.args[5] == "IMPORTSTRUCT"
try:
output = client.call("STFC_STRUCTURE", RFCTABLE=[IMPORTSTRUCT])
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "an string is required, received"
assert ex.args[1] == IMPORTSTRUCT["RFCCHAR4"]
assert ex.args[3] == type(None)
assert ex.args[4] == "RFCCHAR4"
assert ex.args[5] == "RFCTABLE"
def test_error_string_rejects_int():
IMPORTSTRUCT = {"RFCCHAR4": 1}
try:
output = client.call("STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT)
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "an string is required, received"
assert ex.args[1] == IMPORTSTRUCT["RFCCHAR4"]
assert ex.args[3] is int
assert ex.args[4] == "RFCCHAR4"
assert ex.args[5] == "IMPORTSTRUCT"
try:
output = client.call("STFC_STRUCTURE", RFCTABLE=[IMPORTSTRUCT])
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "an string is required, received"
assert ex.args[1] == IMPORTSTRUCT["RFCCHAR4"]
assert ex.args[3] is int
assert ex.args[4] == "RFCCHAR4"
assert ex.args[5] == "RFCTABLE"
def test_float_rejects_not_a_number_string():
IMPORTSTRUCT = {"RFCFLOAT": "A"}
try:
output = client.call("STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT)
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "a decimal value required, received"
assert ex.args[1] == IMPORTSTRUCT["RFCFLOAT"]
assert ex.args[3] is str
assert ex.args[4] == "RFCFLOAT"
assert ex.args[5] == "IMPORTSTRUCT"
def test_float_rejects_array():
IMPORTSTRUCT = {"RFCFLOAT": []}
try:
output = client.call("STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT)
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "a decimal value required, received"
assert ex.args[1] == []
assert ex.args[3] is list
assert ex.args[4] == "RFCFLOAT"
assert ex.args[5] == "IMPORTSTRUCT"
def test_float_rejects_comma_for_point_locale():
IMPORTSTRUCT = {"RFCFLOAT": "1,2"}
try:
output = client.call("STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT)
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "a decimal value required, received"
assert ex.args[1] == "1,2"
assert ex.args[3] is str
assert ex.args[4] == "RFCFLOAT"
assert ex.args[5] == "IMPORTSTRUCT"
def test_float_accepts_point_for_point_locale():
IMPORTSTRUCT = {"RFCFLOAT": "1.2"}
output = client.call("STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT)["ECHOSTRUCT"]
assert output["RFCFLOAT"] == 1.2
def test_float_rejects_point_for_comma_locale():
locale.setlocale(locale.LC_ALL, "de_DE")
IMPORTSTRUCT = {"RFCFLOAT": "1.2"}
try:
output = client.call("STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT)
except Exception as ex:
assert isinstance(ex, ExternalRuntimeError) is True
assert ex.code == 22
assert ex.key == "RFC_CONVERSION_FAILURE"
assert (
ex.message
== "Cannot convert string value 1.2 at position 1 for the field RFCFLOAT to type RFCTYPE_FLOAT"
)
locale.resetlocale(locale.LC_ALL)
def test_float_accepts_comma_for_comma_locale():
locale.setlocale(locale.LC_ALL, "de_DE")
IMPORTSTRUCT = {"RFCFLOAT": "1,2"}
output = client.call("STFC_STRUCTURE", IMPORTSTRUCT=IMPORTSTRUCT)["ECHOSTRUCT"]
assert output["RFCFLOAT"] == 1.2
locale.resetlocale(locale.LC_ALL)
def test_bcd_rejects_not_a_number_string():
try:
IS_INPUT = {"ZDEC": "A"}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "a decimal value required, received"
assert ex.args[1] == IS_INPUT["ZDEC"]
assert ex.args[3] is str
assert ex.args[4] == "ZDEC"
assert ex.args[5] == "IS_INPUT"
def test_numc_rejects_non_string():
try:
IS_INPUT = {"ZNUMC": 1}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "a numeric string is required, received"
assert ex.args[1] == IS_INPUT["ZNUMC"]
assert ex.args[3] is int
assert ex.args[4] == "ZNUMC"
assert ex.args[5] == "IS_INPUT"
def test_numc_rejects_non_numeric_string():
try:
IS_INPUT = {"ZNUMC": "a1"}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "a numeric string is required, received"
assert ex.args[1] == IS_INPUT["ZNUMC"]
assert ex.args[3] is str
assert ex.args[4] == "ZNUMC"
assert ex.args[5] == "IS_INPUT"
def test_numc_rejects_empty_string():
try:
IS_INPUT = {"ZNUMC": ""}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "a numeric string is required, received"
assert ex.args[1] == IS_INPUT["ZNUMC"]
assert ex.args[3] is str
assert ex.args[4] == "ZNUMC"
assert ex.args[5] == "IS_INPUT"
def test_numc_rejects_space_string():
try:
IS_INPUT = {"ZNUMC": " "}
output = client.call("/COE/RBP_FE_DATATYPES", IS_INPUT=IS_INPUT, IV_COUNT=0)[
"ES_OUTPUT"
]
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args[0] == "a numeric string is required, received"
assert ex.args[1] == IS_INPUT["ZNUMC"]
assert ex.args[3] is str
assert ex.args[4] == "ZNUMC"
assert ex.args[5] == "IS_INPUT"
def test_utclong_accepts_min_max_initial():
UTCLONG = RFC_MATH["UTCLONG"]
conn = Connection(**connection_info("QM7"))
res = conn.call("ZDATATYPES", IV_UTCLONG=UTCLONG["MIN"])
assert res["EV_UTCLONG"] == UTCLONG["MIN"]
res = conn.call("ZDATATYPES", IV_UTCLONG=UTCLONG["MAX"])
assert res["EV_UTCLONG"] == UTCLONG["MAX"]
res = conn.call("ZDATATYPES", IV_UTCLONG=UTCLONG["INITIAL"])
assert res["EV_UTCLONG"] == UTCLONG["INITIAL"]
conn.close()
def test_utclong_rejects_non_string_or_invalid_format():
UTCLONG = RFC_MATH["UTCLONG"]
conn = Connection(**connection_info("QM7"))
try:
res = conn.call("ZDATATYPES", IV_UTCLONG=1)
except Exception as ex:
assert isinstance(ex, TypeError) is True
assert ex.args == (
"an string is required, received",
1,
"of type",
type(1),
"IV_UTCLONG",
)
try:
res = conn.call("ZDATATYPES", IV_UTCLONG="1")
except Exception as ex:
assert isinstance(ex, ExternalRuntimeError) is True
assert ex.code == 22
assert ex.key == "RFC_CONVERSION_FAILURE"
assert ex.message == "Cannot convert 1 to RFCTYPE_UTCLONG : illegal format"
conn.close()
#
# TypeError:
# res = conn.call("ZDATATYPES", IV_UTCLONG="1")
# pyrfc._exception.ExternalRuntimeError: RFC_CONVERSION_FAILURE (rc=22): key=RFC_CONVERSION_FAILURE, message=Cannot convert 1 to RFCTYPE_UTCLONG : illegal format [MSG: class=, type=, number=, v1-4:=;;
conn.close()
if __name__ == "__main__":
unittest.main()
| [
"# -*- coding: utf-8 -*-\n",
"\n",
"# Copyright 2014 SAP AG.\n",
"# SPDX-FileCopyrightText: 2013 SAP SE Srdjan Boskovic <srdjan.boskovic@sap.com>\n",
"#\n",
"# SPDX-License-Identifier: Apache-2.0\n",
"\n",
"import sys\n",
"import unittest\n",
"from decimal import Decimal\n",
"import locale\n",
"\n",
"from pyrfc import Connection, ExternalRuntimeError\n",
"\n",
"from tests.config import (\n",
" CONNECTION_INFO,\n",
" RFC_MATH,\n",
" ABAP_to_python_date,\n",
" ABAP_to_python_time,\n",
" python_to_ABAP_date,\n",
" python_to_ABAP_time,\n",
" UNICODETEST,\n",
" BYTEARRAY_TEST,\n",
" BYTES_TEST,\n",
")\n",
"\n",
"from tests.abap_system import connection_info\n",
"\n",
"locale.setlocale(locale.LC_ALL)\n",
"\n",
"client = Connection(**CONNECTION_INFO)\n",
"\n",
"\n",
"def test_structure_rejects_non_dict():\n",
" try:\n",
" IMPORTSTRUCT = {\"RFCINT1\": \"1\"}\n",
"\n",
" output = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=[IMPORTSTRUCT])\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"dictionary required for structure parameter, received\"\n",
" if sys.version > \"3.0\":\n",
" assert ex.args[1] == \"<class 'list'>\"\n",
" else:\n",
" assert ex.args[1] == \"<type 'list'>\"\n",
" assert ex.args[2] == \"IMPORTSTRUCT\"\n",
"\n",
"\n",
"def test_table_rejects_non_list():\n",
" try:\n",
" IMPORTSTRUCT = {\"RFCINT1\": \"1\"}\n",
"\n",
" output = client.call(\"STFC_STRUCTURE\", RFCTABLE=IMPORTSTRUCT)\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"list required for table parameter, received\"\n",
" if sys.version > \"3.0\":\n",
" assert ex.args[1] == \"<class 'dict'>\"\n",
" else:\n",
" assert ex.args[1] == \"<type 'dict'>\"\n",
" assert ex.args[2] == \"RFCTABLE\"\n",
"\n",
"\n",
"def test_basic_datatypes():\n",
" INPUTS = [\n",
" dict(\n",
" # Float\n",
" ZFLTP=0.123456789,\n",
" # Decimal\n",
" ZDEC=12345.67,\n",
" # Currency, Quantity\n",
" ZCURR=1234.56,\n",
" ZQUAN=12.3456,\n",
" ZQUAN_SIGN=-12.345,\n",
" ),\n",
" dict(\n",
" # Float\n",
" ZFLTP=Decimal(\"0.123456789\"),\n",
" # Decimal\n",
" ZDEC=Decimal(\"12345.67\"),\n",
" # Currency, Quantity\n",
" ZCURR=Decimal(\"1234.56\"),\n",
" ZQUAN=Decimal(\"12.3456\"),\n",
" ZQUAN_SIGN=Decimal(\"-12.345\"),\n",
" ),\n",
" dict(\n",
" # Float\n",
" ZFLTP=\"0.123456789\",\n",
" # Decimal\n",
" ZDEC=\"12345.67\",\n",
" # Currency, Quantity\n",
" ZCURR=\"1234.56\",\n",
" ZQUAN=\"12.3456\",\n",
" ZQUAN_SIGN=\"-12.345\",\n",
" ),\n",
" ]\n",
"\n",
" for is_input in INPUTS:\n",
" result = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=is_input)[\"ES_OUTPUT\"]\n",
" for k in is_input:\n",
" in_value = is_input[k]\n",
" out_value = result[k]\n",
" if k == \"ZFLTP\":\n",
" assert type(out_value) is float\n",
" else:\n",
" assert type(out_value) is Decimal\n",
" if type(in_value) != type(out_value):\n",
" assert str(in_value) == str(out_value)\n",
" else:\n",
" assert in_value == out_value\n",
"\n",
"\n",
"def test_string_unicode():\n",
" hello = u\"Hällo SAP!\"\n",
" result = client.call(\"STFC_CONNECTION\", REQUTEXT=hello)[\"ECHOTEXT\"]\n",
" assert result == hello\n",
"\n",
" hello = u\"01234\" * 51\n",
" result = client.call(\"STFC_CONNECTION\", REQUTEXT=hello)[\"ECHOTEXT\"]\n",
" assert result == hello\n",
"\n",
" unicode_test = [\n",
" \"string\",\n",
" \"四周远处都能望见\",\n",
" \"\\U0001F4AA\",\n",
" \"\\u0001\\uf4aa\",\n",
" \"a\\xac\\u1234\\u20ac\\U0001F4AA\",\n",
" ]\n",
"\n",
" for s in unicode_test:\n",
" is_input = {\"ZSHLP_MAT1\": s}\n",
" result = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=is_input)[\"ES_OUTPUT\"]\n",
" assert is_input[\"ZSHLP_MAT1\"] == result[\"ZSHLP_MAT1\"]\n",
"\n",
"\n",
"def test_date_output():\n",
" lm = client.call(\"BAPI_USER_GET_DETAIL\", USERNAME=\"demo\")[\"LASTMODIFIED\"]\n",
" assert len(lm[\"MODDATE\"]) > 0\n",
" assert len(lm[\"MODTIME\"]) > 0\n",
"\n",
"\n",
"def test_min_max_positive():\n",
" IS_INPUT = {\n",
" # Float\n",
" \"ZFLTP_MIN\": RFC_MATH[\"FLOAT\"][\"POS\"][\"MIN\"],\n",
" \"ZFLTP_MAX\": RFC_MATH[\"FLOAT\"][\"POS\"][\"MAX\"],\n",
" # Decimal\n",
" \"ZDECF16_MIN\": RFC_MATH[\"DECF16\"][\"POS\"][\"MIN\"],\n",
" \"ZDECF16_MAX\": RFC_MATH[\"DECF16\"][\"POS\"][\"MAX\"],\n",
" \"ZDECF34_MIN\": RFC_MATH[\"DECF34\"][\"POS\"][\"MIN\"],\n",
" \"ZDECF34_MAX\": RFC_MATH[\"DECF34\"][\"POS\"][\"MAX\"],\n",
" }\n",
"\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
"\n",
" assert type(output[\"ZFLTP_MIN\"]) is float\n",
" assert type(output[\"ZFLTP_MAX\"]) is float\n",
" assert type(output[\"ZDECF16_MIN\"]) is Decimal\n",
" assert type(output[\"ZDECF16_MAX\"]) is Decimal\n",
" assert type(output[\"ZDECF34_MAX\"]) is Decimal\n",
" assert type(output[\"ZDECF16_MIN\"]) is Decimal\n",
"\n",
" assert float(IS_INPUT[\"ZFLTP_MIN\"]) == output[\"ZFLTP_MIN\"]\n",
" assert float(IS_INPUT[\"ZFLTP_MAX\"]) == output[\"ZFLTP_MAX\"]\n",
" assert Decimal(IS_INPUT[\"ZDECF16_MIN\"]) == output[\"ZDECF16_MIN\"]\n",
" assert Decimal(IS_INPUT[\"ZDECF16_MAX\"]) == output[\"ZDECF16_MAX\"]\n",
" assert Decimal(IS_INPUT[\"ZDECF16_MIN\"]) == output[\"ZDECF16_MIN\"]\n",
" assert Decimal(IS_INPUT[\"ZDECF34_MAX\"]) == output[\"ZDECF34_MAX\"]\n",
"\n",
"\n",
"def test_min_max_negative():\n",
" IS_INPUT = {\n",
" # Float\n",
" \"ZFLTP_MIN\": RFC_MATH[\"FLOAT\"][\"NEG\"][\"MIN\"],\n",
" \"ZFLTP_MAX\": RFC_MATH[\"FLOAT\"][\"NEG\"][\"MAX\"],\n",
" # Decimal\n",
" \"ZDECF16_MIN\": RFC_MATH[\"DECF16\"][\"NEG\"][\"MIN\"],\n",
" \"ZDECF16_MAX\": RFC_MATH[\"DECF16\"][\"NEG\"][\"MAX\"],\n",
" \"ZDECF34_MIN\": RFC_MATH[\"DECF34\"][\"NEG\"][\"MIN\"],\n",
" \"ZDECF34_MAX\": RFC_MATH[\"DECF34\"][\"NEG\"][\"MAX\"],\n",
" }\n",
"\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
"\n",
" assert type(output[\"ZFLTP_MIN\"]) is float\n",
" assert type(output[\"ZFLTP_MAX\"]) is float\n",
" assert type(output[\"ZDECF16_MIN\"]) is Decimal\n",
" assert type(output[\"ZDECF16_MAX\"]) is Decimal\n",
" assert type(output[\"ZDECF16_MIN\"]) is Decimal\n",
" assert type(output[\"ZDECF34_MAX\"]) is Decimal\n",
"\n",
" assert float(IS_INPUT[\"ZFLTP_MIN\"]) == output[\"ZFLTP_MIN\"]\n",
" assert float(IS_INPUT[\"ZFLTP_MAX\"]) == output[\"ZFLTP_MAX\"]\n",
" assert Decimal(IS_INPUT[\"ZDECF16_MIN\"]) == output[\"ZDECF16_MIN\"]\n",
" assert Decimal(IS_INPUT[\"ZDECF16_MAX\"]) == output[\"ZDECF16_MAX\"]\n",
" assert Decimal(IS_INPUT[\"ZDECF16_MIN\"]) == output[\"ZDECF16_MIN\"]\n",
" assert Decimal(IS_INPUT[\"ZDECF34_MAX\"]) == output[\"ZDECF34_MAX\"]\n",
"\n",
"\n",
"def test_bcd_floats_accept_floats():\n",
" IS_INPUT = {\n",
" # Float\n",
" \"ZFLTP\": 0.123456789,\n",
" # Decimal\n",
" \"ZDEC\": 12345.67,\n",
" \"ZDECF16_MIN\": 12345.67,\n",
" \"ZDECF34_MIN\": 12345.67,\n",
" # Currency, Quantity\n",
" \"ZCURR\": 1234.56,\n",
" \"ZQUAN\": 12.3456,\n",
" \"ZQUAN_SIGN\": -12.345,\n",
" }\n",
"\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
" assert type(output[\"ZFLTP\"]) is float\n",
" assert IS_INPUT[\"ZFLTP\"] == output[\"ZFLTP\"]\n",
"\n",
" assert type(output[\"ZDEC\"]) is Decimal\n",
" assert str(IS_INPUT[\"ZDEC\"]) == str(output[\"ZDEC\"])\n",
" assert IS_INPUT[\"ZDEC\"] == float(output[\"ZDEC\"])\n",
"\n",
" assert type(output[\"ZDECF16_MIN\"]) is Decimal\n",
" assert str(IS_INPUT[\"ZDECF16_MIN\"]) == str(output[\"ZDECF16_MIN\"])\n",
" assert IS_INPUT[\"ZDECF16_MIN\"] == float(output[\"ZDECF16_MIN\"])\n",
"\n",
" assert type(output[\"ZDECF34_MIN\"]) is Decimal\n",
" assert str(IS_INPUT[\"ZDECF34_MIN\"]) == str(output[\"ZDECF34_MIN\"])\n",
" assert IS_INPUT[\"ZDECF34_MIN\"] == float(output[\"ZDECF34_MIN\"])\n",
"\n",
" assert type(output[\"ZCURR\"]) is Decimal\n",
" assert str(IS_INPUT[\"ZCURR\"]) == str(output[\"ZCURR\"])\n",
" assert IS_INPUT[\"ZCURR\"] == float(output[\"ZCURR\"])\n",
"\n",
" assert type(output[\"ZQUAN\"]) is Decimal\n",
" assert str(IS_INPUT[\"ZQUAN\"]) == str(output[\"ZQUAN\"])\n",
" assert IS_INPUT[\"ZQUAN\"] == float(output[\"ZQUAN\"])\n",
"\n",
" assert type(output[\"ZQUAN_SIGN\"]) is Decimal\n",
" assert str(IS_INPUT[\"ZQUAN_SIGN\"]) == str(output[\"ZQUAN_SIGN\"])\n",
" assert IS_INPUT[\"ZQUAN_SIGN\"] == float(output[\"ZQUAN_SIGN\"])\n",
"\n",
"\n",
"def test_bcd_floats_accept_strings():\n",
" IS_INPUT = {\n",
" # Float\n",
" \"ZFLTP\": \"0.123456789\",\n",
" # Decimal\n",
" \"ZDEC\": \"12345.67\",\n",
" \"ZDECF16_MIN\": \"12345.67\",\n",
" \"ZDECF34_MIN\": \"12345.67\",\n",
" # Currency, Quantity\n",
" \"ZCURR\": \"1234.56\",\n",
" \"ZQUAN\": \"12.3456\",\n",
" \"ZQUAN_SIGN\": \"-12.345\",\n",
" }\n",
"\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
" assert type(output[\"ZFLTP\"]) is float\n",
" assert float(IS_INPUT[\"ZFLTP\"]) == output[\"ZFLTP\"]\n",
"\n",
" assert type(output[\"ZDEC\"]) is Decimal\n",
" assert IS_INPUT[\"ZDEC\"] == str(output[\"ZDEC\"])\n",
"\n",
" assert type(output[\"ZDECF16_MIN\"]) is Decimal\n",
" assert IS_INPUT[\"ZDECF16_MIN\"] == str(output[\"ZDECF16_MIN\"])\n",
"\n",
" assert type(output[\"ZDECF34_MIN\"]) is Decimal\n",
" assert IS_INPUT[\"ZDECF34_MIN\"] == str(output[\"ZDECF34_MIN\"])\n",
"\n",
" assert type(output[\"ZCURR\"]) is Decimal\n",
" assert IS_INPUT[\"ZCURR\"] == str(output[\"ZCURR\"])\n",
"\n",
" assert type(output[\"ZQUAN\"]) is Decimal\n",
" assert IS_INPUT[\"ZQUAN\"] == str(output[\"ZQUAN\"])\n",
"\n",
" assert type(output[\"ZQUAN_SIGN\"]) is Decimal\n",
" assert IS_INPUT[\"ZQUAN_SIGN\"] == str(output[\"ZQUAN_SIGN\"])\n",
"\n",
"\n",
"\"\"\"def test_bcd_floats_accept_strings_radix_comma():\n",
" locale.setlocale(locale.LC_ALL, \"de_DE\")\n",
" IS_INPUT = {\n",
" # Float\n",
" \"ZFLTP\": \"0.123456789\",\n",
" # Decimal\n",
" \"ZDEC\": \"12345.67\",\n",
" \"ZDECF16_MIN\": \"12345.67\",\n",
" \"ZDECF34_MIN\": \"12345.67\",\n",
" # Currency, Quantity\n",
" \"ZCURR\": \"1234.56\",\n",
" \"ZQUAN\": \"12.3456\",\n",
" \"ZQUAN_SIGN\": \"-12.345\",\n",
" }\n",
"\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
" assert type(output[\"ZFLTP\"]) is float\n",
" assert float(IS_INPUT[\"ZFLTP\"]) == output[\"ZFLTP\"]\n",
"\n",
" assert type(output[\"ZDEC\"]) is Decimal\n",
" assert IS_INPUT[\"ZDEC\"] == str(output[\"ZDEC\"])\n",
"\n",
" assert type(output[\"ZDECF16_MIN\"]) is Decimal\n",
" assert IS_INPUT[\"ZDECF16_MIN\"] == str(output[\"ZDECF16_MIN\"])\n",
"\n",
" assert type(output[\"ZDECF34_MIN\"]) is Decimal\n",
" assert IS_INPUT[\"ZDECF34_MIN\"] == str(output[\"ZDECF34_MIN\"])\n",
"\n",
" assert type(output[\"ZCURR\"]) is Decimal\n",
" assert IS_INPUT[\"ZCURR\"] == str(output[\"ZCURR\"])\n",
"\n",
" assert type(output[\"ZQUAN\"]) is Decimal\n",
" assert IS_INPUT[\"ZQUAN\"] == str(output[\"ZQUAN\"])\n",
"\n",
" assert type(output[\"ZQUAN_SIGN\"]) is Decimal\n",
" assert IS_INPUT[\"ZQUAN_SIGN\"] == str(output[\"ZQUAN_SIGN\"])\n",
"\"\"\"\n",
"\n",
"\n",
"def test_bcd_floats_accept_decimals():\n",
" IS_INPUT = {\n",
" # Float\n",
" \"ZFLTP\": Decimal(\"0.123456789\"),\n",
" # Decimal\n",
" \"ZDEC\": Decimal(\"12345.67\"),\n",
" \"ZDECF16_MIN\": Decimal(\"12345.67\"),\n",
" \"ZDECF34_MIN\": Decimal(\"12345.67\"),\n",
" # Currency, Quantity\n",
" \"ZCURR\": Decimal(\"1234.56\"),\n",
" \"ZQUAN\": Decimal(\"12.3456\"),\n",
" \"ZQUAN_SIGN\": Decimal(\"-12.345\"),\n",
" }\n",
"\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
" assert type(output[\"ZFLTP\"]) is float\n",
" assert IS_INPUT[\"ZFLTP\"] == Decimal(str(output[\"ZFLTP\"]))\n",
"\n",
" assert type(output[\"ZDEC\"]) is Decimal\n",
" assert IS_INPUT[\"ZDEC\"] == Decimal(str(output[\"ZDEC\"]))\n",
"\n",
" assert type(output[\"ZDECF16_MIN\"]) is Decimal\n",
" assert IS_INPUT[\"ZDECF16_MIN\"] == Decimal(str(output[\"ZDECF16_MIN\"]))\n",
"\n",
" assert type(output[\"ZDECF34_MIN\"]) is Decimal\n",
" assert IS_INPUT[\"ZDECF34_MIN\"] == Decimal(str(output[\"ZDECF34_MIN\"]))\n",
"\n",
" assert type(output[\"ZCURR\"]) is Decimal\n",
" assert IS_INPUT[\"ZCURR\"] == Decimal(str(output[\"ZCURR\"]))\n",
"\n",
" assert type(output[\"ZQUAN\"]) is Decimal\n",
" assert IS_INPUT[\"ZQUAN\"] == Decimal(str(output[\"ZQUAN\"]))\n",
"\n",
" assert type(output[\"ZQUAN_SIGN\"]) is Decimal\n",
" assert IS_INPUT[\"ZQUAN_SIGN\"] == Decimal(str(output[\"ZQUAN_SIGN\"]))\n",
"\n",
"\n",
"def test_raw_types_accept_bytes():\n",
" ZRAW = BYTES_TEST\n",
" DIFF = b\"\\x00\\x00\\x00\\x00\"\n",
" IS_INPUT = {\"ZRAW\": ZRAW, \"ZRAWSTRING\": ZRAW}\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
" assert output[\"ZRAW\"] == ZRAW + DIFF\n",
" assert output[\"ZRAWSTRING\"] == ZRAW\n",
" assert type(output[\"ZRAW\"]) is bytes\n",
" assert type(output[\"ZRAWSTRING\"]) is bytes\n",
"\n",
"\n",
"def test_raw_types_accept_bytearray():\n",
" ZRAW = BYTEARRAY_TEST\n",
" DIFF = b\"\\x00\\x00\\x00\\x00\"\n",
" IS_INPUT = {\"ZRAW\": ZRAW, \"ZRAWSTRING\": ZRAW}\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
" assert output[\"ZRAW\"] == ZRAW + DIFF\n",
" assert output[\"ZRAWSTRING\"] == ZRAW\n",
" assert type(output[\"ZRAW\"]) is bytes\n",
" assert type(output[\"ZRAWSTRING\"]) is bytes\n",
"\n",
"\n",
"def test_date_time():\n",
" DATETIME_TEST = [\n",
" {\"RFCDATE\": \"20161231\", \"RFCTIME\": \"123456\"}, # good, correct date\n",
" {\"RFCDATE\": \"\", \"RFCTIME\": \"123456\"}, # good, empty date\n",
" {\"RFCDATE\": \" \", \"RFCTIME\": \"123456\"}, # good, space date\n",
" {\"RFCDATE\": \"20161231\", \"RFCTIME\": \"\"}, # good, empty time\n",
" {\"RFCDATE\": \"20161231\", \"RFCTIME\": \" \"}, # good, space time\n",
" {\"RFCDATE\": \"20161231\", \"RFCTIME\": \"000000\"}, # good, zero time\n",
" {\"RFCDATE\": \"2016123\", \"RFCTIME\": \"123456\"}, # shorter date\n",
" {\"RFCDATE\": \" \", \"RFCTIME\": \"123456\"}, # shorter empty date\n",
" {\"RFCDATE\": \"201612311\", \"RFCTIME\": \"123456\"}, # longer date\n",
" {\"RFCDATE\": \" \", \"RFCTIME\": \"123456\"}, # longer empty date\n",
" {\"RFCDATE\": \"20161232\", \"RFCTIME\": \"123456\"}, # out of range date\n",
" {\"RFCDATE\": 20161231, \"RFCTIME\": \"123456\"}, # wrong date type\n",
" {\"RFCDATE\": \"20161231\", \"RFCTIME\": \"12345\"}, # shorter time\n",
" {\"RFCDATE\": \"20161231\", \"RFCTIME\": \" \"}, # shorter empty time\n",
" {\"RFCDATE\": \"20161231\", \"RFCTIME\": \"1234566\"}, # longer time\n",
" {\"RFCDATE\": \"20161231\", \"RFCTIME\": \" \"}, # longer empty time\n",
" {\"RFCDATE\": \"20161231\", \"RFCTIME\": \"123466\"}, # out of range time\n",
" {\"RFCDATE\": \"20161231\", \"RFCTIME\": 123456}, # wrong time type\n",
" ]\n",
" counter = 0\n",
" for dt in DATETIME_TEST:\n",
" counter += 1\n",
" try:\n",
" result = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=dt)[\"ECHOSTRUCT\"]\n",
" assert dt[\"RFCDATE\"] == result[\"RFCDATE\"]\n",
" if dt[\"RFCTIME\"] == \"\":\n",
" assert \"000000\" == result[\"RFCTIME\"]\n",
" else:\n",
" assert dt[\"RFCTIME\"] == result[\"RFCTIME\"]\n",
" except Exception as e:\n",
" assert type(e) is TypeError\n",
"\n",
" if counter < 13:\n",
" assert e.args[0] == \"date value required, received\"\n",
" assert e.args[1] == dt[\"RFCDATE\"]\n",
" assert e.args[3] == type(dt[\"RFCDATE\"])\n",
" assert e.args[4] == \"RFCDATE\"\n",
" else:\n",
" assert e.args[0] == \"time value required, received\"\n",
" assert e.args[1] == dt[\"RFCTIME\"]\n",
" assert e.args[3] == type(dt[\"RFCTIME\"])\n",
" assert e.args[4] == \"RFCTIME\"\n",
" assert e.args[5] == \"IMPORTSTRUCT\"\n",
"\n",
"\n",
"def test_date_accepts_string():\n",
" TEST_DATE = u\"20180625\"\n",
"\n",
" IMPORTSTRUCT = {\"RFCDATE\": TEST_DATE}\n",
" IMPORTTABLE = [IMPORTSTRUCT]\n",
" output = client.call(\n",
" \"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT, RFCTABLE=IMPORTTABLE\n",
" )\n",
" if sys.version > \"3.0\":\n",
" assert type(output[\"ECHOSTRUCT\"][\"RFCDATE\"]) is str\n",
" assert type(output[\"RFCTABLE\"][0][\"RFCDATE\"]) is str\n",
" else:\n",
" assert type(output[\"ECHOSTRUCT\"][\"RFCDATE\"]) is unicode\n",
" assert type(output[\"RFCTABLE\"][0][\"RFCDATE\"]) is unicode\n",
" assert output[\"ECHOSTRUCT\"][\"RFCDATE\"] == TEST_DATE\n",
" assert output[\"RFCTABLE\"][0][\"RFCDATE\"] == TEST_DATE\n",
"\n",
"\n",
"def test_date_accepts_date():\n",
" TEST_DATE = ABAP_to_python_date(\"20180625\")\n",
"\n",
" IMPORTSTRUCT = {\"RFCDATE\": TEST_DATE}\n",
" IMPORTTABLE = [IMPORTSTRUCT]\n",
" output = client.call(\n",
" \"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT, RFCTABLE=IMPORTTABLE\n",
" )\n",
" if sys.version > \"3.0\":\n",
" assert type(output[\"ECHOSTRUCT\"][\"RFCDATE\"]) is str\n",
" assert type(output[\"RFCTABLE\"][0][\"RFCDATE\"]) is str\n",
" else:\n",
" assert type(output[\"ECHOSTRUCT\"][\"RFCDATE\"]) is unicode\n",
" assert type(output[\"RFCTABLE\"][0][\"RFCDATE\"]) is unicode\n",
" assert output[\"ECHOSTRUCT\"][\"RFCDATE\"] == python_to_ABAP_date(TEST_DATE)\n",
" assert output[\"RFCTABLE\"][0][\"RFCDATE\"] == python_to_ABAP_date(TEST_DATE)\n",
"\n",
"\n",
"def test_time_accepts_string():\n",
" TEST_TIME = \"123456\"\n",
"\n",
" IMPORTSTRUCT = {\"RFCTIME\": TEST_TIME}\n",
" IMPORTTABLE = [IMPORTSTRUCT]\n",
" output = client.call(\n",
" \"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT, RFCTABLE=IMPORTTABLE\n",
" )\n",
" if sys.version > \"3.0\":\n",
" assert type(output[\"ECHOSTRUCT\"][\"RFCTIME\"]) is str\n",
" assert type(output[\"RFCTABLE\"][0][\"RFCTIME\"]) is str\n",
" else:\n",
" assert type(output[\"ECHOSTRUCT\"][\"RFCTIME\"]) is unicode\n",
" assert type(output[\"RFCTABLE\"][0][\"RFCTIME\"]) is unicode\n",
" assert output[\"ECHOSTRUCT\"][\"RFCTIME\"] == TEST_TIME\n",
" assert output[\"RFCTABLE\"][0][\"RFCTIME\"] == TEST_TIME\n",
"\n",
"\n",
"def test_time_accepts_time():\n",
" TEST_TIME = ABAP_to_python_time(\"123456\")\n",
"\n",
" IMPORTSTRUCT = {\"RFCTIME\": TEST_TIME}\n",
" IMPORTTABLE = [IMPORTSTRUCT]\n",
" output = client.call(\n",
" \"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT, RFCTABLE=IMPORTTABLE\n",
" )\n",
" if sys.version > \"3.0\":\n",
" assert type(output[\"ECHOSTRUCT\"][\"RFCTIME\"]) is str\n",
" assert type(output[\"RFCTABLE\"][0][\"RFCTIME\"]) is str\n",
" else:\n",
" assert type(output[\"ECHOSTRUCT\"][\"RFCTIME\"]) is unicode\n",
" assert type(output[\"RFCTABLE\"][0][\"RFCTIME\"]) is unicode\n",
" assert output[\"ECHOSTRUCT\"][\"RFCTIME\"] == python_to_ABAP_time(TEST_TIME)\n",
" assert output[\"RFCTABLE\"][0][\"RFCTIME\"] == python_to_ABAP_time(TEST_TIME)\n",
"\n",
"\n",
"def test_error_int_rejects_string():\n",
" IMPORTSTRUCT = {\"RFCINT1\": \"1\"}\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT)\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"an integer required, received\"\n",
" assert ex.args[1] == IMPORTSTRUCT[\"RFCINT1\"]\n",
" assert ex.args[3] is str\n",
" assert ex.args[4] == \"RFCINT1\"\n",
" assert ex.args[5] == \"IMPORTSTRUCT\"\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", RFCTABLE=[IMPORTSTRUCT])\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"an integer required, received\"\n",
" assert ex.args[1] == IMPORTSTRUCT[\"RFCINT1\"]\n",
" assert ex.args[3] is str\n",
" assert ex.args[4] == \"RFCINT1\"\n",
" assert ex.args[5] == \"RFCTABLE\"\n",
"\n",
"\n",
"def test_error_int_rejects_float():\n",
" IMPORTSTRUCT = {\"RFCINT1\": 1.0}\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT)\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"an integer required, received\"\n",
" assert ex.args[1] == IMPORTSTRUCT[\"RFCINT1\"]\n",
" assert ex.args[3] is float\n",
" assert ex.args[4] == \"RFCINT1\"\n",
" assert ex.args[5] == \"IMPORTSTRUCT\"\n",
"\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", RFCTABLE=[IMPORTSTRUCT])\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"an integer required, received\"\n",
" assert ex.args[1] == IMPORTSTRUCT[\"RFCINT1\"]\n",
" assert ex.args[3] is float\n",
" assert ex.args[4] == \"RFCINT1\"\n",
" assert ex.args[5] == \"RFCTABLE\"\n",
"\n",
"\n",
"def test_error_string_rejects_None():\n",
" IMPORTSTRUCT = {\"RFCCHAR4\": None}\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT)\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"an string is required, received\"\n",
" assert ex.args[1] == IMPORTSTRUCT[\"RFCCHAR4\"]\n",
" assert ex.args[3] == type(None)\n",
" assert ex.args[4] == \"RFCCHAR4\"\n",
" assert ex.args[5] == \"IMPORTSTRUCT\"\n",
"\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", RFCTABLE=[IMPORTSTRUCT])\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"an string is required, received\"\n",
" assert ex.args[1] == IMPORTSTRUCT[\"RFCCHAR4\"]\n",
" assert ex.args[3] == type(None)\n",
" assert ex.args[4] == \"RFCCHAR4\"\n",
" assert ex.args[5] == \"RFCTABLE\"\n",
"\n",
"\n",
"def test_error_string_rejects_int():\n",
" IMPORTSTRUCT = {\"RFCCHAR4\": 1}\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT)\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"an string is required, received\"\n",
" assert ex.args[1] == IMPORTSTRUCT[\"RFCCHAR4\"]\n",
" assert ex.args[3] is int\n",
" assert ex.args[4] == \"RFCCHAR4\"\n",
" assert ex.args[5] == \"IMPORTSTRUCT\"\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", RFCTABLE=[IMPORTSTRUCT])\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"an string is required, received\"\n",
" assert ex.args[1] == IMPORTSTRUCT[\"RFCCHAR4\"]\n",
" assert ex.args[3] is int\n",
" assert ex.args[4] == \"RFCCHAR4\"\n",
" assert ex.args[5] == \"RFCTABLE\"\n",
"\n",
"\n",
"def test_float_rejects_not_a_number_string():\n",
" IMPORTSTRUCT = {\"RFCFLOAT\": \"A\"}\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT)\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"a decimal value required, received\"\n",
" assert ex.args[1] == IMPORTSTRUCT[\"RFCFLOAT\"]\n",
" assert ex.args[3] is str\n",
" assert ex.args[4] == \"RFCFLOAT\"\n",
" assert ex.args[5] == \"IMPORTSTRUCT\"\n",
"\n",
"\n",
"def test_float_rejects_array():\n",
" IMPORTSTRUCT = {\"RFCFLOAT\": []}\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT)\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"a decimal value required, received\"\n",
" assert ex.args[1] == []\n",
" assert ex.args[3] is list\n",
" assert ex.args[4] == \"RFCFLOAT\"\n",
" assert ex.args[5] == \"IMPORTSTRUCT\"\n",
"\n",
"\n",
"def test_float_rejects_comma_for_point_locale():\n",
" IMPORTSTRUCT = {\"RFCFLOAT\": \"1,2\"}\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT)\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"a decimal value required, received\"\n",
" assert ex.args[1] == \"1,2\"\n",
" assert ex.args[3] is str\n",
" assert ex.args[4] == \"RFCFLOAT\"\n",
" assert ex.args[5] == \"IMPORTSTRUCT\"\n",
"\n",
"\n",
"def test_float_accepts_point_for_point_locale():\n",
" IMPORTSTRUCT = {\"RFCFLOAT\": \"1.2\"}\n",
" output = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT)[\"ECHOSTRUCT\"]\n",
" assert output[\"RFCFLOAT\"] == 1.2\n",
"\n",
"\n",
"def test_float_rejects_point_for_comma_locale():\n",
" locale.setlocale(locale.LC_ALL, \"de_DE\")\n",
" IMPORTSTRUCT = {\"RFCFLOAT\": \"1.2\"}\n",
" try:\n",
" output = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT)\n",
" except Exception as ex:\n",
" assert isinstance(ex, ExternalRuntimeError) is True\n",
" assert ex.code == 22\n",
" assert ex.key == \"RFC_CONVERSION_FAILURE\"\n",
" assert (\n",
" ex.message\n",
" == \"Cannot convert string value 1.2 at position 1 for the field RFCFLOAT to type RFCTYPE_FLOAT\"\n",
" )\n",
" locale.resetlocale(locale.LC_ALL)\n",
"\n",
"\n",
"def test_float_accepts_comma_for_comma_locale():\n",
" locale.setlocale(locale.LC_ALL, \"de_DE\")\n",
" IMPORTSTRUCT = {\"RFCFLOAT\": \"1,2\"}\n",
" output = client.call(\"STFC_STRUCTURE\", IMPORTSTRUCT=IMPORTSTRUCT)[\"ECHOSTRUCT\"]\n",
" assert output[\"RFCFLOAT\"] == 1.2\n",
" locale.resetlocale(locale.LC_ALL)\n",
"\n",
"\n",
"def test_bcd_rejects_not_a_number_string():\n",
"\n",
" try:\n",
" IS_INPUT = {\"ZDEC\": \"A\"}\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"a decimal value required, received\"\n",
" assert ex.args[1] == IS_INPUT[\"ZDEC\"]\n",
" assert ex.args[3] is str\n",
" assert ex.args[4] == \"ZDEC\"\n",
" assert ex.args[5] == \"IS_INPUT\"\n",
"\n",
"\n",
"def test_numc_rejects_non_string():\n",
" try:\n",
" IS_INPUT = {\"ZNUMC\": 1}\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"a numeric string is required, received\"\n",
" assert ex.args[1] == IS_INPUT[\"ZNUMC\"]\n",
" assert ex.args[3] is int\n",
" assert ex.args[4] == \"ZNUMC\"\n",
" assert ex.args[5] == \"IS_INPUT\"\n",
"\n",
"\n",
"def test_numc_rejects_non_numeric_string():\n",
" try:\n",
" IS_INPUT = {\"ZNUMC\": \"a1\"}\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"a numeric string is required, received\"\n",
" assert ex.args[1] == IS_INPUT[\"ZNUMC\"]\n",
" assert ex.args[3] is str\n",
" assert ex.args[4] == \"ZNUMC\"\n",
" assert ex.args[5] == \"IS_INPUT\"\n",
"\n",
"\n",
"def test_numc_rejects_empty_string():\n",
" try:\n",
" IS_INPUT = {\"ZNUMC\": \"\"}\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"a numeric string is required, received\"\n",
" assert ex.args[1] == IS_INPUT[\"ZNUMC\"]\n",
" assert ex.args[3] is str\n",
" assert ex.args[4] == \"ZNUMC\"\n",
" assert ex.args[5] == \"IS_INPUT\"\n",
"\n",
"\n",
"def test_numc_rejects_space_string():\n",
" try:\n",
" IS_INPUT = {\"ZNUMC\": \" \"}\n",
" output = client.call(\"/COE/RBP_FE_DATATYPES\", IS_INPUT=IS_INPUT, IV_COUNT=0)[\n",
" \"ES_OUTPUT\"\n",
" ]\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args[0] == \"a numeric string is required, received\"\n",
" assert ex.args[1] == IS_INPUT[\"ZNUMC\"]\n",
" assert ex.args[3] is str\n",
" assert ex.args[4] == \"ZNUMC\"\n",
" assert ex.args[5] == \"IS_INPUT\"\n",
"\n",
"\n",
"def test_utclong_accepts_min_max_initial():\n",
" UTCLONG = RFC_MATH[\"UTCLONG\"]\n",
" conn = Connection(**connection_info(\"QM7\"))\n",
"\n",
" res = conn.call(\"ZDATATYPES\", IV_UTCLONG=UTCLONG[\"MIN\"])\n",
" assert res[\"EV_UTCLONG\"] == UTCLONG[\"MIN\"]\n",
"\n",
" res = conn.call(\"ZDATATYPES\", IV_UTCLONG=UTCLONG[\"MAX\"])\n",
" assert res[\"EV_UTCLONG\"] == UTCLONG[\"MAX\"]\n",
"\n",
" res = conn.call(\"ZDATATYPES\", IV_UTCLONG=UTCLONG[\"INITIAL\"])\n",
" assert res[\"EV_UTCLONG\"] == UTCLONG[\"INITIAL\"]\n",
"\n",
" conn.close()\n",
"\n",
"\n",
"def test_utclong_rejects_non_string_or_invalid_format():\n",
" UTCLONG = RFC_MATH[\"UTCLONG\"]\n",
" conn = Connection(**connection_info(\"QM7\"))\n",
" try:\n",
" res = conn.call(\"ZDATATYPES\", IV_UTCLONG=1)\n",
" except Exception as ex:\n",
" assert isinstance(ex, TypeError) is True\n",
" assert ex.args == (\n",
" \"an string is required, received\",\n",
" 1,\n",
" \"of type\",\n",
" type(1),\n",
" \"IV_UTCLONG\",\n",
" )\n",
"\n",
" try:\n",
" res = conn.call(\"ZDATATYPES\", IV_UTCLONG=\"1\")\n",
" except Exception as ex:\n",
" assert isinstance(ex, ExternalRuntimeError) is True\n",
" assert ex.code == 22\n",
" assert ex.key == \"RFC_CONVERSION_FAILURE\"\n",
" assert ex.message == \"Cannot convert 1 to RFCTYPE_UTCLONG : illegal format\"\n",
"\n",
" conn.close()\n",
" #\n",
" # TypeError:\n",
"\n",
" # res = conn.call(\"ZDATATYPES\", IV_UTCLONG=\"1\")\n",
" # pyrfc._exception.ExternalRuntimeError: RFC_CONVERSION_FAILURE (rc=22): key=RFC_CONVERSION_FAILURE, message=Cannot convert 1 to RFCTYPE_UTCLONG : illegal format [MSG: class=, type=, number=, v1-4:=;;\n",
"\n",
" conn.close()\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" unittest.main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017857142857142856,
0,
0,
0,
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0.004878048780487805,
0,
0,
0,
0,
0,
0
] | 799 | 0.000424 | false |
#! /usr/bin/env python
# encoding: utf-8
# Matt Clarkson, 2012
'''
DPAPI access library (http://msdn.microsoft.com/en-us/library/ms995355.aspx)
This file uses code originally created by Crusher Joe:
http://article.gmane.org/gmane.comp.python.ctypes/420
And modified by Wayne Koorts:
http://stackoverflow.com/questions/463832/using-dpapi-with-python
'''
from ctypes import windll, byref, cdll, Structure, POINTER, c_char, c_buffer
from ctypes.wintypes import DWORD
from waflib.Configure import conf
LocalFree = windll.kernel32.LocalFree
memcpy = cdll.msvcrt.memcpy
CryptProtectData = windll.crypt32.CryptProtectData
CryptUnprotectData = windll.crypt32.CryptUnprotectData
CRYPTPROTECT_UI_FORBIDDEN = 0x01
try:
extra_entropy = 'cl;ad13 \0al;323kjd #(adl;k$#ajsd'.encode('ascii')
except AttributeError:
extra_entropy = 'cl;ad13 \0al;323kjd #(adl;k$#ajsd'
class DATA_BLOB(Structure):
_fields_ = [
('cbData', DWORD),
('pbData', POINTER(c_char))
]
def get_data(blob_out):
cbData = int(blob_out.cbData)
pbData = blob_out.pbData
buffer = c_buffer(cbData)
memcpy(buffer, pbData, cbData)
LocalFree(pbData);
return buffer.raw
@conf
def dpapi_encrypt_data(self, input_bytes, entropy = extra_entropy):
'''
Encrypts data and returns byte string
:param input_bytes: The data to be encrypted
:type input_bytes: String or Bytes
:param entropy: Extra entropy to add to the encryption process (optional)
:type entropy: String or Bytes
'''
if not isinstance(input_bytes, bytes) or not isinstance(entropy, bytes):
self.fatal('The inputs to dpapi must be bytes')
buffer_in = c_buffer(input_bytes, len(input_bytes))
buffer_entropy = c_buffer(entropy, len(entropy))
blob_in = DATA_BLOB(len(input_bytes), buffer_in)
blob_entropy = DATA_BLOB(len(entropy), buffer_entropy)
blob_out = DATA_BLOB()
if CryptProtectData(byref(blob_in), 'python_data', byref(blob_entropy),
None, None, CRYPTPROTECT_UI_FORBIDDEN, byref(blob_out)):
return get_data(blob_out)
else:
self.fatal('Failed to decrypt data')
@conf
def dpapi_decrypt_data(self, encrypted_bytes, entropy = extra_entropy):
'''
Decrypts data and returns byte string
:param encrypted_bytes: The encrypted data
:type encrypted_bytes: Bytes
:param entropy: Extra entropy to add to the encryption process (optional)
:type entropy: String or Bytes
'''
if not isinstance(encrypted_bytes, bytes) or not isinstance(entropy, bytes):
self.fatal('The inputs to dpapi must be bytes')
buffer_in = c_buffer(encrypted_bytes, len(encrypted_bytes))
buffer_entropy = c_buffer(entropy, len(entropy))
blob_in = DATA_BLOB(len(encrypted_bytes), buffer_in)
blob_entropy = DATA_BLOB(len(entropy), buffer_entropy)
blob_out = DATA_BLOB()
if CryptUnprotectData(byref(blob_in), None, byref(blob_entropy), None,
None, CRYPTPROTECT_UI_FORBIDDEN, byref(blob_out)):
return get_data(blob_out)
else:
self.fatal('Failed to decrypt data')
| [
"#! /usr/bin/env python\n",
"# encoding: utf-8\n",
"# Matt Clarkson, 2012\n",
"\n",
"'''\n",
"DPAPI access library (http://msdn.microsoft.com/en-us/library/ms995355.aspx)\n",
"This file uses code originally created by Crusher Joe:\n",
"http://article.gmane.org/gmane.comp.python.ctypes/420\n",
"And modified by Wayne Koorts:\n",
"http://stackoverflow.com/questions/463832/using-dpapi-with-python\n",
"'''\n",
"\n",
"from ctypes import windll, byref, cdll, Structure, POINTER, c_char, c_buffer\n",
"from ctypes.wintypes import DWORD\n",
"from waflib.Configure import conf\n",
"\n",
"LocalFree = windll.kernel32.LocalFree\n",
"memcpy = cdll.msvcrt.memcpy\n",
"CryptProtectData = windll.crypt32.CryptProtectData\n",
"CryptUnprotectData = windll.crypt32.CryptUnprotectData\n",
"CRYPTPROTECT_UI_FORBIDDEN = 0x01\n",
"try:\n",
"\textra_entropy = 'cl;ad13 \\0al;323kjd #(adl;k$#ajsd'.encode('ascii')\n",
"except AttributeError:\n",
"\textra_entropy = 'cl;ad13 \\0al;323kjd #(adl;k$#ajsd'\n",
"\n",
"class DATA_BLOB(Structure):\n",
"\t_fields_ = [\n",
"\t\t('cbData', DWORD),\n",
"\t\t('pbData', POINTER(c_char))\n",
"\t]\n",
"\n",
"def get_data(blob_out):\n",
"\tcbData = int(blob_out.cbData)\n",
"\tpbData = blob_out.pbData\n",
"\tbuffer = c_buffer(cbData)\n",
"\tmemcpy(buffer, pbData, cbData)\n",
"\tLocalFree(pbData);\n",
"\treturn buffer.raw\n",
"\n",
"@conf\n",
"def dpapi_encrypt_data(self, input_bytes, entropy = extra_entropy):\n",
"\t'''\n",
"\tEncrypts data and returns byte string\n",
"\n",
"\t:param input_bytes: The data to be encrypted\n",
"\t:type input_bytes: String or Bytes\n",
"\t:param entropy: Extra entropy to add to the encryption process (optional)\n",
"\t:type entropy: String or Bytes\n",
"\t'''\n",
"\tif not isinstance(input_bytes, bytes) or not isinstance(entropy, bytes):\n",
"\t\tself.fatal('The inputs to dpapi must be bytes')\n",
"\tbuffer_in = c_buffer(input_bytes, len(input_bytes))\n",
"\tbuffer_entropy = c_buffer(entropy, len(entropy))\n",
"\tblob_in = DATA_BLOB(len(input_bytes), buffer_in)\n",
"\tblob_entropy = DATA_BLOB(len(entropy), buffer_entropy)\n",
"\tblob_out = DATA_BLOB()\n",
"\n",
"\tif CryptProtectData(byref(blob_in), 'python_data', byref(blob_entropy), \n",
"\t\tNone, None, CRYPTPROTECT_UI_FORBIDDEN, byref(blob_out)):\n",
"\t\treturn get_data(blob_out)\n",
"\telse:\n",
"\t\tself.fatal('Failed to decrypt data')\n",
"\n",
"@conf\n",
"def dpapi_decrypt_data(self, encrypted_bytes, entropy = extra_entropy):\n",
"\t'''\n",
"\tDecrypts data and returns byte string\n",
"\n",
"\t:param encrypted_bytes: The encrypted data\n",
"\t:type encrypted_bytes: Bytes\n",
"\t:param entropy: Extra entropy to add to the encryption process (optional)\n",
"\t:type entropy: String or Bytes\n",
"\t'''\n",
"\tif not isinstance(encrypted_bytes, bytes) or not isinstance(entropy, bytes):\n",
"\t\tself.fatal('The inputs to dpapi must be bytes')\n",
"\tbuffer_in = c_buffer(encrypted_bytes, len(encrypted_bytes))\n",
"\tbuffer_entropy = c_buffer(entropy, len(entropy))\n",
"\tblob_in = DATA_BLOB(len(encrypted_bytes), buffer_in)\n",
"\tblob_entropy = DATA_BLOB(len(entropy), buffer_entropy)\n",
"\tblob_out = DATA_BLOB()\n",
"\tif CryptUnprotectData(byref(blob_in), None, byref(blob_entropy), None,\n",
"\t\tNone, CRYPTPROTECT_UI_FORBIDDEN, byref(blob_out)):\n",
"\t\treturn get_data(blob_out)\n",
"\telse:\n",
"\t\tself.fatal('Failed to decrypt data')\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014492753623188406,
0,
0.018867924528301886,
0,
0.03571428571428571,
0.07142857142857142,
0.047619047619047616,
0.03333333333333333,
0.3333333333333333,
0,
0.041666666666666664,
0.03225806451612903,
0.038461538461538464,
0.037037037037037035,
0.03125,
0.1,
0.05263157894736842,
0,
0.16666666666666666,
0.029411764705882353,
0.2,
0.02564102564102564,
0,
0.021739130434782608,
0.027777777777777776,
0.013333333333333334,
0.03125,
0.2,
0.013513513513513514,
0.02,
0.034482758620689655,
0.02,
0.03508771929824561,
0.034482758620689655,
0.06666666666666667,
0,
0.02702702702702703,
0.03389830508474576,
0.03571428571428571,
0.14285714285714285,
0.02564102564102564,
0,
0.16666666666666666,
0.027777777777777776,
0.2,
0.02564102564102564,
0,
0.022727272727272728,
0.03333333333333333,
0.013333333333333334,
0.03125,
0.2,
0.01282051282051282,
0.02,
0.030303030303030304,
0.02,
0.03278688524590164,
0.034482758620689655,
0.06666666666666667,
0.013888888888888888,
0.03773584905660377,
0.03571428571428571,
0.14285714285714285,
0.02564102564102564,
1
] | 87 | 0.049275 | false |
# -*- coding: utf-8 -*-
"""
Train algorithm based on Delta - rule
"""
from neurolab.core import Train
class TrainDelta(Train):

    """
    Delta-rule trainer for single-layer perceptrons.

    :Support networks:
        newp (one-layer perceptron)
    :Parameters:
        input: array like (l x net.ci)
            train input patterns
        target: array like (l x net.co)
            train target patterns
        epochs: int (default 500)
            Number of train epochs
        show: int (default 100)
            Print period
        goal: float (default 0.01)
            The goal of train
        lr: float (default 0.01)
            learning rate

    """

    def __init__(self, net, input, target, lr=0.01):
        # net/input/target are part of the Train-framework signature;
        # only the learning rate is kept here.
        self.lr = lr

    def __call__(self, net, input, target):
        perceptron = net.layers[0]
        while True:
            # error()/epochf() come from the Train base class; epochf is
            # presumably what terminates the loop (e.g. by raising) once
            # the goal/epoch limit is reached -- confirm in neurolab.core.
            epoch_error = self.error(net, input, target)
            self.epochf(epoch_error, net, input, target)
            for pattern, expected in zip(input, target):
                produced = net.step(pattern)
                delta = expected - produced
                # Reshape in place so the product below is an outer
                # product (co x ci), matching the weight matrix.
                delta.shape = delta.size, 1
                pattern.shape = 1, pattern.size
                perceptron.np['w'] += self.lr * delta * pattern
                delta.shape = delta.size
                perceptron.np['b'] += self.lr * delta
        return None
| [
"# -*- coding: utf-8 -*-\n",
"\"\"\"\n",
"Train algorithm based on Delta - rule\n",
"\n",
"\"\"\"\n",
"\n",
"from neurolab.core import Train\n",
"\n",
"\n",
"class TrainDelta(Train):\n",
"\n",
" \"\"\"\n",
" Train with Delta rule\n",
"\n",
" :Support networks:\n",
" newp (one-layer perceptron)\n",
" :Parameters:\n",
" input: array like (l x net.ci)\n",
" train input patterns\n",
" target: array like (l x net.co)\n",
" train target patterns\n",
" epochs: int (default 500)\n",
" Number of train epochs\n",
" show: int (default 100)\n",
" Print period\n",
" goal: float (default 0.01)\n",
" The goal of train\n",
" lr: float (default 0.01)\n",
" learning rate\n",
"\n",
" \"\"\"\n",
"\n",
" def __init__(self, net, input, target, lr=0.01):\n",
" self.lr = lr\n",
"\n",
" def __call__(self, net, input, target):\n",
" layer = net.layers[0]\n",
" while True:\n",
" e = self.error(net, input, target)\n",
" self.epochf(e, net, input, target)\n",
" for inp, tar in zip(input, target):\n",
" out = net.step(inp)\n",
" err = tar - out\n",
" err.shape = err.size, 1\n",
" inp.shape = 1, inp.size\n",
" layer.np['w'] += self.lr * err * inp\n",
" err.shape = err.size\n",
" layer.np['b'] += self.lr * err\n",
" return None\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 49 | 0 | false |
# -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
def movshare(self, data, url, hostername):
	"""Resolve the direct stream URL for a movshare-family hoster page.

	data       -- raw HTML of the hoster's video page
	url        -- page URL (rebound below to the player API URL)
	hostername -- mirror to query: movshare, nowvideo, divxstage,
	              novamov, videoweed, cloudtime or vidgg

	The page carries flashvars.file / flashvars.filekey either in clear
	text or wrapped in up to three rounds of string obfuscation that
	movshare_code1() undoes.  On success an asynchronous GET against the
	hoster's player.api.php is started; movshare_xml() parses the reply.
	"""
	print "movshare:", hostername
	# Clear-text case: the filekey assignment appears directly on the page.
	# NOTE(review): the dots in 'flashvars.filekey' are unescaped regex
	# wildcards; harmless in practice but worth tightening.
	m = re.search('flashvars.filekey=(.*?);', data, re.S)
	if not m:
		keyvar = filecode = None
		# Obfuscated page: capture the four string arguments of the
		# packed "}('w','i','s','e'));" wrapper.
		l = re.search('\}\(\'(.*?)\',\'(.*?)\',\'(.*?)\',\'(.*?)\'\)\);', data)
		if l:
			w = l.group(1)
			i = l.group(2)
			s = l.group(3)
			e = l.group(4)
			# Deobfuscation round 1.
			crypt1 = self.movshare_code1(w,i,s,e)
			print "crypt1", crypt1
			m = re.search('\}\(\'(.*?)\',\'(.*?)\',\'(.*?)\',\'(.*?)\'\)', crypt1)
			if m:
				w = m.group(1)
				i = m.group(2)
				s = m.group(3)
				e = m.group(4)
				# Deobfuscation round 2.
				crypt2 = self.movshare_code1(w,i,s,e)
				print "crypt2", crypt2
				# The second packed wrapper in crypt2 (after 'return')
				# holds the arguments for the final round.
				n = re.search('\}\(\'(.*?)\',\'(.*?)\',\'(.*?)\',\'(.*?)\'.*?return.*?\}\(\'(.*?)\',\'(.*?)\',\'(.*?)\',\'(.*?)\'\)', crypt2)
				print "n", n
				if n:
					w2 = n.group(5)
					i2 = n.group(6)
					s2 = n.group(7)
					e2 = n.group(8)
					# Deobfuscation round 3.
					crypt3 = self.movshare_code1(w2,i2,s2,e2)
					print "crypt3", crypt3
					m = re.search('flashvars.file="(.*?)"', crypt3, re.S)
					if m:
						filecode = m.group(1)
						print filecode
						key = re.search('flashvars.filekey="(.*?)"', crypt3)
						if key:
							print "11111111111111111111111111111"
							keyvar = key.group(1)
							print "key 1:", keyvar
						else:
							# No quoted filekey: fall back to the bare
							# 'a.b.c.d-xxxx' token embedded in crypt3.
							print "code3 - 222222222222222222222222222"
							t = re.search('(\d+.\d+.\d+.\d+-\w+)', crypt3, re.S)
							keyvar = t and t.group(1)
							print keyvar
					elif crypt3:
						# crypt3 is still packed: run the generic unpacker
						# and mine its output for file/filekey.
						sUnpacked = unpack(crypt3)
						print sUnpacked
						print "code mit crypt"
						m = re.search('flashvars.file="(.*?)"', sUnpacked, re.S)
						filecode = m and m.group(1)

						key = re.search('flashvars.filekey=".*?"', sUnpacked)
						if key:
							# NOTE(review): this pattern has no capture
							# group, so key.group(1) would raise here;
							# this branch looks untested.
							print "11111111111111111111111111111"
							keyvar = key.group(1)
							print "key 1:", keyvar
						else:
							# filekey is an indirection chain of JS vars:
							# filekey -> var -> var -> var -> string literal.
							print "sUnpacked - 222222222222222222222222222"
							print filecode
							t = re.search('flashvars.filekey=(.*?);', sUnpacked, re.S)
							key = t and t.group(1)
							t = re.search(';var %s=(.*?);' % key, sUnpacked, re.S)
							key1 = t and t.group(1)
							t = re.search(';var %s=(.*?);' % key1, sUnpacked, re.S)
							key2 = t and t.group(1)
							t = re.search(';var %s=(.*?);' % key2, sUnpacked, re.S)
							key3 = t and t.group(1)
							t = re.search(';var %s="(.*?)";' % key3, sUnpacked, re.S)
							keyvar = t and t.group(1)
	else:
		# Clear-text page: filekey names a JS variable holding the key.
		fk = m.group(1)
		m = re.search('var %s="(.*?)";.*?flashvars.file="(.*?)";' % fk, data, re.S)
		if m:
			filecode = m and m.group(2)
			keyvar = m and m.group(1)
			print "filecode:",filecode,"keyvar:",keyvar
		else:
			# The key is inline rather than in a variable: URL-encode it.
			m = re.search('flashvars.filekey=(.*?);', data, re.S)
			if m:
				fk = m.group(1)
				m = re.search('flashvars.file="(.*?)".*?%s' %fk, data, re.S)
				# NOTE(review): m can be None here, and on this outer
				# else-branch filecode/keyvar are never pre-initialised,
				# so failure paths can raise before the check below.
				filecode = m.group(1)
				keyvar = fk.replace('.', '%2E').replace('-', '%2D')
				print "filecode:",filecode,"keyvar:",keyvar
	if filecode and keyvar:
		# Build the per-hoster player API URL from key + file code.
		if hostername == "movshare":
			print "movshare"
			url = "http://www.movshare.net/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined" % ( keyvar, filecode)
		elif hostername == "nowvideo":
			print "nowvideo"
			url = "http://www.nowvideo.eu/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined" % ( keyvar, filecode)
		elif hostername == "divxstage":
			print "divxstage"
			url = "http://www.divxstage.eu/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined" % ( keyvar, filecode)
		elif hostername == "novamov":
			print "novamov"
			url = "http://www.novamov.com/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined" % ( keyvar, filecode)
		elif hostername == "videoweed":
			print "videoweed"
			url = "http://www.videoweed.es/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined" % ( keyvar, filecode)
		elif hostername == "cloudtime":
			print "cloudtime"
			url = "http://www.cloudtime.to/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined" % ( keyvar, filecode)
		elif hostername == "vidgg":
			print "vidgg"
			url = "http://www.vid.gg/api/player.api.php?pass=undefined&key=%s&numOfErrors=0&cid2=undefined&cid=undefined&user=undefined&file=%s&cid3=undefined" % ( keyvar, filecode)
		else:
			# NOTE(review): after stream_not_found() this still falls
			# through to getPage() with the original url -- likely a bug.
			self.stream_not_found()
		print url
		getPage(url, method='GET').addCallback(self.movshare_xml).addErrback(self.errorload)
	else:
		self.stream_not_found()
def movshare_code1(self,w,i,s,e):
lIll=0
ll1I=0
Il1l=0
ll1l=[]
l1lI=[]
while not len(w)+len(i)+len(s)+len(e)==len(ll1l)+len(l1lI)+len(e):
if lIll < 5:
l1lI.append(w[lIll])
elif lIll < len(w):
ll1l.append(w[lIll])
lIll += 1
if ll1I < 5:
l1lI.append(i[ll1I])
elif ll1I < len(i):
ll1l.append(i[ll1I])
ll1I += 1
if Il1l < 5:
l1lI.append(s[Il1l])
elif Il1l < len(s):
ll1l.append(s[Il1l])
Il1l += 1
str1 = ''
lI1l = str1.join( ll1l )
I1lI = str1.join( l1lI )
ll1I = 0
l1ll = []
lIll = 0
while lIll < len(ll1l):
ll11 = -1
if ord(I1lI[ll1I])%2:
ll11 = 1
l1ll.append(chr(self.movshare_base36decode(lI1l[lIll:lIll+2]) - ll11))
ll1I += 1
if ll1I >= len(l1lI):
ll1I = 0
lIll += 2
return str1.join(l1ll)
def movshare_base36decode(self, number):
	"""Interpret ``number`` as a base-36 string and return its int value."""
	return int(number, base=36)
def movshare_xml(self, data):
	"""Parse the player.api.php reply and forward the stream URL.

	The reply contains 'url=<encoded stream url>&title=...'; on a match
	the URL is percent-decoded and handed to self._callback, otherwise
	stream_not_found() is reported (else branch below).
	"""
	file_link = re.search('url=(.+?)&title=', data)
	if file_link:
		# urllib.unquote percent-decodes the URL (Python 2 API).
		stream_url = urllib.unquote(file_link.group(1))
		self._callback(stream_url)
	else:
self.stream_not_found() | [
"# -*- coding: utf-8 -*-\n",
"from Plugins.Extensions.MediaPortal.plugin import _\n",
"from Plugins.Extensions.MediaPortal.resources.imports import *\n",
"\n",
"def movshare(self, data, url, hostername):\n",
"\tprint \"movshare:\", hostername\n",
"\tm = re.search('flashvars.filekey=(.*?);', data, re.S)\n",
"\tif not m:\n",
"\t\tkeyvar = filecode = None\n",
"\t\tl = re.search('\\}\\(\\'(.*?)\\',\\'(.*?)\\',\\'(.*?)\\',\\'(.*?)\\'\\)\\);', data)\n",
"\t\tif l:\n",
"\t\t\tw = l.group(1)\n",
"\t\t\ti = l.group(2)\n",
"\t\t\ts = l.group(3)\n",
"\t\t\te = l.group(4)\n",
"\t\t\tcrypt1 = self.movshare_code1(w,i,s,e)\n",
"\t\t\tprint \"crypt1\", crypt1\n",
"\t\t\tm = re.search('\\}\\(\\'(.*?)\\',\\'(.*?)\\',\\'(.*?)\\',\\'(.*?)\\'\\)', crypt1)\n",
"\t\t\tif m:\n",
"\t\t\t\tw = m.group(1)\n",
"\t\t\t\ti = m.group(2)\n",
"\t\t\t\ts = m.group(3)\n",
"\t\t\t\te = m.group(4)\n",
"\t\t\t\tcrypt2 = self.movshare_code1(w,i,s,e)\n",
"\t\t\t\tprint \"crypt2\", crypt2\n",
"\t\t\t\tn = re.search('\\}\\(\\'(.*?)\\',\\'(.*?)\\',\\'(.*?)\\',\\'(.*?)\\'.*?return.*?\\}\\(\\'(.*?)\\',\\'(.*?)\\',\\'(.*?)\\',\\'(.*?)\\'\\)', crypt2)\n",
"\t\t\t\tprint \"n\", n\n",
"\t\t\t\tif n:\n",
"\t\t\t\t\tw2 = n.group(5)\n",
"\t\t\t\t\ti2 = n.group(6)\n",
"\t\t\t\t\ts2 = n.group(7)\n",
"\t\t\t\t\te2 = n.group(8)\n",
"\t\t\t\t\tcrypt3 = self.movshare_code1(w2,i2,s2,e2)\n",
"\t\t\t\t\tprint \"crypt3\", crypt3\n",
"\t\t\t\t\tm = re.search('flashvars.file=\"(.*?)\"', crypt3, re.S)\n",
"\t\t\t\t\tif m:\n",
"\t\t\t\t\t\tfilecode = m.group(1)\n",
"\t\t\t\t\t\tprint filecode\n",
"\t\t\t\t\t\tkey = re.search('flashvars.filekey=\"(.*?)\"', crypt3)\n",
"\t\t\t\t\t\tif key:\n",
"\t\t\t\t\t\t\tprint \"11111111111111111111111111111\"\n",
"\t\t\t\t\t\t\tkeyvar = key.group(1)\n",
"\t\t\t\t\t\t\tprint \"key 1:\", keyvar\n",
"\t\t\t\t\t\telse:\n",
"\t\t\t\t\t\t\tprint \"code3 - 222222222222222222222222222\"\n",
"\t\t\t\t\t\t\tt = re.search('(\\d+.\\d+.\\d+.\\d+-\\w+)', crypt3, re.S)\n",
"\t\t\t\t\t\t\tkeyvar = t and t.group(1)\n",
"\t\t\t\t\t\t\tprint keyvar\n",
"\t\t\t\t\telif crypt3:\n",
"\t\t\t\t\t\tsUnpacked = unpack(crypt3)\n",
"\t\t\t\t\t\tprint sUnpacked\n",
"\t\t\t\t\t\tprint \"code mit crypt\"\n",
"\t\t\t\t\t\tm = re.search('flashvars.file=\"(.*?)\"', sUnpacked, re.S)\n",
"\t\t\t\t\t\tfilecode = m and m.group(1)\n",
"\n",
"\t\t\t\t\t\tkey = re.search('flashvars.filekey=\".*?\"', sUnpacked)\n",
"\t\t\t\t\t\tif key:\n",
"\t\t\t\t\t\t\tprint \"11111111111111111111111111111\"\n",
"\t\t\t\t\t\t\tkeyvar = key.group(1)\n",
"\t\t\t\t\t\t\tprint \"key 1:\", keyvar\n",
"\t\t\t\t\t\telse:\n",
"\t\t\t\t\t\t\tprint \"sUnpacked - 222222222222222222222222222\"\n",
"\t\t\t\t\t\t\tprint filecode\n",
"\t\t\t\t\t\t\tt = re.search('flashvars.filekey=(.*?);', sUnpacked, re.S)\n",
"\t\t\t\t\t\t\tkey = t and t.group(1)\n",
"\t\t\t\t\t\t\tt = re.search(';var %s=(.*?);' % key, sUnpacked, re.S)\n",
"\t\t\t\t\t\t\tkey1 = t and t.group(1)\n",
"\t\t\t\t\t\t\tt = re.search(';var %s=(.*?);' % key1, sUnpacked, re.S)\n",
"\t\t\t\t\t\t\tkey2 = t and t.group(1)\n",
"\t\t\t\t\t\t\tt = re.search(';var %s=(.*?);' % key2, sUnpacked, re.S)\n",
"\t\t\t\t\t\t\tkey3 = t and t.group(1)\n",
"\t\t\t\t\t\t\tt = re.search(';var %s=\"(.*?)\";' % key3, sUnpacked, re.S)\n",
"\t\t\t\t\t\t\tkeyvar = t and t.group(1)\n",
"\telse:\n",
"\t\tfk = m.group(1)\n",
"\t\tm = re.search('var %s=\"(.*?)\";.*?flashvars.file=\"(.*?)\";' % fk, data, re.S)\n",
"\t\tif m:\n",
"\t\t\tfilecode = m and m.group(2)\n",
"\t\t\tkeyvar = m and m.group(1)\n",
"\t\t\tprint \"filecode:\",filecode,\"keyvar:\",keyvar\n",
"\t\telse:\n",
"\t\t\tm = re.search('flashvars.filekey=(.*?);', data, re.S)\n",
"\t\t\tif m:\n",
"\t\t\t\tfk = m.group(1)\n",
"\t\t\t\tm = re.search('flashvars.file=\"(.*?)\".*?%s' %fk, data, re.S)\n",
"\t\t\t\tfilecode = m.group(1)\n",
"\t\t\t\tkeyvar = fk.replace('.', '%2E').replace('-', '%2D')\n",
"\t\t\t\tprint \"filecode:\",filecode,\"keyvar:\",keyvar\n",
"\tif filecode and keyvar:\n",
"\t\tif hostername == \"movshare\":\n",
"\t\t\tprint \"movshare\"\n",
"\t\t\turl = \"http://www.movshare.net/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined\" % ( keyvar, filecode)\n",
"\t\telif hostername == \"nowvideo\":\n",
"\t\t\tprint \"nowvideo\"\n",
"\t\t\turl = \"http://www.nowvideo.eu/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined\" % ( keyvar, filecode)\n",
"\t\telif hostername == \"divxstage\":\n",
"\t\t\tprint \"divxstage\"\n",
"\t\t\turl = \"http://www.divxstage.eu/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined\" % ( keyvar, filecode)\n",
"\t\telif hostername == \"novamov\":\n",
"\t\t\tprint \"novamov\"\n",
"\t\t\turl = \"http://www.novamov.com/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined\" % ( keyvar, filecode)\n",
"\t\telif hostername == \"videoweed\":\n",
"\t\t\tprint \"videoweed\"\n",
"\t\t\turl = \"http://www.videoweed.es/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined\" % ( keyvar, filecode)\n",
"\t\telif hostername == \"cloudtime\":\n",
"\t\t\tprint \"cloudtime\"\n",
"\t\t\turl = \"http://www.cloudtime.to/api/player.api.php?cid3=undefined&key=%s&user=undefined&numOfErrors=0&cid=undefined&file=%s&cid2=undefined\" % ( keyvar, filecode)\n",
"\t\telif hostername == \"vidgg\":\n",
"\t\t\tprint \"vidgg\"\n",
"\t\t\turl = \"http://www.vid.gg/api/player.api.php?pass=undefined&key=%s&numOfErrors=0&cid2=undefined&cid=undefined&user=undefined&file=%s&cid3=undefined\" % ( keyvar, filecode)\n",
"\t\telse:\n",
"\t\t\tself.stream_not_found()\n",
"\t\tprint url\n",
"\t\tgetPage(url, method='GET').addCallback(self.movshare_xml).addErrback(self.errorload)\n",
"\telse:\n",
"\t\tself.stream_not_found()\n",
"\n",
"def movshare_code1(self,w,i,s,e):\n",
"\tlIll=0\n",
"\tll1I=0\n",
"\tIl1l=0\n",
"\tll1l=[]\n",
"\tl1lI=[]\n",
"\twhile not len(w)+len(i)+len(s)+len(e)==len(ll1l)+len(l1lI)+len(e):\n",
"\t\tif lIll < 5:\n",
"\t\t\tl1lI.append(w[lIll])\n",
"\t\telif lIll < len(w):\n",
"\t\t\tll1l.append(w[lIll])\n",
"\t\tlIll += 1\n",
"\t\tif ll1I < 5:\n",
"\t\t\tl1lI.append(i[ll1I])\n",
"\t\telif ll1I < len(i):\n",
"\t\t\tll1l.append(i[ll1I])\n",
"\t\tll1I += 1\n",
"\t\tif Il1l < 5:\n",
"\t\t\tl1lI.append(s[Il1l])\n",
"\t\telif Il1l < len(s):\n",
"\t\t\tll1l.append(s[Il1l])\n",
"\t\tIl1l += 1\n",
"\tstr1 = ''\n",
"\tlI1l = str1.join( ll1l )\n",
"\tI1lI = str1.join( l1lI )\n",
"\tll1I = 0\n",
"\tl1ll = []\n",
"\tlIll = 0\n",
"\twhile lIll < len(ll1l):\n",
"\t\tll11 = -1\n",
"\t\tif ord(I1lI[ll1I])%2:\n",
"\t\t\tll11 = 1\n",
"\t\tl1ll.append(chr(self.movshare_base36decode(lI1l[lIll:lIll+2]) - ll11))\n",
"\t\tll1I += 1\n",
"\t\tif ll1I >= len(l1lI):\n",
"\t\t\tll1I = 0\n",
"\t\tlIll += 2\n",
"\treturn str1.join(l1ll)\n",
"\n",
"def movshare_base36decode(self, number):\n",
"\treturn int(number,36)\n",
"\n",
"def movshare_xml(self, data):\n",
"\tfile_link = re.search('url=(.+?)&title=', data)\n",
"\tif file_link:\n",
"\t\tstream_url = urllib.unquote(file_link.group(1))\n",
"\t\tself._callback(stream_url)\n",
"\telse:\n",
"\t\tself.stream_not_found()"
] | [
0,
0,
0,
0,
0.023255813953488372,
0.03225806451612903,
0.01818181818181818,
0.09090909090909091,
0.037037037037037035,
0.08108108108108109,
0.125,
0.05555555555555555,
0.05555555555555555,
0.05555555555555555,
0.05555555555555555,
0.0975609756097561,
0.038461538461538464,
0.05405405405405406,
0.1111111111111111,
0.05263157894736842,
0.05263157894736842,
0.05263157894736842,
0.05263157894736842,
0.09523809523809523,
0.037037037037037035,
0.05384615384615385,
0.058823529411764705,
0.1,
0.047619047619047616,
0.047619047619047616,
0.047619047619047616,
0.047619047619047616,
0.0851063829787234,
0.03571428571428571,
0.01694915254237288,
0.09090909090909091,
0.03571428571428571,
0.047619047619047616,
0.01694915254237288,
0.07142857142857142,
0.022222222222222223,
0.034482758620689655,
0.03333333333333333,
0.08333333333333333,
0.0196078431372549,
0.1,
0.030303030303030304,
0.05,
0.05555555555555555,
0.030303030303030304,
0.045454545454545456,
0.034482758620689655,
0.015873015873015872,
0.029411764705882353,
0,
0.016666666666666666,
0.07142857142857142,
0.022222222222222223,
0.034482758620689655,
0.03333333333333333,
0.08333333333333333,
0.01818181818181818,
0.045454545454545456,
0.015151515151515152,
0.03333333333333333,
0.016129032258064516,
0.03225806451612903,
0.015873015873015872,
0.03225806451612903,
0.015873015873015872,
0.03225806451612903,
0.015384615384615385,
0.030303030303030304,
0.14285714285714285,
0.05555555555555555,
0.01282051282051282,
0.125,
0.03225806451612903,
0.034482758620689655,
0.0851063829787234,
0.125,
0.017543859649122806,
0.1111111111111111,
0.05,
0.03076923076923077,
0.038461538461538464,
0.017857142857142856,
0.08333333333333333,
0.04,
0.03225806451612903,
0.05,
0.018292682926829267,
0.030303030303030304,
0.05,
0.018404907975460124,
0.029411764705882353,
0.047619047619047616,
0.018292682926829267,
0.03125,
0.05263157894736842,
0.018404907975460124,
0.029411764705882353,
0.047619047619047616,
0.018292682926829267,
0.029411764705882353,
0.047619047619047616,
0.018292682926829267,
0.03333333333333333,
0.058823529411764705,
0.017341040462427744,
0.125,
0.037037037037037035,
0.08333333333333333,
0.022988505747126436,
0.14285714285714285,
0.038461538461538464,
0,
0.14705882352941177,
0.25,
0.25,
0.25,
0.2222222222222222,
0.2222222222222222,
0.029411764705882353,
0.06666666666666667,
0.041666666666666664,
0.045454545454545456,
0.041666666666666664,
0.08333333333333333,
0.06666666666666667,
0.041666666666666664,
0.045454545454545456,
0.041666666666666664,
0.08333333333333333,
0.06666666666666667,
0.041666666666666664,
0.045454545454545456,
0.041666666666666664,
0.08333333333333333,
0.09090909090909091,
0.11538461538461539,
0.11538461538461539,
0.1,
0.09090909090909091,
0.1,
0.04,
0.08333333333333333,
0.08333333333333333,
0.08333333333333333,
0.0136986301369863,
0.08333333333333333,
0.041666666666666664,
0.08333333333333333,
0.08333333333333333,
0.041666666666666664,
0,
0.024390243902439025,
0.08695652173913043,
0,
0.03333333333333333,
0.02040816326530612,
0.06666666666666667,
0.02,
0.034482758620689655,
0.14285714285714285,
0.08
] | 166 | 0.056269 | false |
from pymongo import MongoClient
from collections import defaultdict
class Searcher:
    """Tiny inverted-index search front-end backed by a MongoDB collection.

    Each stored document is expected to look like
    ``{"word": <str>, "files": [<per-file dict>, ...]}``.
    NOTE(review): ``search`` relies on every per-file dict yielding
    (file_name, count) in that order via ``.values()`` -- confirm against
    the indexer that writes the collection.
    """

    def __init__(self, ip, port, db, collection, user, pwd):
        """Connect to MongoDB at ip:port, authenticate and select the
        collection used as the inverted index."""
        self.client = MongoClient(ip, port)
        self.db = self.client[db]
        self.db.authenticate(user, pwd)
        self.goofle = self.db[collection]
        # Normalisation map: accented vowels/ñ -> plain ASCII, punctuation
        # -> removed.  Kept as a public plain dict (backward compatible);
        # search() derives a translate table from it.
        self.spchars = {"á":"a", "à":"a", "â":"a", "ä":"a", "é":"e",
                        "è":"e", "ê":"e", "ë":"e", "í":"i", "ì":"i",
                        "î":"i", "ï":"i", "ó":"o", "ò":"o", "ô":"o",
                        "ö":"o", "ú":"u", "ù":"u", "û":"u", "ü":"u",
                        "ñ":"n", "!":"", "|":"", "°":"", "¬":"", "\"":"",
                        "#":"", "$":"", "%":"", "&":"", "/":"", "(":"",
                        ")":"", "=":"", "'":"", "?":"", "¡":"", "¿":"",
                        "@":"", "¨":"", "´":"", "+":"", "*":"", "~":"",
                        "<":"", ">":"", "{":"", "[":"", "^":"", "}":"",
                        "`":"", "]":"", ";":"", ",":"", ".":"", ":":"",
                        "-":"", "_":"" }

    def search(self, words, top):
        """Score files containing ``words`` and return the best ``top``.

        Each word is lower-cased and normalised, looked up in the index,
        and the per-file counts are accumulated.  Returns a list of up to
        ``top`` (file_name, score) tuples, highest score first.
        """
        result = defaultdict(int)
        # Build the translation table once per call: a single C-level
        # translate() pass replaces the former chain of str.replace calls
        # (equivalent here, since no replacement value is itself a key).
        table = str.maketrans(self.spchars)
        for word in words:
            word = word.lower().translate(table)
            files = self.goofle.find_one({"word": word}, {"_id": False, "files": True})
            if files is None:
                continue  # word not in the index
            for file_data in files["files"]:
                file_name, count = file_data.values()
                result[file_name] += count
        ranked = sorted(result.items(), key=lambda item: item[1], reverse=True)
        return ranked[:top]
| [
"from pymongo import MongoClient\n",
"from collections import defaultdict\n",
"\n",
"class Searcher:\n",
" def __init__(self, ip, port, db, collection, user, pwd):\n",
" self.client = MongoClient(ip, port)\n",
" self.db = self.client[db]\n",
" self.db.authenticate(user, pwd)\n",
" self.goofle = self.db[collection]\n",
" self.spchars = {\"á\":\"a\", \"à\":\"a\", \"â\":\"a\", \"ä\":\"a\", \"é\":\"e\",\n",
" \"è\":\"e\", \"ê\":\"e\", \"ë\":\"e\", \"í\":\"i\", \"ì\":\"i\",\n",
" \"î\":\"i\", \"ï\":\"i\", \"ó\":\"o\", \"ò\":\"o\", \"ô\":\"o\",\n",
" \"ö\":\"o\", \"ú\":\"u\", \"ù\":\"u\", \"û\":\"u\", \"ü\":\"u\",\n",
" \"ñ\":\"n\", \"!\":\"\", \"|\":\"\", \"°\":\"\", \"¬\":\"\", \"\\\"\":\"\",\n",
" \"#\":\"\", \"$\":\"\", \"%\":\"\", \"&\":\"\", \"/\":\"\", \"(\":\"\",\n",
" \")\":\"\", \"=\":\"\", \"'\":\"\", \"?\":\"\", \"¡\":\"\", \"¿\":\"\",\n",
" \"@\":\"\", \"¨\":\"\", \"´\":\"\", \"+\":\"\", \"*\":\"\", \"~\":\"\",\n",
" \"<\":\"\", \">\":\"\", \"{\":\"\", \"[\":\"\", \"^\":\"\", \"}\":\"\",\n",
" \"`\":\"\", \"]\":\"\", \";\":\"\", \",\":\"\", \".\":\"\", \":\":\"\",\n",
" \"-\":\"\", \"_\":\"\" }\n",
"\n",
" def search(self, words, top):\n",
" result = defaultdict(lambda: 0)\n",
" for word in words:\n",
" word = word.lower()\n",
" for v, c in self.spchars.items():\n",
" word = word.replace(v, c)\n",
" files = self.goofle.find_one({\"word\": word}, {\"_id\": False, \"files\": True})\n",
" if files is None:\n",
" continue\n",
" for file_data in files[\"files\"]:\n",
" file_name, count = file_data.values()\n",
" result[file_name] += count\n",
" resultlist = sorted(result.items(), key=lambda x:x[1], reverse = True)\n",
" return resultlist[:top]\n"
] | [
0,
0,
0,
0.0625,
0.01694915254237288,
0,
0,
0,
0,
0.07692307692307693,
0.07692307692307693,
0.07692307692307693,
0.07692307692307693,
0.08571428571428572,
0.08823529411764706,
0.08823529411764706,
0.08823529411764706,
0.08823529411764706,
0.08823529411764706,
0.08108108108108109,
0,
0.03125,
0,
0,
0.038461538461538464,
0.025,
0,
0.024390243902439025,
0.041666666666666664,
0,
0.02564102564102564,
0,
0,
0.04,
0
] | 35 | 0.034901 | false |
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
### <summary>
### Regression test for consistency of hour data over a reverse split event in US equities.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="regression test" />
class HourReverseSplitRegressionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2013, 11, 7)
self. SetEndDate(2013, 11, 8)
self.SetCash(100000)
self.SetBenchmark(lambda x: 0)
self.symbol = self.AddEquity("VXX", Resolution.Hour).Symbol
def OnData(self, slice):
if slice.Bars.Count == 0: return
if (not self.Portfolio.Invested) and self.Time.date() == self.EndDate.date():
self.Buy(self.symbol, 1) | [
"# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.\n",
"# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"from clr import AddReference\n",
"AddReference(\"System\")\n",
"AddReference(\"QuantConnect.Algorithm\")\n",
"AddReference(\"QuantConnect.Common\")\n",
"\n",
"from System import *\n",
"from QuantConnect import *\n",
"from QuantConnect.Algorithm import *\n",
"\n",
"\n",
"### <summary>\n",
"### Regression test for consistency of hour data over a reverse split event in US equities.\n",
"### </summary>\n",
"### <meta name=\"tag\" content=\"using data\" />\n",
"### <meta name=\"tag\" content=\"regression test\" />\n",
"class HourReverseSplitRegressionAlgorithm(QCAlgorithm):\n",
"\n",
" def Initialize(self):\n",
" self.SetStartDate(2013, 11, 7)\n",
" self. SetEndDate(2013, 11, 8)\n",
" self.SetCash(100000)\n",
" self.SetBenchmark(lambda x: 0)\n",
"\n",
" self.symbol = self.AddEquity(\"VXX\", Resolution.Hour).Symbol\n",
" \n",
" def OnData(self, slice):\n",
" if slice.Bars.Count == 0: return\n",
" if (not self.Portfolio.Invested) and self.Time.date() == self.EndDate.date():\n",
" self.Buy(self.symbol, 1)"
] | [
0,
0.012345679012345678,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0.037037037037037035,
0.02702702702702703,
0,
0,
0.07142857142857142,
0.021739130434782608,
0.06666666666666667,
0.022222222222222223,
0.02,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0.024390243902439025,
0.011627906976744186,
0.027777777777777776
] | 42 | 0.014325 | false |