reponame (string, lengths 2–39) | files (list) | median_score (float64, 0–11.5) |
---|---|---|
felipeort | [
{
"content": "# https://docs.python.org/3/library/socket.html\n\nimport socket\nimport dns.resolver\n\n# 0) Vamos a consultar el estado de un buzón de correo utilizando el protocolo POP3\n\nusuario = \"ort-grupo2\"\npassword = \"<PASSWORD>\"\n\ndominio = 'lab.ort.edu.uy' # Nombre de dominio al que quiero enviar correo\n\n\n# 1) Determinar servidor SMTP del dominio lab.ort.edu.uy\n\nrestpuesta_consulta_MX = dns.resolver.resolve(dominio, 'MX')\nprint(\"Respuesta a consulta registro MX: \", \"\\r\\n\", restpuesta_consulta_MX)\n\nmail_exchange = restpuesta_consulta_MX[0].exchange\nprint(\"Mail Exchange: \", \"\\r\\n\", mail_exchange)\nprint(type(mail_exchange))\n\nmail_exchange_str = str(mail_exchange)\nprint(\"Mail Exchange STR: \", \"\\r\\n\", mail_exchange_str)\nprint(type(mail_exchange_str))\n\n# 2) Necesito determinar el registro A del servidor de correo encontrado en (1)\n# 2) Puedo obtenerlo mediante una consulta independiente\n# 2) O puede encontrarse como Additional Record en la consulta anterior\n\nmail_exchange_A = restpuesta_consulta_MX.nameserver\nprint(\"Registro A de Mail Exchange: \", \"\\r\\n\", mail_exchange_A)\nprint(type(mail_exchange_A))\n\n\n# 3) Puedo consultar el estado de la casilla\n\nPUERTO = 110 # Puerto definido por la IANA para POP3\n\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as mi_socket:\n mi_socket.connect((mail_exchange_A, PUERTO))\n respuesta = mi_socket.recv(2048)\n\n # Establecemos conexión POP3 con el servidor de correo.\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n # Naturalmente debería contemplar la ocurrencia de errores y manejarlos de forma acorde\n # Por ejemplo utilizando try / catch\n # Pero ese detalle esta más allá del alcance del ejemplo\n\n MENSAJE_POP3 = \"USER\" + \" \" + usuario + \"\\r\\n\"\n print(MENSAJE_POP3)\n mi_socket.sendall(MENSAJE_POP3.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n MENSAJE_POP3 = \"PASS\" + \" \" + password + \"\\r\\n\"\n print(MENSAJE_POP3)\n mi_socket.sendall(MENSAJE_POP3.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n MENSAJE_POP3 = \"LIST\" + \"\\r\\n\"\n print(MENSAJE_POP3)\n mi_socket.sendall(MENSAJE_POP3.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n MENSAJE_POP3 = \"RETR 1\" + \"\\r\\n\"\n print(MENSAJE_POP3)\n mi_socket.sendall(MENSAJE_POP3.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n MENSAJE_POP3 = \"DELE 1\" + \"\\r\\n\"\n print(MENSAJE_POP3)\n mi_socket.sendall(MENSAJE_POP3.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n MENSAJE_POP3 = \"QUIT\" + \"\\r\\n\"\n print(MENSAJE_POP3)\n mi_socket.sendall(MENSAJE_POP3.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n mi_socket.close()\n\n\n\n\n",
"id": "6917932",
"language": "Python",
"matching_score": 5.365405559539795,
"max_stars_count": 0,
"path": "pop3_simple.py"
},
{
"content": "# https://docs.python.org/3/library/socket.html\n\nimport socket\nimport dns.resolver\n\n# 0) Vamos a enviar un correo desde <EMAIL> a <EMAIL>\n\ncasilla_origen = \"ort-grupo1\"\ncasilla_destino = \"ort-grupo2\"\n\ndominio = 'lab.ort.edu.uy' # Nombre de dominio al que quiero enviar correo\n\n\n# 1) Determinar servidor SMTP del dominio lab.ort.edu.uy\n\nrestpuesta_consulta_MX = dns.resolver.resolve(dominio, 'MX')\nprint(\"Respuesta a consulta registro MX: \", \"\\r\\n\", restpuesta_consulta_MX)\n\nmail_exchange = restpuesta_consulta_MX[0].exchange\nprint(\"Mail Exchange: \", \"\\r\\n\", mail_exchange)\nprint(type(mail_exchange))\n\nmail_exchange_str = str(mail_exchange)\nprint(\"Mail Exchange STR: \", \"\\r\\n\", mail_exchange_str)\nprint(type(mail_exchange_str))\n\n# 2) Necesito determinar el registro A del servidor de correo encontrado en (1)\n# 2) Puedo obtenerlo mediante una consulta independiente\n# 2) O puede encontrarse como Additional Record en la consulta anterior\n\nmail_exchange_A = restpuesta_consulta_MX.nameserver\nprint(\"Registro A de Mail Exchange: \", \"\\r\\n\", mail_exchange_A)\nprint(type(mail_exchange_A))\n\n\n# 3) Puedo comenzar a elaborar el \"sobre\" del correo utilizando el protocolo SMTP\n\nPUERTO = 25 # Puerto definido por la IANA para SMTP\n\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as mi_socket:\n mi_socket.connect((mail_exchange_A, PUERTO))\n respuesta = mi_socket.recv(2048)\n\n # Establecemos conexión SMTP con el servidor de correo.\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n # Naturalmente debería contemplar la ocurrencia de errores y manejarlos de forma acorde\n # Por ejemplo utilizando try / catch\n # Pero ese detalle esta más allá del alcance del ejemplo\n\n MENSAJE_SMTP = \"HELO\" + \" \" + dominio + \"\\r\\n\"\n print(MENSAJE_SMTP)\n mi_socket.sendall(MENSAJE_SMTP.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n MENSAJE_SMTP = \"MAIL FROM:\" + \" \" + \"<\" + casilla_origen + \">\" + \"\\r\\n\"\n print(MENSAJE_SMTP)\n mi_socket.sendall(MENSAJE_SMTP.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n MENSAJE_SMTP = \"RCPT TO:\" + \" \" + \"<\" + casilla_destino + \">\" + \"\\r\\n\"\n print(MENSAJE_SMTP)\n mi_socket.sendall(MENSAJE_SMTP.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n MENSAJE_SMTP = \"DATA\" + \"\\r\\n\"\n print(MENSAJE_SMTP)\n mi_socket.sendall(MENSAJE_SMTP.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n # 4) Dentro del \"sobre\" SMTP va la \"carta\"\n\n DATA = \"From: <EMAIL>\" + \"\\r\\n\" + \"To: <EMAIL> \" + \"\\r\\n\" + \\\n \"Subject: Felicidades\" + \"\\r\\n\" + \"@Feliz año 2021 !\" + \"\\r\\n\" + \".\" + \"\\r\\n\"\n\n print(DATA)\n\n mi_socket.sendall(DATA.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n # 4) La \"carta\" ya fue enviada, puedo cerrar la sesión SMTP\n\n MENSAJE_SMTP = \"QUIT\" + \"\\r\\n\"\n print(MENSAJE_SMTP)\n mi_socket.sendall(MENSAJE_SMTP.encode())\n\n respuesta = mi_socket.recv(2048)\n print('La respuesta recibida es: ')\n print(respuesta.decode())\n\n mi_socket.close()\n\n\n\n\n",
"id": "494903",
"language": "Python",
"matching_score": 1.2427406311035156,
"max_stars_count": 0,
"path": "smtp_simple_send.py"
},
{
"content": "# https://docs.python.org/3/library/socket.html\n\nimport socket\n\n\n# Datos del Laboratorio\n\nSITIO_WEB = 'example.com' \nPUERTO = 80 # Puerto donde escucha el servidor HTTP\n\n#################################################################\n# PRIMERO: Consulto al DNS por el registro A de example.com\n#################################################################\n\nIPv4_SERVIDOR = socket.gethostbyname(SITIO_WEB)\n\nprint('La direccion IPv4 de' + SITIO_WEB + 'es: ')\nprint(IPv4_SERVIDOR)\n\n######################################################################\n# SEGUNDO: Damos formato a un string que represente una consulta HTTP\n######################################################################\n\n# Metodo GET\nGET = \"GET / HTTP/1.1\" + \"\\r\\n\"\n\n# Encabezado HOST\nHOST = \"Host: \" + SITIO_WEB + \"\\r\\n\"\n\n# Encabezado User-Agent\nUSER_AGENT = \"User-agent: \" + \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0\" + \"\\r\\n\"\n\n# Encabezado Accept\nACCEPT = \"Accept: \" + \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\" + \"\\r\\n\"\n\n# Encabezado Accept-Language\nACCEPT_LANGUAGE = \"Accept-Language: \" + \"es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3\" + \"\\r\\n\"\n\n# Encabezado Connection\nCONNECTION = \"Connection: \" + \"keep-alive\" + \"\\r\\n\"\n\n# Concatenamos:\nMENSAJE_HTTP = GET + HOST + ACCEPT + ACCEPT_LANGUAGE + USER_AGENT + CONNECTION + \"\\r\\n\"\n\nprint('El mensaje HTTP es: ')\nprint(MENSAJE_HTTP)\n\n#############################################################################################\n# TERCERO: Creamos un socket TCP tipo STREAM donde escribimos el mensaje y leemos la respueta\n#############################################################################################\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as mi_socket:\n mi_socket.connect((IPv4_SERVIDOR, PUERTO))\n mi_socket.sendall(MENSAJE_HTTP.encode('utf-8'))\n respuesta = mi_socket.recv(2048)\n mi_socket.close()\n\nprint('La respuesta recibida es: ')\nprint(respuesta.decode('ascii'))\n",
"id": "9063557",
"language": "Python",
"matching_score": 7.402853965759277,
"max_stars_count": 0,
"path": "cliente_http_sockets.py"
},
{
"content": "# <NAME>: Por más ejemplos consultar:\n# https://docs.python.org/3/library/socket.html\n# https://docs.python.org/3/library/telnetlib.html\n\nimport socket\nfrom telnetlib import Telnet\n\n# Datos del Laboratorio\n\nSITIO_WEB = 'example.com' \nPUERTO = 80 # Puerto donde escucha el servidor HTTP\n\n\n#################################################################\n# PRIMERO: Consulto al DNS por el registro A de SITIO_WEB\n#################################################################\n\nIPv4_SERVIDOR = socket.gethostbyname(SITIO_WEB)\n\nprint('La direccion IPv4 de ' + SITIO_WEB + ' es: ')\nprint(IPv4_SERVIDOR)\n\n######################################################################\n# SEGUNDO: Damos formato a un string que represente una consulta HTTP\n######################################################################\n\n# Metodo GET\nGET = \"GET / HTTP/1.1\" + \"\\r\\n\"\n\n# Encabezado HOST\nHOST = \"Host: \" + SITIO_WEB + \"\\r\\n\"\n\n# Encabezado User-Agent\nUSER_AGENT = \"User-agent: \" + \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0\" + \"\\r\\n\"\n\n# Encabezado Accept\nACCEPT = \"Accept: \" + \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\" + \"\\r\\n\"\n\n# Encabezado Accept-Language\nACCEPT_LANGUAGE = \"Accept-Language: \" + \"es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3\" + \"\\r\\n\"\n\n# Encabezado Connection\nCONNECTION = \"Connection: \" + \"close\" + \"\\r\\n\"\n\n# Concatenamos:\nMENSAJE_HTTP = GET + HOST + ACCEPT + ACCEPT_LANGUAGE + USER_AGENT + CONNECTION + \"\\r\\n\"\n\nprint('El mensaje HTTP es: ')\nprint(MENSAJE_HTTP)\n\n\n#############################################################################################\n# TERCERO: Utilizamos TELNET para establecer una conexion TCP con el puerto 80 del servidor\n# En esta conexion escribimos el mensaje HTTP y leemos la respuesta\n#############################################################################################\n\nwith Telnet(IPv4_SERVIDOR, PUERTO) as mi_telnet:\n mi_telnet.write(MENSAJE_HTTP.encode('ascii'))\n print(mi_telnet.read_all().decode('ascii'))\n mi_telnet.close()\n",
"id": "8846157",
"language": "Python",
"matching_score": 3.7293102741241455,
"max_stars_count": 0,
"path": "cliente_http_telnet.py"
},
{
"content": "# https://docs.python.org/3/library/http.client.html#module-http.client\n\nimport http.client\n\n# Datos del Laboratorio\n\nSITIO_WEB = 'example.com' \n\n##########################################################################\n# PRIMERO y UNICO PASO: Hablo el protocolo HTTP con el servidor\n##########################################################################\n\nconexion = http.client.HTTPConnection(SITIO_WEB)\nconexion.request(\"GET\", \"/\")\nrespuesta = conexion.getresponse()\nprint('Codigo de respuesta: ')\nprint(respuesta.status, respuesta.reason)\ndatos = respuesta.read()\nprint('Datos en la respuesta: ')\nprint(datos.decode('ascii'))\nconexion.close()\n",
"id": "1547455",
"language": "Python",
"matching_score": 1.7168238162994385,
"max_stars_count": 0,
"path": "cliente_http.py"
}
] | 3.72931 |
PradeepKadubandi | [
{
"content": "# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# This file is derived from Habitat-Sim - https://github.com/facebookresearch/habitat-sim.git\n\nimport habitat_sim\nimport habitat_sim.agent\nimport habitat_sim.bindings as hsim\n\n\n# build SimulatorConfiguration\ndef make_cfg(SIM):\n sim_cfg = hsim.SimulatorConfiguration()\n\n if SIM.SCENE_ID == \"none\":\n SIM.SCENE_ID = \"data/scene_datasets/habitat-test-scenes/skokloster-castle.glb\"\n sim_cfg.scene_id = SIM.SCENE_ID\n\n sim_cfg.enable_physics = SIM.PHYSICS\n if SIM.PHYSICS:\n sim_cfg.physics_config_file = SIM.PHYSICS_CONFIG_FILE\n # sim_cfg.gpu_device_id = 0\n # sim_cfg.scene.id = settings[\"scene\"]\n\n # define default sensor parameters (see src/esp/Sensor/Sensor.h)\n sensors = dict()\n for i in range(len(SIM.AGENT.SENSORS.NAMES)):\n sensors[SIM.AGENT.SENSORS.NAMES[i]] = {\n \"sensor_type\": getattr(hsim.SensorType, SIM.AGENT.SENSORS.TYPES[i]),\n \"resolution\": [\n SIM.AGENT.SENSORS.RESOLUTIONS[i][0],\n SIM.AGENT.SENSORS.RESOLUTIONS[i][1],\n ],\n \"position\": [\n SIM.AGENT.SENSORS.POSES[i][0],\n SIM.AGENT.SENSORS.POSES[i][1],\n SIM.AGENT.SENSORS.POSES[i][2],\n ],\n \"orientation\": [\n SIM.AGENT.SENSORS.POSES[i][3],\n SIM.AGENT.SENSORS.POSES[i][4],\n SIM.AGENT.SENSORS.POSES[i][5],\n ],\n }\n\n if SIM.noisy:\n # sensors['rgb']['noise_model'] = 'GaussianNoiseModel' # We don't use RGB Noise\n sensors['depth']['noise_model'] = 'RedwoodDepthNoiseModel'\n\n # create sensor specifications\n sensor_specs = []\n for sensor_uuid, sensor_params in sensors.items():\n sensor_spec = hsim.CameraSensorSpec()\n sensor_spec.uuid = sensor_uuid\n sensor_spec.sensor_type = sensor_params[\"sensor_type\"]\n sensor_spec.resolution = sensor_params[\"resolution\"]\n sensor_spec.position = sensor_params[\"position\"]\n sensor_spec.gpu2gpu_transfer = False # Todo: Move this to config\n if SIM.noisy and sensor_uuid in ('depth'):\n sensor_spec.noise_model = sensor_params['noise_model']\n print(\"==== Initialized Sensor Spec: =====\")\n print(\"Sensor uuid: \", sensor_spec.uuid)\n print(\"Sensor type: \", sensor_spec.sensor_type)\n print(\"Sensor position: \", sensor_spec.position)\n print(\"===================================\")\n\n sensor_specs.append(sensor_spec)\n\n # create agent specifications\n # TODO: Accomodate more agents\n agent_cfg = habitat_sim.agent.AgentConfiguration()\n agent_cfg.sensor_specifications = sensor_specs\n # TODO: Move agent actions to config\n agent_cfg.action_space = {\n \"move_forward\": habitat_sim.agent.ActionSpec(\n \"move_forward\", habitat_sim.agent.ActuationSpec(amount=1.0) if not SIM.noisy else \n habitat_sim.agent.PyRobotNoisyActuationSpec(amount=1.0)\n ),\n \"turn_left\": habitat_sim.agent.ActionSpec(\n \"turn_left\", habitat_sim.agent.ActuationSpec(amount=10.0) if not SIM.noisy else \n habitat_sim.agent.PyRobotNoisyActuationSpec(amount=10.0)\n ),\n \"turn_right\": habitat_sim.agent.ActionSpec(\n \"turn_right\", habitat_sim.agent.ActuationSpec(amount=10.0) if not SIM.noisy else \n habitat_sim.agent.PyRobotNoisyActuationSpec(amount=10.0)\n ),\n }\n sim_cfg.default_agent_id = SIM.DEFAULT_AGENT_ID\n # # override action space to no-op to test physics\n # if sim_cfg.enable_physics:\n # agent_cfg.action_space = {\n # \"move_forward\": habitat_sim.agent.ActionSpec(\n # \"move_forward\", habitat_sim.agent.ActuationSpec(amount=0.0)\n # )\n # }\n\n return habitat_sim.Configuration(sim_cfg, 
[agent_cfg])\n",
"id": "3736211",
"language": "Python",
"matching_score": 1.0906580686569214,
"max_stars_count": 0,
"path": "droidlet/lowlevel/locobot/remote/pyrobot/habitat/sim_utils.py"
},
{
"content": "\"\"\"\nCopyright (c) Facebook, Inc. and its affiliates.\n\"\"\"\n\nimport numpy as np\n\nMAX_MAP_SIZE = 4097\nMAP_INIT_SIZE = 1025\nBIG_I = MAX_MAP_SIZE\nBIG_J = MAX_MAP_SIZE\n\n\ndef no_y_l1(self, xyz, k):\n \"\"\" returns the l1 distance between two standard coordinates\"\"\"\n return np.linalg.norm(np.asarray([xyz[0], xyz[2]]) - np.asarray([k[0], k[2]]), ord=1)\n\n\n# TODO tighter integration with reference objects table, main memory update\n# should probably sync PlaceField maps without explicit perception updates\n# Node type for complicated-shaped obstacles that aren't \"objects\" e.g. walls?\n# currently just represented as occupancy cells with no memid\n# FIXME allow multiple memids at a single location in the map\n\n\nclass PlaceField:\n \"\"\"\n maintains a grid-based map of some slice(s) of the world, and \n the state representations needed to track active exploration.\n\n the .place_fields attribute is a dict with keys corresponding to heights, \n and values {\"map\": 2d numpy array, \"updated\": 2d numpy array, \"memids\": 2d numpy array}\n place_fields[h][\"map\"] is an occupany map at the the height h (in agent coordinates)\n a location is 0 if there is nothing there or it is unseen, 1 if occupied \n place_fields[h][\"memids\"] gives a memid index for the ReferenceObject at that location, \n if there is a ReferenceObject linked to that spatial location.\n the PlaceField keeps a mappping from the indices to memids in \n self.index2memid and self.memid2index\n place_fields[h][\"updated\"] gives the last update time of that location (in agent's internal time)\n if -1, it has neer been updated\n\n the .map2real method converts a location from a map to world coords\n the .real2map method converts a location from the world to the map coords\n\n droidlet.interpreter.robot.tasks.CuriousExplore uses the can_examine method to decide \n which objects to explore next:\n 1. for each new candidate coordinate, it fetches the closest examined coordinate.\n 2. if this closest coordinate is within a certain threshold (1 meter) of the current coordinate, \n or if that region has been explored upto a certain number of times (2, for redundancy),\n it is not explored, since a 'close-enough' region in space has already been explored. \n \"\"\"\n\n def __init__(self, memory, pixels_per_unit=1):\n self.get_time = memory.get_time\n\n self.index2memid = []\n self.memid2index = {}\n\n self.examined = {}\n self.examined_id = set()\n self.last = None\n\n self.maps = {}\n self.maybe_add_memid(\"NULL\")\n self.maybe_add_memid(memory.self_memid)\n # FIXME, want slices, esp for mc... 
init after first perception\n # with h=y2slice(y) instead of using 0\n self.map_size = self.extend_map(h=0)\n\n self.pixels_per_unit = pixels_per_unit\n\n # gives an index allowing quick lookup by memid\n # each entry is keyed by a memid and is a dict\n # {str(h*BIG_I*BIG_J + i*BIG_J + j) : True}\n # for each placed h, i ,j\n self.memid2locs = {}\n\n def ijh2idx(self, i, j, h):\n return str(h * BIG_I * BIG_J + i * BIG_J + j)\n\n def idx2ijh(self, idx):\n idx = int(idx)\n j = idx % BIG_J\n idx = (idx - j) // BIG_J\n i = idx % BIG_I\n h = (idx - i) // BIG_I\n return i, j, h\n\n def pop_memid_loc(self, memid, i, j, h):\n idx = self.hij2idx(h, i, j)\n del self.memid2locs[memid][idx]\n\n def maybe_delete_loc(self, i, j, h, t, memid=\"NULL\"):\n \"\"\"\n remove a loc from the maps and from memid2loc index.\n if memid is set, only removes the loc if the memid matches\n \"\"\"\n current_memid = self.index2memid[int(self.maps[h][\"memids\"][i, j])]\n if memid == \"NULL\" or current_memid == memid:\n self.maps[h][\"memids\"][i, j] = self.memid2index[\"NULL\"]\n self.maps[h][\"map\"][i, j] = 0\n self.maps[h][\"updated\"][i, j] = t\n idx = self.ijh2idx(i, j, h)\n # maybe error/warn if its not there?\n if self.memid2locs.get(memid):\n self.memid2locs[memid].pop(idx, None)\n if len(self.memid2locs[memid]) == 0:\n self.memid2locs.pop(memid, None)\n\n def delete_loc_by_memid(self, memid, t, is_move=False):\n \"\"\"\n remove all locs corresponding to a memid. \n if is_move is set, asserts that there is precisely one loc\n corresponding to the memid\n \"\"\"\n assert memid\n assert memid != \"NULL\"\n count = 0\n for idx in self.memid2locs.get(memid, []):\n i, j, h = self.idx2ijh(idx)\n self.maps[h][\"memids\"][i, j] = 0\n self.maps[h][\"map\"][i, j] = 0\n self.maps[h][\"updated\"][i, j] = t\n count = count + 1\n if is_move and count > 1:\n # eventually allow moving \"large\" objects\n raise Exception(\n \"tried to delete more than one pixel from the place_field by memid with is_move set\"\n )\n self.memid2locs.pop(memid, None)\n\n def update_map(self, changes):\n \"\"\"\n changes is a list of dicts of the form \n {\"pos\": (x, y, z),\n \"memid\": str (default \"NULL\"),\n \"is_obstacle\": bool (default True),\n \"is_move\": bool (default False),\n \"is_delete\": bool (default False) }\n pos is required if is_delete is False. \n all other fields are always optional.\n \n \"is_obstacle\" tells whether the agent can traverse that location\n if \"is_move\" is False, the change is taken as is; if \"is_move\" is True, if the\n change corresponds to a memid, and the memid is located somewhere on the map,\n the old location is removed when the new one is set. 
For now, to move complicated objects\n that cover many pixels, do not use is_move, and instead move them \"by hand\"\n by issuing a list of changes deleting the old now empty locations and adding the\n new now-filled locations\n \"is_delete\" True without a memid means whatever is in that location is to be removed.\n if a memid is set, the remove will occur only if the memid matches.\n \n the \"is_obstacle\" status can be changed without changing memid etc.\n \"\"\"\n t = self.get_time()\n for c in changes:\n is_delete = c.get(\"is_delete\", False)\n is_move = c.get(\"is_move\", False)\n memid = c.get(\"memid\", \"NULL\")\n p = c.get(\"pos\")\n if p is None:\n assert is_delete\n # if the change is a remove, and is specified by memid:\n if not memid:\n raise Exception(\"tried to update a map location without a location or a memid\")\n # warn if empty TODO?\n self.delete_loc_by_memid(memid, t)\n else:\n x, y, z = p\n h = self.y2slice(y)\n i, j = self.real2map(x, z, h)\n s = max(i - self.map_size + 1, j - self.map_size + 1, -i, -j)\n if s > 0:\n self.extend_map(s)\n i, j = self.real2map(x, z, h)\n s = max(i - self.map_size + 1, j - self.map_size + 1, -i, -j)\n if s > 0:\n # the map can not been extended enough to handle these bc MAX_MAP_SIZE\n # FIXME appropriate warning or error?\n continue\n if is_delete:\n self.maybe_delete_loc(i, j, h, t, memid=memid)\n else:\n if is_move:\n assert memid != \"NULL\"\n self.delete_loc_by_memid(memid, t, is_move=True)\n self.maps[h][\"memids\"][i, j] = self.memid2index.get(\n memid, self.maybe_add_memid(memid)\n )\n self.maps[h][\"map\"][i, j] = c.get(\"is_obstacle\", 1)\n self.maps[h][\"updated\"][i, j] = t\n if not self.memid2locs.get(memid):\n self.memid2locs[memid] = {}\n self.memid2locs[memid][self.ijh2idx(i, j, h)] = True\n\n # FIXME, want slices, esp for mc\n def y2slice(self, y):\n return 0\n\n def real2map(self, x, z, h):\n \"\"\"\n convert an x, z coordinate in agent space to a pixel on the map\n \"\"\"\n n = self.maps[h][\"map\"].shape[0]\n i = x * self.pixels_per_unit\n j = z * self.pixels_per_unit\n i = i + n // 2\n j = j + n // 2\n return round(i), round(j)\n\n def map2real(self, i, j, h):\n \"\"\"\n convert an i, j pixel coordinate in the map to agent space\n \"\"\"\n n = self.maps[h][\"map\"].shape[0]\n i = i - n // 2\n j = j - n // 2\n x = i / self.pixels_per_unit\n z = j / self.pixels_per_unit\n return x, z\n\n def maybe_add_memid(self, memid):\n \"\"\" \n adds an entry to the mapping from memids to ints to put on map.\n these are never removed\n \"\"\"\n idx = self.memid2index.get(memid)\n if idx is None:\n idx = len(self.index2memid)\n self.index2memid.append(memid)\n self.memid2index[memid] = idx\n return idx\n\n def extend_map(self, h=None, extension=1):\n assert extension >= 0\n if not h and len(self.maps) == 1:\n h = list(self.maps.keys())[0]\n if not self.maps.get(h):\n self.maps[h] = {}\n for m, v in {\"updated\": -1, \"map\": 0, \"memids\": 0}.items():\n self.maps[h][m] = v * np.ones((MAP_INIT_SIZE, MAP_INIT_SIZE))\n w = self.maps[h][\"map\"].shape[0]\n new_w = w + 2 * extension\n if new_w > MAX_MAP_SIZE:\n return -1\n for m, v in {\"updated\": -1, \"map\": 0, \"memids\": 0}.items():\n new_map = v * np.ones((new_w, new_w))\n new_map[extension:-extension, extension:-extension] = self.maps[h][m]\n self.maps[h][m] = new_map\n return new_w\n\n def get_closest(self, xyz):\n \"\"\"returns closest examined point to xyz\"\"\"\n c = None\n dist = 1.5\n for k, v in self.examined.items():\n if no_y_l1(k, xyz) < dist:\n dist = no_y_l1(k, 
xyz)\n c = k\n if c is None:\n self.examined[xyz] = 0\n return xyz\n return c\n\n def update(self, target):\n \"\"\"called each time a region is examined. Updates relevant states.\"\"\"\n self.last = self.get_closest(target[\"xyz\"])\n self.examined_id.add(target[\"eid\"])\n self.examined[self.last] += 1\n\n def clear_examined(self):\n self.examined = {}\n self.examined_id = set()\n self.last = None\n\n def can_examine(self, x):\n \"\"\"decides whether to examine x or not.\"\"\"\n loc = x[\"xyz\"]\n k = self.get_closest(x[\"xyz\"])\n val = True\n if self.last is not None and self.l1(cls.last, k) < 1:\n val = False\n val = self.examined[k] < 2\n print(\n f\"can_examine {x['eid'], x['label'], x['xyz'][:2]}, closest {k[:2]}, can_examine {val}\"\n )\n print(f\"examined[k] = {self.examined[k]}\")\n return val\n\n\nif __name__ == \"__main__\":\n W = {0: {0: {0: True}, 1: {2: {3: True}}}, 1: {5: True}}\n idxs = [0, 1, 2, 3]\n",
"id": "11028709",
"language": "Python",
"matching_score": 1.4603281021118164,
"max_stars_count": 0,
"path": "droidlet/memory/place_field.py"
},
{
"content": "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n''' \nThis script syncs Mephisto (MTurk) allowlists and blocklists between the\nlocal Mephisto DB and shared lists (.txt files) in an S3 bucket.\n\nCurrently implemented are the interaction job and vision annotation job\nlists, but the structure is extendable to future qualifications as well.\n'''\n\nimport argparse\nimport os\nimport logging\nimport boto3\nimport copy\n\nfrom mephisto.data_model.worker import Worker\nfrom mephisto.abstractions.databases.local_database import LocalMephistoDB\nfrom mephisto.tools.data_browser import DataBrowser as MephistoDataBrowser\n\nfrom droidlet_static_html_task.pilot_config import PILOT_ALLOWLIST_QUAL_NAME as interaction_whitelist\nfrom droidlet_static_html_task.pilot_config import PILOT_BLOCK_QUAL_NAME as interaction_blacklist\nfrom vision_annotation_task.pilot_config import PILOT_ALLOWLIST_QUAL_NAME as vision_annotation_whitelist\nfrom vision_annotation_task.pilot_config import PILOT_BLOCK_QUAL_NAME as vision_annotation_blacklist\n\nqual_dict = {\"interaction\": {\n \"allow\": interaction_whitelist, \n \"block\": interaction_blacklist }, \n \"vision_annotation\": {\n \"allow\": vision_annotation_whitelist, \n \"block\": vision_annotation_blacklist } }\n\ndb = LocalMephistoDB()\nmephisto_data_browser = MephistoDataBrowser(db=db)\n\ns3 = boto3.client('s3')\n\nlogging.basicConfig(level=\"INFO\")\n\n\ndef import_s3_lists(bucket: str):\n # Assumed S3 allowlist key example: (bucket)/interaction/allow.txt\n \n output_dict = copy.deepcopy(qual_dict)\n \n for task in output_dict.keys():\n for list_type in output_dict[task].keys():\n key = f\"{task}/{list_type}.txt\"\n try:\n with open('list.txt', 'wb') as f:\n s3.download_fileobj(bucket, key, f)\n logging.info(f\"{task} {list_type}list downloaded successfully\")\n with open('list.txt', 'r') as f:\n output_dict[task][list_type] = [line.strip() for line in f.readlines()]\n except:\n logging.info(f\"{task} {list_type}list not found on S3, creating new S3 {list_type}list\")\n output_dict[task][list_type] = []\n\n os.remove(\"list.txt\")\n return output_dict\n\n\ndef add_workers_to_quals(add_list: list, qual: str):\n\n for turker in add_list:\n #First add the worker to the database, or retrieve them if they already exist\n try:\n db_id = db.new_worker(turker, 'mturk')\n worker = Worker.get(db, db_id)\n except:\n worker = db.find_workers(turker, 'mturk')[0]\n \n # Add the worker to the relevant list\n try:\n db.make_qualification(qual)\n except:\n pass\n else:\n logging.debug(f\"{qual} qualification not exists, so create one\")\n worker.grant_qualification(qual, 1)\n\n #Check to make sure the qualification was added successfully\n if not worker.is_qualified(qual):\n logging.info(f\"!!! 
{worker} not successfully qualified, debug\")\n else:\n logging.info(f\"Worker {worker.worker_name} added to list {qual}\")\n\n\ndef pull_local_lists():\n # Pull the qual lists from local Mephisto DB into a formatted dict\n\n output_dict = copy.deepcopy(qual_dict)\n\n logging.info(f\"Retrieving qualification lists from local Mephisto DB\")\n for task in output_dict.keys():\n for list_type in output_dict[task].keys():\n # If syncing for the first time, qualifications may not yet exist\n try:\n logging.info(f'attempting to make qualification: {qual_dict[task][list_type]}')\n db.make_qualification(qual_dict[task][list_type])\n except:\n logging.info(f'Qualification {qual_dict[task][list_type]} already exists')\n pass\n qual_list = mephisto_data_browser.get_workers_with_qualification(qual_dict[task][list_type])\n output_dict[task][list_type] = [worker.worker_name.strip(\"\\n\") for worker in qual_list]\n\n return output_dict\n\n\ndef compare_qual_lists(s3_lists: dict, local_lists: dict):\n # Compare two dicts of lists representing the local and S3 states, return a dict with the differences\n\n diff_dict = copy.deepcopy(qual_dict)\n\n logging.info(f\"Comparing qualification lists and checking for differences\")\n for t in diff_dict.keys():\n for l in diff_dict[t].keys():\n diff_dict[t][l] = {}\n diff_dict[t][l][\"s3_exclusive\"] = [x for x in s3_lists[t][l] if x not in local_lists[t][l]]\n diff_dict[t][l][\"local_exclusive\"] = [x for x in local_lists[t][l] if x not in s3_lists[t][l]]\n\n return diff_dict\n\n\ndef update_lists(bucket:str, diff_dict: dict):\n # Iterate through the differences between local and S3 lists and update both to be in sync\n\n for t in diff_dict.keys():\n for l in diff_dict[t].keys():\n for e in diff_dict[t][l].keys():\n\n if e == \"s3_exclusive\" and len(diff_dict[t][l][e]) > 0:\n add_workers_to_quals(diff_dict[t][l][e], qual_dict[t][l])\n\n elif e == \"local_exclusive\" and len(diff_dict[t][l][e]) > 0:\n logging.info(f\"Writing new workers to {t} {l} shared list on S3: {diff_dict[t][l][e]}\")\n\n filename = l + \".txt\"\n with open(filename, \"w\") as f:\n for line in diff_dict[t][l][e]:\n f.write(line.strip('\\n') + '\\n')\n \n upload_key = t + \"/\" + filename\n s3.upload_file(filename, bucket, upload_key)\n logging.info(f\"S3 upload succeeded\")\n \n os.remove(filename)\n\n else:\n logging.info(f\"No {e} workers on {t} {l} list, no update performed\")\n\n return\n\n\ndef main(bucket: str):\n # Pull shared lists from S3 and local qual lists\n s3_list_dict = import_s3_lists(bucket)\n local_list_dict = pull_local_lists()\n\n # Compare them for differences\n diff_dict = compare_qual_lists(s3_list_dict, local_list_dict)\n \n # Update local and s3 lists to match\n update_lists(bucket, diff_dict)\n\n return\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--s3_bucket\", type=str, help=\"S3 bucket where allowlists are stored\")\n opts = parser.parse_args()\n main(opts.s3_bucket)",
"id": "11924951",
"language": "Python",
"matching_score": 1.008400559425354,
"max_stars_count": 0,
"path": "droidlet/tools/crowdsourcing/sync_whitelists.py"
}
] | 1.090658 |
Pendar2 | [
{
"content": "from .tensorboard_cb import *\r\nfrom .version import __version__",
"id": "4645451",
"language": "Python",
"matching_score": 0,
"max_stars_count": 23,
"path": "fastai_tensorboard_callback/__init__.py"
},
{
"content": "from tensorboardX import SummaryWriter\nfrom fastai.basics import *\n\n@dataclass\nclass TensorboardLogger(Callback):\n learn:Learner\n run_name:str\n histogram_freq:int=100\n path:str=None\n def __post_init__(self):\n self.path = self.path or os.path.join(self.learn.path, \"logs\")\n self.log_dir = os.path.join(self.path, self.run_name)\n def on_train_begin(self, **kwargs):\n self.writer = SummaryWriter(log_dir=self.log_dir)\n def on_epoch_end(self, **kwargs):\n iteration = kwargs[\"iteration\"]\n metrics = kwargs[\"last_metrics\"]\n metrics_names = [\"valid_loss\"] + [o.__name__ for o in self.learn.metrics]\n \n for val, name in zip(metrics, metrics_names):\n self.writer.add_scalar(name, val, iteration)\n \n for name, emb in self.learn.model.named_children():\n if isinstance(emb, nn.Embedding):\n self.writer.add_embedding(list(emb.parameters())[0], global_step=iteration, tag=name)\n \n def on_batch_end(self, **kwargs):\n iteration = kwargs[\"iteration\"]\n loss = kwargs[\"last_loss\"]\n \n self.writer.add_scalar(\"learning_rate\", self.learn.opt.lr, iteration)\n self.writer.add_scalar(\"momentum\", self.learn.opt.mom, iteration)\n \n self.writer.add_scalar(\"loss\", loss, iteration)\n if iteration%self.histogram_freq==0:\n for name, param in self.learn.model.named_parameters():\n self.writer.add_histogram(name, param, iteration)\n def on_train_end(self, **kwargs):\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n dummy_input = next(iter(self.learn.data.train_dl))[0]\n self.writer.add_graph(self.learn.model, tuple(dummy_input))\n except Exception as e:\n print(\"Unable to create graph.\")\n print(e)\n self.writer.close()",
"id": "6010134",
"language": "Python",
"matching_score": 0,
"max_stars_count": 23,
"path": "fastai_tensorboard_callback/tensorboard_cb.py"
}
] | 0 |
00-MSME | [
{
"content": "from wtforms.validators import DataRequired, Optional, Length\nfrom wtforms import Form, BooleanField, StringField, PasswordField, validators, SelectMultipleField, IntegerField, FileField, HiddenField, TextAreaField\nfrom wtforms.widgets import TextArea\n\nclass PersonForm(Form):\n\n name = StringField('NAME', validators=[DataRequired()])\n email = StringField('EMAIL', validators=[DataRequired(), validators.Email()])\n\n\nclass EntityForm(Form):\n\n name = StringField('NAME', validators=[DataRequired()])\n description = TextAreaField('DESCRIPTION', [validators.optional(), validators.length(max=1024)])\n serial = StringField('SERIAL')\n configs = SelectMultipleField('CONFIG FILES')\n\n\nclass ConfigForm(Form):\n\n name = StringField('NAME', validators=[DataRequired()])\n path = StringField('PATH /etc/opt/augment00/', validators=[DataRequired()])\n file_text = HiddenField('TEXT', [DataRequired()])\n\n\nclass CommandForm(Form):\n\n command_text = HiddenField('TEXT', [DataRequired()])\n\n",
"id": "10865281",
"language": "Python",
"matching_score": 2.1733505725860596,
"max_stars_count": 1,
"path": "src/forms.py"
},
{
"content": "import logging\nimport json\n\n\nfrom google.appengine.api import users\nfrom google.appengine.api import memcache\nfrom google.appengine.ext import ndb\n\n\nfrom flask import Flask, request, redirect, flash\n\nfrom constants import *\n\napp = Flask(__name__)\n\napp.secret_key = FLASK_SECRET_KEY\n\nfrom models import Person, Entity, ConfigFile\nfrom forms import PersonForm, EntityForm, ConfigForm, CommandForm\nfrom shared import render_login_template, with_person\nfrom augment_exceptions import NonUniqueException\nfrom utilities import firebase\n\n\n@app.route('/person/new', methods=[\"POST\", \"GET\"])\ndef new_person():\n google_user = users.get_current_user()\n if google_user is not None:\n google_id = google_user.user_id()\n existing_user = Person.with_google_id(google_id)\n if existing_user is None:\n form = PersonForm(request.form)\n if request.method == 'POST':\n email = form.email.data\n name = form.name.data\n try:\n Person.create(name, email, google_id)\n flash(\"New account created.\", \"success\")\n return redirect(\"/\")\n except NonUniqueException as e:\n flash(\"Failed to create new account. %s\" % e.message, \"warning\")\n return redirect(\"/\")\n else:\n return render_login_template(\"account-form.html\", form=form)\n else:\n flash(\"Welcome back\", \"info\")\n return redirect(\"/\")\n else:\n return redirect(\"/\")\n\n\n\n@app.route('/person/update', methods=[\"POST\", \"GET\"])\n@with_person\ndef update_person(person=None):\n\n form = PersonForm(request.form, name=person.name, email=person.email)\n if request.method == 'POST':\n email = form.email.data\n name = form.name.data\n try:\n person.name = name\n person.email = email\n\n return redirect(\"/person/account\")\n except NonUniqueException as e:\n flash(\"We couldn't update your account account. 
%s\" % e.message, \"warning\")\n redirect(\"/person/new\")\n else:\n return render_login_template(\"account-form.html\", form=form)\n\n\n@app.route('/person/regenerate-apikey', methods=[\"POST\"])\n@with_person\ndef regenerate_api_key(person=None):\n person.reset_api_key()\n flash(\"Your API Key has been regenerated\", \"info\")\n return redirect(\"/\")\n\n\n@app.route('/person/delete', methods=[\"POST\"])\n@with_person\ndef delete_person(person=None):\n person.remove()\n return redirect(\"/\")\n\n\n## CRUD for entities\n\n@app.route('/entity/new', methods=[\"POST\", \"GET\"])\n@with_person\ndef new_entity(person=None):\n form = EntityForm(request.form)\n\n form.configs.choices = [(c.key, c.name) for c in person.configs]\n\n if request.method == 'POST':\n\n entity = person.add_new_entity(name=form.name.data,\n description=form.description.data,\n config = [ndb.Key(\"ConfigFile\", k, parent=person.key) for k in form.configs.data if ndb.Key(\"ConfigFile\", k, parent=person.key) is not None]\n )\n\n entity_uuid = entity.key.id()\n flash(\"Your new entity has been created\", \"info\")\n return redirect(\"/entity/%s\" % entity_uuid)\n else:\n return render_login_template(\"entity-form.html\", form=form)\n\n\ndef _allowed_entity(entity_uuid, person):\n\n entity = Entity.get_by_id(entity_uuid)\n if entity is None:\n flash(\"We couldn't find this entity\", \"warning\")\n return None, redirect(\"/\")\n else:\n if entity.person_key == person.key:\n return entity, None\n else:\n flash(\"You can only see your own entities\", \"info\")\n return None, redirect(\"/\")\n\n\n\n@app.route('/entity/<entity_uuid>', methods=[\"GET\"])\n@with_person\ndef entity(entity_uuid, person=None):\n\n entity, rsp = _allowed_entity(entity_uuid, person)\n if entity is None:\n return rsp\n\n creds_json = {\n \"entity_uuid\": entity_uuid,\n \"private_key\": entity.private_key,\n \"public_key\": entity.public_key,\n \"url\": \"%s/api/config/%s\" % (URL_BASE, entity_uuid)\n }\n\n creds = json.dumps(creds_json, indent=4)\n tag = entity_uuid[:8]\n\n return render_login_template(\"entity.html\", entity=entity, person=person, creds=creds, tag=tag)\n\n\n\n@app.route('/entity/<entity_uuid>/update', methods=[\"POST\", \"GET\"])\n@with_person\ndef update_entity(entity_uuid, person=None):\n\n entity, rsp = _allowed_entity(entity_uuid, person)\n if entity is None:\n return rsp\n\n form = EntityForm(request.form,\n name=entity.name,\n description=entity.description,\n serial=entity.serial\n )\n\n form.configs.choices = [(c.key.id(), c.name) for c in person.configs]\n\n if request.method == 'POST':\n entity.config = [ndb.Key(\"ConfigFile\", k, parent=person.key) for k in form.configs.data if ndb.Key(\"ConfigFile\", k, parent=person.key) is not None]\n entity.name = form.name.data\n entity.serial = None if not form.serial.data else form.serial.data\n entity.description = form.description.data\n entity.put()\n\n return redirect(\"/entity/%s\" % entity_uuid)\n else:\n return render_login_template(\"entity-form.html\", form=form)\n\n\n@app.route('/entity/<entity_uuid>/delete', methods=[\"POST\"])\n@with_person\ndef delete_entity(entity_uuid, person=None):\n\n entity, rsp = _allowed_entity(entity_uuid, person)\n if entity is None:\n return rsp\n\n entity.key.delete()\n return redirect(\"/\")\n\n\n@app.route('/entity/<entity_uuid>/regenerate', methods=[\"POST\"])\n@with_person\ndef regenerate(entity_uuid, person=None):\n\n entity, rsp = _allowed_entity(entity_uuid, person)\n if entity is None:\n return rsp\n\n private_key = 
entity.regenerate_keys()\n # aiven.add_influx_password(entity)\n memcache.add(entity_uuid, private_key, time=5, namespace=\"private\")\n flash(\"Take a copy of the credentials below as you won't see them again\", \"info\")\n return redirect(\"/entity/%s\" % entity_uuid)\n\n\n@app.route('/entity/<entity_uuid>/command', methods=[\"GET\", \"POST\"])\n@with_person\ndef command(entity_uuid, person=None):\n\n entity, rsp = _allowed_entity(entity_uuid, person)\n if entity is None:\n return rsp\n\n form = CommandForm(request.form)\n\n if request.method == 'POST':\n\n command = form.command_text.data\n try:\n as_json = json.loads(command)\n except Exception as e:\n flash(\"Your command wasn't valid json\", \"error\")\n return render_login_template(\"command-form.html\", form=form, entity=entity)\n\n firebase_service = firebase.get_service()\n firebase_service.send_message(entity_uuid, command_json=as_json)\n\n flash(\"Your command has been sent\", \"info\")\n return redirect(\"/entity/%s\" % entity_uuid)\n\n else:\n return render_login_template(\"command-form.html\", form=form, entity=entity)\n\n\n## CRUD for config\n\n\ndef _allowed_config(config_uuid, person):\n config_file = ConfigFile.get_by_id(config_uuid, parent=person.key)\n if config_file is None:\n flash(\"We couldn't find this config file\", \"warning\")\n return None, redirect(\"/\")\n else:\n if config_file.key.parent() == person.key:\n return config_file, None\n else:\n flash(\"You can only see your own config files\", \"warning\")\n return redirect(\"/\")\n\n\n@app.route('/config/<config_uuid>', methods=[\"POST\", \"GET\"])\n@with_person\ndef config(config_uuid, person=None):\n\n config_file, rsp = _allowed_config(config_uuid, person)\n if config_file is None:\n return rsp\n\n return render_login_template(\"config-file.html\", config=config_file, person=person)\n\n\n@app.route('/config/<config_uuid>/update', methods=[\"POST\", \"GET\"])\n@with_person\ndef update_config(config_uuid, person=None):\n\n config_file, rsp = _allowed_config(config_uuid, person)\n if config_file is None:\n return rsp\n\n form = ConfigForm(request.form,\n name=config_file.name,\n path=config_file.path,\n file_text=config_file.text\n )\n\n if request.method == 'POST':\n config_file.name = form.name.data\n config_file.path = form.path.data\n config_file.text = form.file_text.data\n config_file.put()\n\n return redirect(\"/config/%s\" % config_uuid)\n else:\n return render_login_template(\"config-form.html\", form=form, config=config_file)\n\n\n@app.route('/config/<config_uuid>/delete', methods=[\"POST\"])\n@with_person\ndef delete_config(config_uuid, person=None):\n\n config_file, rsp = _allowed_config(config_uuid, person)\n if config_file is None:\n return rsp\n\n config_file.key.delete()\n return redirect(\"/\")\n\n\n@app.route('/config/new', methods=[\"POST\", \"GET\"])\n@with_person\ndef new_config(person=None):\n\n form = ConfigForm(request.form)\n\n if request.method == 'POST':\n config = person.add_config_file(form.name.data,\n form.file_text.data,\n form.path.data\n )\n config_uuid = config.key.id()\n return redirect(\"/config/%s\" % config_uuid)\n else:\n return render_login_template(\"config-form.html\", form=form)\n\n\n@app.errorhandler(500)\ndef server_error(e):\n # Log the error and stacktrace.\n logging.exception('An error occurred during a request.')\n return 'An internal error occurred.', 500\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"id": "8787505",
"language": "Python",
"matching_score": 3.4467201232910156,
"max_stars_count": 1,
"path": "src/account.py"
},
{
"content": "from config_local import ( FLASK_SECRET_KEY,\n FIREBASE_URL)\n\nURL_BASE = \"http://augment00.org\"\n\n",
"id": "826111",
"language": "Python",
"matching_score": 0.1485789716243744,
"max_stars_count": 1,
"path": "src/constants.py"
},
{
"content": "import logging\nimport json\nfrom functools import wraps\n\nfrom google.appengine.ext import ndb\n\nfrom flask import Flask, request\napp = Flask(__name__)\n\nfrom models import Entity, Person\nfrom utilities import firebase, keys\n\nfrom constants import *\n\n\ndef is_signed(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n path = request.path\n print path\n sig = request.values.get(\"sig\")\n if sig is None:\n print \"no sig\"\n return (\"Permission denied\", 401, {})\n parts = path.split(\"/\")\n entity_uuid = parts[-2]\n key = ndb.Key(\"Entity\", entity_uuid)\n entity = key.get()\n if entity is None:\n return (\"Not found\", 403, {})\n ok = keys.verify_sig(path, sig, entity.public_key)\n if not ok:\n print \"not ok\"\n return (\"Permission denied\", 401, {})\n return func(*args, entity=entity, **kwargs)\n return decorated_view\n\n\ndef with_api_key(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n as_json = request.get_json(force=True)\n if as_json is None:\n print \"no json\"\n return (\"Permission denied, no json data\", 401, {})\n google_id = as_json.get(\"user_id\")\n api_key = as_json.get(\"api_key\")\n if google_id is None or api_key is None:\n return (\"Permission denied\", 401, {})\n\n person = Person.with_google_id(google_id)\n if person is None:\n return (\"Permission denied\", 401, {})\n\n if person.api_key != api_key:\n return (\"Permission denied\", 401, {})\n\n return func(*args, person=person, **kwargs)\n return decorated_view\n\n\n@app.route('/api/config/<entity_uuid>/<nonce>', methods=[\"GET\"])\n@is_signed\ndef api_config(entity_uuid, nonce, entity=None):\n\n serial = request.values.get(\"serial\")\n if entity.serial is not None and serial != entity.serial:\n return (\"Conflict - Serial doesn't match\", 409, {})\n else:\n if entity.serial is None:\n entity.serial = serial\n entity.put()\n\n return json.dumps(entity.as_json())\n\n\n@app.route('/api/firebase-token/<entity_uuid>/<nonce>', methods=[\"GET\"])\n@is_signed\ndef api_token(entity_uuid, nonce, entity=None):\n\n data = {\n \"entity_uuid\": entity_uuid,\n \"firebase_custom_token\": firebase.create_custom_token(entity_uuid)\n }\n\n return json.dumps(data)\n\n\n@app.route('/api/schema/<entity_uuid>', methods=[\"POST\"])\ndef set_entity_schema(entity_uuid):\n key = ndb.Key(\"Entity\", entity_uuid)\n entity = key.get()\n if entity is None:\n return \"not found\", 403\n else:\n as_json = request.get_json(force=True)\n entity.schema = as_json\n entity.put()\n return \"ok\", 200\n\n\n############################################################\n\n\n\n@app.route('/api/new-entity', methods=[\"POST\"])\n@with_api_key\ndef new_entity(person=None):\n as_json = request.get_json(force=True)\n entity = Entity.create(person.key, name=as_json[\"name\"])\n entity_uuid = entity.key.id()\n return entity_uuid, 201\n\n\n@app.route('/api/entity/<entity_uuid>', methods=[\"GET\"])\n@with_api_key\ndef get_entity_info(entity_uuid, person=None):\n\n key = ndb.Key(\"Entity\", entity_uuid)\n entity = key.get()\n\n if entity is None:\n return \"not found\", 403\n\n if entity.person_key != person.key:\n return (\"Permission denied\", 401, {})\n\n return json.dumps(entity.as_json()), 200\n\n\n@app.route('/api/entity/<entity_uuid>/add-value', methods=[\"POST\"])\n@with_api_key\ndef add_value(entity_uuid, person=None):\n\n key = ndb.Key(\"Entity\", entity_uuid)\n entity = key.get()\n\n if entity.person_key != person.key:\n return (\"Permission denied\", 401, {})\n\n as_json = request.get_json(force=True)\n\n 
value_name = as_json[\"name\"]\n value = as_json[\"value\"]\n\n if value_name == \"name\":\n entity.name = value\n else:\n entity.template_values[value_name] = value\n\n entity.put()\n return \"ok\", 200\n\n\n@app.route('/api/entity/<entity_uuid>/send-command', methods=[\"POST\"])\n@with_api_key\ndef send_command(entity_uuid, person=None):\n\n key = ndb.Key(\"Entity\", entity_uuid)\n entity = key.get()\n\n if entity.person_key != person.key:\n return (\"Permission denied\", 401, {})\n\n as_json = request.get_json(force=True)\n\n if not frozenset(as_json[\"rpc\"].keys()) == {\"method\", \"params\"}:\n\n return (\"Malformed request\", 400, {})\n\n firebase_service = firebase.get_service()\n firebase_service.send_message(entity_uuid, command_json=as_json[\"rpc\"])\n\n return \"ok\", 200\n\n\n@app.errorhandler(500)\ndef server_error(e):\n # Log the error and stacktrace.\n logging.exception('An error occurred during a request.')\n return 'An internal error occurred.', 500",
"id": "7825347",
"language": "Python",
"matching_score": 3.859786033630371,
"max_stars_count": 1,
"path": "src/api.py"
},
{
"content": "import random\nimport uuid\nfrom Crypto.PublicKey import RSA\nfrom google.appengine.ext import ndb\nfrom google.appengine.api import users\nfrom base64 import b64encode, b64decode\nfrom jinja2 import Template\n\nfrom utilities import firebase, keys\n\nfrom augment_exceptions import NonUniqueException\nfrom constants import *\n\n\nALPHA_NUMERIC = \"abcdefghijklmnopqrstuvwxyz0123456789\"\n\ndef generateNewRandomAlphaNumeric(length):\n random.seed()\n values = []\n for i in range(length):\n values.append(random.choice(ALPHA_NUMERIC))\n return \"\".join(values)\n\n\nclass Name(ndb.Model):\n pass\n\n\nclass Email(ndb.Model):\n pass\n\n\nclass GoogleId(ndb.Model):\n pass\n\n\nclass Person(ndb.Model):\n name_key = ndb.KeyProperty(kind=\"Name\", required=True)\n email_key = ndb.KeyProperty(kind=\"Email\", required=True)\n google_id_key = ndb.KeyProperty(kind=\"GoogleId\")\n api_key = ndb.StringProperty()\n\n\n @classmethod\n def create(cls, name, email, google_id):\n name_key = cls._new_unique_key(Name, name)\n email_key = cls._new_unique_key(Email, email)\n google_id_key = cls._new_unique_key(GoogleId, google_id)\n person_uuid = str(uuid.uuid4())\n api_key = generateNewRandomAlphaNumeric(30)\n person = cls(name_key=name_key,\n email_key=email_key,\n google_id_key=google_id_key,\n id=person_uuid,\n api_key=api_key)\n person.put()\n return person\n\n def get_name(self):\n return self.name_key.id()\n\n def set_name(self, new_name):\n self._set_unique_attribute(Name, \"name_key\", new_name)\n\n def get_email(self):\n return self.email_key.id()\n\n def set_email(self, new_email):\n self._set_unique_attribute(Email, \"email_key\", new_email)\n\n def get_google_id(self):\n return self.google_id_key.id()\n\n @classmethod\n def with_email(cls, email):\n key = ndb.Key(Email, email)\n return cls.query(cls.email_key == key).get()\n\n @classmethod\n def with_name(cls, name):\n key = ndb.Key(Name, name)\n return cls.query(cls.name_key == key).get()\n\n @classmethod\n def with_google_id(cls, google_id):\n key = ndb.Key(GoogleId, google_id)\n return cls.query(cls.google_id_key == key).get()\n\n @staticmethod\n def _new_unique_key(attribute_class, new_value):\n\n new_attribute_key = ndb.Key(attribute_class, new_value)\n existing_attribute_obj = new_attribute_key.get()\n\n if existing_attribute_obj is not None:\n raise NonUniqueException(\"The value %s for %s is adready in use\" % (new_value, attribute_class))\n else:\n new_attribute_obj = attribute_class(key=new_attribute_key)\n new_attribute_obj.put()\n\n return new_attribute_key\n\n\n @ndb.transactional(xg=True)\n def _set_unique_attribute(self, attribute_class, attribute_key_name, new_value):\n\n current_attribute_key = getattr(self, attribute_key_name)\n current_value = current_attribute_key.id()\n\n if current_value == new_value:\n return\n\n new_attribute_key = self._new_unique_key(attribute_class, new_value)\n current_attribute_key.delete()\n setattr(self, attribute_key_name, new_attribute_key)\n self.put()\n\n\n def add_new_entity(self, **kwargs):\n return Entity.create(self.key, **kwargs)\n\n\n @property\n def entities(self):\n return [e for e in Entity.query(Entity.person_key == self.key).iter()]\n\n @property\n def configs(self):\n return [c for c in ConfigFile.query(ancestor=self.key).iter()]\n\n\n def remove(self):\n ndb.delete_multi(ConfigFile.query(ancestor=self.key).iter(keys_only=True))\n ndb.delete_multi(Entity.query(Entity.person_key == self.key).iter(keys_only=True))\n self.name_key.delete()\n self.email_key.delete()\n 
self.google_id_key.delete()\n self.key.delete()\n\n\n def reset_api_key(self):\n\n self.api_key = generateNewRandomAlphaNumeric(30)\n self.put()\n\n\n\n def add_config_file(self, name, text, path):\n\n config_uuid = str(uuid.uuid4())\n\n config_file = ConfigFile(id=config_uuid,\n parent=self.key,\n name=name,\n text=text,\n path=path)\n config_file.put()\n return config_file\n\n\n name = property(get_name, set_name)\n email = property(get_email, set_email)\n google_id = property(get_google_id)\n\n\nclass ConfigFile(ndb.Model):\n\n name = ndb.StringProperty()\n text = ndb.TextProperty()\n path = ndb.StringProperty()\n\n def as_json(self, entity):\n\n entity_uuid = entity.key.id()\n template_values = entity.template_values\n\n template = Template(self.text)\n\n return {\n \"text\": template.render(uuid=entity_uuid, **template_values),\n \"path\": self.path\n }\n\n\nclass Entity(ndb.Model):\n\n name = ndb.StringProperty()\n description = ndb.TextProperty()\n created = ndb.DateTimeProperty(auto_now_add=True)\n person_key = ndb.KeyProperty(kind=\"Person\", required=True)\n public_key = ndb.TextProperty()\n private_key = ndb.TextProperty()\n serial = ndb.StringProperty()\n config = ndb.KeyProperty(ConfigFile, repeated=True)\n template_values = ndb.JsonProperty(default={})\n schema = ndb.JsonProperty(default={})\n\n\n def as_json(self):\n\n entity_uuid = self.key.id()\n\n return {\n \"name\": self.name,\n \"description\": self.description,\n \"created\": str(self.created),\n \"person_key\": self.person_key.id(),\n \"public_key\": self.public_key,\n \"config\": [c.get().as_json(self) for c in self.config]\n }\n\n\n @property\n def config_files(self):\n configs = [c.get() for c in self.config]\n return configs\n\n def add_config_file(self, config_file):\n key = config_file.key\n if not key in self.config:\n self.config.append(key)\n self.put()\n\n\n def remove_config_file(self, config_file):\n key = config_file.key\n if key in self.config:\n self.config.remove(key)\n self.put()\n\n\n def regenerate_keys(self):\n public, private = keys.newkeys(2048)\n self.private_key = private.exportKey('PEM')\n self.public_key = public.exportKey('PEM')\n self.put()\n\n return self.private_key\n\n\n @classmethod\n def create(cls, person_key, **kwargs):\n\n public, private = keys.newkeys(2048)\n private_key = private.exportKey('PEM')\n public_key = public.exportKey('PEM')\n entity_uuid = str(uuid.uuid4())\n\n entity = cls(id=entity_uuid,\n person_key=person_key,\n public_key=public_key,\n private_key=private_key,\n **kwargs)\n entity.put()\n\n return entity\n\n\n\n\n\n\n\n\n",
"id": "11218136",
"language": "Python",
"matching_score": 3.1314334869384766,
"max_stars_count": 1,
"path": "src/models.py"
},
{
"content": "\n\nclass NonUniqueException(Exception):\n pass",
"id": "1196314",
"language": "Python",
"matching_score": 0.013430426828563213,
"max_stars_count": 1,
"path": "src/augment_exceptions.py"
},
{
"content": "import json\nimport base64\nimport time\nimport datetime\nimport httplib2\n\nfrom google.appengine.api import app_identity\nfrom mode import *\n\n\ndef get_service():\n\n mode = application_mode()\n if mode == APPLICATION_MODE_PRODUCTION:\n firebase_service = FirebaseService()\n elif mode == APPLICATION_MODE_TEST:\n print \"Using dummy firebase service\"\n firebase_service = DummyFirebaseService()\n else:\n print \"Using dummy firebase service\"\n firebase_service = DummyFirebaseService()\n\n return firebase_service\n\n\nfrom constants import FIREBASE_URL\n\n_FIREBASE_SCOPES = [\n 'https://www.googleapis.com/auth/firebase.database',\n 'https://www.googleapis.com/auth/userinfo.email']\n\n_IDENTITY_ENDPOINT = ('https://identitytoolkit.googleapis.com/google.identity.identitytoolkit.v1.IdentityToolkit')\n\n\ndef create_custom_token(uid, valid_minutes=60):\n \"\"\"Create a secure token for the given id.\n\n This method is used to create secure custom JWT tokens to be passed to\n clients. It takes a unique id (uid) that will be used by Firebase's\n security rules to prevent unauthorized access. In this case, the uid will\n be the channel id which is a combination of user_id and game_key\n \"\"\"\n\n # use the app_identity service from google.appengine.api to get the\n # project's service account email automatically\n client_email = app_identity.get_service_account_name()\n\n # print \"client_email: \", client_email\n\n now = int(time.time())\n # encode the required claims\n # per https://firebase.google.com/docs/auth/server/create-custom-tokens\n payload = base64.b64encode(json.dumps({\n 'iss': client_email,\n 'sub': client_email,\n 'aud': _IDENTITY_ENDPOINT,\n 'uid': uid, # the important parameter, as it will be the channel id\n 'iat': now,\n 'exp': now + (valid_minutes * 60),\n }))\n # add standard header to identify this as a JWT\n header = base64.b64encode(json.dumps({'typ': 'JWT', 'alg': 'RS256'}))\n to_sign = '{}.{}'.format(header, payload)\n # Sign the jwt using the built in app_identity service\n return '{}.{}'.format(to_sign, base64.b64encode(app_identity.sign_blob(to_sign)[1]))\n\n\n\nclass DummyFirebaseService(object):\n\n def send_message(self, u_id, command_json=None):\n\n return None\n\n\nclass FirebaseService(object):\n\n\n def _get_http(self):\n from oauth2client.client import GoogleCredentials\n # from oauth2client.client import GoogleCredentials\n \"\"\"Provides an authed http object.\"\"\"\n http = httplib2.Http()\n # Use application default credentials to make the Firebase calls\n # https://firebase.google.com/docs/reference/rest/database/user-auth\n creds = GoogleCredentials.get_application_default().create_scoped(_FIREBASE_SCOPES)\n creds.authorize(http)\n return http\n\n\n\n def firebase_put(self, path, value=None):\n \"\"\"Writes data to Firebase.\n\n An HTTP PUT writes an entire object at the given database path. 
Updates to\n fields cannot be performed without overwriting the entire object\n\n Args:\n path - the url to the Firebase object to write.\n value - a json string.\n \"\"\"\n response, content = self._get_http().request(path, method='PUT', body=value)\n return json.loads(content)\n\n\n\n def send_message(self, u_id, command_json=None):\n\n url = '{}/channels/{}.json'.format(FIREBASE_URL, u_id)\n\n dt = datetime.datetime.now()\n ts = dt.strftime(\"%Y%m%d-%H%M%S-%f\")\n message_json = {\n ts: command_json\n }\n\n message = json.dumps(message_json, indent=4)\n\n if message:\n return self._get_http().request(url, 'PATCH', body=message)\n else:\n return self._get_http().request(url, 'DELETE')",
"id": "4039171",
"language": "Python",
"matching_score": 2.1790411472320557,
"max_stars_count": 1,
"path": "src/utilities/firebase.py"
},
{
"content": "import os\n\nAPPLICATION_MODE_TEST = \"test\"\nAPPLICATION_MODE_DEVELOPMENT = \"development\"\nAPPLICATION_MODE_PRODUCTION = \"production\"\n\nAPPLICATION_MODE = None\n\ndef application_mode():\n\n global APPLICATION_MODE\n\n if APPLICATION_MODE is None:\n\n server_software = os.environ.get(\"SERVER_SOFTWARE\")\n\n if server_software is None:\n APPLICATION_MODE = APPLICATION_MODE_PRODUCTION\n elif server_software.startswith(\"Development\"):\n APPLICATION_MODE = APPLICATION_MODE_DEVELOPMENT\n else:\n APPLICATION_MODE = APPLICATION_MODE_PRODUCTION\n\n print \"mode: \", APPLICATION_MODE\n return APPLICATION_MODE",
"id": "11887259",
"language": "Python",
"matching_score": 0.043692100793123245,
"max_stars_count": 1,
"path": "src/utilities/mode.py"
},
{
"content": "import unittest\nimport json\n\n# from google.appengine.api import memcache\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext import testbed\n\nfrom models import Person, Entity, Name\nfrom utilities.firebase import create_custom_token\nimport utilities.keys as keys\n\n\nclass ModelsTestCase(unittest.TestCase):\n\n def setUp(self):\n # First, create an instance of the Testbed class.\n self.testbed = testbed.Testbed()\n # Then activate the testbed, which prepares the service stubs for use.\n self.testbed.activate()\n\n self.policy = testbed.datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=0)\n # Next, declare which service stubs you want to use.\n self.testbed.init_datastore_v3_stub(consistency_policy=self.policy)\n self.testbed.init_memcache_stub()\n # Clear ndb's in-context cache between tests.\n # This prevents data from leaking between tests.\n # Alternatively, you could disable caching by\n # using ndb.get_context().set_cache_policy(False)\n ndb.get_context().clear_cache()\n\n\n def tearDown(self):\n self.testbed.deactivate()\n\n\n def testAddPerson(self):\n\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n self.assertIsNotNone(person)\n key = person.key\n got = key.get()\n self.assertIsNotNone(got)\n\n\n def testAddPersonWithSameName(self):\n\n Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n self.assertRaises(Exception, Person.create, \"paul\", \"<EMAIL>\", \"1234567890\")\n\n\n def testChangeName(self):\n\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n # self.assertEqual(1, len(Name.query().fetch(2)))\n self.assertEqual(person.name, \"paul\")\n person.name = \"sol\"\n self.assertEqual(person.name, \"sol\")\n\n name_key = ndb.Key(Name, \"sol\")\n existing_name = name_key.get()\n self.assertIsNotNone(existing_name)\n\n\n def testChangeNameFail(self):\n\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n person2 = Person.create(\"sol\", \"<EMAIL>\", \"987654321\")\n # self.assertEqual(1, len(Name.query().fetch(2)))\n self.assertEqual(person.name, \"paul\")\n self.assertEqual(person2.name, \"sol\")\n\n self.assertRaises(Exception, person.set_name, \"sol\")\n\n\nclass ModelsTestCaseWithoutConsistancy(unittest.TestCase):\n\n def setUp(self):\n # First, create an instance of the Testbed class.\n self.testbed = testbed.Testbed()\n # Then activate the testbed, which prepares the service stubs for use.\n self.testbed.activate()\n\n # self.policy = testbed.datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=0)\n # Next, declare which service stubs you want to use.\n self.testbed.init_datastore_v3_stub()\n self.testbed.init_memcache_stub()\n self.testbed.init_app_identity_stub()\n # Clear ndb's in-context cache between tests.\n # This prevents data from leaking between tests.\n # Alternatively, you could disable caching by\n # using ndb.get_context().set_cache_policy(False)\n ndb.get_context().clear_cache()\n\n\n def tearDown(self):\n self.testbed.deactivate()\n\n\n def testFindByEmail(self):\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n found = Person.with_email(\"<EMAIL>\")\n self.assertEqual(found.name, \"paul\")\n\n\n def testFindByName(self):\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n found = Person.with_name(\"paul\")\n self.assertEqual(found.name, \"paul\")\n\n\n def testFindByGoogleId(self):\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n found = Person.with_google_id(\"123456789\")\n self.assertEqual(found.name, 
\"paul\")\n\n\n def testFindByGoogleIdNotExisting(self):\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n found = Person.with_google_id(\"asdfghjk\")\n self.assertTrue(found is None)\n\n\n def testAddNewEntity(self):\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n entity = person.add_new_entity(name=\"elephant\")\n\n\n def testpersonEntities(self):\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n entity = person.add_new_entity(name=\"elephant\")\n entities = person.entities\n self.assertTrue(len(entities) == 1)\n\n\n def testAddConfigFile(self):\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n entity = person.add_new_entity(name=\"elephant\")\n\n config_file = person.add_config_file(\"test\", \"a whole bunch of text\", \"a/path/file.txt\")\n\n entity.add_config_file(config_file)\n self.assertTrue(len(entity.config) == 1)\n self.assertEqual(entity.config[0].get().text, \"a whole bunch of text\")\n entity.add_config_file(config_file)\n self.assertTrue(len(entity.config) == 1)\n\n config_file_2 = person.add_config_file(\"test2\", \"another a whole bunch of text\", \"a/path/file.txt\")\n entity.add_config_file(config_file_2)\n self.assertTrue(len(entity.config) == 2)\n\n configs = person.configs\n self.assertTrue(len(configs) == 2)\n\n\n def testRemoveConfigFile(self):\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n entity = person.add_new_entity(name=\"elephant\")\n\n config_file = person.add_config_file(\"test\", \"a whole bunch of text\", \"a/path/file.txt\")\n\n entity.add_config_file(config_file)\n self.assertTrue(len(entity.config) == 1)\n self.assertEqual(entity.config[0].get().text, \"a whole bunch of text\")\n entity.add_config_file(config_file)\n self.assertTrue(len(entity.config) == 1)\n\n config_file_2 = person.add_config_file(\"test2\", \"another a whole bunch of text\", \"a/path/file.txt\")\n entity.add_config_file(config_file_2)\n self.assertTrue(len(entity.config) == 2)\n entity.remove_config_file(config_file_2)\n self.assertTrue(len(entity.config) == 1)\n\n\n def test_signing(self):\n\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n entity = person.add_new_entity(name=\"elephant\")\n url = \"https://augment00.org/entity/12345678\"\n salt = \"asdfghjkl\"\n sig = keys.sign_url(url, entity.private_key, salt)\n mine = keys.verify_sig(url, sig, entity.public_key, salt)\n self.assertTrue(mine)\n\n\n def test_signing_fails(self):\n\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n entity = person.add_new_entity(name=\"elephant\")\n url = \"https://augment00.org/entity/12345678\"\n salt = \"asdfghjkl\"\n sig = keys.sign_url(url, entity.private_key, salt)\n mine = keys.verify_sig(url, sig, entity.public_key, \"123456\")\n self.assertFalse(mine)\n\n\n def test_entity_json(self):\n\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n entity = person.add_new_entity(name=\"elephant\")\n config_file = person.add_config_file(\"test\", \"A whole bunch of text\\nwith a line return\", \"a/path/file.txt\")\n entity.add_config_file(config_file)\n as_json = entity.as_json()\n\n as_json_string = json.dumps(entity.as_json(), indent=4)\n\n loaded = json.loads(as_json_string)\n\n self.assertEqual(loaded[\"config\"][0][\"text\"], \"A whole bunch of text\\nwith a line return\")\n\n\n\n def test_config_templating(self):\n\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n entity = person.add_new_entity(name=\"elephant\")\n config_file = 
person.add_config_file(\"test\", \"A whole bunch of text\\nwith uuid {{ uuid }}\", \"a/path/file.txt\")\n entity.add_config_file(config_file)\n as_json = entity.as_json()\n\n as_json_string = json.dumps(entity.as_json(), indent=4)\n\n loaded = json.loads(as_json_string)\n\n self.assertEqual(loaded[\"config\"][0][\"text\"], \"A whole bunch of text\\nwith uuid %s\" % entity.key.id())\n\n\n def test_config_firebase(self):\n\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n entity = person.add_new_entity(name=\"elephant\")\n config_file = person.add_config_file(\"test\", '{\"firebase\": \"{{ firebase }}\"}', \"a/path/file.txt\")\n entity.add_config_file(config_file)\n as_json = entity.as_json()\n\n as_json_string = json.dumps(entity.as_json(), indent=4)\n\n loaded = json.loads(as_json_string)\n\n as_json = json.loads(loaded[\"config\"][0][\"text\"])\n\n\n def test_token(self):\n entity_uuid = \"<KEY>\"\n token = create_custom_token(entity_uuid)\n",
"id": "1291403",
"language": "Python",
"matching_score": 4.556753158569336,
"max_stars_count": 1,
"path": "tests/test_models.py"
},
{
"content": "import unittest\nimport json\n\n# from google.appengine.api import memcache\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext import testbed\n\nfrom models import Person, Entity, Name\n\nimport api\n\n\nclass ApiTestCase(unittest.TestCase):\n\n def setUp(self):\n self.testbed = testbed.Testbed()\n self.testbed.activate()\n self.testbed.init_datastore_v3_stub()\n self.testbed.init_memcache_stub()\n self.app = api.app.test_client()\n ndb.get_context().clear_cache()\n\n\n def tearDown(self):\n self.testbed.deactivate()\n\n\n def testAddEntity(self):\n person = Person.create(\"paul\", \"<EMAIL>\", \"123456789\")\n found = Person.with_email(\"<EMAIL>\")\n self.assertEqual(found.name, \"paul\")\n\n self.assertTrue(person.api_key is not None)\n\n data = {\n \"user_id\": person.get_google_id(),\n \"api_key\": person.api_key,\n \"name\": \"test\",\n \"description\": \"fishcakes\"\n }\n\n url = \"/api/new-entity\"\n\n rsp = self.app.post(url, data=json.dumps(data))\n self.assertTrue(rsp.status_code == 201)\n\n entity_uuid = rsp.data\n entity = Entity.get_by_id(entity_uuid)\n self.assertTrue(entity is not None)\n\n return person, entity\n\n\n def testAddValue(self):\n person, entity = self.testAddEntity()\n\n entity_uuid = entity.key.id()\n\n\n data = {\n \"user_id\": person.get_google_id(),\n \"api_key\": person.api_key,\n \"name\": \"test\",\n \"value\": \"fishfinger\"\n }\n\n url = \"/api/entity/%s/add-value\" % entity_uuid\n\n rsp = self.app.post(url, data=json.dumps(data))\n print rsp.status_code\n self.assertTrue(rsp.status_code == 200)\n\n entity = Entity.get_by_id(entity_uuid)\n\n self.assertEqual(entity.template_values[\"test\"], \"fishfinger\")\n\n\n def testSendCommand(self):\n\n person, entity = self.testAddEntity()\n\n entity_uuid = entity.key.id()\n\n\n data = {\n \"user_id\": person.get_google_id(),\n \"api_key\": person.api_key,\n \"rpc\":{\n \"method\": \"test\",\n \"params\": [\"fishfinger\"]\n }\n }\n\n url = \"/api/entity/%s/send-command\" % entity_uuid\n\n rsp = self.app.post(url, data=json.dumps(data))\n print rsp.status_code\n self.assertTrue(rsp.status_code == 200)\n\n\n",
"id": "2648116",
"language": "Python",
"matching_score": 1.125720500946045,
"max_stars_count": 1,
"path": "tests/test_api.py"
},
{
"content": "import json\n\ntry:\n from requests import *\nexcept ImportError, e:\n import urllib\n from google.appengine.api import urlfetch\n\n\n class ResponseWrapper(object):\n\n def __init__(self, response):\n self.response = response\n\n @property\n def status_code(self):\n return int(self.response.status_code)\n\n @property\n def headers(self):\n return self.response.headers\n\n @property\n def content(self):\n return self.response.content\n\n def json(self):\n return json.loads(self.response.content)\n\n\n def get(url, **kwargs):\n return request(\"GET\", url, **kwargs)\n\n\n def post(url, **kwargs):\n return request(\"POST\", url, **kwargs)\n\n\n def request(method, url, **kwargs):\n\n if \"params\" in kwargs.keys():\n querystring = urllib.urlencode(kwargs[\"params\"])\n url = \"%s?%s\" % (url, querystring)\n\n if \"data\" in kwargs.keys():\n data = kwargs[\"data\"]\n if type(data) == type({}):\n payload = urllib.urlencode(data)\n elif type(data) == type(\"\"):\n payload = data\n elif hasattr(data, \"read\"):\n payload = data.read()\n else:\n payload = None\n else:\n payload = None\n\n if \"json\" in kwargs.keys():\n payload = json.dumps(kwargs[\"json\"])\n\n if \"headers\" in kwargs.keys():\n headers = kwargs[\"headers\"]\n else:\n headers = {}\n\n if \"allow_redirects\" in kwargs.keys():\n follow_redirects = kwargs[\"allow_redirects\"]\n else:\n follow_redirects = True\n\n if \"timeout\" in kwargs.keys():\n deadline = kwargs[\"timeout\"]\n else:\n deadline = 5\n\n if \"verify\" in kwargs.keys():\n validate_certificate = kwargs[\"validate_certificate\"]\n else:\n validate_certificate = False\n\n print \"payload\", payload\n print \"headers\", headers\n print url\n\n resp = urlfetch.fetch(url,\n payload=payload,\n method=method,\n headers=headers,\n allow_truncated=False,\n follow_redirects=follow_redirects,\n deadline=deadline,\n validate_certificate=validate_certificate)\n\n return ResponseWrapper(resp)\n\n\n",
"id": "12380471",
"language": "Python",
"matching_score": 0.6433623433113098,
"max_stars_count": 1,
"path": "src/utilities/requests_shim.py"
},
{
"content": "from functools import wraps\n\nfrom flask import Flask, render_template, redirect, request\nfrom google.appengine.api import users\nfrom models import Person\n\n\ndef render_login_template(template, **kwargs):\n\n user = users.get_current_user()\n if user:\n login_url = users.create_logout_url(request.url)\n url_linktext = 'logout'\n else:\n login_url = users.create_login_url(request.url)\n url_linktext = 'login'\n\n return render_template(template, login_url=login_url, url_linktext=url_linktext, **kwargs)\n\n\ndef with_person(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n google_user = users.get_current_user()\n if google_user is not None:\n google_id = google_user.user_id()\n person = Person.with_google_id(google_id)\n if person is None:\n return redirect(\"/\")\n else:\n return func(*args, person=person, **kwargs)\n else:\n raise Exception(\"no google user in new_person\")\n return decorated_view\n",
"id": "11314913",
"language": "Python",
"matching_score": 3.351944923400879,
"max_stars_count": 1,
"path": "src/shared.py"
},
{
"content": "import logging\n\nfrom google.appengine.api import users\n\nfrom flask import Flask, redirect, flash\n\nfrom constants import *\n\napp = Flask(__name__)\n\napp.secret_key = FLASK_SECRET_KEY\n\nfrom shared import render_login_template, with_person\nfrom models import Person\n\n\n@app.route('/', methods=[\"GET\"])\ndef home():\n google_user = users.get_current_user()\n if google_user is not None:\n google_id = google_user.user_id()\n person = Person.with_google_id(google_id)\n if person is not None:\n return render_login_template(\"account.html\", person=person)\n else:\n flash(\"choose a name and email to use with your augment00 account\", \"info\")\n return redirect(\"/person/new\")\n else:\n return render_login_template(\"intro.html\")\n\n\n@app.route('/about', methods=[\"GET\"])\ndef about():\n google_user = users.get_current_user()\n if google_user is not None:\n google_id = google_user.user_id()\n person = Person.with_google_id(google_id)\n else:\n person = None\n\n return render_login_template(\"about.html\", person=person)\n\n\n@app.errorhandler(500)\ndef server_error(e):\n # Log the error and stacktrace.\n logging.exception('An error occurred during a request.')\n return 'An internal error occurred.', 500",
"id": "4944071",
"language": "Python",
"matching_score": 3.263780117034912,
"max_stars_count": 1,
"path": "src/main.py"
},
{
"content": "import logging\n\nfrom flask import Flask\n\napp = Flask(__name__)\n\n\n@app.route('/admin')\ndef admin_hello():\n return 'augment00 admin'\n\n\n@app.errorhandler(500)\ndef server_error(e):\n # Log the error and stacktrace.\n logging.exception('An error occurred during a request.')\n return 'An internal error occurred.', 500",
"id": "11951012",
"language": "Python",
"matching_score": 0.7263830900192261,
"max_stars_count": 1,
"path": "src/admin.py"
}
] | 2.176196 |
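A usage note on src/shared.py above: the with_person decorator looks up the Person record for the signed-in Google user and passes it into the wrapped Flask view as a person keyword argument; it redirects to "/" when no Person record exists yet, and raises when there is no Google user at all. Below is a minimal sketch of a decorated route, assuming the same App Engine setup as src/main.py; the /account route name and the template name are illustrative only and are not part of the repository.

from flask import Flask
from shared import render_login_template, with_person

app = Flask(__name__)

@app.route("/account", methods=["GET"])
@with_person
def account(person):
    # `person` is injected by with_person after the Google-id lookup;
    # a signed-in user without a Person record was already redirected to "/".
    return render_login_template("account.html", person=person)

Note that the decorated view must accept the person keyword, since with_person calls func(*args, person=person, **kwargs).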
kazweda | [
{
"content": "# https://coderslegacy.com/python/tkinter-canvas/\n# https://twitter.com/aemkei/status/1378106734871461890\nfrom tkinter import *\n\ndef create_box(canvas, x, y):\n a = 4\n coord = x * a, y * a, x * a + a, y * a + a\n canvas.create_rectangle(\n coord,\n fill=\"grey\"\n )\n\ndef draw_xor(canvas):\n for j in range(256):\n for i in range(256):\n if (i ^ j) % 9:\n create_box(canvas, i, j)\n\nroot = Tk()\n\nframe = Frame(root, width=512, height=512)\nframe.pack()\ncanvas = Canvas(frame, width=512, height=512)\ncanvas.pack()\ndraw_xor(canvas)\n\nroot.mainloop()\n",
"id": "11615930",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "drawxor.py"
},
{
"content": "import json\n\nwith open('project.json') as f:\n df = json.load(f)\n\nr = {}\nfor skey in df['targets'][0]['blocks']:\n k = df['targets'][0]['blocks'][skey]['opcode']\n if k in r:\n r[k] += 1\n else:\n r[k] = 1\nprint(sorted(r.items(), key=lambda x:x[0]))\n",
"id": "2874135",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "filecheck.py"
},
{
"content": "def fibo(x):\n if x == 0:\n return 0\n if x <= 2:\n return 1\n return fibo(x-1) + fibo(x-2)",
"id": "3268546",
"language": "Python",
"matching_score": 1,
"max_stars_count": 0,
"path": "fibonacci.py"
},
{
"content": "import unittest\nimport fibonacci\n\nclass TestFibonacci(unittest.TestCase):\n\n cases = [[0,0],[1,1],[2,1],[3,2]]\n\n def test_fibo(self):\n for i in range(len(self.cases)):\n self.assertAlmostEqual(fibonacci.fibo(self.cases[i][0]), self.cases[i][1])\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "2795105",
"language": "Python",
"matching_score": 1,
"max_stars_count": 0,
"path": "test-fibonacci.py"
}
] | 0.5 |
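A quick check of the rule used in drawxor.py above: a cell at (i, j) is drawn exactly when (i ^ j) % 9 is non-zero. The sketch below prints a reduced text rendering of the same rule without tkinter; the 32x32 grid size and the '#' character are arbitrary choices for readability, not taken from the repository.

SIZE = 32  # drawxor.py iterates over a 256x256 grid; a smaller grid keeps console output readable

for j in range(SIZE):
    # Mirror the condition in draw_xor(): fill the cell when (i ^ j) % 9 != 0
    print("".join("#" if (i ^ j) % 9 else " " for i in range(SIZE)))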
muziyongshixin | [
{
"content": "import argparse\nimport logging\nimport os\nimport pickle\nimport socket\nimport time\nfrom datetime import datetime\nfrom pprint import pformat\n\nimport numpy\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport enhanced_data as data\nfrom binary_SAEM_model import BinarySAEM\nfrom screen_model import BinaryBaseModel\nfrom evaluation import validate_binary, validate\nfrom SAEM_model import SAEM\nfrom reranking_model import ReRankSAEM\n\nlogger = logging.getLogger(__name__)\nfrom utils import init_logging, get_hamming_dist\n\nglobal tb_logger\n\n\ndef main():\n global tb_logger\n # Hyper Parameters\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', default='data/scan_data/',\n help='path to datasets')\n\n ####################################### coco data #############################################\n # parser.add_argument('--image_root_dir', default='data/mscoco/',\n # help='path to coco_root_dir')\n # parser.add_argument('--concept_file_path', default='data/coco_concept/coco_imgid_to_rela+obj+categ_vec.pickle', #'/S4/MI/liyz/data/coco_concept/new_imgid_2_concept_idxs.pkl',\n # help='path concept label file')\n # parser.add_argument('--concept_num', default=642 + 1000 + 91, type=int, help='caption的 concept标签类别数')\n # parser.add_argument('--data_name', default='coco_precomp',\n # help='{coco,f30k}_precomp')\n ####################################### above coco data #############################################\n\n\n ####################################### flickr data #############################################\n parser.add_argument('--image_root_dir', default='data/f30k/flickr30/images',\n help='path to coco_root_dir')\n parser.add_argument('--concept_file_path', default='data/f30k_concept/f30k_imgid_to_obj_rela_concept_vec.pickle',\n help='path concept label fi le')\n parser.add_argument('--concept_num', default=2000, type=int,\n help='caption的 concept标签类别数')\n parser.add_argument('--data_name', default='f30k_precomp',\n help='{coco,f30k}_precomp')\n ####################################### above flickr data #############################################\n\n parser.add_argument('--need_raw_image', default=0, type=int,\n help='是否使用原始图片作为输入,1表示需要,0表示不需要')\n parser.add_argument('--need_roi_feature', default=1, type=int,\n help='是否需要使用faster rcnn提取的roi feature作为输入')\n parser.add_argument('--need_adversary_data', default=0, type=int,\n help='是否使用adversary的文本数据进行训练')\n parser.add_argument('--need_rephrase_data', default=0, type=int,\n help='是否使用rephrase的文本数据进行训练')\n parser.add_argument('--need_concept_label', default=0, type=int,\n help='是否使用文本的concept label进行训练')\n\n parser.add_argument('--part_train_data', default='', type=str, help='和hash方法比较的时候只使用1w训练集')\n\n parser.add_argument('--adversary_step', default=-1, type=int,\n help='After how many epochs to start adversary training')\n parser.add_argument('--adversary_num', default=10, type=int,\n help='After how many epochs to start adversary training')\n parser.add_argument('--adversary_type', default='noun', type=str,\n help='the adversary sample type {noun,num,rela,mixed}')\n parser.add_argument('--adv_margin', default=0.5, type=float,\n help='the adversary loss margin')\n\n parser.add_argument('--image_size', default=256, type=int,\n help='the raw image size to feed into the image network')\n parser.add_argument('--model_name', default='rerank_model',\n help='{coco,f30k}_precomp')\n parser.add_argument('--margin', default=0.2, type=float,\n help='Rank loss margin.')\n 
parser.add_argument('--num_epochs', default=100, type=int,\n help='Number of training epochs.')\n parser.add_argument('--batch_size', default=64, type=int,\n help='Size of a training mini-batch.')\n parser.add_argument('--word_dim', default=300, type=int,\n help='Dimensionality of the word embedding.')\n parser.add_argument('--embed_size', default=1024, type=int,\n help='Dimensionality of the joint embedding.')\n parser.add_argument('--grad_clip', default=2., type=float,\n help='Gradient clipping threshold.')\n parser.add_argument('--learning_rate', default=.0001, type=float,\n help='Initial learning rate.')\n parser.add_argument('--lr_update', default=10, type=int,\n help='Number of epochs to update the learning rate.')\n parser.add_argument('--workers', default=4, type=int,\n help='Number of data loader workers.')\n parser.add_argument('--data_eager', default=False,\n help='Number of data loader workers.')\n parser.add_argument('--log_step', default=10, type=int,\n help='Number of steps to print and record the log.')\n parser.add_argument('--val_step', default=1000, type=int,\n help='Number of steps to run validation.')\n parser.add_argument('--logger_name', default='./runs/runX/log',\n help='Path to save Tensorboard log.')\n\n parser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n parser.add_argument('--max_violation', default=True, action='store_true',\n help='Use max instead of sum in the rank loss.')\n parser.add_argument('--img_dim', default=2048, type=int,\n help='Dimensionality of the image embedding.')\n parser.add_argument('--final_dims', default=256, type=int,\n help='dimension of final codes.')\n parser.add_argument('--max_words', default=32, type=int,\n help='maximum number of words in a sentence.')\n parser.add_argument(\"--bert_path\",\n default='data/bert_ckpt/uncased_L-12_H-768_A-12/',\n type=str,\n help=\"The BERT model path.\")\n parser.add_argument(\"--txt_stru\", default='cnn',\n help=\"Whether to use pooling or cnn or rnn\")\n parser.add_argument(\"--trans_cfg\", default='t_cfg.json',\n help=\"config file for image transformer\")\n parser.add_argument(\"--remark\", default='',\n help=\"description about the experiments\")\n parser.add_argument(\"--binary\", default='True',\n help=\"generate binary hash code?\")\n\n parser.add_argument('--test_split', default='test', help='test data split name [test/testall]')\n opt = parser.parse_args()\n logger.info(pformat(vars(opt)))\n\n logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO)\n TIMESTAMP = \"{0:%Y-%m-%dT%H-%M-%S/}\".format(datetime.now())\n opt.logger_name = opt.logger_name + TIMESTAMP\n\n # create experiments dir\n exp_root_dir = 'runs/testing/'\n cur_time = time.localtime(int(time.time()))\n time_info = time.strftime('%Y_%m_%d,%H_%M_%S', cur_time)\n host_name = socket.gethostname()\n exp_name = '{data_name}/{time}_{host}_{remark}'.format(data_name=opt.data_name, time=time_info, host=host_name,\n remark=opt.remark)\n exp_dir = os.path.join(exp_root_dir, exp_name)\n if not os.path.exists(exp_dir):\n os.makedirs(exp_dir)\n opt.exp_dir = exp_dir\n\n init_logging(opt.exp_dir)\n\n tb_dir = os.path.join(exp_dir, 'tensor_board')\n os.makedirs(tb_dir)\n tb_logger = SummaryWriter(log_dir=tb_dir)\n\n opt.vocab_file = opt.bert_path + 'vocab.txt'\n opt.bert_config_file = opt.bert_path + 'bert_config.json'\n opt.init_checkpoint = opt.bert_path + 'pytorch_model.bin'\n opt.do_lower_case = True\n\n # Load data loaders\n test_loader = 
data.get_test_loader(opt.test_split, opt.data_name, opt.batch_size, opt.workers, opt)\n\n # Construct the modea\n if opt.model_name == 'screen_model':\n model = BinaryBaseModel(opt)\n elif opt.model_name == 'rerank_model':\n model = ReRankSAEM(opt)\n elif opt.model_name == 'binary_saem':\n model = BinarySAEM(opt)\n else:\n model = SAEM(opt)\n\n if os.path.isfile(opt.resume):\n logger.info(\"=> loading checkpoint '{}'\".format(opt.resume))\n checkpoint = torch.load(opt.resume)\n start_epoch = checkpoint['epoch']\n best_rsum = checkpoint['best_rsum']\n model.load_state_dict(checkpoint['model'])\n # Eiters is used to show logs as the continuation of another\n # training\n model.Eiters = checkpoint['Eiters']\n logger.info(\"=> loaded checkpoint '{}' (epoch {}, best_rsum {})\".format(opt.resume, start_epoch, best_rsum))\n if opt.binary == 'True':\n logger.info('validation in the binary mode....')\n validate_binary(opt, test_loader, model, save_sm_matrix_dir=exp_dir,save_hash_code=False)\n else:\n validate(opt, test_loader, model, save_sm_matrix_dir=exp_dir)\n else:\n logger.error(\"=> no checkpoint found at '{}'\".format(opt.resume))\n\n\nif __name__ == '__main__':\n main()\n",
"id": "9654648",
"language": "Python",
"matching_score": 5.567138195037842,
"max_stars_count": 2,
"path": "test.py"
},
{
"content": "# -*- coding: utf-8 -*-\nimport argparse\nimport logging\nimport logging.config\nimport os\nimport shutil\nimport socket\nimport time\nfrom pprint import pformat\n\nimport torch\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport enhanced_data as data\nfrom binary_SAEM_model import BinarySAEM\nfrom evaluation import AverageMeter, LogCollector\nfrom evaluation import validate, validate_binary\nfrom SAEM_model import SAEM\nfrom screen_model import BinaryBaseModel\nfrom reranking_model import ReRankSAEM\nfrom utils import init_logging\n\nlogger = logging.getLogger(__name__)\nglobal tb_logger\n\ndef main():\n global tb_logger\n # Hyper Parameters\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', default='data/scan_data/',\n help='path to datasets')\n\n ####################################### coco data #############################################\n parser.add_argument('--image_root_dir', default='data/mscoco/',\n help='path to coco_root_dir')\n parser.add_argument('--concept_file_path', default='data/coco_concept/coco_imgid_to_rela+obj+categ_vec.pickle', #'/S4/MI/liyz/data/coco_concept/new_imgid_2_concept_idxs.pkl',\n help='path concept label file')\n parser.add_argument('--concept_num', default=642 + 1000 + 91, type=int, help='caption的 concept标签类别数')\n parser.add_argument('--data_name', default='coco_precomp',\n help='{coco,f30k}_precomp')\n\n ####################################### flickr data #############################################\n # parser.add_argument('--image_root_dir', default='data/f30k/flickr30/images',\n # help='path to coco_root_dir')\n # parser.add_argument('--concept_file_path', default='data/f30k_concept/f30k_imgid_to_obj_rela_concept_vec.pickle',\n # help='path concept label file')\n # parser.add_argument('--concept_num', default=2000, type=int,\n # help='caption的 concept标签类别数')\n # parser.add_argument('--data_name', default='f30k_precomp',\n # help='{coco,f30k}_precomp')\n\n\n parser.add_argument('--need_raw_image', default=0,type=int,\n help='是否使用原始图片作为输入,1表示需要,0表示不需要')\n parser.add_argument('--need_roi_feature', default=1,type=int,\n help='是否需要使用faster rcnn提取的roi feature作为输入')\n parser.add_argument('--need_adversary_data', default=0,type=int,\n help='是否使用adversary的文本数据进行训练')\n parser.add_argument('--need_rephrase_data', default=0,type=int,\n help='是否使用rephrase的文本数据进行训练')\n parser.add_argument('--need_concept_label', default=1,type=int,\n help='是否使用文本的concept label进行训练')\n\n parser.add_argument('--part_train_data',default='', type=str,help='和hash方法比较的时候只使用1w训练集')\n\n parser.add_argument('--adversary_step', default=-1, type=int,\n help='After how many epochs to start adversary training')\n parser.add_argument('--adversary_num', default=10, type=int,\n help='use how many adversary sentences in training')\n parser.add_argument('--adversary_type', default='noun', type=str,\n help='the adversary sample type {noun,num,rela,mixed}')\n parser.add_argument('--adv_margin', default=0.5, type=float,\n help='the adversary loss margin')\n\n parser.add_argument('--image_size', default=256, type=int,\n help='the raw image size to feed into the image network')\n parser.add_argument('--model_name', default='rerank_model',\n help='{rerank,screen,binary_saem}_model')\n parser.add_argument('--margin', default=0.2, type=float,\n help='Rank loss margin.')\n parser.add_argument('--num_epochs', default=100, type=int,\n help='Number of training epochs.')\n parser.add_argument('--batch_size', default=128, type=int,\n help='Size of a training 
mini-batch.')\n parser.add_argument('--word_dim', default=300, type=int,\n help='Dimensionality of the word embedding.')\n parser.add_argument('--embed_size', default=1024, type=int,\n help='Dimensionality of the joint embedding.')\n parser.add_argument('--grad_clip', default=2., type=float,\n help='Gradient clipping threshold.')\n parser.add_argument('--learning_rate', default=.0001, type=float,\n help='Initial learning rate.')\n parser.add_argument('--lr_update', default=10, type=int,\n help='Number of epochs to update the learning rate.')\n parser.add_argument('--workers', default=4, type=int,\n help='Number of data loader workers.')\n parser.add_argument('--data_eager', default=False,\n help='Number of data loader workers.')\n parser.add_argument('--log_step', default=10, type=int,\n help='Number of steps to print and record the log.')\n parser.add_argument('--val_step', default=1000, type=int,\n help='Number of steps to run validation.')\n parser.add_argument('--logger_name', default='./runs/runX/log',\n help='Path to save Tensorboard log.')\n\n parser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n parser.add_argument('--max_violation', default=True, action='store_true',\n help='Use max instead of sum in the rank loss.')\n parser.add_argument('--img_dim', default=2048, type=int,\n help='Dimensionality of the image embedding.')\n parser.add_argument('--final_dims', default=256, type=int,\n help='dimension of final codes.')\n parser.add_argument('--max_words', default=32, type=int,\n help='maximum number of words in a sentence.')\n parser.add_argument(\"--bert_path\",\n default='data/bert_ckpt/uncased_L-12_H-768_A-12/',\n type=str,\n help=\"The BERT model path.\")\n parser.add_argument(\"--txt_stru\", default='cnn',\n help=\"Whether to use pooling or cnn or rnn\")\n parser.add_argument(\"--trans_cfg\", default='t_cfg.json',\n help=\"config file for image transformer\")\n parser.add_argument(\"--remark\", default='',\n help=\"description about the experiments\")\n parser.add_argument(\"--binary\", default='True',\n help=\"generate binary hash code?\")\n\n\n opt = parser.parse_args()\n\n\n\n # create experiments dir\n exp_root_dir = 'runs/'\n cur_time = time.localtime(int(time.time()))\n time_info = time.strftime('%Y_%m_%d,%H_%M_%S', cur_time)\n host_name = socket.gethostname()\n exp_name = '{data_name}/{time}_{host}_{remark}'.format(data_name=opt.data_name, time=time_info, host=host_name,\n remark=opt.remark)\n exp_dir = os.path.join(exp_root_dir, exp_name)\n if not os.path.exists(exp_dir):\n os.makedirs(exp_dir)\n opt.exp_dir = exp_dir\n\n init_logging(opt.exp_dir)\n\n # pprint(vars(opt))\n logger.info(pformat(vars(opt)))\n\n # logging.basicConfig(format='%(asctime)s %(message)s', level=logging.INFO,filename=os.path.join(exp_dir,'info.log'),filemode='a')\n # TIMESTAMP = \"{0:%Y-%m-%dT%H-%M-%S/}\".format(datetime.now())\n # opt.logger_name = opt.logger_name + TIMESTAMP\n\n # tb_logger.configure(opt.logger_name, flush_secs=5)\n tb_dir = os.path.join(exp_dir, 'tensor_board')\n os.makedirs(tb_dir)\n tb_logger = SummaryWriter(log_dir=tb_dir)\n\n opt.vocab_file = os.path.join(opt.bert_path, 'vocab.txt')\n opt.bert_config_file = os.path.join(opt.bert_path, 'bert_config.json')\n opt.init_checkpoint = os.path.join(opt.bert_path, 'pytorch_model.bin')\n opt.do_lower_case = True\n\n # Load data loaders\n train_loader, val_loader = data.get_loaders(opt.data_name, opt.batch_size, opt.workers, opt)\n\n # Construct the model\n if 
opt.model_name == 'rerank_model':\n model = ReRankSAEM(opt)\n logger.info('Model name is ReRankSAEM model')\n elif opt.model_name == 'screen_model':\n model = BinaryBaseModel(opt)\n logger.info('Model name is Binary Base model')\n elif opt.model_name == 'binary_saem_model':\n logger.warning('Training SAEM binary models !!!!!!!!!!.....')\n model = BinarySAEM(opt)\n else:\n model = SAEM(opt)\n\n start_epoch = 0\n best_rsum = 0\n # optionally resume from a checkpoint\n if opt.resume:\n if os.path.isfile(opt.resume):\n logger.info(\"=> loading checkpoint '{}'\".format(opt.resume))\n checkpoint = torch.load(opt.resume)\n start_epoch = checkpoint['epoch']\n best_rsum = checkpoint['best_rsum']\n model.load_state_dict(checkpoint['model'])\n # Eiters is used to show logs as the continuation of another\n # training\n model.Eiters = checkpoint['Eiters']\n logger.info(\"=> loaded checkpoint '{}' (epoch {}, best_rsum {})\"\n .format(opt.resume, start_epoch, best_rsum))\n validate(opt, val_loader, model)\n else:\n logger.error(\"=> no checkpoint found at '{}'\".format(opt.resume))\n\n if torch.cuda.device_count() > 1:\n model.use_data_parallel()\n logger.info('=> using data parallel...')\n\n # Train the Model\n for epoch in range(start_epoch, opt.num_epochs):\n adjust_learning_rate(opt, model.optimizer, epoch)\n\n # train for one epoch\n train(opt, train_loader, model, epoch, val_loader)\n\n # evaluate on validation set\n rsum = validate_binary(opt, val_loader, model, tb_logger=tb_logger)\n # rsum = validate(opt, val_loader, model)\n\n # remember best R@ sum and save checkpoint\n is_best = rsum > best_rsum\n best_rsum = max(rsum, best_rsum)\n if is_best:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'model': model.state_dict(),\n 'best_rsum': best_rsum,\n 'opt': opt,\n 'Eiters': model.Eiters,\n }, is_best, filename='checkpoint_{}.pth.tar'.format(epoch), prefix=os.path.join(exp_dir, 'checkpoints'))\n\n\ndef train(opt, train_loader, model, epoch, val_loader):\n logger.info('=================== start training epoch {} ================'.format(epoch))\n # average meters to record the training statistics\n batch_time = AverageMeter()\n data_time = AverageMeter()\n train_logger = LogCollector()\n # make sure train logger is used\n model.logger = train_logger\n\n end = time.time()\n for i, train_data in enumerate(train_loader):\n # switch to train mode\n model.train_start()\n\n # measure data loading time\n data_time.update(time.time() - end)\n\n # Update the model\n model.train_emb(epoch, train_data)\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n # Print log info\n if model.Eiters % opt.log_step == 0:\n logger.info('[{0}][{1}/{2}] {e_log}'.format(epoch, i, len(train_loader), e_log=str(model.logger)))\n\n\n # validate at every val_step\n if model.Eiters % opt.val_step == 0:\n validate_binary(opt, val_loader, model, tb_logger=tb_logger)\n # else:\n # rsum = validate(opt, val_loader, model)\n # validate(opt, val_loader, model)\n # validate_binary(opt, val_loader, model)\n # Record logs in tensorboard\n tb_logger.add_scalar('train/epoch', epoch, global_step=model.Eiters)\n tb_logger.add_scalar('train/batch_time', batch_time.avg, global_step=epoch)\n tb_logger.add_scalar('train/data_time', data_time.avg, global_step=epoch)\n model.logger.tb_log(tb_logger, prefix='train/', step=epoch)\n\n\n#\n# def validate(opt, val_loader, model):\n# # compute the encoding for all the validation images and captions\n# img_embs, cap_embs, cap_lens = encode_data(model, val_loader, opt, 
opt.log_step, logger.info)\n#\n# img_embs = numpy.array([img_embs[i] for i in range(0, len(img_embs), 5)])\n#\n# start = time.time()\n# sims = 1 - cdist(img_embs, cap_embs, metric='cosine')\n# end = time.time()\n# logger.info(\"calculate similarity time:{}\".format(end - start))\n#\n# # caption retrieval\n# (r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, cap_lens, sims)\n# logger.info(\"Image to text: %.1f, %.1f, %.1f, %.1f, %.1f\" % (r1, r5, r10, medr, meanr))\n# # image retrieval\n# (r1i, r5i, r10i, medri, meanr) = t2i(img_embs, cap_embs, cap_lens, sims)\n# logger.info(\"Text to image: %.1f, %.1f, %.1f, %.1f, %.1f\" % (r1i, r5i, r10i, medri, meanr))\n# # sum of recalls to be used for early stopping\n# currscore = r1 + r5 + r10 + r1i + r5i + r10i\n#\n# # record metrics in tensorboard\n# tb_logger.add_scalar('val/r1_i2t', r1, global_step=model.Eiters)\n# tb_logger.add_scalar('val/r5_i2t', r5, global_step=model.Eiters)\n# tb_logger.add_scalar('val/r10_i2t', r10, global_step=model.Eiters)\n# tb_logger.add_scalar('val/medr_i2t', medr, global_step=model.Eiters)\n# tb_logger.add_scalar('val/meanr_i2t', meanr, global_step=model.Eiters)\n# tb_logger.add_scalar('val/r1i_t2i', r1i, global_step=model.Eiters)\n# tb_logger.add_scalar('val/r5i_t2i', r5i, global_step=model.Eiters)\n# tb_logger.add_scalar('val/r10i_t2i', r10i, global_step=model.Eiters)\n# tb_logger.add_scalar('val/medri_t2i', medri, global_step=model.Eiters)\n# tb_logger.add_scalar('val/meanr_t2i', meanr, global_step=model.Eiters)\n# tb_logger.add_scalar('val/rsum_t2i', currscore, global_step=model.Eiters)\n#\n# return currscore\n\n\n# def validate_binary(opt, val_loader, model):\n# # compute the encoding for all the validation images and captions\n# img_embs, cap_embs, cap_lens = encode_data(model, val_loader, opt, opt.log_step, logger.info)\n#\n# img_embs = img_embs[::5, ...]\n# img_embs = torch.sign(torch.from_numpy(img_embs)).long()\n# cap_embs = torch.sign(torch.from_numpy(cap_embs)).long()\n#\n# start = time.time()\n# sims = get_hamming_dist(img_embs, cap_embs) # hamming distance matrix 1000*5000\n# end = time.time()\n# logger.info(\"calculate similarity time:{}\".format(end - start))\n#\n# # caption retrieval\n# topk_r_i2tb = i2t_binary(sims, topk=(1, 5, 10, 50, 100, 200))\n# logger.info(\"Image to text: {}\".format(str(topk_r_i2tb)))\n# # image retrieval\n# topk_r_t2ib = t2i_binary(sims, topk=(1, 5, 10, 50, 100, 200))\n# logger.info(\"Text to image: {}\".format(str(topk_r_t2ib)))\n# # sum of recalls to be used for early stopping\n# currscore = [ri2t for k, ri2t in topk_r_i2tb.items()] + [rt2i for k, rt2i in topk_r_t2ib.items()]\n# currscore = sum(currscore)\n#\n# # record metrics in tensorboard\n# for k, recall in topk_r_i2tb.items():\n# tb_logger.add_scalar('val/i2t_{}'.format(k), recall, global_step=model.Eiters)\n# for k, recall in topk_r_t2ib.items():\n# tb_logger.add_scalar('val/t2i_{}'.format(k), recall, global_step=model.Eiters)\n#\n# return currscore\n\n\ndef save_checkpoint(state, is_best, filename='checkpoint.pth.tar', prefix=''):\n tries = 3\n error = None\n if not os.path.exists(prefix):\n os.makedirs(prefix)\n\n save_path = os.path.join(prefix, filename)\n best_path = os.path.join(prefix, 'model_best.pth.tar')\n # deal with unstable I/O. 
Usually not necessary.\n while tries:\n try:\n torch.save(state, save_path)\n logger.info('save checkpoint to {}'.format(save_path))\n if is_best:\n shutil.copyfile(save_path, best_path)\n logger.info('copy best checkpoint to {}'.format(best_path))\n except IOError as e:\n error = e\n tries -= 1\n else:\n break\n logging.error('model save {} failed, remaining {} trials'.format(filename, tries))\n if not tries:\n raise error\n\n\ndef adjust_learning_rate(opt, optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR\n decayed by 10 every 30 epochs\"\"\"\n\n lr = opt.learning_rate * (0.1 ** (epoch // opt.lr_update))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\n\nif __name__ == '__main__':\n main()\n",
"id": "6602588",
"language": "Python",
"matching_score": 5.25858736038208,
"max_stars_count": 2,
"path": "train_reranking.py"
},
{
"content": "from __future__ import print_function\nimport os\n\nimport sys\nfrom data import get_test_loader\nimport time\nimport numpy as np\nimport torch\nfrom SAEM_model import SAEM\nfrom collections import OrderedDict\nimport time\nimport logging\nfrom utils import get_hamming_dist, save_similarity_matrix\nfrom scipy.spatial.distance import cdist\nimport pickle\n\nlogger = logging.getLogger(__name__)\n\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=0):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / (.0001 + self.count)\n\n def __str__(self):\n \"\"\"String representation for logging\n \"\"\"\n # for values that should be recorded exactly e.g. iteration number\n if self.count == 0:\n return str(self.val)\n # for stats\n return '%.4f (%.4f)' % (self.val, self.avg)\n\n\nclass LogCollector(object):\n \"\"\"A collection of logging objects that can change from train to val\"\"\"\n\n def __init__(self):\n # to keep the order of logged variables deterministic\n self.meters = OrderedDict()\n\n def clear_all(self):\n del self.meters\n self.meters = OrderedDict()\n\n def update(self, k, v, n=0):\n # create a new meter if previously not recorded\n if k not in self.meters:\n self.meters[k] = AverageMeter()\n self.meters[k].update(v, n)\n\n def __str__(self):\n \"\"\"Concatenate the meters in one log line\n \"\"\"\n s = ''\n for i, (k, v) in enumerate(self.meters.items()):\n if i > 0:\n s += ' '\n s += k + ' ' + str(v)\n return s\n\n def tb_log(self, tb_logger, prefix='', step=None):\n \"\"\"Log using tensorboard\n \"\"\"\n for k, v in self.meters.items():\n tb_logger.add_scalar(prefix + k, v.avg, global_step=step)\n\n\ndef encode_data(model, data_loader, log_step=10, tb_logger=None):\n \"\"\"Encode all images and captions loadable by `data_loader`, 数据中不包含 adversary data和 rephrase data\n \"\"\"\n batch_time = AverageMeter()\n val_logger = LogCollector()\n\n # switch to evaluate mode\n model.val_start()\n\n end = time.time()\n\n # np array to keep all the embeddings\n img_embs = None\n cap_embs = None\n cap_lens = None\n\n # max_n_word = 0\n # for i, (images, input_ids, attention_mask, token_type_ids, lengths, ids) in enumerate(data_loader):\n # max_n_word = max(max_n_word, max(lengths))\n eval_start_time=time.time()\n for i, batch_data in enumerate(data_loader):\n # make sure val logger is used\n model.logger = val_logger\n\n # compute the embeddings\n ids, img_emb, cap_emb, re_phrase_emb, adv_emb, concept_data, cap_len = model.forward_emb(20, batch_data,\n volatile=True)\n # img_emb, cap_emb, cap_len, ids = model.forward_emb(20, batch_data, volatile=True)\n # print(img_emb)\n if img_embs is None:\n if img_emb.dim() == 3:\n img_embs = np.zeros((len(data_loader.dataset), img_emb.size(1), img_emb.size(2)))\n else:\n img_embs = np.zeros((len(data_loader.dataset), img_emb.size(1)))\n cap_embs = np.zeros((len(data_loader.dataset), cap_emb.size(1)))\n cap_lens = [0] * len(data_loader.dataset)\n # cache embeddings\n ids = batch_data[-1]\n img_embs[ids] = img_emb.data.cpu().numpy().copy()\n cap_embs[ids] = cap_emb.data.cpu().numpy().copy()\n # for j, nid in enumerate(ids):\n # cap_lens[nid] = cap_len[j]\n\n # measure accuracy and record loss\n model.forward_loss(10, img_emb, cap_emb, re_phrase_emb, adv_emb, cap_len, ids, concept_data=concept_data)\n\n # measure elapsed time\n 
batch_time.update(time.time() - end)\n end = time.time()\n\n if i % log_step == 0:\n logger.info('Test: [{0}/{1}]\\t {e_log}\\t Time {batch_time.val:.3f}({batch_time.avg:.3f})\\t'\n .format(i, len(data_loader), batch_time=batch_time, e_log=str(model.logger)))\n # del images, input_ids, attention_mask, token_type_ids\n if tb_logger is not None:\n model.logger.tb_log(tb_logger, prefix='val/', step=model.Eiters)\n logger.info('evaluation use time is {}'.format(time.time()-eval_start_time))\n return img_embs, cap_embs, cap_lens\n\n\ndef encode_data_with_adversary(model, data_loader, log_step=10, tb_logger=None):\n \"\"\"Encode all images and captions loadable by `data_loader`,数据中包含adversary data\n \"\"\"\n batch_time = AverageMeter()\n val_logger = LogCollector()\n\n # switch to evaluate mode\n model.val_start()\n\n end = time.time()\n\n # np array to keep all the embeddings\n img_embs = []\n cap_embs = []\n cap_lens = []\n\n for i, batch_data in enumerate(data_loader):\n # make sure val logger is used\n model.logger = val_logger\n\n # compute the embeddings\n ids, img_emb, cap_emb, re_phrase_emb, adv_emb, concept_data, cap_len = model.forward_emb(200, batch_data,\n volatile=True)\n # B, dim B,dim B,R,dim B,A,dim\n batch_size, code_dim = img_emb.shape\n\n # measure accuracy and record loss\n model.forward_loss(10, img_emb, cap_emb, re_phrase_emb, adv_emb, cap_len, ids, concept_data=concept_data)\n\n for j in range(batch_size):\n img_embs.append(img_emb[j].cpu().detach())\n tmp_cap = [cap_emb[j].cpu().detach(), re_phrase_emb[j].cpu().detach(), adv_emb[j].cpu().detach()]\n cap_embs.append(tmp_cap)\n\n del ids, img_emb, cap_emb, re_phrase_emb, adv_emb, cap_len\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % log_step == 0:\n logger.info('Test: [{0}/{1}]\\t{e_log}\\tTime {batch_time.val:.3f}({batch_time.avg:.3f})\\t'\n .format(i, len(data_loader), batch_time=batch_time, e_log=str(model.logger)))\n # del images, input_ids, attention_mask, token_type_ids\n if tb_logger is not None:\n model.logger.tb_log(tb_logger, prefix='val/', step=model.Eiters)\n return img_embs, cap_embs, cap_lens\n\n\ndef i2t(images, captions, caplens, sims, npts=None, return_ranks=False):\n \"\"\"\n Images->Text (Image Annotation)\n Images: (N, n_region, d) matrix of images\n Captions: (5N, max_n_word, d) matrix of captions\n CapLens: (5N) array of caption lengths\n sims: (N, 5N) matrix of similarity im-cap\n 每张图片查出五个gt句子,将gt句子中rank最高的作为结果\n \"\"\"\n npts = images.shape[0]\n ranks = np.zeros(npts)\n top1 = np.zeros(npts)\n for index in range(npts):\n inds = np.argsort(sims[index])[::-1]\n # Score\n rank = 1e20\n for i in range(5 * index, 5 * index + 5, 1):\n tmp = np.where(inds == i)[0][0]\n if tmp < rank:\n rank = tmp\n ranks[index] = rank\n top1[index] = inds[0]\n\n # Compute metrics\n r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)\n r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)\n r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)\n medr = np.floor(np.median(ranks)) + 1\n meanr = ranks.mean() + 1\n if return_ranks:\n return (r1, r5, r10, medr, meanr), (ranks, top1)\n else:\n return (r1, r5, r10, medr, meanr)\n\n\ndef t2i(images, captions, caplens, sims, npts=None, return_ranks=False):\n \"\"\"\n Text->Images (Image Search)\n Images: (N, n_region, d) matrix of images\n Captions: (5N, max_n_word, d) matrix of captions\n CapLens: (5N) array of caption lengths\n sims: (N, 5N) matrix of similarity im-cap\n 五个句子分别查,得到的结果取平均。\n \"\"\"\n npts = images.shape[0]\n ranks 
= np.zeros(5 * npts)\n top1 = np.zeros(5 * npts)\n\n # --> (5N(caption), N(image))\n sims = sims.T\n\n for index in range(npts):\n for i in range(5):\n inds = np.argsort(sims[5 * index + i])[::-1]\n ranks[5 * index + i] = np.where(inds == index)[0][0]\n top1[5 * index + i] = inds[0]\n\n # Compute metrics\n r1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)\n r5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)\n r10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)\n medr = np.floor(np.median(ranks)) + 1\n meanr = ranks.mean() + 1\n if return_ranks:\n return (r1, r5, r10, medr, meanr), (ranks, top1)\n else:\n return (r1, r5, r10, medr, meanr)\n\n\ndef t2i_binary(matching_score_matrix, topk=(1, 5, 10, 50, 100, 200)):\n ''' 计算text查image 的结果。 每一行是一个text 和所有5000个image haming 距离\n :param matching_score_matrix: 5000*1000 tensor matrix\n :param topk: tuple to calculate different topk accuracy score\n :return: list\n '''\n # assert matching_score_matrix.shape==(5000,1000)\n\n msm = matching_score_matrix.clone()\n msm = msm.transpose_(0, 1) # size 5000,1000\n\n n_caps, n_imgs = msm.shape\n assert n_caps == n_imgs * 5\n\n result = {}\n for k in topk:\n max_score, max_score_idx = msm.topk(k=k, dim=1, largest=False)\n correct_count = 0\n for i in range(n_caps):\n cur_i_topk = max_score_idx[i]\n j = i // 5\n if j in cur_i_topk:\n correct_count += 1\n\n acc = correct_count / n_caps\n result[k] = acc\n return result\n\n\ndef i2t_binary(matching_score_matrix, topk=(1, 5, 10, 50, 100, 200)):\n '''计算image查text的结果。 每一行是一个image 和所有5000个captions haming 距离\n :param matching_score_matrix: n*n tensor matrix\n :param topk: tuple to calculate different topk accuracy score\n :return: list\n '''\n msm = matching_score_matrix.clone() # size 1000,5000\n n_imgs, n_caps = msm.shape\n assert n_imgs * 5 == n_caps\n result = {}\n for k in topk:\n max_score, max_score_idx = msm.topk(k=k, dim=1, largest=False)\n correct_count = 0\n for i in range(n_imgs):\n cur_i_topk = max_score_idx[i]\n for j in range(i * 5, i * 5 + 5): # 五句话只要有一句在里面就可以认为命中\n if j in cur_i_topk:\n correct_count += 1\n break\n\n acc = correct_count / n_imgs\n result[k] = acc\n return result\n\n\ndef img2text_adversary(img_embs, cap_embs, **kwargs):\n '''\n :param img_embs: List, [one image code]* 5000\n :param cap_embs: List, [[ori_cap_code,reph_cap_code,adv_cap_code]]*5000\n :return:\n '''\n topk = (1, 5, 10, 50, 100, 200, 500)\n img_codes = torch.stack(img_embs, dim=0)[::5, ...] 
# 1000,256\n cap_codes = []\n adv_num=-1\n for sample in cap_embs:\n cur = [sample[0].squeeze()]\n adv_num=len(sample[2])\n for adv in sample[2]:\n cur.append(adv)\n cap_codes += cur\n cap_codes = torch.stack(cap_codes, dim=0) # 11*5*1000, 256\n\n img_codes = torch.sign(img_codes).long()\n cap_codes = torch.sign(cap_codes).long()\n\n sims = get_hamming_dist(img_codes, cap_codes) # hamming distance matrix 1000*55000\n\n n_imgs, n_caps = sims.shape\n assert n_imgs * 5 * (adv_num+1) == n_caps\n\n base_matrix = sims[:, ::(adv_num + 1)] # 1000,5000\n print('base matrix shape is ', base_matrix.shape)\n result = {}\n for k in topk:\n # max_score, max_score_idx = sims.topk(k=k, dim=1, largest=False)\n correct_count = 0\n for i in range(n_imgs):\n cur_base_row = base_matrix[i]\n t_l = i * 5 * (adv_num + 1)\n t_r = (i + 1) * 5 * (adv_num + 1)\n\n tmp = []\n for j in range(t_l + 1, t_r, (adv_num + 1)):\n tmp.append(sims[i, j:j + adv_num])\n cur_adv_row = torch.cat(tmp, dim=-1)\n cur_row = torch.cat([cur_base_row, cur_adv_row], dim=-1) # 5000+55\n scores, cur_i_topk = cur_row.topk(k=k, dim=-1, largest=False)\n for j in range(i * 5, (i + 1) * 5): # 五句话只要有一句在里面就可以认为命中, 每个image有55个句子,其中5个是gt\n if j in cur_i_topk:\n correct_count += 1\n break\n acc = correct_count / n_imgs\n result[k] = acc\n logger.info(\"Adversary retrieval (Add 10 adversary sentences for each original caption)\\n\" +\n \"Image to text: {}\".format(str(result)))\n\n if 'save_sm_matrix_dir' in kwargs:\n save_path = os.path.join(kwargs['save_sm_matrix_dir'], 'adv_retrieval_i2t_sm_matrix.npz')\n save_similarity_matrix(sims.cpu().detach().numpy(), save_path)\n\n return result\n\n\ndef text2image_rephrase(img_embs, cap_embs, **kwargs):\n ''' 使用re-phrase的句子作为query来查图片\n :param img_embs: List, [one image code]* 5000\n :param cap_embs: List, [[ori_cap_code,reph_cap_code,adv_cap_code]]*5000\n :return:\n '''\n topk = (1, 5, 10, 50, 100, 200, 500)\n img_codes = torch.stack(img_embs, dim=0)[::5, ...] 
# 1000,256\n cap_codes = []\n for sample in cap_embs:\n cur = []\n for reph in sample[1]:\n cur.append(reph)\n cap_codes += cur\n cap_codes = torch.stack(cap_codes, dim=0) # 2*5*1000, 256\n\n img_codes = torch.sign(img_codes).long()\n cap_codes = torch.sign(cap_codes).long()\n\n sims = get_hamming_dist(cap_codes, img_codes) # hamming distance matrix 10000,1000\n\n n_caps, n_imgs = sims.shape\n assert n_caps == n_imgs * 5 * 2\n\n result = {}\n for k in topk:\n max_score, max_score_idx = sims.topk(k=k, dim=1, largest=False)\n correct_count = 0\n for i in range(n_caps):\n cur_i_topk = max_score_idx[i]\n j = i // 10\n if j in cur_i_topk:\n correct_count += 1\n\n acc = correct_count / n_caps\n result[k] = acc\n\n logger.info(\"Adversary retrieval (Using 2 re-phrase sentences as query)\\n\" +\n \"Text to image: {}\".format(str(result)))\n\n if 'save_sm_matrix_dir' in kwargs:\n save_path = os.path.join(kwargs['save_sm_matrix_dir'], 'adv_retrieval_t2i_sm_matrix.npz')\n save_similarity_matrix(sims.cpu().detach().numpy(), save_path)\n return result\n\n\ndef validate(opt, val_loader, model, **kwargs):\n tb_logger = kwargs['tb_logger'] if 'tb_logger' in kwargs else None\n # compute the encoding for all the validation images and captions,使用的是连续向量\n start = time.time()\n img_embs, cap_embs, cap_lens = encode_data(model, val_loader, opt.log_step, tb_logger)\n end = time.time()\n print(\"calculate backbone time:\", end - start)\n\n print(img_embs.shape, cap_embs.shape)\n # save_vec_path = os.path.join(opt.exp_dir, 'saved_vector.pkl')\n # save_vector_to_file(data={'img_vec': img_embs, 'cap_vec': cap_embs}, file_name=save_vec_path)\n\n img_embs = np.array([img_embs[i] for i in range(0, len(img_embs), 5)])\n\n start = time.time()\n sims = 1 - cdist(img_embs, cap_embs, metric='cosine')\n end = time.time()\n logger.info(\"calculate similarity time:{}\".format(end - start))\n\n # caption retrieval\n (r1, r5, r10, medr, meanr) = i2t(img_embs, cap_embs, cap_lens, sims)\n logging.info(\"Image to text: %.1f, %.1f, %.1f, %.1f, %.1f\" % (r1, r5, r10, medr, meanr))\n # image retrieval\n (r1i, r5i, r10i, medri, meanr) = t2i(img_embs, cap_embs, cap_lens, sims)\n logging.info(\"Text to image: %.1f, %.1f, %.1f, %.1f, %.1f\" % (r1i, r5i, r10i, medri, meanr))\n # sum of recalls to be used for early stopping\n currscore = r1 + r5 + r10 + r1i + r5i + r10i\n\n return currscore\n\n\ndef validate_binary(opt, val_loader, model, **kwargs):\n tb_logger = kwargs['tb_logger'] if 'tb_logger' in kwargs else None\n adv_i2t, adv_t2i = None, None\n ori_i2tb, ori_t2ib = None, None\n\n save_code_flag = True if 'save_hash_code' in kwargs and kwargs['save_hash_code'] is True else False\n\n # compute the encoding for all the validation images and captions\n if opt.need_adversary_data or opt.need_rephrase_data:\n img_embs, cap_embs, cap_lens = encode_data_with_adversary(model, val_loader, opt.log_step, tb_logger)\n\n ori_i2tb, ori_t2ib = original_retrieval_with_adversary_data(img_embs, cap_embs, **kwargs)\n\n if opt.need_adversary_data:\n adv_i2t = img2text_adversary(img_embs, cap_embs, **kwargs)\n if opt.need_rephrase_data:\n adv_t2i = text2image_rephrase(img_embs, cap_embs, **kwargs)\n\n else:\n img_embs, cap_embs, cap_lens = encode_data(model, val_loader, opt.log_step, tb_logger)\n\n ori_i2tb, ori_t2ib = original_retrieval(img_embs, cap_embs, **kwargs)\n\n if tb_logger is not None:\n for k, val in ori_i2tb.items():\n tb_logger.add_scalar('val/ori_i2tb_top{}'.format(k), val, global_step=model.Eiters)\n for k, val in ori_t2ib.items():\n 
tb_logger.add_scalar('val/ori_t2ib_top{}'.format(k), val, global_step=model.Eiters)\n if adv_i2t is not None:\n for k, val in adv_i2t.items():\n tb_logger.add_scalar('val/adv_i2t_top{}'.format(k), val, global_step=model.Eiters)\n if adv_t2i is not None:\n for k, val in adv_t2i.items():\n tb_logger.add_scalar('val/adv_t2i_top{}'.format(k), val, global_step=model.Eiters)\n\n if save_code_flag:\n save_path = os.path.join(opt.exp_dir, 'saved_hash_code.pickle')\n save_hashcodes(img_embs, cap_embs, save_path)\n\n currscore = [ri2t for k, ri2t in ori_i2tb.items() if k < 50] + [rt2i for k, rt2i in ori_t2ib.items() if k < 50]\n\n # currscore=[s for k ,s in adv_i2t.items() if k<100] #仅考虑adversary 效果最好的时候保存最优checkpoint\n\n r_sum = sum(currscore)\n return r_sum\n\n\ndef original_retrieval(img_embs, cap_embs, **kwargs):\n img_codes = torch.from_numpy(img_embs[::5, ...]) # 1000,256\n cap_codes = torch.from_numpy(cap_embs) # 5*1000, 256\n\n img_codes = torch.sign(img_codes).long()\n cap_codes = torch.sign(cap_codes).long()\n\n sims = get_hamming_dist(img_codes, cap_codes) # hamming distance matrix 1000*5000\n\n # caption retrieval\n topk_r_i2tb = i2t_binary(sims)\n logger.info(\"Original retrieval, Image to text: {}\".format(str(topk_r_i2tb)))\n # image retrieval\n topk_r_t2ib = t2i_binary(sims)\n logger.info(\"Original retrieval, Text to image: {}\".format(str(topk_r_t2ib)))\n\n if 'save_sm_matrix_dir' in kwargs:\n save_path = os.path.join(kwargs['save_sm_matrix_dir'], 'original_retrieval_i2t_sm_matrix.npz')\n save_similarity_matrix(sims.cpu().detach().numpy(), save_path)\n\n return topk_r_i2tb, topk_r_t2ib\n\n\ndef original_retrieval_with_adversary_data(img_embs, cap_embs, **kwargs):\n '''\n :param img_embs:\n :param cap_embs: 得到的数据中包含adversary的data\n :param topk:\n :return:\n '''\n img_codes = torch.stack(img_embs, dim=0)[::5, ...] # 1000,256\n cap_codes = []\n for sample in cap_embs:\n cap_codes.append(sample[0].squeeze())\n cap_codes = torch.stack(cap_codes, dim=0) # 5*1000, 256\n\n img_codes = torch.sign(img_codes).long()\n cap_codes = torch.sign(cap_codes).long()\n\n sims = get_hamming_dist(img_codes, cap_codes) # hamming distance matrix 1000*5000\n\n # caption retrieval\n topk_r_i2tb = i2t_binary(sims)\n logger.info(\"Original retrieval, Image to text: {}\".format(str(topk_r_i2tb)))\n # image retrieval\n topk_r_t2ib = t2i_binary(sims)\n logger.info(\"Original retrieval, Text to image: {}\".format(str(topk_r_t2ib)))\n\n if 'save_sm_matrix_dir' in kwargs:\n save_path = os.path.join(kwargs['save_sm_matrix_dir'], 'original_retrieval_i2t_sm_matrix.npz')\n save_similarity_matrix(sims.cpu().detach().numpy(), save_path)\n return topk_r_i2tb, topk_r_t2ib\n\n\ndef save_hashcodes(img_embeds, cap_embeds, save_path):\n if isinstance(img_embeds, np.ndarray):\n img_embeds = torch.from_numpy(img_embeds)\n cap_embeds = torch.from_numpy(cap_embeds)\n img_hash_code = torch.sign(img_embeds).cpu().numpy().astype(np.int8)\n cap_hash_code = torch.sign(cap_embeds).cpu().numpy().astype(np.int8)\n\n hash_code = {'img_code': img_hash_code, 'cap_code': cap_hash_code}\n pickle.dump(hash_code, open(save_path, 'wb'))\n logger.info('save hash code to file {} successfully.'.format(save_path))\n",
"id": "5714916",
"language": "Python",
"matching_score": 3.813795328140259,
"max_stars_count": 2,
"path": "evaluation.py"
},
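The evaluation.py record above binarizes continuous embeddings with sign(), measures Hamming distance, and reports top-K hit rates where captions i*5 .. i*5+4 are the ground truth for image i. A minimal stand-alone sketch of that recall@K loop (function names here are illustrative, not the repo's API):

```python
# Minimal sketch: sign-binarize embeddings, build a Hamming-distance matrix,
# and report recall@K for image-to-text retrieval (5 ground-truth captions per image).
import torch

def hamming_matrix(a, b):
    # a: (n, d), b: (m, d), entries in {-1, +1}; Hamming distance = (d - <a, b>) / 2
    d = a.shape[1]
    return (d - a @ b.t()) / 2

def recall_at_k(img_emb, cap_emb, ks=(1, 5, 10)):
    img_codes = torch.sign(img_emb)          # (n_imgs, d)
    cap_codes = torch.sign(cap_emb)          # (n_imgs * 5, d)
    dists = hamming_matrix(img_codes, cap_codes)
    result = {}
    for k in ks:
        topk = dists.topk(k=k, dim=1, largest=False).indices  # smaller distance = better
        hits = 0
        for i in range(img_codes.shape[0]):
            gt = set(range(i * 5, i * 5 + 5))                  # 5 ground-truth captions per image
            if gt & set(topk[i].tolist()):
                hits += 1
        result[k] = hits / img_codes.shape[0]
    return result

if __name__ == "__main__":
    imgs = torch.randn(100, 256)
    caps = torch.randn(500, 256)
    print(recall_at_k(imgs, caps))
```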
{
"content": "import logging\r\nimport logging.config\r\nimport os\r\nimport torch\r\nimport pickle\r\nimport numpy as np\r\n\r\nlogger=logging.getLogger(__name__)\r\n\r\n\r\ndef init_logging(exp_dir, config_path='config/logging_config.yaml'):\r\n \"\"\"\r\n initial logging module with config\r\n :param config_path:\r\n :return:\r\n \"\"\"\r\n import yaml, sys\r\n try:\r\n with open(config_path, 'r') as f:\r\n config = yaml.load(f.read(), Loader=yaml.FullLoader)\r\n config[\"handlers\"][\"info_file_handler\"][\"filename\"] = os.path.join(exp_dir, \"info.log\")\r\n config[\"handlers\"][\"time_file_handler\"][\"filename\"] = os.path.join(exp_dir, \"time.log\")\r\n config[\"handlers\"][\"error_file_handler\"][\"filename\"] = os.path.join(exp_dir, \"error.log\")\r\n\r\n logging.config.dictConfig(config)\r\n except IOError:\r\n sys.stderr.write('logging config file \"%s\" not found' % config_path)\r\n logging.basicConfig(level=logging.DEBUG)\r\n\r\n\r\ndef get_hamming_dist(img_code, cap_code):\r\n if torch.cuda.is_available():\r\n device='cuda'\r\n else:\r\n device='cpu'\r\n code_len = img_code.shape[1]\r\n similarity_matrix = []\r\n for i in range(0, img_code.shape[0], 10): # 分片计算防止爆内存\r\n cur_query_code = img_code[i:i + 10].to(device) # size(10,code_len)\r\n cur_matrix=[]\r\n for j in range(0,cap_code.shape[0],1000):\r\n cur_ref_code=cap_code[j:j+1000].to(device)\r\n cur_part=(code_len - (cur_query_code.unsqueeze(1) * cur_ref_code.unsqueeze(0)).sum(dim=-1)) / 2 # size(10,1000)\r\n cur_part=cur_part.cpu()\r\n cur_matrix.append(cur_part)\r\n cur_matrix = torch.cat(cur_matrix,dim=-1).cpu()\r\n similarity_matrix.append(cur_matrix)\r\n similarity_matrix = torch.cat(similarity_matrix, dim=0).cpu()\r\n return similarity_matrix\r\n\r\n\r\n\r\ndef save_vector_to_file(data, file_name):\r\n pickle.dump(data, open(file_name, 'wb'))\r\n logger.info('save vector file to {}'.format(file_name))\r\n\r\n\r\ndef save_similarity_matrix(matrix_data,save_path):\r\n np.save(save_path,matrix_data)\r\n logger.info('save similarity matrix data into file: {}'.format(save_path))\r\n\r\n",
"id": "3207339",
"language": "Python",
"matching_score": 1.1062053442001343,
"max_stars_count": 2,
"path": "utils.py"
},
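get_hamming_dist in utils.py above computes the query-vs-database distance matrix in slices so the full broadcast never fits in memory at once. A condensed usage sketch of the same chunking idea (chunk sizes and function name are illustrative):

```python
# Minimal sketch: chunked Hamming-distance matrix for ±1 codes, so the
# (n_query, n_db, d) intermediate is never materialized in one piece.
import torch

def chunked_hamming(query, db, q_chunk=10, db_chunk=1000):
    d = query.shape[1]
    rows = []
    for i in range(0, query.shape[0], q_chunk):
        q = query[i:i + q_chunk]                   # (<=q_chunk, d)
        cols = []
        for j in range(0, db.shape[0], db_chunk):
            r = db[j:j + db_chunk]                 # (<=db_chunk, d)
            cols.append((d - q @ r.t()) / 2)       # Hamming distance for ±1 codes
        rows.append(torch.cat(cols, dim=1))
    return torch.cat(rows, dim=0)

codes_q = torch.sign(torch.randn(37, 256))
codes_db = torch.sign(torch.randn(2048, 256))
print(chunked_hamming(codes_q, codes_db).shape)    # torch.Size([37, 2048])
```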
{
"content": "import os\r\nimport json\r\nfrom tqdm import tqdm\r\nimport pickle\r\nimport numpy as np\r\nimport json\r\nfrom tqdm import tqdm\r\nimport os\r\nfrom sklearn import metrics\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.datasets.samples_generator import make_blobs\r\nimport torch\r\nfrom numpy import linalg\r\nimport time\r\n\r\n\r\n# val_path = '/S4/MI/data/mscoco/annotations/captions_val2014.json'\r\n# train_path = '/S4/MI/data/mscoco/annotations/captions_train2014.json'\r\n#\r\n# save_path = '/S4/MI/liyz/data/scan_data/coco_precomp/id2filename.json'\r\n#\r\n# all_paths = [val_path, train_path]\r\n# id2filename = {}\r\n#\r\n# for file in all_paths:\r\n# data = json.load(open(file))\r\n# images = data['images']\r\n# print('cur file path is {}'.format(file))\r\n# for ele in tqdm(images):\r\n# id = ele['id']\r\n# file_path = ele['file_name']\r\n# if 'train' in file_path:\r\n# file_path = os.path.join('train2014', file_path)\r\n# elif 'val' in file_path:\r\n# file_path = os.path.join('val2014', file_path)\r\n# else:\r\n# print(file_path)\r\n# id2filename[id] = file_path\r\n#\r\n# json.dump(id2filename, open(save_path, 'w'))\r\n# print('save mapping file into {}'.format(save_path))\r\n\r\n# image_json_info_dir = '/S4/MI/liyz/data/coco_concept/new_tuples'\r\n# concept_to_cls_file = '/S4/MI/liyz/data/coco_concept/tuple_2_cls.pkl'\r\n# save_path = '/S4/MI/liyz/data/coco_concept/imgid_2_concept_idxs.pkl'\r\n#\r\n#\r\n# result={}\r\n#\r\n# def get_all_paths(dir):\r\n# paths = []\r\n# for file in os.listdir(dir):\r\n# cur = os.path.join(dir, file)\r\n# paths.append(cur)\r\n# return paths\r\n#\r\n# tuple2idx=pickle.load(open(concept_to_cls_file,'rb'))\r\n#\r\n# all_paths=get_all_paths(image_json_info_dir)\r\n#\r\n# zero_cnt=0\r\n# for file in tqdm(all_paths):\r\n# cur=json.load(open(file))\r\n# img_id=cur['id']\r\n# cur_label=np.zeros((642),dtype=np.int)\r\n# concept_tuple=cur['tuple']\r\n# for concept in concept_tuple:\r\n# tmp=' '.join(concept)\r\n# cls=tuple2idx[tmp] if tmp in tuple2idx else []\r\n# for idx in cls:\r\n# cur_label[idx]=1\r\n#\r\n# if cur_label.sum()==0:\r\n# zero_cnt+=1\r\n# if zero_cnt%1000==0:\r\n# print(zero_cnt,'============')\r\n# result[img_id]=cur_label\r\n#\r\n#\r\n# pickle.dump(result,open(save_path,'wb'))\r\n# print('finished')\r\n#\r\n\r\n# concept_to_cls_file = '/S4/MI/liyz/data/coco_concept/tuple_2_cls.pkl'\r\n# save_path='/S4/MI/liyz/data/coco_concept/class_id_2_concepts.json'\r\n# tuple2idx=pickle.load(open(concept_to_cls_file,'rb'))\r\n#\r\n# result={i:[] for i in range(643)}\r\n# for concepts,idxs in tqdm(tuple2idx.items()):\r\n# for id in idxs :\r\n# result[id].append(concepts)\r\n#\r\n# json.dump(result,open(save_path,'w'))\r\n\r\n\r\ndef kmeans():\r\n X, y = make_blobs(n_samples=100, n_features=2, centers=[[-1, -1], [0, 0], [1, 1], [2, 2]],\r\n cluster_std=[0.4, 0.2, 0.2, 0.2], random_state=9)\r\n print(X.shape)\r\n plt.scatter(X[:, 0], X[:, 1], marker='o') # 假设暂不知道y类别,不设置c=y,使用kmeans聚类\r\n plt.show()\r\n # X=torch.randn(1000,200).numpy()\r\n y_pred = KMeans(n_clusters=4, random_state=9).fit_predict(X)\r\n print(y_pred.shape)\r\n print(y_pred)\r\n plt.scatter(X[:, 0], X[:, 1], c=y_pred)\r\n plt.show()\r\n\r\n\r\n# kmeans()\r\n\r\n\r\ndef hamming_distance_v1(A, b):\r\n r = (1 << np.arange(8))[:, None]\r\n na = A.shape[0]\r\n H = np.zeros((na), dtype=np.float32)\r\n for i in range(0, na):\r\n a = A[i, :]\r\n c = np.bitwise_xor(a, b)\r\n d = c & r\r\n H[i] = np.count_nonzero(d != 0)\r\n return H\r\n\r\n\r\ndef 
hamming_distance_v2(A, b):\r\n db_size = A.shape[0]\r\n r = np.expand_dims(np.expand_dims((1 << np.arange(8)), axis=0).repeat(db_size, axis=0), axis=1)\r\n result = np.count_nonzero(np.expand_dims(np.bitwise_xor(A, b), axis=-1) & r != 0, axis=-1).sum(axis=-1)\r\n return result\r\n\r\n\r\n\r\n\r\n\r\ndef hamming_distance_v3(A, b):\r\n import gmpy2\r\n from gmpy2 import mpz, hamdist, pack\r\n na = len(A)\r\n H = np.zeros((na), dtype=np.float32)\r\n b = pack(b, 64)\r\n for i in range(0, na):\r\n a = A[i]\r\n a = pack(a, 64)\r\n H[i] = gmpy2.hamdist(a, b)\r\n # H[i] = gmpy2.popcount(np.bitwise_xor(a,b))\r\n return H\r\n\r\n\r\ntable = np.array(\r\n [0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 1, 2, 2, 3, 2, 3,\r\n 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4,\r\n 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4,\r\n 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5, 2, 3, 3, 4, 3, 4, 4, 5,\r\n 3, 4, 4, 5, 4, 5, 5, 6, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6,\r\n 6, 7, 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6, 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 3, 4, 4, 5,\r\n 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7, 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8],dtype=np.uint8)\r\n\r\ndef hamming_distance_v4(A, b):\r\n query_n, word_n = b.shape\r\n db_size, word_n1 = A.shape\r\n\r\n assert word_n == word_n1\r\n Dh = np.zeros((query_n, db_size), 'uint16')\r\n\r\n for i in range(query_n):\r\n c = np.bitwise_xor(A, b[i]) # db_size,word_n\r\n for n in range(word_n1):\r\n cur_idx = c[:, n]\r\n Dh[i,:]=Dh[i,:]+table[cur_idx]\r\n return Dh\r\n\r\n\r\nA = np.zeros((100, 4), dtype=np.uint8)\r\nb=np.zeros((4,4),dtype=np.uint8)\r\nresult=hamming_distance_v4(A,b)\r\nprint(result)\r\nprint(result.shape)\r\n#\r\n# start = time.time()\r\n# for i in range(2):\r\n# b = np.zeros((4,), dtype=np.int64)\r\n# result = hamming_distance_v1(A, b)\r\n# print('100 test on hamming 256 bit time={} function v1'.format(time.time() - start))\r\nexit(0)\r\n\r\n\r\ndef get_cosine_similarity_v1(A, b):\r\n \"\"\"\r\n compute cosine similarity between two tensors\r\n x1: Tensor of shape (h1, w)\r\n x2: Tensor of shape (h2, w)\r\n Return pairwise cosine distance for each row vector in x1, x2 as\r\n a Tensor of shape (h1, h2)\r\n \"\"\"\r\n x1 = A\r\n x2 = b\r\n db_size = x1.shape[0]\r\n result = np.zeros((db_size,), dtype=np.float32)\r\n x1_norm = x1 / linalg.norm(x2, axis=1)[:, None]\r\n x2_norm = x2 / linalg.norm(x2, axis=1)[:, None]\r\n for i in range(db_size):\r\n cur = np.matmul(x1_norm[i, :], x2_norm.T)\r\n result[i] = cur.squeeze()\r\n return result\r\n\r\n\r\ndef get_cosine_similarity_v2(A, b):\r\n \"\"\"\r\n compute cosine similarity between two tensors\r\n x1: Tensor of shape (h1, w)\r\n x2: Tensor of shape (h2, w)\r\n Return pairwise cosine distance for each row vector in x1, x2 as\r\n a Tensor of shape (h1, h2)\r\n \"\"\"\r\n x1 = A\r\n x2 = b\r\n x1_norm = x1 / linalg.norm(x2, axis=1)[:, None]\r\n x2_norm = x2 / linalg.norm(x2, axis=1)[:, None]\r\n res = np.matmul(x1_norm, x2_norm.T)\r\n res = res.squeeze()\r\n return res\r\n\r\n\r\ndef get_l2_distance(A, b):\r\n diff = ((A - b) ** 2).sum(axis=1)\r\n diff = diff ** 0.5\r\n # print(diff.shape)\r\n return diff\r\n\r\n\r\nA = np.random.randn(100000, 512)\r\nstart = time.time()\r\nfor i in range(100):\r\n b 
= np.random.randn(1, 512)\r\n result = hamming_distance_v4(A, b)\r\nprint('100 test on continue vec {} dimension 1024 cos_sim function v2'.format(time.time() - start))\r\nexit(0)\r\n\r\n#\r\n# A=np.array([[1,3,1,0],[77,0,1,0]],dtype=np.int64)\r\n# b=np.array([[1,3,1,0]],dtype=np.int64)\r\n# H2=hamming_distance_v2(A,b)\r\n# print(H2)\r\n# b=np.array([1,3,1,0],dtype=np.int64)\r\n# H=hamming_distance_v3(A,b)\r\n# print(H)\r\n\r\n\r\n# SIZES=[]\r\nSIZES = [200, 5000, 10000, 50000, 100000]\r\nQUERY_NUMBER = 100\r\n\r\nfor DATABASE_SIZE in SIZES:\r\n print('cur data base size is {}=============================='.format(DATABASE_SIZE))\r\n # test for conitnus vector\r\n #\r\n A = np.random.randn(DATABASE_SIZE, 256)\r\n start = time.time()\r\n for i in range(QUERY_NUMBER):\r\n b = np.random.randn(1, 256)\r\n result = get_cosine_similarity_v1(A, b)\r\n print('100 test on continue vec {} dimension 256 cos_sim function v1'.format(time.time() - start))\r\n\r\n A = np.random.randn(DATABASE_SIZE, 256)\r\n start = time.time()\r\n for i in range(QUERY_NUMBER):\r\n b = np.random.randn(1, 256)\r\n result = get_cosine_similarity_v2(A, b)\r\n print('100 test on continue vec {} dimension 256 cos_sim function v2'.format(time.time() - start))\r\n\r\n A = np.random.randn(DATABASE_SIZE, 256)\r\n start = time.time()\r\n for i in range(QUERY_NUMBER):\r\n b = np.random.randn(1, 256)\r\n result = get_l2_distance(A, b)\r\n print('100 test on continue vec {} dimension 256 l2 function v2'.format(time.time() - start))\r\n\r\n A = np.zeros((DATABASE_SIZE, 4), dtype=np.int64)\r\n start = time.time()\r\n for i in range(QUERY_NUMBER):\r\n b = np.zeros((4,), dtype=np.int64)\r\n result = hamming_distance_v1(A, b)\r\n print('100 test on hamming 256 bit time={} function v1'.format(time.time() - start))\r\n\r\n A = np.zeros((DATABASE_SIZE, 4), dtype=np.int64)\r\n start = time.time()\r\n for i in range(QUERY_NUMBER):\r\n b = np.zeros((1, 4), dtype=np.int64)\r\n result = hamming_distance_v2(A, b)\r\n print('100 test on hamming 256 bit time={} function v2'.format(time.time() - start))\r\n\r\n A = np.zeros((DATABASE_SIZE, 4), dtype=np.int64)\r\n A = [[int(x) for x in A[i]] for i in range(DATABASE_SIZE)]\r\n start = time.time()\r\n for i in range(QUERY_NUMBER):\r\n b = np.zeros((4,), dtype=np.int64)\r\n b = [int(x) for x in b]\r\n result = hamming_distance_v3(A, b)\r\n print('100 test on hamming 256 bit time={} function v3'.format(time.time() - start))\r\n\r\n A = np.zeros((DATABASE_SIZE, 32), dtype=np.int64)\r\n start = time.time()\r\n for i in range(QUERY_NUMBER):\r\n b = np.zeros((1, 32), dtype=np.int64)\r\n result = hamming_distance_v2(A, b)\r\n print('100 test on hamming 2048 bit time={} function v2'.format(time.time() - start))\r\n\r\n A = np.zeros((DATABASE_SIZE, 32), dtype=np.int64)\r\n A = [[int(x) for x in A[i]] for i in range(DATABASE_SIZE)]\r\n start = time.time()\r\n for i in range(QUERY_NUMBER):\r\n b = np.zeros((32,), dtype=np.int64)\r\n b = [int(x) for x in b]\r\n result = hamming_distance_v3(A, b)\r\n print('100 test on hamming 2048 bit time={} function v3'.format(time.time() - start))\r\n",
"id": "966547",
"language": "Python",
"matching_score": 3.135158061981201,
"max_stars_count": 2,
"path": "tools/search_time_counting.py"
},
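The timing script above benchmarks several Hamming-distance implementations, including one driven by a precomputed popcount lookup table. A small self-contained sketch of that table-based variant over bit-packed uint8 codes (helper names are hypothetical):

```python
# Minimal sketch: Hamming distance over bit-packed uint8 codes via a
# 256-entry popcount lookup table, similar to hamming_distance_v4 above.
import numpy as np

POPCOUNT = np.array([bin(i).count("1") for i in range(256)], dtype=np.uint8)

def hamming_packed(db, queries):
    # db: (n_db, n_bytes) uint8, queries: (n_q, n_bytes) uint8
    out = np.zeros((queries.shape[0], db.shape[0]), dtype=np.uint16)
    for qi in range(queries.shape[0]):
        xor = np.bitwise_xor(db, queries[qi])      # differing bits, per byte
        out[qi] = POPCOUNT[xor].sum(axis=1)        # popcount per byte, summed over bytes
    return out

db = np.random.randint(0, 256, size=(1000, 32), dtype=np.uint8)   # 256-bit codes
q = np.random.randint(0, 256, size=(5, 32), dtype=np.uint8)
print(hamming_packed(db, q).shape)                                # (5, 1000)
```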
{
"content": "import json\r\nfrom tqdm import tqdm\r\nimport os\r\nfrom sklearn import metrics\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.datasets.samples_generator import make_blobs\r\nimport numpy as np\r\nimport torch\r\nimport pickle\r\n\r\n\r\ndef get_imgid2object():\r\n input_file_path = '/m/liyz/sg_matching/s2g/spice/coco_output/merged.json'\r\n\r\n data = json.load(open(input_file_path))\r\n print('total caption number is {}'.format(len(data)))\r\n all_obj = {}\r\n imgid2objs = {}\r\n\r\n for info in tqdm(data):\r\n img_id = info['image_id'].split('_')[0]\r\n ref_tuples = info['ref_tuples']\r\n if img_id not in imgid2objs:\r\n imgid2objs[img_id] = set()\r\n\r\n for tuple in ref_tuples:\r\n if len(tuple['tuple']) == 1:\r\n obj = tuple['tuple'][0]\r\n imgid2objs[img_id].add(obj)\r\n all_obj[obj] = all_obj.get(obj, 0) + 1\r\n\r\n print('total image id number is {}'.format(len(imgid2objs)))\r\n print('total objects number is {}'.format(len(all_obj)))\r\n\r\n for imgid, objs in imgid2objs.items():\r\n objs = list(objs)\r\n imgid2objs[imgid] = objs\r\n\r\n imgid2objs_save_path = './coco_imgid2objs.json'\r\n json.dump(imgid2objs, open(imgid2objs_save_path, 'w'))\r\n print('save to {} successfully.'.format(imgid2objs_save_path))\r\n\r\n all_obj_save_path = './coco_obj_freq.json'\r\n json.dump(all_obj, open(all_obj_save_path, 'w'))\r\n print('save to {} successfully.'.format(all_obj_save_path))\r\n\r\n\r\ndef kmeans(data, k_cluster=1000):\r\n # X, y = make_blobs(n_samples=100, n_features=2, centers=[[-1, -1], [0, 0], [1, 1], [2, 2]],\r\n # cluster_std=[0.4, 0.2, 0.2, 0.2], random_state=9)\r\n # print(X.shape)\r\n # plt.scatter(X[:, 0], X[:, 1], marker='o') # 假设暂不知道y类别,不设置c=y,使用kmeans聚类\r\n # plt.show()\r\n # X=torch.randn(500,200).numpy()\r\n print('the input data shape is: ', data.shape)\r\n X = data\r\n y_pred = KMeans(n_clusters=k_cluster, random_state=9).fit_predict(X)\r\n print('output result shape is ', y_pred.shape)\r\n # print(y_pred)\r\n # plt.scatter(X[:, 0], X[:, 1], c=y_pred)\r\n # plt.show()\r\n return y_pred\r\n\r\n\r\ndef load_vector_dict(vector_file_path):\r\n print('loading glove vector file...')\r\n pickle_file_path = vector_file_path + '_pickle.pkl'\r\n word2vector = {}\r\n if os.path.exists(pickle_file_path):\r\n word2vector = pickle.load(open(pickle_file_path, 'rb'))\r\n print('load from pickle directly')\r\n else:\r\n with open(vector_file_path, 'r') as f:\r\n for line in tqdm(f.readlines()):\r\n line = line.strip()\r\n infos = line.split()\r\n word = infos[0]\r\n vec = np.array([float(x) for x in infos[1:]])\r\n word2vector[word] = vec\r\n pickle.dump(word2vector, open(pickle_file_path, 'wb'))\r\n print('save dict file into pickle file: {}'.format(pickle_file_path))\r\n vec_dim = word2vector['hello'].shape[0]\r\n print('reading glove vector file finished... 
vector dimension is {}'.format(vec_dim))\r\n # print(len(word2vector),word2vector['hello'])\r\n # print(word2vector['hammer'])\r\n return word2vector, vec_dim\r\n\r\n\r\ndef get_all_obj(obj_freq_file, word2vec, threshold):\r\n obj2freq = json.load(open(obj_freq_file))\r\n # print(word2vec.keys())\r\n used_obj = []\r\n used_vectors = []\r\n for obj, cnt in obj2freq.items():\r\n if cnt>=threshold and obj in word2vec:\r\n # print(obj)\r\n used_obj.append(obj)\r\n used_vectors.append(word2vec[obj])\r\n print(len(used_obj),len(used_vectors))\r\n print('using threshold {}, the useful object number is {}'.format(threshold, len(used_obj)))\r\n used_vectors = np.stack(used_vectors, axis=0)\r\n return used_obj, used_vectors\r\n\r\n\r\ndef get_clustered_result(glove_file_path, obj_freq_file_path, save_word2clus_id_path, save_clus_id2words,\r\n k_cluster=1000):\r\n word2vec,vec_dim = load_vector_dict(vector_file_path=glove_file_path)\r\n used_obj, used_vectors = get_all_obj(obj_freq_file=obj_freq_file_path, word2vec=word2vec, threshold=10)\r\n\r\n clustered_idxs = kmeans(used_vectors, k_cluster=1000)\r\n\r\n word2clus_id = {}\r\n clus_id2words = {i: [] for i in range(k_cluster)}\r\n for i in range(len(used_obj)):\r\n word = used_obj[i]\r\n idx = int(clustered_idxs[i])\r\n word2clus_id[word] = idx\r\n clus_id2words[idx].append(word)\r\n\r\n json.dump(word2clus_id, open(save_word2clus_id_path, 'w'))\r\n json.dump(clus_id2words, open(save_clus_id2words, 'w'))\r\n\r\n print('finished.........')\r\n\r\n\r\n\r\nglove_file_path='/S4/MI/liyz/data/glove/glove.6B.200d.txt'\r\nobj_freq_file_path='/S4/MI/liyz/saem_retrieval/data/cocoid2obj/coco_obj_freq.json'\r\nsave_word2clus_id_path='/S4/MI/liyz/saem_retrieval/data/cocoid2obj/obj_to_clustered_id.json'\r\nsave_clus_id2words='/S4/MI/liyz/saem_retrieval/data/cocoid2obj/clustered_id_to_obj.json'\r\nget_clustered_result(glove_file_path,obj_freq_file_path,save_word2clus_id_path,save_clus_id2words,k_cluster=1000)\r\n\r\n",
"id": "7652716",
"language": "Python",
"matching_score": 0.3014737069606781,
"max_stars_count": 2,
"path": "tools/test.py"
},
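tools/test.py above clusters GloVe vectors of frequent objects with KMeans and saves word-to-cluster and cluster-to-words mappings. A minimal sketch of that clustering step with random stand-in vectors (no real GloVe file is loaded here):

```python
# Minimal sketch: cluster word vectors with scikit-learn KMeans and build the
# word -> cluster-id and cluster-id -> words mappings the script above saves.
import numpy as np
from sklearn.cluster import KMeans

words = [f"word_{i}" for i in range(200)]
vectors = np.random.randn(200, 50)                 # stand-in for 50-d GloVe vectors

labels = KMeans(n_clusters=8, random_state=9, n_init=10).fit_predict(vectors)

word2cluster = {w: int(c) for w, c in zip(words, labels)}
cluster2words = {}
for w, c in word2cluster.items():
    cluster2words.setdefault(c, []).append(w)

print(len(cluster2words), "clusters")
```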
{
"content": "import random\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom torch.nn import Parameter\nimport torchvision as tv\n\nimport tokenization\nfrom bert import BertConfig, BertModel\nimport bert\n\ndef freeze_layers(model):\n for child in model.children():\n for param in child.parameters():\n param.requires_grad = False\n\n\ndef transfer_ckpt(ori_data):\n import collections\n result = collections.OrderedDict()\n for k in ori_data:\n new_k = k.replace('bert.', '')\n new_k = new_k.replace('LayerNorm.weight', 'LayerNorm.gamma')\n new_k = new_k.replace('LayerNorm.bias', 'LayerNorm.beta')\n result[new_k] = ori_data[k]\n return result\n\n\nclass BertMapping(nn.Module):\n \"\"\"\n \"\"\"\n\n def __init__(self, opt):\n super(BertMapping, self).__init__()\n bert_config = BertConfig.from_json_file(opt.bert_config_file)\n self.bert = BertModel(bert_config)\n ori_ckpt = torch.load(opt.init_checkpoint, map_location='cpu')\n transed_ckpt = transfer_ckpt(ori_ckpt)\n self.bert.load_state_dict(transed_ckpt, strict=False)\n freeze_layers(self.bert)\n self.txt_stru = opt.txt_stru\n\n if opt.txt_stru == 'pooling':\n self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)\n self.mapping = nn.Linear(bert_config.hidden_size, opt.final_dims)\n elif opt.txt_stru == 'cnn':\n Ks = [1, 2, 3]\n in_channel = 1\n out_channel = 512\n embedding_dim = bert_config.hidden_size\n self.convs1 = nn.ModuleList([nn.Conv2d(in_channel, out_channel, (K, embedding_dim)) for K in Ks])\n self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)\n self.mapping = nn.Linear(len(Ks) * out_channel, opt.final_dims)\n elif opt.txt_stru == 'rnn':\n embedding_dim = bert_config.hidden_size\n self.bi_gru = opt.bi_gru\n self.rnn = nn.GRU(embedding_dim, opt.embed_size, opt.num_layers, batch_first=True, bidirectional=opt.bi_gru)\n self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)\n self.mapping = nn.Linear(opt.embed_size, opt.final_dims)\n elif opt.txt_stru == 'trans':\n bert_config = BertConfig.from_json_file(opt.img_trans_cfg)\n self.layer = bert.BERTLayer(bert_config)\n self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)\n self.mapping = nn.Linear(768, opt.final_dims)\n\n\n def forward(self, input_ids, attention_mask, token_type_ids, lengths):\n # print('bert input',input_ids.shape)\n all_encoder_layers, pooled_output = self.bert(input_ids, token_type_ids=token_type_ids,attention_mask=attention_mask)\n if self.txt_stru == 'pooling':\n output = self.mapping(all_encoder_layers[-1])\n output = torch.mean(output, 1)\n code = output\n elif self.txt_stru == 'cnn':\n x = all_encoder_layers[-1].unsqueeze(1) # (batch_size, 1, token_num, embedding_dim)\n x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] # [(batch_size, out_channel, W), ...]*len(Ks)\n x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(N, Co), ...]*len(Ks)\n output = torch.cat(x, 1)\n elif self.txt_stru == 'rnn':\n x = all_encoder_layers[-1] # (batch_size, token_num, embedding_dim)\n packed = pack_padded_sequence(x, lengths, batch_first=True)\n # Forward propagate RNN\n out, _ = self.rnn(packed)\n # Reshape *final* output to (batch_size, hidden_size)\n padded = pad_packed_sequence(out, batch_first=True)\n cap_emb, cap_len = padded\n if self.bi_gru:\n cap_emb = (cap_emb[:, :, :cap_emb.size(2) / 2] + cap_emb[:, :, cap_emb.size(2) / 2:]) / 2\n else:\n cap_emb = cap_emb\n output = 
torch.mean(cap_emb, 1)\n elif self.txt_stru == 'trans':\n\n hidden_states = self.mapping(all_encoder_layers[-1])\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = extended_attention_mask.float()\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n hidden_states = self.layer(hidden_states, extended_attention_mask)\n # output = hidden_states[:, 0, :]\n output = torch.mean(hidden_states, 1)\n\n output = self.dropout(output)\n code = self.mapping(output)\n # code = F.tanh(code)\n code = F.normalize(code, p=2, dim=1)\n return code\n\n\nclass BertBinaryMapping(nn.Module):\n \"\"\"\n \"\"\"\n\n def __init__(self, opt):\n super(BertBinaryMapping, self).__init__()\n bert_config = BertConfig.from_json_file(opt.bert_config_file)\n self.bert = BertModel(bert_config)\n ori_ckpt = torch.load(opt.init_checkpoint, map_location='cpu')\n transed_ckpt = transfer_ckpt(ori_ckpt)\n self.bert.load_state_dict(transed_ckpt, strict=False)\n freeze_layers(self.bert)\n self.txt_stru = opt.txt_stru\n\n if opt.txt_stru == 'pooling':\n self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)\n self.mapping = nn.Linear(bert_config.hidden_size, opt.final_dims)\n elif opt.txt_stru == 'cnn':\n Ks = [1, 2, 3]\n in_channel = 1\n out_channel = 512\n embedding_dim = bert_config.hidden_size\n self.convs1 = nn.ModuleList([nn.Conv2d(in_channel, out_channel, (K, embedding_dim)) for K in Ks])\n self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)\n self.mapping = nn.Linear(len(Ks) * out_channel, opt.final_dims)\n elif opt.txt_stru == 'rnn':\n embedding_dim = bert_config.hidden_size\n self.bi_gru = opt.bi_gru\n self.rnn = nn.GRU(embedding_dim, opt.embed_size, opt.num_layers, batch_first=True, bidirectional=opt.bi_gru)\n self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)\n self.mapping = nn.Linear(opt.embed_size, opt.final_dims)\n elif opt.txt_stru == 'trans':\n bert_config = BertConfig.from_json_file(opt.img_trans_cfg)\n self.layer = bert.BERTLayer(bert_config)\n self.dropout = nn.Dropout(bert_config.hidden_dropout_prob)\n self.mapping = nn.Linear(768, opt.final_dims)\n\n def forward(self, input_ids, attention_mask, token_type_ids, lengths):\n # print('bert input',input_ids.shape)\n all_encoder_layers, pooled_output = self.bert(input_ids, token_type_ids=token_type_ids,\n attention_mask=attention_mask)\n if self.txt_stru == 'pooling':\n output = self.mapping(all_encoder_layers[-1])\n output = torch.mean(output, 1)\n code = output\n elif self.txt_stru == 'cnn':\n x = all_encoder_layers[-1].unsqueeze(1) # (batch_size, 1, token_num, embedding_dim)\n x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] # [(batch_size, out_channel, W), ...]*len(Ks)\n x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(N, Co), ...]*len(Ks)\n output = torch.cat(x, 1)\n elif self.txt_stru == 'rnn':\n x = all_encoder_layers[-1] # (batch_size, token_num, embedding_dim)\n packed = pack_padded_sequence(x, lengths, batch_first=True)\n # Forward propagate RNN\n out, _ = self.rnn(packed)\n # Reshape *final* output to (batch_size, hidden_size)\n padded = pad_packed_sequence(out, batch_first=True)\n cap_emb, cap_len = padded\n if self.bi_gru:\n cap_emb = (cap_emb[:, :, :cap_emb.size(2) / 2] + cap_emb[:, :, cap_emb.size(2) / 2:]) / 2\n else:\n cap_emb = cap_emb\n output = torch.mean(cap_emb, 1)\n elif self.txt_stru == 'trans':\n\n hidden_states = self.mapping(all_encoder_layers[-1])\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask 
= extended_attention_mask.float()\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n hidden_states = self.layer(hidden_states, extended_attention_mask)\n # output = hidden_states[:, 0, :]\n output = torch.mean(hidden_states, 1)\n\n output = self.dropout(output)\n code = self.mapping(output) # batch * dim\n\n code=torch.softmax(code,dim=-1)\n median,m_idx=torch.median(code,dim=-1)\n code= code - (median.unsqueeze(1)+1e-8)\n code = torch.tanh(code*10)\n # code = F.normalize(code, p=2, dim=1)\n return code\n\n",
"id": "11848689",
"language": "Python",
"matching_score": 6.043989181518555,
"max_stars_count": 2,
"path": "text_net.py"
},
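The distinctive step in BertBinaryMapping above is the code-balancing trick at the end of forward: softmax the projection, subtract the per-sample median so about half the dimensions are positive, then squash with a steep tanh toward ±1. Isolated as a sketch (function name is illustrative):

```python
# Minimal sketch of the median-centered soft binarization used in BertBinaryMapping.
import torch

def soft_binarize(features, scale=10.0, eps=1e-8):
    probs = torch.softmax(features, dim=-1)         # (B, D), non-negative, rows sum to 1
    median, _ = torch.median(probs, dim=-1)         # per-sample median, shape (B,)
    centered = probs - (median.unsqueeze(1) + eps)  # roughly half the entries become negative
    return torch.tanh(scale * centered)             # soft ±1 codes; sign() gives hard codes

x = torch.randn(4, 256)
codes = soft_binarize(x)
print(codes.shape, float(codes.abs().max()) <= 1.0)
```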
{
"content": "import random\nimport math\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.autograd as autograd\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom torch.nn import Parameter\nfrom bert import BertConfig, BertModel\nimport bert\nfrom torchvision.models import *\n\n\nclass CnnMapping(nn.Module):\n def __init__(self, z, c):\n '''\n z: image patch dim\n c: final embedding dim\n '''\n super(CnnMapping, self).__init__()\n Co = 256 # number of channel for each kernel size\n Ks = [1, 2, 3] # kernel size\n self.convs1 = nn.ModuleList([nn.Conv2d(1, Co, (K, z)) for K in Ks])\n # torch.nn.Conv2d(in_channels, out_channels, kernel_size,...)\n self.dropout = nn.Dropout(p=0.1)\n self.fc1 = nn.Linear(len(Ks) * Co, c)\n\n def forward(self, x):\n # x: (batch_size, token_num, embedding_dim)\n x = x.unsqueeze(1) # (batch_size, 1, token_num, embedding_dim)\n x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] # [(N, Co, W), ...]*len(Ks)\n x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(N, Co), ...]*len(Ks)\n x = torch.cat(x, 1)\n\n # x = self.dropout(x) # (N, len(Ks)*Co)\n codes = F.normalize(self.fc1(x), p=2, dim=1) # (N, C)\n return codes\n\n\nclass RnnMapping(nn.Module):\n\n def __init__(self, z, c, num_layers=1, use_bi_gru=True):\n '''\n z: image patch dim\n c: final embedding dim\n '''\n super(RnnMapping, self).__init__()\n self.use_bi_gru = use_bi_gru\n self.rnn = nn.GRU(z, c, num_layers, batch_first=True, bidirectional=use_bi_gru)\n\n def forward(self, x):\n lengths = [36] * x.size(0)\n packed = pack_padded_sequence(x, lengths, batch_first=True)\n\n # Forward propagate RNN\n out, _ = self.rnn(packed)\n\n # Reshape *final* output to (batch_size, hidden_size)\n padded = pad_packed_sequence(out, batch_first=True)\n emb, _ = padded\n\n if self.use_bi_gru:\n emb = (emb[:,:,:emb.size(2)/2] + emb[:,:,emb.size(2)/2:])/2\n\n embed = torch.mean(emb, 1) # (batch_size, final_dims)\n codes = F.normalize(embed, p=2, dim=1) # (N, C)\n return codes\n\n\nclass TransformerMapping(nn.Module):\n \"\"\" Self-attention layer for image branch\n \"\"\"\n def __init__(self, opt):\n super(TransformerMapping, self).__init__()\n self.opt = opt\n bert_config = BertConfig.from_json_file(opt.trans_cfg)\n self.layer = bert.BERTLayer(bert_config)\n self.mapping = nn.Linear(opt.img_dim, bert_config.hidden_size)\n\n self.mapping2 = nn.Linear(bert_config.hidden_size, opt.final_dims)\n\n def forward(self, x):\n # x: (batch_size, patch_num, img_dim)\n x = self.mapping(x) # x: (batch_size, patch_num, final_dims)\n attention_mask = torch.ones(x.size(0), x.size(1))\n if torch.cuda.is_available():\n attention_mask = attention_mask.cuda()\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = extended_attention_mask.float()\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n hidden_states = self.layer(x, extended_attention_mask)\n hidden_states = self.mapping2(hidden_states)\n embed = torch.mean(hidden_states, 1) # (batch_size, final_dims)\n codes = F.normalize(embed, p=2, dim=1) # (N, C)\n # codes=F.tanh(embed)\n return codes\n\n\nclass FcMapping(nn.Module):\n \"\"\" MLP for image branch.\n \"\"\"\n def __init__(self, opt):\n super(FcMapping, self).__init__()\n self.fc1 = nn.Linear(opt.img_dim, opt.final_dims)\n # self.fc2 = nn.Linear(opt.final_dims*2, opt.final_dims)\n\n def forward(self, x):\n # x: (batch_size, patch_num, img_dim)\n x = self.fc1(x)\n # x = F.relu(x)\n # x 
= self.fc2(x)\n embed = torch.mean(x, 1) # (batch_size, final_dims)\n codes = F.normalize(embed, p=2, dim=1)\n return codes\n\n\n\nclass TransformerBinaryMapping(nn.Module):\n \"\"\" Self-attention layer for image branch\n \"\"\"\n def __init__(self, opt):\n super(TransformerBinaryMapping, self).__init__()\n self.opt = opt\n bert_config = BertConfig.from_json_file(opt.trans_cfg)\n self.layer = bert.BERTLayer(bert_config)\n self.mapping = nn.Linear(opt.img_dim, bert_config.hidden_size)\n\n self.mapping2 = nn.Linear(bert_config.hidden_size, opt.final_dims)\n\n def forward(self, x):\n # x: (batch_size, patch_num, img_dim)\n x = self.mapping(x) # x: (batch_size, patch_num, final_dims)\n attention_mask = torch.ones(x.size(0), x.size(1))\n if torch.cuda.is_available():\n attention_mask = attention_mask.cuda()\n extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)\n extended_attention_mask = extended_attention_mask.float()\n extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0\n hidden_states = self.layer(x, extended_attention_mask)\n hidden_states = self.mapping2(hidden_states) #batch+size, final_dims\n\n hidden_states=torch.softmax(hidden_states,dim=-1)\n embed = torch.mean(hidden_states, 1) # (batch_size, final_dims)\n\n median,m_idx=torch.median(embed,dim=-1)\n hash_code=embed-(median.unsqueeze(1)+1e-8)\n codes=torch.tanh(hash_code*100)\n\n\n return codes\n\n\nclass ResNetEncoder(nn.Module):\n def __init__(self, opt):\n super(ResNetEncoder,self).__init__()\n self.code_len=opt.final_dims\n self.backbone=resnet50(pretrained=True)\n fc_input_dim=self.backbone.fc.in_features\n self.backbone.fc=nn.Linear(in_features=fc_input_dim,out_features=self.code_len)\n\n def forward(self, x):\n embed=self.backbone(x)\n\n median, m_idx = torch.median(embed, dim=-1)\n hash_code = embed - (median.unsqueeze(1) + 1e-8)\n\n codes=torch.tanh(hash_code*10)\n return codes\n\n\n\n\n",
"id": "1695159",
"language": "Python",
"matching_score": 1.51581871509552,
"max_stars_count": 2,
"path": "image_net.py"
},
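Both transformer mappings above convert a (batch, seq) padding mask into an additive attention mask before calling the BERT layer. A tiny sketch of just that conversion (the surrounding BERT layer is omitted):

```python
# Minimal sketch: turn a 1/0 padding mask into an additive attention mask of
# shape (B, 1, 1, seq): 0 for visible positions, -10000 for masked positions,
# so it can be added directly to the attention logits before the softmax.
import torch

def extended_attention_mask(mask):
    ext = mask.unsqueeze(1).unsqueeze(2).float()    # (B, 1, 1, seq)
    return (1.0 - ext) * -10000.0

mask = torch.tensor([[1, 1, 1, 0, 0],
                     [1, 1, 0, 0, 0]])
print(extended_attention_mask(mask).shape)          # torch.Size([2, 1, 1, 5])
```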
{
"content": "# coding=utf-8\n# Copyleft 2019 project LXRT.\n\nimport torch.nn as nn\nimport torch\nfrom param import args\nfrom ssrp.entry_ssrp import SSRP\nfrom lxrt.modeling import BertLayerNorm, GeLU\n\n# Max length including <bos> and <eos>\nMAX_VQA_LENGTH = 36\n\n\nclass VQAModel_SSRP(nn.Module):\n def __init__(self, num_answers):\n super().__init__()\n\n # Build ssrp encoder\n self.ssrp_encoder = SSRP(\n args,\n max_seq_length=MAX_VQA_LENGTH\n )\n feats_dim = self.ssrp_encoder.feats_dim\n probe_dim = self.ssrp_encoder.probe_dim\n hid_dim = 768\n\n self.probe_feats_trans = nn.Sequential(\n nn.Linear(in_features=probe_dim * 2, out_features=probe_dim),\n GeLU(),\n BertLayerNorm(probe_dim, eps=1e-12),\n nn.Linear(probe_dim, hid_dim)\n )\n\n self.g_align = nn.Sequential(\n nn.Linear(in_features=feats_dim * 2, out_features=feats_dim),\n GeLU(),\n BertLayerNorm(feats_dim, eps=1e-12),\n nn.Linear(feats_dim, hid_dim)\n )\n\n self.fq = nn.Sequential(\n nn.Linear(in_features=hid_dim * 2, out_features=hid_dim * 2),\n GeLU(),\n BertLayerNorm(hid_dim * 2, eps=1e-12),\n nn.Linear(hid_dim * 2, hid_dim * 2)\n )\n\n # VQA Answer heads\n self.logit_fc = nn.Sequential(\n nn.Linear(hid_dim * 2, hid_dim * 2),\n GeLU(),\n BertLayerNorm(hid_dim * 2, eps=1e-12),\n nn.Linear(hid_dim * 2, num_answers)\n )\n self.probe_feats_trans.apply(self.ssrp_encoder.encoder.init_bert_weights)\n self.g_align.apply(self.ssrp_encoder.encoder.init_bert_weights)\n self.fq.apply(self.ssrp_encoder.encoder.init_bert_weights)\n self.logit_fc.apply(self.ssrp_encoder.encoder.init_bert_weights)\n\n def forward(self, feat, pos, sent):\n \"\"\"\n b -- batch_size, o -- object_number, f -- visual_feature_size\n\n :param feat: (b, o, f)\n :param pos: (b, o, 4)\n :param sent: (b,) Type -- list of string\n :param leng: (b,) Type -- int numpy array\n :return: (b, num_answer) The logit of each answers.\n \"\"\"\n # x = self.lxrt_encoder(sent, (feat, pos))\n # logit = self.logit_fc(x)\n\n (lang_output, visn_output), pooled_output, (\n vis_probe, lang_probe, vis_probe_vec, lang_probe_vec) = self.ssrp_encoder(sent, feat,\n visual_attention_mask=None, pos=pos)\n\n B, _, _ = vis_probe.size()\n\n v_ = visn_output.mean(dim=1)\n f_align = self.g_align(torch.cat([v_, pooled_output], dim=-1))\n\n f_probe = self.probe_feats_trans(torch.cat([vis_probe.view(B, -1), lang_probe.view(B, -1)], dim=-1))\n\n q = self.fq(torch.cat((f_align, f_probe), dim=-1))\n logit = self.logit_fc(q)\n return logit\n",
"id": "11847714",
"language": "Python",
"matching_score": 1.8390145301818848,
"max_stars_count": 1,
"path": "src/tasks/vqa_model_ssrp.py"
},
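VQAModel_SSRP above fuses an alignment feature and a probe feature by concatenation followed by small MLPs and an answer head. A minimal sketch of that fusion pattern, using standard nn.GELU/nn.LayerNorm as stand-ins for the repo's GeLU/BertLayerNorm and a hypothetical answer-vocabulary size:

```python
# Minimal sketch (hypothetical dimensions): concatenate two feature vectors,
# pass them through an MLP, and classify with an answer head, as in VQAModel_SSRP.
import torch
import torch.nn as nn

hid = 768
num_answers = 100                        # hypothetical answer-vocabulary size
align_feat = torch.randn(8, hid)         # stand-in for the g_align output
probe_feat = torch.randn(8, hid)         # stand-in for the probe_feats_trans output

fq = nn.Sequential(nn.Linear(hid * 2, hid * 2), nn.GELU(), nn.LayerNorm(hid * 2))
logit_fc = nn.Sequential(nn.Linear(hid * 2, hid * 2), nn.GELU(),
                         nn.LayerNorm(hid * 2), nn.Linear(hid * 2, num_answers))

q = fq(torch.cat([align_feat, probe_feat], dim=-1))
logits = logit_fc(q)
print(logits.shape)                      # torch.Size([8, num_answers])
```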
{
"content": "import torch\r\nimport torch.nn as nn\r\nfrom torch import Tensor\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass Loss_S_Probe(nn.Module):\r\n def __init__(self, ) -> None:\r\n super(Loss_S_Probe, self).__init__()\r\n\r\n def forward(self, input: Tensor, input_mask: Tensor, target: Tensor):\r\n '''\r\n :param input: predict probe (B,max_seq_len,max_seq_len)\r\n :param target: parsed tree probe (B,max_seq_len,max_seq_len)\r\n :return: loss scalar\r\n '''\r\n loss = 0.\r\n B, max_len, _ = input.size()\r\n if B == 0:\r\n return loss\r\n\r\n for i in range(B):\r\n raw_sent_len = input_mask[i].sum() - 2\r\n if raw_sent_len<=0: # 有些sample的句子是空的,会导致出现nan错误,因此需要判断是否句子为空\r\n loss+=0\r\n continue\r\n cur_loss = torch.abs(target[i, :raw_sent_len, :raw_sent_len] - input[i, 1:1 + raw_sent_len,\r\n 1:1 + raw_sent_len] ** 2).mean() # 删除CLS 和 SEP token的结果\r\n loss += cur_loss\r\n return loss / B\r\n # assert input.size() == target.size()\r\n # B, max_seq_len, _ = input.size()\r\n # loss = torch.abs(input - target).mean()\r\n # return loss\r\n\r\n\r\nclass Loss_SCL(nn.Module):\r\n '''\r\n this loss function is ported from:\r\n https://github.com/sthalles/SimCLR/blob/1848fc934ad844ae630e6c452300433fe99acfd9/simclr.py#L76\r\n '''\r\n\r\n def __init__(self, temperature=0.07, n_views=2) -> None:\r\n super(Loss_SCL, self).__init__()\r\n self.n_views = n_views # 'Number of views for contrastive learning training. default=2\r\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\r\n self.temperature = temperature\r\n self.criterion = nn.CrossEntropyLoss().to(self.device)\r\n\r\n def forward(self, vis_probe_vec: Tensor, sent_probe_vec: Tensor):\r\n B, dim = vis_probe_vec.size()\r\n assert B % self.n_views == 0\r\n self.batch_size = B // self.n_views\r\n\r\n vis_logits, vis_labels = self.info_nce_loss(vis_probe_vec)\r\n vis_loss = self.criterion(vis_logits, vis_labels)\r\n\r\n sent_logits, sent_labels = self.info_nce_loss(sent_probe_vec)\r\n sent_loss = self.criterion(sent_logits, sent_labels)\r\n return vis_loss + sent_loss\r\n\r\n def info_nce_loss(self, features):\r\n labels = torch.cat([torch.arange(self.batch_size) for _ in range(self.n_views)],\r\n dim=0) # [0,1,2,3, for n_views times] if batch size =4\r\n labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()\r\n labels = labels.cuda()\r\n\r\n features = F.normalize(features, dim=1)\r\n\r\n similarity_matrix = torch.matmul(features, features.T)\r\n # assert similarity_matrix.shape == (\r\n # self.args.n_views * self.args.batch_size, self.args.n_views * self.args.batch_size)\r\n # assert similarity_matrix.shape == labels.shape\r\n\r\n # discard the main diagonal from both: labels and similarities matrix\r\n mask = torch.eye(labels.shape[0], dtype=torch.long).bool().to(self.device)\r\n labels = labels[~mask].view(labels.shape[0], -1)\r\n similarity_matrix = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)\r\n # assert similarity_matrix.shape == labels.shape\r\n\r\n # select and combine multiple positives\r\n positives = similarity_matrix[labels.bool()].view(labels.shape[0], -1)\r\n\r\n # select only the negatives the negatives\r\n negatives = similarity_matrix[~labels.bool()].view(similarity_matrix.shape[0], -1)\r\n\r\n logits = torch.cat([positives, negatives], dim=1)\r\n labels = torch.zeros(logits.shape[0], dtype=torch.long).to(self.device)\r\n\r\n logits = logits / self.temperature\r\n return logits, labels\r\n\r\n\r\nclass Loss_XCL(nn.Module):\r\n def __init__(self, temperature=0.07, n_views=2) -> None:\r\n 
super(Loss_XCL, self).__init__()\r\n self.n_views = n_views # 'Number of views for contrastive learning training. default=2\r\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\r\n self.temperature = temperature\r\n self.criterion = nn.CrossEntropyLoss().to(self.device)\r\n\r\n def forward(self, vis_probe_vec: Tensor, sent_probe_vec: Tensor):\r\n B, dim = vis_probe_vec.size()\r\n assert B % self.n_views == 0\r\n self.batch_size = B // self.n_views\r\n\r\n vis_logits, vis_labels = self.info_nce_loss(vis_probe_vec, sent_probe_vec)\r\n vis_loss = self.criterion(vis_logits, vis_labels)\r\n\r\n sent_logits, sent_labels = self.info_nce_loss(sent_probe_vec, vis_probe_vec)\r\n sent_loss = self.criterion(sent_logits, sent_labels)\r\n return vis_loss + sent_loss\r\n\r\n def info_nce_loss(self, query_features, ref_features):\r\n labels = torch.cat([torch.arange(self.batch_size) for _ in range(self.n_views)],\r\n dim=0) # [0,1,2,3, for n_views times] if batch size =4\r\n labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()\r\n labels = labels.cuda()\r\n\r\n query_features = F.normalize(query_features, dim=1)\r\n ref_features = F.normalize(ref_features, dim=1)\r\n\r\n similarity_matrix = torch.matmul(query_features, ref_features.T)\r\n # assert similarity_matrix.shape == (\r\n # self.args.n_views * self.args.batch_size, self.args.n_views * self.args.batch_size)\r\n # assert similarity_matrix.shape == labels.shape\r\n\r\n # discard the main diagonal from both: labels and similarities matrix\r\n mask = torch.eye(labels.shape[0], dtype=torch.long).bool().to(self.device)\r\n labels = labels[~mask].view(labels.shape[0], -1)\r\n similarity_matrix = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)\r\n # assert similarity_matrix.shape == labels.shape\r\n\r\n # select and combine multiple positives\r\n positives = similarity_matrix[labels.bool()].view(labels.shape[0], -1)\r\n\r\n # select only the negatives the negatives\r\n negatives = similarity_matrix[~labels.bool()].view(similarity_matrix.shape[0], -1)\r\n\r\n logits = torch.cat([positives, negatives], dim=1)\r\n labels = torch.zeros(logits.shape[0], dtype=torch.long).to(self.device)\r\n\r\n logits = logits / self.temperature\r\n return logits, labels\r\n\r\n\r\n\r\n\r\n",
"id": "9803698",
"language": "Python",
"matching_score": 2.1482491493225098,
"max_stars_count": 1,
"path": "src/lxrt/loss_functions.py"
},
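Loss_SCL and Loss_XCL above share the same InfoNCE construction: with n_views views of each sample stacked in one batch, build the label matrix, drop the diagonal, and treat the matching view as the single positive against all other entries. A condensed, self-contained version of that logit construction (function name is illustrative):

```python
# Minimal sketch of the InfoNCE logits/targets used by Loss_SCL above.
import torch
import torch.nn.functional as F

def info_nce_logits(features, n_views=2, temperature=0.07):
    device = features.device
    batch = features.shape[0] // n_views
    labels = torch.cat([torch.arange(batch) for _ in range(n_views)]).to(device)
    labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).float()   # same-sample pairs

    features = F.normalize(features, dim=1)
    sim = features @ features.t()

    mask = torch.eye(labels.shape[0], dtype=torch.bool, device=device)
    labels = labels[~mask].view(labels.shape[0], -1)                # drop self-similarity
    sim = sim[~mask].view(sim.shape[0], -1)

    positives = sim[labels.bool()].view(labels.shape[0], -1)        # one positive per row
    negatives = sim[~labels.bool()].view(sim.shape[0], -1)

    logits = torch.cat([positives, negatives], dim=1) / temperature
    targets = torch.zeros(logits.shape[0], dtype=torch.long, device=device)
    return logits, targets

feats = torch.randn(8, 128)                    # 4 samples x 2 views
logits, targets = info_nce_logits(feats)
print(F.cross_entropy(logits, targets).item())
```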
{
"content": "import torch\nimport torch.nn as nn\nimport numpy as np\n\n\ndef pdist(x1, x2):\n \"\"\"\n compute euclidean distance between two tensors\n x1: Tensor of shape (h1, w)\n x2: Tensor of shape (h2, w)\n Return pairwise euclidean distance for each row vector in x1, x2 as\n a Tensor of shape (h1, h2)\n \"\"\"\n x1_square = torch.sum(x1 * x1, 1).view(-1, 1)\n x2_square = torch.sum(x2 * x2, 1).view(1, -1)\n return torch.sqrt(x1_square - 2 * torch.mm(x1, x2.transpose(0, 1)) + x2_square + 1e-4)\n\n\ndef pdist_cos(x1, x2):\n \"\"\"\n compute cosine similarity between two tensors\n x1: Tensor of shape (h1, w)\n x2: Tensor of shape (h2, w)\n Return pairwise cosine distance for each row vector in x1, x2 as\n a Tensor of shape (h1, h2)\n \"\"\"\n x1_norm = x1 / x1.norm(dim=1)[:, None]\n x2_norm = x2 / x2.norm(dim=1)[:, None]\n res = torch.mm(x1_norm, x2_norm.transpose(0, 1))\n mask = torch.isnan(res)\n res[mask] = 0\n return res\n\n\ndef dist_l2(x1, x2):\n \"\"\"\n compute l2 distance between two tensors\n x1: Tensor of shape (h1, w)\n x2: Tensor of shape (h2, w)\n Return pairwise l2 distance for each row vector in x1, x2 as\n a Tensor of shape (h1, h2)\n \"\"\"\n x1=x1.unsqueeze(0)\n x2=x2.unsqueeze(1)\n diff_square=(x1-x2)**2 # h1,h2,w\n diff_sum=diff_square.sum(dim=-1)\n l2_dist=diff_sum**0.5\n return l2_dist\n\n\n\ndef cosine_similarity(x1, x2, dim=1, eps=1e-8):\n \"\"\"Returns cosine similarity between x1 and x2, computed along dim.\"\"\"\n w12 = torch.sum(x1 * x2, dim)\n w1 = torch.norm(x1, 2, dim)\n w2 = torch.norm(x2, 2, dim)\n return (w12 / (w1 * w2).clamp(min=eps)).squeeze()\n\n\n\n\n\nclass ContrastiveLoss(nn.Module):\n \"\"\"\n Compute contrastive loss\n \"\"\"\n\n def __init__(self, opt, margin=0, max_violation=False):\n super(ContrastiveLoss, self).__init__()\n self.opt = opt\n self.margin = margin\n self.max_violation = max_violation\n\n\n def forward(self, im, s, s_l, sample_index):\n scores = pdist_cos(im, s) # 计算两个矩阵之间的cosine similarity\n\n diagonal = scores.diag().view(im.size(0), 1)\n d1 = diagonal.expand_as(scores)\n d2 = diagonal.t().expand_as(scores)\n\n # compare every diagonal score to scores in its column\n # caption retrieval\n cost_s = (self.margin + scores - d1).clamp(min=0)\n # compare every diagonal score to scores in its row\n # image retrieval\n cost_im = (self.margin + scores - d2).clamp(min=0)\n\n # clear diagonals\n I = torch.eye(scores.size(0)) > .5\n if torch.cuda.is_available():\n I = I.cuda()\n cost_s = cost_s.masked_fill_(I, 0)\n cost_im = cost_im.masked_fill_(I, 0)\n\n # keep the maximum violating negative for each query\n if self.max_violation:\n cost_s = cost_s.max(1)[0]\n cost_im = cost_im.max(0)[0]\n return cost_s.sum() + cost_im.sum()\n\n\nclass L2Loss(nn.Module):\n \"\"\"\n Compute L2 loss\n \"\"\"\n def __init__(self, opt, margin=0, max_violation=False):\n super(L2Loss, self).__init__()\n self.opt = opt\n self.margin = margin\n self.max_violation = max_violation\n\n def forward(self, im, s, s_l, ids):\n scores = dist_l2(im, s) # 计算两个矩阵之间的l2 distance, 越小越好\n\n diagonal = scores.diag().view(im.size(0), 1)\n d1 = diagonal.expand_as(scores)\n d2 = diagonal.t().expand_as(scores)\n\n # compare every diagonal score to scores in its column\n # caption retrieval\n cost_s = (self.margin + d1 - scores).clamp(min=0)\n # compare every diagonal score to scores in its row\n # image retrieval\n cost_im = (self.margin + d2 - scores ).clamp(min=0)\n\n # clear diagonals\n I = torch.eye(scores.size(0)) > .5\n if torch.cuda.is_available():\n I = I.cuda()\n cost_s 
= cost_s.masked_fill_(I, 0)\n cost_im = cost_im.masked_fill_(I, 0)\n\n # keep the maximum violating negative for each query\n if self.max_violation:\n cost_s = cost_s.max(1)[0]\n cost_im = cost_im.max(0)[0]\n return cost_s.sum() + cost_im.sum()\n\n\nclass NPairLoss(nn.Module):\n \"\"\"\n N-Pair loss\n <NAME>. \"Improved Deep Metric Learning with Multi-class N-pair Loss Objective,\" Advances in Neural Information\n Processing Systems. 2016.\n http://papers.nips.cc/paper/6199-improved-deep-metric-learning-with-multi-class-n-pair-loss-objective\n \"\"\"\n\n def __init__(self, l2_reg=0.02, max_violation=True):\n super(NPairLoss, self).__init__()\n self.l2_reg = l2_reg\n self.max_violation = max_violation\n\n def forward(self, im, s, s_l, ids):\n target = ids / 5\n n_negatives = self.get_n_pairs(target)\n\n loss_im = self.n_pair_loss(im, s, s[n_negatives])\n loss_s = self.n_pair_loss(s, im, im[n_negatives])\n\n losses = loss_im + loss_s\n\n return losses\n\n @staticmethod\n def get_n_pairs(labels):\n \"\"\"\n Get index of n-pairs and n-negatives\n :param labels: label vector of mini-batch\n :return: A tensor n_negatives (n, n-1)\n \"\"\"\n n_pairs = np.arange(len(labels))\n n_negatives = []\n for i in range(len(labels)):\n negative = np.concatenate([n_pairs[:i], n_pairs[i + 1:]])\n n_negatives.append(negative)\n\n n_negatives = np.array(n_negatives)\n\n return torch.LongTensor(n_negatives)\n\n def n_pair_loss(self, anchors, positives, negatives):\n \"\"\"\n Calculates N-Pair loss\n :param anchors: A torch.Tensor, (n, embedding_size)\n :param positives: A torch.Tensor, (n, embedding_size)\n :param negatives: A torch.Tensor, (n, n-1, embedding_size)\n :return: A scalar\n \"\"\"\n anchors = torch.unsqueeze(anchors, dim=1) # (n, 1, embedding_size)\n positives = torch.unsqueeze(positives, dim=1) # (n, 1, embedding_size)\n\n x = torch.matmul(anchors, (negatives - positives).transpose(1, 2)) # (n, 1, n-1)\n\n if not self.max_violation:\n x = torch.sum(torch.exp(x), 2) # (n, 1)\n loss = torch.mean(torch.log(1 + x))\n else:\n cost = x.max(2)[0]\n loss = torch.log(1 + cost).sum()\n return loss\n\n @staticmethod\n def l2_loss(anchors, positives):\n \"\"\"\n Calculates L2 norm regularization loss\n :param anchors: A torch.Tensor, (n, embedding_size)\n :param positives: A torch.Tensor, (n, embedding_size)\n :return: A scalar\n \"\"\"\n return torch.sum(anchors ** 2 + positives ** 2) / anchors.shape[0]\n\n\nclass AngularLoss(NPairLoss):\n \"\"\"\n Angular loss\n <NAME>. 
\"Deep Metric Learning with Angular Loss,\" CVPR, 2017\n https://arxiv.org/pdf/1708.01682.pdf\n \"\"\"\n\n def __init__(self, l2_reg=0.02, angle_bound=1., lambda_ang=2, max_violation=True):\n super(AngularLoss, self).__init__()\n self.l2_reg = l2_reg\n self.angle_bound = angle_bound\n self.lambda_ang = lambda_ang\n self.max_violation = max_violation\n\n def forward(self, im, s, s_l, indexs):\n target = indexs / 5\n n_negatives = self.get_n_pairs(target)\n\n loss_im = self.angular_loss(im, s, s[n_negatives])\n loss_s = self.angular_loss(s, im, im[n_negatives])\n\n losses = loss_im + loss_s\n\n return losses\n\n def angular_loss(self, anchors, positives, negatives, angle_bound=1.):\n \"\"\"\n Calculates angular loss\n :param anchors: A torch.Tensor, (n, embedding_size)\n :param positives: A torch.Tensor, (n, embedding_size)\n :param negatives: A torch.Tensor, (n, n-1, embedding_size)\n :param angle_bound: tan^2 angle\n :return: A scalar\n \"\"\"\n anchors = torch.unsqueeze(anchors, dim=1) # (n, 1, embedding_size)\n positives = torch.unsqueeze(positives, dim=1) # (n, 1, embedding_size)\n\n x = 4. * angle_bound * torch.matmul((anchors + positives), negatives.transpose(1, 2)) \\\n - 2. * (1. + angle_bound) * torch.matmul(anchors, positives.transpose(1, 2)) # (n, 1, n-1)\n\n if not self.max_violation:\n # Preventing overflow\n with torch.no_grad():\n t = torch.max(x, dim=2)[0]\n\n x = torch.exp(x - t.unsqueeze(dim=1))\n x = torch.log(torch.exp(-t) + torch.sum(x, 2))\n loss = torch.mean(t + x)\n else:\n cost = x.max(2)[0]\n loss = torch.log(1 + torch.exp(cost)).sum()\n\n return loss\n\n\nclass RePhraseLoss(nn.Module):\n \"\"\"\n Compute contrastive loss\n \"\"\"\n def __init__(self):\n super(RePhraseLoss, self).__init__()\n def forward(self, raw_caps,re_phrase_caps):\n B,R,D=re_phrase_caps.shape\n cos_m=torch.cosine_similarity(raw_caps.unsqueeze(1),re_phrase_caps,dim=-1) # size is B,R 越大越好\n re_phrase_loss= 1. - cos_m\n re_phrase_loss=re_phrase_loss.sum()\n return re_phrase_loss\n\n\n\n\nclass AdversaryLoss(nn.Module):\n \"\"\"\n Compute contrastive loss\n \"\"\"\n def __init__(self, margin=-0.8):\n super(AdversaryLoss, self).__init__()\n self.margin = margin\n\n def forward(self, raw_caps,adv_caps,imgs):\n B,A,D=adv_caps.shape\n cos_m=torch.cosine_similarity(raw_caps.unsqueeze(1),adv_caps,dim=-1) # size is B,A #越小越好\n adversary_loss=cos_m+self.margin\n adversary_loss=adversary_loss.clamp(min=0)\n adversary_loss=adversary_loss.sum()\n return adversary_loss\n\n\n\nclass AdversaryLossWithImg(nn.Module):\n \"\"\"\n Compute contrastive loss\n \"\"\"\n def __init__(self, margin=-0.8):\n super(AdversaryLossWithImg, self).__init__()\n self.margin = margin\n\n def forward(self, raw_caps,adv_caps,imgs):\n B,A,D=adv_caps.shape\n pair_cos=torch.cosine_similarity(imgs,raw_caps,dim=-1).unsqueeze(1) # B,1\n adv_cos=torch.cosine_similarity(imgs.unsqueeze(1),adv_caps,dim=-1) # size is B,A #越小越好\n\n diff=adv_cos-pair_cos+self.margin\n adv_loss=diff.clamp(0)\n # adv_loss=adv_loss.max(dim=-1)[0]\n adv_loss=adv_loss.sum()\n return adv_loss\n\n",
"id": "8495821",
"language": "Python",
"matching_score": 1.8858777284622192,
"max_stars_count": 2,
"path": "loss.py"
},
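The central term in loss.py above is the hinge-based contrastive loss with optional hardest-negative ("max violation") mining. A condensed functional sketch of that loss (the class wrapper and opt plumbing are omitted):

```python
# Minimal sketch of the max-violation contrastive loss used by ContrastiveLoss above.
import torch
import torch.nn.functional as F

def contrastive_loss(im, s, margin=0.2, max_violation=True):
    im = F.normalize(im, dim=1)
    s = F.normalize(s, dim=1)
    scores = im @ s.t()                                  # (B, B) cosine similarities
    diag = scores.diag().view(-1, 1)

    cost_s = (margin + scores - diag).clamp(min=0)       # caption-retrieval direction
    cost_im = (margin + scores - diag.t()).clamp(min=0)  # image-retrieval direction

    eye = torch.eye(scores.size(0), dtype=torch.bool, device=scores.device)
    cost_s = cost_s.masked_fill(eye, 0)                  # ignore the positive pairs
    cost_im = cost_im.masked_fill(eye, 0)

    if max_violation:                                    # keep only the hardest negative
        cost_s = cost_s.max(1)[0]
        cost_im = cost_im.max(0)[0]
    return cost_s.sum() + cost_im.sum()

print(contrastive_loss(torch.randn(16, 256), torch.randn(16, 256)).item())
```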
{
"content": "import torch\nimport torch.nn as nn\nimport torch.nn.init\nimport torchvision.models as models\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom torch.nn.utils.weight_norm import weight_norm\nimport torch.backends.cudnn as cudnn\nfrom torch.nn.utils.clip_grad import clip_grad_norm\nimport numpy as np\nfrom collections import OrderedDict\nimport torch.nn.functional as F\nimport math\nimport text_net\nimport loss\nimport image_net\n\nfrom IPython import embed\n\n\ndef l1norm(X, dim, eps=1e-8):\n \"\"\"L1-normalize columns of X\n \"\"\"\n norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps\n X = torch.div(X, norm)\n return X\n\n\ndef l2norm(X, dim, eps=1e-8):\n \"\"\"L2-normalize columns of X\n \"\"\"\n norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps\n X = torch.div(X, norm)\n return X\n\n\nclass SAEM(object):\n \"\"\"\n \"\"\"\n\n def __init__(self, opt):\n self.logger=None\n\n # Build Models\n self.grad_clip = opt.grad_clip\n self.txt_enc = text_net.BertMapping(opt)\n self.img_enc = image_net.TransformerMapping(opt)\n # self.img_enc = image_net.RnnMapping(opt.img_dim, opt.final_dims, 1)\n # self.img_enc = image_net.CnnMapping(opt.img_dim, opt.final_dims)\n\n if torch.cuda.is_available():\n self.txt_enc.cuda()\n self.img_enc.cuda()\n cudnn.benchmark = True\n\n # Loss and Optimizer\n self.criterion = loss.ContrastiveLoss(opt=opt, margin=opt.margin, max_violation=opt.max_violation)\n self.criterion2 = loss.AngularLoss()\n self.re_phrase_criterion = loss.RePhraseLoss()\n self.adv_criterion = loss.AdversaryLoss(margin=opt.adv_margin)\n\n # self.criterion = loss.L2Loss(opt=opt, margin=opt.margin, max_violation=opt.max_violation)\n # self.criterion2 = loss.AngularLoss()\n\n params = list(self.txt_enc.parameters())\n params += list(self.img_enc.parameters())\n params = filter(lambda p: p.requires_grad, params)\n self.params = params\n\n self.optimizer = torch.optim.Adam(params, lr=opt.learning_rate)\n\n self.Eiters = 0\n self.opt = opt\n\n def state_dict(self):\n if isinstance(self.img_enc, nn.DataParallel):\n state_dict = [self.img_enc.modules.state_dict(), self.txt_enc.modules.state_dict()]\n else:\n state_dict = [self.img_enc.state_dict(), self.txt_enc.state_dict()]\n return state_dict\n\n def load_state_dict(self, state_dict):\n self.img_enc.load_state_dict(state_dict[0])\n self.txt_enc.load_state_dict(state_dict[1])\n\n def use_data_parallel(self):\n self.txt_enc = nn.DataParallel(self.txt_enc)\n self.img_enc = nn.DataParallel(self.img_enc)\n\n def train_start(self):\n \"\"\"switch to train mode\n \"\"\"\n self.img_enc.train()\n self.txt_enc.train()\n\n def val_start(self):\n \"\"\"switch to evaluate mode\n \"\"\"\n self.img_enc.eval()\n self.txt_enc.eval()\n\n def bert_data(self, images, input_ids, attention_mask, token_type_ids, lengths, ids):\n return images, input_ids, attention_mask, token_type_ids, lengths, ids\n\n def enhanced_bert_data(self, image, \\\n raw_input_ids, raw_input_mask, raw_input_type_ids, \\\n re_phrase_ids, re_phrase_mask, re_phrase_type_ids, \\\n adv_ids, adv_mask, adv_type_ids, index, img_id):\n return index, img_id, image, \\\n raw_input_ids, raw_input_mask, raw_input_type_ids, \\\n re_phrase_ids, re_phrase_mask, re_phrase_type_ids, \\\n adv_ids, adv_mask, adv_type_ids\n\n def forward_emb(self, epoch, batch_data, volatile=False):\n \"\"\"Compute the image and caption embeddings\n \"\"\"\n # images, input_ids, attention_mask, token_type_ids, lengths, ids = self.bert_data(*batch_data)\n index, img_id, images, 
\\\n raw_input_ids, raw_input_mask, raw_input_type_ids, \\\n re_phrase_ids, re_phrase_mask, re_phrase_type_ids, \\\n adv_ids, adv_mask, adv_type_ids = self.enhanced_bert_data(*batch_data)\n\n B, R, L = re_phrase_ids.shape\n _, A, _ = adv_ids.shape\n\n # Set mini-batch dataset\n if torch.cuda.is_available():\n images = images.cuda()\n\n raw_input_ids = raw_input_ids.cuda()\n raw_input_mask = raw_input_mask.cuda()\n raw_input_type_ids = raw_input_type_ids.cuda()\n\n re_phrase_ids = re_phrase_ids.view(B * R, L).cuda()\n re_phrase_mask = re_phrase_mask.view(B * R, L).cuda()\n re_phrase_type_ids = re_phrase_type_ids.view(B * R, L).cuda()\n\n adv_ids = adv_ids.view(B * A, L).cuda()\n adv_mask = adv_mask.view(B * A, L).cuda()\n adv_type_ids = adv_type_ids.view(B * A, L).cuda()\n\n # input_ids = input_ids.cuda()\n # attention_mask = attention_mask.cuda()\n # token_type_ids = token_type_ids.cuda()\n # forward text\n # print('model input',input_ids.shape)\n raw_cap_code = self.txt_enc(raw_input_ids, raw_input_mask, raw_input_type_ids, None)\n if epoch > 5:\n re_phrase_code = self.txt_enc(re_phrase_ids, re_phrase_mask, re_phrase_type_ids, None).view(B, R, -1)\n adv_code = self.txt_enc(adv_ids, adv_mask, adv_type_ids, None).view(B, A, -1)\n else:\n re_phrase_code=None\n adv_code=None\n\n cap_lens = None\n\n # forward image\n img_code = self.img_enc(images)\n\n return img_id, img_code, raw_cap_code, re_phrase_code, adv_code, cap_lens\n\n def forward_loss(self, epoch, img_emb, cap_emb, re_phrase_emb, adv_emb, cap_len, ids, **kwargs):\n \"\"\"Compute the loss given pairs of image and caption embeddings\n \"\"\"\n # alpha = 0\n loss1 = self.criterion(img_emb, cap_emb, cap_len, ids)\n\n\n if epoch>5:\n re_phrase_loss = self.re_phrase_criterion(cap_emb, re_phrase_emb)\n adversary_loss = self.adv_criterion(cap_emb, adv_emb)\n else:\n re_phrase_loss=torch.tensor(0.0)\n adversary_loss=torch.tensor(0.0)\n\n # alpha = 1\n if epoch > 20 or self.criterion2 is None:\n alpha = 0\n loss2 = torch.tensor(0.0)\n else:\n alpha = 0.5 * (0.1 ** (epoch // 5))\n # loss2 = self.criterion2(img_emb , cap_emb , cap_len, ids)\n loss2 = self.criterion2(img_emb / img_emb.norm(dim=1)[:, None], cap_emb / cap_emb.norm(dim=1)[:, None],\n cap_len, ids)\n\n self.logger.update('Loss1', loss1.item(), img_emb.size(0))\n self.logger.update('Loss2', loss2.item(), img_emb.size(0))\n self.logger.update('rep_Loss', re_phrase_loss.item(), img_emb.size(0))\n self.logger.update('adv_Loss', adversary_loss.item(), img_emb.size(0))\n\n l2_reg = torch.tensor(0., dtype=torch.float)\n if torch.cuda.is_available():\n l2_reg = l2_reg.cuda()\n no_decay = ['bias', 'gamma', 'beta']\n for n, p in self.img_enc.named_parameters():\n en = n.split('.')[-1]\n if en not in no_decay:\n l2_reg += torch.norm(p)\n # for n, p in self.txt_enc.mapping.named_parameters():\n # en = n.split('.')[-1]\n # if en not in no_decay:\n # l2_reg += torch.norm(p)\n # for n, p in self.txt_enc.layer.named_parameters():\n # en = n.split('.')[-1]\n # if en not in no_decay:\n # l2_reg += torch.norm(p)\n reg_loss = 0.01 * l2_reg\n\n beta = 0.5\n gamma = 0.5\n\n return loss1 + alpha * loss2 + beta * re_phrase_loss + gamma * adversary_loss + reg_loss\n # return loss2 + reg_loss\n\n def train_emb(self, epoch, batch_data, ids=None, *args):\n \"\"\"One training step given images and captions.\n \"\"\"\n self.Eiters += 1\n self.logger.update('Eit', self.Eiters)\n self.logger.update('lr', self.optimizer.param_groups[0]['lr'])\n\n # compute the embeddings\n img_ids, img_code, raw_cap_code, 
re_phrase_code, adv_code, cap_lens = self.forward_emb(epoch, batch_data)\n\n # measure accuracy and record loss\n self.optimizer.zero_grad()\n loss = self.forward_loss(epoch, img_code, raw_cap_code, re_phrase_code, adv_code, cap_lens, img_ids)\n\n # compute gradient and do SGD step\n loss.backward()\n if self.grad_clip > 0:\n clip_grad_norm(self.params, self.grad_clip)\n self.optimizer.step()\n",
"id": "7640580",
"language": "Python",
"matching_score": 7.416121959686279,
"max_stars_count": 2,
"path": "SAEM_model.py"
},
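The SAEM_model.py entry above composes its objective from several loss modules (`loss.ContrastiveLoss`, `loss.AngularLoss`, `loss.RePhraseLoss`, `loss.AdversaryLoss`) whose definitions are not included in this dump. As a point of reference only, a minimal sketch of a margin-based bidirectional ranking loss with optional hardest-negative mining — the behaviour suggested by the `margin` and `max_violation` arguments — could look like the code below. The class name and details are illustrative, not the repository's actual implementation, and the embeddings are assumed to be L2-normalized.

import torch
import torch.nn as nn

class MarginRankingContrastiveLoss(nn.Module):
    """Illustrative VSE++-style bidirectional ranking loss (not the repo's loss.py)."""

    def __init__(self, margin=0.2, max_violation=False):
        super().__init__()
        self.margin = margin
        self.max_violation = max_violation

    def forward(self, img_emb, cap_emb):
        # Similarity matrix; with L2-normalized embeddings this is cosine similarity.
        scores = img_emb @ cap_emb.t()                   # (B, B)
        diagonal = scores.diag().view(-1, 1)             # scores of the matched pairs

        # Hinge terms against the matched pair, for both retrieval directions.
        cost_s = (self.margin + scores - diagonal).clamp(min=0)       # caption retrieval
        cost_im = (self.margin + scores - diagonal.t()).clamp(min=0)  # image retrieval

        # The matched pair itself is not a negative.
        mask = torch.eye(scores.size(0), dtype=torch.bool, device=scores.device)
        cost_s = cost_s.masked_fill(mask, 0)
        cost_im = cost_im.masked_fill(mask, 0)

        if self.max_violation:                           # keep only the hardest negative
            cost_s = cost_s.max(1)[0]
            cost_im = cost_im.max(0)[0]
        return cost_s.sum() + cost_im.sum()

criterion = MarginRankingContrastiveLoss(margin=0.2, max_violation=True)
img_code = nn.functional.normalize(torch.randn(8, 256), dim=1)
cap_code = nn.functional.normalize(torch.randn(8, 256), dim=1)
print(criterion(img_code, cap_code).item())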
{
"content": "import torch\nimport torch.nn as nn\nimport torch.nn.init\nimport torchvision.models as models\nfrom torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence\nfrom torch.nn.utils.weight_norm import weight_norm\nimport torch.backends.cudnn as cudnn\nfrom torch.nn.utils.clip_grad import clip_grad_norm\nimport numpy as np\nfrom collections import OrderedDict\nimport torch.nn.functional as F\nimport math\nimport text_net\nimport loss\nimport image_net\n\nfrom IPython import embed\n\n\ndef l1norm(X, dim, eps=1e-8):\n \"\"\"L1-normalize columns of X\n \"\"\"\n norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps\n X = torch.div(X, norm)\n return X\n\n\ndef l2norm(X, dim, eps=1e-8):\n \"\"\"L2-normalize columns of X\n \"\"\"\n norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps\n X = torch.div(X, norm)\n return X\n\n\nclass ReRankSAEM(object):\n \"\"\"\n \"\"\"\n def __init__(self, opt):\n self.logger = None\n\n # Build Models\n self.grad_clip = opt.grad_clip\n\n self.txt_enc = text_net.BertBinaryMapping(opt)\n self.img_enc = image_net.TransformerBinaryMapping(opt)\n self.concept_enc = nn.Sequential(nn.Linear(in_features=opt.final_dims, out_features=1024),\n nn.ReLU(),\n nn.Linear(in_features=1024, out_features=opt.concept_num),\n nn.Sigmoid()) if opt.need_concept_label else None\n\n # self.img_enc = image_net.RnnMapping(opt.img_dim, opt.final_dims, 1)\n # self.img_enc = image_net.CnnMapping(opt.img_dim, opt.final_dims)\n\n if torch.cuda.is_available():\n self.txt_enc.cuda()\n self.img_enc.cuda()\n self.concept_enc.cuda() if self.concept_enc is not None else None\n cudnn.benchmark = True\n\n # Loss and Optimizer\n self.criterion = loss.ContrastiveLoss(opt=opt, margin=opt.margin, max_violation=opt.max_violation)\n self.criterion2 = loss.AngularLoss()\n self.re_phrase_criterion = loss.RePhraseLoss()\n self.concept_loss = nn.BCELoss()\n\n # self.adv_criterion = loss.AdversaryLoss(margin=opt.adv_margin)\n self.adv_criterion=loss.AdversaryLossWithImg(margin=opt.adv_margin)\n\n\n # self.criterion = loss.L2Loss(opt=opt, margin=opt.margin, max_violation=opt.max_violation)\n # self.criterion2 = loss.AngularLoss()\n\n params = list(self.txt_enc.parameters())\n params += list(self.img_enc.parameters())\n if self.concept_enc is not None :\n params+=list(self.concept_enc.parameters())\n params = filter(lambda p: p.requires_grad, params)\n self.params = params\n\n self.optimizer = torch.optim.Adam(params, lr=opt.learning_rate)\n\n self.Eiters = 0\n self.opt = opt\n\n def state_dict(self):\n if isinstance(self.img_enc, nn.DataParallel):\n state_dict = [self.img_enc.modules.state_dict(), self.txt_enc.modules.state_dict()]\n if self.concept_enc is not None:\n state_dict.append(self.concept_enc.modules.state_dict())\n else:\n state_dict = [self.img_enc.state_dict(), self.txt_enc.state_dict()]\n if self.concept_enc is not None:\n state_dict.append(self.concept_enc.state_dict())\n return state_dict\n\n def load_state_dict(self, state_dict):\n self.img_enc.load_state_dict(state_dict[0])\n self.txt_enc.load_state_dict(state_dict[1])\n if len(state_dict)>=3 and self.concept_enc is not None:\n self.concept_enc.load_state_dict(state_dict[2])\n\n def use_data_parallel(self):\n self.txt_enc = nn.DataParallel(self.txt_enc)\n self.img_enc = nn.DataParallel(self.img_enc)\n if self.concept_enc is not None:\n self.concept_enc = nn.DataParallel(self.concept_enc)\n\n def train_start(self):\n \"\"\"switch to train mode\n \"\"\"\n self.img_enc.train()\n self.txt_enc.train()\n if self.concept_enc is 
not None :\n self.concept_enc.train()\n\n def val_start(self):\n \"\"\"switch to evaluate mode\n \"\"\"\n self.img_enc.eval()\n self.txt_enc.eval()\n if self.concept_enc is not None :\n self.concept_enc.eval()\n\n def unpack_batch_data(self, *batch_data):\n\n if torch.cuda.is_available():\n tmp = []\n for x in batch_data:\n if isinstance(x, torch.Tensor):\n tmp.append(x.cuda())\n else:\n tmp.append(x)\n else:\n tmp = batch_data\n\n raw_image, roi_feature, concept_label, \\\n raw_input_ids, raw_input_mask, raw_input_type_ids, \\\n re_phrase_ids, re_phrase_mask, re_phrase_type_ids, \\\n adv_ids, adv_mask, adv_type_ids, \\\n img_id, sample_index = tmp\n\n return raw_image, roi_feature, concept_label, \\\n raw_input_ids, raw_input_mask, raw_input_type_ids, \\\n re_phrase_ids, re_phrase_mask, re_phrase_type_ids, \\\n adv_ids, adv_mask, adv_type_ids, \\\n img_id, sample_index\n\n def forward_emb(self, epoch, batch_data, volatile=False):\n \"\"\"Compute the image and caption embeddings\n \"\"\"\n # images, input_ids, attention_mask, token_type_ids, lengths, ids = self.bert_data(*batch_data)\n raw_image, roi_feature, concept_label, \\\n raw_input_ids, raw_input_mask, raw_input_type_ids, \\\n re_phrase_ids, re_phrase_mask, re_phrase_type_ids, \\\n adv_ids, adv_mask, adv_type_ids, \\\n img_id, sample_index = self.unpack_batch_data(*batch_data)\n\n batch_size=len(img_id)\n raw_cap_code = None\n re_phrase_code = torch.zeros((batch_size,1))\n adv_code = torch.zeros((batch_size,1))\n img_code = None\n cap_lens = None\n\n # forward image\n img_code = self.img_enc(roi_feature)\n\n raw_cap_code = self.txt_enc(raw_input_ids, raw_input_mask, raw_input_type_ids, None)\n if epoch > self.opt.adversary_step:\n if self.opt.need_rephrase_data:\n B, R, L = re_phrase_ids.shape\n re_phrase_ids = re_phrase_ids.view(B * R, L)\n re_phrase_mask = re_phrase_mask.view(B * R, L)\n re_phrase_type_ids = re_phrase_type_ids.view(B * R, L)\n re_phrase_code = self.txt_enc(re_phrase_ids, re_phrase_mask, re_phrase_type_ids, None).view(B, R, -1)\n if self.opt.need_adversary_data:\n B, A, L = adv_ids.shape\n adv_ids = adv_ids.view(B * A, L)\n adv_mask = adv_mask.view(B * A, L)\n adv_type_ids = adv_type_ids.view(B * A, L)\n adv_code = self.txt_enc(adv_ids, adv_mask, adv_type_ids, None).view(B, A, -1)\n\n concept_pred=None\n if self.opt.need_concept_label:\n concept_pred = self.concept_enc(raw_cap_code)\n concept_data = [concept_pred, concept_label]\n\n return sample_index, img_code, raw_cap_code, re_phrase_code, adv_code, concept_data, cap_lens\n\n def forward_loss(self, epoch, img_emb, cap_emb, re_phrase_emb, adv_emb, cap_len, sample_index, **kwargs):\n \"\"\"Compute the loss given pairs of image and caption embeddings\n \"\"\"\n alpha = 0\n beta = 0.5\n gamma = 0.5\n theta = 0.5\n\n loss1 = torch.tensor(0.0)\n loss2 = torch.tensor(0.0)\n re_phrase_loss = torch.tensor(0.0)\n adversary_loss = torch.tensor(0.0)\n concept_loss = torch.tensor(0.0)\n\n loss1 = self.criterion(img_emb, cap_emb, cap_len, sample_index)\n\n if epoch > self.opt.adversary_step and self.opt.need_rephrase_data:\n re_phrase_loss = self.re_phrase_criterion(img_emb, re_phrase_emb)# todo 测试将imgembedding 用于训练rephrase句子\n if epoch> self.opt.adversary_step and self.opt.need_adversary_data:\n adversary_loss = self.adv_criterion(cap_emb, adv_emb,img_emb)\n\n if self.opt.need_concept_label and 'concept_data' in kwargs:\n pred_concept, concept_label = kwargs['concept_data']\n concept_loss = self.concept_loss.forward(pred_concept, concept_label)\n\n # alpha = 1\n if epoch <= 
20 and self.criterion2 is not None:\n alpha = 0.5 * (0.1 ** (epoch // 5))\n # loss2 = self.criterion2(img_emb , cap_emb , cap_len, ids)\n loss2 = self.criterion2(img_emb / img_emb.norm(dim=1)[:, None], cap_emb / cap_emb.norm(dim=1)[:, None],\n cap_len, sample_index)\n\n self.logger.update('Loss1', loss1.item(), img_emb.size(0))\n self.logger.update('Loss2', loss2.item(), img_emb.size(0))\n self.logger.update('rep_Loss', re_phrase_loss.item(), img_emb.size(0))\n self.logger.update('adv_Loss', adversary_loss.item(), img_emb.size(0))\n self.logger.update('concept_Loss', concept_loss.item(), img_emb.size(0))\n\n l2_reg = torch.tensor(0., dtype=torch.float)\n if torch.cuda.is_available():\n l2_reg = l2_reg.cuda()\n no_decay = ['bias', 'gamma', 'beta']\n for n, p in self.img_enc.named_parameters():\n en = n.split('.')[-1]\n if en not in no_decay:\n l2_reg += torch.norm(p)\n # for n, p in self.txt_enc.mapping.named_parameters():\n # en = n.split('.')[-1]\n # if en not in no_decay:\n # l2_reg += torch.norm(p)\n # for n, p in self.txt_enc.layer.named_parameters():\n # en = n.split('.')[-1]\n # if en not in no_decay:\n # l2_reg += torch.norm(p)\n reg_loss = 0.01 * l2_reg\n\n total_loss = loss1 + alpha * loss2 + reg_loss + \\\n beta * re_phrase_loss + gamma * adversary_loss + \\\n theta * concept_loss\n\n return total_loss\n # return loss2 + reg_loss\n\n def train_emb(self, epoch, batch_data, ids=None, *args):\n \"\"\"One training step given images and captions.\n \"\"\"\n self.Eiters += 1\n self.logger.update('Eit', self.Eiters)\n self.logger.update('lr', self.optimizer.param_groups[0]['lr'])\n\n # compute the embeddings\n sample_index, img_code, raw_cap_code, re_phrase_code, adv_code, concept_data, cap_lens = self.forward_emb(epoch,\n batch_data)\n\n # measure accuracy and record loss\n self.optimizer.zero_grad()\n loss = self.forward_loss(epoch, img_code, raw_cap_code, re_phrase_code, adv_code, cap_lens, sample_index,\n concept_data=concept_data)\n\n # compute gradient and do SGD step\n loss.backward()\n if self.grad_clip > 0:\n clip_grad_norm(self.params, self.grad_clip)\n self.optimizer.step()\n",
"id": "3736729",
"language": "Python",
"matching_score": 4.819981098175049,
"max_stars_count": 2,
"path": "reranking_model.py"
},
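reranking_model.py above adds a concept-prediction head (`concept_enc`: Linear → ReLU → Linear → Sigmoid) trained with `nn.BCELoss` against multi-hot concept labels. A self-contained sketch of that branch is shown below; `final_dims`, `concept_num` and the random inputs are assumed stand-ins for `opt.final_dims`, `opt.concept_num` and the real caption embeddings and labels.

import torch
import torch.nn as nn

# Assumed values for opt.final_dims / opt.concept_num and the batch size.
final_dims, concept_num, batch = 256, 300, 8

concept_enc = nn.Sequential(
    nn.Linear(final_dims, 1024),
    nn.ReLU(),
    nn.Linear(1024, concept_num),
    nn.Sigmoid(),                                   # independent per-concept probabilities
)
concept_criterion = nn.BCELoss()

cap_code = torch.randn(batch, final_dims)           # stand-in for the txt_enc output
concept_label = torch.randint(0, 2, (batch, concept_num)).float()  # multi-hot targets

concept_pred = concept_enc(cap_code)
concept_loss = concept_criterion(concept_pred, concept_label)
print(concept_loss.item())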
{
"content": "# -*- coding: utf-8 -*-\r\n\r\nimport torch\r\nimport torch.utils.data as data\r\nimport torchvision.transforms as transforms\r\nimport os\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport tokenization\r\nimport json\r\nimport logging\r\nfrom tqdm import tqdm\r\nimport pickle\r\nfrom IPython import embed\r\nimport random\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef convert_to_feature(raw, seq_length, tokenizer):\r\n line = tokenization.convert_to_unicode(raw)\r\n tokens_a = tokenizer.tokenize(line)\r\n # Modifies `tokens_a` in place so that the total\r\n # length is less than the specified length.\r\n # Account for [CLS] and [SEP] with \"- 2\"\r\n if len(tokens_a) > seq_length - 2:\r\n tokens_a = tokens_a[0:(seq_length - 2)]\r\n\r\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\r\n # tokens.append(\"[CLS]\")\r\n # input_type_ids.append(0)\r\n # for token in tokens_a:\r\n # tokens.append(token)\r\n # input_type_ids.append(0)\r\n # tokens.append(\"[SEP]\")\r\n # input_type_ids.append(0)\r\n\r\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\r\n # Zero-pad up to the sequence length.\r\n input_ids += [0] * (seq_length - len(input_ids))\r\n # while len(input_ids) < seq_length:\r\n # input_ids.append(0)\r\n # input_mask.append(0)\r\n # input_type_ids.append(0)\r\n\r\n # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.\r\n input_mask = [1] * seq_length\r\n if len(tokens) < seq_length:\r\n input_mask[-(seq_length - len(tokens)):] = [0] * (seq_length - len(tokens))\r\n\r\n input_type_ids = [0] * seq_length\r\n\r\n assert len(input_ids) == seq_length\r\n assert len(input_mask) == seq_length\r\n assert len(input_type_ids) == seq_length\r\n\r\n input_ids = torch.tensor(input_ids, dtype=torch.long)\r\n input_mask = torch.tensor(input_mask, dtype=torch.long)\r\n input_type_ids = torch.tensor(input_type_ids, dtype=torch.long)\r\n\r\n return tokens, input_ids, input_mask, input_type_ids\r\n\r\n\r\nclass PrecompDataset(data.Dataset):\r\n \"\"\"\r\n Load precomputed captions and image features\r\n Possible options: f30k_precomp, coco_precomp\r\n \"\"\"\r\n\r\n def __init__(self, data_path, data_split, opt, eager_execution=False, **kwargs):\r\n\r\n self.data_split=data_split\r\n self.opt=opt\r\n self.need_raw_image = opt.need_raw_image\r\n self.need_concept_label = opt.need_concept_label\r\n self.transform = kwargs['transform'] if 'transform' in kwargs else None\r\n self.roi_feature = None\r\n self.concept_label = None\r\n self.part_idx2ori_idx=None\r\n\r\n self.img_ids = []\r\n if 'coco' in data_path:\r\n img_ids_file=os.path.join(data_path, '{}_ids.txt'.format(data_split))\r\n elif 'f30' in data_path:\r\n img_ids_file=os.path.join(data_path,'{}_imgids.txt'.format(data_split))\r\n with open(img_ids_file) as f:\r\n for line in f:\r\n cur_img_id = line.strip()\r\n self.img_ids.append(cur_img_id)\r\n\r\n logger.info('>>>construct dataset for {}, split is {}'.format(data_path,data_split))\r\n\r\n if self.need_concept_label:\r\n self.concept_label = pickle.load(open(opt.concept_file_path, 'rb'))\r\n\r\n # captions_file_path = os.path.join(data_path, '{}_caps+rephrase+adv.json'.format(data_split))\r\n captions_file_path = os.path.join(data_path, '{}_caps+rephrase+30advs.json'.format(data_split))\r\n\r\n data = json.load(open(captions_file_path))\r\n logger.info('cur data split is {}. 
captions samples number is {}'.format(data_split, len(data)))\r\n\r\n self.tokenizer = tokenization.FullTokenizer(vocab_file=opt.vocab_file, do_lower_case=opt.do_lower_case)\r\n self.eager_execution = eager_execution\r\n self.max_words = opt.max_words\r\n\r\n self.need_adversary_data = opt.need_adversary_data\r\n self.need_rephrase_data = opt.need_rephrase_data\r\n self.adv_num=opt.adversary_num\r\n # Captions\r\n # if self.eager_execution:\r\n # logger.info('data eager execution is activated,preprocessing captions to tensor...')\r\n # self.captions = {}\r\n # for key, caps in tqdm(data.items()):\r\n #\r\n # raw_caption = caps['raw'].strip()\r\n # raw_data = convert_to_feature(raw_caption, self.max_words, self.tokenizer)\r\n #\r\n # re_phrase_captions = [caps['re-pharse'][0][0], caps['re-pharse'][1][0]]\r\n # re_phrase_data = []\r\n # for re_phr in re_phrase_captions:\r\n # re_phr = re_phr.strip()\r\n # re_phr = re_phr.replace('.', ' .')\r\n # re_phrase_data.append(convert_to_feature(re_phr, self.max_words, self.tokenizer))\r\n #\r\n # adversary_captions = caps['adversary']\r\n # adversary_data = []\r\n # for adv_cap in adversary_captions:\r\n # adv_cap = adv_cap.strip()\r\n # adversary_data.append(convert_to_feature(adv_cap, self.max_words, self.tokenizer))\r\n #\r\n # tmp = {'raw': raw_data, 're_phrase': re_phrase_data, 'adversary': adversary_data}\r\n # self.captions[key] = tmp\r\n # else:\r\n # self.captions = data\r\n\r\n self.captions = data\r\n\r\n if self.need_raw_image:\r\n # imgid to image file path mapping\r\n mapping_file_path = os.path.join(data_path, 'id2filename.json')\r\n self.imgid2filepath = json.load(open(mapping_file_path))\r\n self.img_paths = []\r\n for cur_img_id in self.img_ids:\r\n self.img_paths.append(os.path.join(opt.image_root_dir, self.imgid2filepath[str(cur_img_id)]))\r\n\r\n self.need_roi_feature = opt.need_roi_feature\r\n if self.need_roi_feature:\r\n # Image features\r\n self.roi_feature = np.load(os.path.join(data_path, '%s_ims.npy' % data_split))\r\n logger.info('faster rcnn image feature loading finished...')\r\n\r\n self.length = len(self.captions)\r\n # rkiros data has redundancy in images, we divide by 5, 10crop doesn't\r\n if (self.roi_feature is not None and self.roi_feature.shape[0] != self.length) or len(self.img_ids)!=self.length:\r\n self.im_div = 5\r\n else:\r\n self.im_div = 1\r\n\r\n logger.info('cur data split is {}, self.im_div={}'.format(data_split,self.im_div))\r\n\r\n # the development set for coco is large and so validation would be slow\r\n if data_split == 'dev':\r\n self.length = 5000\r\n\r\n if data_split=='train' and opt.part_train_data != '':\r\n self.part_idx2ori_idx={}\r\n part_train_ids=set(json.load(open(opt.part_train_data))['split2ids']['train'])\r\n for i,img_id in enumerate(self.img_ids):\r\n if img_id in part_train_ids:\r\n self.part_idx2ori_idx[len(self.part_idx2ori_idx)]=i\r\n self.length=len(self.part_idx2ori_idx)*5\r\n logger.info('using training img number is {}, self.length={}'.format(len(self.part_idx2ori_idx),self.length))\r\n\r\n\r\n def __getitem__(self, index):\r\n if self.part_idx2ori_idx is not None :\r\n part_sample_idx=index//5\r\n sent_shift=index%5\r\n ori_idx=self.part_idx2ori_idx[part_sample_idx]\r\n index=ori_idx*5+sent_shift\r\n # handle the image redundancy\r\n sample_index = int(index / self.im_div)\r\n img_id = self.img_ids[sample_index]\r\n\r\n raw_image = []\r\n roi_feature = []\r\n concept_label = []\r\n\r\n if self.need_raw_image:\r\n img_path = self.img_paths[sample_index]\r\n raw_image = 
Image.open(img_path).convert('RGB')\r\n if self.transform is not None:\r\n raw_image = self.transform(raw_image)\r\n\r\n if self.need_roi_feature:\r\n roi_feature = torch.tensor(self.roi_feature[sample_index])\r\n\r\n if self.need_concept_label:\r\n if int(img_id) not in self.concept_label:\r\n concept_label=torch.zeros(self.opt.concept_num)\r\n else:\r\n concept_label = torch.from_numpy(self.concept_label[int(img_id)]).float()\r\n\r\n raw_input_ids, raw_input_mask, raw_input_type_ids, \\\r\n re_phrase_ids, re_phrase_mask, re_phrase_type_ids, \\\r\n adv_ids, adv_mask, adv_type_ids = self.get_cap_tensor(index)\r\n\r\n return raw_image, roi_feature, concept_label, \\\r\n raw_input_ids, raw_input_mask, raw_input_type_ids, \\\r\n re_phrase_ids, re_phrase_mask, re_phrase_type_ids, \\\r\n adv_ids, adv_mask, adv_type_ids, \\\r\n img_id, sample_index\r\n\r\n def get_cap_tensor(self, index):\r\n '''\r\n :param index:数据的index\r\n :return: caption 相关的tensor数据\r\n '''\r\n re_phrase_ids, re_phrase_mask, re_phrase_type_ids = [], [], []\r\n adv_ids, adv_mask, adv_type_ids = [], [], []\r\n\r\n # if self.eager_execution:\r\n # caption_data = self.captions[str(index)]\r\n # raw_tokens, raw_input_ids, raw_input_mask, raw_input_type_ids = caption_data['raw']\r\n #\r\n # if self.need_rephrase_data:\r\n # re_phrase_tokens, re_phrase_ids, re_phrase_mask, re_phrase_type_ids = [], [], [], []\r\n # for r_to, r_ids, r_mask, r_type_ids in caption_data['re_phrase']:\r\n # re_phrase_tokens.append(r_to)\r\n # re_phrase_ids.append(r_ids)\r\n # re_phrase_mask.append(r_mask)\r\n # re_phrase_type_ids.append(r_type_ids)\r\n #\r\n # re_phrase_ids = torch.stack(re_phrase_ids, dim=0)\r\n # re_phrase_mask = torch.stack(re_phrase_mask, dim=0)\r\n # re_phrase_type_ids = torch.stack(re_phrase_type_ids, dim=0)\r\n # if self.need_adversary_data:\r\n # adv_tokens, adv_ids, adv_mask, adv_type_ids = [], [], [], []\r\n # for a_to, a_ids, a_mask, a_type_ids in caption_data['adversary']:\r\n # adv_tokens.append(a_to)\r\n # adv_ids.append(a_ids)\r\n # adv_mask.append(a_mask)\r\n # adv_type_ids.append(a_type_ids)\r\n #\r\n # adv_ids = torch.stack(adv_ids, dim=0)\r\n # adv_mask = torch.stack(adv_mask, dim=0)\r\n # adv_type_ids = torch.stack(adv_type_ids, dim=0)\r\n #\r\n # else:\r\n caption = self.captions[str(index)]\r\n raw_tokens, raw_input_ids, raw_input_mask, raw_input_type_ids = convert_to_feature(caption['raw'],\r\n self.max_words,\r\n self.tokenizer)\r\n\r\n if self.need_rephrase_data:\r\n re_phrase_tokens, re_phrase_ids, re_phrase_mask, re_phrase_type_ids = [], [], [], []\r\n for re_phrase_caption in [caption['re-pharse'][0][0], caption['re-pharse'][1][0]]:\r\n re_phrase_caption = re_phrase_caption.strip()\r\n re_phrase_caption = re_phrase_caption.replace('.', ' .')\r\n r_to, r_ids, r_mask, r_type_ids = convert_to_feature(re_phrase_caption, self.max_words,\r\n self.tokenizer)\r\n re_phrase_tokens.append(r_to)\r\n re_phrase_ids.append(r_ids)\r\n re_phrase_mask.append(r_mask)\r\n re_phrase_type_ids.append(r_type_ids)\r\n\r\n re_phrase_ids = torch.stack(re_phrase_ids, dim=0)\r\n re_phrase_mask = torch.stack(re_phrase_mask, dim=0)\r\n re_phrase_type_ids = torch.stack(re_phrase_type_ids, dim=0)\r\n\r\n\r\n if self.need_adversary_data:\r\n adv_tokens, adv_ids, adv_mask, adv_type_ids = [], [], [], []\r\n all_adv_caps=caption['adversary']\r\n if self.opt.adversary_type=='noun':\r\n used_adv_caps=all_adv_caps['noun']\r\n elif self.opt.adversary_type=='num':\r\n used_adv_caps=all_adv_caps['num']\r\n elif self.opt.adversary_type=='rela':\r\n 
used_adv_caps=all_adv_caps['rela']\r\n elif self.opt.adversary_type=='mixed':\r\n used_adv_caps=all_adv_caps['noun']+all_adv_caps['num']+all_adv_caps['rela']\r\n random.shuffle(used_adv_caps)\r\n else:\r\n used_adv_caps=[]\r\n\r\n for adversary_caption in used_adv_caps:\r\n adversary_caption = adversary_caption.strip()\r\n a_to, a_ids, a_mask, a_type_ids = convert_to_feature(adversary_caption, self.max_words,\r\n self.tokenizer)\r\n adv_tokens.append(a_to)\r\n adv_ids.append(a_ids)\r\n adv_mask.append(a_mask)\r\n adv_type_ids.append(a_type_ids)\r\n\r\n adv_ids = torch.stack(adv_ids, dim=0)\r\n adv_mask = torch.stack(adv_mask, dim=0)\r\n adv_type_ids = torch.stack(adv_type_ids, dim=0)\r\n if self.adv_num>0:\r\n adv_ids=adv_ids[:self.adv_num]\r\n adv_mask=adv_mask[:self.adv_num]\r\n adv_type_ids=adv_type_ids[:self.adv_num]\r\n\r\n return raw_input_ids, raw_input_mask, raw_input_type_ids, \\\r\n re_phrase_ids, re_phrase_mask, re_phrase_type_ids, \\\r\n adv_ids, adv_mask, adv_type_ids\r\n\r\n def __len__(self):\r\n return self.length\r\n\r\n\r\ndef collate_fn_bert(data):\r\n \"\"\"Build mini-batch tensors from a list of (image, caption) tuples.\r\n Args:\r\n data: list of (image, caption) tuple.\r\n - image: torch tensor of shape (3, 256, 256).\r\n - caption: torch tensor of shape (?); variable length.\r\n\r\n Returns:\r\n images: torch tensor of shape (batch_size, 3, 256, 256).\r\n targets: torch tensor of shape (batch_size, padded_length).\r\n lengths: list; valid length for each padded caption.\r\n \"\"\"\r\n # Sort a data list by caption length\r\n data.sort(key=lambda x: sum(x[2]), reverse=True)\r\n images, input_ids, input_mask, input_type_ids, ids, img_ids = zip(*data)\r\n\r\n # Merge images (convert tuple of 3D tensor to 4D tensor)\r\n images = torch.stack(images, 0)\r\n\r\n # Merget captions (convert tuple of 1D tensor to 2D tensor)\r\n lengths = [torch.sum(cap) for cap in input_mask]\r\n input_ids = torch.stack(input_ids, 0)\r\n input_mask = torch.stack(input_mask, 0)\r\n input_type_ids = torch.stack(input_type_ids, 0)\r\n\r\n ids = np.array(ids)\r\n\r\n return images, input_ids, input_mask, input_type_ids, lengths, ids\r\n\r\n\r\n\r\n\r\ndef get_precomp_loader(data_path, data_split, opt, batch_size=64, shuffle=True, num_workers=2, transform=None):\r\n \"\"\"Returns torch.utils.data.DataLoader for custom coco dataset.\"\"\"\r\n dset = PrecompDataset(data_path, data_split, opt, transform=transform)\r\n data_loader = torch.utils.data.DataLoader(dataset=dset,\r\n batch_size=batch_size,\r\n shuffle=shuffle,\r\n pin_memory=True,\r\n # collate_fn=collate_fn_bert,\r\n num_workers=num_workers)\r\n return data_loader\r\n\r\n\r\ndef get_loaders(data_name, batch_size, workers, opt):\r\n dpath = os.path.join(opt.data_path, data_name)\r\n\r\n if opt.need_raw_image:\r\n image_size = opt.image_size\r\n train_transform = transforms.Compose([\r\n transforms.Resize((image_size, image_size)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ColorJitter(contrast=0.4),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406],\r\n [0.229, 0.224, 0.225])\r\n ])\r\n val_transform = transforms.Compose([\r\n transforms.Resize((image_size, image_size)),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406],\r\n [0.229, 0.224, 0.225])\r\n ])\r\n else:\r\n train_transform = val_transform = None\r\n\r\n train_loader = get_precomp_loader(dpath, 'train', opt, batch_size, True, workers, transform=train_transform)\r\n\r\n val_loader = get_precomp_loader(dpath, 'test', opt, 
batch_size // 4, False, workers, transform=val_transform)\r\n\r\n return train_loader, val_loader\r\n\r\n\r\ndef get_test_loader(split_name, data_name, batch_size, workers, opt):\r\n dpath = os.path.join(opt.data_path, data_name)\r\n image_size = opt.image_size\r\n val_transform = transforms.Compose([\r\n transforms.Resize((image_size, image_size)),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406],\r\n [0.229, 0.224, 0.225])\r\n ])\r\n test_loader = get_precomp_loader(dpath, split_name, opt, batch_size, False, workers, transform=val_transform)\r\n return test_loader\r\n",
"id": "654556",
"language": "Python",
"matching_score": 7.494227886199951,
"max_stars_count": 2,
"path": "enhanced_data.py"
},
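In enhanced_data.py above, five captions share one image (`self.im_div = 5` whenever the ROI-feature matrix is five times shorter than the caption list), so `__getitem__` maps a caption index to its image with an integer division. A tiny illustration of that bookkeeping, using made-up image ids:

# Hypothetical ids, only to show the index arithmetic used in PrecompDataset.
im_div = 5                                   # 5 captions per image
img_ids = ["img_a", "img_b", "img_c"]

for index in (0, 4, 5, 14):
    sample_index = index // im_div           # image shared by this caption
    print(index, "->", img_ids[sample_index])
# 0 -> img_a, 4 -> img_a, 5 -> img_b, 14 -> img_c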
{
"content": "import torch\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport os\nfrom PIL import Image\nimport numpy as np\nimport json\nimport tokenization\n\n\ndef convert_to_feature(raw, seq_length, tokenizer):\n line = tokenization.convert_to_unicode(raw)\n tokens_a = tokenizer.tokenize(line)\n # Modifies `tokens_a` in place so that the total\n # length is less than the specified length.\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens_a) > seq_length - 2:\n tokens_a = tokens_a[0:(seq_length - 2)]\n\n tokens = [\"[CLS]\"] + tokens_a + [\"[SEP]\"]\n # tokens.append(\"[CLS]\")\n # input_type_ids.append(0)\n # for token in tokens_a:\n # tokens.append(token)\n # input_type_ids.append(0)\n # tokens.append(\"[SEP]\")\n # input_type_ids.append(0)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n # Zero-pad up to the sequence length.\n input_ids += [0] * (seq_length - len(input_ids))\n # while len(input_ids) < seq_length:\n # input_ids.append(0)\n # input_mask.append(0)\n # input_type_ids.append(0)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real tokens are attended to.\n input_mask = [1] * seq_length\n if len(tokens) < seq_length:\n input_mask[-(seq_length - len(tokens)):] = [0] * (seq_length - len(tokens))\n\n input_type_ids = [0] * seq_length\n\n assert len(input_ids) == seq_length\n assert len(input_mask) == seq_length\n assert len(input_type_ids) == seq_length\n\n return tokens, input_ids, input_mask, input_type_ids\n\n\nclass PrecompDataset(data.Dataset):\n \"\"\"\n Load precomputed captions and image features\n Possible options: f30k_precomp, coco_precomp\n \"\"\"\n\n def __init__(self, data_path, data_split, opt, **kwargs):\n loc = data_path + '/'\n image_root_dir = opt.image_root_dir\n\n self.need_ori_image = opt.need_ori_image\n self.transform = kwargs['transform'] if 'transform' in kwargs else None\n if self.need_ori_image:\n # imgid to image file path mapping\n mapping_file_path = os.path.join(data_path, 'id2filename.json')\n self.imgid2filepath = json.load(open(mapping_file_path))\n\n # image_ids\n self.img_ids = []\n self.img_paths = []\n with open(os.path.join(loc, '{}_ids.txt'.format(data_split))) as f:\n for line in f:\n cur_img_id = line.strip()\n self.img_ids.append(cur_img_id)\n self.img_paths.append(os.path.join(image_root_dir, self.imgid2filepath[str(cur_img_id)]))\n\n # Captions\n self.captions = []\n with open(loc + '%s_caps.txt' % data_split, 'rb') as f:\n for line in f:\n self.captions.append(line.strip())\n\n # Image features\n self.images = np.load(loc + '%s_ims.npy' % data_split)\n self.length = len(self.captions)\n # rkiros data has redundancy in images, we divide by 5, 10crop doesn't\n if self.images.shape[0] != self.length:\n self.im_div = 5\n else:\n self.im_div = 1\n # the development set for coco is large and so validation would be slow\n if data_split == 'dev':\n self.length = 5000\n\n self.tokenizer = tokenization.FullTokenizer(vocab_file=opt.vocab_file, do_lower_case=opt.do_lower_case)\n self.max_words = opt.max_words\n\n def __getitem__(self, index):\n # handle the image redundancy\n sample_index = int(index / self.im_div)\n\n img_id = self.img_ids[sample_index]\n img_path = self.img_paths[sample_index]\n ori_image = Image.open(img_path).convert('RGB')\n if self.transform is not None:\n ori_image = self.transform(ori_image)\n\n rois = torch.Tensor(self.images[sample_index])\n caption = self.captions[index]\n tokens, input_ids, input_mask, input_type_ids = 
convert_to_feature(caption, self.max_words, self.tokenizer)\n\n input_ids = torch.tensor(input_ids, dtype=torch.long)\n input_mask = torch.tensor(input_mask, dtype=torch.long)\n input_type_ids = torch.tensor(input_type_ids, dtype=torch.long)\n return ori_image, rois, input_ids, input_mask, input_type_ids, index, sample_index\n\n def __len__(self):\n return self.length\n\n\ndef collate_fn_bert(data):\n \"\"\"Build mini-batch tensors from a list of (image, caption) tuples.\n Args:\n data: list of (image, caption) tuple.\n - image: torch tensor of shape (3, 256, 256).\n - caption: torch tensor of shape (?); variable length.\n\n Returns:\n images: torch tensor of shape (batch_size, 3, 256, 256).\n targets: torch tensor of shape (batch_size, padded_length).\n lengths: list; valid length for each padded caption.\n \"\"\"\n # Sort a data list by caption length\n data.sort(key=lambda x: sum(x[2]), reverse=True)\n ori_images, rois, input_ids, input_mask, input_type_ids, ids, img_ids = zip(*data)\n\n # Merge images (convert tuple of 3D tensor to 4D tensor)\n ori_images = torch.stack(ori_images, 0)\n images = torch.stack(rois, 0)\n\n # Merget captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [torch.sum(cap) for cap in input_mask]\n input_ids = torch.stack(input_ids, 0)\n input_mask = torch.stack(input_mask, 0)\n input_type_ids = torch.stack(input_type_ids, 0)\n\n ids = np.array(ids)\n\n return ori_images, images, input_ids, input_mask, input_type_ids, lengths, ids\n\n\ndef get_precomp_loader(data_path, data_split, opt, batch_size=100, shuffle=True, num_workers=2, transform=None):\n \"\"\"Returns torch.utils.data.DataLoader for custom coco dataset.\"\"\"\n dset = PrecompDataset(data_path, data_split, opt, transform=transform)\n data_loader = torch.utils.data.DataLoader(dataset=dset,\n batch_size=batch_size,\n shuffle=shuffle,\n pin_memory=True,\n collate_fn=collate_fn_bert,\n num_workers=num_workers)\n return data_loader\n\n\ndef get_loaders(data_name, batch_size, workers, opt):\n dpath = os.path.join(opt.data_path, data_name)\n\n if opt.need_ori_image:\n image_size = opt.image_size\n train_transform = transforms.Compose([\n transforms.Resize((image_size, image_size)),\n transforms.RandomHorizontalFlip(),\n transforms.ColorJitter(contrast=0.4),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\n val_transform = transforms.Compose([\n transforms.Resize((image_size, image_size)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n ])\n else:\n train_transform = val_transform = None\n\n train_loader = get_precomp_loader(dpath, 'train', opt, batch_size, True, workers, transform=train_transform)\n\n val_loader = get_precomp_loader(dpath, 'dev', opt, batch_size // 4, False, workers, transform=val_transform)\n return train_loader, val_loader\n\n\ndef get_test_loader(split_name, data_name, batch_size,\n workers, opt):\n dpath = os.path.join(opt.data_path, data_name)\n test_loader = get_precomp_loader(dpath, split_name, opt, batch_size, False, workers)\n return test_loader\n",
"id": "2213518",
"language": "Python",
"matching_score": 1.5794655084609985,
"max_stars_count": 2,
"path": "data.py"
},
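data.py builds its torchvision preprocessing inside `get_loaders`; the validation pipeline resizes, converts to a tensor and normalizes with ImageNet statistics. A stand-alone version of that transform follows, with `image_size = 224` assumed in place of `opt.image_size` and a blank placeholder image instead of a COCO file:

from PIL import Image
import torchvision.transforms as transforms

image_size = 224                             # assumed value for opt.image_size
val_transform = transforms.Compose([
    transforms.Resize((image_size, image_size)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406],
                         [0.229, 0.224, 0.225]),
])

img = Image.new("RGB", (640, 480))           # placeholder image
print(val_transform(img).shape)              # torch.Size([3, 224, 224])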
{
"content": "# coding=utf-8\n# Copyleft 2019 project LXRT.\n# import os\n# import sys\n# base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # 找到当前项目的项目的路径\n# print(base_dir)\n# sys.path.append(base_dir) # 将找到的项目的路径导入当前系统路径\n\nimport os\nimport sys\n\nbase_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # 找到当前项目的项目的路径\nprint(base_dir)\nsys.path.append(base_dir) # 将找到的项目的路径导入当前系统路径\n\nimport collections\nimport os\nimport random\n\nfrom tqdm import tqdm\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\nfrom param import args\nfrom pretrain.lxmert_data import InputExample, LXMERTDataset, SSRPTorchDataset, LXMERTEvaluator\n# from lxrt.entry import set_visual_config\nfrom lxrt.tokenization import BertTokenizer\nfrom lxrt.modeling import SSRP_Encoder, SSRP_Probe\nfrom lxrt.modeling import VISUAL_CONFIG\nfrom lxrt.loss_functions import Loss_S_Probe, Loss_XCL, Loss_SCL\nimport wandb\n\nwandb.init(settings=wandb.Settings(start_method='fork'),\n project=\"ssrp_finetune_stage2\",\n notes=args.remark,\n tags=[\"baseline\", \"stage2\"],\n entity='muziyongshixin')\n\n\nDataTuple = collections.namedtuple(\"DataTuple\", 'dataset torchdset loader evaluator')\n\n\ndef my_collate_fn(x):\n assert len(x) > 0\n tuple_size = len(x)\n if len(x[0]) == 1:\n return x\n else: # 说明使用了data augmentation\n raw_data = [t[0] for t in x]\n aug_data = [t[1] for t in x]\n return raw_data + aug_data\n\n\ndef set_visual_config(args):\n VISUAL_CONFIG.l_layers = args.llayers\n VISUAL_CONFIG.x_layers = args.xlayers\n VISUAL_CONFIG.r_layers = args.rlayers\n\n\ndef get_tuple(splits: str, bs: int, shuffle=False, drop_last=False, topk=-1) -> DataTuple:\n # Decide which QA datasets would be used in pre-training.\n # Options: vqa, gqa, visual7w\n # Note: visual7w is a part of vgqa, we take the name here.\n qa_sets = args.qa_sets\n if qa_sets is not None:\n qa_sets = set(qa_set.lower().strip() for qa_set in qa_sets.split(\",\"))\n\n # Build dataset, data loader, and evaluator.\n dset = LXMERTDataset(splits, qa_sets=qa_sets)\n tset = SSRPTorchDataset(dset, topk, img_feats_dir='data/img_feats', use_augmentation=True)\n\n data_loader = DataLoader(\n tset, batch_size=bs,\n shuffle=shuffle, num_workers=args.num_workers,\n collate_fn=my_collate_fn,\n drop_last=drop_last, pin_memory=True\n )\n evaluator = LXMERTEvaluator(dset)\n print('finished {} get_tuple process...'.format(splits))\n return DataTuple(dataset=dset, torchdset=tset, loader=data_loader, evaluator=evaluator)\n\n\nvalid_batch_size = (50 * torch.cuda.device_count()) if args.multiGPU else 64\nvalid_tuple = get_tuple(args.valid, valid_batch_size, shuffle=False, drop_last=False, topk=5000)\n\ntrain_tuple = get_tuple(args.train, args.batch_size, shuffle=True, drop_last=True)\nprint('finished all data preparation process ....')\n\n\nclass SSRPInputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n input_ids, input_mask, segment_ids, lm_label_ids, sent_probe,\n visual_feats, obj_labels,\n is_matched, ans):\n self.input_ids = input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.lm_label_ids = lm_label_ids\n self.sent_probe = sent_probe\n self.visual_feats = visual_feats\n self.obj_labels = obj_labels\n\n self.is_matched = is_matched\n\n self.ans = ans\n\n\nLOSSES_NAME = ('s_probe', 'scl', 'xcl')\n\n\nclass SSRP:\n def __init__(self, max_seq_length):\n super().__init__()\n self.max_seq_length = max_seq_length\n\n self.tokenizer = 
BertTokenizer.from_pretrained(\n \"bert-base-uncased\",\n do_lower_case=True\n )\n if self.tokenizer is None:\n from IPython import embed\n embed()\n\n # Build model\n print('begin building models...')\n set_visual_config(args)\n\n self.encoder = SSRP_Encoder.from_pretrained(\n \"bert-base-uncased\",\n mode='lxr'\n )\n self.probe_head = SSRP_Probe()\n print('model construction is finished...')\n\n if args.load_lxmert is None:\n raise ValueError('encoder parameter is required...')\n # Load lxmert would not load the answer head.\n self.load_encoder(args.load_lxmert)\n if args.load_probe_head is not None:\n self.load_probe_head(args.load_probe_head)\n else:\n print('train the probe head from scratch...')\n\n # GPU Options\n self.encoder = self.encoder.cuda()\n self.probe_head = self.probe_head.cuda()\n if args.multiGPU:\n self.encoder = nn.DataParallel(self.encoder)\n self.probe_head = nn.DataParallel(self.probe_head)\n\n # keep the encoder with evaluation mode and freeze the parameter\n self.encoder.eval()\n for param in self.encoder.parameters():\n param.requeires_grad = False\n\n # loss functions\n self.Loss_S_Probe = Loss_S_Probe()\n self.Loss_SCL = Loss_SCL()\n self.Loss_XCL = Loss_XCL()\n\n # Optimizer\n train_ld = train_tuple.loader\n from lxrt.optimization import BertAdam\n batch_per_epoch = len(train_ld)\n t_total = int(batch_per_epoch * args.epochs)\n warmup_ratio = 0.05\n warmup_iters = int(t_total * warmup_ratio)\n print(\"Batch per epoch: %d\" % batch_per_epoch)\n print(\"Total Iters: %d\" % t_total)\n print(\"Warm up Iters: %d\" % warmup_iters)\n self.optim = BertAdam(self.probe_head.parameters(), lr=args.lr, warmup=warmup_ratio, t_total=t_total)\n\n wandb.config.update(args)\n\n def convert_example_to_features(self, example: InputExample, max_seq_length) -> SSRPInputFeatures:\n \"\"\"\n Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with\n IDs, LM labels, input_mask, CLS and SEP tokens etc.\n :param example: InputExample, containing sentence input as strings and is_next label\n :param max_seq_length: int, maximum length of sequence.\n :param tokenizer: Tokenizer\n :return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)\n \"\"\"\n (sent, sent_probe) = example.sent\n tokens = self.tokenizer.tokenize(sent.strip())\n\n # Account for [CLS] and [SEP] with \"- 2\"\n if len(tokens) > max_seq_length - 2:\n tokens = tokens[:(max_seq_length - 2)]\n\n # concatenate lm labels and account for CLS, SEP, SEP\n tokens = ['[CLS]'] + tokens + ['[SEP]']\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n\n # 获得sent probe matrix, 所有的大小都调整到max_seq_len,max_seq_len\n pad_sent_probe = np.zeros((max_seq_length, max_seq_length)) # 注意这里的矩阵不包含CLS和SEP token\n if len(sent_probe) > max_seq_length - 2:\n sent_probe = sent_probe[:max_seq_length - 2, :max_seq_length - 2] # 最多取前面的max_seq_len-2个单词\n\n pad_sent_probe[:len(sent_probe), :len(sent_probe)] = sent_probe # 有用的元素填充在左上角\n\n # Mask & Segment Word\n lm_label_ids = [-1] * len(input_ids)\n input_mask = [1] * len(input_ids)\n segment_ids = [0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n lm_label_ids.append(-1)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(lm_label_ids) == max_seq_length\n\n feat, boxes = example.visual_feats\n 
obj_labels, obj_confs = example.obj_labels\n attr_labels, attr_confs = example.attr_labels\n\n # QA answer label\n if example.label is None or len(example.label) == 0 or example.is_matched != 1:\n # 1. No label 2. Label is pruned 3. unmatched visual + language pair\n ans = -1\n else:\n keys, values = zip(*example.label.items())\n if len(keys) == 1:\n ans = keys[0]\n else:\n value_sum = sum(values)\n prob = [value / value_sum for value in values]\n choice = np.random.multinomial(1, prob).argmax()\n ans = keys[choice]\n\n features = SSRPInputFeatures(\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n lm_label_ids=lm_label_ids,\n sent_probe=pad_sent_probe,\n visual_feats=(feat, boxes),\n obj_labels={\n 'obj': (obj_labels, obj_confs),\n 'attr': (attr_labels, attr_confs),\n 'feat': (None, None),\n },\n is_matched=example.is_matched,\n ans=ans,\n )\n return features\n\n def forward(self, examples):\n train_features = [self.convert_example_to_features(example, self.max_seq_length)\n for example in examples]\n\n # language Inputs\n input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long).cuda()\n input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long).cuda()\n segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long).cuda()\n sent_target_probe = torch.tensor([f.sent_probe for f in train_features], dtype=torch.float).cuda()\n\n # Visual Inputs\n feats = torch.from_numpy(np.stack([f.visual_feats[0] for f in train_features])).cuda()\n pos = torch.from_numpy(np.stack([f.visual_feats[1] for f in train_features])).cuda()\n\n \"\"\"\n SSRP_Encoder.forward(self, input_ids, token_type_ids=None, attention_mask=None, visual_feats=None,pos=None,\n visual_attention_mask=None):\n \"\"\"\n (lang_output, visn_output), pooled_output = self.encoder(input_ids, segment_ids, input_mask, visual_feats=feats,\n pos=pos)\n\n vis_probe, lang_probe, vis_probe_vec, lang_probe_vec = self.probe_head(lang_output, visn_output)\n\n losses = {}\n loss_s_probe = self.Loss_S_Probe(lang_probe, input_mask, sent_target_probe)\n loss_scl = self.Loss_SCL(vis_probe_vec, lang_probe_vec)\n loss_xcl = self.Loss_XCL(vis_probe_vec, lang_probe_vec)\n\n loss_cl_all = loss_s_probe + loss_scl + loss_xcl\n losses['s_probe'] = loss_s_probe.detach()\n losses['scl'] = loss_scl.detach()\n losses['xcl'] = loss_xcl.detach()\n return loss_cl_all, losses\n\n def train_batch(self, optim, batch):\n optim.zero_grad()\n loss, losses = self.forward(batch)\n loss.backward()\n nn.utils.clip_grad_norm_(self.probe_head.parameters(), 1.)\n optim.step()\n return loss.item(), losses\n\n def valid_batch(self, batch):\n with torch.no_grad():\n loss, losses = self.forward(batch)\n return loss.item(), losses\n\n def train(self, train_tuple: DataTuple, eval_tuple: DataTuple):\n train_ld = train_tuple.loader\n # Train\n best_eval_loss = 9595.\n for epoch in range(args.epochs):\n # Train\n print('====== begin training {} epoch ====='.format(epoch))\n batch_per_epoch = len(train_ld)\n total_loss = 0.\n total_losses = {n: 0. 
for n in LOSSES_NAME}\n for batch in tqdm(train_ld, total=len(train_ld)):\n loss, losses = self.train_batch(self.optim, batch)\n total_loss += loss\n for loss_name, val in losses.items():\n total_losses[loss_name] += val\n\n print(\"The training loss for Epoch %d is %0.4f\" % (epoch, total_loss / batch_per_epoch))\n losses_str = \"The losses are \"\n\n\n train_log = {'train_{}'.format(name): total_losses[name] for name in total_losses}\n wandb.log(train_log)\n for name, loss in total_losses.items():\n losses_str += \"%s: %0.4f \" % (name, loss / batch_per_epoch)\n\n print(losses_str)\n\n # Eval\n print('====== begin evaluate {} epoch ====='.format(epoch))\n avg_eval_loss = self.evaluate_epoch(eval_tuple, iters=-1)\n\n # Save\n if avg_eval_loss < best_eval_loss:\n best_eval_loss = avg_eval_loss\n self.save(\"BEST_EVAL_LOSS\")\n if epoch % 2 == 0:\n self.save(\"Epoch%02d\" % (epoch + 1))\n\n def evaluate_epoch(self, eval_tuple: DataTuple, iters: int = -1):\n self.probe_head.eval()\n eval_ld = eval_tuple.loader\n total_loss = 0.\n total_losses = {n: 0. for n in LOSSES_NAME}\n for i, batch in enumerate(eval_ld):\n loss, losses = self.valid_batch(batch)\n total_loss += loss\n for loss_name, val in losses.items():\n total_losses[loss_name] += val\n if i == iters:\n break\n\n eval_log = {'eval_{}'.format(name): total_losses[name] for name in total_losses}\n wandb.log(eval_log)\n print(\"The valid loss is %0.4f\" % (total_loss / len(eval_ld)))\n losses_str = \"The losses are \"\n for name, loss in total_losses.items():\n losses_str += \"%s: %0.4f \" % (name, loss / len(eval_ld))\n print(losses_str)\n\n return total_loss / len(eval_ld)\n\n def save(self, name):\n torch.save(self.probe_head.state_dict(),\n os.path.join(args.output, \"%s_probe_head.pth\" % name))\n\n def load_encoder(self, path):\n print(\"Load LXMERT model from %s\" % path)\n state_dict = torch.load(path)\n\n # Do not load any answer head\n for key in list(state_dict.keys()):\n if 'answer' in key:\n state_dict.pop(key)\n\n # Change Multi GPU to single GPU\n new_state_dict = {}\n for key, value in state_dict.items():\n if key.startswith(\"module.\"):\n new_state_dict[key[len(\"module.\"):]] = value\n state_dict = new_state_dict\n\n load_keys = set(state_dict.keys())\n model_keys = set(self.encoder.state_dict().keys())\n print()\n print(\"Keys in loaded but not in model:\")\n for key in sorted(load_keys.difference(model_keys)):\n print(key)\n print()\n print(\"Keys in model but not in loaded:\")\n for key in sorted(model_keys.difference(load_keys)):\n print(key)\n print()\n\n self.encoder.load_state_dict(state_dict, strict=False)\n\n def load_probe_head(self, path):\n print(\"Load LXMERT model from %s\" % path)\n state_dict = torch.load(\"%s_probe_head.pth\" % path)\n\n # Change Multi GPU to single GPU\n new_state_dict = {}\n for key, value in state_dict.items():\n if key.startswith(\"module.\"):\n new_state_dict[key[len(\"module.\"):]] = value\n state_dict = new_state_dict\n\n load_keys = set(state_dict.keys())\n model_keys = set(self.probe_head.state_dict().keys())\n print()\n print(\"Keys in loaded but not in model:\")\n for key in sorted(load_keys.difference(model_keys)):\n print(key)\n print()\n print(\"Keys in model but not in loaded:\")\n for key in sorted(model_keys.difference(load_keys)):\n print(key)\n print()\n\n self.probe_head.load_state_dict(state_dict, strict=False)\n\n\nif __name__ == \"__main__\":\n ssrp = SSRP(max_seq_length=36)\n\n ssrp.train(train_tuple, valid_tuple)\n",
"id": "3985980",
"language": "Python",
"matching_score": 4.048899173736572,
"max_stars_count": 1,
"path": "src/ssrp_finetune.py"
},
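src/ssrp_finetune.py intends to keep the LXMERT encoder frozen, but the loop in its constructor assigns `param.requeires_grad = False`, a misspelling of `requires_grad`, so it only creates an unused attribute and autograd still tracks the encoder weights. A small sketch of the intended pattern (the `freeze` helper and the `nn.Linear` stand-in are illustrative, not part of the file):

import torch.nn as nn

def freeze(module: nn.Module) -> nn.Module:
    """Switch a module to eval mode and stop gradient tracking for its weights."""
    module.eval()
    for param in module.parameters():
        param.requires_grad = False          # correctly spelled attribute
    return module

encoder = freeze(nn.Linear(4, 4))            # stand-in for SSRP_Encoder
print(any(p.requires_grad for p in encoder.parameters()))  # False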
{
"content": "# coding=utf-8\n# Copyleft 2019 project LXRT.\n\nfrom collections import defaultdict\nimport json\nimport random\n\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom param import args\nfrom pretrain.qa_answer_table import AnswerTable\nfrom utils import load_obj_tsv\nfrom IPython import embed\nimport pickle\nimport os\n\nTINY_IMG_NUM = 100\nFAST_IMG_NUM = 5000\n\nimport socket\n\nhost_name = socket.gethostname()\nif host_name == 'xxxxx':\n Split2ImgFeatPath = {\n 'mscoco_train': '/ssd1/liyz/lxmert/data/mscoco_imgfeat/train2014_obj36.tsv',\n 'mscoco_minival': '/ssd1/liyz/lxmert/data/mscoco_imgfeat/val2014_obj36.tsv',\n 'mscoco_nominival': '/ssd1/liyz/lxmert/data/mscoco_imgfeat/val2014_obj36.tsv',\n 'vgnococo': '/ssd1/liyz/lxmert/data/vg_gqa_imgfeat/vg_gqa_obj36.tsv',\n }\n print('run on {}\\n using data path:{}'.format(host_name, str(Split2ImgFeatPath)))\nelse:\n Split2ImgFeatPath = {\n 'mscoco_train': 'data/mscoco_imgfeat/train2014_obj36.tsv',\n 'mscoco_minival': 'data/mscoco_imgfeat/val2014_obj36.tsv',\n 'mscoco_nominival': 'data/mscoco_imgfeat/val2014_obj36.tsv',\n 'vgnococo': 'data/vg_gqa_imgfeat/vg_gqa_obj36.tsv',\n 'mscoco_train_aug': 'data/mscoco_imgfeat/train2014_obj36.tsv',\n 'mscoco_minival_aug': 'data/mscoco_imgfeat/val2014_obj36.tsv',\n 'mscoco_nominival_aug': 'data/mscoco_imgfeat/val2014_obj36.tsv',\n }\n\nSplit2SentProbePath = {\n 'mscoco_train_aug': 'data/probe/mscoco_train_prob_matrix.pickle',\n 'mscoco_minival_aug': 'data/probe/mscoco_minival_prob_matrix.pickle',\n 'mscoco_nominival_aug': 'data/probe/mscoco_nominival_prob_matrix.pickle'\n}\n\n\nclass InputExample(object):\n \"\"\"A single training/test example for the language model.\"\"\"\n\n def __init__(self, uid, sent, visual_feats=None,\n obj_labels=None, attr_labels=None,\n is_matched=None, label=None):\n self.uid = uid\n self.sent = sent\n self.visual_feats = visual_feats\n self.obj_labels = obj_labels\n self.attr_labels = attr_labels\n self.is_matched = is_matched # whether the visual and obj matched\n self.label = label\n\n\nclass LXMERTDataset:\n def __init__(self, splits: str, qa_sets=None):\n \"\"\"\n :param splits: The data sources to be loaded\n :param qa_sets: if None, no action\n o.w., only takes the answers appearing in these dsets\n and remove all unlabeled data (MSCOCO captions)\n \"\"\"\n self.name = splits\n self.sources = splits.split(',')\n\n # Loading datasets to data\n self.data = []\n for source in self.sources:\n self.data.extend(json.load(open(\"data/lxmert/%s.json\" % source)))\n print(\"Load %d data from %s\" % (len(self.data), self.name))\n\n # Create answer table according to the qa_sets\n self.answer_table = AnswerTable(qa_sets)\n print(\"Load an answer table of size %d.\" % (len(self.answer_table.ans2id_map())))\n\n # Modify the answers\n for datum in self.data:\n labelf = datum['labelf']\n for cat, labels in labelf.items():\n for label in labels:\n for ans in list(label.keys()):\n new_ans = self.answer_table.convert_ans(ans)\n if self.answer_table.used(new_ans):\n if ans != new_ans:\n label[new_ans] = label.pop(ans)\n else:\n label.pop(ans)\n\n def __len__(self):\n return len(self.data)\n\n\ndef make_uid(img_id, dset, sent_idx):\n return \"{}_{}_{}\".format(img_id, dset, sent_idx)\n\n\n\"\"\"\nExample in obj tsv:\nFIELDNAMES = [\"img_id\", \"img_h\", \"img_w\", \"objects_id\", \"objects_conf\",\n \"attrs_id\", \"attrs_conf\", \"num_boxes\", \"boxes\", \"features\"]\n\"\"\"\n\n\nclass LXMERTTorchDataset(Dataset):\n def __init__(self, dataset: LXMERTDataset, 
topk=-1, use_augmentation=False):\n super().__init__()\n self.raw_dataset = dataset\n self.task_matched = args.task_matched\n\n self.use_augmentation=use_augmentation\n if self.use_augmentation:\n used_sent_cat= ('mscoco','mscoco_rephrase')# default to use ('mscoco','mscoco_rephrase'),no use vqa data\n else:\n used_sent_cat=('mscoco')\n if args.tiny:\n topk = TINY_IMG_NUM\n elif args.fast:\n topk = FAST_IMG_NUM\n\n # Load the dataset\n img_data = []\n for source in self.raw_dataset.sources:\n img_data.extend(load_obj_tsv(Split2ImgFeatPath[source], topk))\n\n self.imgid2img = {}\n for img_datum in img_data:\n self.imgid2img[img_datum['img_id']] = img_datum\n\n # Filter out the dataset\n used_data = []\n for datum in self.raw_dataset.data:\n if datum['img_id'] in self.imgid2img:\n used_data.append(datum)\n\n # Flatten the dataset (into one sent + one image entries)\n\n ###====================== one datum sample is below =====================================\n # {'img_id': 'COCO_val2014_000000203564',\n # 'labelf': {'vqa': [{'10:10': 1}, {'no': 1, 'yes': 0.3}, {'clock': 1}]},\n # 'sentf': {'mscoco': ['A bicycle replica with a clock as the front wheel.',\n # 'The bike has a clock as a tire.',\n # 'A black metal bicycle with a clock inside the front wheel.',\n # 'A bicycle figurine in which the front wheel is replaced with a clock\\n',\n # 'A clock with the appearance of the wheel of a bicycle '],\n # 'vqa': ['What is the clock saying the time is?',\n # 'Is it possible to ride the bicycle?',\n # 'What is the front wheel of the bike?']}}\n ##=======================================================================================\n self.data = []\n for datum in used_data:\n sentf = datum['sentf']\n for sents_cat, sents in sentf.items():\n if used_sent_cat is not None and sents_cat not in used_sent_cat:\n continue # only use the specified sentence category , default is ('mscoco','mscoco_rephrase')\n if sents_cat in datum['labelf']:\n labels = datum['labelf'][sents_cat]\n else:\n labels = None\n for sent_idx, sent in enumerate(sents):\n if isinstance(sent,list): # this block code is for rephrase data\n for j,s in enumerate(sent):\n new_datum={\n 'uid': make_uid(datum['img_id'], sents_cat, '{}_{}'.format(sent_idx,j)),\n 'img_id': datum['img_id'],\n 'sent': s\n }\n if labels is not None:\n new_datum['label'] = labels[sent_idx]\n self.data.append(new_datum)\n else:\n new_datum = {\n 'uid': make_uid(datum['img_id'], sents_cat, sent_idx),\n 'img_id': datum['img_id'],\n 'sent': sent\n }\n if labels is not None:\n new_datum['label'] = labels[sent_idx]\n self.data.append(new_datum)\n print(\"Use %d data in torch dataset\" % (len(self.data))) # self.data里包含vqa的文本数据\n\n def __len__(self):\n return len(self.data)\n\n\n def random_feat(self):\n \"\"\"Get a random obj feat from the dataset.\"\"\"\n datum = self.data[random.randint(0, len(self.data) - 1)]\n img_id = datum['img_id']\n img_info = self.imgid2img[img_id]\n feat = img_info['features'][random.randint(0, 35)]\n return feat\n\n def get_img_feat(self,img_id, use_augmentation=False):\n\n # Get image info\n img_info = self.imgid2img[img_id]\n obj_num = img_info['num_boxes']\n feats = img_info['features'].copy()\n boxes = img_info['boxes'].copy()\n obj_labels = img_info['objects_id'].copy()\n obj_confs = img_info['objects_conf'].copy()\n attr_labels = img_info['attrs_id'].copy()\n attr_confs = img_info['attrs_conf'].copy()\n assert obj_num == len(boxes) == len(feats)\n\n # Normalize the boxes (to 0 ~ 1)\n img_h, img_w = img_info['img_h'], img_info['img_w']\n 
boxes = boxes.copy()\n\n if use_augmentation: # 当前只有图片级别的水平翻转是支持的\n if random.randint(0,100)&1==1:\n boxes[:,(0,2)]= img_w-boxes[:,(2,0)] #注意左右翻转之后,左上角的横坐标变成了右上角横坐标\n else:\n boxes=boxes\n\n\n boxes[:, (0, 2)] /= img_w\n boxes[:, (1, 3)] /= img_h\n np.testing.assert_array_less(boxes, 1 + 1e-5)\n np.testing.assert_array_less(-boxes, 0 + 1e-5)\n\n\n return (feats, boxes), (obj_labels, obj_confs), (attr_labels, attr_confs)\n\n\n def __getitem__(self, item: int):\n datum = self.data[item]\n\n uid = datum['uid']\n img_id = datum['img_id']\n\n\n # get img feats\n (feats, boxes), (obj_labels, obj_confs), (attr_labels, attr_confs)=self.get_img_feat(img_id,use_augmentation=self.use_augmentation)\n\n # If calculating the matched loss, replace the sentence with an sentence\n # corresponding to other image.\n is_matched = 1\n sent = datum['sent']\n if self.task_matched:\n if random.random() < 0.5:\n is_matched = 0\n other_datum = self.data[random.randint(0, len(self.data) - 1)]\n while other_datum['img_id'] == img_id:\n other_datum = self.data[random.randint(0, len(self.data) - 1)]\n sent = other_datum['sent']\n\n # Label, convert answer to id\n if 'label' in datum:\n label = datum['label'].copy()\n for ans in list(label.keys()):\n label[self.raw_dataset.answer_table.ans2id(ans)] = label.pop(ans)\n else:\n label = None\n\n # Create target\n example = InputExample(\n uid, sent, (feats, boxes),\n (obj_labels, obj_confs), (attr_labels, attr_confs),\n is_matched, label\n )\n return example\n\n\nclass LXMERTEvaluator:\n def __init__(self, dataset: LXMERTDataset):\n self.raw_dataset = dataset\n\n # Create QA Eval Data\n self.data = []\n for datum in self.raw_dataset.data:\n sentf = datum['sentf']\n for sents_cat, sents in sentf.items():\n if sents_cat in datum['labelf']: # A labeled dataset\n labels = datum['labelf'][sents_cat]\n for sent_idx, sent in enumerate(sents):\n new_datum = {\n 'uid': make_uid(datum['img_id'], sents_cat, sent_idx),\n 'img_id': datum['img_id'],\n 'sent': sent,\n 'dset': sents_cat,\n 'label': labels[sent_idx]\n }\n self.data.append(new_datum)\n\n # uid2datum\n self.uid2datum = {}\n for datum in self.data:\n self.uid2datum[datum['uid']] = datum\n\n def evaluate(self, uid2ans: dict, pprint=False):\n score = 0.\n cnt = 0\n dset2score = defaultdict(lambda: 0.)\n dset2cnt = defaultdict(lambda: 0)\n for uid, ans in uid2ans.items():\n if uid not in self.uid2datum: # Not a labeled data\n continue\n datum = self.uid2datum[uid]\n label = datum['label']\n dset = datum['dset']\n if ans in label:\n score += label[ans]\n dset2score[dset] += label[ans]\n cnt += 1\n dset2cnt[dset] += 1\n accu = score / cnt\n dset2accu = {}\n for dset in dset2cnt:\n dset2accu[dset] = dset2score[dset] / dset2cnt[dset]\n\n if pprint:\n accu_str = \"Overall Accu %0.4f, \" % (accu)\n sorted_keys = sorted(dset2accu.keys())\n for key in sorted_keys:\n accu_str += \"%s Accu %0.4f, \" % (key, dset2accu[key])\n print(accu_str)\n\n return accu, dset2accu\n\n def dump_result(self, uid2ans: dict, path):\n raise NotImplemented\n\n\nclass SSRPTorchDataset(Dataset):\n def __init__(self, dataset: LXMERTDataset, topk=-1, img_feats_dir='data/img_feats', use_augmentation=True):\n super().__init__()\n self.raw_dataset = dataset\n self.task_matched = args.task_matched\n self.used_sent_category = {'mscoco', 'mscoco_rephrase'}\n self.img_feats_dir = img_feats_dir\n\n if args.tiny:\n topk = TINY_IMG_NUM\n elif args.fast:\n topk = FAST_IMG_NUM\n\n # Load the dataset\n img_data = []\n for source in self.raw_dataset.sources:\n 
img_data.extend(load_obj_tsv(Split2ImgFeatPath[source], topk))\n\n self.imgid2img = {}\n for img_datum in img_data:\n self.imgid2img[img_datum['img_id']] = img_datum\n\n self.img_augmentation_methods = ['img_hflip', 'roi_hflip', 'roi_r90', 'roi_r180', 'roi_r270',\n 'roi_jit0.8', 'roi_jit1.2']\n self.use_data_augmentation = use_augmentation\n\n # Filter out the dataset\n used_data = []\n for datum in self.raw_dataset.data:\n if datum['img_id'] in self.imgid2img:\n used_data.append(datum)\n\n # Flatten the dataset (into one sent + one image entries)\n\n ###====================== one datum sample is below =====================================\n # {'img_id': 'COCO_val2014_000000561629',\n # 'labelf': {'vqa': [{'carpet': 0.3, 'paper': 0.9, 'scissors': 0.9},\n # {'10': 0.6,\n # '16': 0.3,\n # '20': 0.3,\n # '44': 0.3,\n # '70': 0.3,\n # '72': 0.3,\n # '8': 0.3,\n # 'lot': 0.3},\n # {'red': 0.3}]},\n # 'sentf': {'mscoco': ['A little boy with scissors and construction paper.',\n # 'A toddler holds scissors up in the air around a big '\n # 'mess of cut up paper.',\n # 'A boy is cutting up pieces of construction paper.',\n # 'A boy is sitting on a floor cutting up paper.',\n # 'A small child is playing on the floor and is surrounded '\n # 'by torn up pieces of paper.'],\n # 'vqa': ['What is this kid playing with?',\n # 'How many pieces of paper are there?',\n # 'What color is the paper on the floor?'],\n # 'mscoco_rephrase': [['A little boy with scissors and construction paper.',\n # 'A boy with scissors and building paper.'],\n # ['A toddler holds a pair of scissors in the air around a large '\n # 'jumble of sliced paper.',\n # 'A child holds scissors in the air around a large pile of '\n # 'shredded paper.'],\n # ['A boy cuts up building paper.',\n # 'A boy cuts pieces of construction paper.'],\n # ['A boy sits on the floor and cuts paper.',\n # 'The boy is sitting on the floor, cutting paper.'],\n # ['A small child plays on the floor and is surrounded by torn '\n # 'pieces of paper.',\n # 'A small child plays on the floor and is surrounded by torn '\n # 'pieces of paper.']]}}\n ##=======================================================================================\n self.data = []\n self.rephrase_data = {}\n for datum in used_data:\n sentf = datum['sentf']\n for sents_cat, sents in sentf.items():\n if sents_cat != 'mscoco': # 只使用mscoco的caption数据\n continue\n labels = None\n for sent_idx, sent in enumerate(sents):\n if sent_idx >= 5: # 每个imgid最多使用前5句caption,因为backtranslation得到的结果只有前5句话的结果\n break\n\n sent_id = make_uid(datum['img_id'], sents_cat, sent_idx)\n new_datum = {\n 'uid': sent_id,\n 'img_id': datum['img_id'],\n 'sent': sent\n }\n if labels is not None:\n new_datum['label'] = labels[sent_idx]\n self.data.append(new_datum)\n\n ## sentence data augmentation\n if self.use_data_augmentation:\n rephrased_sents = sentf['mscoco_rephrase'][sent_idx]\n aug_datas = []\n for j in range(min(len(rephrased_sents),2)): # some case have more than 2 rephrase sents\n new_datum = {\n 'uid': make_uid(img_id=datum['img_id'], dset='mscoco_rephrase',\n sent_idx='{}_{}'.format(sent_idx, j)),\n 'img_id': datum['img_id'],\n 'sent': rephrased_sents[j]\n }\n aug_datas.append(new_datum)\n self.rephrase_data[sent_id] = aug_datas\n print(\"Use %d data in torch dataset\" % (len(self.data))) # self.data里包含vqa的文本数据\n\n\n self.probe_matrix={}\n for source in self.raw_dataset.sources:\n cur_source_matrix_data=pickle.load(open(Split2SentProbePath[source],'rb'))\n 
self.probe_matrix.update(cur_source_matrix_data)\n\n\n\n\n\n\n def load_img_feats(self, feats_path, key='img_raw', **kwargs):\n # img_all_data = pickle.load(open(feats_path, 'rb'))\n # # Get image info\n # img_info = img_all_data[key]\n # obj_num = img_info['num_boxes']\n # feats = img_info['features'].copy()\n # boxes = img_info['boxes'].copy()\n # assert obj_num == len(boxes) == len(feats)\n # if 'only_feats' in kwargs:\n # obj_labels = None\n # obj_confs = None\n # attr_labels = None\n # attr_confs = None\n # else:\n # obj_labels = img_info['objects_id'].copy()\n # obj_confs = img_info['objects_conf'].copy()\n # attr_labels = img_info['attrs_id'].copy()\n # attr_confs = img_info['attrs_conf'].copy()\n #\n # # Normalize the boxes (to 0 ~ 1)\n # img_h, img_w = img_info['img_h'], img_info['img_w']\n # boxes = boxes.copy()\n # boxes[:, (0, 2)] /= img_w\n # boxes[:, (1, 3)] /= img_h\n # np.testing.assert_array_less(boxes, 1 + 1e-5)\n # np.testing.assert_array_less(-boxes, 0 + 1e-5)\n\n # Get image info\n img_id=feats_path.split('/')[-1].split('.')[0]\n img_info = self.imgid2img[img_id]\n obj_num = img_info['num_boxes']\n feats = img_info['features'].copy()\n boxes = img_info['boxes'].copy()\n obj_labels = img_info['objects_id'].copy()\n obj_confs = img_info['objects_conf'].copy()\n attr_labels = img_info['attrs_id'].copy()\n attr_confs = img_info['attrs_conf'].copy()\n assert obj_num == len(boxes) == len(feats)\n\n # Normalize the boxes (to 0 ~ 1)\n img_h, img_w = img_info['img_h'], img_info['img_w']\n boxes = boxes.copy()\n boxes[:, (0, 2)] /= img_w\n boxes[:, (1, 3)] /= img_h\n np.testing.assert_array_less(boxes, 1 + 1e-5)\n np.testing.assert_array_less(-boxes, 0 + 1e-5)\n\n return (feats, boxes), (obj_labels, obj_confs), (attr_labels, attr_confs)\n\n def load_sent_probe(self,img_id, uid): # todo to implement\n\n # sent_probe = np.zeros((36,36))\n sent_probe = self.probe_matrix[img_id][uid]\n return sent_probe\n\n def __getitem__(self, item: int):\n datum = self.data[item]\n\n uid = datum['uid']\n img_id = datum['img_id']\n\n # If calculating the matched loss, replace the sentence with an sentence\n # corresponding to other image.\n is_matched = 1\n sent = datum['sent']\n sent_probe = self.load_sent_probe(img_id,uid)\n\n feats_path = os.path.join(self.img_feats_dir, '{}.pickle'.format(img_id))\n (feats, boxes), (obj_labels, obj_confs), (attr_labels, attr_confs) = self.load_img_feats(feats_path)\n\n label = None\n # Create target\n example = InputExample(\n uid, (sent, sent_probe), (feats, boxes),\n (obj_labels, obj_confs), (attr_labels, attr_confs),\n is_matched, label\n )\n if not self.use_data_augmentation:\n return example\n else: # get augmentation data\n\n rephrased_sents = self.rephrase_data[uid]\n chosen_sent = random.choice(rephrased_sents)\n r_sent = chosen_sent['sent']\n r_uid = chosen_sent['uid']\n r_sent_probe = self.load_sent_probe(img_id,r_uid)\n\n img_aug_method = random.choice(self.img_augmentation_methods)\n (r_feats, r_boxes), (r_obj_labels, r_obj_confs), (r_attr_labels, r_attr_confs) = self.load_img_feats(\n feats_path, key=img_aug_method)\n\n r_example = InputExample(\n r_uid, (r_sent, r_sent_probe), (r_feats, r_boxes),\n (r_obj_labels, r_obj_confs), (r_attr_labels, r_attr_confs),\n is_matched, label\n )\n\n return example, r_example\n\n def __len__(self):\n return len(self.data)\n\n",
"id": "12410218",
"language": "Python",
"matching_score": 2.4402458667755127,
"max_stars_count": 1,
"path": "src/pretrain/lxmert_data.py"
},
{
"content": "import stanza\r\nimport json\r\nfrom tqdm import tqdm\r\nfrom pprint import pprint\r\nfrom multiprocessing import Pool, cpu_count\r\nfrom queue import Queue\r\nimport time\r\nfrom multiprocessing import Manager\r\nimport multiprocessing\r\n\r\n\r\nuse_multi_process=True\r\nmulti_num=4\r\n\r\n\r\ncur_nlp_processor = stanza.Pipeline('en', \"./\", use_gpu=True)\r\n\r\ndef prepare_inputs(json_path):\r\n json_data = json.load(open(json_path))\r\n all_data = []\r\n for ele in tqdm(json_data):\r\n raw = ele['sentf']['mscoco']\r\n reph = ele['sentf']['mscoco_rephrase']\r\n cur_sample_sents = raw + [y for x in reph for y in x]\r\n all_data.append((ele['img_id'], cur_sample_sents))\r\n print('prepare finished, data path={} all data num={}'.format(json_path, len(all_data)))\r\n return all_data\r\n\r\n\r\ndef process_one(ele_data, return_dict):\r\n img_id,cur_sample_sents=ele_data\r\n print(img_id, flush=True)\r\n in_docs = [stanza.Document([], text=d) for d in cur_sample_sents] # Wrap each document with a stanza.Document object\r\n # cur_nlp_processor = nlp_queue.get(block=True)\r\n result = cur_nlp_processor(in_docs)\r\n assert len(cur_sample_sents) == len(result)\r\n\r\n cur_sample_result = []\r\n for i in range(len(result)):\r\n doc = result[i]\r\n if len(doc.sentences) > 0:\r\n sent = doc.sentences[0]\r\n cur = [{'id': word.id, 'word': word.text, 'head_id': word.head, 'head': sent.words[\r\n word.head - 1].text if word.head > 0 else \"root\", 'deprel': word.deprel}\r\n for word in sent.words]\r\n else:\r\n print('something wrong with doc')\r\n pprint(img_id)\r\n cur = []\r\n cur_sample_result.append(cur)\r\n\r\n return_dict[img_id]=cur_sample_result\r\n\r\n\r\n\r\ndef process_all(json_path,save_path):\r\n\r\n start_time=time.time()\r\n thread_pool = Pool(multi_num)\r\n print('using {} process...'.format(multi_num))\r\n\r\n all_data=prepare_inputs(json_path)\r\n\r\n manager=Manager()\r\n return_dict = manager.dict()\r\n\r\n result=[]\r\n for ele in all_data[:]:\r\n # cur_nlp_processor = nlp_queue.get(block=True)\r\n result.append(thread_pool.apply_async(func=process_one, args=(ele, return_dict)))\r\n # nlp_queue.put(cur_nlp_processor)\r\n\r\n thread_pool.close()\r\n thread_pool.join() # 调用join之前,先调用close函数,否则会出错。执行完close后不会有新的进程加入到pool,join函数等待所有子进程结束\r\n\r\n merged_result={}\r\n for key,val in return_dict.items():\r\n merged_result[key]=val\r\n\r\n json.dump(merged_result,open(save_path,'w'))\r\n print('finished all process in {} s, save {} samples to {}'.format(time.time()-start_time,len(return_dict),save_path))\r\n\r\n\r\nif __name__ == '__main__':\r\n multiprocessing.set_start_method('spawn')\r\n all_paths=[('../lxmert/mscoco_train_aug.json','../probe/mscoco_train_prob.json'),\r\n ('../lxmert/mscoco_minival_aug.json','../probe/mscoco_minival_prob.json'),\r\n ('../lxmert/mscoco_nominival_aug.json','../probe/mscoco_nominival_prob.json')]\r\n\r\n for json_path,save_path in all_paths:\r\n process_all(json_path,save_path)\r\n\r\n\r\n#\r\n# nlp = stanza.Pipeline('en', \"./\",use_gpu=True) # Build the pipeline, specify part-of-speech processor's batch size\r\n#\r\n# minval_data = json.load(open('../lxmert/mscoco_minval_aug.json'))\r\n# nominval_data = json.load(open('../lxmert/mscoco_nominval_aug.json'))\r\n# train_data = json.load(open('../lxmert/mscoco_train_aug.json'))\r\n#\r\n#\r\n# minval_data_result={}\r\n# for ele in tqdm(minval_data):\r\n# raw = ele['sentf']['mscoco']\r\n# reph = ele['sentf']['mscoco_rephrase']\r\n# cur_sample_sents = raw + [y for x in reph for y in x]\r\n# # 
cur_sample_sents=['hello my name is liyongzhi, I am a student in china']*100\r\n# in_docs = [stanza.Document([], text=d) for d in\r\n# cur_sample_sents] # Wrap each document with a stanza.Document object\r\n# result = nlp(in_docs)\r\n# assert len(cur_sample_sents) == len(result)\r\n#\r\n# cur_sample_result = []\r\n# for i in range(len(result)):\r\n# doc = result[i]\r\n# if len(doc.sentences)>0:\r\n# sent = doc.sentences[0]\r\n# cur = [{'id': word.id, 'word': word.text, 'head_id': word.head, 'head': sent.words[\r\n# word.head - 1].text if word.head > 0 else \"root\", 'deprel': word.deprel}\r\n# for word in sent.words]\r\n# else:\r\n# print('something wrong with doc')\r\n# pprint(ele)\r\n# cur=[]\r\n# cur_sample_result.append(cur)\r\n# minval_data_result[ele['img_id']]=cur_sample_result\r\n#\r\n# json.dump(minval_data_result,open('../lxmert/mscoco_minval_prob.json','w'))\r\n# print('finished minval data processing...')\r\n#\r\n",
"id": "2794012",
"language": "Python",
"matching_score": 3.3600664138793945,
"max_stars_count": 1,
"path": "data/stanza/generate_probe.py"
},
{
"content": "import numpy as np\r\nfrom tqdm import tqdm\r\nimport json\r\nimport networkx as nx\r\nimport pickle\r\nimport time\r\n\r\n\r\ndef make_uid(img_id, dset, sent_idx):\r\n return \"{}_{}_{}\".format(img_id, dset, sent_idx)\r\n\r\n\r\ndef get_one_sent_probe(sent):\r\n G = nx.Graph()\r\n all_edge = []\r\n for edge in sent:\r\n u = edge['id']\r\n v = edge['head_id']\r\n all_edge.append((u, v))\r\n G.add_edges_from(all_edge)\r\n # print(G.number_of_edges())\r\n gen = nx.all_pairs_shortest_path(G)\r\n shortest_path = dict(gen)\r\n probe_size = len(sent)\r\n probe = np.ones((probe_size, probe_size)) * -1\r\n for i in range(probe_size):\r\n for j in range(probe_size):\r\n probe[i][j] = len(shortest_path[i + 1][j + 1]) - 1 # stanza的结果单词从1开始编号\r\n return probe\r\n\r\n\r\ndef generate_probe_matrix(json_path, save_path):\r\n start_time = time.time()\r\n data = json.load(open(json_path))\r\n\r\n all_result = {}\r\n for img_id, img_sample in tqdm(data.items()):\r\n raw_sent_num = len(img_sample) - 10\r\n\r\n img_result = {}\r\n for i, sent in enumerate(img_sample):\r\n if i < raw_sent_num:\r\n sent_cat = 'mscoco'\r\n sent_idx = i\r\n else:\r\n sent_cat = 'mscoco_rephrase'\r\n raw_idx = (i - raw_sent_num) // 2\r\n j = (i - raw_sent_num) & 1\r\n sent_idx = '{}_{}'.format(raw_idx, j)\r\n\r\n key = make_uid(img_id, sent_cat, sent_idx)\r\n probe_matrix = get_one_sent_probe(sent)\r\n img_result[key] = probe_matrix\r\n all_result[img_id] = img_result\r\n\r\n pickle.dump(all_result, open(save_path, 'wb'))\r\n print('save probe matrix data to {}, total data number is {}, using time is {}'.format(save_path, len(all_result),\r\n time.time() - start_time))\r\n\r\n\r\n\r\njson_path='/m/liyz/lxmert/data/probe/mscoco_minival_prob.json'\r\nsave_path='/m/liyz/lxmert/data/probe/mscoco_minival_prob_matrix.pickle'\r\n\r\nnominival_json_path='/m/liyz/lxmert/data/probe/mscoco_nominival_prob.json'\r\nnominival_save_path='/m/liyz/lxmert/data/probe/mscoco_nominival_prob_matrix.pickle'\r\ngenerate_probe_matrix(nominival_json_path,nominival_save_path)\r\n\r\ntrian_json_path='/m/liyz/lxmert/data/probe/mscoco_train_prob.json'\r\ntrain_save_path='/m/liyz/lxmert/data/probe/mscoco_train_prob_matrix.pickle'\r\ngenerate_probe_matrix(trian_json_path,train_save_path)",
"id": "11540946",
"language": "Python",
"matching_score": 0.6104688048362732,
"max_stars_count": 1,
"path": "data/stanza/get_probe_matrix.py"
},
{
"content": "from googletrans import Translator\r\nimport time\r\nimport json\r\nimport random\r\n\r\nimport torch\r\nen2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de', tokenizer='moses', bpe='subword_nmt')\r\n\r\n\r\n\r\n\r\ndest_langs=['zh-cn','de','fr','es','ru']\r\n\r\nuseful_proxies=[\"192.168.3.11:8787\",\"172.16.58.3:8080\",'192.168.3.11:88', '192.168.3.11:3128', '172.16.31.10:3128', '172.16.58.3:80', '172.16.58.3:8888', '172.16.17.32:80', '192.168.3.11:8888', '172.16.58.3:80', '192.168.127.12:443', '192.168.3.11:9999', '172.16.31.10:80', '172.16.31.10:8080', '172.16.31.10:80', '192.168.127.12:3128', '172.16.58.3:80']\r\n\r\n\r\ndef read_ori_files(file_path):\r\n sentences = []\r\n with open(file_path, 'r') as f:\r\n for line in f.readlines():\r\n line = line.strip()\r\n sentences.append(line)\r\n return sentences\r\n\r\n\r\ndef translate(input_sentences, translator_pool,batch_size=20,target_lang='zh-cn'):\r\n result={}\r\n\r\n i=0\r\n t_idx=0\r\n while i<len(input_sentences):\r\n batch_sents = input_sentences[i:i + batch_size]\r\n translator = translator_pool[t_idx % len(translator_pool)]\r\n t_idx = i\r\n try:\r\n target_rt = translator.translate(batch_sents, src='en', dest=target_lang)\r\n trans_sents = [rt.text for rt in target_rt]\r\n back_trans = translator.translate(trans_sents, src=target_lang, dest='en')\r\n back_trans_sents=[x.text for x in back_trans]\r\n for j in range(len(batch_sents)):\r\n result[batch_sents[j]] = back_trans_sents[j]\r\n if len(result) % 500 == 0:\r\n print('{} sentences have been back translated'.format(len(result)))\r\n print(trans_sents)\r\n print(back_trans_sents)\r\n print(i)\r\n i += batch_size\r\n except:\r\n print(\"proxy {} failed\".format(translator.session.proxies))\r\n t_idx=random.randint(0,100)\r\n\r\n return result\r\n\r\nif __name__ == '__main__':\r\n translator_pool=[Translator(service_urls=['translate.google.cn'])]\r\n # for pro in useful_proxies:\r\n # translator = Translator(service_urls=['translate.google.cn'],proxies={'https':pro})\r\n # translator_pool.append(translator)\r\n\r\n translator=translator_pool[0]\r\n rt=translator.translate('hello my name is jack', dest='zh-cn')\r\n print(rt.text)\r\n file_list=['C:/Users/木子-勇士心/Desktop/caps/test_caps.txt',\r\n 'C:/Users/木子-勇士心/Desktop/caps/dev_caps.txt',\r\n 'C:/Users/木子-勇士心/Desktop/caps/train_caps.txt']\r\n for file in file_list:\r\n all_sents = read_ori_files(file_path=file)\r\n print('{} file read finished'.format(file))\r\n result=translate(all_sents, translator_pool)\r\n save_path=file.split('.')[0]+\"_trans.json\"\r\n json.dump(result,open(save_path,'w'))\r\n print('save translated result into {}'.format(save_path))\r\n\r\n\r\n\r\n\r\n\r\n\r\n LANGUAGES = {\r\n 'af': 'afrikaans',\r\n 'sq': 'albanian',\r\n 'am': 'amharic',\r\n 'ar': 'arabic',\r\n 'hy': 'armenian',\r\n 'az': 'azerbaijani',\r\n 'eu': 'basque',\r\n 'be': 'belarusian',\r\n 'bn': 'bengali',\r\n 'bs': 'bosnian',\r\n 'bg': 'bulgarian',\r\n 'ca': 'catalan',\r\n 'ceb': 'cebuano',\r\n 'ny': 'chichewa',\r\n 'zh-cn': 'chinese (simplified)',\r\n 'zh-tw': 'chinese (traditional)',\r\n 'co': 'corsican',\r\n 'hr': 'croatian',\r\n 'cs': 'czech',\r\n 'da': 'danish',\r\n 'nl': 'dutch',\r\n 'en': 'english',\r\n 'eo': 'esperanto',\r\n 'et': 'estonian',\r\n 'tl': 'filipino',\r\n 'fi': 'finnish',\r\n 'fr': 'french',\r\n 'fy': 'frisian',\r\n 'gl': 'galician',\r\n 'ka': 'georgian',\r\n 'de': 'german',\r\n 'el': 'greek',\r\n 'gu': 'gujarati',\r\n 'ht': 'haitian creole',\r\n 'ha': 'hausa',\r\n 'haw': 'hawaiian',\r\n 'iw': 
'hebrew',\r\n 'hi': 'hindi',\r\n 'hmn': 'hmong',\r\n 'hu': 'hungarian',\r\n 'is': 'icelandic',\r\n 'ig': 'igbo',\r\n 'id': 'indonesian',\r\n 'ga': 'irish',\r\n 'it': 'italian',\r\n 'ja': 'japanese',\r\n 'jw': 'javanese',\r\n 'kn': 'kannada',\r\n 'kk': 'kazakh',\r\n 'km': 'khmer',\r\n 'ko': 'korean',\r\n 'ku': 'kurdish (kurmanji)',\r\n 'ky': 'kyrgyz',\r\n 'lo': 'lao',\r\n 'la': 'latin',\r\n 'lv': 'latvian',\r\n 'lt': 'lithuanian',\r\n 'lb': 'luxembourgish',\r\n 'mk': 'macedonian',\r\n 'mg': 'malagasy',\r\n 'ms': 'malay',\r\n 'ml': 'malayalam',\r\n 'mt': 'maltese',\r\n 'mi': 'maori',\r\n 'mr': 'marathi',\r\n 'mn': 'mongolian',\r\n 'my': 'myanmar (burmese)',\r\n 'ne': 'nepali',\r\n 'no': 'norwegian',\r\n 'ps': 'pashto',\r\n 'fa': 'persian',\r\n 'pl': 'polish',\r\n 'pt': 'portuguese',\r\n 'pa': 'punjabi',\r\n 'ro': 'romanian',\r\n 'ru': 'russian',\r\n 'sm': 'samoan',\r\n 'gd': 'scots gaelic',\r\n 'sr': 'serbian',\r\n 'st': 'sesotho',\r\n 'sn': 'shona',\r\n 'sd': 'sindhi',\r\n 'si': 'sinhala',\r\n 'sk': 'slovak',\r\n 'sl': 'slovenian',\r\n 'so': 'somali',\r\n 'es': 'spanish',\r\n 'su': 'sundanese',\r\n 'sw': 'swahili',\r\n 'sv': 'swedish',\r\n 'tg': 'tajik',\r\n 'ta': 'tamil',\r\n 'te': 'telugu',\r\n 'th': 'thai',\r\n 'tr': 'turkish',\r\n 'uk': 'ukrainian',\r\n 'ur': 'urdu',\r\n 'uz': 'uzbek',\r\n 'vi': 'vietnamese',\r\n 'cy': 'welsh',\r\n 'xh': 'xhosa',\r\n 'yi': 'yiddish',\r\n 'yo': 'yoruba',\r\n 'zu': 'zulu',\r\n 'fil': 'Filipino',\r\n 'he': 'Hebrew'\r\n }\r\n\r\n",
"id": "7374452",
"language": "Python",
"matching_score": 3.474207639694214,
"max_stars_count": 2,
"path": "tools/paraphrase.py"
},
{
"content": "import os\r\nimport json\r\nfrom tqdm import tqdm\r\nimport torch\r\n\r\n# Compare the results with English-Russian round-trip translation:\r\nen2ru = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.en-ru.single_model', tokenizer='moses', bpe='fastbpe')\r\nru2en = torch.hub.load('pytorch/fairseq', 'transformer.wmt19.ru-en.single_model', tokenizer='moses', bpe='fastbpe')\r\n\r\nparaphrase = ru2en.translate(en2ru.translate('PyTorch Hub is an awesome interface!'))\r\nprint(paraphrase)\r\nassert paraphrase == 'PyTorch is a great interface!'\r\n\r\n\r\n\r\ndef check_drive():\r\n try:\r\n path = 'drive/scan_data_caps/f30k/test_caps.txt'\r\n if not os.path.exists(path):\r\n return False\r\n with open(path, 'r') as f:\r\n for line in f.readlines():\r\n print(line.strip())\r\n break\r\n test_data = {'hello': 100}\r\n save_path = 'drive/scan_data_caps/f30k/debug.json'\r\n json.dump(test_data, open(save_path, 'w'))\r\n if os.path.exists(save_path):\r\n os.remove(save_path)\r\n return True\r\n else:\r\n return False\r\n except:\r\n return False\r\n\r\n\r\ndef read_ori_files(file_path):\r\n sentences = []\r\n with open(file_path, 'r') as f:\r\n for line in f.readlines():\r\n line = line.strip()\r\n sentences.append(line)\r\n return sentences\r\n\r\n\r\ndef back_translation(all_sents, tl1, tl2):\r\n result = {}\r\n beam_size = [3]\r\n batch_size=500\r\n batch_cnt=len(all_sents)/batch_size\r\n batch_cnt=int(batch_cnt) if batch_cnt*batch_size==len(all_sents) else int(batch_cnt)+1\r\n print(\"cur file total sentences number is {}\".format(len(all_sents)))\r\n for i in tqdm(range(batch_cnt)):\r\n cur_batch_sent=all_sents[i*batch_size:(i+1)*batch_size]\r\n cur_result = {sent:[] for sent in cur_batch_sent}\r\n for bs in beam_size:\r\n dest_rt = tl1.translate(cur_batch_sent, beam=bs)\r\n back_batch_sent = tl2.translate(dest_rt, beam=bs)\r\n for j in range(len(cur_batch_sent)):\r\n src_sent=cur_batch_sent[j]\r\n back_sent=back_batch_sent[j]\r\n cur_result[src_sent].append(back_sent)\r\n result.update(cur_result)\r\n return result\r\n\r\n\r\ndef main(all_file_path, translator1, translator2):\r\n for file_path in all_file_path:\r\n print(\"begin processing file {}\".format(file_path))\r\n all_sents = read_ori_files(file_path)\r\n\r\n result_path = file_path + '_{}.json'.format(BACK_NAME)\r\n result = back_translation(all_sents, tl1=translator1, tl2=translator2)\r\n json.dump(result, open(result_path, 'w'))\r\n print('save result json file to {}'.format(result_path))\r\n\r\n print('All file processing finished')\r\n\r\n\r\nall_file_paths = ['drive/scan_data_caps/f30k/test_caps.txt',\r\n 'drive/scan_data_caps/f30k/dev_caps.txt',\r\n 'drive/scan_data_caps/f30k/train_caps.txt',\r\n 'drive/scan_data_caps/coco/test_caps.txt',\r\n 'drive/scan_data_caps/coco/testall_caps.txt',\r\n 'drive/scan_data_caps/coco/dev_caps.txt',\r\n 'drive/scan_data_caps/coco/train_caps.txt']\r\n\r\nBACK_NAME = 'en2ru2en'\r\n\r\ntranslator1 = en2ru.cuda()\r\ntranslator2 = ru2en.cuda()\r\nmain(all_file_paths, translator1, translator2)\r\n",
"id": "1362611",
"language": "Python",
"matching_score": 1.3408496379852295,
"max_stars_count": 2,
"path": "tools/back_translate.py"
},
{
"content": "import json\r\n\r\n\r\nfile_path='C:\\\\Users\\\\木子-勇士心\\\\Desktop\\\\caps\\\\train_caps.txt'\r\ndata={}\r\n\r\nline_cnt=0\r\nwith open(file_path,'rb') as f:\r\n for line in f.readlines():\r\n line=line.strip()\r\n data[line]=0\r\n line_cnt+=1\r\n\r\nprint(line_cnt)\r\nprint(len(data))\r\n\r\n\r\n\r\n",
"id": "12562432",
"language": "Python",
"matching_score": 0.2866838276386261,
"max_stars_count": 2,
"path": "tools/check_file.py"
},
{
"content": "import json\r\nimport os\r\nfrom tqdm import tqdm\r\nimport random\r\n\r\nDATANAME = ['f30k_precomp']\r\n\r\nroot_dir = '/S4/MI/liyz/data/scan_data'\r\nen2de_back_trans_dir = '/S4/MI/liyz/data/scan_data/back_translation/en2de2en/'\r\nen2ru_back_trans_dir = '/S4/MI/liyz/data/scan_data/back_translation/en2ru2en/'\r\n\r\n\r\ndef main(data_name):\r\n print('current dataname is {}'.format(data_name))\r\n data_split = ['test', 'dev', 'train']\r\n adversary_type = ['_ex_noun', '_ex_num', '_ex_rela']\r\n\r\n if data_name == 'coco_precomp':\r\n data_split.append('testall')\r\n data_set_dir = os.path.join(root_dir, data_name)\r\n\r\n for spt in data_split:\r\n cur_raw_captions_file = os.path.join(data_set_dir, '{}_caps.txt'.format(spt))\r\n print('begin execute file is: {}'.format(cur_raw_captions_file))\r\n\r\n en2de_back_file = os.path.join(en2de_back_trans_dir, \"{}/{}_caps_en2de2en.json\".format(data_name, spt))\r\n en2de_back_trans = json.load(open(en2de_back_file)) # 德语回翻的re-phrase句子\r\n\r\n en2ru_back_file = os.path.join(en2ru_back_trans_dir, '{}/{}_caps_en2ru2en.json'.format(data_name, spt))\r\n en2ru_back_trans = json.load(open(en2ru_back_file)) # 俄语回翻的re-phrase句子\r\n\r\n noun_adv_dir = os.path.join(data_set_dir, '{}_ex_noun'.format(spt))\r\n num_adv_dir = os.path.join(data_set_dir, '{}_ex_num'.format(spt))\r\n rela_adv_dir = os.path.join(data_set_dir, '{}_ex_rela'.format(spt))\r\n\r\n with open(cur_raw_captions_file) as f:\r\n all_captions = f.readlines() # 原始句子\r\n\r\n result = {}\r\n for i in tqdm(range(len(all_captions))):\r\n tmp = {}\r\n\r\n cur_cap = all_captions[i].strip()\r\n tmp['raw'] = cur_cap\r\n try:\r\n en2de = en2de_back_trans[cur_cap]\r\n except:\r\n print(\"WARNING!!! {} not in the en2de file\".format(cur_cap))\r\n en2de=cur_cap\r\n try:\r\n en2ru = en2ru_back_trans[cur_cap]\r\n except:\r\n print(\"WARNING!!! 
{} not in the en2ru file\".format(cur_cap))\r\n en2ru = cur_cap\r\n\r\n tmp['re-pharse'] = [en2de, en2ru]\r\n\r\n adv_caps = {}\r\n\r\n noun_adv_file = os.path.join(noun_adv_dir, '{}.txt'.format(i))\r\n with open(noun_adv_file) as f:\r\n all_adv_noun_caps = f.readlines()\r\n random.shuffle(all_adv_noun_caps)\r\n noun_adv=[x.strip() for x in all_adv_noun_caps[:10]]\r\n while len(noun_adv)<10:\r\n noun_adv.append('unk')\r\n\r\n num_adv_file = os.path.join(num_adv_dir, '{}.txt'.format(i))\r\n with open(num_adv_file) as f:\r\n all_adv_num_caps = f.readlines()\r\n random.shuffle(all_adv_num_caps)\r\n num_adv=[x.strip() for x in all_adv_num_caps[:10]]\r\n while len(num_adv)<10:\r\n num_adv.append('unk')\r\n\r\n rela_adv_file = os.path.join(rela_adv_dir, '{}.txt'.format(i))\r\n with open(rela_adv_file) as f:\r\n all_adv_rela_caps =f.readlines()\r\n random.shuffle(all_adv_rela_caps)\r\n rela_adv=[x.strip() for x in all_adv_rela_caps[:10]]\r\n while len(rela_adv)<10:\r\n rela_adv.append('unk')\r\n\r\n adv_caps={'noun':noun_adv,'num':num_adv,'rela':rela_adv}\r\n\r\n # j = 0\r\n # while len(adv_caps) < 5 and j < len(all_adv_noun_caps):\r\n # adv_caps.append(all_adv_noun_caps[j].strip())\r\n # j += 1\r\n #\r\n # j = 0\r\n # while len(adv_caps) < 8 and j < len(all_adv_rela_caps):\r\n # adv_caps.append(all_adv_rela_caps[j].strip())\r\n # j += 1\r\n #\r\n # j = 0\r\n # while len(adv_caps) < 10 and j < len(all_adv_num_caps):\r\n # adv_caps.append(all_adv_num_caps[j].strip())\r\n # j += 1\r\n #\r\n # if len(adv_caps) < 10: # 如果小于10个句子,从所有的adv句子中随机选补齐,否则的话使用unk当做一个句子补齐10句\r\n # all_adv_caps = all_adv_noun_caps + all_adv_rela_caps + all_adv_num_caps\r\n # need_caps = 10 - len(adv_caps)\r\n # t_caps = random.choices(all_adv_caps, k=need_caps) if len(all_adv_caps) > 0 else ['<unk>'] * need_caps\r\n # adv_caps += t_caps\r\n #\r\n # assert len(adv_caps) == 10\r\n\r\n tmp['adversary'] = adv_caps\r\n\r\n result[i] = tmp\r\n\r\n save_file_path = os.path.join(data_set_dir, '{}_caps+rephrase+30advs.json'.format(spt))\r\n json.dump(result, open(save_file_path, 'w'))\r\n print('save processed file into {}'.format(save_file_path))\r\n\r\n\r\nif __name__ == '__main__':\r\n for data_name in DATANAME:\r\n main(data_name)\r\n",
"id": "1636029",
"language": "Python",
"matching_score": 1.1259591579437256,
"max_stars_count": 2,
"path": "tools/preprocess_captions.py"
},
{
"content": "from stanza.server import CoreNLPClient\r\nimport json\r\nimport os\r\nfrom tqdm import tqdm\r\nfile_paths=['/S4/MI/data/mscoco/annotations/captions_val2014.json','/S4/MI/data/mscoco/annotations/captions_train2014.json']\r\nsave_path='/S4/MI/liyz/coco_triplets_corenlp.json'\r\nresult={}\r\n\r\n\r\nwith CoreNLPClient(annotators=['openie'], timeout=30000, memory='64G',threads=32) as client:\r\n for file in file_paths:\r\n print('executing file {}'.format(file_paths))\r\n data=json.load(open(file))\r\n captions=data['annotations']\r\n for cap in tqdm(captions):\r\n img_id=cap['image_id']\r\n if img_id not in result:\r\n result[img_id]=[]\r\n raw_sent = cap['caption']\r\n ann=client.annotate(raw_sent)\r\n parse_result=ann.sentence[0].openieTriple\r\n for triplet in parse_result:\r\n tmp=[triplet.subject,triplet.relation,triplet.object]\r\n result[img_id].append(tmp)\r\n json.dump(result,open(save_path,'w'))\r\n print('save result file to {}'.format(save_path))\r\n\r\n\r\n\r\n",
"id": "11135940",
"language": "Python",
"matching_score": 0.44050508737564087,
"max_stars_count": 2,
"path": "tools/coco_corenlp_triplets.py"
}
] | 2.294248 |
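The `get_img_feat` routine in `src/pretrain/lxmert_data.py` above applies an image-level horizontal flip to the ROI boxes and then scales them into [0, 1] before returning them. Below is a minimal, self-contained sketch of just that step; the helper name `hflip_and_normalise` and the example box values are illustrative only and not part of the repository, and boxes are assumed to be `(x1, y1, x2, y2)` in pixels.

import numpy as np

def hflip_and_normalise(boxes: np.ndarray, img_w: int, img_h: int, flip: bool) -> np.ndarray:
    boxes = boxes.copy().astype(np.float64)
    if flip:
        # After mirroring, the old right edge becomes the new left edge.
        boxes[:, (0, 2)] = img_w - boxes[:, (2, 0)]
    boxes[:, (0, 2)] /= img_w
    boxes[:, (1, 3)] /= img_h
    # Same sanity checks as the dataset code: every coordinate must lie in [0, 1].
    np.testing.assert_array_less(boxes, 1 + 1e-5)
    np.testing.assert_array_less(-boxes, 1e-5)
    return boxes

if __name__ == "__main__":
    b = np.array([[10.0, 20.0, 110.0, 220.0]])
    print(hflip_and_normalise(b, img_w=640, img_h=480, flip=True))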
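`data/stanza/get_probe_matrix.py` above turns a Stanza dependency parse into a matrix of pairwise tree distances (the `len(shortest_path) - 1` converts a path's node count into an edge count, since Stanza numbers words from 1 and uses head id 0 for the root). A compact sketch of the same idea, using `networkx.all_pairs_shortest_path_length` directly, is shown here; the function name `dependency_distance_matrix` and the toy two-word parse are illustrative only.

import numpy as np
import networkx as nx

def dependency_distance_matrix(sent):
    """sent: list of dicts with 1-based 'id' and 'head_id' (0 = root), as in the Stanza output step."""
    G = nx.Graph()
    G.add_edges_from((w['id'], w['head_id']) for w in sent)
    # Pairwise shortest-path lengths over the (undirected) dependency tree.
    dist = dict(nx.all_pairs_shortest_path_length(G))
    n = len(sent)
    probe = np.full((n, n), -1, dtype=int)
    for i in range(1, n + 1):
        for j in range(1, n + 1):
            probe[i - 1][j - 1] = dist[i][j]
    return probe

# Toy parse: "dogs(1) bark(2)" with "bark" as root (head_id 0) and "dogs" attached to it.
print(dependency_distance_matrix([{'id': 1, 'head_id': 2}, {'id': 2, 'head_id': 0}]))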
DE-Bass | [
{
"content": "\"\"\"\nFrom the 26 March meeting, the plan was:\n1) Fix 2D separation and overall R flux ratio. Find best fit PSF.\n\n2) Extract spectra of A and B components. This is best done with a *good seeing* night and doesn't have to \nbe done for every data set. Save these spectra.\n\n2) Fix B spectrum, and using the PSFs from step (1) extract the 2D positions of the A and\nB components.\n\"\"\"\n\nfrom __future__ import division, print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.io.fits as pyfits\nimport glob\nimport scipy.optimize as op\nimport scipy.signal as sig\nimport time\nimport multiprocessing\nplt.ion()\n\n#Settings\nmultiprocess=False #Setting this for a macbook changes total time from ~9 to ~5 seconds. Only a moderte help!\nMIN_PEAK=20\nWAVE = np.arange(6400.0,7000.0,0.25)\nddir = '/Volumes/MyPassport/data/wifes/20190225_red/'\nddir = '/Volumes/MyPassport/data/wifes/20190226_red/'\nddir = '/Users/mireland/data/pds70/190225/' #!!! This comes from \n#ddir = '/Users/mireland/data/pds70/190225/' #From Marusa's reduction.\nfns = np.sort(glob.glob(ddir + '*p11.fits'))\n\n#---------------------------------\n#Local function declarations\n\ndef gauss_line(p,x):\n \"A simple 1D Gaussian\"\n return p[0]*np.exp(-(x-p[1])**2/(2*p[2]**2))\n\ndef gauss_line_resid(p,x,y, gain=1.0, rnoise=3.0):\n \"Residuals for fitting to a 1D Gaussian\"\n return (gauss_line(p,x) - y)/10. #np.sqrt(np.maximum(y,0) + rnoise**2)\n\ndef lsq_gauss_line( args ):\n \"\"\"\n Fit a Gaussian to data y(x)\n \n Parameters\n ----------\n args: tuple\n nline, guess_center, width_guess, xfit, yfit\n \n Notes\n -----\n nline: int\n index of this line\n guess_center: float\n initial guess position\n \"\"\"\n fit = op.least_squares(gauss_line_resid, [args[4][args[1]], args[1], args[2]], method='lm', \\\n xtol=1e-04, ftol=1e-4, f_scale=[3.,1.,1.], args=(args[3], args[4]))\n if (fit.x[2]<0.6) or (fit.x[1]>args[3][-1]) or (fit.x[1]<0): #This is unphysical!\n return args[0], fit.x[1], 0., fit.x[2], 0.\n else:\n cov = np.linalg.inv(fit.jac.T.dot(fit.jac))\n return args[0], fit.x[1], 1/cov[1,1], fit.x[2], 1/cov[2,2]\n\n#---------------------------------\n#Main \"Script\" code\npas = []\nmjds = []\nxcs = []\nxc_sigs = []\nxws = []\nxw_sigs = []\n\n#Loop through files and make a 1D or 2D analysis.\nfor f in fns:\n ff = pyfits.open(f)\n pas.append(ff[0].header['TELPAN'])\n mjds.append(ff[0].header['MJD-OBS'])\n dd = ff[0].data[:,8:-8,13:-2]\n \n #Subtract off local sky contribution\n meds = np.median(dd.reshape(dd.shape[0], dd.shape[1]*dd.shape[2]), axis=1).reshape(dd.shape[0],1,1)\n dd -= meds\n \n #Find the maxima in every column.\n max_ix = np.argmax(dd, axis=1)\n maxs = np.max(dd, axis=1)\n \n #Prepare our result arrays\n xc_mn = np.zeros_like(maxs)\n xc_ivar = np.zeros_like(maxs)\n xw_mn = np.zeros_like(maxs)\n xw_ivar = np.zeros_like(maxs)\n xfit = np.arange(dd.shape[1])\n \n #Now prepare the data\n jobs = []\n for ii in range(dd.shape[0]):\n for jj in range(dd.shape[2]):\n if maxs[ii,jj] > MIN_PEAK:\n jobs.append( (ii*dd.shape[2]+jj,max_ix[ii,jj], 2.0, xfit, dd[ii,:,jj]) )\n \n print('Running jobs for file: ' + f)\n then = time.time()\n if multiprocess:\n with multiprocessing.Pool(None) as mypool:\n results = mypool.imap_unordered(lsq_gauss_line,jobs,4) \n # Process the results\n for r in results:\n xc_mn[r[0]//dd.shape[2],r[0] % dd.shape[2]] = r[1]\n xc_ivar[r[0]//dd.shape[2],r[0] % dd.shape[2]] = r[2]\n xw_mn[r[0]//dd.shape[2],r[0] % dd.shape[2]] = r[3]\n xw_ivar[r[0]//dd.shape[2],r[0] % 
dd.shape[2]] = r[4]\n else:\n for j in jobs:\n j0, xc, ivar, xw, xw_oneivar = lsq_gauss_line(j)\n xc_mn[j[0]//dd.shape[2],j[0] % dd.shape[2]] = xc\n xc_ivar[j[0]//dd.shape[2],j[0] % dd.shape[2]] = ivar\n xw_mn[j[0]//dd.shape[2],j[0] % dd.shape[2]] = xw\n xw_ivar[j[0]//dd.shape[2],j[0] % dd.shape[2]] = xw_oneivar\n print('Total time: {:5.2f}s'.format(time.time()-then))\n xcs.append(np.sum(xc_mn*xc_ivar, axis=1)/np.sum(xc_ivar, axis=1))\n xc_sigs.append(1./np.sqrt(np.sum(xc_ivar, axis=1)))\n xws.append(np.sum(xw_mn*xw_ivar, axis=1)/np.sum(xw_ivar, axis=1))\n xw_sigs.append(1./np.sqrt(np.sum(xw_ivar, axis=1)))\n\nxcs = np.array(xcs)\nxc_sigs = np.array(xc_sigs)\nxws = np.array(xws)\nxw_sigs = np.array(xw_sigs)\n\ngood = np.where(np.median(xc_sigs, axis=1) < 0.06)[0]\npas = np.array(pas)[good]\nmjds = np.array(mjds)[good]\nxcs = xcs[good]\nxc_sigs = xc_sigs[good]\nxws = xws[good]\nxw_sigs = xw_sigs[good]\nfilt_xcs = xcs - sig.medfilt(xcs,(1,201))\nsign = (2*(pas==150)-1).reshape(len(pas),1)\n\nplt.figure(1)\nplt.clf()\nplt.plot(WAVE, np.sum(filt_xcs*sign/np.sum(np.abs(sign))*500., axis=0))\nplt.axis([6400,6700,-30,30])\nplt.xlabel(r'Wavelength ($\\AA$)')\nplt.ylabel('Offset (mas)')\nplt.tight_layout()",
"id": "4606466",
"language": "Python",
"matching_score": 6.794853687286377,
"max_stars_count": 0,
"path": "spectroastro.py"
},
{
"content": "\"\"\"\nFrom the 26 March meeting, the plan was:\n1) Fix 2D separation and overall R flux ratio. Find best fit PSF.\n\nIssues... the best fit PSF can't just be a Gaussian. It is naturally the convolution of\nmultiple functional forms, i.e. something that is positive everywhere. On a quick search, \nI can't find any obvious parameterisations. Options...\n\na: Just use the interpolated PSF with a correction for the companion. Problem: we don't\nknow how to correct for the companion, so will have to do this iteratively.\n\nb: Use a \"distortion map\". \n\nc: Use a functional form that can be negative and don't worry about details. \n\n2) Extract spectra of A and B components. This is best done with a *good seeing* night and doesn't have to \nbe done for every data set. Save these spectra.\n\n3) Fix B spectrum, and using the PSFs from step (1) extract the 2D positions of the A and\nB components.\n\nStar: GaiaDR2 6110141563309613184\n...\nis: \n2.343 arcsec North\n0.472 arcsec East\n\nIt is 3.726 mags fainter in Rp.\n\"\"\"\n\nfrom __future__ import division, print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.io.fits as pyfits\nimport glob\nimport scipy.optimize as op\nimport scipy.signal as sig\nimport time\nimport multiprocessing\nimport pdb\nplt.ion()\n\n#Settings\nmultiprocess=False #Setting this for a macbook changes total time from ~9 to ~5 seconds. Only a moderte help!\nMIN_PEAK=20\nNPSF_PARAMS = 5\nWAVE = np.arange(6400.0,7000.0,0.25)\nddir = '/Users/mireland/data/pds70/190225/' #!!! This comes from \n#ddir = '/Users/mireland/data/pds70/190225/' #From Marusa's reduction.\nfns = np.sort(glob.glob(ddir + '*p11.fits'))\nxscale = 1.0 #arcsec/pix\nyscale = 0.5 #arcsec/pix\n\n#---------------------------------\n#Local function declarations\n\ndef PSF(p,x,y,companion_params=None):\n \"\"\"A simple 2D PSF based on a Gaussian.\n \n Parameters\n ----------\n p: numpy array\n Parameters for the PSF.\n p[0]: x coordinate offset\n p[1]: x coordinate width\n p[2]: y coordinate offset\n p[3]: y coordinate width\n p[4]: Total flux\n p[5]: 2nd order symmetric term\n \n x: x coordinate in arcsec.\n y: y coordinate in arcsec.\n \"\"\"\n xp = (x-p[0])/p[1]\n yp = (y-p[2])/p[3]\n if companion_params != None:\n xp_comp = (x-p[0]-companion_params[1])/p[1]\n yp_comp = (y-p[2]-companion_params[2])/p[3]\n return p[4]*(np.exp(-(xp**2 + yp**2)/2.0) + companion_params[0]*np.exp(-(xp_comp**2 + yp_comp**2)/2.0))\n else:\n return p[4]*np.exp(-(xp**2 + yp**2)/2.0)\n\ndef PSF_resid(p,x,y,data, gain=1.0, rnoise=3.0):\n \"Residuals for fitting to a 1D Gaussian\"\n return ((PSF(p,x,y) - data)/10.).flatten() #np.sqrt(np.maximum(y,0) + rnoise**2)\n\ndef lsq_PSF( args ):\n \"\"\"\n Fit a Gaussian to data y(x)\n \n Parameters\n ----------\n args: tuple\n guess_p, xfit, yfit\n \n Notes\n -----\n nline: int\n index of this line\n guess_center: float\n initial guess position\n \"\"\"\n fit = op.least_squares(PSF_resid, args[0], method='lm', \\\n xtol=1e-04, ftol=1e-4, f_scale=[3.,1.,1.], args=(args[1], args[2], args[3]))\n #Check for unphysical solutions and set c_inv to zero for those solutions...\n c_inv = fit.jac.T.dot(fit.jac)\n return fit.x, c_inv\n\n#---------------------------------\n#Main \"Script\" code\npas = []\nmjds = []\nfits = []\nsigs = []\nyx_peak = np.zeros( (len(WAVE), 2), dtype=np.int)\npeak_vals = np.zeros( len(WAVE) )\n\ndds = []\n#Loop through files and make a 2D fit.\nfor f in fns[-3:]:\n ff = pyfits.open(f)\n pas.append(ff[0].header['TELPAN'])\n 
mjds.append(ff[0].header['MJD-OBS'])\n dd = ff[0].data[:,8:-8,13:-2]\n dds += [dd]\n \n \n #Subtract off local sky contribution. Could be more sophisticated!\n meds = np.median(dd.reshape(dd.shape[0], dd.shape[1]*dd.shape[2]), axis=1).reshape(dd.shape[0],1,1)\n dd -= meds\n \n #Find the maxima in every column.\n for i in range(len(WAVE)):\n yx_peak[i] = np.unravel_index(np.argmax(dd[i]), dd[i].shape)\n peak_vals[i] = dd[i, yx_peak[i][0], yx_peak[i][1]]\n \n #Create the x and y arrays\n xs, ys = np.meshgrid(np.arange(dd.shape[2])*xscale, np.arange(dd.shape[1])*yscale)\n \n #Now fit to every wavelength\n for i in range(len(WAVE)):\n fit, sig = lsq_PSF( ([yx_peak[i,1]*xscale,1,yx_peak[i,0]*yscale,1,peak_vals[i]], xs, ys, dd[i]) )\n fits += [fit]\n sigs += [sig]\n\nfits = np.array(fits)\nfits = fits.reshape( (len(fns), len(WAVE), NPSF_PARAMS) )\n\ngood = np.where(np.median(fits[:,:,4], axis=1) > 100)[0]\n\n#Now find an average offset as a function of wavelength.\nNE_offset = np.zeros( (len(WAVE),2) )\nfor i in good:\n NE_offset[:,0] += np.cos(np.radians(pas[i]))*fits[i,:,2] + np.sin(np.radians(pas[i]))*fits[i,:,0]\n NE_offset[:,1] += np.cos(np.radians(pas[i]))*fits[i,:,0] - np.sin(np.radians(pas[i]))*fits[i,:,2]\nNE_offset /= len(fns)\n \n\n",
"id": "11022778",
"language": "Python",
"matching_score": 2.1521413326263428,
"max_stars_count": 0,
"path": "spectroastro2D.py"
},
{
"content": "\"\"\"\nThis function is based on <NAME>'s 2001 Michelson School\nweb document. It's designed to be used directly for fitting...\n\nIt comes from \"binary_position.pro\", and IDL program.\n\nNB for differentiation, see mathematica/binary_diff.nb\nDone some timing tests on my laptop: this procedure is dominated\nby calculation, not interpretation when there are more than 30 jds.\n\nMeaning of negative numbers:\nNegative eccentricity: change \\omega by 180 degrees and T by half a\n period.\nNegative inclination: the same as positive inclination!\n\ne.g.\njds = Time.now().jd -np.arange(100)*10\nmy_orb = random_orbits()\nrho, theta, vr = binary_orbit(my_orb, jds)\nplt.plot(jds - Time.now().jd, vr)\n\nNext step: use the (e.g.) 1e6 random orbits to simulate orbits for all\nbinary stars, and then scale the orbits e.g. 10 times for each binary\nto simulate different masses, inclinations and system velocity.\n\nthen compute the mean likelihood = exp( \\chi^2/2 )\n\nDo the same for the \"single star\" model with the identical system velocity distribution\nbut no orbit (i.e. the model is just the system velocity).\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pdb\nfrom astropy.time import Time\nplt.ion()\n\ndef calc_likelihood(scaled_rvs, observed_rvs):\n \"\"\"\n This function calculates the likelihood of an orbit fitting the observed data\n \"\"\"\n chi_squared = np.sum((observed_rvs - scaled_rvs)**2./scaled_rvs)\n mean_likelihood = np.exp(chi_squared/2. )\n return mean_likelihood\n\ndef scale_rv(normalised_rvs, period_in_days, m1, m2, inclination):\n ##ADD PARAMETERS MASS (based on RV template?)\n ##ADD MASS RATIO PRIORS?\n #period comes of params used to calculate the binary orbit?\n \"\"\"Normalise our radial velocities based on actual physical parameters\n \n Parameters\n ----------\n normalised_rvs:\n Radial velocities in semi-major axis units per unit time\n \n period_in_days:\n \n m1:\n Primary mass in solar masses.\n \n m2:\n Secondary mass in solar masses.\n \n incliation:\n inclination in degrees.\n \n Returns\n -------\n Radial velocities in km/s\n \n \"\"\"\n \n #params['i'] = np.degrees(np.arccos(np.random.random(int(n_orb))))\n \n #FIXME should be from astropy.constants\n year_in_days = 365.25 \n AU_in_km = 150e6\n day_in_seconds = 3600.*24.\n \n #Implement Kepler's third law\n a1_plus_a2_in_AU = ((m1 + m2)*(period_in_days/year_in_days)**2)**(1/3.)\n \n #Convert to primary distance only\n #FIXME: Check this.\n a1 = a1_plus_a2_in_AU * m2/(m1+m2)\n \n #Scale radial velocity to km/s\n return normalised_rvs * a1 * np.sin(np.radians(inclination)) * AU_in_km / day_in_seconds\n \n\ndef random_orbits(p_prior_type='LogNorm', p_max=365.25*20, e_prior_type='Uniform', \\\n e_max=0.95, n_orb=int(1e6), mass_prior=None, i_prior=None, \\\n p_mean=5.03, p_sdev=2.28):\n \"\"\"Randomly select a set of binary orbit parameters based on priors\n \n Parameters\n ----------\n mass_prior:\n Do we have a prior on mass? If not, semi-major axes will be set to 1 and \n normalisation will have to occur after this.\n \n i_prior:\n Do we have a prior on inclination? 
If not, it will be set to 90 degrees, and\n the affect of the sin(i) distribution will happen later.\n \"\"\"\n params = {}\n \n #Start with period\n if p_prior_type == 'LogNorm':\n logp_all = np.linspace(-1,np.log10(p_max),256)\n #Log period at the middle of each bin.\n logp_mid = 0.5*(logp_all[1:] + logp_all[:-1])\n #PDF at the middle of each bin\n logp_pdf = np.exp(-((logp_mid-p_mean)**2.)/(2.*(p_sdev**2.)))\n #Cumultative distribution function computation\n logp_cdf = np.append(0,np.cumsum(logp_pdf))\n logp_cdf /= logp_cdf[-1]\n #Invert this function through interpolation, and evaluate at\n #a bunch of points selected at random.\n logps = np.interp(np.random.random(n_orb), logp_cdf, logp_all)\n params['P'] = 10**logps\n else:\n return UserWarning(\"Period prior type not implemented yet!\")\n \n if e_prior_type=='Uniform':\n params['e'] = e_max * np.random.random(n_orb)\n else:\n return UserWarning(\"Eccentricity prior type not implemented yet!\")\n \n #Time of periastron passage\n #Set it to be a random fraction of 1 period in the past.\n params['T0'] = Time.now().jd - params['P']*np.random.random(n_orb)\n \n #Now fill in the rest.\n if mass_prior is not None:\n return UserWarning(\"Mass prior type not implemented yet!\")\n \n #Semi-major axis to 1.0 (normalise later! (In function scale_rv?))\n params['a'] = np.ones(n_orb)\n \n #Now fill in the rest.\n if i_prior is not None:\n return UserWarning(\"Inclination prior type not implemented yet!\")\n else:\n params['i'] = np.ones(n_orb)*90\n\n #Position angle of line of nodes.\n params['w'] = np.random.random(n_orb)*360\n \n #Longitude of perioastron\n params['n'] = np.random.random(n_orb)*360\n \n #Get random inclinations on a *sphere* by:\n inc_rand = np.degrees(np.arccos(np.random.random(int(n_orb))))\n params['i'] = inc_rand\n \n return params\n \n \n\ndef binary_orbit(params, jds, niter_anomaly=5, do_deriv=False,plot_orbit_no=0):\n \"\"\"Compute the separation and position angle of a binary given\n a list of epochs.\n \n Parameters\n ----------\n params: numpy array(7)\n T0: The time of periastron passage\n P: The period\n a: The semi-major axis\n e: The eccentricity\n n: Capital omega ( an orbital angle )\n w: Little omega\n i: The inclination\n jds: numpy array\n The list of dates to compute the binary position. Same time units as\n T0 and P\n niter_anomaly: int\n Number of fixed point iterations to compute the eccentric anomaly\n do_deriv: bool\n Do we compute the derivative of the orbital parameters? Needed for \n explicit chi-squared Hessian computation (which can be super-useful)\n \n Returns\n -------\n (rho,theta,vr,[deriv])\n Separation in units of a, position angle in degrees, \n velocity in units of semi major per unit per time \n \"\"\"\n #jds is a numpy array.\n t = jds-params['T0'][plot_orbit_no]\n P = params['P'][plot_orbit_no]\n a = params['a'][plot_orbit_no]\n e = abs(params['e'][plot_orbit_no])\n n = params['n'][plot_orbit_no]*np.pi/180.\n w = params['w'][plot_orbit_no]*np.pi/180.\n i = params['i'][plot_orbit_no]*np.pi/180.\n #Allow a negative eccentricity to have the obvious meaning.\n if (params['e'][plot_orbit_no] < 0):\n t += P/2.\n w += np.pi\n\n #The mean anomaly \n #(p,t) -> M \n #Tr1 (transformation 1)\n #Sequence is (p,t,e) -> M -> bE -> nu -> alpha. 
We want: \n #dAdp = dnudbE.dbEdM.dMdp\n #dAdt = dnudbE.dbEdM.dMdp\n #dAde = dnudbE.dbEde + dnude\n dMdt = -2*np.pi/P #- sign because time _since_ periastron passage.\n dMdp = -2*np.pi*t/P**2 #NB Can be very large.\n M = 2*np.pi*(t % P)/P\n\n #The eccentric anomaly, M = E - esinE\n #(M,e) -> (bE,e) ;Tr2\n #1 = dbEdM - e dbEdM cos(bE) \n #0 = dbEde - e*dbEde*cos(bE) - sin(bE)\n bE = M+e*np.sin(M)+e**2/2*np.sin(2*M)\n for k in range(0,niter_anomaly):\n bE=bE+(M-bE+e*np.sin(bE))/(1-e*np.cos(bE))\n\n #The `true anomaly'. With a pi ambiguity,\n #nu = 2*atan(sqrt((1+e)/(1-e))*tan(bE/2))\n #(bE,e) -> (nu,e) ;Tr3\n nu=2*np.arctan2(np.sqrt((1+e)/(1-e))*np.sin(bE/2), np.cos(bE/2))\n\n #Offset for nu\n #(nu,w) -> alpha ;Tr4\n alpha=nu+w\n\n if do_deriv:\n dbEdM = 1./(1-e*np.cos(bE))\n dbEde = np.sin(bE)/(1-e*np.cos(bE))\n #Derivatives are now for alpha (just an offset from nu)\n #From mathematica...\n dnude = np.sin(bE)/(e-1)/np.sqrt((1+e)/(1-e))/(e*np.cos(bE)-1)\n dnudbE = (e-1)*np.sqrt((1+e)/(1-e))/(e*np.cos(bE) - 1)\n dAdp = dnudbE*dbEdM*dMdp\n dAdt = dnudbE*dbEdM*dMdt\n dAde = dnudbE*dbEde + dnude\n \n #Final calculations (square brackets mean trivial):\n #(alpha,e,i) [+a,n] -> (rho,theta) Tr5\n #We have dAd(p,t,e,w), with alpha=A. Only complex for\n #e, where we need:\n #drhode = drhodA.dAde + drhodnu.dAde + drhode\n #dthde = dthdA.dAde + dthde\n #Also, drhodp = drhodA.dAdp etc...\n\n #For the derivative part of the code\n sqtmp = np.sqrt(np.cos(alpha)**2 + np.sin(alpha)**2*np.cos(i)**2)\n rho=a*(1-e**2)/(1 + e*np.cos(nu))*sqtmp\n theta=np.arctan2(np.sin(alpha)*np.cos(i),np.cos(alpha))+n\n\n if do_deriv:\n drhodA = a*(e**2-1)/(1+e*np.cos(nu))*np.cos(alpha)*np.sin(alpha)*(np.sin(i))**2/sqtmp\n drhodnu = -a*e*(e**2-1)*np.sin(nu)/(1+e*np.cos(nu))**2*sqtmp\n drhode = -a*(2*e+(1+e**2)*np.cos(nu))*sqtmp/(1 + e*np.cos(nu))**2\n drhodi = -a*(1-e**2)/(1+e*np.cos(nu))*np.cos(i)*np.sin(i)*(np.sin(alpha))**2/sqtmp\n dthdA = np.cos(i)/(np.cos(alpha))**2/(1+(np.cos(i)*np.tan(alpha))**2)\n dthdi = -np.sin(i)*np.tan(alpha)/(1+(np.cos(i)*np.tan(alpha))**2)\n #[T0,P,a,e,n,w,i]\n drho = np.array([(drhodA+drhodnu)*dAdt, (drhodA+drhodnu)*dAdp, rho/a, drhode+(drhodA+drhodnu)*dAde, \\\n np.zeros(len(jds)), drhodA*np.pi/180., drhodi*np.pi/180.])\n dth = np.array([dthdA*dAdt, dthdA*dAdp, np.zeros(len(jds)), dthdA*dAde, np.ones(len(jds))*np.pi/180., dthdA*np.pi/180., \\\n dthdi*np.pi/180.])*180/np.pi\n deriv = (drho,dth)\n \n #The radial velocity is in units of semi major axis units per time unit.\n #e.g. 
if a is in km and P is in seconds, then vr is in km/s.\n #if a is in milli-arcsec and P is in days, then vr has to be multiplied by\n #(1 AU in km) / (1 day in s) / (parallax in milli-arcsec)\n # [Note that the old IDL code didn't have the amplitude out the front]\n vr = 2*np.pi*a*np.sin(i)/P/np.sqrt(1 - e**2) * (np.cos(alpha) + e*np.cos(w))\n\n if do_deriv:\n return rho, theta*180/np.pi, vr, deriv\n else:\n return rho, theta*180/np.pi, vr\n \ndef plot_orbit(params,jds,rho,rho_sig,theta,theta_sig):\n \"\"\"Make a pretty orbital plot\n \"\"\"\n rho_mod,theta_mod,dummy = binary_orbit(params, jds)\n plt.clf()\n w_ao = np.where(theta_sig > 0)[0]\n plt.plot(rho[w_ao]*np.sin(np.radians(theta[w_ao])),rho[w_ao]*np.cos(np.radians(theta[w_ao])),'.')\n w_lunar = np.where(theta_sig < 0)[0]\n for ix in w_lunar:\n midpoint = np.array([rho[ix]*np.sin(np.radians(theta[ix])),rho[ix]*np.cos(np.radians(theta[ix]))])\n segment = np.array([100*np.cos(np.radians(theta[ix])),-100*np.sin(np.radians(theta[ix]))])\n start_pt = midpoint - segment\n end_pt = midpoint + segment\n plt.plot([start_pt[0],end_pt[0]],[start_pt[1],end_pt[1]],'r-')\n plt.plot([midpoint[0],rho_mod[ix]*cos(np.radians(theta_mod[ix]))],[midpoint[1],rho_mod[ix]*sin(np.radians(theta_mod[ix]))])\n mint = np.min([params[0],np.min(jds)])\n maxt = np.max([params[0] + params[1],np.max(jds)])\n times = mint + (maxt-mint)*np.arange(1001)/1e3\n rho_orbit,theta_orbit,dummy = binary_orbit(params, times)\n plt.plot(rho_orbit*np.sin(np.radians(theta_orbit)),rho_orbit*np.cos(np.radians(theta_orbit)))\n\ndef leastsq_orbit_fun(params,jds,rho,rho_sig,theta,theta_sig):\n \"\"\"A function for scipy.optimize.leastsq. Lunar occultation\n measurements can be indicated by theta_sig < 0. These will \n be placed at the \"\"\"\n model_rho,model_theta,dummy = binary_orbit(params,jds)\n theta_diff = ((model_theta - theta + 180) % 360)-180\n if (np.sum(model_rho) != np.sum(model_rho)):\n raise UserWarning\n w_ao = np.where(theta_sig > 0)[0]\n retvect = np.append( (model_rho[w_ao] - rho[w_ao])/rho_sig[w_ao], theta_diff[w_ao]/theta_sig[w_ao])\n w_lunar = np.where(theta_sig < 0)[0]\n if len(w_lunar)>0:\n proj = model_rho[w_lunar]*np.cos(np.radians(theta[w_lunar] - model_theta[w_lunar]))\n retvect = np.append(retvect,(proj-rho[w_lunar])/rho_sig[w_lunar])\n# import pdb; pdb.set_trace()\n return retvect\n \ndef leastsq_orbit_deriv(params,jds,rho,rho_sig,theta,theta_sig):\n \"\"\"A function returning the derivative for scipy.optimize.leastsq\"\"\"\n model_rho,model_theta,dummy,deriv = binary_orbit(params,jds, do_deriv=True)\n w_lunar = np.where(theta_sig < 0)[0]\n if (len(w_lunar)>0):\n import pdb; pdb.set_trace() #Not implemented yet!\n orbit_fun_deriv = np.concatenate((deriv[0]/np.tile(rho_sig,7).reshape(7,len(jds)),\n deriv[1]/np.tile(theta_sig,7).reshape(7,len(jds))),axis=1)\n return orbit_fun_deriv.T\n \ndef binary_lnprob(params,jds,rho,rho_sig,theta,theta_sig):\n \"\"\"For use with e.g. emcee. Return chi^2/2.\n \"\"\"\n if (params[3] > 1):\n return -np.inf\n if (params[3] < 0):\n return -np.inf\n retval = -0.5*np.sum(leastsq_orbit_fun(params,jds,rho,rho_sig,theta,theta_sig)**2)\n# model_rho,model_theta,dummy = binary_orbit(params,jds)\n# #Difference modulo 360\n# theta_diff = ((theta -model_theta + 180) % 360)-180\n# retval = -0.5*(np.sum( ((rho-model_rho)/rho_sig)**2) + np.sum((theta_diff/theta_sig)**2))\n #We really shouldn't have NaNs... but if we do, it would be good to stop here for\n #bugshooting.\n if (retval != retval):\n raise UserWarning\n return retval\n",
"id": "10853421",
"language": "Python",
"matching_score": 1.8029574155807495,
"max_stars_count": 0,
"path": "binary_orbit.py"
},
{
"content": "\"\"\"Here we create some fake data using templates, and try to fit to this data \nusing process_stellar to extract the radial velocities using TODCOR\"\"\"\n\nfrom __future__ import division, print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import UnivariateSpline\nfrom scipy.interpolate import InterpolatedUnivariateSpline\nimport process_stellar\nplt.ion()\n\ndir = 'RV_templates/'\ninfiles = [dir + '9000g40p00k2v50.txt', dir + '5250g35p00k2v50.txt']\n\nrvs = [0,150]\nrvs = np.array(rvs)\n\nfluxes = [1, .3]\n\nspectrograph_R = 7000.\n#spectrograph_R = 3000.\n\nwavelims = [3900,5000]\nsnr = 100.0\n#-------------------\ndwave = 0.5*(wavelims[0] + wavelims[1])/spectrograph_R/2 #2 pixel sampling\nnwave = int((wavelims[1]-wavelims[0])/dwave)\nwave_pixelated = np.linspace(wavelims[0], wavelims[1], nwave) \n\n#Zero the spectrum\nspect = np.zeros(nwave)\n\n#Add in each template by interpolating onto the new wavelength grid.\nfor fname, rv, flux in zip(infiles, rvs, fluxes):\n wave_spect = np.loadtxt(fname)\n x = wave_spect[:,0]*(1+rv/3e5)\n y = wave_spect[:,1]/np.median(wave_spect[:,1])\n spl = InterpolatedUnivariateSpline(x, y, k=3)\n spect += flux*spl(wave_pixelated)\n \n \n#Convolve to the resolution of the spectrograph (2 pixels)\ng = 0.5**np.arange(-2,3)**2\nspect = np.convolve(spect, g, mode='same')\nspect += np.median(spect)*np.random.normal(size=len(spect))/snr\n\nsig = np.ones_like(spect)*np.median(spect)/snr\nresults = \\\n process_stellar.calc_rv_todcor(spect,wave_pixelated, sig, infiles, plotit=True, \\\n smooth_distance=1001, window_divisor=20, nwave_log=int(1e5))\n \nprint(results)\nprint(\"Computed delta RV: {0:6.2f} +/- {1:6.2f}\".format(results[0]-results[2], np.sqrt(results[1]**2 + results[3]**2)))\nprint(\"Actual delta RV: {0:6.2f}\".format(rvs[0]-rvs[1]))\nprint(\"Computed mean RV: {0:6.2f} +/- {1:6.2f}\".format((results[0]+results[2])*.5, np.sqrt(results[1]**2 + results[3]**2)/2))\nprint(\"Actual mean RV: {0:6.2f}\".format((rvs[0]+rvs[1])*.5))\nprint(\"Difference: {0:6.2f} +/- {1:6.2f}\".format((results[0]+results[2])*.5 - (rvs[0]+rvs[1])*.5, np.sqrt(results[1]**2 + results[3]**2)/2))\n",
"id": "11077580",
"language": "Python",
"matching_score": 2.627366781234741,
"max_stars_count": 0,
"path": "fit_fake_data.py"
},
{
"content": "\"\"\"\nTo run, type:\n\n%run process_stellar\n%run test_process_stellar\n\"\"\"\nfrom __future__ import print_function\ntry:\n import pyfits\nexcept:\n import astropy.io.fits as pyfits\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport glob\nimport pickle\nplt.ion()\n\nmode='plot' #fit, plot or test\n\n#From Margaret:\n#They were all obtained with 'T2m3wb-20141110.113949-0831.p08.fits', using RV_templates/8500g40p00k2v50.txt and 5000g35p00k2v50.txt and alpha =0.1.\n\nif mode=='fit':\n files = glob.glob('C:/Users/Margaret/MPhil/rvs_p08files_TTHor_2016/*.fits')\n base_path = 'C:/Users/Margaret/MPhil/rvs_p08files_TTHor_2016/'\n for file in files: \n file_num = int(file.split('.')[1].split('-')[1])\n flux,wave = read_and_find_star_p08(file)\n spectrum,sig = weighted_extract_spectrum(flux)\n helcor = pyfits.getheader(file)['RADVEL']\n wave_log, spect_int, model_spect = calc_rv_todcor(spectrum,wave,sig,\\\n ['C:/Users/Margaret/MPhil/RV_templates/8500g40p00k2v50.txt',\\\n 'C:/Users/Margaret/MPhil/RV_templates/5000g35p00k2v50.txt'],\\\n alpha=0.1,out_fn='rvs.txt',jd=file_num,return_fitted=True,\\\n heliocentric_correction=helcor)\n\n plt.clf()\n plt.plot(wave_log, spect_int, label='Data')\n plt.plot(wave_log, model_spect, label='Model')\n plt.legend()\n plt.xlabel('Wavelength')\nelif mode=='plot':\n #** To test the flux normalization of todcor **\n templates_dir = '/Users/mireland/python/pywifes/tools/margaret/'\n template_fns = glob.glob(templates_dir+'*txt')\n plot_p08_dir = '/Users/mireland/data/wifes/rvs_p08files/'\n plot_p08_dir = '/Users/mireland/python/pywifes/tools/margaret/'\n\n flux, wave = read_and_find_star_p08(plot_p08_dir + 'T2m3wb-20141110.113949-0831.p08.fits')\n spectrum, sig = weighted_extract_spectrum(flux)\n dummy = calc_rv_todcor(spectrum, wave,sig, [template_fns[3], template_fns[1]], bad_intervals=[[0,3810], [5005, 5028]], alpha=0.25, plotit=True)\n #dummy = calc_rv_todcor(spectrum, wave,sig, [template_fns[3], template_fns[1]], bad_intervals=[[0,4200], [5067,5075],[5500,6000]], alpha=0.25, plotit=True)\n #dummy = calc_rv_todcor(spectrum, wave,sig, [template_fns[2], template_fns[1]], bad_intervals=[[0,4200], [5067,5075],[5500,6000]], alpha=0.25, plotit=True)\nelif mode=='test':\n #*** lines below test todcor ***\n binspect,binwave,binsig=make_fake_binary(spectrum, wave, sig, ['RV_templates/9000g40p00k2v150.txt','RV_templates/5250g35p00k2v150.txt'],0.5,-200,+200)\n calc_rv_todcor(binspect,binwave,binsig,['RV_templates/9000g40p00k2v150.txt','RV_templates/5250g35p00k2v150.txt'],alpha=0.5)\n\n rv,rv_sig = calc_rv_template(spectrum,wave,sig,'template_conv', ([0,5400],[6870,6890]))\n rv,rv_sig = calc_rv_template(spectrum,wave,sig,template_fns, ([0,5400],[6870,6890]))\n\n",
"id": "10393151",
"language": "Python",
"matching_score": 2.268437623977661,
"max_stars_count": 0,
"path": "test_process_stellar.py"
},
{
"content": "import process_stellar\nimport matplotlib.pyplot as plt\nconv_tlusty_spect = process_stellar.conv_tlusty_spect\nconv_phoenix_spect = process_stellar.conv_phoenix_spect\nrv_process_dir = process_stellar.rv_process_dir\nimport pdb\nimport time\nimport numpy as np\n\n\n##CONVOLVING TEMPLATES\n#conv_tlusty_spect('/Volumes/UTRAID/TLUSTY/BGvispec_v2/','tlusty_conv')\n#conv_phoenix_spect('/Volumes/UTRAID/phoenix_hires/PHOENIX-ACES-AGSS-COND-2011/Z-0.0/foruse/','phoenix_conv')\n\n\n##RUNNING RV FITTER ON DATA\n##Executing from the code directory:\n#rv_process_dir('/Volumes/UTRAID/wifes_data/140619/reduction_red_150806',\n#template_conv_dir='./phoenix_conv/',outdir='arizz_outputs/140619/phoenix',mask_ha_emission=True)\n\n#rv_process_dir('/Users/arizz/python/pywifes/tools/test_intput',\n#template_conv_dir='/Users/arizz/python/pywifes/tools/full_conv/',outdir='/Users/arizz/python/pywifes/tools/testing_outputs',mask_ha_emission=False)\n\nindirs = np.array(['140623','140622','140621','140619'])\n\nfor ii in indirs:\n indir= '/Volumes/UTRAID/wifes_data/'+ii+'/reduction_red_150806'\n odir = 'arizz_outputs/'+indir.split('/')[4]+'/both'\n #pdb.set_trace()\n rv_process_dir(indir,template_conv_dir='/Users/arizz/python/pywifes/tools/full_conv/',outdir=odir,mask_ha_emission=False)\n",
"id": "1630043",
"language": "Python",
"matching_score": 0.540456235408783,
"max_stars_count": 0,
"path": "runscript.py"
}
] | 2.210289 |
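`binary_orbit.py` above solves Kepler's equation M = E - e*sin(E) with a series starting guess followed by a few Newton-style refinements. The standalone sketch below isolates just that solver so the fixed-point step can be checked numerically; the function name `eccentric_anomaly` and the test values are illustrative, not taken from the repository.

import numpy as np

def eccentric_anomaly(M, e, niter=5):
    """Solve M = E - e*sin(E) for E (radians), vectorised over M."""
    M = np.asarray(M, dtype=float)
    # Series starting guess, as in binary_orbit.
    E = M + e * np.sin(M) + 0.5 * e**2 * np.sin(2 * M)
    for _ in range(niter):
        # Newton step for f(E) = E - e*sin(E) - M.
        E = E + (M - E + e * np.sin(E)) / (1 - e * np.cos(E))
    return E

if __name__ == "__main__":
    M = np.linspace(0, 2 * np.pi, 5)
    E = eccentric_anomaly(M, e=0.3)
    # Residual of Kepler's equation should be close to machine precision.
    print(np.max(np.abs(E - 0.3 * np.sin(E) - M)))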
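The `scale_rv` helper in the same file converts a dimensionless radial-velocity curve into km/s via Kepler's third law and the primary's barycentric semi-major axis. The sketch below computes the equivalent radial-velocity semi-amplitude K = 2*pi*a1*sin(i) / (P*sqrt(1-e^2)) from the same ingredients; the constants and the function name `rv_semi_amplitude` are assumptions of this sketch (the repository keeps the normalised curve and the scaling separate).

import numpy as np

AU_KM = 1.496e8        # 1 astronomical unit in km (approximate)
YEAR_DAYS = 365.25
DAY_S = 86400.0

def rv_semi_amplitude(m1, m2, period_days, ecc, incl_deg):
    """Primary's RV semi-amplitude in km/s, for masses in solar masses."""
    # Kepler's third law: total semi-major axis in AU.
    a_tot_au = ((m1 + m2) * (period_days / YEAR_DAYS) ** 2) ** (1.0 / 3.0)
    # The primary orbits the barycentre on a scaled-down ellipse.
    a1_km = a_tot_au * m2 / (m1 + m2) * AU_KM
    period_s = period_days * DAY_S
    return 2 * np.pi * a1_km * np.sin(np.radians(incl_deg)) / (period_s * np.sqrt(1 - ecc ** 2))

# Sun plus a Jupiter-like companion: roughly 13 m/s, a familiar sanity check.
print(rv_semi_amplitude(1.0, 0.001, 11.86 * YEAR_DAYS, 0.05, 90.0))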
MTCloudVision | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nimport os\nimport sys\ncurr_path = os.path.abspath(os.path.dirname(__file__))\nsys.path.append(os.path.join(curr_path, \"../python\"))\nimport mxnet as mx\nimport random\nimport argparse\nimport cv2\nimport time\nimport traceback\nimport pdb\ntry:\n import multiprocessing\nexcept ImportError:\n multiprocessing = None\n\ndef read_video_txt(path_in,frame_per_video):\n \"\"\"\n read txtfile\n Parameters:\n ---------\n name : str\n txtfile path\n frame_per_video:int\n frame per video\n Returns:\n ---------\n [index,image_path,label] as iterator\n \"\"\"\n num_index=0\n with open(path_in) as fin:\n while True:\n line = fin.readline()\n #print line\n if not line:\n break\n line = [i.strip() for i in line.strip().split(' ')]\n line_len = len(line)\n if line_len < 3:\n print('lst should at least has three parts, but only has %s parts for %s' %(line_len, line))\n continue\n try:\n \n label = float(line[-1])\n length = int(line[1])\n new_length = 1\n average_duration = length/frame_per_video\n for i in range(0,frame_per_video):\n if average_duration >= new_length:\n if (i+1)*average_duration <= length:\n offset = int(random.uniform(i* average_duration,(i+1)*average_duration))\n else:\n offset = int(random.uniform(i* average_duration,length))\n if offset <= 0:\n offset = 1\n elif offset >= length:\n offset = length\n image_path = line[0] + \"/img_%05d.jpg\"%(offset)\n index = int(num_index + i)\n item = [index] + [image_path] + [label]\n yield item\n\n num_index += frame_per_video\n except Exception as e:\n print('Parsing lst met error for %s, detail: %s' %(line, e))\n continue\n\ndef image_encode(args, i, item, q_out):\n \"\"\"\n loading and processing image\n\n Parameters:\n ---------\n args:\n image augment argument\n\n i: int\n the index of image in iterator\n item : list\n index,image_path and label of image in datasets \n q_out : queue\n saving resluts in the form of (i,pack_img,item)\n\n \"\"\"\n # fullpath = os.path.join(args.root, item[1])\n fullpath = item[1] \n if len(item) > 3 and args.pack_label:\n header = mx.recordio.IRHeader(0, item[2:], item[0], 0)\n else:\n header = mx.recordio.IRHeader(0, item[2], item[0], 0)\n\n if args.pass_through:\n try:\n with open(fullpath, 'rb') as fin:\n img = fin.read()\n s = mx.recordio.pack(header, img)\n q_out.put((i, s, item))\n except Exception as e:\n traceback.print_exc()\n print('pack_img error:', item[1], e)\n q_out.put((i, None, item))\n return\n\n try:\n img = cv2.imread(fullpath, args.color)\n except:\n traceback.print_exc()\n print('imread error trying to load file: %s ' % fullpath)\n q_out.put((i, None, item))\n return\n if img is None:\n print('imread read blank (None) image for file: %s' % fullpath)\n q_out.put((i, None, item))\n return\n if args.center_crop:\n if img.shape[0] > img.shape[1]:\n margin = (img.shape[0] - img.shape[1]) // 2;\n img = img[margin:margin + img.shape[1], :]\n else:\n margin = (img.shape[1] - img.shape[0]) // 2;\n img = img[:, margin:margin + img.shape[0]]\n if args.resize:\n if img.shape[0] > img.shape[1]:\n newsize = (args.resize, img.shape[0] * args.resize // img.shape[1])\n else:\n newsize = (img.shape[1] * args.resize // img.shape[0], args.resize)\n img = cv2.resize(img, newsize)\n\n try:\n s = mx.recordio.pack_img(header, img, quality=args.quality, img_fmt=args.encoding)\n q_out.put((i, s, item))\n except Exception as e:\n traceback.print_exc()\n print('pack_img error on file: %s' % fullpath, e)\n q_out.put((i, 
None, item))\n return\n\ndef read_worker(args, q_in, q_out):\n \"\"\"\n loading and processing image by multiprocessing\n Parameters:\n ---------\n args:\n image augment argument\n q_in:multiprocessing.Queue()\n the index of image in iterator, index image_path and label ofimage in datasets\n\n\n q_out:multiprocessing.Queue()\n saving resluts in the form of (i,pack_img,item)\n \"\"\"\n while True:\n deq = q_in.get()\n if deq is None:\n break\n i, item = deq\n image_encode(args, i, item, q_out)\n\ndef write_worker(q_out, fname, working_dir):\n \"\"\"\n saving image in the form of rec by by multiprocessing\n Parameters:\n ---------\n q_out: multiprocessing.Queue\n contain processed image in the form of (i,pack_img,item)\n fname:str\n txtfile path\n working_dir:str\n path to folder containing txtfile\n \"\"\"\n pre_time = time.time()\n count = 0\n fname = os.path.basename(fname)\n fname_rec = os.path.splitext(fname)[0] + '.rec'\n fname_idx = os.path.splitext(fname)[0] + '.idx'\n record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),\n os.path.join(working_dir, fname_rec), 'w')\n buf = {}\n more = True\n while more:\n deq = q_out.get()\n if deq is not None:\n i, s, item = deq\n buf[i] = (s, item)\n else:\n more = False\n while count in buf:\n s, item = buf[count]\n del buf[count]\n if s is not None:\n record.write_idx(item[0], s)\n\n if count % 1000 == 0:\n cur_time = time.time()\n print('time:', cur_time - pre_time, ' count:', count)\n pre_time = cur_time\n count += 1\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n description='Create an image list or \\\n make a record database by reading from an image list')\n parser.add_argument('prefix', help='prefix of input/output txt and rec files.')\n parser.add_argument('--frame-per-video', type=int, default=3,help='frame per video')\n rgroup = parser.add_argument_group('Options for creating database')\n rgroup.add_argument('--pass-through', action='store_true',\n help='whether to skip transformation and save image as is')\n rgroup.add_argument('--resize', type=int, default=0,\n help='resize the shorter edge of image to the newsize, original images will\\\n be packed by default.')\n rgroup.add_argument('--center-crop', action='store_true',\n help='specify whether to crop the center image to make it rectangular.')\n rgroup.add_argument('--quality', type=int, default=95,\n help='JPEG quality for encoding, 1-100; or PNG compression for encoding, 1-9')\n rgroup.add_argument('--num-thread', type=int, default=1,\n help='number of thread to use for encoding. order of images will be different\\\n from the input list if >1. the input list will be modified to match the\\\n resulting order.')\n rgroup.add_argument('--color', type=int, default=1, choices=[-1, 0, 1],\n help='specify the color mode of the loaded image.\\\n 1: Loads a color image. Any transparency of image will be neglected. 
It is the default flag.\\\n 0: Loads image in grayscale mode.\\\n -1:Loads image as such including alpha channel.')\n rgroup.add_argument('--encoding', type=str, default='.jpg', choices=['.jpg', '.png'],\n help='specify the encoding of the images.')\n rgroup.add_argument('--pack-label', action='store_true',\n help='Whether to also pack multi dimensional label in the record file')\n args = parser.parse_args()\n args.prefix = os.path.abspath(args.prefix)\n return args\n\nif __name__ == '__main__':\n args = parse_args()\n if os.path.isdir(args.prefix):\n working_dir = args.prefix\n else:\n working_dir = os.path.dirname(args.prefix)\n files = [os.path.join(working_dir, fname) for fname in os.listdir(working_dir)\n if os.path.isfile(os.path.join(working_dir, fname))]\n count = 0\n for fname in files:\n if fname.startswith(args.prefix) and fname.endswith('.txt'):\n print('Creating .rec file from', fname, 'in', working_dir)\n count += 1\n \n image_list = read_video_txt(fname,args.frame_per_video)\n # -- write_record -- #\n if args.num_thread > 1 and multiprocessing is not None:\n q_in = [multiprocessing.Queue(1024) for i in range(args.num_thread)]\n q_out = multiprocessing.Queue(1024)\n read_process = [multiprocessing.Process(target=read_worker, args=(args, q_in[i], q_out)) \\\n for i in range(args.num_thread)]\n for p in read_process:\n p.start()\n write_process = multiprocessing.Process(target=write_worker, args=(q_out, fname, working_dir))\n write_process.start()\n\n for i, item in enumerate(image_list):\n q_in[i % len(q_in)].put((i, item))\n for q in q_in:\n q.put(None)\n for p in read_process:\n p.join()\n\n q_out.put(None)\n write_process.join()\n else:\n print('multiprocessing not available, fall back to single threaded encoding')\n try:\n import Queue as queue\n except ImportError:\n import queue\n q_out = queue.Queue()\n fname = os.path.basename(fname)\n fname_rec = os.path.splitext(fname)[0] + '.rec'\n fname_idx = os.path.splitext(fname)[0] + '.idx'\n record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),\n os.path.join(working_dir, fname_rec), 'w')\n cnt = 0\n pre_time = time.time()\n for i, item in enumerate(image_list):\n print (i ,item)\n image_encode(args, i, item, q_out)\n if q_out.empty():\n continue\n _, s, _ = q_out.get()\n record.write_idx(item[0], s)\n if cnt % 1000 == 0:\n cur_time = time.time()\n print('time:', cur_time - pre_time, ' count:', cnt)\n pre_time = cur_time\n cnt += 1\n if not count:\n print('Did not find and list file with prefix %s'%args.prefix)\n",
"id": "3866478",
"language": "Python",
"matching_score": 1.9988588094711304,
"max_stars_count": 48,
"path": "video2rec.py"
},
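The video2rec.py entry above samples `frame_per_video` evenly spaced frames from each video (one per segment) before packing them into a RecordIO file. Below is a minimal, dependency-free sketch of just that sampling step; the 90-frame video length and the "some_video" directory are made-up inputs, while the `img_%05d.jpg` naming mirrors the file above.

import random

def sample_frame_offsets(video_length, frames_per_video):
    """Pick one frame index from each equal-length segment, mirroring read_video_txt above."""
    offsets = []
    average_duration = video_length / frames_per_video
    for i in range(frames_per_video):
        if average_duration >= 1:
            upper = min((i + 1) * average_duration, video_length)
            offset = int(random.uniform(i * average_duration, upper))
            offsets.append(min(max(offset, 1), video_length))  # clamp to [1, length]
    return offsets

offsets = sample_frame_offsets(90, 3)  # hypothetical 90-frame video, 3 frames per video
print(offsets)
print(["some_video/img_%05d.jpg" % o for o in offsets])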
{
"content": "# -*- coding: utf-8 -*-\nimport argparse\nimport numpy as np\n\ndef model_argparser():\n parser = argparse.ArgumentParser(description='train a video classification model')\n parser.add_argument('--width', type=int, default=224,help='image width')\n parser.add_argument('--height', type=int, default=224,help='image height')\n parser.add_argument('--network-json',type=str,help='network symbol file')\n parser.add_argument('--params', type=str,help='pretrained params')\n parser.add_argument('--lr',type=float,default=0.001,help='the initial learning rate')\n parser.add_argument('--wd',type=float,default=0.0005,help='weight decay')\n parser.add_argument('--optimizer', type=str, default='sgd',help='optimizer')\n parser.add_argument('--momentum', type=float, default=0.9,help='momentum for sgd')\n parser.add_argument('--lr-factor-step',type=int, default=1, help='the number of epoch to factor the lr, must be larger than 1')\n parser.add_argument('--lr-factor', type=float, default=0.1,help='times the lr with a factor for every lr-factor-step epoch')\n parser.add_argument('--clip-gradient', type=float, default=40.,help='clip min/max gradient to prevent extreme value')\n parser.add_argument('--begin-epoch', type=int, default=0,help='begin epoch')\n parser.add_argument('--num-epochs', type=int, default=100,help='the number of training epochs')\n parser.add_argument('--gpus', type=str, default='0',help='the gpus will be used, e.g \"0,1,2,3\"\"')\n parser.add_argument('--display-step', type=int, default=5,help='display step')\n parser.add_argument('--frame-per-video', type=int, default=3,help='frame per video')\n parser.add_argument('--batch-size', type=int, default=1,help='the batch size(video per batch)')\n parser.add_argument('--save-model-prefix', type=str,help='the prefix of the model to save')\n parser.add_argument('--train-rec-dir', type=str, required=True,help='train rec dir')\n parser.add_argument('--val-rec-dir', type=str,help='val rec dir ')\n #parser.add_argument('--phase',type=str,required=True,choices=['Train','Test'],help='the phase of data')\n parser.add_argument('--is-flow',type=lambda x: x.lower() in (\"yes\", 'true', 't', '1'), default='False',help='the type pf image')\n parser.add_argument('--shuffle', type=lambda x: x.lower() in (\"yes\", 'true', 't', '1'), default='False',help='shuffle the data')\n parser.add_argument('--resize',type=int,default=0,help='resize the image size')\n parser.add_argument('--rand-crop',type=lambda x: x.lower() in (\"yes\", 'true', 't', '1'), default='False')\n parser.add_argument('--rand-resize',type=lambda x: x.lower() in (\"yes\", 'true', 't', '1'), default='False')\n parser.add_argument('--rand-mirror',type=lambda x: x.lower() in (\"yes\", 'true', 't', '1'), default='False')\n parser.add_argument('--mean',type = np.array,default=np.array([[104, 117, 123]]).astype('uint8'))\n parser.add_argument('--std',type = np.array,default=np.array([1, 1, 1]).astype('uint8'))\n parser.add_argument('--num-workers', type=int, default=0,help='number of io workers')\n parser.add_argument('--log', type=str,help='the name of log file')\n return parser.parse_args()\n",
"id": "8852689",
"language": "Python",
"matching_score": 1.7379595041275024,
"max_stars_count": 48,
"path": "test/model_args.py"
},
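model_args.py above parses boolean flags with `lambda x: x.lower() in ("yes", 'true', 't', '1')` because argparse's plain `type=bool` would treat any non-empty string as True. A small stand-alone sketch of that pattern follows; the two flag names are taken from the file above, but the command-line values are illustrative.

import argparse

def str2bool(value):
    # Same idea as the lambdas in model_args.py: only a few spellings count as True.
    return value.lower() in ("yes", "true", "t", "1")

parser = argparse.ArgumentParser()
parser.add_argument("--rand-mirror", type=str2bool, default=False)
parser.add_argument("--shuffle", type=str2bool, default=False)

args = parser.parse_args(["--rand-mirror", "True", "--shuffle", "no"])
print(args.rand_mirror, args.shuffle)  # True False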
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n'''\n\nimport numpy as np\nimport mxnet as mx\nimport random\nimport os\n\nclass VideoDataIter(object): \n \"\"\"\n pack VideoDataIter by rec file\n Parameters:\n ---------\n args:\n image augment argument\n rec_dir:str\n rec file path\n provide_data:\n The name and shape of data provided by this iterator.\n provide_label:\n The name and shape of label provided by this iterator.\n \"\"\" \n def __init__(self,args,rec_dir=None,provide_data=None, provide_label=None):\n if args.is_flow:\n self.data_shape = (1,args.width,args.height)\n else:\n self.data_shape = (3,args.width,args.height) \n self.resize = args.resize\n self.rand_crop = args.rand_crop\n self.rand_resize = args.rand_resize\n self.rand_mirror = args.rand_mirror\n self.mean = args.mean\n self.std = args.std\n self.batch_size = args.batch_size\n self.frame_per_video = args.frame_per_video\n self.provide_data = provide_data\n self.provide_label = provide_label\n self.rec=[os.path.join(rec_dir, fname) for fname in os.listdir(rec_dir)\n if os.path.isfile(os.path.join(rec_dir, fname))]\n self.formal_iter_data = mx.io.ImageRecordIter( path_imgrec = self.rec[0],\n aug_seg = mx.image.CreateAugmenter(data_shape=self.data_shape,mean=self.mean,std= self.std,rand_mirror=self.rand_mirror),\n data_name = 'data',\n label_name = 'softmax_label',\n batch_size = self.batch_size * self.frame_per_video,\n data_shape = self.data_shape,\n preprocess_threads = 4,\n rand_mirror = self.rand_mirror,)\n\n\n def __iter__(self):\n return self\n \n\n def next(self):\n try:\n next_data_batch = next(self.formal_iter_data)\n formal_data= next_data_batch.data[0]\n new_data = formal_data.reshape((self.batch_size,self.frame_per_video,self.data_shape[0],self.data_shape[1],self.data_shape[2]))\n new_data=[new_data]\n formal_label = next_data_batch.label[0]\n new_label = [label.asnumpy()[0] for i ,label in enumerate(formal_label) if i%self.frame_per_video ==0]\n new_label = mx.nd.array(new_label)\n new_label =[new_label]\n return mx.io.DataBatch(data=new_data,label=new_label)\n \n except StopIteration:\n raise StopIteration\n \n def reset(self):\n index = random.randint(0,len(self.rec)-1)\n self.formal_iter_data = mx.io.ImageRecordIter( path_imgrec= self.rec[index],\n aug_seg = mx.image.CreateAugmenter(data_shape=self.data_shape,mean=self.mean,std= self.std,rand_mirror=self.rand_mirror),\n data_name = 'data',\n label_name = 'softmax_label',\n batch_size = self.batch_size * self.frame_per_video,\n data_shape = self.data_shape,\n preprocess_threads = 4,\n rand_mirror = self.rand_mirror,)\n \n\n \n \n\n\n\n",
"id": "12029821",
"language": "Python",
"matching_score": 2.8604578971862793,
"max_stars_count": 48,
"path": "dataloader.py"
},
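VideoDataIter.next() above regroups a flat batch of `batch_size * frame_per_video` frames into shape `(batch_size, frame_per_video, C, H, W)` and keeps one label per video. The sketch below shows that regrouping with NumPy instead of mxnet.nd purely to stay dependency-free; the batch size, frame count, and label values are assumptions.

import numpy as np

batch_size, frames_per_video = 2, 3
c, h, w = 3, 224, 224

# Frames arrive flat with one label per frame, as ImageRecordIter would deliver them.
flat_frames = np.zeros((batch_size * frames_per_video, c, h, w), dtype=np.float32)
flat_labels = np.array([7, 7, 7, 4, 4, 4], dtype=np.float32)

# Regroup frames per video and keep every frame_per_video-th label, as in next().
video_frames = flat_frames.reshape(batch_size, frames_per_video, c, h, w)
video_labels = flat_labels[::frames_per_video]

print(video_frames.shape)  # (2, 3, 3, 224, 224)
print(video_labels)        # [7. 4.]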
{
"content": "# -*- coding: utf-8 -*-\nimport os\nROOT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')\nimport sys\nsys.path.insert(0, ROOT_DIR)\nfrom dataloader import *\nfrom model_args import *\n\nif __name__ == '__main__':\n model_args = model_argparser()\n provide_data=[('data',(model_args.batch_size,model_args.frame_per_video,3,model_args.height,model_args.width)),]\n provide_label= [('label',(model_args.batch_size,)),]\n train_iter = VideoDataIter(model_args,rec_dir=model_args.train_rec_dir,provide_data=provide_data,provide_label=provide_label) \n for batch in train_iter:\n print batch\n",
"id": "4001601",
"language": "Python",
"matching_score": 1.568794846534729,
"max_stars_count": 48,
"path": "test/test.py"
}
] | 1.868409 |
spdk | [
{
"content": "#!/usr/bin/env python\n# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom subprocess import check_call, call, check_output, Popen, PIPE\nimport xmlrpclib\nimport re\nimport os\nimport sys\nimport time\nimport datetime\nimport ConfigParser\nimport socket\nimport threading\n\nfio_template = \"\"\"\n[global]\nthread=1\ninvalidate=1\nrw=%(testtype)s\nrwmixread=%(rw_mixread)d\nioengine=libaio\nbs=%(blocksize)d\ndirect=1\nsize=%(size)s\niodepth=%(iodepth)d\nnorandommap=1\n%(verify)s\nverify_dump=1\nnumjobs=1\n%(trim)s\n\"\"\"\n\nfio_job_template = \"\"\"\n[job%(jobnumber)d]\nfilename=%(device)s\n\"\"\"\n\nverify_template = \"\"\"\ndo_verify=1\nverify=pattern\nverify_pattern=\"spdk\"\n\"\"\"\n\nverify_write_template = \"\"\"\n[global]\nthread=1\ninvalidate=1\nrw=%(testtype)s\nioengine=libaio\nbs=%(blocksize)d\niodepth=%(iodepth)d\nnorandommap=1\ndirect=1\nsize=%(size)s\nverify_dump=1\nnumjobs=1\ndo_verify=0\nverify=pattern\nverify_pattern=\"spdk\"\n\"\"\"\n\ntrim_template = \"\"\"\ntrim_percentage=10\ntrim_verify_zero=1\n\n\"\"\"\n\n\ndef start_fio_client():\n get_fio_version()\n start_time = datetime.datetime.now()\n print \"start_time is \", start_time\n path = os.path.realpath(__file__)\n current_path = os.path.dirname(path)\n config = ConfigParser.ConfigParser()\n file_name = current_path + '/Fio_test.conf'\n config.readfp(open(file_name))\n target_ip = config.get(\"target\", \"nvmf_addr\")\n target = xmlrpclib.ServerProxy(\n config.get('target', 'nvmf_testserver_addr'))\n devices_nvme = get_lsblk()\n print \"Found devices: \", devices_nvme\n time.sleep(2)\n io_range = config.get(\"test\", \"io_size\")\n queue_range = config.get(\"test\", \"queue_depth\")\n test_type = config.get(\"test\", \"test_types\")\n run_size = config.get(\"test\", \"runsize\")\n verify = config.get(\"test\", \"verify\")\n devices = get_lsblk()\n io_range_list = get_range(io_range)\n queue_range_list = get_range(queue_range)\n io_sizes = list(power_of_2_range(io_range_list[0], 
io_range_list[1]))\n queue_depths = list(power_of_2_range(\n queue_range_list[0], queue_range_list[1]))\n fio_executable = '/usr/bin/fio'\n device_paths = ['/dev/' + dev for dev in devices]\n sys.stdout.flush()\n if verify == \"False\":\n verify = False\n else:\n verify = True\n rwmixread = config.get('test', 'rwmixread')\n rwmixread = int(rwmixread)\n log = \"\"\n for io_size in io_sizes:\n for depth in queue_depths:\n if verify:\n if test_type == \"read\" or test_type == \"randread\":\n for singledevice in device_paths:\n singledevice = [singledevice]\n fio = Popen([fio_executable, '-'], stdin=PIPE)\n write_depth = 1\n fio.communicate(create_verify_fio_config(\n io_size, int(write_depth), singledevice, 'write', run_size))\n fio.stdin.close()\n rc = fio.wait()\n print \"FIO write operation completed with code %d\\n\" % rc\n time.sleep(3)\n sys.stdout.flush()\n fio = Popen([fio_executable, '-'], stdin=PIPE)\n fio.communicate(\n create_fio_config(\n io_size,\n depth,\n device_paths,\n test_type,\n run_size,\n verify,\n rwmixread))\n fio.stdin.close()\n rc = fio.wait()\n print \"FIO completed with code %d\\n\" % rc\n sys.stdout.flush()\n if rc != 0:\n log += \"Failed %s at Size %d, queue depth %d\\n\" % (\n test_type, io_size, depth)\n if len(log) == 0:\n print \"All tests passed\"\n return \"All tests passed\"\n else:\n print log\n return log\n\n\ndef create_fio_config(size, q_depth, devices, test,\n run_size, rwmixread, verify=False):\n if not verify:\n verifyfio = \"\"\n else:\n verifyfio = verify_template\n if test == \"trim\" or test == \"randtrim\":\n trim_tem = trim_template\n else:\n trim_tem = \"\"\n fiofile = fio_template % {\n \"blocksize\": size,\n \"iodepth\": q_depth,\n \"testtype\": test,\n \"rw_mixread\": rwmixread,\n \"verify\": verifyfio,\n \"trim\": trim_tem,\n \"size\": run_size}\n for (i, dev) in enumerate(devices):\n fiofile += fio_job_template % {\"jobnumber\": i, \"device\": dev}\n return fiofile\n\n\ndef create_verify_fio_config(size, q_depth, devices, test, run_size):\n fiofile = verify_write_template % {\n \"blocksize\": size, \"iodepth\": q_depth, \"testtype\": test, \"size\": run_size}\n for (i, dev) in enumerate(devices):\n fiofile += fio_job_template % {\"jobnumber\": i, \"device\": dev}\n return fiofile\n\n\ndef set_device_parameter(devices, filename_template, value):\n for dev in devices:\n filename = filename_template % dev\n f = open(filename, 'r+b')\n f.write(value)\n f.close()\n\n\ndef configure_devices(devices):\n set_device_parameter(devices, \"/sys/block/%s/queue/nomerges\", \"2\")\n set_device_parameter(devices, \"/sys/block/%s/queue/nr_requests\", \"128\")\n set_device_parameter(devices, \"/sys/block/%s/queue/scheduler\", \"noop\")\n\n\ndef get_fio_version():\n output = check_output('fio --version', shell=True)\n version = re.findall(\"fio-([0-9]+.*)\", output)\n tupleversion = version[0]\n versionstr = tupleversion\n tupleversion = tupleversion.split('.')\n for i in range(len(tupleversion)):\n tupleversion[i] = int(tupleversion[i])\n tupleversion = tuple(tupleversion)\n if tupleversion < (2, 1, 14):\n print \"fio version must be 2.1.14 or above. 
Your fio version is \", versionstr\n sys.exit(1)\n\n\ndef power_of_2_range(start, end):\n n = convert_units(start)\n while n <= convert_units(end):\n yield n\n n = n * 2\n\n\ndef convert_units(num):\n if isinstance(num, type(str())):\n if not num.isdigit():\n multipliers = {'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4}\n x = int(num[:-1])\n prefix = num[-1].upper()\n return x * multipliers[prefix]\n else:\n return int(num)\n else:\n return num\n\n\ndef get_range(item):\n range_string = item.split('-')\n range_list = [x.strip() for x in range_string]\n if len(range_list) == 1:\n range_list.append(range_list[0])\n return range_list\n\n\ndef get_lsblk():\n lsblk_log = check_output(\"lsblk -l -o NAME\", shell=True)\n return re.findall(\"(nvme[0-9]+n1+)\\n\", lsblk_log)\n",
"id": "10922393",
"language": "Python",
"matching_score": 8.20736312866211,
"max_stars_count": 3,
"path": "tests/lib/Fio_test.py"
},
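Fio_test.py above expands ranges such as "512-256K" into powers of two through its convert_units and power_of_2_range helpers. The following is a self-contained Python 3 restatement of those two helpers for illustration (the original file is Python 2); the "512"-"4K" sample range is made up.

def convert_units(num):
    # "4K" -> 4096, "1M" -> 1048576; plain digit strings and ints pass through unchanged.
    if isinstance(num, str) and not num.isdigit():
        multipliers = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, 'T': 1024 ** 4}
        return int(num[:-1]) * multipliers[num[-1].upper()]
    return int(num)

def power_of_2_range(start, end):
    n = convert_units(start)
    while n <= convert_units(end):
        yield n
        n *= 2

print(list(power_of_2_range("512", "4K")))  # [512, 1024, 2048, 4096]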
{
"content": "#!/usr/bin/env python\n# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom subprocess import check_call, call, check_output, Popen, PIPE\nimport xmlrpclib\nimport re\nimport os\nimport sys\nimport time\nimport datetime\nimport ConfigParser\nimport socket\nimport threading\n\nfio_template = \"\"\"\n[global]\nthread=1\ninvalidate=1\nrw=%(testtype)s\ntime_based=1\nruntime=%(run_time)d\nrwmixread=%(rw_mixread)d\nioengine=libaio\ndirect=1\nbs=%(blocksize)d\niodepth=%(iodepth)d\ndirect=1\n%(verify)s\nverify_dump=1\nverify_async=10\n%(trim)s\n\n\"\"\"\n\nverify_template = \"\"\"\ndo_verify=1\nverify=pattern\nverify_pattern=\"spdk\"\n\"\"\"\n\nverify_write_template = \"\"\"\ndo_verify=0\nverify=pattern\nverify_pattern=\"spdk\"\n\"\"\"\n\nfio_job_template = \"\"\"\n[job%(jobnumber)d]\nfilename=%(device)s\n\n\"\"\"\n\ntrim_template = \"\"\"\ntrim_percentage=10\ntrim_verify_zero=1\n\n\"\"\"\n\n\ndef start_fio_client():\n start_time = datetime.datetime.now()\n path = os.path.realpath(__file__)\n current_path = os.path.dirname(path)\n config = ConfigParser.ConfigParser()\n file_name = current_path + '/Fio_test.conf'\n config.readfp(open(file_name))\n io_range = get_range(config, 'test', 'io_size')\n queue_range = get_range(config, 'test', 'queue_depth')\n io_sizes = list(power_of_2_range(io_range[0], io_range[1]))\n queue_depths = list(power_of_2_range(queue_range[0], queue_range[1]))\n run_time = config.get('test', 'runtime')\n run_time = int(run_time)\n verify = config.get('test', 'verify')\n verify = str(verify)\n if verify == \"False\":\n verify = False\n else:\n verify = True\n rwmixread = config.get('test', 'rwmixread')\n rwmixread = int(rwmixread)\n log = \"\"\n target_ip = config.get('target', 'iscsi_addr')\n target = xmlrpclib.ServerProxy(\n config.get('target', 'iscsi_testserver_addr'))\n time.sleep(5)\n devices = get_target_devices()\n print \"Found devices: {0}\".format(devices)\n test_types = config.get('test', 'test_types').split()\n old_config = 
configure_devices(devices)\n if config.has_option('test', 'fio_path'):\n fio_executable = config.get('test', 'fio_path')\n else:\n fio_executable = '/usr/bin/fio'\n try:\n device_paths = ['/dev/' + dev for dev in devices]\n for size in io_sizes:\n for q_depth in queue_depths:\n for test in test_types:\n print size, q_depth\n sys.stdout.flush()\n if verify:\n if test == \"read\" or test == \"randread\":\n for singledevice in device_paths:\n singledevice = [singledevice]\n fio = Popen([fio_executable, '-'], stdin=PIPE)\n write_depth = 1\n if size == 512:\n run_time_data = 600\n elif size == 4096:\n run_time_data = 1000\n elif size == 262144:\n run_time_data = 1800\n test_data = \"write\"\n fio.communicate(\n create_fio_config(\n size,\n int(write_depth),\n singledevice,\n test_data,\n int(run_time_data),\n verify,\n rwmixread,\n writedata=True))\n fio.stdin.close()\n rc = fio.wait()\n print \"FIO write operation completed with code {0}\\n\".format(rc)\n sys.stdout.flush()\n time.sleep(3)\n fio = Popen([fio_executable, '-'], stdin=PIPE)\n fio.communicate(create_fio_config(\n size, q_depth, device_paths, test, run_time, verify, rwmixread, writedata=False))\n fio.stdin.close()\n rc = fio.wait()\n print \"FIO completed with code {0}\\n\".format(rc)\n sys.stdout.flush()\n time.sleep(1)\n if rc != 0:\n log += \"Failed %s at Size %d, queue depth %d\\n\" % (\n test, size, q_depth)\n finally:\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n print \"duration is {0}\".format(duration)\n restore_configuration(devices, old_config)\n print \"*\" * 10\n if len(log) == 0:\n print \"All tests passed\"\n return \"All tests passed\"\n else:\n print log\n return log\n\n\ndef get_fio_version():\n output = check_output('fio --version', shell=True)\n version = re.findall(\"fio-([0-9]+.*)\", output)\n tupleversion = version[0]\n versionstr = tupleversion\n tupleversion = tupleversion.split('.')\n for i in range(len(tupleversion)):\n tupleversion[i] = int(tupleversion[i])\n tupleversion = tuple(tupleversion)\n if tupleversion < (2, 1, 14):\n print \"fio version must be 2.1.14 or above. 
Your fio version is {0}\".format(versionstr)\n sys.exit(1)\n else:\n print \"fio version is Ok\"\n\n\ndef get_target_devices():\n output = check_output('iscsiadm -m session -P 3', shell=True)\n return re.findall(\"Attached scsi disk (sd[a-z]+)\", output)\n\n\ndef create_fio_config(size, q_depth, devices, test,\n run_time, verify, rwmixread, writedata=False):\n if not verify:\n verifyfio = \"\"\n else:\n if writedata:\n verifyfio = verify_write_template\n else:\n verifyfio = verify_template\n if test == \"trim\" or test == \"randtrim\":\n trim_tem = trim_template\n else:\n trim_tem = \"\"\n fiofile = fio_template % {\"blocksize\": size, \"iodepth\": q_depth, \"testtype\": test,\n \"run_time\": run_time, \"rw_mixread\": rwmixread, \"verify\": verifyfio, \"trim\": trim_tem}\n for (i, dev) in enumerate(devices):\n fiofile += fio_job_template % {\"jobnumber\": i, \"device\": dev}\n return fiofile\n\n\ndef configure_devices(devices):\n config = {}\n disable_merge_file = \"/sys/block/%s/queue/nomerges\"\n for dev in devices:\n filename = disable_merge_file % dev\n if os.path.isfile(filename):\n time.sleep(2)\n f = open(filename, 'r+b')\n config[filename] = f.read()\n f.seek(0)\n f.write('2') # The value 2 full disables merges.\n f.close()\n return config\n\n\ndef restore_configuration(devices, old_config):\n for filename in old_config:\n if os.path.isfile(filename):\n f = open(filename, 'wb')\n f.write(old_config[filename])\n f.close()\n\n\ndef power_of_2_range(start, end):\n n = convert_units(start)\n while n <= convert_units(end):\n yield n\n n = n * 2\n\n\ndef convert_units(num):\n if isinstance(num, type(str())):\n if not num.isdigit():\n multipliers = {'K': 1024, 'M': 1024**2, 'G': 1024**3, 'T': 1024**4}\n x = int(num[:-1])\n prefix = num[-1].upper()\n return x * multipliers[prefix]\n else:\n return int(num)\n else:\n return num\n\n\ndef get_range(config, section, item):\n range_string = config.get(section, item).split('-')\n range_list = [x.strip() for x in range_string]\n if len(range_list) == 1:\n range_list.append(range_list[0])\n return range_list\n",
"id": "1041876",
"language": "Python",
"matching_score": 2.9536585807800293,
"max_stars_count": 3,
"path": "tests/lib/Fio_iscsi_test.py"
},
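Both fio scripts above build their job files by filling a global template with %-formatting and then appending one [jobN] section per target device. A minimal sketch of that templating step is shown below; the template fields abbreviate the ones in the files above, and the /dev/nvme* device names are placeholders.

fio_global = """[global]
rw=%(testtype)s
bs=%(blocksize)d
iodepth=%(iodepth)d
"""

fio_job = """[job%(jobnumber)d]
filename=%(device)s
"""

def render_fio_config(blocksize, iodepth, testtype, devices):
    # Global section first, then one job section per device, as in create_fio_config above.
    text = fio_global % {"testtype": testtype, "blocksize": blocksize, "iodepth": iodepth}
    for i, dev in enumerate(devices):
        text += fio_job % {"jobnumber": i, "device": dev}
    return text

print(render_fio_config(4096, 16, "randread", ["/dev/nvme0n1", "/dev/nvme1n1"]))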
{
"content": "#!/usr/bin/env python\n# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport time\nimport os\nimport sys\nimport pdb\nimport re\nfrom test_case import TestCase\nfrom exception import VerifyFailure\nfrom Test_base_utils import write_fio_config, generate_nvmf_tgt_file\nimport Fio_test\nimport Fio_iscsi_test\n\niscsibackend = [\n \"iscsi_aiobackend\",\n \"iscsi_malloc\",\n \"iscsi_nvme\",\n \"iscsi_multiconnection\",\n \"iscsi_rxtxqueue\"]\nnvmfbackend = [\n \"nvmf_aiobackend\",\n \"nvmf_malloc\",\n \"nvmf_nvme\",\n \"nvmf_multiconnection\",\n \"nvmf_rxtxqueue\"]\n\n\nclass TestFio(object):\n\n def set_up_all(self, test_case_obj, backendname,\n all_run_size, all_run_time):\n \"\"\"\n Run at the start of each test suite.\n fio Prerequisites\n \"\"\"\n if self.nic == \"ConnectX4\":\n self.tester.send_expect(\"modprobe mlx5_ib\", \"#\", 5)\n if self.nic == \"ConnectX3\":\n self.tester.send_expect(\"modprobe mlx4_ib\", \"#\", 5)\n if self.nic == \"chelsio_40gb\":\n self.tester.send_expect(\"modprobe iw_cxgb4\", \"#\", 5)\n self.backend = backendname\n self.DEFAULT_RUN_SIZE = all_run_size\n self.DEFAULT_RUN_TIME = all_run_time\n self.tester_ports = []\n self.dut_ports = []\n self.dut_ports_all = self.dut.get_ports()\n self.tester_ports_all = self.tester.get_ports()\n self.is_port = self._get_nic_driver(self.nic)\n for i, self.dut_port in enumerate(self.dut_ports_all[1]):\n if self.dut_port == self.is_port + '\\r':\n self.dut_port_nic = self.dut_ports_all[0][i]\n self.dut_ports.append(self.dut_port_nic)\n for j, self.tester_port in enumerate(self.tester_ports_all[1]):\n if self.tester_port == self.is_port + '\\r':\n self.tester_port_nic = self.tester_ports_all[0][j]\n self.tester_ports.append(self.tester_port_nic)\n self.verify(len(self.dut_ports) >= 1, \"Insufficient ports\")\n self.dut_port_0_inf = self.dut_ports[0]\n self.dut_port_1_inf = self.dut_ports[1]\n self.tester_port_0_inf = self.tester_ports[0]\n self.tester_port_1_inf = self.tester_ports[1]\n 
self.dut_ips = {'net_seg_1': \"192.168.1.11\",\n 'net_seg_2': \"192.168.2.11\",\n 'net_seg_3': \"192.168.3.11\",\n 'net_seg_4': \"192.168.4.11\"}\n self.tester_ips = {'net_seg_1': \"192.168.1.10\",\n 'net_seg_2': \"192.168.2.10\",\n 'net_seg_3': \"192.168.3.2\",\n 'net_seg_4': \"192.168.4.2\"}\n self.dut.send_expect(\"cd %s \" % self.dut.base_dir, \"# \", 5)\n self.initial_real_path = self.dut.base_dir\n self.dut_utils_path = self.initial_real_path + \"/etc/spdk\"\n self.dut_iscsi_config_path = self.initial_real_path + \"/etc/spdk/iscsi.conf.in\"\n self.dut_nvmf_config_path = self.initial_real_path + \"/etc/spdk/nvmf.conf.in\"\n self.dut_fiotest_path = self.dut_utils_path\n test_suite_path = os.getcwd() + \"/../tests\"\n self.tester_fiotest_path = \"%s/lib/\" % test_suite_path\n self.tester_fiotest_conf = self.tester_fiotest_path + \"Fio_test.conf\"\n self.tester_fiotest_client = self.tester_fiotest_path + \"Fio_test.py\"\n self.tester_utils_path = \"%s/lib/\" % test_suite_path\n self.tester_utils_file = self.tester_utils_path + \"Test_base_utils.py\"\n self.copy_file_to_dut(self.tester_utils_file, self.dut_utils_path)\n if self.backend != \"nvmf_aiobackend\":\n self.dut.send_expect(\n 'sed -i \"s/ AIO/# AIO/\" %s' %\n self.dut_nvmf_config_path, \"# \", 10)\n self.dut.send_expect(\n 'sed -i \"s#/dev/sdb#/dev/device1#\" %s' %\n self.dut_nvmf_config_path, \"# \", 10)\n self.dut.send_expect(\n 'sed -i \"s#/dev/sdc#/dev/device2#\" %s' %\n self.dut_nvmf_config_path, \"# \", 10)\n else:\n # self.dut.send_expect('sed -i \"s#/dev/sdb#/dev/aio_device1#\" %s' % self.dut_nvmf_config_path, \"# \", 10)\n # self.dut.send_expect('sed -i \"s#/dev/sdc#/dev/aio_device2#\" %s' % self.dut_nvmf_config_path, \"# \", 10)\n nvmf_path = os.path.dirname(os.path.dirname(__file__))\n path = nvmf_path + '/lib/Test_base_fio.py'\n path_file = open(path, \"r\")\n is_aiobackend = re.findall(\n r\"\\n+ # self.dut.send_expect(.*)\",\n path_file.read())\n if is_aiobackend[\n 0] == '(\\'sed -i \"s#/dev/sdb#/dev/aio_device1#\" %s\\' % self.dut_nvmf_config_path, \"# \", 10)':\n path1 = nvmf_path + \\\n \"/lib/Test_base_fio.py:125: E265 block comment should start with '# ' \"\n print \"Please modify aio_device1, The path is\", path1\n self.verify(False, \"Not setting target backend!!!\")\n if is_aiobackend[\n 1] == '(\\'sed -i \"s#/dev/sdc#/dev/aio_device2#\" %s\\' % self.dut_nvmf_config_path, \"# \", 10)':\n path2 = nvmf_path + \\\n \"/lib/Test_base_fio.py:126: E265 block comment should start with '# ' \"\n print \"Please modify aio_device2, The path is\", path2\n self.verify(False, \"Not setting target backend!!!\")\n self.dut.send_expect(\n 'sed -i \"s/#MaxQueueDepth 128/MaxQueueDepth 1024/\" %s' %\n self.dut_nvmf_config_path, \"# \", 10)\n self.dut.send_expect(\n 'sed -i \"s/#MaxIOSize 131072/MaxIOSize 131072/\" %s' %\n self.dut_nvmf_config_path, \"# \", 10)\n self.dut.send_expect(\n \"sed -i 's/192.168.2.21/192.168.1.11/' %s\" %\n self.dut_iscsi_config_path, \"# \", 10)\n self.dut.send_expect(\n \"sed -i 's/192.168.2.0/192.168.1.0/' %s\" %\n self.dut_iscsi_config_path, \"# \", 10)\n if self.backend == \"iscsi_multiconnection\" or \"iscsi_rxtxqueue\":\n self.dut.send_expect(\n \"sed -i 's/#ReactorMask 0xFFFF/ReactorMask 0xFFFF/' %s\" %\n self.dut_iscsi_config_path, \"# \", 10)\n self.write_fio_target()\n\n def set_up(self):\n \"\"\"\n Run before each test case.\n \"\"\"\n if self.backend in iscsibackend:\n self.tester.send_expect(\"ifconfig %s %s\" % (self.tester_port_0_inf,\n self.tester_ips['net_seg_1']), \"# \", 5)\n 
self.tester.send_expect(\"ifconfig %s %s\" % (self.tester_port_1_inf,\n self.tester_ips['net_seg_2']), \"# \", 5)\n self.dut.send_expect(\"ifconfig %s %s\" % (self.dut_port_0_inf,\n self.dut_ips['net_seg_1']), \"# \", 5)\n self.dut.send_expect(\"ifconfig %s %s\" % (self.dut_port_1_inf,\n self.dut_ips['net_seg_2']), \"# \", 5)\n self.create_iscsi_config()\n self.dut.send_expect(\n \"ps -ef|grep iscsi_tgt|grep -v grep|awk '{print $2}'|xargs kill -15 & \",\n \"# \",\n 200)\n time.sleep(120)\n self.dut.send_expect(\"NRHUGE=12288 ./scripts/setup.sh\", \"#\", 200)\n self.dut.send_expect(\n \"./app/iscsi_tgt/iscsi_tgt -c iscsi.conf >> TestSPDK.log 2>&1 &\", \"# \", 200)\n time.sleep(30)\n self.tester.send_expect(\n \"iscsiadm -m discovery -t st -p 192.168.1.11\", \"# \", 10)\n self.tester.send_expect(\"iscsiadm -m node --login\", \"# \", 10)\n if self.backend in nvmfbackend:\n self.tester.send_expect(\n \"ifconfig %s %s\" %\n (self.tester_port_0_inf, self.tester_ips['net_seg_3']), \"# \", 5)\n self.tester.send_expect(\n \"ifconfig %s %s\" %\n (self.tester_port_1_inf, self.tester_ips['net_seg_4']), \"# \", 5)\n self.dut.send_expect(\n \"ifconfig %s %s\" %\n (self.dut_port_0_inf, self.dut_ips['net_seg_3']), \"# \", 5)\n self.dut.send_expect(\n \"ifconfig %s %s\" %\n (self.dut_port_1_inf, self.dut_ips['net_seg_4']), \"# \", 5)\n self.create_nvmf_tgt_config()\n self.dut.send_expect(\n \"ps -ef|grep nvmf_tgt|grep -v grep|awk '{print $2}'|xargs kill -15 & \",\n \"# \",\n 200)\n time.sleep(120)\n self.dut.send_expect(\"NRHUGE=12288 ./scripts/setup.sh\", \"#\", 200)\n self.dut.send_expect(\n \"./app/nvmf_tgt/nvmf_tgt -c nvmf.conf >> TestSPDK.log 2>&1 & \", \"# \", 200)\n time.sleep(30)\n print \"Waiting for connecting nvmf target...\"\n self.tester.send_expect(\"modprobe nvme-rdma\", \"# \", 5)\n self.tester.send_expect(\"modprobe nvme-fabrics\", \"# \", 5)\n self.dut.send_expect(\n \"nvme discover -t rdma -a 192.168.3.11 -s 4420\", \"# \", 5)\n self.tester.send_expect(\n 'nvme connect -t rdma -n \"nqn.2016-06.io.spdk:cnode1\" -a 192.168.3.11 -s 4420',\n \"# \",\n 5)\n self.tester.send_expect(\n 'nvme connect -t rdma -n \"nqn.2016-06.io.spdk:cnode2\" -a 192.168.3.11 -s 4420',\n \"# \",\n 5)\n if self.backend == \"nvmf_malloc\":\n number = 6\n for i in range(number):\n n = i + 3\n self.tester.send_expect(\n 'nvme connect -t rdma -n \"nqn.2016-06.io.spdk:cnode{}\" -a 192.168.3.11 -s 4420'.format(\n n),\n \"# \",\n 5)\n if self.backend == \"nvmf_multiconnection\":\n number = 126\n for i in range(number):\n n = i + 3\n self.tester.send_expect(\n 'nvme connect -t rdma -n \"nqn.2016-06.io.spdk:cnode{}\" -a 192.168.3.11 -s 4420'.format(\n n),\n \"# \",\n 5)\n\n def create_nvmf_tgt_config(self):\n self.dut.send_expect(\n \"rm -rf nvmf.conf && cp etc/spdk/nvmf.conf.in nvmf.conf \", \"# \", 200)\n self.dut.send_expect(\n \"python etc/spdk/Test_base_utils.py generate_nvmf_tgt_file %s nvmf.conf \" %\n self.backend, \"# \", 200)\n\n def create_iscsi_config(self):\n self.dut.send_expect(\n \"rm -rf iscsi.conf && cp etc/spdk/iscsi.conf.in iscsi.conf \", \"# \", 200)\n self.dut.send_expect(\n \"python etc/spdk/Test_base_utils.py generate_iscsi_file %s iscsi.conf \" % self.backend, \"# \", 200)\n\n def write_fio_target(self):\n target_options = {'iscsi_addr': self.dut_ips['net_seg_1'],\n 'iscsi_testserver_addr': 'http://' + self.dut_ips['net_seg_2'] + ':8000',\n 'nvmf_addr': self.dut_ips['net_seg_3'],\n 'nvmf_testserver_addr': 'http://' + self.dut_ips['net_seg_4'] + ':8000'}\n write_fio_config(self.tester_fiotest_conf, 
'target', **target_options)\n\n def write_fio_test(self, test_type, io_size, queue_depth,\n runtime, runsize, verify, rwmixread):\n test_options = {'fio_path': '/usr/bin/fio',\n 'test_types': test_type,\n 'io_size': io_size,\n 'queue_depth': queue_depth,\n 'runsize': runsize,\n 'runtime': runtime,\n 'verify': verify,\n 'rwmixread': rwmixread\n }\n write_fio_config(self.tester_fiotest_conf, 'test', **test_options)\n\n def copy_file_to_dut(self, file_in_tester, dut_file_path):\n self.dut.session.copy_file_to(file_in_tester)\n file_name = file_in_tester.split('/')[-1]\n self.dut.send_expect(\"mv -f /root/%s %s\" %\n (file_name, dut_file_path), \"# \", 5)\n\n def kill_dut_process(self, process):\n command = \"ps aux | grep {0} | grep -v grep | awk '{{print $2}}'\".format(\n process)\n out = self.dut.alt_session.send_expect(command, \"# \", 10)\n if not out:\n print \"There is no process [ {0} ] in dut!!!\".format(process)\n else:\n self.dut.alt_session.send_expect(\n \"kill -15 %s\" % out.splitlines()[0], \"# \", 10)\n time.sleep(120)\n out = self.dut.alt_session.send_expect(command, \"# \", 10)\n if out:\n print \"kill dut process [ {0} ] failed!!!\".format(process)\n\n def kill_target(self):\n \"\"\"\n Kill nvmf target when finish one test case\n \"\"\"\n if self.backend in iscsibackend:\n self.tester.send_expect(\"iscsiadm -m node --logout\", \"# \")\n self.tester.send_expect(\"iscsiadm -m node -o delete\", \"# \")\n self.kill_dut_process(\"iscsi_tgt\")\n time.sleep(3)\n if self.backend in nvmfbackend:\n self.tester.send_expect(\n 'nvme disconnect -n \"nqn.2016-06.io.spdk:cnode1\"', \"# \")\n self.tester.send_expect(\n 'nvme disconnect -n \"nqn.2016-06.io.spdk:cnode2\"', \"# \")\n if self.backend == \"nvmf_malloc\":\n number = 6\n for i in range(number):\n idx = i + 3\n self.tester.send_expect(\n 'nvme disconnect -n \"nqn.2016-06.io.spdk:cnode{}\"'.format(idx), \"# \", 5)\n if self.backend == \"nvmf_multiconnection\":\n number = 126\n for i in range(number):\n idx = i + 3\n self.tester.send_expect(\n 'nvme disconnect -n \"nqn.2016-06.io.spdk:cnode{}\"'.format(idx), \"# \", 5)\n self.kill_dut_process(\"nvmf_tgt\")\n\n def fio_test(self, **fio_conf):\n \"\"\"\n Run fio to do workload from nvmf for combination of io size 512 ~ 256k, queue depth 1 ~ 128\n \"\"\"\n if 'test_type' not in fio_conf or \\\n 'io_size' not in fio_conf or \\\n 'queue_depth' not in fio_conf:\n self.verify(False, \"fio test do not have correct keys in dict!!!\")\n if not fio_conf['test_type'] or \\\n not fio_conf['io_size'] or \\\n not fio_conf['queue_depth']:\n self.verify(False, \"fio test have null values in dict!!!\")\n if 'runsize' not in fio_conf or not fio_conf['runsize']:\n fio_conf['runsize'] = '512M'\n if 'runtime' not in fio_conf or not fio_conf['runtime']:\n fio_conf['runtime'] = '10'\n if 'verify' not in fio_conf:\n fio_conf['verify'] = True\n if 'rwmixread' not in fio_conf:\n fio_conf['rwmixread'] = '50'\n test_type = str(fio_conf['test_type'])\n io_size = str(fio_conf['io_size'])\n queue_depth = str(fio_conf['queue_depth'])\n runsize = str(fio_conf['runsize'])\n runtime = str(fio_conf['runtime'])\n verify = bool(fio_conf['verify'])\n rwmixread = str(fio_conf['rwmixread'])\n if self.backend in iscsibackend:\n if self.backend == \"iscsi_rxtxqueue\" or self.backend == \"iscsi_multiconnection\":\n self.write_fio_test(\n test_type,\n io_size,\n queue_depth,\n runtime,\n \"512M\",\n verify,\n rwmixread)\n else:\n self.write_fio_test(\n test_type,\n io_size,\n queue_depth,\n runtime,\n \"512M\",\n verify,\n 
\"50\")\n if self.backend in nvmfbackend:\n if self.backend == \"nvmf_rxtxqueue\" or self.backend == \"nvmf_multiconnection\":\n self.write_fio_test(\n test_type,\n io_size,\n queue_depth,\n \"20\",\n runsize,\n verify,\n rwmixread)\n else:\n self.write_fio_test(\n test_type,\n io_size,\n queue_depth,\n \"20\",\n runsize,\n verify,\n \"50\")\n time.sleep(5)\n if self.backend in iscsibackend:\n out = Fio_iscsi_test.start_fio_client()\n if self.backend in nvmfbackend:\n out = Fio_test.start_fio_client()\n self.verify(\n \"All tests passed\" in out, \"test_%s_%s_%s failed\" %\n (test_type, io_size, queue_depth))\n\n def fio_read(self, io_size, queue_depth, verify=True,\n runsize=None, runtime=None):\n if runtime is None:\n runtime = self.DEFAULT_RUN_TIME\n if runsize is None:\n runsize = self.DEFAULT_RUN_SIZE\n fio_conf = {}\n fio_conf['test_type'] = 'read'\n fio_conf['io_size'] = str(io_size)\n fio_conf['queue_depth'] = str(queue_depth)\n fio_conf['verify'] = verify\n fio_conf['runtime'] = str(runtime)\n fio_conf['runsize'] = str(runsize)\n self.fio_test(**fio_conf)\n\n def fio_write(self, io_size, queue_depth, verify=True,\n runsize=None, runtime=None):\n if runtime is None:\n runtime = self.DEFAULT_RUN_TIME\n if runsize is None:\n runsize = self.DEFAULT_RUN_SIZE\n fio_conf = {}\n fio_conf['test_type'] = 'write'\n fio_conf['io_size'] = str(io_size)\n fio_conf['queue_depth'] = str(queue_depth)\n fio_conf['runtime'] = str(runtime)\n fio_conf['runsize'] = str(runsize)\n fio_conf['verify'] = verify\n self.fio_test(**fio_conf)\n\n def fio_rw(self, io_size, queue_depth, rwmixread,\n verify=True, runsize=None, runtime=None):\n if runtime is None:\n runtime = self.DEFAULT_RUN_TIME\n if runsize is None:\n runsize = self.DEFAULT_RUN_SIZE\n fio_conf = {}\n fio_conf['test_type'] = 'rw'\n fio_conf['io_size'] = str(io_size)\n fio_conf['queue_depth'] = str(queue_depth)\n fio_conf['runtime'] = str(runtime)\n fio_conf['runsize'] = str(runsize)\n fio_conf['verify'] = verify\n fio_conf['rwmixread'] = str(rwmixread)\n self.fio_test(**fio_conf)\n\n def fio_randread(self, io_size, queue_depth, verify=True,\n runsize=None, runtime=None):\n if runtime is None:\n runtime = self.DEFAULT_RUN_TIME\n if runsize is None:\n runsize = self.DEFAULT_RUN_SIZE\n fio_conf = {}\n fio_conf['test_type'] = 'randread'\n fio_conf['io_size'] = str(io_size)\n fio_conf['queue_depth'] = str(queue_depth)\n fio_conf['runtime'] = str(runtime)\n fio_conf['runsize'] = str(runsize)\n fio_conf['verify'] = verify\n self.fio_test(**fio_conf)\n\n def fio_randwrite(self, io_size, queue_depth,\n verify=True, runsize=None, runtime=None):\n if runtime is None:\n runtime = self.DEFAULT_RUN_TIME\n if runsize is None:\n runsize = self.DEFAULT_RUN_SIZE\n fio_conf = {}\n fio_conf['test_type'] = 'randwrite'\n fio_conf['io_size'] = str(io_size)\n fio_conf['queue_depth'] = str(queue_depth)\n fio_conf['runtime'] = str(runtime)\n fio_conf['runsize'] = str(runsize)\n fio_conf['verify'] = verify\n self.fio_test(**fio_conf)\n\n def fio_randrw(self, io_size, queue_depth, rwmixread,\n verify=True, runsize=None, runtime=None):\n if runtime is None:\n runtime = self.DEFAULT_RUN_TIME\n if runsize is None:\n runsize = self.DEFAULT_RUN_SIZE\n fio_conf = {}\n fio_conf['test_type'] = 'randrw'\n fio_conf['io_size'] = str(io_size)\n fio_conf['queue_depth'] = str(queue_depth)\n fio_conf['runtime'] = str(runtime)\n fio_conf['runsize'] = str(runsize)\n fio_conf['verify'] = verify\n fio_conf['rwmixread'] = str(rwmixread)\n self.fio_test(**fio_conf)\n\n def fio_trim(self, 
io_size, queue_depth, verify=True,\n runsize=None, runtime=None):\n if runtime is None:\n runtime = self.DEFAULT_RUN_TIME\n if runsize is None:\n runsize = self.TRIM_RUN_SIZE\n fio_conf = {}\n fio_conf['test_type'] = 'trim'\n fio_conf['io_size'] = str(io_size)\n fio_conf['queue_depth'] = str(queue_depth)\n fio_conf['runsize'] = str(runsize)\n fio_conf['runtime'] = str(runtime)\n fio_conf['verify'] = verify\n self.fio_test(**fio_conf)\n\n def fio_randtrim(self, io_size, queue_depth, verify=True,\n runsize=None, runtime=None):\n if runtime is None:\n runtime = self.DEFAULT_RUN_TIME\n if runsize is None:\n runsize = self.TRIM_RUN_SIZE\n fio_conf = {}\n fio_conf['test_type'] = 'randtrim'\n fio_conf['io_size'] = str(io_size)\n fio_conf['queue_depth'] = str(queue_depth)\n fio_conf['runsize'] = str(runsize)\n fio_conf['runtime'] = str(runtime)\n fio_conf['verify'] = verify\n self.fio_test(**fio_conf)\n\n def fio_trimwrite(self, io_size, queue_depth,\n verify=True, runsize=None, runtime=None):\n if runtime is None:\n runtime = self.DEFAULT_RUN_TIME\n if runsize is None:\n runsize = self.TRIM_RUN_SIZE\n fio_conf = {}\n fio_conf['test_type'] = 'trimwrite'\n fio_conf['io_size'] = str(io_size)\n fio_conf['queue_depth'] = str(queue_depth)\n fio_conf['runsize'] = str(runsize)\n fio_conf['runtime'] = str(runtime)\n fio_conf['verify'] = verify\n self.fio_test(**fio_conf)\n\n def test_fio_read_512_1_verify(self):\n self.fio_read(512, 1, True)\n\n def test_fio_read_4096_16_verify(self):\n self.fio_read(4096, 16, True)\n\n def test_fio_read_256k_64_verify(self):\n self.fio_read('256k', 64, True)\n\n def test_fio_write_512_1_verify(self):\n self.fio_write(512, 1, True)\n\n def test_fio_write_4096_16_verify(self):\n self.fio_write(4096, 16, True)\n\n def test_fio_write_256k_64_verify(self):\n self.fio_write('256k', 64, True)\n\n def test_fio_rw_512_1_verify(self):\n self.fio_rw(512, 1, True)\n\n def test_fio_rw_4096_16_verify(self):\n self.fio_rw(4096, 16, True)\n\n def test_fio_rw_256k_64_verify(self):\n self.fio_rw('256k', 64, True)\n\n def test_fio_randread_512_1_verify(self):\n self.fio_randread(512, 1, True)\n\n def test_fio_randread_4096_16_verify(self):\n self.fio_randread(4096, 16, True)\n\n def test_fio_randread_256k_64_verify(self):\n self.fio_randread('256k', 64, True)\n\n def test_fio_randwrite_512_1_verify(self):\n self.fio_randwrite(512, 1, True)\n\n def test_fio_randwrite_4096_16_verify(self):\n self.fio_randwrite(4096, 16, True)\n\n def test_fio_randwrite_256k_64_verify(self):\n self.fio_randwrite('256k', 64, True)\n\n def test_fio_randrw_512_1_verify(self):\n self.fio_randrw(512, 1, True)\n\n def test_fio_randrw_4096_16_verify(self):\n self.fio_randrw(4096, 16, True)\n\n def test_fio_randrw_256k_64_verify(self):\n self.fio_randrw('256k', 64, True)\n\n def tear_down(self):\n \"\"\"\n Run after each test case.\n \"\"\"\n self.kill_target()\n",
"id": "3953190",
"language": "Python",
"matching_score": 5.1209635734558105,
"max_stars_count": 3,
"path": "tests/lib/Test_base_fio.py"
},
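fio_test() in Test_base_fio.py above fills in defaults (runsize, runtime, verify, rwmixread) for any keys missing from the fio_conf dict before writing the test config. A tiny sketch of that default-filling pattern follows, using the same default values as the method above; the merging helper itself and the sample input are illustrative, not part of the file.

def with_fio_defaults(fio_conf):
    # Defaults fio_test() falls back to when a key is absent or empty.
    defaults = {"runsize": "512M", "runtime": "10", "verify": True, "rwmixread": "50"}
    merged = dict(defaults)
    merged.update({k: v for k, v in fio_conf.items() if v not in (None, "")})
    return merged

print(with_fio_defaults({"test_type": "randrw", "io_size": "4096", "queue_depth": "16"}))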
{
"content": "#!/usr/bin/env python\n# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport time\nimport datetime\nimport os\nimport sys\nfrom test_case import TestCase\nfrom Filesystem_integrity_test import FileSystem_integrity\n\n\nclass TestFileSystemIntegrity(object):\n\n def set_up_all(self, test_case_obj, backendname):\n \"\"\"\n Run at the start of each test suite.\n fio Prerequisites\n \"\"\"\n if self.nic == \"ConnectX4\":\n self.tester.send_expect(\"modprobe mlx5_ib\", \"#\", 5)\n if self.nic == \"ConnectX3\":\n self.tester.send_expect(\"modprobe mlx4_ib\", \"#\", 5)\n if self.nic == \"chelsio_40gb\":\n self.tester.send_expect(\"modprobe iw_cxgb4\", \"#\", 5)\n self.backend = backendname\n self.dut_ports = []\n self.tester_ports = []\n self.dut_ports_all = self.dut.get_ports()\n self.tester_ports_all = self.tester.get_ports()\n self.is_port = self._get_nic_driver(self.nic)\n for i, self.dut_port in enumerate(self.dut_ports_all[1]):\n if self.dut_port == self.is_port + '\\r':\n self.dut_port_nic = self.dut_ports_all[0][i]\n self.dut_ports.append(self.dut_port_nic)\n for j, self.tester_port in enumerate(self.tester_ports_all[1]):\n if self.tester_port == self.is_port + '\\r':\n self.tester_port_nic = self.tester_ports_all[0][j]\n self.tester_ports.append(self.tester_port_nic)\n self.verify(len(self.dut_ports) >= 1, \"Insufficient ports\")\n self.dut_port_0_inf = self.dut_ports[0]\n self.tester_port_0_inf = self.tester_ports[0]\n\n self.dut.send_expect(\"cd %s\" % self.dut.base_dir, \"# \", 5)\n self.initial_real_path = self.dut.base_dir\n test_suite_path = os.getcwd() + \"/../tests\"\n self.tester_utils_path = \"%s/lib/\" % test_suite_path\n self.tester_utils_file = self.tester_utils_path + \"Test_base_utils.py\"\n self.dut_utils_path = self.initial_real_path + \"/etc/spdk\"\n self.nvmf_config_path = self.initial_real_path + \"/etc/spdk/nvmf.conf.in\"\n self.iscsi_config_path = self.initial_real_path + \"/etc/spdk/iscsi.conf.in\"\n 
self.copy_file_to_dut(self.tester_utils_file, self.dut_utils_path)\n self.dut.send_expect('sed -i \"s/ AIO/# AIO/\" %s' %\n self.nvmf_config_path, \"# \", 10)\n self.dut.send_expect(\n 'sed -i \"s/#MaxQueueDepth 128/MaxQueueDepth 1024/\" %s' % self.nvmf_config_path, \"# \", 10)\n self.dut.send_expect(\n 'sed -i \"s/#MaxIOSize 131072/MaxIOSize 131072/\" %s' % self.nvmf_config_path, \"# \", 10)\n self.dut.send_expect(\n 'sed -i \"s/TransportId/#TransportId/\" %s' % self.nvmf_config_path, \"# \", 10)\n self.dut.send_expect(\n 'sed -i \"s/RetryCount 4/#RetryCount 4/\" %s' % self.nvmf_config_path, \"# \", 10)\n self.dut.send_expect(\n \"sed -i 's/192.168.2.21/192.168.1.11/' %s\" % self.iscsi_config_path, \"# \", 10)\n self.dut.send_expect(\n \"sed -i 's/192.168.2.0/192.168.1.0/' %s\" % self.iscsi_config_path, \"# \", 10)\n\n def copy_file_to_dut(self, file_in_tester, dut_file_path):\n self.dut.session.copy_file_to(file_in_tester)\n file_name = file_in_tester.split('/')[-1]\n self.dut.send_expect(\"mv -f /root/%s %s\" %\n (file_name, dut_file_path), \"# \", 5)\n\n def kill_target(self):\n \"\"\"\n Kill target when finish one test case\n \"\"\"\n if self.backend == \"nvmf_nvme\":\n self.tester.send_expect(\"umount /home/devicedev0*\", \"# \", 10)\n self.tester.send_expect(\"rm -rf /home/devicedev0*\", \"# \", 10)\n self.tester.send_expect(\n 'nvme disconnect -n \"nqn.2016-06.io.spdk:cnode1\"', \"# \", 10)\n self.tester.send_expect(\n 'nvme disconnect -n \"nqn.2016-06.io.spdk:cnode2\"', \"# \", 10)\n self.tester.send_expect(\n 'nvme disconnect -n \"nqn.2016-06.io.spdk:cnode3\"', \"# \", 10)\n self.tester.send_expect(\n 'nvme disconnect -n \"nqn.2016-06.io.spdk:cnode4\"', \"# \", 10)\n out = self.dut.alt_session.send_expect(\n \"ps aux | grep nvmf_tgt | awk '{print $2}'\", \"# \", 10)\n self.dut.send_expect(\"kill -15 %s\" % out.splitlines()[0], \"# \", 10)\n time.sleep(120)\n if self.backend == \"iscsi_nvme\":\n self.tester.send_expect(\"umount /home/devicedev0*\", \"# \", 10)\n self.tester.send_expect(\"rm -rf /home/devicedev0*\", \"# \", 10)\n self.tester.send_expect(\"iscsiadm -m node --logout\", \"# \")\n self.tester.send_expect(\"iscsiadm -m node -o delete\", \"# \")\n out = self.dut.alt_session.send_expect(\n \"ps aux | grep iscsi_tgt | awk '{print $2}'\", \"# \", 10)\n self.dut.send_expect(\"kill -15 %s\" % out.splitlines()[0], \"# \", 10)\n time.sleep(120)\n\n def set_up(self):\n \"\"\"\n Run before each test case.\n \"\"\"\n if self.backend == \"nvmf_nvme\":\n self.tester.send_expect(\n \"ifconfig %s 192.168.3.2\" %\n self.tester_port_0_inf, \"# \", 5)\n self.dut.send_expect(\n \"ifconfig %s 192.168.3.11\" %\n self.dut_port_0_inf, \"# \", 5)\n self.dut.send_expect(\n \"rm -rf nvmf.conf && cp etc/spdk/nvmf.conf.in nvmf.conf \", \"# \", 200)\n self.dut.send_expect(\n \"python etc/spdk/Test_base_utils.py generate_nvmf_tgt_file nvmf_nvme nvmf.conf\",\n \"# \",\n 200)\n self.dut.send_expect(\"NRHUGE=12288 ./scripts/setup.sh\", \"#\", 200)\n self.dut.send_expect(\n \"ps -ef|grep nvmf_tgt|grep -v grep|awk '{print $2}'|xargs kill -15 & \",\n \"# \",\n 200)\n time.sleep(120)\n self.dut.send_expect(\n \"./app/nvmf_tgt/nvmf_tgt -c nvmf.conf >> TestSPDK.log 2>&1 &\", \"# \", 200)\n time.sleep(40)\n print \"Waiting for nvmf target to connect...\"\n time.sleep(2)\n self.tester.send_expect(\"modprobe nvme-rdma\", \"# \", 10)\n time.sleep(2)\n self.tester.send_expect(\"modprobe nvme-fabrics\", \"# \", 10)\n time.sleep(2)\n self.tester.send_expect(\n \"nvme discover -t rdma -a 192.168.3.11 -s 4420\", \"# 
\", 10)\n time.sleep(10)\n self.tester.send_expect(\n 'nvme connect -t rdma -n \"nqn.2016-06.io.spdk:cnode1\" -a 192.168.3.11 -s 4420',\n \"# \",\n 10,\n verify=True)\n self.tester.send_expect(\n 'nvme connect -t rdma -n \"nqn.2016-06.io.spdk:cnode2\" -a 192.168.3.11 -s 4420',\n \"# \",\n 10,\n verify=True)\n self.tester.send_expect(\n 'nvme connect -t rdma -n \"nqn.2016-06.io.spdk:cnode3\" -a 192.168.3.11 -s 4420',\n \"# \",\n 10,\n verify=True)\n self.tester.send_expect(\n 'nvme connect -t rdma -n \"nqn.2016-06.io.spdk:cnode4\" -a 192.168.3.11 -s 4420',\n \"# \",\n 10,\n verify=True)\n if self.backend == \"iscsi_nvme\":\n self.tester.send_expect(\n \"ifconfig %s 192.168.1.10\" %\n self.tester_port_0_inf, \"# \", 5)\n self.dut.send_expect(\n \"ifconfig %s 192.168.1.11\" %\n self.dut_port_0_inf, \"# \", 5)\n self.dut.send_expect(\n \"rm -rf iscsi.conf && cp etc/spdk/iscsi.conf.in iscsi.conf \", \"# \", 200)\n self.dut.send_expect(\n \"python etc/spdk/Test_base_utils.py generate_iscsi_file iscsi_nvme iscsi.conf \",\n \"# \",\n 200)\n self.dut.send_expect(\"NRHUGE=12288 ./scripts/setup.sh\", \"#\", 200)\n self.dut.send_expect(\n \"./app/iscsi_tgt/iscsi_tgt -c iscsi.conf >> TestSPDK.log 2>&1 & \", \"# \", 200)\n time.sleep(40)\n self.tester.send_expect(\n \"iscsiadm -m discovery -t st -p 192.168.1.11\", \"# \", 10)\n time.sleep(10)\n self.tester.send_expect(\n \"iscsiadm -m node --login\", \"# \", 10, verify=True)\n\n def test_ext4_large_file(self):\n start_time = datetime.datetime.now()\n ext4test = FileSystem_integrity('ext4', self.backend)\n # change 30 for daily testing\n # change 120 for weekend testing\n out = ext4test.run_filesystem_integrity(30, 'large')\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n print \"duration of ext4_large_file is {0}\".format(duration)\n self.verify(\"All tests passed\" in out,\n \"test ext4 large file system failed\")\n\n def test_ext4_small_file(self):\n start_time = datetime.datetime.now()\n ext4test = FileSystem_integrity('ext4', self.backend)\n # change 400 for daily testing\n # change 1200 for weekend testing\n out = ext4test.run_filesystem_integrity(400, 'small')\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n print \"duration of ext4_small_file is {0}\".format(duration)\n self.verify(\"All tests passed\" in out,\n \"test ext4 small file system failed\")\n\n def test_ext4_compile_kernel(self):\n start_time = datetime.datetime.now()\n ext4test = FileSystem_integrity('ext4', self.backend)\n # change 20 for daily testing\n # change 120 for weekend testing\n out = ext4test.onlycompilekernel(20)\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n print \"duration of ext4_compile_kernel is {0}\".format(duration)\n self.verify(\"All tests passed\" in out,\n \"test ext4 compile kernel failed\")\n\n def test_btrfs_large_file(self):\n start_time = datetime.datetime.now()\n btrfstest = FileSystem_integrity('btrfs', self.backend)\n # change 30 for daily testing\n # change 120 for weekend testing\n out = btrfstest.run_filesystem_integrity(30, 'large')\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n print \"duration of btrfs_large_file is {0}\".format(duration)\n self.verify(\"All tests passed\" in out,\n \"test btrfs large file system failed\")\n\n def test_btrfs_small_file(self):\n start_time = datetime.datetime.now()\n btrfstest = FileSystem_integrity('btrfs', self.backend)\n # change 400 for daily testing\n # change 1200 for weekend testing\n out = 
btrfstest.run_filesystem_integrity(400, 'small')\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n print \"duration of btrfs_small_file is {0}\".format(duration)\n self.verify(\"All tests passed\" in out,\n \"test btrfs small file system failed\")\n\n def test_btrfs_compile_kernel(self):\n start_time = datetime.datetime.now()\n btrfstest = FileSystem_integrity('btrfs', self.backend)\n # change 20 for daily testing\n # change 120 for weekend testing\n out = btrfstest.onlycompilekernel(20)\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n print \"duration of btrfs_compile_kernel is {0}\".format(duration)\n self.verify(\"All tests passed\" in out,\n \"test btrfs compile kernel failed\")\n\n def test_xfs_large_file(self):\n start_time = datetime.datetime.now()\n xfstest = FileSystem_integrity('xfs', self.backend)\n # change 15 for daily testing\n # change 50 for weekend testing\n out = xfstest.run_filesystem_integrity(15, 'large')\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n print \"duration of xfs_large_file is {0}\".format(duration)\n self.verify(\"All tests passed\" in out,\n \"test xfs large file system failed\")\n\n def test_xfs_small_file(self):\n start_time = datetime.datetime.now()\n xfstest = FileSystem_integrity('xfs', self.backend)\n # change 400 for daily testing\n # change 1200 for weekend testing\n out = xfstest.run_filesystem_integrity(400, 'small')\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n print \"duration of xfs_small_file is {0}\".format(duration)\n self.verify(\"All tests passed\" in out,\n \"test xfs small file system failed\")\n\n def test_xfs_compile_kernel(self):\n start_time = datetime.datetime.now()\n xfstest = FileSystem_integrity('xfs', self.backend)\n # change 20 for daily testing\n # change 120 for weekend testing\n out = xfstest.onlycompilekernel(20)\n end_time = datetime.datetime.now()\n duration = end_time - start_time\n print \"duration of xfs_compile_kernel is {0}\".format(duration)\n self.verify(\"All tests passed\" in out,\n \"test xfs compile kernel failed\")\n\n def tear_down(self):\n \"\"\"\n Run after each test case.\n \"\"\"\n print 'tear down'\n self.kill_target()\n",
"id": "6073076",
"language": "Python",
"matching_score": 3.071225643157959,
"max_stars_count": 3,
"path": "tests/lib/Test_base_filesystem.py"
},
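A minimal local sketch of the shutdown step that `kill_target()` above performs over SSH: find the running `nvmf_tgt`/`iscsi_tgt` process, send it SIGTERM, and wait for teardown. The helper name is invented, `pgrep` stands in for the original `ps aux | grep ... | awk` pipeline, and the suite's 120-second sleep is shortened here.

```python
import subprocess
import time

def stop_target(process_name="nvmf_tgt", settle_seconds=10):
    # pgrep -x matches the exact process name, one PID per line; a non-zero
    # exit status means nothing matched, i.e. the target is already stopped.
    try:
        pids = subprocess.check_output(["pgrep", "-x", process_name]).decode().split()
    except subprocess.CalledProcessError:
        return
    for pid in pids:
        subprocess.call(["kill", "-15", pid])  # SIGTERM, as kill_target() does
    time.sleep(settle_seconds)  # give the target time to release hugepages and devices
```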
{
"content": "#!/usr/bin/env python\n# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport re\nimport sys\nimport os\nimport ConfigParser\nfrom StringIO import StringIO\nfrom subprocess import check_call, call, check_output, Popen, PIPE\n\n\nclass ParseConfig(object):\n\n def __init__(self, config_path):\n self.conf = ConfigParser.ConfigParser()\n self.conf_path = config_path\n self.conf.read(self.conf_path)\n\n def get_sections(self):\n return self.conf.sections()\n\n def get_section_options(self, section):\n return self.conf.options(section)\n\n def get_section_items(self, section):\n return self.conf.items(section)\n\n def get_section_option(self, section, option):\n return self.conf.get(section, option)\n\n def add_section(self, section):\n if self.conf.has_section(section):\n pass\n else:\n self.conf.add_section(section)\n\n def set_section_option(self, section, option, value):\n self.add_section(section)\n self.conf.set(section, option, value)\n\n def write_file(self):\n self.conf.write(open(self.conf_path, \"w\"))\n\n\ndef write_fio_config(config_path, section, **kwargs):\n \"\"\"\n kwargs:\n {option1: value,\n option2: value,\n }\n \"\"\"\n config = ParseConfig(config_path)\n for option in kwargs.keys():\n config.set_section_option(section, option, kwargs[option])\n\n config.write_file()\n\n\ndef generate_iscsi_file(backend, filename):\n check_call(\"sed -i '/\\[Nvme/,$d' \" + filename, shell=True)\n if backend == \"iscsi_nvme\" or backend == \"iscsi_rxtxqueue\":\n output = check_output(\"lspci -nnn\", shell=True)\n bus_numbers = re.findall(\n \"([0-9][0-9]:[0-9][0-9].[0-9]) Non-Volatile memory controller\", output)\n nvme_ctrl_num = len(bus_numbers)\n all_luns = int(nvme_ctrl_num)\n\n filedesc = open(filename, 'a')\n filedesc.write(\"\\n[Gpt]\\n\")\n filedesc.write(\"\\nDisable Yes\\n\")\n filedesc.write(\"\\n[Nvme] \\n\")\n for i, value in enumerate(bus_numbers):\n filedesc.write(\n '\\n TransportId \"trtype:PCIe traddr:0000:{}\" Nvme{} 
\\n'.format(value, i))\n filedesc.write(\" RetryCount {} \\n\".format(nvme_ctrl_num))\n filedesc.write(\" Timeout 0 \\n\")\n filedesc.write(\" ActionOnTimeout None \\n\")\n filedesc.write(\" AdminPollRate 100000 \\n\")\n filedesc.write(\" HotplugEnable Yes \\n\")\n idx = 0\n for i in range(all_luns):\n target_id = idx + 1\n filedesc.write(\"\\n[TargetNode\" + str(target_id) + \"]\\n\")\n filedesc.write(\" TargetName disk\" + str(target_id) + \"\\n\")\n filedesc.write(\" Mapping PortalGroup1 InitiatorGroup1\\n\")\n filedesc.write(\" AuthMethod Auto\\n\")\n filedesc.write(\" AuthGroup AuthGroup1\\n\")\n filedesc.write(\" UseDigest Auto\\n\")\n filedesc.write(\" QueueDepth 128\\n\")\n filedesc.write(\" LUN0 Nvme\" + str(idx) + \"n1\" + \"\\n\")\n idx = idx + 1\n filedesc.close()\n\n if backend == \"iscsi_aiobackend\":\n filedesc = open(filename, 'a')\n filedesc.write(\"\\n[Gpt]\\n\")\n filedesc.write(\"\\nDisable Yes\\n\")\n filedesc.write(\"\\n[AIO] \\n\")\n filedesc.write(\" AIO /dev/sdf AIO0\\n\")\n filedesc.write(\" AIO /dev/sde AIO1\\n\")\n\n all_luns = 2\n idx = 0\n for i in range(all_luns):\n target_id = idx + 1\n filedesc.write(\"\\n[TargetNode\" + str(target_id) + \"]\\n\")\n filedesc.write(\" TargetName disk\" + str(target_id) + \"\\n\")\n filedesc.write(\" Mapping PortalGroup1 InitiatorGroup1\\n\")\n filedesc.write(\" AuthMethod Auto\\n\")\n filedesc.write(\" AuthGroup AuthGroup1\\n\")\n filedesc.write(\" UseDigest Auto\\n\")\n filedesc.write(\" QueueDepth 128\\n\")\n filedesc.write(\" LUN0 AIO\" + str(idx) + \"\\n\")\n idx = idx + 1\n filedesc.close()\n\n if backend == \"iscsi_malloc\":\n all_luns = 2\n idx = 0\n filedesc = open(filename, 'a')\n filedesc.write(\"\\n[Gpt]\\n\")\n filedesc.write(\"\\nDisable Yes\\n\")\n filedesc.write(\"\\n[Malloc] \\n\")\n filedesc.write(\" NumberOfLuns 2\\n\")\n filedesc.write(\" LunSizeInMB 128\\n\")\n filedesc.write(\" BlockSize 512\\n\")\n for i in range(all_luns):\n target_id = idx + 1\n filedesc.write(\"\\n[TargetNode\" + str(target_id) + \"]\\n\")\n filedesc.write(\" TargetName disk\" + str(target_id) + \"\\n\")\n filedesc.write(\" Mapping PortalGroup1 InitiatorGroup1\\n\")\n filedesc.write(\" AuthMethod Auto\\n\")\n filedesc.write(\" AuthGroup AuthGroup1\\n\")\n filedesc.write(\" UseDigest Auto\\n\")\n filedesc.write(\" QueueDepth 128\\n\")\n filedesc.write(\" LUN0 Malloc\" + str(idx) + \"\\n\")\n idx = idx + 1\n filedesc.close()\n\n if backend == \"iscsi_multiconnection\":\n output = check_output(\"lspci -nnn\", shell=True)\n bus_numbers = re.findall(\n \"([0-9][0-9]:[0-9][0-9].[0-9]) Non-Volatile memory controller\", output)\n nvme_ctrl_num = len(bus_numbers)\n all_luns = int(nvme_ctrl_num)\n\n filedesc = open(filename, 'a')\n filedesc.write(\"\\n[Gpt]\\n\")\n filedesc.write(\"\\nDisable Yes\\n\")\n filedesc.write(\"\\n[Nvme] \\n\")\n for i, value in enumerate(bus_numbers):\n filedesc.write(\n ' TransportID \"trtype:PCIe traddr:0000:{}\" Nvme{}\\n'.format(value, i))\n filedesc.write(\" RetryCount 128 \\n\")\n filedesc.write(\" Timeout 0 \\n\")\n filedesc.write(\" ActionOnTimeout None \\n\")\n filedesc.write(\" AdminPollRate 100000 \\n\")\n filedesc.write(\" HotplugEnable Yes \\n\")\n filedesc.write(\"\\n[Split] \\n\")\n filedesc.write(\" Split Nvme0n1 22 1 \\n\")\n filedesc.write(\" Split Nvme1n1 22 1 \\n\")\n filedesc.write(\" Split Nvme2n1 22 1 \\n\")\n filedesc.write(\" Split Nvme3n1 22 1 \\n\")\n filedesc.write(\" Split Nvme4n1 22 1 \\n\")\n filedesc.write(\" Split Nvme5n1 18 1 \\n\")\n idx = 0\n target_id = 1\n all_nodes = 22\n for i 
in range(all_luns):\n node = 0\n for i in range(all_nodes):\n filedesc.write(\"\\n[TargetNode\" + str(target_id) + \"]\\n\")\n filedesc.write(\" TargetName disk\" + str(target_id) + \"\\n\")\n filedesc.write(\" Mapping PortalGroup1 InitiatorGroup1\\n\")\n filedesc.write(\" AuthMethod Auto\\n\")\n filedesc.write(\" AuthGroup AuthGroup1\\n\")\n filedesc.write(\" UseDigest Auto\\n\")\n filedesc.write(\" QueueDepth 32\\n\")\n filedesc.write(\" LUN0 Nvme\" + str(idx) +\n \"n1p\" + str(node) + \"\\n\")\n node = node + 1\n target_id = target_id + 1\n idx = idx + 1\n filedesc.close()\n check_call(\"sed -i '/\\[TargetNode129/,$d' \" + filename, shell=True)\n\n\ndef generate_nvmf_tgt_file(backend, filename):\n if backend == \"nvmf_nvme\" or backend == \"nvmf_rxtxqueue\":\n check_call(\"sed -i '/\\[Nvme/,$d' \" + filename, shell=True)\n output = check_output(\"lspci -nnn\", shell=True)\n bus_numbers = re.findall(\n \"([0-9][0-9]:[0-9][0-9].[0-9]) Non-Volatile memory controller\", output)\n nvme_ctrl_num = len(bus_numbers)\n all_luns = int(nvme_ctrl_num)\n idx = 0\n filedesc = open(filename, 'a')\n filedesc.write(\"\\n[Gpt]\\n\")\n filedesc.write(\"\\nDisable Yes\\n\")\n filedesc.write(\"\\n[Nvme] \\n\")\n for i, value in enumerate(bus_numbers):\n filedesc.write(\n '\\n TransportId \"trtype:PCIe traddr:0000:{}\" Nvme{} \\n'.format(value, i))\n filedesc.write(\"\\n NvmeRetryCount {} \\n\".format(all_luns))\n filedesc.write(\"\\n ResetControllerOnTimeout Yes \\n\")\n filedesc.write(\"\\n NvmeTimeoutValue 30 \\n\")\n filedesc.write(\"\\n AdminPollRate 100000 \\n\")\n for i in range(all_luns):\n target_id = idx + 1\n filedesc.write(\"\\n[Subsystem\" + str(target_id) + \"]\\n\")\n filedesc.write(\" NQN nqn.2016-06.io.spdk:cnode\" +\n str(target_id) + \"\\n\")\n filedesc.write(\" Core 0\\n\")\n filedesc.write(\" Listen RDMA 192.168.3.11:4420\\n\")\n filedesc.write(\" SN SPDK\" + str(target_id) + \"\\n\")\n filedesc.write(\" Namespace Nvme\" + str(idx) + \"n1\" + \"\\n\")\n idx = idx + 1\n filedesc.close()\n\n if backend == \"nvmf_aiobackend\":\n check_call(\"sed -i '/\\[Subsystem/,$d' \" + filename, shell=True)\n filedesc = open(filename, 'a')\n filedesc.write(\"\\n[Gpt]\\n\")\n filedesc.write(\"\\nDisable Yes\\n\")\n all_luns = 2\n idx = 0\n for i in range(all_luns):\n target_id = idx + 1\n filedesc.write(\"\\n[Subsystem\" + str(target_id) + \"]\\n\")\n filedesc.write(\" NQN nqn.2016-06.io.spdk:cnode\" +\n str(target_id) + \"\\n\")\n filedesc.write(\" Core 0\\n\")\n filedesc.write(\" Listen RDMA 192.168.3.11:4420\\n\")\n filedesc.write(\" SN SPDK\" + str(target_id) + \"\\n\")\n filedesc.write(\" Namespace AIO\" + str(idx) + \"\\n\")\n idx = idx + 1\n filedesc.close()\n\n if backend == \"nvmf_malloc\":\n check_call(\"sed -i '/\\[Split/,$d' \" + filename, shell=True)\n all_luns = 8\n idx = 0\n filedesc = open(filename, 'a')\n filedesc.write(\"\\n[Gpt]\\n\")\n filedesc.write(\"\\nDisable Yes\\n\")\n for i in range(all_luns):\n target_id = idx + 1\n filedesc.write(\"\\n[Subsystem\" + str(target_id) + \"]\\n\")\n filedesc.write(\n \" NQN nqn.2016-06.io.spdk:cnode\" +\n str(target_id) +\n \"\\n\")\n filedesc.write(\" Core 0\\n\")\n filedesc.write(\" Listen RDMA 192.168.3.11:4420\\n\")\n filedesc.write(\" SN SPDK\" + str(target_id) + \"\\n\")\n filedesc.write(\" Namespace Malloc\" + str(idx) + \"\\n\")\n idx = idx + 1\n filedesc.close()\n\n if backend == \"nvmf_multiconnection\":\n check_call(\"sed -i '/\\[Nvme/,$d' \" + filename, shell=True)\n output = check_output(\"lspci -nnn\", shell=True)\n bus_numbers = 
re.findall(\n \"([0-9][0-9]:[0-9][0-9].[0-9]) Non-Volatile memory controller\", output)\n nvme_ctrl_num = len(bus_numbers)\n all_luns = int(nvme_ctrl_num)\n filedesc = open(filename, 'a')\n filedesc.write(\"\\n[Gpt]\\n\")\n filedesc.write(\"\\nDisable Yes\\n\")\n filedesc.write(\"\\n[Nvme] \\n\")\n for i, value in enumerate(bus_numbers):\n filedesc.write(\n '\\n TransportId \"trtype:PCIe traddr:0000:{}\" Nvme{} \\n'.format(value, i))\n filedesc.write(\"\\n NvmeRetryCount {} \\n\".format(all_luns))\n filedesc.write(\"\\n ResetControllerOnTimeout Yes \\n\")\n filedesc.write(\"\\n NvmeTimeoutValue 30 \\n\")\n filedesc.write(\"\\n AdminPollRate 100000 \\n\")\n filedesc.write(\"\\n[Split] \\n\")\n filedesc.write(\" Split Nvme0n1 22 1 \\n\")\n filedesc.write(\" Split Nvme1n1 22 1 \\n\")\n filedesc.write(\" Split Nvme2n1 22 1 \\n\")\n filedesc.write(\" Split Nvme3n1 22 1 \\n\")\n filedesc.write(\" Split Nvme4n1 22 1 \\n\")\n filedesc.write(\" Split Nvme5n1 18 1 \\n\")\n idx = 0\n target_id = 1\n all_nodes = 22\n for i in range(all_luns):\n node = 0\n for i in range(all_nodes):\n filedesc.write(\"\\n[Subsystem\" + str(target_id) + \"]\\n\")\n filedesc.write(\" NQN nqn.2016-06.io.spdk:cnode\" + str(target_id) + \"\\n\")\n filedesc.write(\" Core 0\\n\")\n filedesc.write(\" Listen RDMA 192.168.3.11:4420\\n\")\n filedesc.write(\" SN SPDK\" + str(target_id) + \"\\n\")\n filedesc.write(\" Namespace Nvme\" + str(idx) +\n \"n1p\" + str(node) + \"\\n\")\n node = node + 1\n target_id = target_id + 1\n idx = idx + 1\n filedesc.close()\n check_call(\"sed -i '/\\[Subsystem129/,$d' \" + filename, shell=True)\n\nif __name__ == \"__main__\":\n if (len(sys.argv) < 4):\n print \"usage:\"\n print \" \" + sys.argv[0] + \" <method name> <backend name> \"\n sys.exit(1)\n\n method_name = sys.argv[1]\n backend_name = sys.argv[2]\n\n if method_name == \"generate_iscsi_file\":\n iscsi_file = sys.argv[3]\n generate_iscsi_file(backend_name, iscsi_file)\n\n if method_name == \"generate_nvmf_tgt_file\":\n nvmf_file = sys.argv[3]\n generate_nvmf_tgt_file(backend_name, nvmf_file)\n\n if method_name == \"write_fio_config\":\n write_fio_config(config_file, sections, **kwargs)\n",
"id": "5487400",
"language": "Python",
"matching_score": 2.919869899749756,
"max_stars_count": 3,
"path": "tests/lib/Test_base_utils.py"
},
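A hedged usage sketch for `write_fio_config()` defined above: it pushes arbitrary option/value pairs into one section of an INI-style file through `ParseConfig` (Python 2, matching the module). The file name and fio options below are illustrative, not taken from the repository.

```python
from Test_base_utils import write_fio_config  # assumes tests/lib is on sys.path

# Every keyword argument becomes an "option = value" line under [global].
write_fio_config(
    "fio_randread.cfg",   # hypothetical job file, created or extended in place
    "global",
    ioengine="libaio",
    direct="1",
    rw="randread",
    bs="4k",
    iodepth="32",
)
```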
{
"content": "#!/usr/bin/env python\n# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport re\nimport time\nimport os\nfrom subprocess import check_call, call, check_output, Popen, PIPE\nfrom threading import Thread\nimport threading\n\n\nclass FileSystem_integrity:\n\n def __init__(self, filesystemtype, backendname):\n self.filetype = filesystemtype\n self.backend = backendname\n self.kernel_package_path = \"/home/linux-config.tar.gz\"\n print \"The kernel package's path is \", self.kernel_package_path\n kernel = call(\"ls /home/linux-config.tar.gz\", shell=True)\n if kernel != 0:\n print \"Please add kernel source.\"\n time.sleep(300)\n self.iso_package_path = \"/home/RHEL6.5-20131111.0-Server-x86_64-DVD1.iso\"\n print \"The iso package's path is \", self.iso_package_path\n iso = call(\n \"ls /home/RHEL6.5-20131111.0-Server-x86_64-DVD1.iso\",\n shell=True)\n if iso != 0:\n print \"Please add iso source.\"\n time.sleep(300)\n call(\"rm -rf /home/devicedev*\", shell=True)\n\n def getdevicepath(self):\n if self.backend == \"nvmf_nvme\":\n output = check_output(\"lsblk -l -o NAME\", shell=True)\n devices = re.findall(\"(nvme[0-9]n1)\\n\", output)\n self.device_paths = ['/dev/' + dev for dev in devices]\n if self.backend == \"iscsi_nvme\":\n output = check_output(\"iscsiadm -m session -P 3\", shell=True)\n devices = re.findall(\"Attached scsi disk (sd[a-z]+)\", output)\n time.sleep(10)\n self.device_paths = ['/dev/' + dev for dev in devices]\n\n def format_devices(self):\n print \"the file system for the devices is: \", self.filetype\n self.new_dev_paths = []\n for dev_path in self.device_paths:\n if self.backend == \"nvmf_nvme\":\n dev_paths = (dev_path) + \"p\"\n if self.backend == \"iscsi_nvme\":\n dev_paths = dev_path\n cmd = \"parted -s {} mklabel msdos\".format(dev_path)\n check_call(cmd, shell=True)\n dev = (dev_path).lstrip('/dev/')\n\n optimal_io_size = check_output(\n \"cat /sys/block/{}/queue/optimal_io_size\".format(dev), shell=True)\n alignment_offset 
= check_output(\n \"cat /sys/block/{}/alignment_offset\".format(dev), shell=True)\n physical_block_size = check_output(\n \"cat /sys/block/{}/queue/physical_block_size\".format(dev), shell=True)\n optimal_io_size = int(optimal_io_size)\n alignment_offset = int(alignment_offset)\n physical_block_size = int(physical_block_size)\n\n sector_num = (optimal_io_size + alignment_offset) / \\\n physical_block_size\n if sector_num == 0:\n sector_num = 2048\n sector_number = str(sector_num) + \"s\"\n\n cmd = 'parted -s {0} mkpart primary {1} 100% '.format(\n dev_path, sector_number)\n check_call(cmd, shell=True)\n\n new_dev_path = (dev_paths) + \"1\"\n\n if \"btrfs\" == self.filetype:\n call(\"mkfs.btrfs -f {}\".format(new_dev_path), shell=True)\n\n if \"ext4\" == self.filetype:\n call(\"mkfs.ext4 -F {}\".format(new_dev_path), shell=True)\n\n if \"xfs\" == self.filetype:\n call(\"mkfs.xfs -f {}\".format(new_dev_path), shell=True)\n\n self.new_dev_paths.append(new_dev_path)\n\n def compilekernel(self):\n retval = \"\"\n call(\"mkdir -p /home/devicedev00\", shell=True)\n try:\n check_call(\n \"mount -o rw {} /home/devicedev00\".format(self.new_dev_paths[0]), shell=True)\n except:\n os._exit(1)\n check_call(\n \"dd if={} of=/home/devicedev00/linux_package\".format(self.kernel_package_path), shell=True)\n cmd = \"cd /home/devicedev00/ && tar -xvf linux_package\"\n tarpro = Popen(cmd, shell=True)\n rc = tarpro.wait()\n if rc != 0:\n retval = \"tar command failed\"\n return retval\n print \"The kernel version is 4.3.3\"\n cmd = \"cd /home/devicedev00/linux-4.3.3\"\n makepro = Popen(cmd, shell=True)\n rc = makepro.wait()\n if rc != 0:\n retval = \"make command failed\"\n else:\n retval = \"All tests passed\"\n call(\"umount /home/devicedev00\", shell=True)\n call(\"rm -rf /home/devicedev00\", shell=True)\n time.sleep(20)\n\n return retval\n\n def onlycompilekernel(self, count):\n time.sleep(10)\n self.getdevicepath()\n self.format_devices()\n retval = \"\"\n call(\"rm -rf /home/devicedev0*\", shell=True)\n if len(self.new_dev_paths) < 4:\n number = len(self.new_dev_paths)\n else:\n number = 4\n for i in range(number):\n dir_name = \"/home/devicedev0\" + str(i) + \"/\"\n call(\"mkdir -p {}\".format(dir_name), shell=True)\n try:\n call(\n \"mount {} {}\".format(\n self.new_dev_paths[i],\n dir_name),\n shell=True)\n except:\n os._exit(1)\n call(\"dd if={} of={}/linux_package\".format(self.kernel_package_path,\n dir_name), shell=True)\n call(\"cd {} && tar -xvf linux_package\".format(dir_name), shell=True)\n for i in range(count):\n cmd = \"cd /home/devicedev00/linux* && make clean && make -j 64 &\"\n cmd1 = \"cd /home/devicedev01/linux* && make clean && make -j 64 &\"\n cmd2 = \"cd /home/devicedev02/linux* && make clean && make -j 64 &\"\n cmd3 = \"cd /home/devicedev03/linux* && make clean && make -j 64\"\n makepro = call(cmd, shell=True)\n makepro1 = call(cmd1, shell=True)\n makepro2 = call(cmd2, shell=True)\n time.sleep(60)\n makepro3 = call(cmd3, shell=True)\n if makepro != 0 or makepro1 != 0 or makepro2 != 0 or makepro3 != 0:\n retval = \"make command failed\"\n break\n else:\n retval = \"All tests passed\"\n time.sleep(30)\n call(\"umount /home/devicedev* &\", shell=True)\n time.sleep(60)\n call(\"rm -rf /home/devicedev*\", shell=True)\n time.sleep(20)\n\n return retval\n\n def run_threading_function(self, dev_path, number, thread_num):\n if number <= 4:\n print \"This is device %s\", number\n else:\n sys.exit(0)\n dev_name = dev_path\n dir_name = \"/home/devicedev0\" + str(number) + \"/\"\n if 
os.path.isdir(dir_name):\n pass\n else:\n call(\"mkdir -p {}\".format(dir_name), shell=True)\n self.all_dir_name.append(dir_name)\n if int(self.current_loop) == 0 and 0 == int(thread_num):\n try:\n check_call(\n \"mount -o rw {} {}\".format(dev_name, dir_name), shell=True)\n except:\n print \"mount command run failed \"\n self.fail_count = 1\n return \"mount command fail\"\n ddfile = dir_name + \"newddfile-\" + str(thread_num)\n try:\n check_call(\"touch {}\".format(ddfile), shell=True)\n except:\n print \"touch command failed\"\n self.fail_count = 1\n return \"touch comand failed\"\n cmd = ' cd {0} && dd if={1} of={2} bs=1M'.format(\n dir_name, self.dd_file_path, ddfile)\n ddcmd = Popen(cmd, shell=True)\n rc = ddcmd.wait()\n if rc != 0:\n print \"dd cmd run fail\"\n self.fail_count = 1\n return \"dd cmd failed\"\n try:\n self.new_sha256_value = check_output(\n \"sha256sum {}\".format(ddfile), shell=True)\n except:\n print \"sha256sum command failed\"\n return \"sha256sum command failed\"\n self.new_sha256_value = re.split('[ ]+', self.new_sha256_value)[0]\n if self.sha256_value == self.new_sha256_value:\n pass\n else:\n print \"sha256 comparition failed.\"\n self.fail_count += 1\n print self.new_sha256_value\n print self.sha256_value\n try:\n check_call(\"rm -rf {}\".format(ddfile), shell=True)\n except:\n print \"rm command failed\"\n self.fail_count = 1\n return \"rm command failed\"\n\n def run_single_thread(self, dev_paths, index):\n all_dev_paths = dev_paths\n num_dev = len(all_dev_paths)\n if num_dev < 4:\n print \"This is device %s\", number\n else:\n num_dev = 4\n for i in range(num_dev):\n thread1 = Thread(target=self.run_threading_function,\n args=(all_dev_paths[i], i, index,))\n thread1.start()\n\n num1 = threading.activeCount()\n num1 = int(num1)\n\n def run_thread(self):\n file_count = 8\n if \"large\" == self.test_type:\n for i in range(file_count):\n self.run_single_thread(self.new_dev_paths, i)\n\n time.sleep(12)\n while True:\n cmd = check_output(\"ps -C dd|wc -l\", shell=True)\n if 1 == int(cmd):\n break\n else:\n continue\n else:\n for j in range(file_count):\n self.run_single_thread(self.new_dev_paths, j)\n time.sleep(1)\n\n def unmount_dir(self):\n print self.all_dir_name\n for dir_name in self.all_dir_name:\n call(\"umount {}\".format(dir_name), shell=True)\n call(\"rm -rf /home/nvme*\", shell=True)\n\n def run_the_cycles(self):\n self.run_thread()\n while True:\n num = threading.activeCount()\n\n if 1 == num:\n if int(self.current_loop) == (int(self.run_count) - 1):\n self.unmount_dir()\n break\n\n def run_filesystem_integrity(self, run_count, test_type):\n self.run_count = int(run_count)\n self.current_loop = 90\n if test_type == 'large':\n self.dd_file_path = self.iso_package_path\n elif test_type == 'small':\n self.dd_file_path = self.kernel_package_path\n else:\n self.dd_file_path = self.kernel_package_path\n self.test_type = test_type\n time.sleep(10)\n self.sha256_value = check_output(\n \"sha256sum {}\".format(self.dd_file_path), shell=True)\n time.sleep(10)\n self.sha256_value = re.split('[ ]+', self.sha256_value)[0]\n self.fail_count = 0\n time.sleep(10)\n self.getdevicepath()\n self.format_devices()\n\n self.all_dir_name = []\n for i in range(self.run_count):\n self.current_loop = i\n self.run_the_cycles()\n print self.fail_count\n if self.fail_count != 0:\n print \"some tests failed\"\n return \"some tests failed\"\n else:\n retval = self.compilekernel()\n return retval\n",
"id": "7593055",
"language": "Python",
"matching_score": 0.9238834381103516,
"max_stars_count": 3,
"path": "tests/lib/Filesystem_integrity_test.py"
},
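The integrity check at the core of `run_threading_function()` is: hash the source file, copy it onto the filesystem under test, re-hash the copy, and compare. A minimal local sketch of that round trip, with `hashlib` in place of the `sha256sum` shell call and placeholder paths:

```python
import hashlib
import shutil

def sha256_of(path, chunk=1024 * 1024):
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for block in iter(lambda: handle.read(chunk), b""):
            digest.update(block)
    return digest.hexdigest()

def copy_and_verify(src, dst):
    # Copy src onto the mounted device and confirm the copy is bit-identical.
    expected = sha256_of(src)
    shutil.copyfile(src, dst)
    return sha256_of(dst) == expected

# e.g. copy_and_verify("/home/linux-config.tar.gz", "/home/devicedev00/newddfile-0")
```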
{
"content": "# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport re\nfrom time import sleep\nfrom settings import NICS, DRIVERS\nfrom crb import Crb\nfrom net_device import GetNicObj\n\n\nclass Dut(Crb):\n\n PCI_DEV_CACHE_KEY = 'dut_pci_dev_info'\n\n def __init__(self, crb, serializer):\n self.NAME = 'dut'\n super(Dut, self).__init__(crb, serializer, self.NAME)\n self.host_init_flag = False\n self.tester = None\n self.ports_info = None\n self.ports_map = []\n\n def get_ip_address(self):\n \"\"\"\n Get DUT's ip address.\n \"\"\"\n return self.crb['IP']\n\n def get_password(self):\n \"\"\"\n Get DUT's login password.\n \"\"\"\n return self.crb['pass']\n\n def dut_prerequisites(self):\n \"\"\"\n Configure DUT's NICs.\n \"\"\"\n self.pci_devices_information()\n self.restore_interfaces()\n\n def get_ports(self):\n \"\"\"\n Get DUT's NICs information.\n \"\"\"\n ethname = []\n drivername = []\n driver = []\n nic = []\n for key in DRIVERS:\n nic = key\n driver = DRIVERS[key]\n for (pci_bus, pci_id) in self.pci_devices_info:\n addr_array = pci_bus.split(':')\n port = GetNicObj(self, addr_array[0], addr_array[1], addr_array[2])\n eth = port.get_interface_name()\n self.enable_ipv6(eth)\n if len(eth) >= 12:\n status1 = eth.split()\n self.enable_ipv6(status1[0])\n self.enable_ipv6(status1[1])\n out1 = self.send_expect(\"ethtool -i %s\" % status1[0], \"# \")\n out2 = self.send_expect(\"ethtool -i %s\" % status1[1], \"# \")\n status2 = re.findall(r\"driver:\\s+(.*)\", out1)\n status3 = re.findall(r\"driver:\\s+(.*)\", out2)\n if status2:\n drivername.append(status2[0])\n ethname.append(status1[0])\n if status3:\n drivername.append(status3[0])\n ethname.append(status1[1])\n if not status2:\n self.logger.error(\"ERROR: unexpected output\")\n if not status3:\n self.logger.error(\"ERROR: unexpected output\")\n else:\n out = self.send_expect(\"ethtool -i %s\" % eth, \"# \")\n status = re.findall(r\"driver:\\s+(.*)\", out)\n if status:\n drivername.append(status[0])\n 
ethname.append(eth)\n if not status:\n self.logger.error(\"ERROR: unexpected output\")\n return ethname, drivername\n\n def restore_interfaces(self):\n \"\"\"\n Restore Linux interfaces.\n \"\"\"\n if self.skip_setup:\n return\n try:\n for (pci_bus, pci_id) in self.pci_devices_info:\n addr_array = pci_bus.split(':')\n port = GetNicObj(self, addr_array[0], addr_array[\n 1], addr_array[2])\n itf = port.get_interface_name()\n self.enable_ipv6(itf)\n self.send_expect(\"ifconfig %s up\" % itf, \"# \")\n if port.get_interface2_name():\n itf = port.get_interface2_name()\n self.enable_ipv6(itf)\n self.send_expect(\"ifconfig %s up\" % itf, \"# \")\n except Exception as e:\n self.logger.error(\" !!! Restore ITF: \" + e.message)\n sleep(2)\n\n def close(self):\n \"\"\"\n Close ssh session of DUT.\n \"\"\"\n if self.session:\n self.session.close()\n self.session = None\n if self.alt_session:\n self.alt_session.close()\n self.alt_session = None\n if self.host_init_flag:\n self.host_session.close()\n\n def crb_exit(self):\n \"\"\"\n Recover all resource before crb exit\n \"\"\"\n self.close()\n",
"id": "7901102",
"language": "Python",
"matching_score": 4.5081257820129395,
"max_stars_count": 3,
"path": "framework/dut.py"
},
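`Dut.get_ports()` above identifies each port's kernel driver by scraping `ethtool -i` output. The same parsing step as a standalone helper, using the regex from the class but a local subprocess instead of the SSH session (the function name is mine):

```python
import re
import subprocess

def interface_driver(ifname):
    # Returns the "driver:" field of `ethtool -i <ifname>`, or None if absent.
    out = subprocess.check_output(["ethtool", "-i", ifname]).decode()
    found = re.findall(r"driver:\s+(.*)", out)
    return found[0].strip() if found else None
```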
{
"content": "# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport re\nfrom settings import TIMEOUT\nfrom ssh_connection import SSHConnection\nfrom logger import getLogger\n\n\nclass Crb(object):\n\n \"\"\"\n Get the information of NIC and setup for SPDK.\n \"\"\"\n\n def __init__(self, crb, serializer, name):\n self.crb = crb\n self.skip_setup = False\n self.serializer = serializer\n self.ports_info = None\n self.sessions = []\n self.name = name\n self.logger = getLogger(name)\n self.session = SSHConnection(self.get_ip_address(), name,\n self.get_password())\n self.session.init_log(self.logger)\n self.alt_session = SSHConnection(\n self.get_ip_address(),\n name + '_alt',\n self.get_password())\n self.alt_session.init_log(self.logger)\n\n def send_expect(self, cmds, expected, timeout=TIMEOUT,\n alt_session=False, verify=False):\n \"\"\"\n Send commands to target and return string before expected string.\n If not, TimeoutException will be raised.\n \"\"\"\n if alt_session:\n return self.alt_session.session.send_expect(\n cmds, expected, timeout, verify)\n return self.session.send_expect(cmds, expected, timeout, verify)\n\n def get_session_output(self, timeout=TIMEOUT):\n \"\"\"\n Get session output message before timeout\n \"\"\"\n return self.session.get_session_before(timeout)\n\n def set_speedup_options(self, skip_setup):\n \"\"\"\n Configure skip network topology scan or skip SPDK packet setup.\n \"\"\"\n self.skip_setup = skip_setup\n\n def set_directory(self, base_dir):\n \"\"\"\n Set SPDK package folder name.\n \"\"\"\n self.base_dir = base_dir\n\n def set_path(self, dpdk_dir):\n \"\"\"\n Add DPDK package path name.\n \"\"\"\n self.dpdk_dir = dpdk_dir\n\n def pci_devices_information(self):\n self.pci_devices_information_uncached()\n self.serializer.save(self.PCI_DEV_CACHE_KEY, self.pci_devices_info)\n\n def pci_devices_information_uncached(self):\n out = self.send_expect(\n \"lspci -Dnn | grep -i eth\", \"# \", alt_session=True)\n rexp = 
r\"([\\da-f]{4}:[\\da-f]{2}:[\\da-f]{2}.\\d{1}) .*Eth.*?ernet .*?([\\da-f]{4}:[\\da-f]{4})\"\n pattern = re.compile(rexp)\n match = pattern.findall(out)\n self.pci_devices_info = []\n for i in range(len(match)):\n self.pci_devices_info.append((match[i][0], match[i][1]))\n\n def get_pci_dev_driver(self, domain_id, bus_id, devfun_id):\n out = self.send_expect(\"cat /sys/bus/pci/devices/%s\\:%s\\:%s/uevent\" %\n (domain_id, bus_id, devfun_id), \"# \", alt_session=True)\n rexp = r\"DRIVER=(.+?)\\r\"\n pattern = re.compile(rexp)\n match = pattern.search(out)\n if not match:\n return None\n return match.group(1)\n\n def enable_ipv6(self, intf):\n \"\"\"\n Enable ipv6 of of specified interface\n \"\"\"\n if intf != 'N/A':\n self.send_expect(\"sysctl net.ipv6.conf.%s.disable_ipv6=0\" %\n intf, \"# \", alt_session=True)\n",
"id": "1823987",
"language": "Python",
"matching_score": 3.4129607677459717,
"max_stars_count": 3,
"path": "framework/crb.py"
},
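`Crb.pci_devices_information_uncached()` builds its device list by matching `lspci -Dnn` output against a fixed regex. The scan can be reproduced locally as below; the command and pattern are copied from the class, only the subprocess wrapper is new (note `check_output` raises if grep finds no Ethernet devices):

```python
import re
import subprocess

out = subprocess.check_output("lspci -Dnn | grep -i eth", shell=True).decode()
rexp = r"([\da-f]{4}:[\da-f]{2}:[\da-f]{2}.\d{1}) .*Eth.*?ernet .*?([\da-f]{4}:[\da-f]{4})"
# Each entry is (PCI address, vendor:device id), e.g. ('0000:18:00.0', '8086:1572').
pci_devices_info = re.findall(rexp, out)
print(pci_devices_info)
```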
{
"content": "# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom functools import wraps\nimport settings\nfrom crb import Crb\nfrom settings import TIMEOUT\n\nNICS_LIST = []\n\n\nclass NetDevice(object):\n\n def __init__(self, crb, domain_id, bus_id, devfun_id):\n if not isinstance(crb, Crb):\n raise Exception(\" Please input the instance of Crb!!!\")\n self.crb = crb\n self.domain_id = domain_id\n self.bus_id = bus_id\n self.devfun_id = devfun_id\n self.pci = domain_id + ':' + bus_id + ':' + devfun_id\n self.pci_id = get_pci_id(crb, domain_id, bus_id, devfun_id)\n self.default_driver = settings.get_nic_driver(self.pci_id)\n self.intf_name = 'N/A'\n self.intf2_name = None\n self.get_interface_name()\n\n def __send_expect(self, cmds, expected, timeout=TIMEOUT, alt_session=True):\n \"\"\"\n Wrap the crb's session as private session for sending expect.\n \"\"\"\n return self.crb.send_expect(\n cmds, expected, timeout=timeout, alt_session=alt_session)\n\n def nic_has_driver(func):\n \"\"\"\n Check if the NIC has a driver.\n \"\"\"\n @wraps(func)\n def wrapper(*args, **kwargs):\n nic_instance = args[0]\n nic_instance.current_driver = nic_instance.get_nic_driver()\n if not nic_instance.current_driver:\n return ''\n return func(*args, **kwargs)\n return wrapper\n\n def get_nic_driver(self):\n \"\"\"\n Get the NIC driver.\n \"\"\"\n return self.crb.get_pci_dev_driver(\n self.domain_id, self.bus_id, self.devfun_id)\n\n @nic_has_driver\n def get_interface_name(self):\n \"\"\"\n Get interface name of NICs.\n \"\"\"\n driver = self.current_driver\n driver_alias = driver.replace('-', '_')\n try:\n get_interface_name = getattr(\n self, 'get_interface_name_%s' %\n driver_alias)\n except Exception as e:\n generic_driver = 'generic'\n get_interface_name = getattr(\n self, 'get_interface_name_%s' %\n generic_driver)\n out = get_interface_name(self.domain_id, self.bus_id, self.devfun_id)\n if \"No such file or directory\" in out:\n self.intf_name = 'N/A'\n else:\n self.intf_name 
= out\n return self.intf_name\n\n def get_interface_name_generic(self, domain_id, bus_id, devfun_id):\n \"\"\"\n Get the interface name by the default way.\n \"\"\"\n command = 'ls --color=never /sys/bus/pci/devices/%s\\:%s\\:%s/net' % (\n domain_id, bus_id, devfun_id)\n return self.__send_expect(command, '# ')\n\n def get_interface2_name(self):\n \"\"\"\n Get interface name of second port of this pci device.\n \"\"\"\n return self.intf2_name\n\n\ndef get_pci_id(crb, domain_id, bus_id, devfun_id):\n pass\n\n\ndef add_to_list(host, obj):\n \"\"\"\n Add NICs object to global structure\n Parameter 'host' is ip address, 'obj' is netdevice object\n \"\"\"\n nic = {}\n nic['host'] = host\n nic['pci'] = obj.pci\n nic['port'] = obj\n NICS_LIST.append(nic)\n\n\ndef get_from_list(host, domain_id, bus_id, devfun_id):\n \"\"\"\n Get NICs object from global structure\n Parameter will by host ip, pci domain id, pci bus id, pci function id\n \"\"\"\n for nic in NICS_LIST:\n if host == nic['host']:\n pci = ':'.join((domain_id, bus_id, devfun_id))\n if pci == nic['pci']:\n return nic['port']\n return None\n\n\ndef GetNicObj(crb, domain_id, bus_id, devfun_id):\n \"\"\"\n Get NICs object. If NICs has been initialized, just return object.\n \"\"\"\n obj = get_from_list(crb.crb['My IP'], domain_id, bus_id, devfun_id)\n if obj:\n return obj\n pci_id = get_pci_id(crb, domain_id, bus_id, devfun_id)\n nic = settings.get_nic_name(pci_id)\n obj = NetDevice(crb, domain_id, bus_id, devfun_id)\n add_to_list(crb.crb['My IP'], obj)\n return obj\n",
"id": "11501996",
"language": "Python",
"matching_score": 1.8614383935928345,
"max_stars_count": 3,
"path": "framework/net_device.py"
},
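`NetDevice.get_interface_name_generic()` resolves a PCI address to its netdev name by listing the device's `net/` directory in sysfs over SSH. The same lookup done locally (the helper name and the example interface name are mine):

```python
import os

def pci_to_interface(domain_id, bus_id, devfun_id):
    # e.g. pci_to_interface('0000', '18', '00.0') -> 'ens785f0',
    # or 'N/A' when the device is not bound to a kernel network driver.
    path = "/sys/bus/pci/devices/%s:%s:%s/net" % (domain_id, bus_id, devfun_id)
    try:
        names = os.listdir(path)
    except OSError:
        return "N/A"
    return names[0] if names else "N/A"
```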
{
"content": "# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nfrom settings import NICS, DRIVERS\nfrom settings import load_global_setting, HOST_DRIVER_SETTING\nfrom dut import Dut\nfrom tester import Tester\n\n\nclass SPDKdut(Dut):\n \"\"\"\n SPDK project class will be called set_target function to setup\n build, memory and kernel module.\n \"\"\"\n\n def __init__(self, crb, serializer):\n super(SPDKdut, self).__init__(crb, serializer)\n self.testpmd = None\n\n def set_target(self, target):\n self.target = target\n drivername = load_global_setting(HOST_DRIVER_SETTING)\n if drivername == DRIVERS['ConnectX4']:\n out = self.send_expect(\"lsmod | grep mlx5_ib\", \"#\")\n if \"mlx5_ib\" not in out:\n self.send_expect(\"modprobe mlx5_core\", \"#\", 70)\n self.send_expect(\"modprobe mlx5_ib\", \"#\", 70)\n if drivername == DRIVERS['ConnectX3']:\n out = self.send_expect(\"lsmod | grep mlx4_ib\", \"#\")\n if \"mlx4_ib\" not in out:\n self.send_expect(\"modprobe mlx4_en\", \"#\", 70)\n self.send_expect(\"modprobe mlx4_core\", \"#\", 70)\n self.send_expect(\"modprobe mlx4_ib\", \"#\", 70)\n if drivername == DRIVERS['chelsio_40gb']:\n out = self.send_expect(\"lsmod | grep iw_cxgb4\", \"#\")\n if \"iw_cxgb4\" not in out:\n self.send_expect(\"modprobe cxgb4\", \"#\", 70)\n self.send_expect(\"modprobe iw_cxgb4\", \"#\", 70)\n self.setup_modules(target)\n if not self.skip_setup:\n self.build_install_spdk(target)\n\n def setup_modules(self, target):\n drivername = load_global_setting(HOST_DRIVER_SETTING)\n if drivername == \"ConnectX4\" or \"ConnectX3\":\n out = self.send_expect(\"lsmod | grep ib_cm\", \"#\")\n if \"ib_cm\" not in out:\n self.send_expect(\"modprobe ib_addr\", \"#\", 70)\n self.send_expect(\"modprobe ib_cm\", \"#\", 70)\n self.send_expect(\"modprobe ib_core\", \"#\", 70)\n self.send_expect(\"modprobe ib_mad\", \"#\", 70)\n self.send_expect(\"modprobe ib_sa\", \"#\", 70)\n self.send_expect(\"modprobe ib_ucm\", \"#\", 70)\n self.send_expect(\"modprobe 
ib_umad\", \"#\", 70)\n self.send_expect(\"modprobe ib_uverbs\", \"#\", 70)\n self.send_expect(\"modprobe iw_cm\", \"#\", 70)\n self.send_expect(\"modprobe rdma_cm\", \"#\", 70)\n self.send_expect(\"modprobe rdma_ucm\", \"#\", 70)\n print \" load some kernel modules\"\n print \" kernel modules has loaded\"\n\n def build_install_spdk(self, target, extra_options=''):\n self.send_expect(\"make clean\", \"#\", 20)\n drivername = load_global_setting(HOST_DRIVER_SETTING)\n if drivername == \"ConnectX4\" or \"ConnectX3\":\n self.send_expect(\"./configure --with-rdma\", \"#\", 100)\n else:\n self.send_expect(\"./configure\", \"#\", 100)\n out = self.send_expect(\"make -j\", \"# \", 100)\n if(\"Error\" in out or \"No rule to make\" in out):\n self.logger.error(\"ERROR - try to compile again\")\n out = self.send_expect(\"make\", \"# \", 100)\n assert (\"Error\" not in out), \"Compilation error...\"\n assert (\"No rule to make\" not in out), \"No rule to make error...\"\n self.send_expect(\"NRHUGE=12288 %s\" % r'./scripts/setup.sh', \"#\", 200)\n\n def prepare_package(self):\n if not self.skip_setup:\n depot = \"../dep\"\n gitLabel = \"master\"\n gitLabel1 = \"spdk-17.05\"\n gitURL = r\"https://github.com/spdk/spdk.git\"\n gitURL1 = r\"https://github.com/spdk/dpdk.git\"\n gitPrefix = r\"spdk/\"\n gitPrefix1 = r\"dpdk/\"\n package = r\"../dep/spdk.tar.gz\"\n package1 = r\"../dep/dpdk.tar.gz\"\n if os.path.exists(\"%s/%s\" % (depot, gitPrefix)) is True:\n ret = os.system(\n \"cd %s/%s && git pull --force\" %\n (depot, gitPrefix))\n else:\n print \"git clone %s %s/%s\" % (gitURL, depot, gitPrefix)\n ret = os.system(\n \"git clone %s %s/%s\" %\n (gitURL, depot, gitPrefix))\n if ret is not 0:\n print \"Clone spdk failed!!!\"\n raise EnvironmentError\n if os.path.exists(\"%s/%s\" % (depot, gitPrefix1)) is True:\n ret1 = os.system(\n \"cd %s/%s && git pull --force\" %\n (depot, gitPrefix1))\n else:\n print \"git clone %s %s/%s\" % (gitURL1, depot, gitPrefix1)\n ret1 = os.system(\n \"git clone %s %s/%s\" %\n (gitURL1, depot, gitPrefix1))\n if ret1 is not 0:\n print \"Clone spdk failed!!!\"\n raise EnvironmentError\n ret = os.system(\n \"cd %s/%s && git archive --format=tar.gz --prefix=%s/ %s -o ../%s\" %\n (depot, gitPrefix, gitPrefix, gitLabel, package))\n if ret is not 0:\n print \"Zip spdk failed!!!\"\n raise EnvironmentError\n assert (os.path.isfile(package) is True), \"Invalid spdk package\"\n ret1 = os.system(\n \"cd %s/%s && git archive --format=tar.gz --prefix=%s/ %s -o ../%s\" %\n (depot, gitPrefix1, gitPrefix1, gitLabel1, package1))\n if ret1 is not 0:\n print \"Zip dpdk failed!!!\"\n raise EnvironmentError\n assert (os.path.isfile(package1) is True), \"Invalid dpdk package\"\n\n p_dir, _ = os.path.split(self.base_dir)\n q_dir, _ = os.path.split(self.dpdk_dir)\n dst_dir = \"/tmp/\"\n out = self.send_expect(\n \"ls %s && cd %s\" %\n (dst_dir, p_dir), \"#\", verify=True)\n if out == -1:\n raise ValueError(\"Directiry %s or %s does not exist,\"\n \"please check params -d\"\n % (p_dir, dst_dir))\n self.session.copy_file_to(package, dst_dir)\n self.session.copy_file_to(package1, dst_dir)\n self.send_expect(\"ulimit -c unlimited\", \"#\")\n self.send_expect(\"rm -rf %s\" % self.base_dir, \"#\")\n out = self.send_expect(\"tar zxf %s%s -C %s\" %\n (dst_dir, package.split('/')[-1], p_dir), \"# \", 20, verify=True)\n if out == -1:\n raise ValueError(\"Extract spdk package to %s failure,\"\n \"please check params -d\"\n % (p_dir))\n self.send_expect(\"rm -rf %s\" % self.dpdk_dir, \"#\")\n out1 = 
self.send_expect(\"tar zxf %s%s -C %s\" %\n (dst_dir, package1.split('/')[-1], q_dir), \"# \", 20, verify=True)\n if out1 == -1:\n raise ValueError(\"Extract spdk package to %s failure,\"\n \"please check params -d\"\n % (q_dir))\n out = self.send_expect(\"cd %s\" % self.base_dir,\n \"# \", 20, verify=True)\n if out == -1:\n raise ValueError(\"spdk dir %s mismatch, please check params -d\"\n % self.base_dir)\n\n def prerequisites(self):\n self.prepare_package()\n self.dut_prerequisites()\n\n\nclass SPDKtester(Tester):\n\n def __init__(self, crb, serializer):\n self.NAME = \"tester\"\n super(SPDKtester, self).__init__(crb, serializer)\n\n def prerequisites(self, perf_test=False):\n self.tester_prerequisites()\n",
"id": "12770177",
"language": "Python",
"matching_score": 2.1962695121765137,
"max_stars_count": 3,
"path": "framework/project_spdk.py"
},
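`SPDKdut.setup_modules()` above loads the RDMA stack only when `lsmod` does not already list it; note that the guard `drivername == "ConnectX4" or "ConnectX3"` is always truthy in Python, so the intent is presumably a membership test. A local sketch of that intended pattern (the module subset is taken from the original list):

```python
import subprocess

RDMA_MODULES = ["ib_core", "ib_cm", "iw_cm", "rdma_cm", "rdma_ucm", "ib_uverbs"]

def ensure_rdma_modules(driver_name):
    # Membership test instead of the always-true `== "ConnectX4" or "ConnectX3"` guard.
    if driver_name in ("ConnectX4", "ConnectX3"):
        loaded = subprocess.check_output("lsmod", shell=True).decode()
        for module in RDMA_MODULES:
            if module not in loaded:
                subprocess.call(["modprobe", module])
```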
{
"content": "# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport os\nimport sys\nimport re\n\nFOLDERS = {\n 'Framework': 'framework',\n 'Testscripts': 'tests',\n 'Depends': 'dep',\n 'Output': 'output',\n}\n\n\"\"\"\nNics and its identifiers supported by the framework.\n\"\"\"\nNICS = {\n 'niantic': '8086:10fb',\n 'ironpond': '8086:151c',\n 'twinpond': '8086:1528',\n 'twinville': '8086:1512',\n 'sageville': '8086:1563',\n 'sagepond': '8086:15ad',\n 'magnolia_park': '8086:15ce',\n 'springfountain': '8086:154a',\n 'fortville_eagle': '8086:1572',\n 'fortville_spirit': '8086:1583',\n 'fortville_spirit_single': '8086:1584',\n 'fortpark': '8086:374c',\n 'fortpark_TLV': '8086:37d0',\n 'ConnectX3': '15b3:1007',\n 'ConnectX4': '15b3:1015',\n 'fortville_25g': '8086:158b',\n 'chelsio_40gb': '1425:5410',\n 'chelsio_unknow': '1425:5010',\n}\n\nDRIVERS = {\n 'niantic': 'ixgbe',\n 'ironpond': 'ixgbe',\n 'twinpond': 'ixgbe',\n 'twinville': 'ixgbe',\n 'sageville': 'ixgbe',\n 'sagepond': 'ixgbe',\n 'magnolia_park': 'ixgbe',\n 'springfountain': 'ixgbe',\n 'fortville_eagle': 'i40e',\n 'fortville_spirit': 'i40e',\n 'fortville_spirit_single': 'i40e',\n 'fortpark': 'i40e',\n 'fortpark_TLV': 'i40e',\n 'ConnectX3': 'mlx4_en',\n 'ConnectX4': 'mlx5_core',\n 'fortville_25g': 'i40e',\n 'chelsio_40gb': 'cxgb4',\n 'chelsio_unknow': 'cxgb4',\n}\n\nUSERNAME = 'root'\n\n\"\"\"\nDefault session timeout.\n\"\"\"\nTIMEOUT = 15\n\n\"\"\"\nThe log name seperater.\n\"\"\"\nLOG_NAME_SEP = '.'\n\n\"\"\"\nglobal environment variable\n\"\"\"\nSPDK_ENV_PAT = r\"SPDK_*\"\nPERF_SETTING = \"SPDK_PERF_ONLY\"\nFUNC_SETTING = \"SPDK_FUNC_ONLY\"\nHOST_DRIVER_SETTING = \"SPDK_HOST_DRIVER\"\nHOST_NIC_SETTING = \"SPDK_HOST_NIC\"\nDEBUG_SETTING = \"SPDK_DEBUG_ENABLE\"\nDEBUG_CASE_SETTING = \"SPDK_DEBUGCASE_ENABLE\"\nSPDK_ERROR_ENV = \"SPDK_RUNNING_ERROR\"\n\n\"\"\"\nglobal error table\n\"\"\"\nSPDK_ERR_TBL = {\n \"GENERIC_ERR\": 1,\n \"SPDK_BUILD_ERR\": 2,\n \"DUT_SETUP_ERR\": 3,\n \"TESTER_SETUP_ERR\": 4,\n 
\"SUITE_SETUP_ERR\": 5,\n \"SUITE_EXECUTE_ERR\": 6,\n}\n\n\ndef get_nic_name(type):\n \"\"\"\n strip nic code name by nic type\n \"\"\"\n for name, nic_type in NICS.items():\n if nic_type == type:\n return name\n return 'Unknown'\n\n\ndef get_nic_driver(pci_id):\n \"\"\"\n Return linux driver for specified NICs\n \"\"\"\n driverlist = dict(zip(NICS.values(), DRIVERS.keys()))\n try:\n driver = DRIVERS[driverlist[pci_id]]\n except Exception as e:\n driver = None\n return driver\n\n\ndef save_global_setting(key, value):\n \"\"\"\n Save global setting\n \"\"\"\n if re.match(SPDK_ENV_PAT, key):\n env_key = key\n else:\n env_key = \"SPDK_\" + key\n os.environ[env_key] = value\n\n\ndef load_global_setting(key):\n \"\"\"\n Load global setting\n \"\"\"\n if re.match(SPDK_ENV_PAT, key):\n env_key = key\n else:\n env_key = \"SPDK_\" + key\n if env_key in os.environ.keys():\n return os.environ[env_key]\n else:\n return ''\n\n\ndef report_error(error):\n \"\"\"\n Report error when error occurred\n \"\"\"\n if error in SPDK_ERR_TBL.keys():\n os.environ[SPDK_ERROR_ENV] = error\n else:\n os.environ[SPDK_ERROR_ENV] = \"GENERIC_ERR\"\n\n\ndef exit_error():\n \"\"\"\n Set system exit value when error occurred\n \"\"\"\n if SPDK_ERROR_ENV in os.environ.keys():\n ret_val = SPDK_ERR_TBL[os.environ[SPDK_ERROR_ENV]]\n sys.exit(ret_val)\n else:\n sys.exit(0)\n",
"id": "8837358",
"language": "Python",
"matching_score": 2.2496068477630615,
"max_stars_count": 3,
"path": "framework/settings.py"
},
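The lookup and environment helpers in `settings.py` compose as follows; the PCI id comes from the `NICS` table above, and the expected outputs are shown as comments.

```python
from settings import (HOST_DRIVER_SETTING, get_nic_name, get_nic_driver,
                      save_global_setting, load_global_setting)

print(get_nic_name("8086:1572"))    # 'fortville_eagle'
print(get_nic_driver("8086:1572"))  # 'i40e' (via the NICS -> DRIVERS pairing)

# Global settings are just SPDK_-prefixed environment variables.
save_global_setting(HOST_DRIVER_SETTING, "i40e")
print(load_global_setting(HOST_DRIVER_SETTING))  # 'i40e'
```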
{
"content": "# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport logging\nimport os\nimport inspect\nfrom settings import LOG_NAME_SEP, FOLDERS\n\n\"\"\"\nlogger module with several log level. Testframwork and TestSuite log\nwill be saved into different log files.\n\"\"\"\n\nSPDK_ENV_PAT = r\"SPDK_*\"\n\n\ndef RED(text):\n return \"\\x1B[\" + \"31;1m\" + str(text) + \"\\x1B[\" + \"0m\"\n\n\ndef GREEN(text):\n return \"\\x1B[\" + \"32;1m\" + str(text) + \"\\x1B[\" + \"0m\"\n\n\ndef get_subclasses(module, clazz):\n \"\"\"\n Get module attribute name and attribute.\n \"\"\"\n for subclazz_name, subclazz in inspect.getmembers(module):\n if hasattr(subclazz, '__bases__') and clazz in subclazz.__bases__:\n yield (subclazz_name, subclazz)\n\n\nlogging.SPDK_DUT_CMD = logging.INFO + 1\nlogging.SPDK_DUT_OUTPUT = logging.DEBUG + 1\nlogging.SPDK_DUT_RESULT = logging.WARNING + 1\nlogging.SPDK_TESTER_CMD = logging.INFO + 2\nlogging.SPDK_TESTER_OUTPUT = logging.DEBUG + 2\nlogging.SPDK_TESTER_RESULT = logging.WARNING + 2\nlogging.SUITE_DUT_CMD = logging.INFO + 3\nlogging.SUITE_DUT_OUTPUT = logging.DEBUG + 3\nlogging.SUITE_TESTER_CMD = logging.INFO + 4\nlogging.SUITE_TESTER_OUTPUT = logging.DEBUG + 4\n\nlogging.addLevelName(logging.SPDK_DUT_CMD, 'SPDK_DUT_CMD')\nlogging.addLevelName(logging.SPDK_DUT_OUTPUT, 'SPDK_DUT_OUTPUT')\nlogging.addLevelName(logging.SPDK_DUT_RESULT, 'SPDK_DUT_RESULT')\nlogging.addLevelName(logging.SPDK_TESTER_CMD, 'SPDK_TESTER_CMD')\nlogging.addLevelName(logging.SPDK_TESTER_OUTPUT, 'SPDK_TESTER_OUTPUT')\nlogging.addLevelName(logging.SPDK_TESTER_RESULT, 'SPDK_TESTER_RESULT')\nlogging.addLevelName(logging.SUITE_DUT_CMD, 'SUITE_DUT_CMD')\nlogging.addLevelName(logging.SUITE_DUT_OUTPUT, 'SUITE_DUT_OUTPUT')\nlogging.addLevelName(logging.SUITE_TESTER_CMD, 'SUITE_TESTER_CMD')\nlogging.addLevelName(logging.SUITE_TESTER_OUTPUT, 'SUITE_TESTER_OUTPUT')\n\nmessage_fmt = '%(asctime)s %(levelname)20s: %(message)s'\ndate_fmt = '%d/%m/%Y %H:%M:%S'\nRESET_COLOR = 
'\\033[0m'\nstream_fmt = '%(color)s%(levelname)20s: %(message)s' + RESET_COLOR\nlog_dir = None\n\n\ndef add_salt(salt, msg):\n if not salt:\n return msg\n else:\n return '[%s] ' % salt + str(msg)\n\n\nclass BaseLoggerAdapter(logging.LoggerAdapter):\n \"\"\"\n Upper layer of original logging module.\n \"\"\"\n\n def spdk_dut_cmd(self, msg, *args, **kwargs):\n self.log(logging.SPDK_DUT_CMD, msg, *args, **kwargs)\n\n def spdk_dut_output(self, msg, *args, **kwargs):\n self.log(logging.SPDK_DUT_OUTPUT, msg, *args, **kwargs)\n\n def spdk_dut_result(self, msg, *args, **kwargs):\n self.log(logging.SPDK_DUT_RESULT, msg, *args, **kwargs)\n\n def spdk_tester_cmd(self, msg, *args, **kwargs):\n self.log(logging.SPDK_TESTER_CMD, msg, *args, **kwargs)\n\n def spdk_tester_output(self, msg, *args, **kwargs):\n self.log(logging.SPDK_TESTER_CMD, msg, *args, **kwargs)\n\n def spdk_tester_result(self, msg, *args, **kwargs):\n self.log(logging.SPDK_TESTER_RESULT, msg, *args, **kwargs)\n\n def suite_dut_cmd(self, msg, *args, **kwargs):\n self.log(logging.SUITE_DUT_CMD, msg, *args, **kwargs)\n\n def suite_dut_output(self, msg, *args, **kwargs):\n self.log(logging.SUITE_DUT_OUTPUT, msg, *args, **kwargs)\n\n def suite_tester_cmd(self, msg, *args, **kwargs):\n self.log(logging.SUITE_TESTER_CMD, msg, *args, **kwargs)\n\n def suite_tester_output(self, msg, *args, **kwargs):\n self.log(logging.SUITE_TESTER_OUTPUT, msg, *args, **kwargs)\n\n\nclass ColorHandler(logging.StreamHandler):\n \"\"\"\n Color of log format.\n \"\"\"\n LEVEL_COLORS = {\n logging.DEBUG: '', # SYSTEM\n logging.SPDK_DUT_OUTPUT: '\\033[00;37m', # WHITE\n logging.SPDK_TESTER_OUTPUT: '\\033[00;37m', # WHITE\n logging.SUITE_DUT_OUTPUT: '\\033[00;37m', # WHITE\n logging.SUITE_TESTER_OUTPUT: '\\033[00;37m', # WHITE\n logging.INFO: '\\033[00;36m', # CYAN\n logging.SPDK_DUT_CMD: '', # SYSTEM\n logging.SPDK_TESTER_CMD: '', # SYSTEM\n logging.SUITE_DUT_CMD: '', # SYSTEM\n logging.SUITE_TESTER_CMD: '', # SYSTEM\n logging.WARN: '\\033[01;33m', # BOLD YELLOW\n logging.SPDK_DUT_RESULT: '\\033[01;34m', # BOLD BLUE\n logging.SPDK_TESTER_RESULT: '\\033[01;34m', # BOLD BLUE\n logging.ERROR: '\\033[01;31m', # BOLD RED\n logging.CRITICAL: '\\033[01;31m', # BOLD RED\n }\n\n def format(self, record):\n record.__dict__['color'] = self.LEVEL_COLORS[record.levelno]\n return logging.StreamHandler.format(self, record)\n\n\nclass SPDKLOG(BaseLoggerAdapter):\n \"\"\"\n log class for framework and testsuite.\n \"\"\"\n\n def __init__(self, logger, crb=\"suite\"):\n global log_dir\n filename = inspect.stack()[1][1][:-3]\n self.name = filename.split('/')[-1]\n self.error_lvl = logging.ERROR\n self.warn_lvl = logging.WARNING\n self.info_lvl = logging.INFO\n self.debug_lvl = logging.DEBUG\n if log_dir is None:\n self.log_path = os.getcwd() + \"/../\" + FOLDERS['Output']\n else:\n self.log_path = log_dir # log dir should contain tag/crb global value and mod in spdk\n self.spdk_log = \"TestFramework.log\"\n self.logger = logger\n self.logger.setLevel(logging.DEBUG)\n self.crb = crb\n super(SPDKLOG, self).__init__(self.logger, dict(crb=self.crb))\n self.salt = ''\n self.fh = None\n self.ch = None\n # add default log file\n fh = logging.FileHandler(self.log_path + \"/\" + self.spdk_log)\n ch = ColorHandler()\n self.__log_hander(fh, ch)\n\n def __log_hander(self, fh, ch):\n \"\"\"\n Config stream handler and file handler.\n \"\"\"\n fh.setFormatter(logging.Formatter(message_fmt, date_fmt))\n ch.setFormatter(logging.Formatter(stream_fmt, date_fmt))\n # file hander default level\n 
fh.setLevel(logging.DEBUG)\n # console handler default leve\n ch.setLevel(logging.INFO)\n self.logger.addHandler(fh)\n self.logger.addHandler(ch)\n if self.fh is not None:\n self.logger.removeHandler(self.fh)\n if self.ch is not None:\n self.logger.removeHandler(self.ch)\n self.fh = fh\n self.ch = ch\n\n def warning(self, message):\n \"\"\"\n warning level log function.\n \"\"\"\n message = add_salt(self.salt, message)\n self.logger.log(self.warn_lvl, message)\n\n def info(self, message):\n \"\"\"\n information level log function.\n \"\"\"\n message = add_salt(self.salt, message)\n self.logger.log(self.info_lvl, message)\n\n def error(self, message):\n \"\"\"\n error level log function.\n \"\"\"\n message = add_salt(self.salt, message)\n self.logger.log(self.error_lvl, message)\n\n def debug(self, message):\n \"\"\"\n debug level log function.\n \"\"\"\n message = add_salt(self.salt, message)\n self.logger.log(self.debug_lvl, message)\n\n def config_execution(self, crb):\n \"\"\"\n Reconfigure framework logfile.\n \"\"\"\n log_file = self.log_path + '/' + self.spdk_log\n fh = logging.FileHandler(log_file)\n ch = ColorHandler()\n self.__log_hander(fh, ch)\n\n def set_salt(crb, start_flag):\n if LOG_NAME_SEP in crb:\n old = '%s%s' % (start_flag, LOG_NAME_SEP)\n if not self.salt:\n self.salt = crb.replace(old, '', 1)\n if crb.startswith('dut'):\n self.info_lvl = logging.SPDK_DUT_CMD\n self.debug_lvl = logging.SPDK_DUT_OUTPUT\n self.warn_lvl = logging.SPDK_DUT_RESULT\n set_salt(crb, 'dut')\n elif crb.startswith('tester'):\n self.info_lvl = logging.SPDK_TESTER_CMD\n self.debug_lvl = logging.SPDK_TESTER_OUTPUT\n self.warn_lvl = logging.SPDK_TESTER_RESULT\n set_salt(crb, 'tester')\n else:\n self.error_lvl = logging.ERROR\n self.warn_lvl = logging.WARNING\n self.info_lvl = logging.INFO\n self.debug_lvl = logging.DEBUG\n\n def config_suite(self, suitename, crb=None):\n \"\"\"\n Reconfigure suitename logfile.\n \"\"\"\n log_file = self.log_path + '/' + suitename + '.log'\n fh = logging.FileHandler(log_file)\n ch = ColorHandler()\n self.__log_hander(fh, ch)\n if crb == 'dut':\n self.info_lvl = logging.SUITE_DUT_CMD\n self.debug_lvl = logging.SUITE_DUT_OUTPUT\n elif crb == 'tester':\n self.info_lvl = logging.SUITE_TESTER_CMD\n self.debug_lvl = logging.SUITE_TESTER_OUTPUT\n\n\ndef getLogger(name, crb=\"suite\"):\n \"\"\"\n Get logger handler and if there's no handler will create one.\n \"\"\"\n logger = SPDKLOG(logging.getLogger(name), crb)\n return logger\n\n\n_TESTSUITE_NAME_FORMAT_PATTERN = r'TEST SUITE : (.*)'\n_TESTSUITE_ENDED_FORMAT_PATTERN = r'TEST SUITE ENDED: (.*)'\n_TESTCASE_NAME_FORMAT_PATTERN = r'Test Case (.*) Begin'\n_TESTCASE_RESULT_FORMAT_PATTERN = r'Test Case (.*) Result (.*):'\n",
"id": "4585633",
"language": "Python",
"matching_score": 2.9192192554473877,
"max_stars_count": 3,
"path": "framework/logger.py"
},
{
"content": "# BSD LICENSE\n#\n# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport ConfigParser\nimport os\nimport traceback\nimport atexit\nimport copy\nimport settings\nfrom tester import Tester\nfrom dut import Dut\nfrom serializer import Serializer\nfrom test_case import TestCase\nfrom test_result import Result\nfrom exception import ConfigParseException, VerifyFailure\nfrom logger import getLogger, get_subclasses\nimport logger\nfrom config import CrbsConf\nimport sys\nreload(sys)\nsys.setdefaultencoding('UTF8')\ncwd = os.path.dirname(os.path.dirname(__file__))\nsys.path.append(cwd + '/tests/lib')\nresult = None\nlog_handler = None\n\n\ndef spdk_parse_param(config, section):\n \"\"\"\n Parse execution file parameters.\n \"\"\"\n performance = False\n functional = False\n parameters = config.get(section, 'parameters').split(':')\n drivername = config.get(section, 'drivername').split('=')[-1]\n settings.save_global_setting(settings.HOST_DRIVER_SETTING, drivername)\n paramDict = dict()\n for param in parameters:\n (key, _, value) = param.partition('=')\n paramDict[key] = value\n if 'perf' in paramDict and paramDict['perf'] == 'true':\n performance = True\n if 'func' in paramDict and paramDict['func'] == 'true':\n functional = True\n if 'nic_type' not in paramDict:\n paramDict['nic_type'] = 'any'\n settings.save_global_setting(\n settings.HOST_NIC_SETTING, paramDict['nic_type'])\n if performance:\n settings.save_global_setting(settings.PERF_SETTING, 'yes')\n else:\n settings.save_global_setting(settings.PERF_SETTING, 'no')\n if functional:\n settings.save_global_setting(settings.FUNC_SETTING, 'yes')\n else:\n settings.save_global_setting(settings.FUNC_SETTING, 'no')\n\n\ndef spdk_parse_config(config, section):\n \"\"\"\n Parse execution file configuration.\n \"\"\"\n duts = [dut_.strip() for dut_ in config.get(section,\n 'crbs').split(',')]\n targets = [target.strip()\n for target in config.get(section, 'targets').split(',')]\n test_suites = [suite.strip()\n for suite in 
config.get(section, 'test_suites').split(',')]\n for suite in test_suites:\n if suite == '':\n test_suites.remove(suite)\n return duts, targets, test_suites\n\n\ndef get_project_obj(project_name, super_class, crbInst, serializer):\n \"\"\"\n Load project module and return crb instance.\n \"\"\"\n project_obj = None\n PROJECT_MODULE_PREFIX = 'project_'\n try:\n project_module = __import__(PROJECT_MODULE_PREFIX + project_name)\n for project_subclassname, project_subclass in get_subclasses(\n project_module, super_class):\n project_obj = project_subclass(crbInst, serializer)\n if project_obj is None:\n project_obj = super_class(crbInst, serializer)\n except Exception as e:\n log_handler.info(\"LOAD PROJECT MODULE INFO: \" + str(e))\n project_obj = super_class(crbInst, serializer)\n return project_obj\n\n\ndef spdk_log_testsuite(duts, tester, suite_obj, log_handler, test_classname):\n \"\"\"\n Change SUITE self logger handler.\n \"\"\"\n log_handler.config_suite(test_classname, 'spdk')\n tester.logger.config_suite(test_classname, 'tester')\n for dutobj in duts:\n dutobj.logger.config_suite(test_classname, 'dut')\n dutobj.test_classname = test_classname\n\n\ndef spdk_log_execution(duts, tester, log_handler):\n \"\"\"\n Change default logger handler.\n \"\"\"\n log_handler.config_execution('spdk')\n tester.logger.config_execution('tester')\n for dutobj in duts:\n dutobj.logger.config_execution(\n 'dut' + settings.LOG_NAME_SEP + '%s' % dutobj.crb['My IP'])\n\n\ndef spdk_crbs_init(crbInsts, skip_setup, project,\n base_dir, serializer, dpdk_dir):\n \"\"\"\n Create dut/tester instance and initialize them.\n \"\"\"\n duts = []\n serializer.set_serialized_filename(settings.FOLDERS['Output'] +\n '/.%s.cache' % crbInsts[0]['IP'])\n serializer.load_from_file()\n testInst = copy.copy(crbInsts[0])\n testInst['My IP'] = crbInsts[0]['tester IP']\n tester = get_project_obj(project, Tester, testInst, serializer)\n for crbInst in crbInsts:\n dutInst = copy.copy(crbInst)\n dutInst['My IP'] = crbInst['IP']\n dutobj = get_project_obj(project, Dut, dutInst, serializer)\n duts.append(dutobj)\n spdk_log_execution(duts, tester, log_handler)\n tester.duts = duts\n show_speedup_options_messages(skip_setup)\n tester.set_speedup_options(skip_setup)\n nic = settings.load_global_setting(settings.HOST_NIC_SETTING)\n for dutobj in duts:\n dutobj.tester = tester\n dutobj.set_speedup_options(skip_setup)\n dutobj.set_directory(base_dir)\n dutobj.set_path(dpdk_dir)\n return duts, tester\n\n\ndef spdk_crbs_exit(duts, tester):\n \"\"\"\n Call dut and tester exit function after execution finished\n \"\"\"\n for dutobj in duts:\n dutobj.crb_exit()\n tester.crb_exit()\n\n\ndef spdk_run_prerequisties(duts, tester, serializer):\n \"\"\"\n Run spdk prerequisties function.\n \"\"\"\n try:\n tester.prerequisites()\n except Exception as ex:\n log_handler.error(\" PREREQ EXCEPTION \" + traceback.format_exc())\n log_handler.info('CACHE: Discarding cache.')\n settings.report_error(\"TESTER_SETUP_ERR\")\n return False\n try:\n for dutobj in duts:\n dutobj.prerequisites()\n serializer.save_to_file()\n except Exception as ex:\n log_handler.error(\" PREREQ EXCEPTION \" + traceback.format_exc())\n result.add_failed_dut(duts[0], str(ex))\n log_handler.info('CACHE: Discarding cache.')\n settings.report_error(\"DUT_SETUP_ERR\")\n return False\n\n\ndef spdk_run_target(duts, tester, targets, test_suites):\n \"\"\"\n Run each target in execution targets.\n \"\"\"\n for target in targets:\n log_handler.info(\"\\nTARGET \" + target)\n result.target = 
target\n try:\n drivername = settings.load_global_setting(\n settings.HOST_DRIVER_SETTING)\n if drivername == \"\":\n for dutobj in duts:\n dutobj.set_target(target, bind_dev=False)\n else:\n for dutobj in duts:\n dutobj.set_target(target)\n except AssertionError as ex:\n log_handler.error(\" TARGET ERROR: \" + str(ex))\n settings.report_error(\"SPDK_BUILD_ERR\")\n result.add_failed_target(result.dut, target, str(ex))\n continue\n except Exception as ex:\n settings.report_error(\"GENERIC_ERR\")\n log_handler.error(\" !!! DEBUG IT: \" + traceback.format_exc())\n result.add_failed_target(result.dut, target, str(ex))\n continue\n spdk_run_suite(duts, tester, test_suites, target)\n\n\ndef spdk_run_suite(duts, tester, test_suites, target):\n \"\"\"\n Run each suite in test suite list.\n \"\"\"\n for suite_name in test_suites:\n try:\n result.test_suite = suite_name\n suite_module = __import__('TestSuite_' + suite_name)\n for test_classname, test_class in get_subclasses(\n suite_module, TestCase):\n suite_obj = test_class(duts, tester, target, suite_name)\n result.nic = suite_obj.nic\n spdk_log_testsuite(duts, tester, suite_obj,\n log_handler, test_classname)\n log_handler.info(\"\\nTEST SUITE : \" + test_classname)\n log_handler.info(\"NIC : \" + result.nic)\n if suite_obj.execute_setup_all():\n suite_obj.execute_test_cases()\n suite_obj.execute_tear_downall()\n log_handler.info(\"\\nTEST SUITE ENDED: \" + test_classname)\n spdk_log_execution(duts, tester, log_handler)\n except VerifyFailure:\n settings.report_error(\"SUITE_EXECUTE_ERR\")\n log_handler.error(\" !!! DEBUG IT: \" + traceback.format_exc())\n except KeyboardInterrupt:\n log_handler.error(\" !!! STOPPING SPDK tests\")\n suite_obj.execute_tear_downall()\n break\n except Exception as e:\n settings.report_error(\"GENERIC_ERR\")\n log_handler.error(str(e))\n finally:\n suite_obj.execute_tear_downall()\n\n\ndef run_all(config_file, skip_setup, project,\n suite_dir, base_dir, output_dir, dpdk_dir):\n \"\"\"\n Main process of SPDK tests, it will run all test suites in the config file.\n \"\"\"\n global result\n global log_handler\n # save global variable\n serializer = Serializer()\n # prepare the output folder\n if output_dir == '':\n output_dir = settings.FOLDERS['Output']\n if not os.path.exists(output_dir):\n os.mkdir(output_dir)\n # add python module search path\n sys.path.append(suite_dir)\n sys.path.append(dpdk_dir)\n logger.log_dir = output_dir\n log_handler = getLogger('spdk')\n log_handler.config_execution('spdk')\n # Read config file\n config = ConfigParser.SafeConfigParser()\n load_cfg = config.read(config_file)\n if len(load_cfg) == 0:\n raise ConfigParseException(config_file)\n os.environ[\"TERM\"] = \"dumb\"\n # report objects\n result = Result()\n crbInsts = []\n crbs_conf = CrbsConf()\n crbs = crbs_conf.load_crbs_config()\n # for all Exectuion sections\n for section in config.sections():\n spdk_parse_param(config, section)\n # verify if the delimiter is good if the lists are vertical\n dutIPs, targets, test_suites = spdk_parse_config(config, section)\n for dutIP in dutIPs:\n log_handler.info(\"\\nDUT \" + dutIP)\n # look up in crbs - to find the matching IP\n for dutIP in dutIPs:\n for crb in crbs:\n if crb['IP'] == dutIP:\n crbInsts.append(crb)\n break\n # only run on the dut in known crbs\n if len(crbInsts) == 0:\n cwd = os.path.dirname(os.path.dirname(__file__))\n path1 = cwd + '/framework/execution.cfg'\n path2 = cwd + '/framework/crbs.cfg'\n print \" <Target_IP_Address> is\", dutIP, \"in\", path1\n 
log_handler.error(\" SKIP UNKNOWN TARGET\")\n if dutIP != '<Target_IP_Address>':\n print \" Please check IP Address information in\", path1, \"and\", path2\n continue\n result.dut = dutIPs[0]\n # init dut, tester crb\n duts, tester = spdk_crbs_init(\n crbInsts, skip_setup, project, base_dir, serializer, dpdk_dir)\n # register exit action\n atexit.register(quit_execution, duts, tester)\n # Run DUT prerequisites\n if spdk_run_prerequisties(duts, tester, serializer) is False:\n spdk_crbs_exit(duts, tester)\n continue\n spdk_run_target(duts, tester, targets, test_suites)\n spdk_crbs_exit(duts, tester)\n\n\ndef show_speedup_options_messages(skip_setup):\n \"\"\"\n Skip NIC and spdk setup.\n \"\"\"\n if skip_setup:\n log_handler.info('SKIP: Skipping SPDK setup.')\n else:\n log_handler.info('The SPDK setup steps will be executed.')\n\n\ndef quit_execution(duts, tester):\n \"\"\"\n Close session to DUT and tester before quit.\n Return exit status when failure occurred.\n \"\"\"\n # close all nics\n for dutobj in duts:\n if getattr(dutobj, 'ports_info', None) and dutobj.ports_info:\n for port_info in dutobj.ports_info:\n netdev = port_info['port']\n netdev.close()\n # close all session\n dutobj.close()\n if tester is not None:\n tester.close()\n log_handler.info(\"SPDK tests ended\")\n # return value\n settings.exit_error()\n",
"id": "3026272",
"language": "Python",
"matching_score": 3.696775436401367,
"max_stars_count": 3,
"path": "framework/test_suite.py"
},
{
"content": "# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nGeneric port and crbs configuration file load function.\n\"\"\"\n\nimport ConfigParser\nfrom exception import ConfigParseException\n\nCRBCONF = \"crbs.cfg\"\n\n\nclass UserConf():\n\n def __init__(self, config):\n self.conf = ConfigParser.SafeConfigParser()\n load_files = self.conf.read(config)\n if load_files == []:\n print \"FAILED LOADING %s!!!\" % config\n self.conf = None\n raise ConfigParseException(config)\n\n def get_sections(self):\n if self.conf is None:\n return None\n return self.conf.sections()\n\n def load_section(self, section):\n if self.conf is None:\n return None\n items = None\n for conf_sect in self.conf.sections():\n if conf_sect == section:\n items = self.conf.items(section)\n return items\n\n\nclass CrbsConf(UserConf):\n\n DEF_CRB = {\n 'IP': '',\n 'user': '',\n 'pass': '',\n 'tester IP': '',\n 'tester pass': ''}\n\n def __init__(self, crbs_conf=CRBCONF):\n self.config_file = crbs_conf\n self.crbs_cfg = []\n try:\n self.crbs_conf = UserConf(self.config_file)\n except ConfigParseException:\n self.crbs_conf = None\n raise ConfigParseException\n\n def load_crbs_config(self):\n sections = self.crbs_conf.get_sections()\n if not sections:\n return self.crbs_cfg\n for name in sections:\n crb = self.DEF_CRB.copy()\n crb_confs = self.crbs_conf.load_section(name)\n if not crb_confs:\n continue\n for conf in crb_confs:\n key, value = conf\n if key == 'dut_ip':\n crb['IP'] = value\n elif key == 'dut_user':\n crb['user'] = value\n elif key == 'dut_passwd':\n crb['pass'] = value\n elif key == 'tester_ip':\n crb['tester IP'] = value\n elif key == 'tester_passwd':\n crb['tester pass'] = value\n self.crbs_cfg.append(crb)\n return self.crbs_cfg\n",
"id": "11295076",
"language": "Python",
"matching_score": 1.154953122138977,
"max_stars_count": 3,
"path": "framework/config.py"
},
{
"content": "# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport pickle\n\n\nclass Singleton(type):\n\n _instances = {}\n\n def __call__(self, *args, **kwargs):\n if self not in self._instances:\n self._instances[self] = \\\n super(Singleton, self).__call__(*args, **kwargs)\n return self._instances[self]\n\n\nclass Serializer(object):\n\n __metaclass__ = Singleton\n\n def __init__(self):\n self.volatile_cache = {}\n self.filename = 'serializer.cache'\n\n def save(self, object_name, object_to_save):\n self.volatile_cache[object_name] = object_to_save\n\n def set_serialized_filename(self, filename):\n self.filename = filename\n\n def save_to_file(self):\n try:\n serialized_file = open(self.filename, 'w')\n pickle.dump(self.volatile_cache, serialized_file)\n serialized_file.close()\n return True\n except:\n return False\n\n def load_from_file(self):\n try:\n serialized_file = open(self.filename, 'r')\n self.volatile_cache = pickle.load(serialized_file)\n serialized_file.close()\n return True\n except:\n self.volatile_cache.clear()\n return False\n",
"id": "3240705",
"language": "Python",
"matching_score": 0.7251415252685547,
"max_stars_count": 3,
"path": "framework/serializer.py"
},
{
"content": "#!/usr/bin/python\n\nimport argparse\nimport test_suite\n\nparser = argparse.ArgumentParser(description=\"SPDK Nightly Test Framework\")\n\nparser.add_argument('--config-file',\n default='execution.cfg',\n help='set target ip and nic config and test suites')\n\nparser.add_argument('--output',\n default='../output',\n help='save log and result in output directory')\n\nparser.add_argument('-s', '--skip-setup',\n action='store_true',\n help='skip config nic and compile spdk')\n\nparser.add_argument('-p', '--project',\n default='spdk',\n help='add spdk as test project.')\n\nparser.add_argument('--suite-dir',\n default='../tests',\n help='add tests directory to import test suites')\n\nparser.add_argument('--dpdk-dir',\n default='~/spdk/dpdk',\n help='Configure spdk where dpdk packages is added')\n\nparser.add_argument('-d', '--dir',\n default='~/spdk',\n help='Output directory where spdk package is extracted')\n\nargs = parser.parse_args()\n\ntest_suite.run_all(\n args.config_file,\n args.skip_setup,\n args.project,\n args.suite_dir,\n args.dir,\n args.output,\n args.dpdk_dir)\n",
"id": "4106609",
"language": "Python",
"matching_score": 0.38553938269615173,
"max_stars_count": 3,
"path": "framework/main.py"
},
{
"content": "# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nGeneric result container and reporters\n\"\"\"\n\n\nclass Result(object):\n\n def __init__(self):\n self.__dut = 0\n self.__target = 0\n self.__test_suite = 0\n self.__test_case = 0\n self.__test_result = None\n self.__message = None\n self.__internals = []\n self.__failed_duts = {}\n self.__failed_targets = {}\n\n def __set_dut(self, dut):\n if dut not in self.__internals:\n self.__internals.append(dut)\n self.__internals.append([])\n self.__dut = self.__internals.index(dut)\n\n def __get_dut(self):\n return self.__internals[self.__dut]\n\n def __current_targets(self):\n return self.internals[self.__dut + 1]\n\n def __set_target(self, target):\n targets = self.__current_targets()\n if target not in targets:\n targets.append(target)\n targets.append('_nic_')\n targets.append([])\n self.__target = targets.index(target)\n\n def __get_target(self):\n return self.__current_targets()[self.__target]\n\n def __set_nic(self, nic):\n targets = self.__current_targets()\n targets[self.__target + 1] = nic\n\n def __get_nic(self):\n targets = self.__current_targets()\n return targets[self.__target + 1]\n\n def __current_suites(self):\n return self.__current_targets()[self.__target + 2]\n\n def __set_test_suite(self, test_suite):\n suites = self.__current_suites()\n if test_suite not in suites:\n suites.append(test_suite)\n suites.append([])\n self.__test_suite = suites.index(test_suite)\n\n def __get_test_suite(self):\n return self.__current_suites()[self.__test_suite]\n\n def __current_cases(self):\n return self.__current_suites()[self.__test_suite + 1]\n\n def __set_test_case(self, test_case):\n cases = self.__current_cases()\n cases.append(test_case)\n cases.append([])\n self.__test_case = cases.index(test_case)\n\n def __get_test_case(self):\n return self.__current_cases()[self.__test_case]\n\n def __get_test_result(self):\n return self.__test_result\n\n def __get_message(self):\n return 
self.__message\n\n def __get_internals(self):\n return self.__internals\n\n def __current_result(self):\n return self.__current_cases()[self.__test_case + 1]\n\n def __set_test_case_result(self, result, message):\n test_case = self.__current_result()\n test_case.append(result)\n test_case.append(message)\n self.__test_result = result\n self.__message = message\n\n def copy_suite(self, suite_result):\n self.__current_suites()[self.__test_suite +\n 1] = suite_result.__current_cases()\n\n def test_case_passed(self):\n \"\"\"\n Set last test case added as PASSED\n \"\"\"\n self.__set_test_case_result(result='PASSED', message='')\n\n def test_case_skip(self, message):\n \"\"\"\n set last test case add as N/A\n \"\"\"\n self.__set_test_case_result(result='N/A', message=message)\n\n def test_case_failed(self, message):\n \"\"\"\n Set last test case added as FAILED\n \"\"\"\n self.__set_test_case_result(result='FAILED', message=message)\n\n def test_case_blocked(self, message):\n \"\"\"\n Set last test case added as BLOCKED\n \"\"\"\n self.__set_test_case_result(result='BLOCKED', message=message)\n\n def all_duts(self):\n \"\"\"\n Returns all the DUTs it's aware of.\n \"\"\"\n return self.__internals[::2]\n\n def all_targets(self, dut):\n \"\"\"\n Returns the targets for a given DUT\n \"\"\"\n try:\n dut_idx = self.__internals.index(dut)\n except:\n return None\n return self.__internals[dut_idx + 1][::3]\n\n def current_nic(self, dut, target):\n \"\"\"\n Returns the NIC for a given DUT and target\n \"\"\"\n try:\n dut_idx = self.__internals.index(dut)\n target_idx = self.__internals[dut_idx + 1].index(target)\n except:\n return None\n return self.__internals[dut_idx + 1][target_idx + 1]\n\n def all_test_suites(self, dut, target):\n \"\"\"\n Returns all the test suites for a given DUT and target.\n \"\"\"\n try:\n dut_idx = self.__internals.index(dut)\n target_idx = self.__internals[dut_idx + 1].index(target)\n except:\n return None\n return self.__internals[dut_idx + 1][target_idx + 2][::2]\n\n def all_test_cases(self, dut, target, suite):\n \"\"\"\n Returns all the test cases for a given DUT, target and test case.\n \"\"\"\n try:\n dut_idx = self.__internals.index(dut)\n target_idx = self.__internals[dut_idx + 1].index(target)\n suite_idx = self.__internals[dut_idx + 1][\n target_idx + 2].index(suite)\n except:\n return None\n return self.__internals[\n dut_idx + 1][target_idx + 2][suite_idx + 1][::2]\n\n def result_for(self, dut, target, suite, case):\n \"\"\"\n Returns the test case result/message for a given DUT, target, test\n suite and test case.\n \"\"\"\n try:\n dut_idx = self.__internals.index(dut)\n target_idx = self.__internals[dut_idx + 1].index(target)\n suite_idx = self.__internals[dut_idx + 1][\n target_idx + 2].index(suite)\n case_idx = self.__internals[dut_idx + 1][target_idx +\n 2][suite_idx + 1].index(case)\n except:\n return None\n return self.__internals[\n dut_idx + 1][target_idx + 2][suite_idx + 1][case_idx + 1]\n\n def add_failed_dut(self, dut, msg):\n \"\"\"\n Sets the given DUT as failing due to msg\n \"\"\"\n self.__failed_duts[dut] = msg\n\n def is_dut_failed(self, dut):\n \"\"\"\n True if the given DUT was marked as failing\n \"\"\"\n return dut in self.__failed_duts\n\n def dut_failed_msg(self, dut):\n \"\"\"\n Returns the reason of failure for a given DUT\n \"\"\"\n return self.__failed_duts[dut]\n\n def add_failed_target(self, dut, target, msg):\n \"\"\"\n Sets the given DUT, target as failing due to msg\n \"\"\"\n self.__failed_targets[dut + target] = msg\n\n def 
is_target_failed(self, dut, target):\n \"\"\"\n True if the given DUT,target were marked as failing\n \"\"\"\n return (dut + target) in self.__failed_targets\n\n def target_failed_msg(self, dut, target):\n \"\"\"\n Returns the reason of failure for a given DUT,target\n \"\"\"\n return self.__failed_targets[dut + target]\n\n dut = property(__get_dut, __set_dut)\n target = property(__get_target, __set_target)\n test_suite = property(__get_test_suite, __set_test_suite)\n test_case = property(__get_test_case, __set_test_case)\n test_result = property(__get_test_result)\n message = property(__get_message)\n nic = property(__get_nic, __set_nic)\n internals = property(__get_internals)\n\n\nclass ResultTable(object):\n\n def __init__(self, header):\n \"\"\"\n Add the title of result table.\n Usage:\n rt = ResultTable(header)\n rt.add_row(row)\n rt.table_print()\n \"\"\"\n self.results_table_rows = []\n self.results_table_rows.append([])\n self.results_table_header = header\n self.logger = None\n\n def set_logger(self, logger):\n self.logger = logger\n\n def add_row(self, row):\n \"\"\"\n Add one row to result table.\n \"\"\"\n self.results_table_rows.append(row)\n\n def table_print(self):\n \"\"\"\n Show off result table.\n \"\"\"\n self.table.add_rows(self.results_table_rows)\n self.table.header(self.results_table_header)\n alignments = []\n for _ in self.results_table_header:\n alignments.append(\"l\")\n self.table.set_cols_align(alignments)\n out = self.table.draw()\n if self.logger:\n self.logger.info('\\n' + out)\n",
"id": "3487692",
"language": "Python",
"matching_score": 2.645352602005005,
"max_stars_count": 3,
"path": "framework/test_result.py"
},
{
"content": "# BSD LICENSE\n#\n# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n#\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in\n# the documentation and/or other materials provided with the\n# distribution.\n# * Neither the name of Intel Corporation nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nA base class for creating DTF test cases.\n\"\"\"\nimport os\nimport re\nimport sys\nimport traceback\nfrom exception import VerifyFailure, TimeoutException\nfrom settings import DRIVERS, NICS, get_nic_name, load_global_setting\nfrom settings import PERF_SETTING, FUNC_SETTING, DEBUG_SETTING, DEBUG_CASE_SETTING, HOST_DRIVER_SETTING\nfrom excel_reporter import ExcelReporter\nfrom test_result import ResultTable, Result\nfrom logger import getLogger\nexcel_report = None\nis_backend = [\n \"iscsi_rxtxqueue\",\n \"iscsi_multiconnection\",\n \"nvmf_rxtxqueue\",\n \"nvmf_multiconnection\"]\n\n\nclass TestCase(object):\n\n def __init__(self, duts, tester, target, suitename):\n self.suite_name = suitename\n self.dut = duts[0]\n self.duts = duts\n self.tester = tester\n self.target = target\n class_name = self.__class__.__name__\n self.logger = getLogger(class_name)\n self.logger.config_suite(class_name)\n self._requested_tests = None\n self.nics = []\n drivername = []\n execution_path = os.path.dirname(os.path.dirname(__file__))\n execution_file = execution_path + '/framework/execution.cfg'\n execution = open(execution_file, 'r')\n status = re.findall(r\"\\n+parameters=nic_type=(.*)\", execution.read())\n status_nic = status[0].split(\":\")\n self.nic = status_nic[0]\n self.kdriver = self._get_nic_driver(self.nic)\n self._suite_result = Result()\n self._suite_result.dut = self.dut.crb['IP']\n self._suite_result.target = target\n self._suite_result.nic = self.nic\n self._suite_result.test_suite = self.suite_name\n if self._suite_result is None:\n raise ValueError(\"Result object should not None\")\n if load_global_setting(PERF_SETTING) == \"yes\":\n self._enable_perf = True\n else:\n self._enable_perf = False\n if load_global_setting(FUNC_SETTING) == \"yes\":\n self._enable_func = True\n else:\n self._enable_func = False\n if load_global_setting(DEBUG_SETTING) == \"yes\":\n self._enable_debug = True\n 
else:\n self._enable_debug = False\n if load_global_setting(DEBUG_CASE_SETTING) == \"yes\":\n self._debug_case = True\n else:\n self._debug_case = False\n self.drivername = load_global_setting(HOST_DRIVER_SETTING)\n\n def verify(self, passed, description):\n if not passed:\n raise VerifyFailure(description)\n\n def _get_nic_driver(self, nic_name):\n if nic_name in DRIVERS.keys():\n return DRIVERS[nic_name]\n return \"Unknown\"\n\n def result_table_create(self, header):\n self._result_table = ResultTable(header)\n self._result_table.set_logger(self.logger)\n\n def result_table_add(self, row):\n self._result_table.add_row(row)\n\n def result_table_print(self):\n self._result_table.table_print()\n\n def result_table_getrows(self):\n return self._result_table.results_table_rows\n\n def _get_functional_cases(self):\n \"\"\"\n Get all functional test cases.\n \"\"\"\n return self._get_test_cases(r'test_(?!perf_)')\n\n def _get_performance_cases(self):\n \"\"\"\n Get all performance test cases.\n \"\"\"\n return self._get_test_cases(r'test_perf_')\n\n def _has_it_been_requested(self, test_case, test_name_regex):\n \"\"\"\n Check whether test case has been requested for validation.\n \"\"\"\n name_matches = re.match(test_name_regex, test_case.__name__)\n if self._requested_tests is not None:\n return name_matches and test_case.__name__ in self._requested_tests\n return name_matches\n\n def set_requested_cases(self, case_list):\n \"\"\"\n Pass down input cases list for check\n \"\"\"\n self._requested_tests = case_list\n\n def _get_test_cases(self, test_name_regex):\n \"\"\"\n Return case list which name matched regex.\n \"\"\"\n for test_case_name in dir(self):\n test_case = getattr(self, test_case_name)\n if callable(test_case) and self._has_it_been_requested(\n test_case, test_name_regex):\n yield test_case\n\n def execute_setup_all(self):\n \"\"\"\n Execute suite setup_all function before cases.\n \"\"\"\n for dutobj in self.duts:\n dutobj.get_session_output(timeout=0.1)\n self.tester.get_session_output(timeout=0.1)\n try:\n self.set_up_all()\n return True\n except Exception:\n self.logger.error('set_up_all failed:\\n' + traceback.format_exc())\n if self._enable_func:\n i = 0\n for case_obj in self._get_functional_cases():\n case_name = case_obj.__name__\n if self._suite_result.test_suite in is_backend:\n self._suite_result.test_case = []\n if re.findall(r'test_a_', case_name):\n out = re.findall(r'(.*)', case_name)\n self._suite_result.test_case.append(out[i])\n i = i + 1\n else:\n return True\n else:\n self._suite_result.test_case = case_obj.__name__\n self._suite_result.test_case_blocked('set_up_all failed')\n if self._enable_perf:\n for case_obj in self._get_performance_cases():\n self._suite_result.test_case = case_obj.__name__\n self._suite_result.test_case_blocked('set_up_all failed')\n return False\n\n def _execute_test_case(self, case_obj):\n \"\"\"\n Execute specified test case in specified suite. 
If any exception occured in\n validation process, save the result and tear down this case.\n \"\"\"\n global excel_report\n case_name = case_obj.__name__\n if self._suite_result.test_suite in is_backend:\n self._suite_result.test_case = []\n i = 0\n if re.findall(r'test_a_', case_name):\n out = re.findall(r'(.*)', case_name)\n self._suite_result.test_case.append(out[i])\n i = i + 1\n else:\n return True\n else:\n self._suite_result.test_case = case_obj.__name__\n excel_report = ExcelReporter(\n '../output/test_results_%s.xls' %\n self._suite_result.test_suite)\n try:\n self.logger.info('Test Case %s Begin' % case_name)\n self.running_case = case_name\n # clean session\n for dutobj in self.duts:\n dutobj.get_session_output(timeout=0.1)\n self.tester.get_session_output(timeout=0.1)\n # run set_up function for each case\n self.set_up()\n case_obj()\n self._suite_result.test_case_passed()\n excel_report.save(self._suite_result)\n self.logger.info('Test Case %s Result PASSED:' % case_name)\n except VerifyFailure as v:\n self._suite_result.test_case_failed(str(v))\n excel_report.save(self._suite_result)\n self.logger.error('Test Case %s Result FAILED: ' %\n (case_name) + str(v))\n except KeyboardInterrupt:\n self._suite_result.test_case_blocked(\"Skipped\")\n excel_report.save(self._suite_result)\n self.logger.error('Test Case %s SKIPED: ' % (case_name))\n self.tear_down()\n raise KeyboardInterrupt(\"Stop SPDK\")\n except TimeoutException as e:\n msg = str(e)\n self._suite_result.test_case_failed(msg)\n excel_report.save(self._suite_result)\n self.logger.error('Test Case %s Result FAILED: ' %\n (case_name) + msg)\n self.logger.error('%s' % (e.get_output()))\n except Exception:\n trace = traceback.format_exc()\n self._suite_result.test_case_failed(trace)\n excel_report.save(self._suite_result)\n self.logger.error('Test Case %s Result ERROR: ' %\n (case_name) + trace)\n finally:\n self.tear_down()\n\n def execute_test_cases(self):\n \"\"\"\n Execute all test cases in one suite.\n \"\"\"\n if load_global_setting(FUNC_SETTING) == 'yes':\n for case_obj in self._get_functional_cases():\n self._execute_test_case(case_obj)\n if load_global_setting(PERF_SETTING) == 'yes':\n for case_obj in self._get_performance_cases():\n self._execute_test_case(case_obj)\n\n def get_result(self):\n return self._suite_result\n\n def execute_tear_downall(self):\n pass\n",
"id": "3363049",
"language": "Python",
"matching_score": 1.8899234533309937,
"max_stars_count": 3,
"path": "framework/test_case.py"
},
{
"content": "import time\nimport pexpect\nfrom pexpect import pxssh\nfrom exception import TimeoutException, SSHConnectionException, SSHSessionDeadException\nfrom logger import RED, GREEN\n\n\"\"\"\nModule handle ssh sessions between tester and DUT.\n\"\"\"\n\n\nclass SSHPexpect(object):\n\n def __init__(self, host, username, password):\n self.magic_prompt = \"MAGIC PROMPT\"\n try:\n self.session = pxssh.pxssh()\n self.host = host\n self.username = username\n self.password = password\n if ':' in host:\n self.ip = host.split(':')[0]\n self.port = int(host.split(':')[1])\n self.session.login(self.ip, self.username,\n self.password, original_prompt='[$#>]',\n port=self.port, login_timeout=20)\n else:\n self.session.login(self.host, self.username,\n self.password, original_prompt='[$#>]')\n self.send_expect('stty -echo', '# ', timeout=2)\n except Exception as e:\n print RED(e)\n if getattr(self, 'port', None):\n suggestion = \"\\nSuggession: Check if the fireware on [ %s ] \" % \\\n self.ip + \"is stoped\\n\"\n print GREEN(suggestion)\n raise SSHConnectionException(host)\n\n def init_log(self, logger, name):\n self.logger = logger\n self.logger.info(\"ssh %s@%s\" % (self.username, self.host))\n\n def send_expect_base(self, command, expected, timeout):\n self.clean_session()\n self.session.PROMPT = expected\n self.__sendline(command)\n self.__prompt(command, timeout)\n before = self.get_output_before()\n return before\n\n def send_expect(self, command, expected, timeout=15, verify=False):\n ret = self.send_expect_base(command, expected, timeout)\n if verify:\n ret_status = self.send_expect_base(\"echo $?\", expected, timeout)\n if not int(ret_status):\n return ret\n else:\n self.logger.error(\"Command: %s failure!\" % command)\n self.logger.error(ret)\n return int(ret_status)\n else:\n return ret\n\n def send_command(self, command, timeout=1):\n self.clean_session()\n self.__sendline(command)\n return self.get_session_before(timeout)\n\n def clean_session(self):\n self.get_session_before(timeout=0.01)\n\n def get_session_before(self, timeout=15):\n \"\"\"\n Get all output before timeout\n \"\"\"\n self.session.PROMPT = self.magic_prompt\n try:\n self.session.prompt(timeout)\n except Exception as e:\n pass\n before = self.get_output_before()\n self.__flush()\n return before\n\n def __flush(self):\n \"\"\"\n Clear all session buffer\n \"\"\"\n self.session.buffer = \"\"\n self.session.before = \"\"\n\n def __prompt(self, command, timeout):\n if not self.session.prompt(timeout):\n raise TimeoutException(command, self.get_output_all())\n\n def __sendline(self, command):\n if not self.isalive():\n raise SSHSessionDeadException(self.host)\n if len(command) == 2 and command.startswith('^'):\n self.session.sendcontrol(command[1])\n else:\n self.session.sendline(command)\n\n def get_output_before(self):\n if not self.isalive():\n raise SSHSessionDeadException(self.host)\n self.session.flush()\n before = self.session.before.rsplit('\\r\\n', 1)\n if before[0] == \"[PEXPECT]\":\n before[0] = \"\"\n\n return before[0]\n\n def get_output_all(self):\n self.session.flush()\n output = self.session.before\n output.replace(\"[PEXPECT]\", \"\")\n return output\n\n def close(self, force=False):\n if force is True:\n self.session.close()\n else:\n if self.isalive():\n self.session.logout()\n\n def isalive(self):\n return self.session.isalive()\n\n def copy_file_to(self, src, dst=\"~/\", password=''):\n \"\"\"\n Sends a local file to a remote place.\n \"\"\"\n command = 'scp {0} {1}@{2}:{3}'.format(\n src, 
self.username, self.host, dst)\n if ':' in self.host:\n command = 'scp -v -P {0} -o NoHostAuthenticationForLocalhost=yes {1} {2}@{3}:{4}'.format(\n str(self.port), src, self.username, self.ip, dst)\n else:\n command = 'scp -v {0} {1}@{2}:{3}'.format(\n src, self.username, self.host, dst)\n if password == '':\n self._spawn_scp(command, self.password)\n else:\n self._spawn_scp(command, password)\n\n def _spawn_scp(self, scp_cmd, password):\n self.logger.info(scp_cmd)\n p = pexpect.spawn(scp_cmd)\n time.sleep(0.5)\n ssh_newkey = 'Are you sure you want to continue connecting'\n i = p.expect([ssh_newkey, '[pP]assword', \"# \", pexpect.EOF,\n pexpect.TIMEOUT], 120)\n if i == 0:\n p.sendline('yes')\n i = p.expect([ssh_newkey, '[pP]assword', pexpect.EOF], 2)\n if i == 1:\n time.sleep(0.5)\n p.sendline(password)\n p.expect(\"Exit status 0\", 60)\n if i == 4:\n self.logger.error(\"SCP TIMEOUT error %d\" % i)\n p.close()\n",
"id": "5748946",
"language": "Python",
"matching_score": 3.55816912651062,
"max_stars_count": 3,
"path": "framework/ssh_pexpect.py"
},
{
"content": "\"\"\"\nUser-defined exceptions used across the framework.\n\"\"\"\n\n\nclass TimeoutException(Exception):\n\n \"\"\"\n Command execution timeout.\n \"\"\"\n\n def __init__(self, command, output):\n self.command = command\n self.output = output\n\n def __str__(self):\n msg = 'TIMEOUT on %s' % (self.command)\n return msg\n\n def get_output(self):\n return self.output\n\n\nclass VerifyFailure(Exception):\n\n \"\"\"\n To be used within the test cases to verify if a command output\n is as it was expected.\n \"\"\"\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return repr(self.value)\n\n\nclass SSHConnectionException(Exception):\n\n \"\"\"\n SSH connection error.\n \"\"\"\n\n def __init__(self, host):\n self.host = host\n\n def __str__(self):\n return 'Error trying to connect with %s' % self.host\n\n\nclass SSHSessionDeadException(Exception):\n\n \"\"\"\n SSH session is not alive.\n It can no longer be used.\n \"\"\"\n\n def __init__(self, host):\n self.host = host\n\n def __str__(self):\n return 'SSH session with %s has been dead' % self.host\n\n\nclass ConfigParseException(Exception):\n\n \"\"\"\n Configuration file parse failure exception.\n \"\"\"\n\n def __init__(self, conf_file):\n self.config = conf_file\n\n def __str__(self):\n return \"Faile to parse config file [%s]\" % (self.config)\n",
"id": "1090496",
"language": "Python",
"matching_score": 1.988037109375,
"max_stars_count": 3,
"path": "framework/exception.py"
}
] | 2.782286 |
maxwellharon | [
{
"content": "VERSION = (1, 5, 24)\n\ndefault_app_config = 'image.apps.ImageConfig'\n",
"id": "7475688",
"language": "Python",
"matching_score": 1.6396539211273193,
"max_stars_count": 0,
"path": "virtual/lib/python3.6/site-packages/image/__init__.py"
},
{
"content": "from django.apps import AppConfig\n\n\nclass MaxgramConfig(AppConfig):\n name = 'maxgram'\n",
"id": "8519186",
"language": "Python",
"matching_score": 0.06513911485671997,
"max_stars_count": 0,
"path": "maxgram/apps.py"
},
{
"content": "from flask import render_template,request,redirect,url_for,abort\nfrom . import main\nfrom ..models import Category, Pitch, Comment\nfrom .forms import CategoryForm,PitchForm,CommentForm\nfrom flask_login import login_required,current_user\n# from ..models import Category\n\n\n\n# Views\n@main.route('/')\ndef index():\n \"\"\"\n View root page for the application\n :return:\n \"\"\"\n categories = Category.get_categories()\n title = 'Home'\n return render_template('index.html', title=title, categories=categories)\n\n@main.route('/category/new', methods=['GET', 'POST'])\n@login_required\ndef new_category():\n form = CategoryForm()\n if form.validate_on_submit():\n name = form.name.data\n new_category = Category(name=name)\n new_category.save_category()\n return redirect(url_for('.index'))\n title = 'New Pitch Category'\n return render_template('new_category.html', category_form=form)\n\n\n@main.route('/category/<int:id>')\ndef category(id):\n category = Category.query.get(id)\n pitch = Pitch.query.filter_by(category_id=id)\n\n title = f'{category.name} page'\n\n return render_template('category.html',title=title, category=category,pitch=pitch)\n\n@main.route('/category/pitch/new/<int:id>', methods=['GET', 'POST'])\n\n@login_required\ndef new_pitch(id):\n category = Category.query.get(id)\n form = PitchForm()\n if form.validate_on_submit():\n pitch = form.pitch.data\n new_pitch = Pitch(pitch=pitch, user=current_user, category_id=id)\n new_pitch.save_pitch()\n return redirect(url_for('.category', id=id))\n\n title = 'New Pitch'\n return render_template('new_pitch.html', title=title, pitch_form=form)\n\n\n@main.route('/pitch/<int:id>')\ndef pitch(id):\n pitch = Pitch.query.get(id)\n comment = Comment.get_comments(pitch_id=id)\n\n # Comment.query.order_by(Comment.id.desc()).filter_by(pitch_id=id).all()\n\n title = f'Pitch { pitch.id }'\n return render_template('pitch.html',title=title, pitch=pitch, comment=comment)\n\n\n@main.route('/comment/new/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef new_comment(id):\n pitch = Pitch.query.get(id)\n # comment = Comment.query.get(pitch_id)\n\n form = CommentForm()\n if form.validate_on_submit():\n comment = form.comment.data\n new_comment = Comment(comment=comment, user=current_user, pitch_id=id)\n new_comment.save_comment()\n return redirect(url_for('.pitch', id=id))\n # title = f' Comment{comment.id}'\n return render_template('new_comment.html', comment_form=form, pitch=pitch)\n\n\n\n# @main.route('/test/<int:id>')\n# def comment(id):\n# comment = Comment.query.get(id)\n#\n# # title = f'Comment { comments.id }'\n# return render_template('test.html', comment=comment, )\n",
"id": "531193",
"language": "Python",
"matching_score": 1.7645405530929565,
"max_stars_count": 0,
"path": "app/main/views.py"
},
{
"content": "from django.views.generic import TemplateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n'''\nThis view function is responsible for when the user logs in and sees the homepage\n'''\n\nclass HomePage(LoginRequiredMixin, TemplateView):\n template_name = 'index.html'\n",
"id": "286788",
"language": "Python",
"matching_score": 1.4993656873703003,
"max_stars_count": 0,
"path": "ujirani/views.py"
},
{
"content": "from django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom . import views\n\napp_name = 'accounts'\nurlpatterns = [\n url(r'login/$', auth_views.LoginView.as_view(template_name='accounts/login.html'), name='login'),\n url(r'logout/$', auth_views.LogoutView.as_view(), name='logout'),\n url(r'signup/$', views.SignUp.as_view(), name='signup')\n]\n",
"id": "3734931",
"language": "Python",
"matching_score": 1.5677542686462402,
"max_stars_count": 2,
"path": "accounts/urls.py"
},
{
"content": "from django.conf.urls import url\nfrom . import views\nfrom django.conf.urls.static import static\n\napp_name = 'hoodwatch'\n\nurlpatterns = [\n url(r'^$', views.ListHoodwatchs.as_view(), name='all'),\n url(r'^new/$', views.CreateHoodwatch.as_view(), name='create'),\n url(r'businesses/in/(?P<slug>[-\\w]+)/$', views.SingleHoodwatch.as_view(), name='single'),\n url(r'join/(?P<slug>[-\\w]+)/$', views.JoinHoodwatch.as_view(), name='join'),\n url(r'leave/(?P<slug>[-\\w]+)/$', views.LeaveHoodwatch.as_view(), name='leave'),\n]\n",
"id": "2076394",
"language": "Python",
"matching_score": 1.7115588188171387,
"max_stars_count": 0,
"path": "hoodwatch/urls.py"
},
{
"content": "from django.contrib import messages\nfrom django.db import IntegrityError\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.urlresolvers import reverse\nfrom django.views import generic\nfrom hoodwatch.models import Hoodwatch, HoodwatchMember\n\n# Create your views here.\n\n\nclass CreateHoodwatch(LoginRequiredMixin, generic.CreateView):\n fields = ('name', 'location')\n model = Hoodwatch\n\n\nclass SingleHoodwatch(generic.DetailView):\n model = Hoodwatch\n\n\nclass ListHoodwatchs(generic.ListView):\n model = Hoodwatch\n\n'''\n This view function will enable new users join a given hoodwatch\n'''\nclass JoinHoodwatch(LoginRequiredMixin, generic.RedirectView):\n def get_redirect_url(self, *args, **kwargs):\n return reverse('hoodwatch:single', kwargs={'slug': self.kwargs.get('slug')})\n\n def get(self, request, *args, **kwargs):\n hoodwatch = get_object_or_404(Hoodwatch, slug=self.kwargs.get('slug'))\n\n try:\n HoodwatchMember.objects.create(user=self.request.user, hoodwatch=hoodwatch)\n except IntegrityError:\n messages.warning(self.request, ' You are already a member!')\n else:\n messages.success(self.request, 'Welcome to the Ujirani Community :-)')\n\n return super().get(request, *args, **kwargs)\n # https://rhettinger.wordpress.com/2011/05/26/super-considered-super/\n\n'''\n This view function will eneble users to exit a hoodwatch\n'''\nclass LeaveHoodwatch(LoginRequiredMixin, generic.RedirectView):\n def get_redirect_url(self, *args, **kwargs):\n return reverse('hoodwatch:single', kwargs={'slug': self.kwargs.get('slug')})\n\n def get(self, request, *args, **kwargs):\n\n try:\n membership = HoodwatchMember.objects.filter(user=self.request.user, hoodwatch__slug=self.kwargs.get('slug')).get()\n except HoodwatchMember.DoesNotExist:\n messages.warning(self.request, ' You are already a resident here :-)')\n else:\n membership.delete()\n messages.success(self.request, 'You are part of Ujirani :-)')\n\n return super().get(request, *args, **kwargs)\n",
"id": "10948026",
"language": "Python",
"matching_score": 3.632744550704956,
"max_stars_count": 0,
"path": "hoodwatch/views.py"
},
{
"content": "from django.db import models\nfrom django.utils.text import slugify\nfrom django import template\nfrom django.contrib.auth import get_user_model\nfrom django.core.urlresolvers import reverse\n\n\n# Create your models here.\nUser = get_user_model()\nreigster = template.Library\n\n\nclass Hoodwatch(models.Model):\n name = models.CharField(max_length=140, unique=True)\n slug = models.SlugField(allow_unicode=True, unique=True)\n location = models.CharField(max_length=140, blank=True, default='')\n occupants = models.ManyToManyField(User, through='HoodwatchMember')\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n self.slug = slugify(self.name)\n super().save(*args, **kwargs)\n\n def get_absolute_url(self):\n return reverse('hoodwatch:single', kwargs={'slug': self.slug})\n\n class Meta:\n ordering = ['name']\n\n\nclass HoodwatchMember(models.Model):\n hoodwatch = models.ForeignKey(Hoodwatch, related_name='memberships')\n user = models.ForeignKey(User, related_name='user_hoodwatchs')\n\n def __str__(self):\n return self.user.username\n\n class Meta:\n unique_together = ('hoodwatch', 'user')\n",
"id": "2123329",
"language": "Python",
"matching_score": 1.878794550895691,
"max_stars_count": 0,
"path": "hoodwatch/models.py"
},
{
"content": "from django.contrib import admin\nfrom . import models\n\n# Register your models here.\n\n\nclass HoodwatchMember(admin.TabularInline):\n model = models.HoodwatchMember\n\n\nadmin.site.register(models.Hoodwatch)\n",
"id": "9712850",
"language": "Python",
"matching_score": 1.7250932455062866,
"max_stars_count": 0,
"path": "hoodwatch/admin.py"
},
{
"content": "from django.contrib import admin\nfrom .models import Editor, Post, tags, Location\n\n# Register your models here.\n\n\nclass PostAdmin(admin.ModelAdmin):\n filter_horizontal = ('tags',)\n\n\nadmin.site.register(Editor)\nadmin.site.register(Post), PostAdmin\nadmin.site.register(tags)\nadmin.site.register(Location)\n",
"id": "11898565",
"language": "Python",
"matching_score": 1.4670225381851196,
"max_stars_count": 0,
"path": "news/admin.py"
},
{
"content": "from django.test import TestCase\nfrom django.test import TestCase\nfrom .models import Editor,Post,tags\n\n# Create your tests here.\nclass EditorTestClass(TestCase):\n\n # Set up method\n def setUp(self):\n self.james= Editor(first_name = 'James', last_name ='Muriuki', email ='<EMAIL>')\n# Testing instance\n def test_instance(self):\n self.assertTrue(isinstance(self.james,Editor))\n\n\n def test_save_method(self):\n self.james.save_editor()\n editors = Editor.objects.all()\n self.assertTrue(len(editors) > 0)\n\n\nclass PostTestClass(TestCase):\n\n def setUp(self):\n # Creating a new editor and saving it\n self.james= Editor(first_name = 'James', last_name ='Muriuki', email ='<EMAIL>')\n self.james.save_editor()\n\n # Creating a new tag and saving it\n self.new_tag = tags(name = 'testing')\n self.new_tag.save()\n\n self.new_post= Post(title = 'Test Post',post = 'This is a random test Post',editor = self.james)\n self.new_post.save()\n\n self.new_post.tags.add(self.new_tag)\n\n def tearDown(self):\n Editor.objects.all().delete()\n tags.objects.all().delete()\n Post.objects.all().delete()\n\n\n def test_get_news_today(self):\n today_news = Post.todays_news()\n self.assertTrue(len(today_news)>0)\n\n\n def test_get_news_by_date(self):\n test_date = '2017-03-17'\n date = dt.datetime.strptime(test_date, '%Y-%m-%d').date()\n news_by_date = Post.days_news(date)\n self.assertTrue(len(news_by_date) == 0)\n",
"id": "5570402",
"language": "Python",
"matching_score": 1.4732438325881958,
"max_stars_count": 0,
"path": "news/tests.py"
},
{
"content": "from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, Http404\nimport datetime as dt\nfrom .models import Post\n\n# Create your views here.\n\n\ndef welcome(request):\n # return HttpResponse('Welcome to the Moringa Tribune')\n return render(request, 'welcome.html')\n\n\ndef news_today(request):\n date = dt.date.today()\n news = Post.todays_news()\n posts = Post.objects.all()\n return render(request, 'all-news/today-news.html', {\"date\": date, \"news\": news, 'posts': posts})\n\n # FUNCTION TO CONVERT DATE OBJECT TO FIND EXACT DAY\n # day = convert_dates(date)\n\n\ndef past_days_news(request, past_date):\n try:\n # Converts data from the string Url\n date = dt.datetime.strptime(past_date, '%Y-%m-%d').date()\n except ValueError:\n # Raise 404 error when ValueError is thrown\n raise Http404()\n assert False\n\n if date == dt.date.today():\n return redirect(news_today)\n\n news = Post.days_news(date)\n return render(request, 'all-news/past-news.html', {\"date\": date, \"news\": news})\n\n\ndef search_results(request):\n\n if 'post' in request.GET and request.GET[\"post\"]:\n search_term = request.GET.get(\"post\")\n searched_posts = Post.search_by_title(search_term)\n message = f\"{search_term}\"\n\n return render(request, 'all-news/search.html', {\"message\": message, \"posts\": searched_posts})\n\n else:\n message = \"You haven't searched for any term\"\n return render(request, 'all-news/search.html', {\"message\": message})\n\n\ndef post(request, post_id):\n try:\n post = Post.objects.get(id=post_id)\n except DoesNotExist:\n raise Http404()\n return render(request, \"all-news/post.html\", {\"post\": post})\n# def convert_dates(dates):\n# # Function that gets the weekday number for the date.\n# day_number = dt.date.weekday(dates)\n#\n# days = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday',\"Sunday\"]\n#\n# # Returning the actual day of the week\n# day = days[day_number]\n# return day\n\n\ndef posts(request):\n posts = Post.objects.all()\n return render(request, \"all-news/postss.html\", {\"posts\": posts})\n",
"id": "276006",
"language": "Python",
"matching_score": 0.6466512084007263,
"max_stars_count": 0,
"path": "news/views.py"
},
{
"content": "#!/home/maxwell/Documents/moringa-school-projects/Personal-Blog/virtual/bin/python3.6\nfrom django.core import management\n\nif __name__ == \"__main__\":\n management.execute_from_command_line()\n",
"id": "10665076",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "virtual/bin/django-admin.py"
},
{
"content": "import os\nimport csv\nimport json\nfrom collections import OrderedDict\n\nprint(\"Welcome to the JSON-CSV Converter.\")\nprint(\"This script will convert a JSON file to CSV or a CSV file to JSON\")\n\n# SELECT AND OPEN A CSV OR JSON FILE\ntry:\n print(\"Which file do you want to convert?\")\n filename = input(\"Filename: \")\n extension = filename.split(\".\")[-1].lower()\n \n f = open(filename)\n\n if extension == \"csv\":\n # load csv file\n data = list(csv.reader(f))\n print(\"CSV file loaded\")\n elif extension == \"json\":\n # load json file\n data = json.load(f,object_pairs_hook=OrderedDict)\n print(\"JSON file loaded\")\n else:\n print(\"unsupported file type ... exiting\")\n exit()\nexcept Exception as e:\n # error loading file\n print(\"Error loading file ... exiting:\",e)\n exit()\nelse:\n # CONVERT CSV TO JSON\n if extension == \"csv\":\n keys = data[0]\n converted = []\n\n for i in range(1, len(data)):\n obj = OrderedDict()\n for j in range(0,len(keys)):\n if len(data[i][j]) > 0:\n obj[keys[j]] = data[i][j]\n else:\n obj[keys[j]] = None\n converted.append(obj)\n \n # CONVERT JSON TO CSV\n if extension == \"json\":\n\n # get all keys in json objects\n keys = []\n for i in range(0,len(data)):\n for j in data[i]:\n if j not in keys:\n keys.append(j)\n \n # map data in each row to key index\n converted = []\n converted.append(keys)\n\n for i in range(0,len(data)):\n row = []\n for j in range(0,len(keys)):\n if keys[j] in data[i]:\n row.append(data[i][keys[j]])\n else:\n row.append(None)\n converted.append(row)\n\n # CREATE OUTPUT FILE\n converted_file_basename = os.path.basename(filename).split(\".\")[0]\n converted_file_extension = \".json\" if extension == \"csv\" else \".csv\"\n\n if(os.path.isfile(converted_file_basename + converted_file_extension)):\n counter = 1\n while os.path.isfile(converted_file_basename + \" (\" + str(counter) + \")\" + converted_file_extension):\n counter += 1\n converted_file_basename = converted_file_basename + \" (\" + str(counter) + \")\"\n \n try:\n if converted_file_extension == \".json\":\n with open(converted_file_basename + converted_file_extension, 'w') as outfile:\n json.dump(converted, outfile)\n elif converted_file_extension == \".csv\":\n with open(converted_file_basename + converted_file_extension, 'w') as outfile:\n writer = csv.writer(outfile)\n writer.writerows(converted)\n except:\n print(\"Error creating file ... exiting\")\n else:\n print(\"File created:\",converted_file_basename + converted_file_extension)\n",
"id": "12522207",
"language": "Python",
"matching_score": 0.16619226336479187,
"max_stars_count": 14,
"path": "json-csv-converter.py"
},
{
"content": "# import unittest\n# from personalblog.models import User\n# from personalblog.models import Post\n#\n#\n#\n#\n\n\n\n\n\n\nimport unittest\nfrom personalblog.models import User\nfrom personalblog.models import Post\n# from personalblog.forms import RegistrationForm\nfrom personalblog import db\n\n# class TestRegistrationModel(unittest.TestCase):\n# def setUp(self):\n# '''\n# Set up method before each test case\n# ` '''\n# self.registration = registration(username = \"Emilly\", email='<EMAIL>' ,password = '<PASSWORD>' , confirm_password = '<PASSWORD>' , submit = 'submit')\n# def test_whether_username_registers(self):\n# self.assertEqual(self.registration.email, '<EMAIL>')\n\n\nclass TestUserModel(unittest.TestCase):\n\n '''\n This is a test class that checks whether our Registration form is working correctly\n '''\n\n def setUp(self):\n '''\n Set up method before each test case\n` '''\n self.user1 = User(username=\"admin\", email=\"<EMAIL>\",password='<PASSWORD>',image_file=\"img/new.jpg\" )\n def test_whether_instantiation_is_correct(self):\n self.assertEqual(self.user1.username, 'admin')\n\n def test_whether_email_works(self):\n self.assertEqual(self.user1.email, '<EMAIL>')\n\n def test_whether_password_works(self):\n self.assertEqual(self.user1.password, '<PASSWORD>')\n\n def test_whether_image_file_registers(self):\n self.assertEqual(self.user1.image_file, 'img/new.jpg')\n\n\n\n # def test_user_is_saved_in_database(self):\n # db.create.all()\n # db.session.add(self.user1)\n # db.session.commit()\n # users = User.query.all()\n # self.assertTrue(length(users)>0)\n\nclass TestPostModel(unittest.TestCase):\n def setUp(self):\n '''\n Set up method before each TestCase\n '''\n\n self.user2 = Post(id = \"1\" , title = \"Max\" , content =\"Max_I_am\")\n def test_whether_content_work(self):\n self.assertEqual(self.user2.content, 'Max_I_am')\n\n def test_whether_title_work(self):\n self.assertEqual(self.user2.title, 'Max')\n\n\n def test_whether_id_works(self):\n self.assertEqual(self.user2.id, '1')\n\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n",
"id": "5840491",
"language": "Python",
"matching_score": 5.009265422821045,
"max_stars_count": 0,
"path": "blog_test.py"
},
{
"content": "import unittest\nfrom personalblog.models import User\nfrom personalblog.models import Post\nfrom personalblog.forms import RegistrationForm\nfrom personalblog.forms import LoginForm\nfrom personalblog.forms import UpdateAccountForm\nfrom personalblog.forms import PostForm\n\n# class TestUserModel(unittest.Testcase):\n# def test(self):\n# pass\nclass TestUserModel(unittest.TestCase):\n def setUp(self):\n '''\n Setup method before each testcase\n '''\n self.user = User(id =\"1\" , username = \"Max\" , email= \"<EMAIL>\" )\n def id_test(self):\n self.assertTrue(self.user.id, '1')\n\n def username_Test(self):\n self.assertTrue(self.user.username, 'Max')\n\n def email_test(self):\n self.assertTrue(self.user.email, '<EMAIL>')\n\n\n\n\n\nclass TestPostModel(unittest.TestCase):\n def setUp(self):\n '''\n Set up method before each TestCase\n '''\n\n self.user2 = Post(id = \"1\" , title = \"Max\" , content =\"Max_I_am\")\n def test_whether_content_work(self):\n self.assertEqual(self.user2.content, 'Max_I_am')\n\n def test_whether_title_work(self):\n self.assertEqual(self.user2.title, 'Max')\n\n\n def test_whether_id_works(self):\n self.assertEqual(self.user2.id, '1')\n\n\nclass TestRegistrationForm(unittest.TestCase):\n def setUp(self):\n '''\n Set up method before each TestCase\n '''\n\n self.registration = RegistrationForm(username=\"Max\",email=\"<EMAIL>\",password=\"<PASSWORD>\",confirm_password=\"<PASSWORD>\",submit=\"signup\")\n def registration_Username(self):\n self.assertEqual(self.registration.username, 'Max')\n\n\n def registration_Email(self):\n self.assertEqual(self.registration.email, '<EMAIL>')\n\n\n def registration_Password(self):\n self.assertEqual(self.registration.password, '<PASSWORD>')\n\n\n def registration_confirm_password(self):\n self.assertEqual(self.registration.confirm_password, '<PASSWORD>')\n\n\n def registration_submit(self):\n self.assertEqual(self.registration.submit, 'signup')\n\n\nclass LoginForm(unittest.TestCase):\n def setUp(self):\n '''\n Setup method before each testcase\n '''\n self.login = LoginForm(email= \"<EMAIL>\" ,password = \"<PASSWORD>\" , submit = \"Ok\")\n def login_email(self):\n self.assertEqual(self.login.email, '<EMAIL>')\n # result = self.login.<EMAIL>\n # self.assertEqual(<EMAIL>, result)\n\n\n\n def login_password(self):\n # result = self.login_password, '<PASSWORD>'\n # self.assertCountEqual(1234, result)\n self.assertEqual(self.loginForm.password, '<PASSWORD>')\n\n\n def login_submit(self):\n # result = self.login_submit, 'ok'\n # self.assertRaises(ok, result)\n self.assertEqual(self.loginForm.submit, 'ok')\n\n\nclass UpdateAccountForm(unittest.TestCase):\n def setUp(self):\n '''\n Setup method before each testcase\n '''\n self.update = UpdateAccountForm(username=\"maxwell\",email=\"<EMAIL>\", submit=\"ok\")\n def username_Test(self):\n # result = self.username_Test, 'maxwell'\n # self.assertRaises(Maxwell, result)\n self.assertEqual(self.update.username, 'maxwell')\n # pass\n\n def email_test_Update(self):\n # result = self.email_test, '<EMAIL>'\n # self.assertTrue(<EMAIL>, result)\n self.assertEqual(self.update.email, '<EMAIL>')\n # pass\n\n def SubmitField_Test(self):\n # result = self.SubmitField_Test, 'ok'\n # self.assertTrue(ok, result)\n self.assertEqual(self.update.submit, 'ok')\n # pass\n\nclass PostForm(unittest.TestCase):\n def setUp(self):\n '''\n Setup method before each testcase\n '''\n self.post = PostForm(title = \"RegiBiz\",content = \"Registration of different businesses\", submit = \"post\" )\n def 
title_Test(self):\n # result = self.title_Test, 'RegiBiz'\n # self.assertTrue(RegiBiz, result)\n self.assertEqual(self.post.title, 'RegiBiz')\n # pass\n\n def content_test(self):\n # result = self.content_Test, 'Registration of different businesses'\n # self.assertTrue(Registration of different businesses, result)\n self.assertEqual(self.post.content, 'Registration of different businesses')\n # pass\n\n\n def submit_Test(self):\n # result = self.submit_Test, 'post'\n # self.assertTrue(post, result)\n self.assertEqual(self.post.content, 'Registration of different businesses')\n # pass\n\n\nclass SearchForm(unittest.TestCase):\n def setUp(self):\n '''\n Setup method before each testcase\n '''\n\n self.search = SearchForm(search=\"post\",submit=\"search\")\n def search_Test(self):\n # result = self.search_Test, 'post'\n # self.assertTrue(post, result)\n self.assertEqual(self.search.search, 'post')\n # pass\n\n def submitSearch_Test(self):\n # result = self.submitSearch_Test, 'search'\n # self.assertTrue(search, result)\n self.assertEqual(self.search.post, 'post')\n # pass\n\n # def fname(arg):\n # pass\n\n\n\n\n\n\n\n\n\n\n# if __name__ == '__main__':\n# unittest.main()\n",
"id": "11280037",
"language": "Python",
"matching_score": 2.4225683212280273,
"max_stars_count": 0,
"path": "unit_test.py"
},
{
"content": "import unittest\nimport pyperclip\nfrom credentials import Credentials\nfrom user import User\n\nclass TestCredentials(unittest.TestCase):\n '''\n This is a Yest class that defines test cases for our credentials class behaviours.\n\n Args:\n unittest.TestCase: Testcase class that helps in creating test cases\n '''\n def setUp(self):\n '''\n set up method to before each test case.\n '''\n\n self.new_credentials = Credentials(\"Max\", \"_Max\", \"<EMAIL>\", \"<PASSWORD>\")\n\n def tearDown(self):\n '''\n Method that does clean up after each test case has run\n '''\n Credentials.credentials_list = []\n\n\n def test_credentials_init(self):\n '''\n Created a test that tests if the ibject has been initialized properly\n '''\n self.assertEqual(self.new_credentials.account_name, \"Max\")\n self.assertEqual(self.new_credentials.user_name, \"_Max\")\n self.assertEqual(self.new_credentials.email, \"<EMAIL>\")\n self.assertEqual(self.new_credentials.password, \"<PASSWORD>\")\n\n\n def test_save_credentials(self):\n '''\n Test case to test if the contact object is saved into the credentials\n list\n '''\n self.new_credentials.save_credentials()\n self.assertEqual(len(Credentials.credentials_list), 1)\n\n def test_save_multiple_credentials(self):\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\"Test\", \"t_un\", \"t_u@gmail\", \"<PASSWORD>\")\n test_credentials.save_credentials()\n self.assertEqual(len(Credentials.credentials_list), 2)\n\n\n def test_delete_credentials(self):\n '''\n Test if we can remove credentials from our credentials contact_list\n '''\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\"Test\", \"t_un\", \"t_u@gmail\", \"<PASSWORD>\")\n test_credentials.save_credentials()\n\n self.new_credentials.delete_credentials()\n self.assertEqual(len(Credentials.credentials_list), 1)\n\n\n def test_find_credentials_by_account_name(self):\n '''\n this will check if we can find a contact by phone number and display\n information\n '''\n\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\"Twitter\", \"t_uname\", \"<EMAIL>\", \"<PASSWORD>\")\n test_credentials.save_credentials()\n\n found_credentials = Credentials.find_by_account_name(\"Twitter\")\n\n self.assertEqual(found_credentials.email, test_credentials.email)\n\n\n def test_credentials_exist(self):\n '''\n Check if a credentials exist and return a boolean\n '''\n\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\"Twitter\", \"t_uname\", \"<EMAIL>\", \"<PASSWORD>\")\n test_credentials.save_credentials()\n\n credentials_exist = Credentials.credentials_exist\n self.assertTrue(credentials_exist)\n\n\n def test_display_all_credentials(self):\n '''\n test method that returns a list of all the credentials saved.\n '''\n\n self.assertEqual(Credentials.display_credentials(), Credentials.credentials_list)\n\n\n def test_copy_account(self):\n '''\n Test to confirm that we are copying account credentials found\n '''\n\n self.new_credentials.save_credentials()\n Credentials.copy_username(\"Max\")\n\n self.assertEqual(self.new_credentials.account_name, pyperclip.paste())\n\nclass TestUser(unittest.TestCase):\n def setUp(self):\n '''\n method run before each test case\n '''\n self.new_user = User(\"KimaniNjoroge\", \"<PASSWORD>\")\n\n\n def tearDown(self):\n '''\n Method to give an empty array before each test for more accurate results\n '''\n User.user_list = []\n def test_user_init(self):\n '''\n method to test if our users are being 
instantiated correctly\n '''\n self.assertEqual(self.new_user.login_name, \"KimaniNjoroge\")\n self.assertEqual(self.new_user.password, \"<PASSWORD>\")\n\n\n\n def test_save_user(self):\n '''\n Method to test if users are being saved\n '''\n self.new_user.save_user()\n self.assertEqual(len(User.user_list), 1)\n\n\n def test_save_multiple_user(self):\n '''\n Method to test if multiple users ate being saved\n '''\n self.new_user.save_user()\n test_user = User(\"BobBo\", \"<PASSWORD>\")\n test_user.save_user()\n\n self.assertEqual(len(User.user_list), 2)\n\n def test_login(self):\n '''\n method to test login credentials\n '''\n self.new_user.save_user()\n test_user = User(\"maxwell_haron\", \"password\")\n test_user.save_user()\n\n login_credentials = User.user_login(\"maxwell_haron\")\n self.assertEqual(login_credentials.login_name, test_user.login_name)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n",
"id": "4414653",
"language": "Python",
"matching_score": 3.013181209564209,
"max_stars_count": 0,
"path": "credentials_test.py"
},
{
"content": "import pyperclip\nimport random\n\nclass Credentials:\n '''\n This is a class that generates the credentials of the user in question\n '''\n credentials_list = []\n user_password_list = []\n\n\n def __init__(self, account_name, user_name, email, password):\n '''\n This is a method that helps us define the properties of the users credentials\n '''\n self.account_name = account_name\n self.user_name = user_name\n self.email = email\n self.password = password\n\n def save_credentials(self):\n '''\n save_credentials method save credentials object into credentials_list\n '''\n Credentials.credentials_list.append(self)\n\n def delete_credentials(self):\n '''\n delete_contact method deletes an object from the credentials_list\n '''\n Credentials.credentials_list.remove(self)\n\n @classmethod\n def find_by_account_name(cls, name):\n '''\n This is a method that in whicha user can fin credentias by name search\n '''\n for credentials in cls.credentials_list:\n if credentials.account_name == name:\n return credentials\n\n @classmethod\n def credentials_exist(cls, name):\n '''\n Method that check if the credentials are already on the contact_list\n and return true(if it exists) and false(if it does not)\n '''\n\n for credentials in cls.credentials_list:\n if credentials.account_name == name:\n return True\n\n return False\n",
"id": "1398324",
"language": "Python",
"matching_score": 2.5335891246795654,
"max_stars_count": 0,
"path": "credentials.py"
}
] | 1.675606 |
neeldug | [
{
"content": "import pandas as pd\nimport matplotlib.pyplot as plot\nimport re as re\n\ndef compare(lt,out):\n\n rVal = re.split(\"(?:[a-zA-Z]=)(\\d+)\", lt)\n rVal = float(rVal[1])\n a = pd.read_csv(lt)\n print(out)\n b = pd.read_csv(out)\n \n a.columns = a.columns.str.lower()\n b.columns = b.columns.str.lower()\n a.set_index('time')\n b.set_index('time')\n c = a.copy()\n means = list()\n means.append(rVal)\n upTo = int((rVal*10e-6)/1e-5)\n print(upTo)\n a = a.iloc[:upTo]\n b = b.iloc[:upTo]\n header = list()\n header.append(\"Resistor Value\")\n for i in a.columns:\n if i == \"time\" or i==\"Time\":\n continue\n else:\n header.append(i)\n c[i] = 100*(abs(a[i]-b[i])/a[i])\n means.append(abs(c[i].mean()))\n\n return (c,means,header)\n\n\nfrom os import listdir\nfrom os.path import isfile, join\n\nfolder = \"RC_test\"\n\nLTfol = \"LT.csvs\"\nfol404 = \"out.csvs\"\nltFiles= [f for f in listdir(LTfol) if isfile(join(LTfol, f))]\nspiceFiles = [f for f in listdir(fol404) if isfile(join(fol404, f))]\n\nltFiles = [x for x in ltFiles if \"new\" in x]\nmeans = list()\nheader = list()\nfor ltFile in ltFiles:\n file404 = ltFile.replace('new','')\n resultTuple = compare(\"../LT.csvs/\"+ltFile,\"../out.csvs/\"+file404)\n means.append(resultTuple[1])\n a = resultTuple[0]\n header = resultTuple[2]\n b = a.plot(kind='scatter',marker='x',x='time',y='v(n001)',color='red')\n b.grid(color='black', linestyle='-', linewidth=0.4)\n b.set(xlabel=\"Time /s\", ylabel=\"Percentage Error in Voltage /%\",title=\"Pecentage Error of Voltage across resistor\\n vs Time /s \" + file404.replace(',.csv','')) \n\nx = pd.DataFrame.from_records(means)\nx.columns = header\nx.set_index('Resistor Value')\nprint(x)\n\nfor i in x.columns[1:]:\n b = x.plot(kind='scatter',marker='x',x='Resistor Value',y=i,color='red')\n b.grid(color='black', linestyle='-', linewidth=0.4)\n b.set(xlabel=\"Resistor Value /Ω\", ylabel=\"Mean Percentage Error in \" +i + \"/%\",title=\"Average Percentage Error in \" + i + \"/%\" + \"\\nIn first time constant for various resistor values /Ω\") \n",
"id": "10925268",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "test/EvalTests/RC_test/PythonScripts/diff.py"
},
{
"content": "# Node generating script\nimport sys\nfor num_nodes in range(50,1000, 50):\n f = open(\"netlist\" + str(num_nodes) + \".cir\", \"a\")\n f.write(\"*\" + str(num_nodes))\n f.write(\"V1 1 0 SINE(0 100 1)\")\n for i in range(1,num_nodes):\n resistorSmall = \"R\" + str(i) + \" \" + str(i) + \" \" + str(i + 1) + \" 1\"\n resistorLarge = \"R\" + str(i+num_nodes) + \" \" + str(i) + \" \" + str(0) + \" 1000\"\n f.write(resistorSmall)\n f.write(resistorLarge)\n f.write(\".tran 0 3 0 0.001\")\n f.write(\".end\")\n f.close()\n\n \n",
"id": "3118186",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "test/EvalTests/RC_test/PythonScripts/nodeGenerator.py"
}
] | 0 |
KMilhan | [
{
"content": "\"\"\"Proto file and its python compiled module\"\"\"\n",
"id": "5634853",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "simplewc/protos/__init__.py"
},
{
"content": "\"\"\"Generate a single paper you read before an important test\"\"\"\n",
"id": "3809204",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "pytestdocgen/__init__.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n Pygments Markdown lexer tests\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport pytest\n\nfrom pygments.lexers.markup import MarkdownLexer\n\n\n@pytest.fixture(scope='module')\ndef lexer():\n yield MarkdownLexer()\n\n\ndef assert_same_text(lexer, text):\n \"\"\"Show that lexed markdown does not remove any content. \"\"\"\n tokens = list(lexer.get_tokens_unprocessed(text))\n output = ''.join(t[2] for t in tokens)\n assert text == output\n\n\ndef test_code_fence(lexer):\n assert_same_text(lexer, r'```\\nfoo\\n```\\n')\n\n\ndef test_code_fence_gsm(lexer):\n assert_same_text(lexer, r'```markdown\\nfoo\\n```\\n')\n\n\ndef test_code_fence_gsm_with_no_lexer(lexer):\n assert_same_text(lexer, r'```invalid-lexer\\nfoo\\n```\\n')\n",
"id": "6751944",
"language": "Python",
"matching_score": 1.8623170852661133,
"max_stars_count": 1,
"path": "tests/test_markdown_lexer.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n Basic JavaLexer Test\n ~~~~~~~~~~~~~~~~~~~~\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport pytest\n\nfrom pygments.token import Text, Name, Keyword, Punctuation, String\nfrom pygments.lexers import KotlinLexer\n\n\n@pytest.fixture(scope='module')\ndef lexer():\n yield KotlinLexer()\n\n\ndef test_can_cope_with_backtick_names_in_functions(lexer):\n fragment = u'fun `wo bble`'\n tokens = [\n (Keyword, u'fun'),\n (Text, u' '),\n (Name.Function, u'`wo bble`'),\n (Text, u'\\n')\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_can_cope_with_commas_and_dashes_in_backtick_Names(lexer):\n fragment = u'fun `wo,-bble`'\n tokens = [\n (Keyword, u'fun'),\n (Text, u' '),\n (Name.Function, u'`wo,-bble`'),\n (Text, u'\\n')\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_can_cope_with_destructuring(lexer):\n fragment = u'val (a, b) = '\n tokens = [\n (Keyword, u'val'),\n (Text, u' '),\n (Punctuation, u'('),\n (Name.Property, u'a'),\n (Punctuation, u','),\n (Text, u' '),\n (Name.Property, u'b'),\n (Punctuation, u')'),\n (Text, u' '),\n (Punctuation, u'='),\n (Text, u' '),\n (Text, u'\\n')\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_can_cope_generics_in_destructuring(lexer):\n fragment = u'val (a: List<Something>, b: Set<Wobble>) ='\n tokens = [\n (Keyword, u'val'),\n (Text, u' '),\n (Punctuation, u'('),\n (Name.Property, u'a'),\n (Punctuation, u':'),\n (Text, u' '),\n (Name.Property, u'List'),\n (Punctuation, u'<'),\n (Name, u'Something'),\n (Punctuation, u'>'),\n (Punctuation, u','),\n (Text, u' '),\n (Name.Property, u'b'),\n (Punctuation, u':'),\n (Text, u' '),\n (Name.Property, u'Set'),\n (Punctuation, u'<'),\n (Name, u'Wobble'),\n (Punctuation, u'>'),\n (Punctuation, u')'),\n (Text, u' '),\n (Punctuation, u'='),\n (Text, u'\\n')\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_can_cope_with_generics(lexer):\n fragment = u'inline fun <reified T : ContractState> VaultService.queryBy(): Vault.Page<T> {'\n tokens = [\n (Keyword, u'inline fun'),\n (Text, u' '),\n (Punctuation, u'<'),\n (Keyword, u'reified'),\n (Text, u' '),\n (Name, u'T'),\n (Text, u' '),\n (Punctuation, u':'),\n (Text, u' '),\n (Name, u'ContractState'),\n (Punctuation, u'>'),\n (Text, u' '),\n (Name.Class, u'VaultService'),\n (Punctuation, u'.'),\n (Name.Function, u'queryBy'),\n (Punctuation, u'('),\n (Punctuation, u')'),\n (Punctuation, u':'),\n (Text, u' '),\n (Name, u'Vault'),\n (Punctuation, u'.'),\n (Name, u'Page'),\n (Punctuation, u'<'),\n (Name, u'T'),\n (Punctuation, u'>'),\n (Text, u' '),\n (Punctuation, u'{'),\n (Text, u'\\n')\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_should_cope_with_multiline_comments(lexer):\n fragment = u'\"\"\"\\nthis\\nis\\na\\ncomment\"\"\"'\n tokens = [\n (String, u'\"\"\"\\nthis\\nis\\na\\ncomment\"\"\"'),\n (Text, u'\\n')\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n",
"id": "543587",
"language": "Python",
"matching_score": 1.6992381811141968,
"max_stars_count": 1,
"path": "tests/test_kotlin.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n pygments.lexers.praat\n ~~~~~~~~~~~~~~~~~~~~~\n\n Lexer for Praat\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nfrom pygments.lexer import RegexLexer, words, bygroups, include\nfrom pygments.token import Name, Text, Comment, Keyword, String, Punctuation, Number, \\\n Operator\n\n__all__ = ['PraatLexer']\n\n\nclass PraatLexer(RegexLexer):\n \"\"\"\n For `Praat <http://www.praat.org>`_ scripts.\n\n .. versionadded:: 2.1\n \"\"\"\n\n name = 'Praat'\n aliases = ['praat']\n filenames = ['*.praat', '*.proc', '*.psc']\n\n keywords = (\n 'if', 'then', 'else', 'elsif', 'elif', 'endif', 'fi', 'for', 'from', 'to',\n 'endfor', 'endproc', 'while', 'endwhile', 'repeat', 'until', 'select', 'plus',\n 'minus', 'demo', 'assert', 'stopwatch', 'nocheck', 'nowarn', 'noprogress',\n 'editor', 'endeditor', 'clearinfo',\n )\n\n functions_string = (\n 'backslashTrigraphsToUnicode', 'chooseDirectory', 'chooseReadFile',\n 'chooseWriteFile', 'date', 'demoKey', 'do', 'environment', 'extractLine',\n 'extractWord', 'fixed', 'info', 'left', 'mid', 'percent', 'readFile', 'replace',\n 'replace_regex', 'right', 'selected', 'string', 'unicodeToBackslashTrigraphs',\n )\n\n functions_numeric = (\n 'abs', 'appendFile', 'appendFileLine', 'appendInfo', 'appendInfoLine', 'arccos',\n 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'barkToHertz',\n 'beginPause', 'beginSendPraat', 'besselI', 'besselK', 'beta', 'beta2',\n 'binomialP', 'binomialQ', 'boolean', 'ceiling', 'chiSquareP', 'chiSquareQ',\n 'choice', 'comment', 'cos', 'cosh', 'createDirectory', 'deleteFile',\n 'demoClicked', 'demoClickedIn', 'demoCommandKeyPressed',\n 'demoExtraControlKeyPressed', 'demoInput', 'demoKeyPressed',\n 'demoOptionKeyPressed', 'demoShiftKeyPressed', 'demoShow', 'demoWaitForInput',\n 'demoWindowTitle', 'demoX', 'demoY', 'differenceLimensToPhon', 'do', 'editor',\n 'endPause', 'endSendPraat', 'endsWith', 'erb', 'erbToHertz', 'erf', 'erfc',\n 'exitScript', 'exp', 'extractNumber', 'fileReadable', 'fisherP', 'fisherQ',\n 'floor', 'gaussP', 'gaussQ', 'hertzToBark', 'hertzToErb', 'hertzToMel',\n 'hertzToSemitones', 'imax', 'imin', 'incompleteBeta', 'incompleteGammaP', 'index',\n 'index_regex', 'integer', 'invBinomialP', 'invBinomialQ', 'invChiSquareQ', 'invFisherQ',\n 'invGaussQ', 'invSigmoid', 'invStudentQ', 'length', 'ln', 'lnBeta', 'lnGamma',\n 'log10', 'log2', 'max', 'melToHertz', 'min', 'minusObject', 'natural', 'number',\n 'numberOfColumns', 'numberOfRows', 'numberOfSelected', 'objectsAreIdentical',\n 'option', 'optionMenu', 'pauseScript', 'phonToDifferenceLimens', 'plusObject',\n 'positive', 'randomBinomial', 'randomGauss', 'randomInteger', 'randomPoisson',\n 'randomUniform', 'real', 'readFile', 'removeObject', 'rindex', 'rindex_regex',\n 'round', 'runScript', 'runSystem', 'runSystem_nocheck', 'selectObject',\n 'selected', 'semitonesToHertz', 'sentence', 'sentencetext', 'sigmoid', 'sin', 'sinc',\n 'sincpi', 'sinh', 'soundPressureToPhon', 'sqrt', 'startsWith', 'studentP',\n 'studentQ', 'tan', 'tanh', 'text', 'variableExists', 'word', 'writeFile', 'writeFileLine',\n 'writeInfo', 'writeInfoLine',\n )\n\n functions_array = (\n 'linear', 'randomGauss', 'randomInteger', 'randomUniform', 'zero',\n )\n\n objects = (\n 'Activation', 'AffineTransform', 'AmplitudeTier', 'Art', 'Artword',\n 'Autosegment', 'BarkFilter', 'BarkSpectrogram', 'CCA', 'Categories',\n 'Cepstrogram', 'Cepstrum', 'Cepstrumc', 'ChebyshevSeries', 
'ClassificationTable',\n 'Cochleagram', 'Collection', 'ComplexSpectrogram', 'Configuration', 'Confusion',\n 'ContingencyTable', 'Corpus', 'Correlation', 'Covariance',\n 'CrossCorrelationTable', 'CrossCorrelationTables', 'DTW', 'DataModeler',\n 'Diagonalizer', 'Discriminant', 'Dissimilarity', 'Distance', 'Distributions',\n 'DurationTier', 'EEG', 'ERP', 'ERPTier', 'EditCostsTable', 'EditDistanceTable',\n 'Eigen', 'Excitation', 'Excitations', 'ExperimentMFC', 'FFNet', 'FeatureWeights',\n 'FileInMemory', 'FilesInMemory', 'Formant', 'FormantFilter', 'FormantGrid',\n 'FormantModeler', 'FormantPoint', 'FormantTier', 'GaussianMixture', 'HMM',\n 'HMM_Observation', 'HMM_ObservationSequence', 'HMM_State', 'HMM_StateSequence',\n 'Harmonicity', 'ISpline', 'Index', 'Intensity', 'IntensityTier', 'IntervalTier',\n 'KNN', 'KlattGrid', 'KlattTable', 'LFCC', 'LPC', 'Label', 'LegendreSeries',\n 'LinearRegression', 'LogisticRegression', 'LongSound', 'Ltas', 'MFCC', 'MSpline',\n 'ManPages', 'Manipulation', 'Matrix', 'MelFilter', 'MelSpectrogram',\n 'MixingMatrix', 'Movie', 'Network', 'Object', 'OTGrammar', 'OTHistory', 'OTMulti',\n 'PCA', 'PairDistribution', 'ParamCurve', 'Pattern', 'Permutation', 'Photo',\n 'Pitch', 'PitchModeler', 'PitchTier', 'PointProcess', 'Polygon', 'Polynomial',\n 'PowerCepstrogram', 'PowerCepstrum', 'Procrustes', 'RealPoint', 'RealTier',\n 'ResultsMFC', 'Roots', 'SPINET', 'SSCP', 'SVD', 'Salience', 'ScalarProduct',\n 'Similarity', 'SimpleString', 'SortedSetOfString', 'Sound', 'Speaker',\n 'Spectrogram', 'Spectrum', 'SpectrumTier', 'SpeechSynthesizer', 'SpellingChecker',\n 'Strings', 'StringsIndex', 'Table', 'TableOfReal', 'TextGrid', 'TextInterval',\n 'TextPoint', 'TextTier', 'Tier', 'Transition', 'VocalTract', 'VocalTractTier',\n 'Weight', 'WordList',\n )\n\n variables_numeric = (\n 'macintosh', 'windows', 'unix', 'praatVersion', 'pi', 'e', 'undefined',\n )\n\n variables_string = (\n 'praatVersion', 'tab', 'shellDirectory', 'homeDirectory',\n 'preferencesDirectory', 'newline', 'temporaryDirectory',\n 'defaultDirectory',\n )\n\n object_attributes = (\n 'ncol', 'nrow', 'xmin', 'ymin', 'xmax', 'ymax', 'nx', 'ny', 'dx', 'dy',\n )\n\n tokens = {\n 'root': [\n (r'(\\s+)(#.*?$)', bygroups(Text, Comment.Single)),\n (r'^#.*?$', Comment.Single),\n (r';[^\\n]*', Comment.Single),\n (r'\\s+', Text),\n\n (r'\\bprocedure\\b', Keyword, 'procedure_definition'),\n (r'\\bcall\\b', Keyword, 'procedure_call'),\n (r'@', Name.Function, 'procedure_call'),\n\n include('function_call'),\n\n (words(keywords, suffix=r'\\b'), Keyword),\n\n (r'(\\bform\\b)(\\s+)([^\\n]+)',\n bygroups(Keyword, Text, String), 'old_form'),\n\n (r'(print(?:line|tab)?|echo|exit|asserterror|pause|send(?:praat|socket)|'\n r'include|execute|system(?:_nocheck)?)(\\s+)',\n bygroups(Keyword, Text), 'string_unquoted'),\n\n (r'(goto|label)(\\s+)(\\w+)', bygroups(Keyword, Text, Name.Label)),\n\n include('variable_name'),\n include('number'),\n\n (r'\"', String, 'string'),\n\n (words((objects), suffix=r'(?=\\s+\\S+\\n)'), Name.Class, 'string_unquoted'),\n\n (r'\\b[A-Z]', Keyword, 'command'),\n (r'(\\.{3}|[)(,])', Punctuation),\n ],\n 'command': [\n (r'( ?[\\w()-]+ ?)', Keyword),\n\n include('string_interpolated'),\n\n (r'\\.{3}', Keyword, ('#pop', 'old_arguments')),\n (r':', Keyword, ('#pop', 'comma_list')),\n (r'\\s', Text, '#pop'),\n ],\n 'procedure_call': [\n (r'\\s+', Text),\n (r'([\\w.]+)(:|\\s*\\()',\n bygroups(Name.Function, Text), '#pop'),\n (r'([\\w.]+)', Name.Function, ('#pop', 'old_arguments')),\n ],\n 'procedure_definition': [\n 
(r'\\s', Text),\n (r'([\\w.]+)(\\s*?[(:])',\n bygroups(Name.Function, Text), '#pop'),\n (r'([\\w.]+)([^\\n]*)',\n bygroups(Name.Function, Text), '#pop'),\n ],\n 'function_call': [\n (words(functions_string, suffix=r'\\$(?=\\s*[:(])'), Name.Function, 'function'),\n (words(functions_array, suffix=r'#(?=\\s*[:(])'), Name.Function, 'function'),\n (words(functions_numeric, suffix=r'(?=\\s*[:(])'), Name.Function, 'function'),\n ],\n 'function': [\n (r'\\s+', Text),\n (r':', Punctuation, ('#pop', 'comma_list')),\n (r'\\s*\\(', Punctuation, ('#pop', 'comma_list')),\n ],\n 'comma_list': [\n (r'(\\s*\\n\\s*)(\\.{3})', bygroups(Text, Punctuation)),\n\n (r'(\\s*[])\\n])', Text, '#pop'),\n\n (r'\\s+', Text),\n (r'\"', String, 'string'),\n (r'\\b(if|then|else|fi|endif)\\b', Keyword),\n\n include('function_call'),\n include('variable_name'),\n include('operator'),\n include('number'),\n\n (r'[()]', Text),\n (r',', Punctuation),\n ],\n 'old_arguments': [\n (r'\\n', Text, '#pop'),\n\n include('variable_name'),\n include('operator'),\n include('number'),\n\n (r'\"', String, 'string'),\n (r'[^\\n]', Text),\n ],\n 'number': [\n (r'\\n', Text, '#pop'),\n (r'\\b\\d+(\\.\\d*)?([eE][-+]?\\d+)?%?', Number),\n ],\n 'object_reference': [\n include('string_interpolated'),\n (r'([a-z][a-zA-Z0-9_]*|\\d+)', Name.Builtin),\n\n (words(object_attributes, prefix=r'\\.'), Name.Builtin, '#pop'),\n\n (r'\\$', Name.Builtin),\n (r'\\[', Text, '#pop'),\n ],\n 'variable_name': [\n include('operator'),\n include('number'),\n\n (words(variables_string, suffix=r'\\$'), Name.Variable.Global),\n (words(variables_numeric,\n suffix=r'(?=[^a-zA-Z0-9\\._\"\\'\\$#\\[:\\(]|\\s|^|$)'),\n Name.Variable.Global),\n\n (words(objects, prefix=r'\\b', suffix=r\"(_)\"),\n bygroups(Name.Builtin, Name.Builtin),\n 'object_reference'),\n\n (r'\\.?_?[a-z][\\w.]*(\\$|#)?', Text),\n (r'[\\[\\]]', Punctuation, 'comma_list'),\n\n include('string_interpolated'),\n ],\n 'operator': [\n (r'([+\\/*<>=!-]=?|[&*|][&*|]?|\\^|<>)', Operator),\n (r'(?<![\\w.])(and|or|not|div|mod)(?![\\w.])', Operator.Word),\n ],\n 'string_interpolated': [\n (r'\\'[_a-z][^\\[\\]\\'\":]*(\\[([\\d,]+|\"[\\w\\d,]+\")\\])?(:[0-9]+)?\\'',\n String.Interpol),\n ],\n 'string_unquoted': [\n (r'(\\n\\s*)(\\.{3})', bygroups(Text, Punctuation)),\n\n (r'\\n', Text, '#pop'),\n (r'\\s', Text),\n\n include('string_interpolated'),\n\n (r\"'\", String),\n (r\"[^'\\n]+\", String),\n ],\n 'string': [\n (r'(\\n\\s*)(\\.{3})', bygroups(Text, Punctuation)),\n\n (r'\"', String, '#pop'),\n\n include('string_interpolated'),\n\n (r\"'\", String),\n (r'[^\\'\"\\n]+', String),\n ],\n 'old_form': [\n (r'(\\s+)(#.*?$)', bygroups(Text, Comment.Single)),\n (r'\\s+', Text),\n\n (r'(optionmenu|choice)([ \\t]+\\S+:[ \\t]+)',\n bygroups(Keyword, Text), 'number'),\n\n (r'(option|button)([ \\t]+)',\n bygroups(Keyword, Text), 'string_unquoted'),\n\n (r'(sentence|text)([ \\t]+\\S+)',\n bygroups(Keyword, Text), 'string_unquoted'),\n\n (r'(word)([ \\t]+\\S+[ \\t]*)(\\S+)?([ \\t]+.*)?',\n bygroups(Keyword, Text, String, Text)),\n\n (r'(boolean)(\\s+\\S+\\s*)(0|1|\"?(?:yes|no)\"?)',\n bygroups(Keyword, Text, Name.Variable)),\n\n # Ideally processing of the number would happend in the 'number'\n # but that doesn't seem to work\n (r'(real|natural|positive|integer)([ \\t]+\\S+[ \\t]*)([+-]?)(\\d+(?:\\.\\d*)?'\n r'(?:[eE][-+]?\\d+)?%?)',\n bygroups(Keyword, Text, Operator, Number)),\n\n (r'(comment)(\\s+)',\n bygroups(Keyword, Text), 'string_unquoted'),\n\n (r'\\bendform\\b', Keyword, '#pop'),\n ]\n }\n",
"id": "4446180",
"language": "Python",
"matching_score": 5.380119800567627,
"max_stars_count": 6989,
"path": "pygments/lexers/praat.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n pygments.lexers.matlab\n ~~~~~~~~~~~~~~~~~~~~~~\n\n Lexers for Matlab and related languages.\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport re\n\nfrom pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions\nfrom pygments.token import Text, Comment, Operator, Keyword, Name, String, \\\n Number, Punctuation, Generic, Whitespace\n\nfrom pygments.lexers import _scilab_builtins\n\n__all__ = ['MatlabLexer', 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer']\n\n\nclass MatlabLexer(RegexLexer):\n \"\"\"\n For Matlab source code.\n\n .. versionadded:: 0.10\n \"\"\"\n name = 'Matlab'\n aliases = ['matlab']\n filenames = ['*.m']\n mimetypes = ['text/matlab']\n\n #\n # These lists are generated automatically.\n # Run the following in bash shell:\n #\n # for f in elfun specfun elmat; do\n # echo -n \"$f = \"\n # matlab -nojvm -r \"help $f;exit;\" | perl -ne \\\n # 'push(@c,$1) if /^ (\\w+)\\s+-/; END {print q{[\"}.join(q{\",\"},@c).qq{\"]\\n};}'\n # done\n #\n # elfun: Elementary math functions\n # specfun: Special Math functions\n # elmat: Elementary matrices and matrix manipulation\n #\n # taken from Matlab version 7.4.0.336 (R2007a)\n #\n elfun = (\"sin\", \"sind\", \"sinh\", \"asin\", \"asind\", \"asinh\", \"cos\", \"cosd\", \"cosh\",\n \"acos\", \"acosd\", \"acosh\", \"tan\", \"tand\", \"tanh\", \"atan\", \"atand\", \"atan2\",\n \"atanh\", \"sec\", \"secd\", \"sech\", \"asec\", \"asecd\", \"asech\", \"csc\", \"cscd\",\n \"csch\", \"acsc\", \"acscd\", \"acsch\", \"cot\", \"cotd\", \"coth\", \"acot\", \"acotd\",\n \"acoth\", \"hypot\", \"exp\", \"expm1\", \"log\", \"log1p\", \"log10\", \"log2\", \"pow2\",\n \"realpow\", \"reallog\", \"realsqrt\", \"sqrt\", \"nthroot\", \"nextpow2\", \"abs\",\n \"angle\", \"complex\", \"conj\", \"imag\", \"real\", \"unwrap\", \"isreal\", \"cplxpair\",\n \"fix\", \"floor\", \"ceil\", \"round\", \"mod\", \"rem\", \"sign\")\n specfun = (\"airy\", \"besselj\", \"bessely\", \"besselh\", \"besseli\", \"besselk\", \"beta\",\n \"betainc\", \"betaln\", \"ellipj\", \"ellipke\", \"erf\", \"erfc\", \"erfcx\",\n \"erfinv\", \"expint\", \"gamma\", \"gammainc\", \"gammaln\", \"psi\", \"legendre\",\n \"cross\", \"dot\", \"factor\", \"isprime\", \"primes\", \"gcd\", \"lcm\", \"rat\",\n \"rats\", \"perms\", \"nchoosek\", \"factorial\", \"cart2sph\", \"cart2pol\",\n \"pol2cart\", \"sph2cart\", \"hsv2rgb\", \"rgb2hsv\")\n elmat = (\"zeros\", \"ones\", \"eye\", \"repmat\", \"rand\", \"randn\", \"linspace\", \"logspace\",\n \"freqspace\", \"meshgrid\", \"accumarray\", \"size\", \"length\", \"ndims\", \"numel\",\n \"disp\", \"isempty\", \"isequal\", \"isequalwithequalnans\", \"cat\", \"reshape\",\n \"diag\", \"blkdiag\", \"tril\", \"triu\", \"fliplr\", \"flipud\", \"flipdim\", \"rot90\",\n \"find\", \"end\", \"sub2ind\", \"ind2sub\", \"bsxfun\", \"ndgrid\", \"permute\",\n \"ipermute\", \"shiftdim\", \"circshift\", \"squeeze\", \"isscalar\", \"isvector\",\n \"ans\", \"eps\", \"realmax\", \"realmin\", \"pi\", \"i\", \"inf\", \"nan\", \"isnan\",\n \"isinf\", \"isfinite\", \"j\", \"why\", \"compan\", \"gallery\", \"hadamard\", \"hankel\",\n \"hilb\", \"invhilb\", \"magic\", \"pascal\", \"rosser\", \"toeplitz\", \"vander\",\n \"wilkinson\")\n\n _operators = r'-|==|~=|<=|>=|<|>|&&|&|~|\\|\\|?|\\.\\*|\\*|\\+|\\.\\^|\\.\\\\|\\.\\/|\\/|\\\\'\n\n tokens = {\n 'root': [\n # line starting with '!' is sent as a system command. 
not sure what\n # label to use...\n (r'^!.*', String.Other),\n (r'%\\{\\s*\\n', Comment.Multiline, 'blockcomment'),\n (r'%.*$', Comment),\n (r'^\\s*function\\b', Keyword, 'deffunc'),\n\n # from 'iskeyword' on version 7.11 (R2010):\n # Check that there is no preceding dot, as keywords are valid field\n # names.\n (words(('break', 'case', 'catch', 'classdef', 'continue', 'else',\n 'elseif', 'end', 'enumerated', 'events', 'for', 'function',\n 'global', 'if', 'methods', 'otherwise', 'parfor',\n 'persistent', 'properties', 'return', 'spmd', 'switch',\n 'try', 'while'),\n prefix=r'(?<!\\.)', suffix=r'\\b'),\n Keyword),\n\n (\"(\" + \"|\".join(elfun + specfun + elmat) + r')\\b', Name.Builtin),\n\n # line continuation with following comment:\n (r'\\.\\.\\..*$', Comment),\n\n # command form:\n # \"How MATLAB Recognizes Command Syntax\" specifies that an operator\n # is recognized if it is either surrounded by spaces or by no\n # spaces on both sides; only the former case matters for us. (This\n # allows distinguishing `cd ./foo` from `cd ./ foo`.)\n (r'(?:^|(?<=;))(\\s*)(\\w+)(\\s+)(?!=|\\(|(%s)\\s+)' % _operators,\n bygroups(Text, Name, Text), 'commandargs'),\n\n # operators:\n (_operators, Operator),\n\n # numbers (must come before punctuation to handle `.5`; cannot use\n # `\\b` due to e.g. `5. + .5`).\n (r'(?<!\\w)((\\d+\\.\\d*)|(\\d*\\.\\d+))([eEf][+-]?\\d+)?(?!\\w)', Number.Float),\n (r'\\b\\d+[eEf][+-]?[0-9]+\\b', Number.Float),\n (r'\\b\\d+\\b', Number.Integer),\n\n # punctuation:\n (r'\\[|\\]|\\(|\\)|\\{|\\}|:|@|\\.|,', Punctuation),\n (r'=|:|;', Punctuation),\n\n # quote can be transpose, instead of string:\n # (not great, but handles common cases...)\n (r'(?<=[\\w)\\].])\\'+', Operator),\n\n (r'\"(\"\"|[^\"])*\"', String),\n\n (r'(?<![\\w)\\].])\\'', String, 'string'),\n (r'[a-zA-Z_]\\w*', Name),\n (r'.', Text),\n ],\n 'blockcomment': [\n (r'^\\s*%\\}', Comment.Multiline, '#pop'),\n (r'^.*\\n', Comment.Multiline),\n (r'.', Comment.Multiline),\n ],\n 'deffunc': [\n (r'(\\s*)(?:(.+)(\\s*)(=)(\\s*))?(.+)(\\()(.*)(\\))(\\s*)',\n bygroups(Whitespace, Text, Whitespace, Punctuation,\n Whitespace, Name.Function, Punctuation, Text,\n Punctuation, Whitespace), '#pop'),\n # function with no args\n (r'(\\s*)([a-zA-Z_]\\w*)', bygroups(Text, Name.Function), '#pop'),\n ],\n 'string': [\n (r\"[^']*'\", String, '#pop'),\n ],\n 'commandargs': [\n (r\"[ \\t]+\", Text),\n (\"'[^']*'\", String),\n (r\"[^';\\s]+\", String),\n (\";?\", Punctuation, '#pop'),\n ]\n }\n\n def analyse_text(text):\n # function declaration.\n first_non_comment = next((line for line in text.splitlines()\n if not re.match(r'^\\s*%', text)), '').strip()\n if (first_non_comment.startswith('function')\n and '{' not in first_non_comment):\n return 1.\n # comment\n elif re.match(r'^\\s*%', text, re.M):\n return 0.2\n # system cmd\n elif re.match(r'^!\\w+', text, re.M):\n return 0.2\n\n\nline_re = re.compile('.*?\\n')\n\n\nclass MatlabSessionLexer(Lexer):\n \"\"\"\n For Matlab sessions. Modeled after PythonConsoleLexer.\n Contributed by <NAME> <<EMAIL>>.\n\n .. 
versionadded:: 0.10\n \"\"\"\n name = 'Matlab session'\n aliases = ['matlabsession']\n\n def get_tokens_unprocessed(self, text):\n mlexer = MatlabLexer(**self.options)\n\n curcode = ''\n insertions = []\n\n for match in line_re.finditer(text):\n line = match.group()\n\n if line.startswith('>> '):\n insertions.append((len(curcode),\n [(0, Generic.Prompt, line[:3])]))\n curcode += line[3:]\n\n elif line.startswith('>>'):\n insertions.append((len(curcode),\n [(0, Generic.Prompt, line[:2])]))\n curcode += line[2:]\n\n elif line.startswith('???'):\n\n idx = len(curcode)\n\n # without is showing error on same line as before...?\n # line = \"\\n\" + line\n token = (0, Generic.Traceback, line)\n insertions.append((idx, [token]))\n\n else:\n if curcode:\n for item in do_insertions(\n insertions, mlexer.get_tokens_unprocessed(curcode)):\n yield item\n curcode = ''\n insertions = []\n\n yield match.start(), Generic.Output, line\n\n if curcode: # or item:\n for item in do_insertions(\n insertions, mlexer.get_tokens_unprocessed(curcode)):\n yield item\n\n\nclass OctaveLexer(RegexLexer):\n \"\"\"\n For GNU Octave source code.\n\n .. versionadded:: 1.5\n \"\"\"\n name = 'Octave'\n aliases = ['octave']\n filenames = ['*.m']\n mimetypes = ['text/octave']\n\n # These lists are generated automatically.\n # Run the following in bash shell:\n #\n # First dump all of the Octave manual into a plain text file:\n #\n # $ info octave --subnodes -o octave-manual\n #\n # Now grep through it:\n\n # for i in \\\n # \"Built-in Function\" \"Command\" \"Function File\" \\\n # \"Loadable Function\" \"Mapping Function\";\n # do\n # perl -e '@name = qw('\"$i\"');\n # print lc($name[0]),\"_kw = [\\n\"';\n #\n # perl -n -e 'print \"\\\"$1\\\",\\n\" if /-- '\"$i\"': .* (\\w*) \\(/;' \\\n # octave-manual | sort | uniq ;\n # echo \"]\" ;\n # echo;\n # done\n\n # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011)\n\n builtin_kw = (\n \"addlistener\", \"addpath\", \"addproperty\", \"all\",\n \"and\", \"any\", \"argnames\", \"argv\", \"assignin\",\n \"atexit\", \"autoload\",\n \"available_graphics_toolkits\", \"beep_on_error\",\n \"bitand\", \"bitmax\", \"bitor\", \"bitshift\", \"bitxor\",\n \"cat\", \"cell\", \"cellstr\", \"char\", \"class\", \"clc\",\n \"columns\", \"command_line_path\",\n \"completion_append_char\", \"completion_matches\",\n \"complex\", \"confirm_recursive_rmdir\", \"cputime\",\n \"crash_dumps_octave_core\", \"ctranspose\", \"cumprod\",\n \"cumsum\", \"debug_on_error\", \"debug_on_interrupt\",\n \"debug_on_warning\", \"default_save_options\",\n \"dellistener\", \"diag\", \"diff\", \"disp\",\n \"doc_cache_file\", \"do_string_escapes\", \"double\",\n \"drawnow\", \"e\", \"echo_executing_commands\", \"eps\",\n \"eq\", \"errno\", \"errno_list\", \"error\", \"eval\",\n \"evalin\", \"exec\", \"exist\", \"exit\", \"eye\", \"false\",\n \"fclear\", \"fclose\", \"fcntl\", \"fdisp\", \"feof\",\n \"ferror\", \"feval\", \"fflush\", \"fgetl\", \"fgets\",\n \"fieldnames\", \"file_in_loadpath\", \"file_in_path\",\n \"filemarker\", \"filesep\", \"find_dir_in_path\",\n \"fixed_point_format\", \"fnmatch\", \"fopen\", \"fork\",\n \"formula\", \"fprintf\", \"fputs\", \"fread\", \"freport\",\n \"frewind\", \"fscanf\", \"fseek\", \"fskipl\", \"ftell\",\n \"functions\", \"fwrite\", \"ge\", \"genpath\", \"get\",\n \"getegid\", \"getenv\", \"geteuid\", \"getgid\",\n \"getpgrp\", \"getpid\", \"getppid\", \"getuid\", \"glob\",\n \"gt\", \"gui_mode\", \"history_control\",\n \"history_file\", \"history_size\",\n 
\"history_timestamp_format_string\", \"home\",\n \"horzcat\", \"hypot\", \"ifelse\",\n \"ignore_function_time_stamp\", \"inferiorto\",\n \"info_file\", \"info_program\", \"inline\", \"input\",\n \"intmax\", \"intmin\", \"ipermute\",\n \"is_absolute_filename\", \"isargout\", \"isbool\",\n \"iscell\", \"iscellstr\", \"ischar\", \"iscomplex\",\n \"isempty\", \"isfield\", \"isfloat\", \"isglobal\",\n \"ishandle\", \"isieee\", \"isindex\", \"isinteger\",\n \"islogical\", \"ismatrix\", \"ismethod\", \"isnull\",\n \"isnumeric\", \"isobject\", \"isreal\",\n \"is_rooted_relative_filename\", \"issorted\",\n \"isstruct\", \"isvarname\", \"kbhit\", \"keyboard\",\n \"kill\", \"lasterr\", \"lasterror\", \"lastwarn\",\n \"ldivide\", \"le\", \"length\", \"link\", \"linspace\",\n \"logical\", \"lstat\", \"lt\", \"make_absolute_filename\",\n \"makeinfo_program\", \"max_recursion_depth\", \"merge\",\n \"methods\", \"mfilename\", \"minus\", \"mislocked\",\n \"mkdir\", \"mkfifo\", \"mkstemp\", \"mldivide\", \"mlock\",\n \"mouse_wheel_zoom\", \"mpower\", \"mrdivide\", \"mtimes\",\n \"munlock\", \"nargin\", \"nargout\",\n \"native_float_format\", \"ndims\", \"ne\", \"nfields\",\n \"nnz\", \"norm\", \"not\", \"numel\", \"nzmax\",\n \"octave_config_info\", \"octave_core_file_limit\",\n \"octave_core_file_name\",\n \"octave_core_file_options\", \"ones\", \"or\",\n \"output_max_field_width\", \"output_precision\",\n \"page_output_immediately\", \"page_screen_output\",\n \"path\", \"pathsep\", \"pause\", \"pclose\", \"permute\",\n \"pi\", \"pipe\", \"plus\", \"popen\", \"power\",\n \"print_empty_dimensions\", \"printf\",\n \"print_struct_array_contents\", \"prod\",\n \"program_invocation_name\", \"program_name\",\n \"putenv\", \"puts\", \"pwd\", \"quit\", \"rats\", \"rdivide\",\n \"readdir\", \"readlink\", \"read_readline_init_file\",\n \"realmax\", \"realmin\", \"rehash\", \"rename\",\n \"repelems\", \"re_read_readline_init_file\", \"reset\",\n \"reshape\", \"resize\", \"restoredefaultpath\",\n \"rethrow\", \"rmdir\", \"rmfield\", \"rmpath\", \"rows\",\n \"save_header_format_string\", \"save_precision\",\n \"saving_history\", \"scanf\", \"set\", \"setenv\",\n \"shell_cmd\", \"sighup_dumps_octave_core\",\n \"sigterm_dumps_octave_core\", \"silent_functions\",\n \"single\", \"size\", \"size_equal\", \"sizemax\",\n \"sizeof\", \"sleep\", \"source\", \"sparse_auto_mutate\",\n \"split_long_rows\", \"sprintf\", \"squeeze\", \"sscanf\",\n \"stat\", \"stderr\", \"stdin\", \"stdout\", \"strcmp\",\n \"strcmpi\", \"string_fill_char\", \"strncmp\",\n \"strncmpi\", \"struct\", \"struct_levels_to_print\",\n \"strvcat\", \"subsasgn\", \"subsref\", \"sum\", \"sumsq\",\n \"superiorto\", \"suppress_verbose_help_message\",\n \"symlink\", \"system\", \"tic\", \"tilde_expand\",\n \"times\", \"tmpfile\", \"tmpnam\", \"toc\", \"toupper\",\n \"transpose\", \"true\", \"typeinfo\", \"umask\", \"uminus\",\n \"uname\", \"undo_string_escapes\", \"unlink\", \"uplus\",\n \"upper\", \"usage\", \"usleep\", \"vec\", \"vectorize\",\n \"vertcat\", \"waitpid\", \"warning\", \"warranty\",\n \"whos_line_format\", \"yes_or_no\", \"zeros\",\n \"inf\", \"Inf\", \"nan\", \"NaN\")\n\n command_kw = (\"close\", \"load\", \"who\", \"whos\")\n\n function_kw = (\n \"accumarray\", \"accumdim\", \"acosd\", \"acotd\",\n \"acscd\", \"addtodate\", \"allchild\", \"ancestor\",\n \"anova\", \"arch_fit\", \"arch_rnd\", \"arch_test\",\n \"area\", \"arma_rnd\", \"arrayfun\", \"ascii\", \"asctime\",\n \"asecd\", \"asind\", \"assert\", \"atand\",\n \"autoreg_matrix\", 
\"autumn\", \"axes\", \"axis\", \"bar\",\n \"barh\", \"bartlett\", \"bartlett_test\", \"beep\",\n \"betacdf\", \"betainv\", \"betapdf\", \"betarnd\",\n \"bicgstab\", \"bicubic\", \"binary\", \"binocdf\",\n \"binoinv\", \"binopdf\", \"binornd\", \"bitcmp\",\n \"bitget\", \"bitset\", \"blackman\", \"blanks\",\n \"blkdiag\", \"bone\", \"box\", \"brighten\", \"calendar\",\n \"cast\", \"cauchy_cdf\", \"cauchy_inv\", \"cauchy_pdf\",\n \"cauchy_rnd\", \"caxis\", \"celldisp\", \"center\", \"cgs\",\n \"chisquare_test_homogeneity\",\n \"chisquare_test_independence\", \"circshift\", \"cla\",\n \"clabel\", \"clf\", \"clock\", \"cloglog\", \"closereq\",\n \"colon\", \"colorbar\", \"colormap\", \"colperm\",\n \"comet\", \"common_size\", \"commutation_matrix\",\n \"compan\", \"compare_versions\", \"compass\",\n \"computer\", \"cond\", \"condest\", \"contour\",\n \"contourc\", \"contourf\", \"contrast\", \"conv\",\n \"convhull\", \"cool\", \"copper\", \"copyfile\", \"cor\",\n \"corrcoef\", \"cor_test\", \"cosd\", \"cotd\", \"cov\",\n \"cplxpair\", \"cross\", \"cscd\", \"cstrcat\", \"csvread\",\n \"csvwrite\", \"ctime\", \"cumtrapz\", \"curl\", \"cut\",\n \"cylinder\", \"date\", \"datenum\", \"datestr\",\n \"datetick\", \"datevec\", \"dblquad\", \"deal\",\n \"deblank\", \"deconv\", \"delaunay\", \"delaunayn\",\n \"delete\", \"demo\", \"detrend\", \"diffpara\", \"diffuse\",\n \"dir\", \"discrete_cdf\", \"discrete_inv\",\n \"discrete_pdf\", \"discrete_rnd\", \"display\",\n \"divergence\", \"dlmwrite\", \"dos\", \"dsearch\",\n \"dsearchn\", \"duplication_matrix\", \"durbinlevinson\",\n \"ellipsoid\", \"empirical_cdf\", \"empirical_inv\",\n \"empirical_pdf\", \"empirical_rnd\", \"eomday\",\n \"errorbar\", \"etime\", \"etreeplot\", \"example\",\n \"expcdf\", \"expinv\", \"expm\", \"exppdf\", \"exprnd\",\n \"ezcontour\", \"ezcontourf\", \"ezmesh\", \"ezmeshc\",\n \"ezplot\", \"ezpolar\", \"ezsurf\", \"ezsurfc\", \"factor\",\n \"factorial\", \"fail\", \"fcdf\", \"feather\", \"fftconv\",\n \"fftfilt\", \"fftshift\", \"figure\", \"fileattrib\",\n \"fileparts\", \"fill\", \"findall\", \"findobj\",\n \"findstr\", \"finv\", \"flag\", \"flipdim\", \"fliplr\",\n \"flipud\", \"fpdf\", \"fplot\", \"fractdiff\", \"freqz\",\n \"freqz_plot\", \"frnd\", \"fsolve\",\n \"f_test_regression\", \"ftp\", \"fullfile\", \"fzero\",\n \"gamcdf\", \"gaminv\", \"gampdf\", \"gamrnd\", \"gca\",\n \"gcbf\", \"gcbo\", \"gcf\", \"genvarname\", \"geocdf\",\n \"geoinv\", \"geopdf\", \"geornd\", \"getfield\", \"ginput\",\n \"glpk\", \"gls\", \"gplot\", \"gradient\",\n \"graphics_toolkit\", \"gray\", \"grid\", \"griddata\",\n \"griddatan\", \"gtext\", \"gunzip\", \"gzip\", \"hadamard\",\n \"hamming\", \"hankel\", \"hanning\", \"hggroup\",\n \"hidden\", \"hilb\", \"hist\", \"histc\", \"hold\", \"hot\",\n \"hotelling_test\", \"housh\", \"hsv\", \"hurst\",\n \"hygecdf\", \"hygeinv\", \"hygepdf\", \"hygernd\",\n \"idivide\", \"ifftshift\", \"image\", \"imagesc\",\n \"imfinfo\", \"imread\", \"imshow\", \"imwrite\", \"index\",\n \"info\", \"inpolygon\", \"inputname\", \"interpft\",\n \"interpn\", \"intersect\", \"invhilb\", \"iqr\", \"isa\",\n \"isdefinite\", \"isdir\", \"is_duplicate_entry\",\n \"isequal\", \"isequalwithequalnans\", \"isfigure\",\n \"ishermitian\", \"ishghandle\", \"is_leap_year\",\n \"isletter\", \"ismac\", \"ismember\", \"ispc\", \"isprime\",\n \"isprop\", \"isscalar\", \"issquare\", \"isstrprop\",\n \"issymmetric\", \"isunix\", \"is_valid_file_id\",\n \"isvector\", \"jet\", \"kendall\",\n \"kolmogorov_smirnov_cdf\",\n 
\"kolmogorov_smirnov_test\", \"kruskal_wallis_test\",\n \"krylov\", \"kurtosis\", \"laplace_cdf\", \"laplace_inv\",\n \"laplace_pdf\", \"laplace_rnd\", \"legend\", \"legendre\",\n \"license\", \"line\", \"linkprop\", \"list_primes\",\n \"loadaudio\", \"loadobj\", \"logistic_cdf\",\n \"logistic_inv\", \"logistic_pdf\", \"logistic_rnd\",\n \"logit\", \"loglog\", \"loglogerr\", \"logm\", \"logncdf\",\n \"logninv\", \"lognpdf\", \"lognrnd\", \"logspace\",\n \"lookfor\", \"ls_command\", \"lsqnonneg\", \"magic\",\n \"mahalanobis\", \"manova\", \"matlabroot\",\n \"mcnemar_test\", \"mean\", \"meansq\", \"median\", \"menu\",\n \"mesh\", \"meshc\", \"meshgrid\", \"meshz\", \"mexext\",\n \"mget\", \"mkpp\", \"mode\", \"moment\", \"movefile\",\n \"mpoles\", \"mput\", \"namelengthmax\", \"nargchk\",\n \"nargoutchk\", \"nbincdf\", \"nbininv\", \"nbinpdf\",\n \"nbinrnd\", \"nchoosek\", \"ndgrid\", \"newplot\", \"news\",\n \"nonzeros\", \"normcdf\", \"normest\", \"norminv\",\n \"normpdf\", \"normrnd\", \"now\", \"nthroot\", \"null\",\n \"ocean\", \"ols\", \"onenormest\", \"optimget\",\n \"optimset\", \"orderfields\", \"orient\", \"orth\",\n \"pack\", \"pareto\", \"parseparams\", \"pascal\", \"patch\",\n \"pathdef\", \"pcg\", \"pchip\", \"pcolor\", \"pcr\",\n \"peaks\", \"periodogram\", \"perl\", \"perms\", \"pie\",\n \"pink\", \"planerot\", \"playaudio\", \"plot\",\n \"plotmatrix\", \"plotyy\", \"poisscdf\", \"poissinv\",\n \"poisspdf\", \"poissrnd\", \"polar\", \"poly\",\n \"polyaffine\", \"polyarea\", \"polyderiv\", \"polyfit\",\n \"polygcd\", \"polyint\", \"polyout\", \"polyreduce\",\n \"polyval\", \"polyvalm\", \"postpad\", \"powerset\",\n \"ppder\", \"ppint\", \"ppjumps\", \"ppplot\", \"ppval\",\n \"pqpnonneg\", \"prepad\", \"primes\", \"print\",\n \"print_usage\", \"prism\", \"probit\", \"qp\", \"qqplot\",\n \"quadcc\", \"quadgk\", \"quadl\", \"quadv\", \"quiver\",\n \"qzhess\", \"rainbow\", \"randi\", \"range\", \"rank\",\n \"ranks\", \"rat\", \"reallog\", \"realpow\", \"realsqrt\",\n \"record\", \"rectangle_lw\", \"rectangle_sw\",\n \"rectint\", \"refresh\", \"refreshdata\",\n \"regexptranslate\", \"repmat\", \"residue\", \"ribbon\",\n \"rindex\", \"roots\", \"rose\", \"rosser\", \"rotdim\",\n \"rref\", \"run\", \"run_count\", \"rundemos\", \"run_test\",\n \"runtests\", \"saveas\", \"saveaudio\", \"saveobj\",\n \"savepath\", \"scatter\", \"secd\", \"semilogx\",\n \"semilogxerr\", \"semilogy\", \"semilogyerr\",\n \"setaudio\", \"setdiff\", \"setfield\", \"setxor\",\n \"shading\", \"shift\", \"shiftdim\", \"sign_test\",\n \"sinc\", \"sind\", \"sinetone\", \"sinewave\", \"skewness\",\n \"slice\", \"sombrero\", \"sortrows\", \"spaugment\",\n \"spconvert\", \"spdiags\", \"spearman\", \"spectral_adf\",\n \"spectral_xdf\", \"specular\", \"speed\", \"spencer\",\n \"speye\", \"spfun\", \"sphere\", \"spinmap\", \"spline\",\n \"spones\", \"sprand\", \"sprandn\", \"sprandsym\",\n \"spring\", \"spstats\", \"spy\", \"sqp\", \"stairs\",\n \"statistics\", \"std\", \"stdnormal_cdf\",\n \"stdnormal_inv\", \"stdnormal_pdf\", \"stdnormal_rnd\",\n \"stem\", \"stft\", \"strcat\", \"strchr\", \"strjust\",\n \"strmatch\", \"strread\", \"strsplit\", \"strtok\",\n \"strtrim\", \"strtrunc\", \"structfun\", \"studentize\",\n \"subplot\", \"subsindex\", \"subspace\", \"substr\",\n \"substruct\", \"summer\", \"surf\", \"surface\", \"surfc\",\n \"surfl\", \"surfnorm\", \"svds\", \"swapbytes\",\n \"sylvester_matrix\", \"symvar\", \"synthesis\", \"table\",\n \"tand\", \"tar\", \"tcdf\", \"tempdir\", \"tempname\",\n \"test\", 
\"text\", \"textread\", \"textscan\", \"tinv\",\n \"title\", \"toeplitz\", \"tpdf\", \"trace\", \"trapz\",\n \"treelayout\", \"treeplot\", \"triangle_lw\",\n \"triangle_sw\", \"tril\", \"trimesh\", \"triplequad\",\n \"triplot\", \"trisurf\", \"triu\", \"trnd\", \"tsearchn\",\n \"t_test\", \"t_test_regression\", \"type\", \"unidcdf\",\n \"unidinv\", \"unidpdf\", \"unidrnd\", \"unifcdf\",\n \"unifinv\", \"unifpdf\", \"unifrnd\", \"union\", \"unique\",\n \"unix\", \"unmkpp\", \"unpack\", \"untabify\", \"untar\",\n \"unwrap\", \"unzip\", \"u_test\", \"validatestring\",\n \"vander\", \"var\", \"var_test\", \"vech\", \"ver\",\n \"version\", \"view\", \"voronoi\", \"voronoin\",\n \"waitforbuttonpress\", \"wavread\", \"wavwrite\",\n \"wblcdf\", \"wblinv\", \"wblpdf\", \"wblrnd\", \"weekday\",\n \"welch_test\", \"what\", \"white\", \"whitebg\",\n \"wienrnd\", \"wilcoxon_test\", \"wilkinson\", \"winter\",\n \"xlabel\", \"xlim\", \"ylabel\", \"yulewalker\", \"zip\",\n \"zlabel\", \"z_test\")\n\n loadable_kw = (\n \"airy\", \"amd\", \"balance\", \"besselh\", \"besseli\",\n \"besselj\", \"besselk\", \"bessely\", \"bitpack\",\n \"bsxfun\", \"builtin\", \"ccolamd\", \"cellfun\",\n \"cellslices\", \"chol\", \"choldelete\", \"cholinsert\",\n \"cholinv\", \"cholshift\", \"cholupdate\", \"colamd\",\n \"colloc\", \"convhulln\", \"convn\", \"csymamd\",\n \"cummax\", \"cummin\", \"daspk\", \"daspk_options\",\n \"dasrt\", \"dasrt_options\", \"dassl\", \"dassl_options\",\n \"dbclear\", \"dbdown\", \"dbstack\", \"dbstatus\",\n \"dbstop\", \"dbtype\", \"dbup\", \"dbwhere\", \"det\",\n \"dlmread\", \"dmperm\", \"dot\", \"eig\", \"eigs\",\n \"endgrent\", \"endpwent\", \"etree\", \"fft\", \"fftn\",\n \"fftw\", \"filter\", \"find\", \"full\", \"gcd\",\n \"getgrent\", \"getgrgid\", \"getgrnam\", \"getpwent\",\n \"getpwnam\", \"getpwuid\", \"getrusage\", \"givens\",\n \"gmtime\", \"gnuplot_binary\", \"hess\", \"ifft\",\n \"ifftn\", \"inv\", \"isdebugmode\", \"issparse\", \"kron\",\n \"localtime\", \"lookup\", \"lsode\", \"lsode_options\",\n \"lu\", \"luinc\", \"luupdate\", \"matrix_type\", \"max\",\n \"min\", \"mktime\", \"pinv\", \"qr\", \"qrdelete\",\n \"qrinsert\", \"qrshift\", \"qrupdate\", \"quad\",\n \"quad_options\", \"qz\", \"rand\", \"rande\", \"randg\",\n \"randn\", \"randp\", \"randperm\", \"rcond\", \"regexp\",\n \"regexpi\", \"regexprep\", \"schur\", \"setgrent\",\n \"setpwent\", \"sort\", \"spalloc\", \"sparse\", \"spparms\",\n \"sprank\", \"sqrtm\", \"strfind\", \"strftime\",\n \"strptime\", \"strrep\", \"svd\", \"svd_driver\", \"syl\",\n \"symamd\", \"symbfact\", \"symrcm\", \"time\", \"tsearch\",\n \"typecast\", \"urlread\", \"urlwrite\")\n\n mapping_kw = (\n \"abs\", \"acos\", \"acosh\", \"acot\", \"acoth\", \"acsc\",\n \"acsch\", \"angle\", \"arg\", \"asec\", \"asech\", \"asin\",\n \"asinh\", \"atan\", \"atanh\", \"beta\", \"betainc\",\n \"betaln\", \"bincoeff\", \"cbrt\", \"ceil\", \"conj\", \"cos\",\n \"cosh\", \"cot\", \"coth\", \"csc\", \"csch\", \"erf\", \"erfc\",\n \"erfcx\", \"erfinv\", \"exp\", \"finite\", \"fix\", \"floor\",\n \"fmod\", \"gamma\", \"gammainc\", \"gammaln\", \"imag\",\n \"isalnum\", \"isalpha\", \"isascii\", \"iscntrl\",\n \"isdigit\", \"isfinite\", \"isgraph\", \"isinf\",\n \"islower\", \"isna\", \"isnan\", \"isprint\", \"ispunct\",\n \"isspace\", \"isupper\", \"isxdigit\", \"lcm\", \"lgamma\",\n \"log\", \"lower\", \"mod\", \"real\", \"rem\", \"round\",\n \"roundb\", \"sec\", \"sech\", \"sign\", \"sin\", \"sinh\",\n \"sqrt\", \"tan\", \"tanh\", \"toascii\", \"tolower\", 
\"xor\")\n\n builtin_consts = (\n \"EDITOR\", \"EXEC_PATH\", \"I\", \"IMAGE_PATH\", \"NA\",\n \"OCTAVE_HOME\", \"OCTAVE_VERSION\", \"PAGER\",\n \"PAGER_FLAGS\", \"SEEK_CUR\", \"SEEK_END\", \"SEEK_SET\",\n \"SIG\", \"S_ISBLK\", \"S_ISCHR\", \"S_ISDIR\", \"S_ISFIFO\",\n \"S_ISLNK\", \"S_ISREG\", \"S_ISSOCK\", \"WCONTINUE\",\n \"WCOREDUMP\", \"WEXITSTATUS\", \"WIFCONTINUED\",\n \"WIFEXITED\", \"WIFSIGNALED\", \"WIFSTOPPED\", \"WNOHANG\",\n \"WSTOPSIG\", \"WTERMSIG\", \"WUNTRACED\")\n\n tokens = {\n 'root': [\n # We should look into multiline comments\n (r'[%#].*$', Comment),\n (r'^\\s*function\\b', Keyword, 'deffunc'),\n\n # from 'iskeyword' on hg changeset 8cc154f45e37\n (words((\n '__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else',\n 'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef',\n 'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties',\n 'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods',\n 'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try',\n 'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\\b'),\n Keyword),\n\n (words(builtin_kw + command_kw + function_kw + loadable_kw + mapping_kw,\n suffix=r'\\b'), Name.Builtin),\n\n (words(builtin_consts, suffix=r'\\b'), Name.Constant),\n\n # operators in Octave but not Matlab:\n (r'-=|!=|!|/=|--', Operator),\n # operators:\n (r'-|==|~=|<|>|<=|>=|&&|&|~|\\|\\|?', Operator),\n # operators in Octave but not Matlab requiring escape for re:\n (r'\\*=|\\+=|\\^=|\\/=|\\\\=|\\*\\*|\\+\\+|\\.\\*\\*', Operator),\n # operators requiring escape for re:\n (r'\\.\\*|\\*|\\+|\\.\\^|\\.\\\\|\\.\\/|\\/|\\\\', Operator),\n\n\n # punctuation:\n (r'[\\[\\](){}:@.,]', Punctuation),\n (r'=|:|;', Punctuation),\n\n (r'\"[^\"]*\"', String),\n\n (r'(\\d+\\.\\d*|\\d*\\.\\d+)([eEf][+-]?[0-9]+)?', Number.Float),\n (r'\\d+[eEf][+-]?[0-9]+', Number.Float),\n (r'\\d+', Number.Integer),\n\n # quote can be transpose, instead of string:\n # (not great, but handles common cases...)\n (r'(?<=[\\w)\\].])\\'+', Operator),\n (r'(?<![\\w)\\].])\\'', String, 'string'),\n\n (r'[a-zA-Z_]\\w*', Name),\n (r'.', Text),\n ],\n 'string': [\n (r\"[^']*'\", String, '#pop'),\n ],\n 'deffunc': [\n (r'(\\s*)(?:(.+)(\\s*)(=)(\\s*))?(.+)(\\()(.*)(\\))(\\s*)',\n bygroups(Whitespace, Text, Whitespace, Punctuation,\n Whitespace, Name.Function, Punctuation, Text,\n Punctuation, Whitespace), '#pop'),\n # function with no args\n (r'(\\s*)([a-zA-Z_]\\w*)', bygroups(Text, Name.Function), '#pop'),\n ],\n }\n\n\nclass ScilabLexer(RegexLexer):\n \"\"\"\n For Scilab source code.\n\n .. 
versionadded:: 1.5\n \"\"\"\n name = 'Scilab'\n aliases = ['scilab']\n filenames = ['*.sci', '*.sce', '*.tst']\n mimetypes = ['text/scilab']\n\n tokens = {\n 'root': [\n (r'//.*?$', Comment.Single),\n (r'^\\s*function\\b', Keyword, 'deffunc'),\n\n (words((\n '__FILE__', '__LINE__', 'break', 'case', 'catch', 'classdef', 'continue', 'do', 'else',\n 'elseif', 'end', 'end_try_catch', 'end_unwind_protect', 'endclassdef',\n 'endevents', 'endfor', 'endfunction', 'endif', 'endmethods', 'endproperties',\n 'endswitch', 'endwhile', 'events', 'for', 'function', 'get', 'global', 'if', 'methods',\n 'otherwise', 'persistent', 'properties', 'return', 'set', 'static', 'switch', 'try',\n 'until', 'unwind_protect', 'unwind_protect_cleanup', 'while'), suffix=r'\\b'),\n Keyword),\n\n (words(_scilab_builtins.functions_kw +\n _scilab_builtins.commands_kw +\n _scilab_builtins.macros_kw, suffix=r'\\b'), Name.Builtin),\n\n (words(_scilab_builtins.variables_kw, suffix=r'\\b'), Name.Constant),\n\n # operators:\n (r'-|==|~=|<|>|<=|>=|&&|&|~|\\|\\|?', Operator),\n # operators requiring escape for re:\n (r'\\.\\*|\\*|\\+|\\.\\^|\\.\\\\|\\.\\/|\\/|\\\\', Operator),\n\n # punctuation:\n (r'[\\[\\](){}@.,=:;]', Punctuation),\n\n (r'\"[^\"]*\"', String),\n\n # quote can be transpose, instead of string:\n # (not great, but handles common cases...)\n (r'(?<=[\\w)\\].])\\'+', Operator),\n (r'(?<![\\w)\\].])\\'', String, 'string'),\n\n (r'(\\d+\\.\\d*|\\d*\\.\\d+)([eEf][+-]?[0-9]+)?', Number.Float),\n (r'\\d+[eEf][+-]?[0-9]+', Number.Float),\n (r'\\d+', Number.Integer),\n\n (r'[a-zA-Z_]\\w*', Name),\n (r'.', Text),\n ],\n 'string': [\n (r\"[^']*'\", String, '#pop'),\n (r'.', String, '#pop'),\n ],\n 'deffunc': [\n (r'(\\s*)(?:(.+)(\\s*)(=)(\\s*))?(.+)(\\()(.*)(\\))(\\s*)',\n bygroups(Whitespace, Text, Whitespace, Punctuation,\n Whitespace, Name.Function, Punctuation, Text,\n Punctuation, Whitespace), '#pop'),\n # function with no args\n (r'(\\s*)([a-zA-Z_]\\w*)', bygroups(Text, Name.Function), '#pop'),\n ],\n }\n",
"id": "2704044",
"language": "Python",
"matching_score": 5.774845600128174,
"max_stars_count": 1,
"path": "pygments/lexers/matlab.py"
},
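A minimal usage sketch for the record above (assuming a standard Pygments installation, where the OctaveLexer defined in pygments/lexers/matlab.py is exported from pygments.lexers): it feeds a short Octave snippet through the lexer and prints the raw (index, token, value) triples.

# Minimal sketch: tokenize a short Octave snippet with the OctaveLexer
# whose keyword/builtin tables appear in the record above.
from pygments.lexers import OctaveLexer

code = "function y = f(x)\n  y = sqrt(x) + pi;\nendfunction\n"
for index, token, value in OctaveLexer().get_tokens_unprocessed(code):
    print(index, token, repr(value))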
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n pygments.lexers.erlang\n ~~~~~~~~~~~~~~~~~~~~~~\n\n Lexers for Erlang.\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport re\n\nfrom pygments.lexer import Lexer, RegexLexer, bygroups, words, do_insertions, \\\n include, default\nfrom pygments.token import Text, Comment, Operator, Keyword, Name, String, \\\n Number, Punctuation, Generic\n\n__all__ = ['ErlangLexer', 'ErlangShellLexer', 'ElixirConsoleLexer',\n 'ElixirLexer']\n\n\nline_re = re.compile('.*?\\n')\n\n\nclass ErlangLexer(RegexLexer):\n \"\"\"\n For the Erlang functional programming language.\n\n Blame <NAME> (http://jerith.za.net/).\n\n .. versionadded:: 0.9\n \"\"\"\n\n name = 'Erlang'\n aliases = ['erlang']\n filenames = ['*.erl', '*.hrl', '*.es', '*.escript']\n mimetypes = ['text/x-erlang']\n\n keywords = (\n 'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',\n 'let', 'of', 'query', 'receive', 'try', 'when',\n )\n\n builtins = ( # See erlang(3) man page\n 'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',\n 'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',\n 'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',\n 'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',\n 'float', 'float_to_list', 'fun_info', 'fun_to_list',\n 'function_exported', 'garbage_collect', 'get', 'get_keys',\n 'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',\n 'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',\n 'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',\n 'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',\n 'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',\n 'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',\n 'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',\n 'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',\n 'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',\n 'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',\n 'pid_to_list', 'port_close', 'port_command', 'port_connect',\n 'port_control', 'port_call', 'port_info', 'port_to_list',\n 'process_display', 'process_flag', 'process_info', 'purge_module',\n 'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',\n 'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',\n 'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',\n 'spawn_opt', 'split_binary', 'start_timer', 'statistics',\n 'suspend_process', 'system_flag', 'system_info', 'system_monitor',\n 'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',\n 'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',\n 'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'\n )\n\n operators = r'(\\+\\+?|--?|\\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\\?)'\n word_operators = (\n 'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',\n 'div', 'not', 'or', 'orelse', 'rem', 'xor'\n )\n\n atom_re = r\"(?:[a-z]\\w*|'[^\\n']*[^\\\\]')\"\n\n variable_re = r'(?:[A-Z_]\\w*)'\n\n esc_char_re = r'[bdefnrstv\\'\"\\\\]'\n esc_octal_re = r'[0-7][0-7]?[0-7]?'\n esc_hex_re = r'(?:x[0-9a-fA-F]{2}|x\\{[0-9a-fA-F]+\\})'\n esc_ctrl_re = r'\\^[a-zA-Z]'\n escape_re = r'(?:\\\\(?:'+esc_char_re+r'|'+esc_octal_re+r'|'+esc_hex_re+r'|'+esc_ctrl_re+r'))'\n\n macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'\n\n base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'\n\n tokens = 
{\n 'root': [\n (r'\\s+', Text),\n (r'%.*\\n', Comment),\n (words(keywords, suffix=r'\\b'), Keyword),\n (words(builtins, suffix=r'\\b'), Name.Builtin),\n (words(word_operators, suffix=r'\\b'), Operator.Word),\n (r'^-', Punctuation, 'directive'),\n (operators, Operator),\n (r'\"', String, 'string'),\n (r'<<', Name.Label),\n (r'>>', Name.Label),\n ('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)),\n ('(?:^|(?<=:))(' + atom_re + r')(\\s*)(\\()',\n bygroups(Name.Function, Text, Punctuation)),\n (r'[+-]?' + base_re + r'#[0-9a-zA-Z]+', Number.Integer),\n (r'[+-]?\\d+', Number.Integer),\n (r'[+-]?\\d+.\\d+', Number.Float),\n (r'[]\\[:_@\\\".{}()|;,]', Punctuation),\n (variable_re, Name.Variable),\n (atom_re, Name),\n (r'\\?'+macro_re, Name.Constant),\n (r'\\$(?:'+escape_re+r'|\\\\[ %]|[^\\\\])', String.Char),\n (r'#'+atom_re+r'(:?\\.'+atom_re+r')?', Name.Label),\n\n # Erlang script shebang\n (r'\\A#!.+\\n', Comment.Hashbang),\n\n # EEP 43: Maps\n # http://www.erlang.org/eeps/eep-0043.html\n (r'#\\{', Punctuation, 'map_key'),\n ],\n 'string': [\n (escape_re, String.Escape),\n (r'\"', String, '#pop'),\n (r'~[0-9.*]*[~#+BPWXb-ginpswx]', String.Interpol),\n (r'[^\"\\\\~]+', String),\n (r'~', String),\n ],\n 'directive': [\n (r'(define)(\\s*)(\\()('+macro_re+r')',\n bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'),\n (r'(record)(\\s*)(\\()('+macro_re+r')',\n bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),\n (atom_re, Name.Entity, '#pop'),\n ],\n 'map_key': [\n include('root'),\n (r'=>', Punctuation, 'map_val'),\n (r':=', Punctuation, 'map_val'),\n (r'\\}', Punctuation, '#pop'),\n ],\n 'map_val': [\n include('root'),\n (r',', Punctuation, '#pop'),\n (r'(?=\\})', Punctuation, '#pop'),\n ],\n }\n\n\nclass ErlangShellLexer(Lexer):\n \"\"\"\n Shell sessions in erl (for Erlang code).\n\n .. versionadded:: 1.1\n \"\"\"\n name = 'Erlang erl session'\n aliases = ['erl']\n filenames = ['*.erl-sh']\n mimetypes = ['text/x-erl-shellsession']\n\n _prompt_re = re.compile(r'(?:\\([\\w@_.]+\\))?\\d+>(?=\\s|\\Z)')\n\n def get_tokens_unprocessed(self, text):\n erlexer = ErlangLexer(**self.options)\n\n curcode = ''\n insertions = []\n for match in line_re.finditer(text):\n line = match.group()\n m = self._prompt_re.match(line)\n if m is not None:\n end = m.end()\n insertions.append((len(curcode),\n [(0, Generic.Prompt, line[:end])]))\n curcode += line[end:]\n else:\n if curcode:\n for item in do_insertions(insertions,\n erlexer.get_tokens_unprocessed(curcode)):\n yield item\n curcode = ''\n insertions = []\n if line.startswith('*'):\n yield match.start(), Generic.Traceback, line\n else:\n yield match.start(), Generic.Output, line\n if curcode:\n for item in do_insertions(insertions,\n erlexer.get_tokens_unprocessed(curcode)):\n yield item\n\n\ndef gen_elixir_string_rules(name, symbol, token):\n states = {}\n states['string_' + name] = [\n (r'[^#%s\\\\]+' % (symbol,), token),\n include('escapes'),\n (r'\\\\.', token),\n (r'(%s)' % (symbol,), bygroups(token), \"#pop\"),\n include('interpol')\n ]\n return states\n\n\ndef gen_elixir_sigstr_rules(term, token, interpol=True):\n if interpol:\n return [\n (r'[^#%s\\\\]+' % (term,), token),\n include('escapes'),\n (r'\\\\.', token),\n (r'%s[a-zA-Z]*' % (term,), token, '#pop'),\n include('interpol')\n ]\n else:\n return [\n (r'[^%s\\\\]+' % (term,), token),\n (r'\\\\.', token),\n (r'%s[a-zA-Z]*' % (term,), token, '#pop'),\n ]\n\n\nclass ElixirLexer(RegexLexer):\n \"\"\"\n For the `Elixir language <http://elixir-lang.org>`_.\n\n .. 
versionadded:: 1.5\n \"\"\"\n\n name = 'Elixir'\n aliases = ['elixir', 'ex', 'exs']\n filenames = ['*.ex', '*.exs']\n mimetypes = ['text/x-elixir']\n\n KEYWORD = ('fn', 'do', 'end', 'after', 'else', 'rescue', 'catch')\n KEYWORD_OPERATOR = ('not', 'and', 'or', 'when', 'in')\n BUILTIN = (\n 'case', 'cond', 'for', 'if', 'unless', 'try', 'receive', 'raise',\n 'quote', 'unquote', 'unquote_splicing', 'throw', 'super',\n )\n BUILTIN_DECLARATION = (\n 'def', 'defp', 'defmodule', 'defprotocol', 'defmacro', 'defmacrop',\n 'defdelegate', 'defexception', 'defstruct', 'defimpl', 'defcallback',\n )\n\n BUILTIN_NAMESPACE = ('import', 'require', 'use', 'alias')\n CONSTANT = ('nil', 'true', 'false')\n\n PSEUDO_VAR = ('_', '__MODULE__', '__DIR__', '__ENV__', '__CALLER__')\n\n OPERATORS3 = (\n '<<<', '>>>', '|||', '&&&', '^^^', '~~~', '===', '!==',\n '~>>', '<~>', '|~>', '<|>',\n )\n OPERATORS2 = (\n '==', '!=', '<=', '>=', '&&', '||', '<>', '++', '--', '|>', '=~',\n '->', '<-', '|', '.', '=', '~>', '<~',\n )\n OPERATORS1 = ('<', '>', '+', '-', '*', '/', '!', '^', '&')\n\n PUNCTUATION = (\n '\\\\\\\\', '<<', '>>', '=>', '(', ')', ':', ';', ',', '[', ']',\n )\n\n def get_tokens_unprocessed(self, text):\n for index, token, value in RegexLexer.get_tokens_unprocessed(self, text):\n if token is Name:\n if value in self.KEYWORD:\n yield index, Keyword, value\n elif value in self.KEYWORD_OPERATOR:\n yield index, Operator.Word, value\n elif value in self.BUILTIN:\n yield index, Keyword, value\n elif value in self.BUILTIN_DECLARATION:\n yield index, Keyword.Declaration, value\n elif value in self.BUILTIN_NAMESPACE:\n yield index, Keyword.Namespace, value\n elif value in self.CONSTANT:\n yield index, Name.Constant, value\n elif value in self.PSEUDO_VAR:\n yield index, Name.Builtin.Pseudo, value\n else:\n yield index, token, value\n else:\n yield index, token, value\n\n def gen_elixir_sigil_rules():\n # all valid sigil terminators (excluding heredocs)\n terminators = [\n (r'\\{', r'\\}', 'cb'),\n (r'\\[', r'\\]', 'sb'),\n (r'\\(', r'\\)', 'pa'),\n (r'<', r'>', 'ab'),\n (r'/', r'/', 'slas'),\n (r'\\|', r'\\|', 'pipe'),\n ('\"', '\"', 'quot'),\n (\"'\", \"'\", 'apos'),\n ]\n\n # heredocs have slightly different rules\n triquotes = [(r'\"\"\"', 'triquot'), (r\"'''\", 'triapos')]\n\n token = String.Other\n states = {'sigils': []}\n\n for term, name in triquotes:\n states['sigils'] += [\n (r'(~[a-z])(%s)' % (term,), bygroups(token, String.Heredoc),\n (name + '-end', name + '-intp')),\n (r'(~[A-Z])(%s)' % (term,), bygroups(token, String.Heredoc),\n (name + '-end', name + '-no-intp')),\n ]\n\n states[name + '-end'] = [\n (r'[a-zA-Z]+', token, '#pop'),\n default('#pop'),\n ]\n states[name + '-intp'] = [\n (r'^\\s*' + term, String.Heredoc, '#pop'),\n include('heredoc_interpol'),\n ]\n states[name + '-no-intp'] = [\n (r'^\\s*' + term, String.Heredoc, '#pop'),\n include('heredoc_no_interpol'),\n ]\n\n for lterm, rterm, name in terminators:\n states['sigils'] += [\n (r'~[a-z]' + lterm, token, name + '-intp'),\n (r'~[A-Z]' + lterm, token, name + '-no-intp'),\n ]\n states[name + '-intp'] = gen_elixir_sigstr_rules(rterm, token)\n states[name + '-no-intp'] = \\\n gen_elixir_sigstr_rules(rterm, token, interpol=False)\n\n return states\n\n op3_re = \"|\".join(re.escape(s) for s in OPERATORS3)\n op2_re = \"|\".join(re.escape(s) for s in OPERATORS2)\n op1_re = \"|\".join(re.escape(s) for s in OPERATORS1)\n ops_re = r'(?:%s|%s|%s)' % (op3_re, op2_re, op1_re)\n punctuation_re = \"|\".join(re.escape(s) for s in PUNCTUATION)\n alnum = 
r'\\w'\n name_re = r'(?:\\.\\.\\.|[a-z_]%s*[!?]?)' % alnum\n modname_re = r'[A-Z]%(alnum)s*(?:\\.[A-Z]%(alnum)s*)*' % {'alnum': alnum}\n complex_name_re = r'(?:%s|%s|%s)' % (name_re, modname_re, ops_re)\n special_atom_re = r'(?:\\.\\.\\.|<<>>|%\\{\\}|%|\\{\\})'\n\n long_hex_char_re = r'(\\\\x\\{)([\\da-fA-F]+)(\\})'\n hex_char_re = r'(\\\\x[\\da-fA-F]{1,2})'\n escape_char_re = r'(\\\\[abdefnrstv])'\n\n tokens = {\n 'root': [\n (r'\\s+', Text),\n (r'#.*$', Comment.Single),\n\n # Various kinds of characters\n (r'(\\?)' + long_hex_char_re,\n bygroups(String.Char,\n String.Escape, Number.Hex, String.Escape)),\n (r'(\\?)' + hex_char_re,\n bygroups(String.Char, String.Escape)),\n (r'(\\?)' + escape_char_re,\n bygroups(String.Char, String.Escape)),\n (r'\\?\\\\?.', String.Char),\n\n # '::' has to go before atoms\n (r':::', String.Symbol),\n (r'::', Operator),\n\n # atoms\n (r':' + special_atom_re, String.Symbol),\n (r':' + complex_name_re, String.Symbol),\n (r':\"', String.Symbol, 'string_double_atom'),\n (r\":'\", String.Symbol, 'string_single_atom'),\n\n # [keywords: ...]\n (r'(%s|%s)(:)(?=\\s|\\n)' % (special_atom_re, complex_name_re),\n bygroups(String.Symbol, Punctuation)),\n\n # @attributes\n (r'@' + name_re, Name.Attribute),\n\n # identifiers\n (name_re, Name),\n (r'(%%?)(%s)' % (modname_re,), bygroups(Punctuation, Name.Class)),\n\n # operators and punctuation\n (op3_re, Operator),\n (op2_re, Operator),\n (punctuation_re, Punctuation),\n (r'&\\d', Name.Entity), # anon func arguments\n (op1_re, Operator),\n\n # numbers\n (r'0b[01]+', Number.Bin),\n (r'0o[0-7]+', Number.Oct),\n (r'0x[\\da-fA-F]+', Number.Hex),\n (r'\\d(_?\\d)*\\.\\d(_?\\d)*([eE][-+]?\\d(_?\\d)*)?', Number.Float),\n (r'\\d(_?\\d)*', Number.Integer),\n\n # strings and heredocs\n (r'\"\"\"\\s*', String.Heredoc, 'heredoc_double'),\n (r\"'''\\s*$\", String.Heredoc, 'heredoc_single'),\n (r'\"', String.Double, 'string_double'),\n (r\"'\", String.Single, 'string_single'),\n\n include('sigils'),\n\n (r'%\\{', Punctuation, 'map_key'),\n (r'\\{', Punctuation, 'tuple'),\n ],\n 'heredoc_double': [\n (r'^\\s*\"\"\"', String.Heredoc, '#pop'),\n include('heredoc_interpol'),\n ],\n 'heredoc_single': [\n (r\"^\\s*'''\", String.Heredoc, '#pop'),\n include('heredoc_interpol'),\n ],\n 'heredoc_interpol': [\n (r'[^#\\\\\\n]+', String.Heredoc),\n include('escapes'),\n (r'\\\\.', String.Heredoc),\n (r'\\n+', String.Heredoc),\n include('interpol'),\n ],\n 'heredoc_no_interpol': [\n (r'[^\\\\\\n]+', String.Heredoc),\n (r'\\\\.', String.Heredoc),\n (r'\\n+', String.Heredoc),\n ],\n 'escapes': [\n (long_hex_char_re,\n bygroups(String.Escape, Number.Hex, String.Escape)),\n (hex_char_re, String.Escape),\n (escape_char_re, String.Escape),\n ],\n 'interpol': [\n (r'#\\{', String.Interpol, 'interpol_string'),\n ],\n 'interpol_string': [\n (r'\\}', String.Interpol, \"#pop\"),\n include('root')\n ],\n 'map_key': [\n include('root'),\n (r':', Punctuation, 'map_val'),\n (r'=>', Punctuation, 'map_val'),\n (r'\\}', Punctuation, '#pop'),\n ],\n 'map_val': [\n include('root'),\n (r',', Punctuation, '#pop'),\n (r'(?=\\})', Punctuation, '#pop'),\n ],\n 'tuple': [\n include('root'),\n (r'\\}', Punctuation, '#pop'),\n ],\n }\n tokens.update(gen_elixir_string_rules('double', '\"', String.Double))\n tokens.update(gen_elixir_string_rules('single', \"'\", String.Single))\n tokens.update(gen_elixir_string_rules('double_atom', '\"', String.Symbol))\n tokens.update(gen_elixir_string_rules('single_atom', \"'\", String.Symbol))\n 
tokens.update(gen_elixir_sigil_rules())\n\n\nclass ElixirConsoleLexer(Lexer):\n \"\"\"\n For Elixir interactive console (iex) output like:\n\n .. sourcecode:: iex\n\n iex> [head | tail] = [1,2,3]\n [1,2,3]\n iex> head\n 1\n iex> tail\n [2,3]\n iex> [head | tail]\n [1,2,3]\n iex> length [head | tail]\n 3\n\n .. versionadded:: 1.5\n \"\"\"\n\n name = 'Elixir iex session'\n aliases = ['iex']\n mimetypes = ['text/x-elixir-shellsession']\n\n _prompt_re = re.compile(r'(iex|\\.{3})((?:\\([\\w@_.]+\\))?\\d+|\\(\\d+\\))?> ')\n\n def get_tokens_unprocessed(self, text):\n exlexer = ElixirLexer(**self.options)\n\n curcode = ''\n in_error = False\n insertions = []\n for match in line_re.finditer(text):\n line = match.group()\n if line.startswith(u'** '):\n in_error = True\n insertions.append((len(curcode),\n [(0, Generic.Error, line[:-1])]))\n curcode += line[-1:]\n else:\n m = self._prompt_re.match(line)\n if m is not None:\n in_error = False\n end = m.end()\n insertions.append((len(curcode),\n [(0, Generic.Prompt, line[:end])]))\n curcode += line[end:]\n else:\n if curcode:\n for item in do_insertions(\n insertions, exlexer.get_tokens_unprocessed(curcode)):\n yield item\n curcode = ''\n insertions = []\n token = Generic.Error if in_error else Generic.Output\n yield match.start(), token, line\n if curcode:\n for item in do_insertions(\n insertions, exlexer.get_tokens_unprocessed(curcode)):\n yield item\n",
"id": "4182186",
"language": "Python",
"matching_score": 4.206193923950195,
"max_stars_count": 6989,
"path": "pygments/lexers/erlang.py"
},
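A minimal sketch for the Erlang/Elixir record above, assuming ErlangLexer is importable from pygments.lexers as usual: the keyword, builtin and word-operator tables shown in the record drive how a one-line snippet is split into (token, value) pairs.

# Minimal sketch: run a one-line Erlang expression through the ErlangLexer
# defined above and print the resulting token stream.
from pygments.lexers import ErlangLexer

for token, value in ErlangLexer().get_tokens('is_atom(foo) andalso length([1,2]).\n'):
    print(token, repr(value))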
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n pygments.lexers.slash\n ~~~~~~~~~~~~~~~~~~~~~\n\n Lexer for the `Slash <https://github.com/arturadib/Slash-A>`_ programming\n language.\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nfrom pygments.lexer import ExtendedRegexLexer, bygroups, DelegatingLexer\nfrom pygments.token import Name, Number, String, Comment, Punctuation, \\\n Other, Keyword, Operator, Whitespace\n\n__all__ = ['SlashLexer']\n\n\nclass SlashLanguageLexer(ExtendedRegexLexer):\n _nkw = r'(?=[^a-zA-Z_0-9])'\n\n def move_state(new_state):\n return (\"#pop\", new_state)\n\n def right_angle_bracket(lexer, match, ctx):\n if len(ctx.stack) > 1 and ctx.stack[-2] == \"string\":\n ctx.stack.pop()\n yield match.start(), String.Interpol, u\"}\"\n ctx.pos = match.end()\n pass\n\n tokens = {\n \"root\": [\n (r\"<%=\", Comment.Preproc, move_state(\"slash\")),\n (r\"<%!!\", Comment.Preproc, move_state(\"slash\")),\n (r\"<%#.*?%>\", Comment.Multiline),\n (r\"<%\", Comment.Preproc, move_state(\"slash\")),\n (r\".|\\n\", Other),\n ],\n \"string\": [\n (r\"\\\\\", String.Escape, move_state(\"string_e\")),\n (r\"\\\"\", String, move_state(\"slash\")),\n (r\"#\\{\", String.Interpol, \"slash\"),\n (r'.|\\n', String),\n ],\n \"string_e\": [\n (r'n', String.Escape, move_state(\"string\")),\n (r't', String.Escape, move_state(\"string\")),\n (r'r', String.Escape, move_state(\"string\")),\n (r'e', String.Escape, move_state(\"string\")),\n (r'x[a-fA-F0-9]{2}', String.Escape, move_state(\"string\")),\n (r'.', String.Escape, move_state(\"string\")),\n ],\n \"regexp\": [\n (r'}[a-z]*', String.Regex, move_state(\"slash\")),\n (r'\\\\(.|\\n)', String.Regex),\n (r'{', String.Regex, \"regexp_r\"),\n (r'.|\\n', String.Regex),\n ],\n \"regexp_r\": [\n (r'}[a-z]*', String.Regex, \"#pop\"),\n (r'\\\\(.|\\n)', String.Regex),\n (r'{', String.Regex, \"regexp_r\"),\n ],\n \"slash\": [\n (r\"%>\", Comment.Preproc, move_state(\"root\")),\n (r\"\\\"\", String, move_state(\"string\")),\n (r\"'[a-zA-Z0-9_]+\", String),\n (r'%r{', String.Regex, move_state(\"regexp\")),\n (r'/\\*.*?\\*/', Comment.Multiline),\n (r\"(#|//).*?\\n\", Comment.Single),\n (r'-?[0-9]+e[+-]?[0-9]+', Number.Float),\n (r'-?[0-9]+\\.[0-9]+(e[+-]?[0-9]+)?', Number.Float),\n (r'-?[0-9]+', Number.Integer),\n (r'nil'+_nkw, Name.Builtin),\n (r'true'+_nkw, Name.Builtin),\n (r'false'+_nkw, Name.Builtin),\n (r'self'+_nkw, Name.Builtin),\n (r'(class)(\\s+)([A-Z][a-zA-Z0-9_\\']*)',\n bygroups(Keyword, Whitespace, Name.Class)),\n (r'class'+_nkw, Keyword),\n (r'extends'+_nkw, Keyword),\n (r'(def)(\\s+)(self)(\\s*)(\\.)(\\s*)([a-z_][a-zA-Z0-9_\\']*=?|<<|>>|==|<=>|<=|<|>=|>|\\+|-(self)?|~(self)?|\\*|/|%|^|&&|&|\\||\\[\\]=?)',\n bygroups(Keyword, Whitespace, Name.Builtin, Whitespace, Punctuation, Whitespace, Name.Function)),\n (r'(def)(\\s+)([a-z_][a-zA-Z0-9_\\']*=?|<<|>>|==|<=>|<=|<|>=|>|\\+|-(self)?|~(self)?|\\*|/|%|^|&&|&|\\||\\[\\]=?)',\n bygroups(Keyword, Whitespace, Name.Function)),\n (r'def'+_nkw, Keyword),\n (r'if'+_nkw, Keyword),\n (r'elsif'+_nkw, Keyword),\n (r'else'+_nkw, Keyword),\n (r'unless'+_nkw, Keyword),\n (r'for'+_nkw, Keyword),\n (r'in'+_nkw, Keyword),\n (r'while'+_nkw, Keyword),\n (r'until'+_nkw, Keyword),\n (r'and'+_nkw, Keyword),\n (r'or'+_nkw, Keyword),\n (r'not'+_nkw, Keyword),\n (r'lambda'+_nkw, Keyword),\n (r'try'+_nkw, Keyword),\n (r'catch'+_nkw, Keyword),\n (r'return'+_nkw, Keyword),\n (r'next'+_nkw, Keyword),\n (r'last'+_nkw, Keyword),\n (r'throw'+_nkw, Keyword),\n 
(r'use'+_nkw, Keyword),\n (r'switch'+_nkw, Keyword),\n (r'\\\\', Keyword),\n (r'λ', Keyword),\n (r'__FILE__'+_nkw, Name.Builtin.Pseudo),\n (r'__LINE__'+_nkw, Name.Builtin.Pseudo),\n (r'[A-Z][a-zA-Z0-9_\\']*'+_nkw, Name.Constant),\n (r'[a-z_][a-zA-Z0-9_\\']*'+_nkw, Name),\n (r'@[a-z_][a-zA-Z0-9_\\']*'+_nkw, Name.Variable.Instance),\n (r'@@[a-z_][a-zA-Z0-9_\\']*'+_nkw, Name.Variable.Class),\n (r'\\(', Punctuation),\n (r'\\)', Punctuation),\n (r'\\[', Punctuation),\n (r'\\]', Punctuation),\n (r'\\{', Punctuation),\n (r'\\}', right_angle_bracket),\n (r';', Punctuation),\n (r',', Punctuation),\n (r'<<=', Operator),\n (r'>>=', Operator),\n (r'<<', Operator),\n (r'>>', Operator),\n (r'==', Operator),\n (r'!=', Operator),\n (r'=>', Operator),\n (r'=', Operator),\n (r'<=>', Operator),\n (r'<=', Operator),\n (r'>=', Operator),\n (r'<', Operator),\n (r'>', Operator),\n (r'\\+\\+', Operator),\n (r'\\+=', Operator),\n (r'-=', Operator),\n (r'\\*\\*=', Operator),\n (r'\\*=', Operator),\n (r'\\*\\*', Operator),\n (r'\\*', Operator),\n (r'/=', Operator),\n (r'\\+', Operator),\n (r'-', Operator),\n (r'/', Operator),\n (r'%=', Operator),\n (r'%', Operator),\n (r'^=', Operator),\n (r'&&=', Operator),\n (r'&=', Operator),\n (r'&&', Operator),\n (r'&', Operator),\n (r'\\|\\|=', Operator),\n (r'\\|=', Operator),\n (r'\\|\\|', Operator),\n (r'\\|', Operator),\n (r'!', Operator),\n (r'\\.\\.\\.', Operator),\n (r'\\.\\.', Operator),\n (r'\\.', Operator),\n (r'::', Operator),\n (r':', Operator),\n (r'(\\s|\\n)+', Whitespace),\n (r'[a-z_][a-zA-Z0-9_\\']*', Name.Variable),\n ],\n }\n\n\nclass SlashLexer(DelegatingLexer):\n \"\"\"\n Lexer for the Slash programming language.\n\n .. versionadded:: 2.4\n \"\"\"\n\n name = 'Slash'\n aliases = ['slash']\n filenames = ['*.sl']\n\n def __init__(self, **options):\n from pygments.lexers.web import HtmlLexer\n super(SlashLexer, self).__init__(HtmlLexer, SlashLanguageLexer, **options)\n",
"id": "6432310",
"language": "Python",
"matching_score": 3.1808390617370605,
"max_stars_count": 6989,
"path": "pygments/lexers/slash.py"
},
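A minimal sketch of the delegation set up by SlashLexer in the record above (HtmlLexer for the surrounding markup, SlashLanguageLexer for the code inside <% ... %> tags); it assumes Pygments 2.4 or later, where SlashLexer is exported from pygments.lexers per the versionadded note.

# Minimal sketch: highlight a tiny Slash template. Markup outside the
# template tags is handled by HtmlLexer, code inside <%= ... %> by
# SlashLanguageLexer, as wired up by the DelegatingLexer above.
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import SlashLexer

src = '<p><%= "hello" %></p>\n'
print(highlight(src, SlashLexer(), TerminalFormatter()))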
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n Basic CrystalLexer Test\n ~~~~~~~~~~~~~~~~~~~~~~~\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport pytest\n\nfrom pygments.token import Text, Operator, Keyword, Name, String, Number, \\\n Punctuation, Error\nfrom pygments.lexers import CrystalLexer\n\n\n@pytest.fixture(scope='module')\ndef lexer():\n yield CrystalLexer()\n\n\ndef test_range_syntax1(lexer):\n fragment = '1...3\\n'\n tokens = [\n (Number.Integer, '1'),\n (Operator, '...'),\n (Number.Integer, '3'),\n (Text, '\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_range_syntax2(lexer):\n fragment = '1 .. 3\\n'\n tokens = [\n (Number.Integer, '1'),\n (Text, ' '),\n (Operator, '..'),\n (Text, ' '),\n (Number.Integer, '3'),\n (Text, '\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_interpolation_nested_curly(lexer):\n fragment = (\n '\"A#{ (3..5).group_by { |x| x/2}.map '\n 'do |k,v| \"#{k}\" end.join }\" + \"Z\"\\n')\n tokens = [\n (String.Double, '\"'),\n (String.Double, 'A'),\n (String.Interpol, '#{'),\n (Text, ' '),\n (Punctuation, '('),\n (Number.Integer, '3'),\n (Operator, '..'),\n (Number.Integer, '5'),\n (Punctuation, ')'),\n (Operator, '.'),\n (Name, 'group_by'),\n (Text, ' '),\n (String.Interpol, '{'),\n (Text, ' '),\n (Operator, '|'),\n (Name, 'x'),\n (Operator, '|'),\n (Text, ' '),\n (Name, 'x'),\n (Operator, '/'),\n (Number.Integer, '2'),\n (String.Interpol, '}'),\n (Operator, '.'),\n (Name, 'map'),\n (Text, ' '),\n (Keyword, 'do'),\n (Text, ' '),\n (Operator, '|'),\n (Name, 'k'),\n (Punctuation, ','),\n (Name, 'v'),\n (Operator, '|'),\n (Text, ' '),\n (String.Double, '\"'),\n (String.Interpol, '#{'),\n (Name, 'k'),\n (String.Interpol, '}'),\n (String.Double, '\"'),\n (Text, ' '),\n (Keyword, 'end'),\n (Operator, '.'),\n (Name, 'join'),\n (Text, ' '),\n (String.Interpol, '}'),\n (String.Double, '\"'),\n (Text, ' '),\n (Operator, '+'),\n (Text, ' '),\n (String.Double, '\"'),\n (String.Double, 'Z'),\n (String.Double, '\"'),\n (Text, '\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_operator_methods(lexer):\n fragment = '([] of Int32).[]?(5)\\n'\n tokens = [\n (Punctuation, '('),\n (Operator, '['),\n (Operator, ']'),\n (Text, ' '),\n (Keyword, 'of'),\n (Text, ' '),\n (Name.Builtin, 'Int32'),\n (Punctuation, ')'),\n (Operator, '.'),\n (Name.Operator, '[]?'),\n (Punctuation, '('),\n (Number.Integer, '5'),\n (Punctuation, ')'),\n (Text, '\\n')\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_array_access(lexer):\n fragment = '[5][5]?\\n'\n tokens = [\n (Operator, '['),\n (Number.Integer, '5'),\n (Operator, ']'),\n (Operator, '['),\n (Number.Integer, '5'),\n (Operator, ']?'),\n (Text, '\\n')\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_numbers(lexer):\n for kind, testset in [\n (Number.Integer, '0 1 1_000_000 1u8 11231231231121312i64'),\n (Number.Float, '0.0 1.0_f32 1_f32 0f64 1e+4 1e111 1_234.567_890'),\n (Number.Bin, '0b1001_0110 0b0u8'),\n (Number.Oct, '0o17 0o7_i32'),\n (Number.Hex, '0xdeadBEEF'),\n ]:\n for fragment in testset.split():\n assert list(lexer.get_tokens(fragment + '\\n')) == \\\n [(kind, fragment), (Text, '\\n')]\n\n for fragment in '01 0b2 0x129g2 0o12358'.split():\n assert next(lexer.get_tokens(fragment + '\\n'))[0] == Error\n\n\ndef test_chars(lexer):\n for fragment in [\"'a'\", \"'я'\", \"'\\\\u{1234}'\", 
\"'\\n'\"]:\n assert list(lexer.get_tokens(fragment + '\\n')) == \\\n [(String.Char, fragment), (Text, '\\n')]\n assert next(lexer.get_tokens(\"'abc'\"))[0] == Error\n\n\ndef test_macro(lexer):\n fragment = (\n 'def<=>(other : self) : Int\\n'\n '{%for field in %w(first_name middle_name last_name)%}\\n'\n 'cmp={{field.id}}<=>other.{{field.id}}\\n'\n 'return cmp if cmp!=0\\n'\n '{%end%}\\n'\n '0\\n'\n 'end\\n')\n tokens = [\n (Keyword, 'def'),\n (Name.Function, '<=>'),\n (Punctuation, '('),\n (Name, 'other'),\n (Text, ' '),\n (Punctuation, ':'),\n (Text, ' '),\n (Keyword.Pseudo, 'self'),\n (Punctuation, ')'),\n (Text, ' '),\n (Punctuation, ':'),\n (Text, ' '),\n (Name.Builtin, 'Int'),\n (Text, '\\n'),\n (String.Interpol, '{%'),\n (Keyword, 'for'),\n (Text, ' '),\n (Name, 'field'),\n (Text, ' '),\n (Keyword, 'in'),\n (Text, ' '),\n (String.Other, '%w('),\n (String.Other, 'first_name middle_name last_name'),\n (String.Other, ')'),\n (String.Interpol, '%}'),\n (Text, '\\n'),\n (Name, 'cmp'),\n (Operator, '='),\n (String.Interpol, '{{'),\n (Name, 'field'),\n (Operator, '.'),\n (Name, 'id'),\n (String.Interpol, '}}'),\n (Operator, '<=>'),\n (Name, 'other'),\n (Operator, '.'),\n (String.Interpol, '{{'),\n (Name, 'field'),\n (Operator, '.'),\n (Name, 'id'),\n (String.Interpol, '}}'),\n (Text, '\\n'),\n (Keyword, 'return'),\n (Text, ' '),\n (Name, 'cmp'),\n (Text, ' '),\n (Keyword, 'if'),\n (Text, ' '),\n (Name, 'cmp'),\n (Operator, '!='),\n (Number.Integer, '0'),\n (Text, '\\n'),\n (String.Interpol, '{%'),\n (Keyword, 'end'),\n (String.Interpol, '%}'),\n (Text, '\\n'),\n (Number.Integer, '0'),\n (Text, '\\n'),\n (Keyword, 'end'),\n (Text, '\\n')\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_lib(lexer):\n fragment = (\n '@[Link(\"some\")]\\nlib LibSome\\n'\n '@[CallConvention(\"X86_StdCall\")]\\nfun foo=\"some.foo\"(thing : Void*) : LibC::Int\\n'\n 'end\\n')\n tokens = [\n (Operator, '@['),\n (Name.Decorator, 'Link'),\n (Punctuation, '('),\n (String.Double, '\"'),\n (String.Double, 'some'),\n (String.Double, '\"'),\n (Punctuation, ')'),\n (Operator, ']'),\n (Text, '\\n'),\n (Keyword, 'lib'),\n (Text, ' '),\n (Name.Namespace, 'LibSome'),\n (Text, '\\n'),\n (Operator, '@['),\n (Name.Decorator, 'CallConvention'),\n (Punctuation, '('),\n (String.Double, '\"'),\n (String.Double, 'X86_StdCall'),\n (String.Double, '\"'),\n (Punctuation, ')'),\n (Operator, ']'),\n (Text, '\\n'),\n (Keyword, 'fun'),\n (Text, ' '),\n (Name.Function, 'foo'),\n (Operator, '='),\n (String.Double, '\"'),\n (String.Double, 'some.foo'),\n (String.Double, '\"'),\n (Punctuation, '('),\n (Name, 'thing'),\n (Text, ' '),\n (Punctuation, ':'),\n (Text, ' '),\n (Name.Builtin, 'Void'),\n (Operator, '*'),\n (Punctuation, ')'),\n (Text, ' '),\n (Punctuation, ':'),\n (Text, ' '),\n (Name, 'LibC'),\n (Operator, '::'),\n (Name.Builtin, 'Int'),\n (Text, '\\n'),\n (Keyword, 'end'),\n (Text, '\\n')\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_escaped_bracestring(lexer):\n fragment = 'str.gsub(%r{\\\\\\\\\\\\\\\\}, \"/\")\\n'\n tokens = [\n (Name, 'str'),\n (Operator, '.'),\n (Name, 'gsub'),\n (Punctuation, '('),\n (String.Regex, '%r{'),\n (String.Regex, '\\\\\\\\'),\n (String.Regex, '\\\\\\\\'),\n (String.Regex, '}'),\n (Punctuation, ','),\n (Text, ' '),\n (String.Double, '\"'),\n (String.Double, '/'),\n (String.Double, '\"'),\n (Punctuation, ')'),\n (Text, '\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n",
"id": "1679391",
"language": "Python",
"matching_score": 1.9780948162078857,
"max_stars_count": 1,
"path": "tests/test_crystal.py"
},
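The test files in this and the following records all follow the same pattern; a condensed sketch of it, reusing the first Crystal range-syntax case from the record above, looks like this.

# Minimal sketch of the test pattern: compare the lexer's token stream
# against an expected list of (token, value) pairs.
from pygments.lexers import CrystalLexer
from pygments.token import Number, Operator, Text

fragment = '1...3\n'
expected = [
    (Number.Integer, '1'),
    (Operator, '...'),
    (Number.Integer, '3'),
    (Text, '\n'),
]
assert list(CrystalLexer().get_tokens(fragment)) == expected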
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n Basic Apache Configuration Test\n ~~~~~~~~~~~~~~~~~--------------\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport textwrap\n\nimport pytest\n\nfrom pygments.token import Text, Number, Token\nfrom pygments.lexers import configs\n\n\n@pytest.fixture(scope='module')\ndef lexer():\n yield configs.ApacheConfLexer()\n\n\ndef test_multiline_comment(lexer):\n fragment = '#SecAction \\\\\\n \"id:\\'900004\\', \\\\\\n phase:1, \\\\\\n t:none, \\\\\\n setvar:tx.anomaly_score_blocking=on, \\\\\\n nolog, \\\\\\n pass\"\\n \\n'\n tokens = [\n (Token.Comment, '#SecAction \\\\\\n \"id:\\'900004\\', \\\\\\n phase:1, \\\\\\n t:none, \\\\\\n setvar:tx.anomaly_score_blocking=on, \\\\\\n nolog, \\\\\\n pass\"'),\n (Token.Text, '\\n \\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\ndef test_multiline_argument(lexer):\n fragment = 'SecAction \\\\\\n \"id:\\'900001\\', \\\\\\n phase:1, \\\\\\n t:none, \\\\\\n setvar:tx.critical_anomaly_score=5, \\\\\\n setvar:tx.error_anomaly_score=4, \\\\\\n setvar:tx.warning_anomaly_score=3, \\\\\\n setvar:tx.notice_anomaly_score=2, \\\\\\n nolog, \\\\\\n pass\"\\n'\n tokens = [\n (Token.Name.Builtin, 'SecAction'),\n (Token.Text, ' '),\n (Token.Text, '\\\\\\n'),\n (Token.Text, ' '),\n (Token.Literal.String.Double, '\"id:\\'900001\\', \\\\\\n phase:1, \\\\\\n t:none, \\\\\\n setvar:tx.critical_anomaly_score=5, \\\\\\n setvar:tx.error_anomaly_score=4, \\\\\\n setvar:tx.warning_anomaly_score=3, \\\\\\n setvar:tx.notice_anomaly_score=2, \\\\\\n nolog, \\\\\\n pass\"'),\n (Token.Text, ''),\n (Token.Text, '\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\ndef test_directive_no_args(lexer):\n fragment = 'Example\\nServerName localhost'\n tokens = [\n (Token.Name.Builtin, 'Example'),\n (Token.Text, ''),\n (Token.Text, '\\n'),\n (Token.Name.Builtin, 'ServerName'),\n (Token.Text, ' '),\n (Token.Text, 'localhost'),\n (Token.Text, ''),\n (Token.Text, '\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n",
"id": "5728431",
"language": "Python",
"matching_score": 3.345684766769409,
"max_stars_count": 0,
"path": "tests/test_apache_conf.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n PHP Tests\n ~~~~~~~~~\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport pytest\n\nfrom pygments.lexers import PhpLexer\nfrom pygments.token import Token\n\n\n@pytest.fixture(scope='module')\ndef lexer():\n yield PhpLexer()\n\n\ndef test_string_escaping_run(lexer):\n fragment = '<?php $x=\"{\\\\\"\"; ?>\\n'\n tokens = [\n (Token.Comment.Preproc, '<?php'),\n (Token.Text, ' '),\n (Token.Name.Variable, '$x'),\n (Token.Operator, '='),\n (Token.Literal.String.Double, '\"'),\n (Token.Literal.String.Double, '{'),\n (Token.Literal.String.Escape, '\\\\\"'),\n (Token.Literal.String.Double, '\"'),\n (Token.Punctuation, ';'),\n (Token.Text, ' '),\n (Token.Comment.Preproc, '?>'),\n (Token.Other, '\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n",
"id": "3540957",
"language": "Python",
"matching_score": 3.2590689659118652,
"max_stars_count": 1,
"path": "tests/test_php.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n Basic Shell Tests\n ~~~~~~~~~~~~~~~~~\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport pytest\n\nfrom pygments.token import Token\nfrom pygments.lexers import BashLexer, BashSessionLexer, MSDOSSessionLexer\n\n\n@pytest.fixture(scope='module')\ndef lexer_bash():\n yield BashLexer()\n\n\n@pytest.fixture(scope='module')\ndef lexer_session():\n yield BashSessionLexer()\n\n\n@pytest.fixture(scope='module')\ndef lexer_msdos():\n yield MSDOSSessionLexer()\n\n\ndef test_curly_no_escape_and_quotes(lexer_bash):\n fragment = u'echo \"${a//[\"b\"]/}\"\\n'\n tokens = [\n (Token.Name.Builtin, u'echo'),\n (Token.Text, u' '),\n (Token.Literal.String.Double, u'\"'),\n (Token.String.Interpol, u'${'),\n (Token.Name.Variable, u'a'),\n (Token.Punctuation, u'//['),\n (Token.Literal.String.Double, u'\"b\"'),\n (Token.Punctuation, u']/'),\n (Token.String.Interpol, u'}'),\n (Token.Literal.String.Double, u'\"'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer_bash.get_tokens(fragment)) == tokens\n\n\ndef test_curly_with_escape(lexer_bash):\n fragment = u'echo ${a//[\\\\\"]/}\\n'\n tokens = [\n (Token.Name.Builtin, u'echo'),\n (Token.Text, u' '),\n (Token.String.Interpol, u'${'),\n (Token.Name.Variable, u'a'),\n (Token.Punctuation, u'//['),\n (Token.Literal.String.Escape, u'\\\\\"'),\n (Token.Punctuation, u']/'),\n (Token.String.Interpol, u'}'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer_bash.get_tokens(fragment)) == tokens\n\n\ndef test_parsed_single(lexer_bash):\n fragment = u\"a=$'abc\\\\''\\n\"\n tokens = [\n (Token.Name.Variable, u'a'),\n (Token.Operator, u'='),\n (Token.Literal.String.Single, u\"$'abc\\\\''\"),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer_bash.get_tokens(fragment)) == tokens\n\n\ndef test_short_variable_names(lexer_bash):\n fragment = u'x=\"$\"\\ny=\"$_\"\\nz=\"$abc\"\\n'\n tokens = [\n # single lone $\n (Token.Name.Variable, u'x'),\n (Token.Operator, u'='),\n (Token.Literal.String.Double, u'\"'),\n (Token.Text, u'$'),\n (Token.Literal.String.Double, u'\"'),\n (Token.Text, u'\\n'),\n # single letter shell var\n (Token.Name.Variable, u'y'),\n (Token.Operator, u'='),\n (Token.Literal.String.Double, u'\"'),\n (Token.Name.Variable, u'$_'),\n (Token.Literal.String.Double, u'\"'),\n (Token.Text, u'\\n'),\n # multi-letter user var\n (Token.Name.Variable, u'z'),\n (Token.Operator, u'='),\n (Token.Literal.String.Double, u'\"'),\n (Token.Name.Variable, u'$abc'),\n (Token.Literal.String.Double, u'\"'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer_bash.get_tokens(fragment)) == tokens\n\n\ndef test_array_nums(lexer_bash):\n fragment = u'a=(1 2 3)\\n'\n tokens = [\n (Token.Name.Variable, u'a'),\n (Token.Operator, u'='),\n (Token.Operator, u'('),\n (Token.Literal.Number, u'1'),\n (Token.Text, u' '),\n (Token.Literal.Number, u'2'),\n (Token.Text, u' '),\n (Token.Literal.Number, u'3'),\n (Token.Operator, u')'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer_bash.get_tokens(fragment)) == tokens\n\n\ndef test_end_of_line_nums(lexer_bash):\n fragment = u'a=1\\nb=2 # comment\\n'\n tokens = [\n (Token.Name.Variable, u'a'),\n (Token.Operator, u'='),\n (Token.Literal.Number, u'1'),\n (Token.Text, u'\\n'),\n (Token.Name.Variable, u'b'),\n (Token.Operator, u'='),\n (Token.Literal.Number, u'2'),\n (Token.Text, u' '),\n (Token.Comment.Single, u'# comment\\n'),\n ]\n assert list(lexer_bash.get_tokens(fragment)) == tokens\n\n\ndef test_newline_in_echo(lexer_session):\n fragment = u'$ 
echo \\\\\\nhi\\nhi\\n'\n tokens = [\n (Token.Text, u''),\n (Token.Generic.Prompt, u'$'),\n (Token.Text, u' '),\n (Token.Name.Builtin, u'echo'),\n (Token.Text, u' '),\n (Token.Literal.String.Escape, u'\\\\\\n'),\n (Token.Text, u'hi'),\n (Token.Text, u'\\n'),\n (Token.Generic.Output, u'hi\\n'),\n ]\n assert list(lexer_session.get_tokens(fragment)) == tokens\n\n\ndef test_msdos_gt_only(lexer_msdos):\n fragment = u'> py\\nhi\\n'\n tokens = [\n (Token.Text, u''),\n (Token.Generic.Prompt, u'>'),\n (Token.Text, u' '),\n (Token.Text, u'py'),\n (Token.Text, u''),\n (Token.Text, u'\\n'),\n (Token.Generic.Output, u'hi\\n'),\n ]\n assert list(lexer_msdos.get_tokens(fragment)) == tokens\n\ndef test_virtualenv(lexer_session):\n fragment = u'(env) [~/project]$ foo -h\\n'\n tokens = [\n (Token.Text, u''),\n (Token.Generic.Prompt.VirtualEnv, u'(env)'),\n (Token.Text, u''),\n (Token.Text, u' '),\n (Token.Text, u''),\n (Token.Generic.Prompt, u'[~/project]$'),\n (Token.Text, u' '),\n (Token.Text, u'foo'),\n (Token.Text, u' '),\n (Token.Text, u'-h'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer_session.get_tokens(fragment)) == tokens\n",
"id": "6211806",
"language": "Python",
"matching_score": 2.823841094970703,
"max_stars_count": 1,
"path": "tests/test_shell.py"
},
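A minimal sketch of the prompt/output split that the shell-session tests in the record above assert, assuming BashSessionLexer is available from pygments.lexers.

# Minimal sketch: the prompt and command are lexed as Bash, while the
# following line is emitted as Generic.Output, matching the tests above.
from pygments.lexers import BashSessionLexer

for token, value in BashSessionLexer().get_tokens('$ echo hi\nhi\n'):
    print(token, repr(value))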
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n Basic Grammar Notation Tests\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport pytest\n\nfrom pygments.token import Token\nfrom pygments.lexers import PegLexer\n\n\n@pytest.fixture(scope='module')\ndef lexer_peg():\n yield PegLexer()\n\n\ndef test_peg_basic(lexer_peg):\n fragment = u'rule<-(\"terminal\"/nonterminal/[cls])*\\n'\n tokens = [\n (Token.Name.Class, u'rule'),\n (Token.Operator, u'<-'),\n (Token.Punctuation, u'('),\n (Token.String.Double, u'\"terminal\"'),\n (Token.Operator, u'/'),\n (Token.Name.Class, u'nonterminal'),\n (Token.Operator, u'/'),\n (Token.Punctuation, u'['),\n (Token.String, u'cls'),\n (Token.Punctuation, u']'),\n (Token.Punctuation, u')'),\n (Token.Operator, u'*'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer_peg.get_tokens(fragment)) == tokens\n\n\ndef test_peg_operators(lexer_peg):\n # see for example:\n # - https://github.com/gvanrossum/pegen\n # - https://nim-lang.org/docs/pegs.html\n fragment = u\"rule = 'a' | 'b'\\n\"\n tokens = [\n (Token.Name.Class, u'rule'),\n (Token.Text, u' '),\n (Token.Operator, u'='),\n (Token.Text, u' '),\n (Token.String.Single, u\"'a'\"),\n (Token.Text, u' '),\n (Token.Operator, u'|'),\n (Token.Text, u' '),\n (Token.String.Single, u\"'b'\"),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer_peg.get_tokens(fragment)) == tokens\n fragment = u\"rule: 'a' ~ 'b'\\n\"\n tokens = [\n (Token.Name.Class, u'rule'),\n (Token.Operator, u':'),\n (Token.Text, u' '),\n (Token.String.Single, u\"'a'\"),\n (Token.Text, u' '),\n (Token.Operator, u'~'),\n (Token.Text, u' '),\n (Token.String.Single, u\"'b'\"),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer_peg.get_tokens(fragment)) == tokens\n\n\ndef test_peg_modified_strings(lexer_peg):\n # see for example:\n # - http://textx.github.io/Arpeggio/\n # - https://nim-lang.org/docs/pegs.html\n # - https://github.com/erikrose/parsimonious\n fragment = u'~\"regex\" i\"insensitive\" \"multimod\"ilx (\"not modified\")\\n'\n tokens = [\n # can't handle parsimonious-style regex while ~ is a cut operator\n (Token.Operator, u'~'),\n (Token.String.Double, u'\"regex\"'),\n (Token.Text, u' '),\n (Token.String.Double, u'i\"insensitive\"'),\n (Token.Text, u' '),\n (Token.String.Double, u'\"multimod\"ilx'),\n (Token.Text, u' '),\n (Token.Punctuation, u'('),\n (Token.String.Double, u'\"not modified\"'),\n (Token.Punctuation, u')'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer_peg.get_tokens(fragment)) == tokens\n",
"id": "3809916",
"language": "Python",
"matching_score": 2.2987775802612305,
"max_stars_count": 1,
"path": "tests/test_grammar_notation.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n Basic CLexer Test\n ~~~~~~~~~~~~~~~~~\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport textwrap\n\nimport pytest\n\nfrom pygments.token import Text, Number, Token\nfrom pygments.lexers import CLexer\n\n\n@pytest.fixture(scope='module')\ndef lexer():\n yield CLexer()\n\n\ndef test_numbers(lexer):\n code = '42 23.42 23. .42 023 0xdeadbeef 23e+42 42e-23'\n wanted = []\n for item in zip([Number.Integer, Number.Float, Number.Float,\n Number.Float, Number.Oct, Number.Hex,\n Number.Float, Number.Float], code.split()):\n wanted.append(item)\n wanted.append((Text, ' '))\n wanted = wanted[:-1] + [(Text, '\\n')]\n assert list(lexer.get_tokens(code)) == wanted\n\n\ndef test_switch(lexer):\n fragment = u'''\\\n int main()\n {\n switch (0)\n {\n case 0:\n default:\n ;\n }\n }\n '''\n tokens = [\n (Token.Keyword.Type, u'int'),\n (Token.Text, u' '),\n (Token.Name.Function, u'main'),\n (Token.Punctuation, u'('),\n (Token.Punctuation, u')'),\n (Token.Text, u'\\n'),\n (Token.Punctuation, u'{'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Keyword, u'switch'),\n (Token.Text, u' '),\n (Token.Punctuation, u'('),\n (Token.Literal.Number.Integer, u'0'),\n (Token.Punctuation, u')'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Punctuation, u'{'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Keyword, u'case'),\n (Token.Text, u' '),\n (Token.Literal.Number.Integer, u'0'),\n (Token.Operator, u':'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Keyword, u'default'),\n (Token.Operator, u':'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Punctuation, u'}'),\n (Token.Text, u'\\n'),\n (Token.Punctuation, u'}'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens\n\n\ndef test_switch_space_before_colon(lexer):\n fragment = u'''\\\n int main()\n {\n switch (0)\n {\n case 0 :\n default :\n ;\n }\n }\n '''\n tokens = [\n (Token.Keyword.Type, u'int'),\n (Token.Text, u' '),\n (Token.Name.Function, u'main'),\n (Token.Punctuation, u'('),\n (Token.Punctuation, u')'),\n (Token.Text, u'\\n'),\n (Token.Punctuation, u'{'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Keyword, u'switch'),\n (Token.Text, u' '),\n (Token.Punctuation, u'('),\n (Token.Literal.Number.Integer, u'0'),\n (Token.Punctuation, u')'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Punctuation, u'{'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Keyword, u'case'),\n (Token.Text, u' '),\n (Token.Literal.Number.Integer, u'0'),\n (Token.Text, u' '),\n (Token.Operator, u':'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Keyword, u'default'),\n (Token.Text, u' '),\n (Token.Operator, u':'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Punctuation, u'}'),\n (Token.Text, u'\\n'),\n (Token.Punctuation, u'}'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens\n\n\ndef test_label(lexer):\n fragment = u'''\\\n int main()\n {\n foo:\n goto foo;\n }\n '''\n tokens = [\n (Token.Keyword.Type, u'int'),\n (Token.Text, u' '),\n (Token.Name.Function, u'main'),\n (Token.Punctuation, u'('),\n (Token.Punctuation, u')'),\n (Token.Text, u'\\n'),\n (Token.Punctuation, u'{'),\n (Token.Text, u'\\n'),\n (Token.Name.Label, u'foo'),\n 
(Token.Punctuation, u':'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Keyword, u'goto'),\n (Token.Text, u' '),\n (Token.Name, u'foo'),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n (Token.Punctuation, u'}'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens\n\n\ndef test_label_space_before_colon(lexer):\n fragment = u'''\\\n int main()\n {\n foo :\n goto foo;\n }\n '''\n tokens = [\n (Token.Keyword.Type, u'int'),\n (Token.Text, u' '),\n (Token.Name.Function, u'main'),\n (Token.Punctuation, u'('),\n (Token.Punctuation, u')'),\n (Token.Text, u'\\n'),\n (Token.Punctuation, u'{'),\n (Token.Text, u'\\n'),\n (Token.Name.Label, u'foo'),\n (Token.Text, u' '),\n (Token.Punctuation, u':'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Keyword, u'goto'),\n (Token.Text, u' '),\n (Token.Name, u'foo'),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n (Token.Punctuation, u'}'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens\n\n\ndef test_label_followed_by_statement(lexer):\n fragment = u'''\\\n int main()\n {\n foo:return 0;\n goto foo;\n }\n '''\n tokens = [\n (Token.Keyword.Type, u'int'),\n (Token.Text, u' '),\n (Token.Name.Function, u'main'),\n (Token.Punctuation, u'('),\n (Token.Punctuation, u')'),\n (Token.Text, u'\\n'),\n (Token.Punctuation, u'{'),\n (Token.Text, u'\\n'),\n (Token.Name.Label, u'foo'),\n (Token.Punctuation, u':'),\n (Token.Keyword, u'return'),\n (Token.Text, u' '),\n (Token.Literal.Number.Integer, u'0'),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n (Token.Text, u' '),\n (Token.Keyword, u'goto'),\n (Token.Text, u' '),\n (Token.Name, u'foo'),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n (Token.Punctuation, u'}'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer.get_tokens(textwrap.dedent(fragment))) == tokens\n\n\ndef test_preproc_file(lexer):\n fragment = u'#include <foo>\\n'\n tokens = [\n (Token.Comment.Preproc, u'#'),\n (Token.Comment.Preproc, u'include'),\n (Token.Text, u' '),\n (Token.Comment.PreprocFile, u'<foo>'),\n (Token.Comment.Preproc, u'\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n\n\ndef test_preproc_file2(lexer):\n fragment = u'#include \"foo.h\"\\n'\n tokens = [\n (Token.Comment.Preproc, u'#'),\n (Token.Comment.Preproc, u'include'),\n (Token.Text, u' '),\n (Token.Comment.PreprocFile, u'\"foo.h\"'),\n (Token.Comment.Preproc, u'\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == tokens\n",
"id": "4917489",
"language": "Python",
"matching_score": 2.7934248447418213,
"max_stars_count": 1,
"path": "tests/test_clexer.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n Basic CLexer Test\n ~~~~~~~~~~~~~~~~~\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\nimport pytest\n\nfrom pygments.token import Token\nfrom pygments.lexers import ObjectiveCLexer\n\n\n@pytest.fixture(scope='module')\ndef lexer():\n yield ObjectiveCLexer()\n\n\ndef test_literal_number_int(lexer):\n fragment = u'@(1);\\n'\n expected = [\n (Token.Literal, u'@('),\n (Token.Literal.Number.Integer, u'1'),\n (Token.Literal, u')'),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == expected\n\n\ndef test_literal_number_expression(lexer):\n fragment = u'@(1+2);\\n'\n expected = [\n (Token.Literal, u'@('),\n (Token.Literal.Number.Integer, u'1'),\n (Token.Operator, u'+'),\n (Token.Literal.Number.Integer, u'2'),\n (Token.Literal, u')'),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == expected\n\n\ndef test_literal_number_nested_expression(lexer):\n fragment = u'@(1+(2+3));\\n'\n expected = [\n (Token.Literal, u'@('),\n (Token.Literal.Number.Integer, u'1'),\n (Token.Operator, u'+'),\n (Token.Punctuation, u'('),\n (Token.Literal.Number.Integer, u'2'),\n (Token.Operator, u'+'),\n (Token.Literal.Number.Integer, u'3'),\n (Token.Punctuation, u')'),\n (Token.Literal, u')'),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == expected\n\n\ndef test_literal_number_bool(lexer):\n fragment = u'@NO;\\n'\n expected = [\n (Token.Literal.Number, u'@NO'),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == expected\n\n\ndef test_literal_number_bool_expression(lexer):\n fragment = u'@(YES);\\n'\n expected = [\n (Token.Literal, u'@('),\n (Token.Name.Builtin, u'YES'),\n (Token.Literal, u')'),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == expected\n\n\ndef test_module_import(lexer):\n fragment = u'@import ModuleA;\\n'\n expected = [\n (Token.Keyword, u'@import'),\n (Token.Text, u' '),\n (Token.Name, u'ModuleA'),\n (Token.Punctuation, u';'),\n (Token.Text, u'\\n'),\n ]\n assert list(lexer.get_tokens(fragment)) == expected\n",
"id": "10685090",
"language": "Python",
"matching_score": 0.17974476516246796,
"max_stars_count": 1,
"path": "tests/test_objectiveclexer.py"
},
{
"content": "from collections import OrderedDict\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import List, Optional, Tuple, Union\n\nimport parso\nfrom parso.python.tree import Function, Module, PythonNode\n\nfrom pytestdocgen.gdocstring import RelaxedGoogleDocstring\nfrom pytestdocgen.parse import (\n bfs_test_cases_in_module,\n find_all_decorators,\n find_all_test_files,\n read_with_utf8,\n)\n\n\nclass TestFile:\n \"\"\"Represents a single test file\"\"\"\n\n def __init__(self, path: Path, test_root: Path, prefix: str):\n \"\"\"\n Args:\n path: A file's path\n test_root: The root directory of test suite\n \"\"\"\n self.code = read_with_utf8(path) # code in utf-8\n self.file = path.relative_to(path.cwd()) # file path relative to cwd\n self.root_dir = test_root # The root directory of test suite\n self.test_cases: List[TestCase] = list() # List of test cases\n self.parsed_tree = parso.parse(self.code) # AST\n self.rel_dir: Tuple[str] = tuple(\n self.file.absolute().relative_to(self.root_dir).parent.parts\n ) # Relative directory\n\n self._prefix = prefix # File name prefix\n\n # Find all test cases and add it\n for node in bfs_test_cases_in_module(self.parsed_tree):\n self.test_cases.append(TestCase(node, self, self.rel_dir))\n\n @property\n def file_name(self) -> str:\n \"\"\"Name part of file's path\"\"\"\n return str(self.file.name)\n\n @property\n def page_name(self) -> str:\n \"\"\"Page name part of file's path\"\"\"\n name = self.file_name\n if self.file_name.startswith(self._prefix):\n name = name[len(self._prefix) :]\n if self.file_name.endswith(\".py\"):\n name = name[: -len(\".py\")]\n return name\n\n\nclass TestDir:\n \"\"\"Represents directory containing test or directory of tests\"\"\"\n\n def __init__(\n self, path: Union[Path, PathLike], test_file_prefix: str = \"test_\"\n ):\n \"\"\"\n Args:\n path: Path to this directory\n test_file_prefix: File name convention to mark a file as a test file\n \"\"\"\n self.dir_path = Path(path) # Path to the directory\n self.test_files: List[\n TestFile\n ] = list() # List of test files under `self`\n self._sorted_tc: Optional[\n OrderedDict\n ] = None # Sorted test cases under `self`\n\n # Find all test cases under `self`\n for file in find_all_test_files(self.dir_path, test_file_prefix):\n self.test_files.append(\n TestFile(file, self.dir_path, test_file_prefix)\n )\n\n @property\n def test_cases(self) -> OrderedDict:\n \"\"\"Decisively ordered test cases under this directory\"\"\"\n if self._sorted_tc:\n return self._sorted_tc\n rel_dirs = list({x.rel_dir for x in self.test_files})\n rel_dirs.sort(key=lambda x: \"\".join(x))\n self._sorted_tc = OrderedDict.fromkeys(rel_dirs)\n for tf in self.test_files:\n for tc in tf.test_cases:\n tcs = self._sorted_tc.get(tf.rel_dir)\n if not tcs:\n tcs = list()\n tcs.append(tc)\n self._sorted_tc[tf.rel_dir] = tcs\n\n return self._sorted_tc\n\n\nclass TestCase:\n \"\"\"Represents a single test case\"\"\"\n\n def __init__(\n self,\n node: Union[PythonNode, Function],\n test_file: TestFile,\n rel_dir: Tuple[str],\n ):\n \"\"\"\n Args:\n node: AST of code block\n test_file: A TestFile containing this\n rel_dir: Relative directory from test suite root\n \"\"\"\n self.python_node: Union[PythonNode, Function] = node # AST\n\n self.name: str = str(node.name.value[len(\"test_\") :]).replace(\n \"_\", \" \"\n ) # Name of the test\n self.rel_dir: Tuple[\n str\n ] = rel_dir # Relative directories from the root of the test suite\n self.file: TestFile = test_file # A test file contains this\n\n 
self.pos: Tuple[Tuple[int, int], Tuple[int, int]] = (\n node.start_pos,\n node.end_pos,\n ) # A specfic position where this AST block starts and ends in code\n\n self.raw_doc: str # Untouched docstring block\n try:\n self.raw_doc = str(node.get_doc_node().value)\n except AttributeError:\n self.raw_doc = \"\"\n self.code = node.get_code() # Untouched code block\n self._parsed_gdoc: Optional[RelaxedGoogleDocstring] = None\n\n @property\n def decorators(self) -> Optional[List[str]]:\n \"\"\"List of decorators of this test case\"\"\"\n if isinstance(self.python_node.parent, Module):\n return None\n\n try:\n deco_block_node: PythonNode = self.python_node.parent\n if deco_block_node.type == \"async_funcdef\":\n # Async test case has one more node\n deco_block_node = deco_block_node.parent\n if deco_block_node.type != \"decorated\":\n return None\n return [\n str(node.get_code()).strip()\n for node in find_all_decorators(deco_block_node)\n ]\n except AttributeError:\n # Not having `parent`, `children`, either `type` is\n # expected behavior in case there's no decorator\n return None\n\n @property\n def parsed_doc(self) -> RelaxedGoogleDocstring:\n \"\"\"Parsed and populated Docstring object\"\"\"\n if not self._parsed_gdoc:\n self._parsed_gdoc = RelaxedGoogleDocstring(self.raw_doc)\n\n return self._parsed_gdoc\n",
"id": "6796183",
"language": "Python",
"matching_score": 4.435977935791016,
"max_stars_count": 0,
"path": "pytestdocgen/object.py"
},
{
"content": "from collections import deque\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import Iterable, Union\n\nimport cchardet\nfrom parso.python.tree import Module, PythonNode\n\n\ndef read_with_utf8(path: Union[PathLike, Path]) -> str:\n \"\"\"Read and decode source code file with utf\"\"\"\n with open(path, \"rb\") as src_file:\n content = src_file.read()\n guess = cchardet.detect(content)\n code = content.decode(guess[\"encoding\"])\n return code\n\n\ndef bfs_test_cases_in_module(module: Module) -> Iterable[PythonNode]:\n \"\"\"\n Visit all AST in BFS fashion and find TCs\n Args:\n module: AST node, `Module`\n\n Yields:\n AST with a single test case under it, within 1 depth\n\n \"\"\"\n to_visit = deque(module.children)\n\n while to_visit:\n node: PythonNode = to_visit.popleft()\n try:\n if (node.type == \"funcdef\" or node.type == \"async_funcdef\") and str(\n node.name.value\n ).startswith(\"test_\"):\n # Is test case\n yield node\n except AttributeError:\n # node has no name, which is fine\n pass\n\n try:\n for child in node.children:\n to_visit.append(child)\n except AttributeError:\n # Node has no child, which is fine\n pass\n\n\ndef find_all_decorators(node: PythonNode) -> Iterable[PythonNode]:\n \"\"\"\n Find all decorators a node has\n Args:\n node: AST representing function or coroutine\n\n Yields:\n AST of decorator\n\n \"\"\"\n to_visit = deque(node.children)\n\n while to_visit:\n node: PythonNode = to_visit.popleft()\n try:\n if node.type == \"decorator\":\n yield node\n for child in node.children:\n if child.type == \"decorator\" or child.type == \"decorators\":\n to_visit.append(child)\n except AttributeError:\n pass\n\n\ndef find_all_test_files(test_dir: Path, prefix: str = \"test_\"):\n \"\"\"\n Find all test files\n Args:\n test_dir: A root of test suite\n prefix: Naming convention for the testing file\n\n Yields:\n Test file\n\n \"\"\"\n yield from test_dir.glob(f\"**/{prefix}*.py\")\n",
"id": "3057931",
"language": "Python",
"matching_score": 1.6906503438949585,
"max_stars_count": 0,
"path": "pytestdocgen/parse.py"
},
{
"content": "from pathlib import Path\n\nfrom pytestdocgen.object import TestDir, TestFile\nfrom pytestdocgen.parse import find_all_test_files\n\nhere = Path(__file__).parent\n\n\ndef test_file_model():\n \"\"\"Test if file model is created\"\"\"\n compos_file = (\n here\n / \"..\"\n / \"test_package\"\n / \"tests\"\n / \"integration\"\n / \"test_composite.py\"\n )\n\n test_root = here / \"..\" / \"test_package\" / \"tests\"\n\n assert TestFile(compos_file, test_root) is not None\n assert (\n TestFile(compos_file, test_root).test_cases[2].decorators[0]\n == \"@pytest.mark.asyncio\"\n )\n assert len(TestFile(compos_file, test_root).test_cases[3].decorators) == 2\n\n\ndef test_dir_model():\n \"\"\"Test if directory model is created\"\"\"\n test_root = here / \"..\" / \"test_package\" / \"tests\"\n td = TestDir(test_root)\n assert td is not None\n assert td.test_cases is not None\n\n\ndef test_find_all_files():\n \"\"\"Test if all test files with a given pattern are found\"\"\"\n test_root = here / \"..\" / \"test_package\" / \"tests\"\n assert len([x for x in find_all_test_files(test_root)]) == 4\n",
"id": "11776128",
"language": "Python",
"matching_score": 3.3331942558288574,
"max_stars_count": 0,
"path": "tests/test_load.py"
},
{
"content": "from pathlib import Path\n\nfrom pytestdocgen.gendoc import tc_to_markdown, td_to_markdown\nfrom pytestdocgen.object import TestDir, TestFile\n\nhere = Path(__file__).parent\n\n\ndef test_tc_to_markdown():\n \"\"\"Convert a test case to markdown string\"\"\"\n compos_file = (\n here\n / \"..\"\n / \"test_package\"\n / \"tests\"\n / \"integration\"\n / \"test_composite.py\"\n )\n test_root = here / \"..\" / \"test_package\" / \"tests\"\n tc = TestFile(compos_file, test_root).test_cases[1]\n assert tc_to_markdown(tc) is not None\n\n\ndef test_gendoc():\n \"\"\"Convert a test directory to markdown document\"\"\"\n test_root = here / \"..\" / \"test_package\" / \"tests\"\n td = TestDir(test_root)\n md = td_to_markdown(td)\n assert md is not None\n",
"id": "11392674",
"language": "Python",
"matching_score": 1.2260534763336182,
"max_stars_count": 0,
"path": "tests/test_gendoc.py"
},
{
"content": "\"\"\"\nWelcome to PyTestDocGen 🎴\n\nUsage:\n pytestdocgen [--src-dir=SRC_DIR] [--format=FORMAT] [--test-dir=TEST_DIR] [--output=FILE] [--test-prefix=PREFIX] [--header=<str>] [--footer=<str>]\n pytestdocgen (-h | --help)\n\nOptions:\n -h --help Show this screen.\n -s --src-dir=SRC_DIR Working directory, usually a root of source code [default: .]\n -f --format=FORMAT Format of the output file [default: markdown]\n -t --test-dir=TEST_DIR Directory of tests, relative to SRC_DIR [default: tests]\n -o --output=FILE Output file path [default: TEST_DOC.md]\n -p --test-prefix=PREFIX Custom test file prefix [default: test_]\n\n --header=<str> Custom header to provide\n --footer=<str> Custom footer to provide\n\"\"\"\n\nimport sys\nfrom copy import copy\nfrom pathlib import Path\nfrom typing import Dict, Optional\n\nimport toml as toml\nfrom docopt import docopt\n\nfrom pytestdocgen.gendoc import td_to_markdown\nfrom pytestdocgen.object import TestDir\n\n\ndef get_args() -> dict:\n \"\"\"Parse arguments with `docopt`\"\"\"\n return docopt(__doc__)\n\n\ndef read_pyproject_toml(working_dir: Path) -> Dict[str, Optional[str]]:\n \"\"\"\n Read project's `pyproject.toml` file\n\n Args:\n working_dir: CWD. Usually a root of source code\n\n Returns:\n Configurations described in toml file\n\n Raises:\n toml.TomlDecodeError: Failed to decode\n OSError: Failed to read a file\n\n \"\"\"\n pyproject_toml_path: Path = working_dir / \"pyproject.toml\"\n if not pyproject_toml_path.is_file():\n return dict()\n\n pyproject_toml = toml.load(pyproject_toml_path)\n config_d = pyproject_toml.get(\"tool\", {}).get(\"pytestdocgen\", {})\n config = dict()\n\n for k in config_d:\n config[f\"--{str(k)}\"] = config_d[k]\n\n return config\n\n\ndef override_args_over_config(\n args: dict, config: dict\n) -> Dict[str, Optional[str]]:\n \"\"\"\n Merge two options with priority.\n\n If a configuration with higher priority has a `None` value on with a\n certain key, it is considered as None. But if configuration holds fallback\n value, it is preserved\n Args:\n args: Options with higher priority\n config: Options with less priority\n\n Returns:\n Merged conf\n\n \"\"\"\n res = copy(config)\n # Override config file by argument\n for k in args:\n if args[k] is not None:\n res[k] = args[k]\n if args[k] is None and config.get(k) is None:\n # is explicit None\n res[k] = None\n\n return res\n\n\ndef run():\n \"\"\"Run pytestdocgen\"\"\"\n arguments = get_args()\n # Read a configuration in pyproject.toml file\n config = read_pyproject_toml(Path(arguments[\"--src-dir\"]))\n config = override_args_over_config(arguments, config)\n\n src_dir_path = Path(config[\"--src-dir\"]).absolute()\n test_dir_path = (\n src_dir_path / Path(config[\"--test-dir\"])\n if not Path(config[\"--test-dir\"]).is_absolute()\n else Path(config[\"--test-dir\"])\n ).absolute()\n output_file_path = (\n Path.cwd() / Path(config[\"--output\"])\n if not Path(config[\"--output\"]).is_absolute()\n else Path(config[\"--output\"])\n ).absolute()\n\n if not test_dir_path.is_dir():\n print(\n f\"Given test dir {test_dir_path} is not a directory\",\n file=sys.stderr,\n )\n return -1\n td = TestDir(test_dir_path, test_file_prefix=config[\"--test-prefix\"])\n\n md_str = td_to_markdown(\n td, custom_header=config[\"--header\"], custom_footer=config[\"--footer\"]\n )\n\n output_file_path.write_text(md_str)\n\n return 0\n\n\nif __name__ == \"__main__\":\n \"\"\"Entry point of module\"\"\"\n sys.exit(run())\n",
"id": "9805258",
"language": "Python",
"matching_score": 2.896725654602051,
"max_stars_count": 0,
"path": "pytestdocgen/__main__.py"
},
{
"content": "from pathlib import Path\n\nimport pytestdocgen.__main__\n\n\ndef test_pyproject_toml():\n \"\"\"\n Test load and parsing of pyproject.toml configuration file\n\n \"\"\"\n config = pytestdocgen.__main__.read_pyproject_toml(\n Path(__file__).parent.parent\n )\n assert config[\"--format\"] == \"markdown\"\n",
"id": "11772373",
"language": "Python",
"matching_score": 0.2509877681732178,
"max_stars_count": 0,
"path": "tests/test_conf.py"
},
{
"content": "\"\"\"A package with tests\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nshort_desc = (\n \"A mock package for rather formal documentation generator for \"\n \"pytest suite\"\n)\n\nsetup(\n name=\"pytestdocgen\",\n version=\"0.0.1\",\n packages=find_packages(),\n include_package_data=True,\n description=short_desc,\n zip_safe=False,\n test_require=[\"pytest\", \"pytest-asyncio\"],\n test_suite=\"tests\",\n python_require=\">=3.7\",\n entry_points={\n \"console_scripts\": [\n # 'jokbo_gen = pytestdocgen.__main__:run'\n ]\n },\n)\n",
"id": "7960621",
"language": "Python",
"matching_score": 0.5571979880332947,
"max_stars_count": 0,
"path": "test_package/setup.py"
},
{
"content": "# Copyright 2018-2021 The NATS Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport sys\nimport unittest\nfrom collections import Counter\n\nfrom nats.nuid import BASE, NUID, MAX_SEQ, PREFIX_LENGTH, TOTAL_LENGTH\n\n\nclass NUIDTest(unittest.TestCase):\n\n def setUp(self):\n super().setUp()\n\n def test_nuid_length(self):\n nuid = NUID()\n self.assertEqual(len(nuid.next()), TOTAL_LENGTH)\n\n def test_nuid_are_unique(self):\n nuid = NUID()\n entries = [nuid.next().decode() for i in range(500000)]\n counted_entries = Counter(entries)\n repeated = [\n entry for entry, count in counted_entries.items() if count > 1\n ]\n self.assertEqual(len(repeated), 0)\n\n def test_nuid_are_very_unique(self):\n nuid = NUID()\n entries = [nuid.next().decode() for i in range(1000000)]\n counted_entries = Counter(entries)\n repeated = [\n entry for entry, count in counted_entries.items() if count > 1\n ]\n self.assertEqual(len(repeated), 0)\n\n def test_subsequent_nuid_equal(self):\n n_tests = 10000\n for i in range(n_tests):\n nuid = NUID()\n nuid._seq = MAX_SEQ - i - 10\n nuid._inc = BASE\n\n self.assertTrue(nuid.next() != nuid.next())\n\n def test_nuid_sequence_rollover(self):\n nuid = NUID()\n seq_a = nuid._seq\n inc_a = nuid._inc\n nuid_a = nuid.next()\n\n seq_b = nuid._seq\n inc_b = nuid._inc\n self.assertTrue(seq_a < seq_b)\n self.assertEqual(seq_b, seq_a + inc_a)\n nuid_b = nuid.next()\n self.assertEqual(nuid_a[:PREFIX_LENGTH], nuid_b[:PREFIX_LENGTH])\n\n # Force the sequence to rollover, prefix should now change\n nuid._seq = seq_c = MAX_SEQ + 1\n nuid_c = nuid.next()\n self.assertNotEqual(nuid_a[:PREFIX_LENGTH], nuid_c[:PREFIX_LENGTH])\n",
"id": "8302609",
"language": "Python",
"matching_score": 0.2699946165084839,
"max_stars_count": 0,
"path": "tests/test_nuid.py"
},
{
"content": "from datetime import datetime\nfrom itertools import chain, zip_longest\nfrom pathlib import Path\nfrom typing import Iterable, List, Tuple\n\nimport jinja2\n\nfrom pytestdocgen.object import TestCase, TestDir\n\nhere: Path = Path(__file__).parent\n# Jinja env with minor tweaks\nj_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(str(here / \"templates\")),\n trim_blocks=True,\n lstrip_blocks=True,\n)\n# A template for test case\ntc_template = j_env.get_template(\"test_case.md\")\nutc_now: str = datetime.utcnow().strftime(\"UTC %Y-%m-%d %H:%M:%S\")\n\n\ndef next_section_to_template(\n former_section: Iterable[str], current_section: Iterable[str]\n) -> Iterable[Tuple[int, str]]:\n \"\"\"\n Find a section (structured directory scheme) to be rendered\n Args:\n former_section: A last section we rendered\n current_section: A current section we are working on\n\n Yields:\n Index(depth, 0-based) and the name of section\n\n \"\"\"\n for fs_idx_value, cs_idx_value in zip_longest(\n enumerate(former_section), enumerate(current_section)\n ):\n if not fs_idx_value or (\n cs_idx_value and fs_idx_value[1] != cs_idx_value[1]\n ):\n yield cs_idx_value\n\n\ndef general_header(td: TestDir) -> str:\n \"\"\"Produce general header\"\"\"\n title = \"Test case documentation\\n\"\n title += len(title) * \"=\" + \"\\n\"\n\n return (\n f'{title}<div style=\"text-align: right\">'\n f\"<p>version: {utc_now}</p>\"\n f\"</div>\\n\"\n )\n\n\ndef general_footer(td: TestDir) -> str:\n \"\"\"Produce general footer\"\"\"\n return f\"*documentation created by PyTestDocGen@{utc_now}*\"\n\n\ndef worth_to_put_in_snippet(code_line: str) -> bool:\n \"\"\"Check if a line of source code is worth to be in a code snippet\"\"\"\n if \"async \" in code_line or \"def \" in code_line:\n return True\n if code_line.strip().startswith(\"assert\"):\n return True\n\n return False\n\n\ndef tc_to_markdown(tc: TestCase):\n \"\"\"Render test case to markdown\"\"\"\n file_name = \"/\".join(\n chain([tc.file.root_dir.name], tc.rel_dir, [tc.file.file_name])\n )\n tc_position = (\n f\"{tc.pos[0][0]}:{tc.pos[0][1]} - {tc.pos[1][0]}:{tc.pos[1][1]}\"\n )\n code_snippet = \"\\n\".join(\n [x for x in tc.code.splitlines() if worth_to_put_in_snippet(x)]\n )\n\n return tc_template.render(\n name=tc.name,\n file=file_name,\n pos=tc_position,\n snippet=code_snippet,\n summary=tc.parsed_doc.summary,\n description=tc.parsed_doc.description,\n sections=tc.parsed_doc.sections,\n decorators=tc.decorators,\n )\n\n\ndef td_to_markdown(\n td: TestDir, custom_header: str = None, custom_footer: str = None\n) -> str:\n \"\"\"\n Render TestDir to the Markdown string\n Args:\n td: Instantiated TestDir object\n custom_header: Custom header to put in the output\n custom_footer: Custom footer to put in the output\n\n Returns:\n Markdown string\n\n \"\"\"\n result: List[str] = [custom_header] if custom_header else [\n general_header(td)\n ]\n result.append(\"\\n\")\n former_section: Iterable[str] = []\n former_page: str = \"\"\n\n for section in td.test_cases:\n for section_to_render in next_section_to_template(\n former_section, section\n ):\n if section_to_render[0] == 0:\n result.append(\"\\n***\\n\")\n section_name = section_to_render[1].replace(\"_\", \" \")\n section_name = section_name[0].upper() + section_name[1:]\n\n section_str = \"#\" * (section_to_render[0] + 1) + \" \"\n section_str += section_name + \"\\n\\n\"\n\n result.append(section_str)\n former_section = section\n for tc in td.test_cases[section]:\n assert isinstance(tc, TestCase)\n if 
former_page != tc.file.page_name:\n result.append(\n \"#\" * 4\n + \" Test Page: \"\n + tc.file.page_name.replace(\"_\", \" \")\n + \"\\n\"\n )\n former_page = tc.file.page_name\n result.append(tc_to_markdown(tc))\n\n if custom_footer:\n result.append(custom_footer)\n else:\n result.append(general_footer(td))\n result.append(\"\\n\")\n return \"\".join(result)\n",
"id": "3218272",
"language": "Python",
"matching_score": 1.7990314960479736,
"max_stars_count": 0,
"path": "pytestdocgen/gendoc.py"
},
{
"content": "import inspect\nfrom collections import OrderedDict\nfrom itertools import chain, takewhile\nfrom typing import List, Tuple\n\n\ndef get_num_of_indentation(line: str) -> int:\n \"\"\"\n Count the number of indentation and return\n\n Args:\n line: str with indentation (including zero indentation)\n\n Returns:\n The number of whitespaces as indentation\n\n \"\"\"\n return sum(1 for _ in takewhile(str.isspace, line))\n\n\ndef separate_section_content(lines: List[str]) -> Tuple[str, str]:\n \"\"\"\n Separate section name and content depend on the content shape\n\n Args:\n lines: List of a single section of docstring\n\n Returns:\n Section name and content\n\n \"\"\"\n sec_name, leftover = lines[0].strip().split(\":\")\n try:\n assert isinstance(leftover, str)\n except AssertionError:\n raise ValueError(\"Two or more `:` detected in a Section name line.\")\n sec_content: str = \"\"\n if leftover:\n # Section name line includes description\n sec_content = leftover.strip()\n if len(lines) > 1:\n # And the description continues to the next line\n sec_content = \" \".join(\n chain([sec_content], [line.strip() for line in lines[1:]])\n )\n elif len(lines) > 1:\n # Typical docstring which starts from the next line\n indentations = get_num_of_indentation(lines[1])\n sec_content = \"\\n\".join([line[indentations:] for line in lines[1:]])\n\n return sec_name.strip(), sec_content.strip()\n\n\nclass RelaxedGoogleDocstring:\n \"\"\"\n Extended Google Docstring parser\n\n Target docstring will\n * Have no limitation in Sections (e.g., Args, Returns ...)\n * But always have capital letters in the beginning of the section name\n \"\"\"\n\n def __init__(self, text: str):\n \"\"\"\n Creates RGD instance and parse docstring text\n Args:\n text: `Clean docstring`_ or cleanable docstring with RGD style\n\n .. _Clean docstring:\n inspect.cleandoc\n \"\"\"\n\n self._header: List[str]\n self._description_idx: int # Summary/description separating white space\n self._sections: OrderedDict[str, str] = OrderedDict()\n\n text = inspect.cleandoc(text)\n if text.startswith('\"' * 3) and text.endswith('\"' * 3):\n text = text[3:-3]\n text = text.strip()\n\n text_lines: List[str] = text.splitlines()\n # Find if any section is apparent\n sec_starts = [\n idx\n for idx, x in enumerate(text_lines)\n if x\n and x[0].isupper()\n and \":\" in x\n and get_num_of_indentation(x) == 0\n ]\n\n # Get each section from the bottom\n for idx in reversed(sec_starts):\n text_lines, new_section = text_lines[:idx], text_lines[idx:]\n sec_name, sec_content = separate_section_content(new_section)\n\n self._sections[sec_name] = sec_content\n self._sections.move_to_end(sec_name, last=False)\n\n # Set header part\n self._header = (\n text_lines[: sec_starts[0]] if sec_starts else text_lines[:]\n )\n\n self._description_idx = len(self._header)\n for i, h_line in enumerate(self._header):\n if not h_line:\n # Empty line between summary and description\n self._description_idx = i + 1\n break\n\n @property\n def summary(self) -> str:\n \"\"\"Summary string of docstring, or the first paragraph of the\n docstring\"\"\"\n return \"\".join(self._header[: self._description_idx]).strip()\n\n @property\n def description(self) -> str:\n \"\"\"Description string of docstring, or the latter part of the docstring\n other than `summary`\"\"\"\n return \"\\n\".join(self._header[self._description_idx :]).strip()\n\n @property\n def sections(self) -> OrderedDict:\n \"\"\"Categorized members of docstring\"\"\"\n return self._sections\n",
"id": "9032664",
"language": "Python",
"matching_score": 2.76013445854187,
"max_stars_count": 0,
"path": "pytestdocgen/gdocstring.py"
},
{
"content": "from pytestdocgen.gdocstring import (\n RelaxedGoogleDocstring,\n separate_section_content,\n)\n\n\ndef test_simple_parse():\n \"\"\"Test the simplest docstring parse\"\"\"\n test_string = '''\"\"\"async coro\"\"\"'''\n\n gd = RelaxedGoogleDocstring(test_string)\n assert gd.summary == \"async coro\"\n\n\ndef test_simple_parse2():\n \"\"\"Test the simplest docstring parse with an unconventional whitespace\"\"\"\n test_string = '''\"\"\"\n async coro\n \"\"\"'''\n\n gd = RelaxedGoogleDocstring(test_string)\n assert gd.summary == \"async coro\"\n\n\ndef test_parse():\n \"\"\"Test docstring parse with full possibilities\"\"\"\n test_string = '''\"\"\"\nTest if addition is a part of summation\n\nCertainly long long description we have in here for the good\ntest across multiple lines we have description\n\n * Like this\n * And\n * That\n\nSteps:\n * Add 0 in each iteration\n * Finish the iteration\n * Check if summation is zero\n\nPrecondition:\n All inputs are integers\n\nInput: Any Integer which you can see pretty much\n tous-les-jours\n\nExpected Output:\n Summation\n\nNote:\n This test is not enough to test summation\n\nReturns:\n None as this is a test\n\"\"\"\n'''\n\n gd = RelaxedGoogleDocstring(test_string)\n\n assert gd.summary == \"Test if addition is a part of summation\"\n assert gd.description == (\n \"Certainly long long description we have in here for the good\\n\"\n \"test across multiple lines we have description\\n\"\n \"\\n\"\n \" * Like this\\n\"\n \" * And\\n\"\n \" * That\"\n )\n assert gd.sections[\"Returns\"] == \"None as this is a test\"\n assert gd.sections[\"Expected Output\"] == \"Summation\"\n assert gd.sections[\"Note\"] == \"This test is not enough to test summation\"\n assert (\n gd.sections[\"Input\"]\n == \"Any Integer which you can see pretty much tous-les-jours\"\n )\n assert (\n gd.sections[\"Steps\"]\n == \"\"\"* Add 0 in each iteration\n* Finish the iteration\n* Check if summation is zero\"\"\"\n )\n\n\ndef test_section_name_and_content_extraction():\n \"\"\"Does section name and content of it gets separated properly?\"\"\"\n test_section = \"\"\"Input: Any Integer which you can see pretty much\n everyday\n\"\"\"\n\n name, content = separate_section_content(test_section.splitlines())\n\n assert name == \"Input\"\n assert content == \"Any Integer which you can see pretty much everyday\"\n",
"id": "4946293",
"language": "Python",
"matching_score": 3.0914783477783203,
"max_stars_count": 0,
"path": "tests/test_gdocstring.py"
},
{
"content": "import pytest\n\nfrom mock_package import add, sigma, sub\n\n\ndef test_addition_and_subtraction_is_the_opposite():\n \"\"\"\n Test if addition and subtraction are the opposite operation\n\n Long description following the summary explaining the various stuffs\n\n Precondition:\n All inputs are integers\n \"\"\"\n assert sub(add(1, 2), 2) == 1\n\n\n@pytest.mark.parametrize(\"test_input\", [x for x in range(-2, 3)])\ndef test_add_is_part_of_sigma(test_input: int):\n \"\"\"\n Test if addition is a part of summation\n\n Steps:\n * Add 0 in each iteration\n * Finish the iteration\n * Check if summation is zero\n\n Precondition:\n All inputs are integers\n\n Input:\n Any Integer\n\n Expected Output:\n Summation\n\n Note:\n This test is not enough to test summation\n\n Return:\n None as this is a test\n \"\"\"\n add_res = 0\n for x in range(test_input):\n add_res += 1\n assert add_res == sigma([1 for _ in range(test_input)])\n\n\n@pytest.mark.asyncio\nasync def test_asyncio_coro(x):\n \"\"\"Async coro\"\"\"\n\n assert bool(x)\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize(\"x\", [x for x in range(1, 3)])\nasync def test_asyncio_coro_para(x):\n assert bool(x)\n",
"id": "11936781",
"language": "Python",
"matching_score": 2.1312835216522217,
"max_stars_count": 0,
"path": "test_package/tests/integration/test_composite.py"
},
{
"content": "import pytest\n\nfrom mock_package import sub\n\n\ndef test_sub_operation_returns_addition():\n assert sub(1, 2) == 1 - 2\n\n\n@pytest.mark.parametrize(\"test_input\", [x for x in range(-2, 3)])\ndef test_sub_same_values_returns_zero(test_input: int):\n assert sub(test_input, test_input) == 0\n",
"id": "510743",
"language": "Python",
"matching_score": 2.619692087173462,
"max_stars_count": 0,
"path": "test_package/tests/unit/simple_calc/test_sub.py"
},
{
"content": "import pytest\n\nfrom mock_package import add\n\n\ndef test_addition_operation_returns_addition():\n assert add(1, 2) == 1 + 2\n\n\n@pytest.mark.parametrize(\"test_input\", [x for x in range(-2, 3)])\ndef test_add_same_values_with_negative_sign_returns_zero(test_input: int):\n assert add(test_input, -test_input) == 0\n",
"id": "3878631",
"language": "Python",
"matching_score": 2.5639586448669434,
"max_stars_count": 0,
"path": "test_package/tests/unit/simple_calc/test_add.py"
},
{
"content": "from mock_package import sigma\n\n\ndef test_sum_of_zero_is_zero():\n assert sigma([0 for _ in range(1024)]) == 0\n\n\ndef test_sigma_same_values_with_opposite_sign_returns_zero():\n assert sigma([x for x in range(-2, 3)]) == 0\n",
"id": "8120135",
"language": "Python",
"matching_score": 0.8045665621757507,
"max_stars_count": 0,
"path": "test_package/tests/unit/composite_calc/test_sigma.py"
},
{
"content": "\"\"\"Mock package to be used in tests\"\"\"\nfrom typing import Iterable\n\n\ndef add(a: int, b: int) -> int:\n \"\"\"Returns added int\"\"\"\n return a + b\n\n\ndef sub(a: int, b: int) -> int:\n \"\"\"Returns subtracted int\"\"\"\n return a - b\n\n\ndef sigma(a: Iterable[int]) -> int:\n \"\"\"Returns sum of the list of int\"\"\"\n return sum(a)\n",
"id": "4846689",
"language": "Python",
"matching_score": 0.1503172218799591,
"max_stars_count": 0,
"path": "test_package/mock_package/__init__.py"
},
{
"content": "import asyncio\nimport http.client\nimport json\nimport ssl\nimport time\nimport unittest\nimport datetime\nfrom unittest import mock\nimport tempfile\nimport shutil\nimport random\n\nimport nats\nimport nats.js.api\nfrom nats.aio.client import Client as NATS\nfrom nats.aio.client import __version__\nfrom nats.errors import *\nfrom nats.aio.errors import *\nfrom nats.js.errors import *\nfrom tests.utils import *\n\n\nclass PublishTest(SingleJetStreamServerTestCase):\n\n @async_test\n async def test_publish(self):\n nc = NATS()\n await nc.connect()\n js = nc.jetstream()\n\n with self.assertRaises(NoStreamResponseError):\n await js.publish(\"foo\", b'bar')\n\n await js.add_stream(name=\"QUUX\", subjects=[\"quux\"])\n\n ack = await js.publish(\"quux\", b'bar:1', stream=\"QUUX\")\n self.assertEqual(ack.stream, \"QUUX\")\n self.assertEqual(ack.seq, 1)\n\n ack = await js.publish(\"quux\", b'bar:2')\n self.assertEqual(ack.stream, \"QUUX\")\n self.assertEqual(ack.seq, 2)\n\n with self.assertRaises(BadRequestError) as err:\n await js.publish(\"quux\", b'bar', stream=\"BAR\")\n self.assertEqual(err.exception.err_code, 10060)\n\n await nc.close()\n\n @async_test\n async def test_publish_verbose(self):\n nc = NATS()\n await nc.connect(verbose=False)\n js = nc.jetstream()\n\n with self.assertRaises(NoStreamResponseError):\n await js.publish(\"foo\", b'bar')\n\n await js.add_stream(name=\"QUUX\", subjects=[\"quux\"])\n\n ack = await js.publish(\"quux\", b'bar:1', stream=\"QUUX\")\n self.assertEqual(ack.stream, \"QUUX\")\n self.assertEqual(ack.seq, 1)\n\n ack = await js.publish(\"quux\", b'bar:2')\n self.assertEqual(ack.stream, \"QUUX\")\n self.assertEqual(ack.seq, 2)\n\n with self.assertRaises(BadRequestError) as err:\n await js.publish(\"quux\", b'bar', stream=\"BAR\")\n self.assertEqual(err.exception.err_code, 10060)\n\n await nc.close()\n\n\nclass PullSubscribeTest(SingleJetStreamServerTestCase):\n\n @async_test\n async def test_auto_create_consumer(self):\n nc = NATS()\n await nc.connect()\n\n js = nc.jetstream()\n sinfo = await js.add_stream(\n name=\"TEST2\", subjects=[\"a1\", \"a2\", \"a3\", \"a4\"]\n )\n\n for i in range(1, 10):\n await js.publish(\"a1\", f'a1:{i}'.encode())\n\n # Should use a2 as the filter subject and receive a subset.\n sub = await js.pull_subscribe(\"a2\", \"auto\")\n await js.publish(\"a2\", b'one')\n\n for i in range(10, 20):\n await js.publish(\"a3\", f'a3:{i}'.encode())\n\n msgs = await sub.fetch(1)\n msg = msgs[0]\n await msg.ack()\n self.assertEqual(msg.data, b'one')\n\n # Getting another message should timeout for the a2 subject.\n with self.assertRaises(TimeoutError):\n await sub.fetch(1, timeout=1)\n\n # Customize consumer config.\n sub = await js.pull_subscribe(\n \"a2\", \"auto2\", config=nats.js.api.ConsumerConfig(max_waiting=10)\n )\n msgs = await sub.fetch(1)\n msg = msgs[0]\n await msg.ack()\n self.assertEqual(msg.data, b'one')\n\n info = await js.consumer_info(\"TEST2\", \"auto2\")\n self.assertEqual(info.config.max_waiting, 10)\n\n sub = await js.pull_subscribe(\"a3\", \"auto3\")\n msgs = await sub.fetch(1)\n msg = msgs[0]\n await msg.ack()\n self.assertEqual(msg.data, b'a3:10')\n\n # Getting all messages from stream.\n sub = await js.pull_subscribe(\"\", \"all\")\n msgs = await sub.fetch(1)\n msg = msgs[0]\n await msg.ack()\n self.assertEqual(msg.data, b'a1:1')\n\n for i in range(2, 10):\n msgs = await sub.fetch(1)\n msg = msgs[0]\n await msg.ack()\n\n # subject a2\n msgs = await sub.fetch(1)\n msg = msgs[0]\n await msg.ack()\n 
self.assertEqual(msg.data, b'one')\n\n # subject a3\n msgs = await sub.fetch(1)\n msg = msgs[0]\n await msg.ack()\n self.assertEqual(msg.data, b'a3:10')\n\n await nc.close()\n await asyncio.sleep(1)\n\n @async_test\n async def test_fetch_one(self):\n nc = NATS()\n await nc.connect()\n\n js = nc.jetstream()\n\n sinfo = await js.add_stream(name=\"TEST1\", subjects=[\"foo.1\", \"bar\"])\n\n ack = await js.publish(\"foo.1\", f'Hello from NATS!'.encode())\n self.assertEqual(ack.stream, \"TEST1\")\n self.assertEqual(ack.seq, 1)\n\n # Bind to the consumer that is already present.\n sub = await js.pull_subscribe(\"foo.1\", \"dur\")\n msgs = await sub.fetch(1)\n for msg in msgs:\n await msg.ack()\n\n msg = msgs[0]\n self.assertEqual(msg.metadata.sequence.stream, 1)\n self.assertEqual(msg.metadata.sequence.consumer, 1)\n self.assertTrue(datetime.datetime.now() > msg.metadata.timestamp)\n self.assertEqual(msg.metadata.num_pending, 0)\n self.assertEqual(msg.metadata.num_delivered, 1)\n\n with self.assertRaises(asyncio.TimeoutError):\n await sub.fetch(timeout=1)\n\n for i in range(0, 10):\n await js.publish(\n \"foo.1\", f\"i:{i}\".encode(), headers={'hello': 'world'}\n )\n\n # nak\n msgs = await sub.fetch()\n msg = msgs[0]\n\n info = await js.consumer_info(\"TEST1\", \"dur\", timeout=1)\n self.assertEqual(msg.header, {'hello': 'world'})\n\n await msg.nak()\n\n info = await js.consumer_info(\"TEST1\", \"dur\", timeout=1)\n self.assertEqual(info.stream_name, \"TEST1\")\n self.assertEqual(info.num_ack_pending, 1)\n self.assertEqual(info.num_redelivered, 0)\n\n # in_progress\n msgs = await sub.fetch()\n for msg in msgs:\n await msg.in_progress()\n\n # term\n msgs = await sub.fetch()\n for msg in msgs:\n await msg.term()\n\n info = await js.consumer_info(\"TEST1\", \"dur\", timeout=1)\n self.assertEqual(info.num_ack_pending, 1)\n self.assertEqual(info.num_redelivered, 1)\n\n # Fetch requires a timeout with an expires time.\n with self.assertRaises(ValueError):\n await sub.fetch(1, timeout=None)\n\n await nc.close()\n\n @async_test\n async def test_add_pull_consumer_via_jsm(self):\n nc = NATS()\n await nc.connect()\n\n js = nc.jetstream()\n\n sinfo = await js.add_stream(name=\"events\", subjects=[\"events.a\"])\n cinfo = await js.add_consumer(\n \"events\",\n durable_name=\"a\",\n deliver_policy=api.DeliverPolicy.ALL,\n max_deliver=20,\n max_waiting=512,\n # ack_wait=30,\n max_ack_pending=1024,\n filter_subject=\"events.a\"\n )\n await js.publish(\"events.a\", b'hello world')\n sub = await js.pull_subscribe('events.a', \"a\", stream=\"events\")\n msgs = await sub.fetch(1)\n for msg in msgs:\n await msg.ack()\n info = await js.consumer_info(\"events\", \"a\")\n self.assertEqual(0, info.num_pending)\n\n @async_long_test\n async def test_fetch_n(self):\n nc = NATS()\n await nc.connect()\n js = nc.jetstream()\n\n sinfo = await js.add_stream(name=\"TESTN\", subjects=[\"a\", \"b\", \"c\"])\n\n for i in range(0, 10):\n await js.publish(\"a\", f'i:{i}'.encode())\n\n sub = await js.pull_subscribe(\n \"a\",\n \"durable-1\",\n config=api.ConsumerConfig(max_waiting=3),\n )\n info = await sub.consumer_info()\n self.assertEqual(info.config.max_waiting, 3)\n\n # 10 messages\n # -5 fetched\n # -----------\n # 5 pending\n msgs = await sub.fetch(5)\n self.assertEqual(len(msgs), 5)\n\n i = 0\n for msg in msgs:\n self.assertEqual(msg.data, f'i:{i}'.encode())\n await msg.ack()\n i += 1\n info = await sub.consumer_info()\n self.assertEqual(info.num_pending, 5)\n\n # 5 messages\n # -10 fetched\n # -----------\n # 5 pending\n msgs 
= await sub.fetch(10, timeout=0.5)\n self.assertEqual(len(msgs), 5)\n\n i = 5\n for msg in msgs:\n self.assertEqual(msg.data, f'i:{i}'.encode())\n await msg.ack()\n i += 1\n\n info = await sub.consumer_info()\n self.assertEqual(info.num_ack_pending, 0)\n self.assertEqual(info.num_redelivered, 0)\n self.assertEqual(info.delivered.stream_seq, 10)\n self.assertEqual(info.delivered.consumer_seq, 10)\n self.assertEqual(info.ack_floor.stream_seq, 10)\n self.assertEqual(info.ack_floor.consumer_seq, 10)\n self.assertEqual(info.num_pending, 0)\n\n # 1 message\n # -1 fetched\n # ----------\n # 0 pending\n # 1 ack pending\n await js.publish(\"a\", b'i:11')\n msgs = await sub.fetch(2, timeout=0.5)\n\n # Leave this message unacked.\n msg = msgs[0]\n unacked_msg = msg\n\n self.assertEqual(msg.data, b'i:11')\n info = await sub.consumer_info()\n self.assertEqual(info.num_waiting, 1)\n self.assertEqual(info.num_pending, 0)\n self.assertEqual(info.num_ack_pending, 1)\n\n inflight = []\n inflight.append(msg)\n\n # +1 message\n # 1 extra from before but request has expired so does not count.\n # +1 ack pending since previous message not acked.\n # +1 pending to be consumed.\n await js.publish(\"a\", b'i:12')\n with self.assertRaises(asyncio.TimeoutError):\n await sub._sub.next_msg(timeout=0.5)\n info = await sub.consumer_info()\n self.assertEqual(info.num_waiting, 0)\n self.assertEqual(info.num_pending, 1)\n self.assertEqual(info.num_ack_pending, 1)\n\n # Start background task that gathers messages.\n fut = asyncio.create_task(sub.fetch(3, timeout=2))\n await asyncio.sleep(0.5)\n await js.publish(\"a\", b'i:13')\n await js.publish(\"a\", b'i:14')\n\n # It should have enough time to be able to get the 3 messages,\n # the no wait message will send the first message plus a 404\n # no more messages error.\n msgs = await fut\n self.assertEqual(len(msgs), 3)\n for msg in msgs:\n await msg.ack_sync()\n\n info = await sub.consumer_info()\n self.assertEqual(info.num_ack_pending, 1)\n self.assertEqual(info.num_redelivered, 0)\n self.assertEqual(info.num_waiting, 0)\n self.assertEqual(info.num_pending, 0)\n self.assertEqual(info.delivered.stream_seq, 14)\n self.assertEqual(info.delivered.consumer_seq, 14)\n\n # Message 10 is the last message that got acked.\n self.assertEqual(info.ack_floor.stream_seq, 10)\n self.assertEqual(info.ack_floor.consumer_seq, 10)\n\n # Unacked last message so that ack floor is updated.\n await unacked_msg.ack_sync()\n\n info = await sub.consumer_info()\n self.assertEqual(info.num_pending, 0)\n self.assertEqual(info.ack_floor.stream_seq, 14)\n self.assertEqual(info.ack_floor.consumer_seq, 14)\n\n # No messages at this point.\n for i in range(0, 5):\n with self.assertRaises(TimeoutError):\n msg = await sub.fetch(1, timeout=0.5)\n\n # Max waiting is 3 so it should be stuck at 2, the requests\n # cancel each and are done sequentially so no 408 errors expected.\n info = await sub.consumer_info()\n self.assertEqual(info.num_waiting, 2)\n\n # Following requests ought to cancel the previous ones.\n #\n # for i in range(0, 5):\n # with self.assertRaises(TimeoutError):\n # msg = await sub.fetch(2, timeout=0.5)\n # info = await sub.consumer_info()\n # self.assertEqual(info.num_waiting, 1)\n\n await nc.close()\n\n @async_test\n async def test_fetch_max_waiting_fetch_one(self):\n nc = NATS()\n await nc.connect()\n\n js = nc.jetstream()\n\n await js.add_stream(name=\"TEST3\", subjects=[\"max\"])\n\n sub = await js.pull_subscribe(\n \"max\",\n \"example\",\n 
config=nats.js.api.ConsumerConfig(max_waiting=3),\n )\n results = None\n try:\n results = await asyncio.gather(\n sub.fetch(1, timeout=1),\n sub.fetch(1, timeout=1),\n sub.fetch(1, timeout=1),\n sub.fetch(1, timeout=1),\n sub.fetch(1, timeout=1),\n return_exceptions=True,\n )\n except:\n pass\n\n err = None\n for e in results:\n if isinstance(e, asyncio.TimeoutError):\n continue\n else:\n self.assertIsInstance(e, APIError)\n err = e\n break\n\n # Should get at least one Request Timeout error.\n self.assertEqual(e.code, 408)\n info = await js.consumer_info(\"TEST3\", \"example\")\n self.assertEqual(info.num_waiting, 3)\n\n for i in range(0, 10):\n await js.publish(\"max\", b'foo')\n\n async def pub():\n while True:\n await js.publish(\"max\", b'foo')\n await asyncio.sleep(0)\n\n producer = asyncio.create_task(pub())\n\n async def cb():\n future = await asyncio.gather(\n sub.fetch(1, timeout=1),\n sub.fetch(512, timeout=1),\n sub.fetch(512, timeout=1),\n sub.fetch(1, timeout=1),\n sub.fetch(1, timeout=1),\n sub.fetch(1, timeout=1),\n sub.fetch(1, timeout=1),\n return_exceptions=True,\n )\n for e in future:\n if isinstance(e, asyncio.TimeoutError) or isinstance(e,\n APIError):\n continue\n else:\n m = e[0].metadata\n\n tasks = []\n for i in range(0, 100):\n task = asyncio.create_task(cb())\n tasks.append(task)\n await asyncio.sleep(0)\n\n for task in tasks:\n future = await task\n\n producer.cancel()\n\n await nc.close()\n\n @async_test\n async def test_fetch_max_waiting_fetch_n(self):\n nc = NATS()\n await nc.connect()\n\n js = nc.jetstream()\n\n await js.add_stream(name=\"TEST31\", subjects=[\"max\"])\n\n sub = await js.pull_subscribe(\n \"max\",\n \"example\",\n config=nats.js.api.ConsumerConfig(max_waiting=3),\n )\n results = None\n try:\n results = await asyncio.gather(\n sub.fetch(2, timeout=1),\n sub.fetch(2, timeout=1),\n sub.fetch(2, timeout=1),\n sub.fetch(2, timeout=1),\n sub.fetch(2, timeout=1),\n return_exceptions=True,\n )\n except:\n pass\n\n err = None\n for e in results:\n if isinstance(e, asyncio.TimeoutError):\n continue\n else:\n self.assertIsInstance(e, APIError)\n err = e\n break\n\n # Should get at least one Request Timeout error.\n self.assertEqual(err.code, 408)\n info = await js.consumer_info(\"TEST31\", \"example\")\n self.assertEqual(info.num_waiting, 3)\n await nc.close()\n\n @async_long_test\n async def test_fetch_concurrent(self):\n nc = await nats.connect()\n js = nc.jetstream()\n\n sinfo = await js.add_stream(name=\"TESTN10\", subjects=[\"a\", \"b\", \"c\"])\n\n async def go_publish():\n i = 0\n while True:\n try:\n payload = f'{i}'.encode()\n await js.publish(\"a\", payload)\n except Exception as e:\n pass\n i += 1\n await asyncio.sleep(0.01)\n\n task = asyncio.create_task(go_publish())\n\n sub = await js.pull_subscribe(\n \"a\",\n \"durable-1\",\n config=api.ConsumerConfig(max_waiting=3),\n )\n info = await sub.consumer_info()\n self.assertEqual(info.config.max_waiting, 3)\n\n start_time = time.monotonic()\n errors = []\n msgs = []\n m = {}\n while True:\n a = time.monotonic() - start_time\n if a > 2: # seconds\n break\n try:\n results = await asyncio.gather(\n sub.fetch(2, timeout=1),\n sub.fetch(2, timeout=1),\n sub.fetch(2, timeout=1),\n sub.fetch(2, timeout=1),\n sub.fetch(2, timeout=1),\n sub.fetch(2, timeout=1),\n # return_exceptions=True,\n )\n for batch in results:\n for msg in batch:\n m[int(msg.data.decode())] = msg\n self.assertTrue(msg.header is None)\n except Exception as e:\n errors.append(e)\n pass\n for e in errors:\n if isinstance(e, 
asyncio.TimeoutError):\n continue\n else:\n # Only 408 errors should ever bubble up.\n self.assertIsInstance(e, APIError)\n self.assertEqual(e.code, 408)\n task.cancel()\n\n await nc.close()\n\n return\n\n # Ensure that all published events that made it\n # were delivered.\n print(m)\n for i in range(0, len(m)):\n res = m.get(i)\n print(res.data)\n self.assertEqual(int(res.data.decode()), i)\n\n await nc.close()\n\n\nclass JSMTest(SingleJetStreamServerTestCase):\n\n @async_test\n async def test_stream_management(self):\n nc = NATS()\n await nc.connect()\n jsm = nc.jsm()\n\n acc = await jsm.account_info()\n self.assertIsInstance(acc, nats.js.api.AccountInfo)\n\n # Create stream\n stream = await jsm.add_stream(\n name=\"hello\", subjects=[\"hello\", \"world\", \"hello.>\"]\n )\n self.assertIsInstance(stream, nats.js.api.StreamInfo)\n self.assertIsInstance(stream.config, nats.js.api.StreamConfig)\n self.assertEqual(stream.config.name, \"hello\")\n self.assertIsInstance(stream.state, nats.js.api.StreamState)\n\n # Create without name\n with self.assertRaises(ValueError):\n await jsm.add_stream(subjects=[\"hello\", \"world\", \"hello.>\"])\n # Create with config, but without name\n with self.assertRaises(ValueError):\n await jsm.add_stream(nats.js.api.StreamConfig())\n # Create with config, name is provided as kwargs\n stream_with_name = await jsm.add_stream(\n nats.js.api.StreamConfig(), name=\"hi\"\n )\n self.assertEqual(stream_with_name.config.name, \"hi\")\n\n # Get info\n current = await jsm.stream_info(\"hello\")\n stream.did_create = None\n self.assertEqual(stream, current)\n\n self.assertIsInstance(current, nats.js.api.StreamInfo)\n self.assertIsInstance(current.config, nats.js.api.StreamConfig)\n self.assertEqual(current.config.name, \"hello\")\n self.assertIsInstance(current.state, nats.js.api.StreamState)\n\n # Send messages\n producer = nc.jetstream()\n ack = await producer.publish('world', b'Hello world!')\n self.assertEqual(ack.stream, \"hello\")\n self.assertEqual(ack.seq, 1)\n\n current = await jsm.stream_info(\"hello\")\n self.assertEqual(current.state.messages, 1)\n self.assertEqual(current.state.bytes, 47)\n\n # Delete stream\n is_deleted = await jsm.delete_stream(\"hello\")\n self.assertTrue(is_deleted)\n\n # Not foundError since there is none\n with self.assertRaises(NotFoundError):\n await jsm.stream_info(\"hello\")\n\n await nc.close()\n\n @async_test\n async def test_consumer_management(self):\n nc = NATS()\n await nc.connect()\n jsm = nc.jsm()\n\n acc = await jsm.account_info()\n self.assertIsInstance(acc, nats.js.api.AccountInfo)\n\n # Create stream.\n await jsm.add_stream(name=\"ctests\", subjects=[\"a\", \"b\", \"c.>\"])\n\n # Create durable consumer.\n cinfo = await jsm.add_consumer(\n \"ctests\",\n durable_name=\"dur\",\n ack_policy=\"explicit\",\n )\n\n # Fail with missing stream.\n with self.assertRaises(NotFoundError) as err:\n await jsm.consumer_info(\"missing\", \"c\")\n self.assertEqual(err.exception.err_code, 10059)\n\n # Get consumer, there should be no changes.\n current = await jsm.consumer_info(\"ctests\", \"dur\")\n self.assertEqual(cinfo, current)\n\n # Delete consumer.\n ok = await jsm.delete_consumer(\"ctests\", \"dur\")\n self.assertTrue(ok)\n\n # Consumer lookup should not be 404 now.\n with self.assertRaises(NotFoundError) as err:\n await jsm.consumer_info(\"ctests\", \"dur\")\n self.assertEqual(err.exception.err_code, 10014)\n\n # Create ephemeral consumer.\n cinfo = await jsm.add_consumer(\n \"ctests\",\n ack_policy=\"explicit\",\n 
deliver_subject=\"asdf\",\n )\n # Should not be empty.\n self.assertTrue(len(cinfo.name) > 0)\n ok = await jsm.delete_consumer(\"ctests\", cinfo.name)\n self.assertTrue(ok)\n\n # Ephemeral no longer found after delete.\n with self.assertRaises(NotFoundError):\n await jsm.delete_consumer(\"ctests\", cinfo.name)\n\n await nc.close()\n\n\nclass SubscribeTest(SingleJetStreamServerTestCase):\n\n @async_test\n async def test_queue_subscribe_deliver_group(self):\n nc = await nats.connect()\n js = nc.jetstream()\n\n await js.add_stream(name=\"qsub\", subjects=[\"quux\"])\n\n a, b, c = ([], [], [])\n\n async def cb1(msg):\n a.append(msg)\n\n async def cb2(msg):\n b.append(msg)\n\n async def cb3(msg):\n c.append(msg)\n\n subs = []\n\n # First queue subscriber will create.\n sub1 = await js.subscribe(\"quux\", \"wg\", cb=cb1)\n\n # Rest of queue subscribers will bind to the same.\n sub2 = await js.subscribe(\"quux\", \"wg\", cb=cb2)\n sub3 = await js.subscribe(\"quux\", \"wg\", cb=cb3)\n\n # All should be bound to the same subject.\n self.assertEqual(sub1.subject, sub2.subject)\n self.assertEqual(sub1.subject, sub3.subject)\n\n subs.append(sub1)\n subs.append(sub2)\n subs.append(sub3)\n\n for i in range(100):\n await js.publish(\"quux\", f'Hello World {i}'.encode())\n\n delivered = [a, b, c]\n for i in range(5):\n await asyncio.sleep(0.5)\n total = len(a) + len(b) + len(c)\n if total == 100:\n break\n\n # Check that there was a good balance among the group members.\n self.assertEqual(len(a) + len(b) + len(c), 100)\n for q in delivered:\n self.assertTrue(10 <= len(q) <= 70)\n\n # Now unsubscribe all.\n await sub1.unsubscribe()\n await sub2.drain()\n await sub3.unsubscribe()\n\n # Confirm that no more messages are received.\n for i in range(200, 210):\n await js.publish(\"quux\", f'Hello World {i}'.encode())\n\n with self.assertRaises(BadSubscriptionError):\n await sub1.unsubscribe()\n\n with self.assertRaises(BadSubscriptionError):\n await sub2.drain()\n\n await nc.close()\n\n @async_test\n async def test_subscribe_push_bound(self):\n nc = await nats.connect()\n js = nc.jetstream()\n\n await js.add_stream(name=\"pbound\", subjects=[\"pbound\"])\n\n a, b = ([], [])\n\n async def cb1(msg):\n a.append(msg)\n\n async def cb2(msg):\n b.append(msg)\n\n subs = []\n\n for i in range(10):\n await js.publish(\"pbound\", f'Hello World {i}'.encode())\n\n # First subscriber will create.\n sub1 = await js.subscribe(\"pbound\", cb=cb1, durable=\"singleton\")\n await asyncio.sleep(0.5)\n\n info = await js.consumer_info(\"pbound\", \"singleton\")\n self.assertTrue(info.push_bound)\n\n # Rest of subscribers will not bind because it is already bound.\n with self.assertRaises(nats.js.errors.Error) as err:\n await js.subscribe(\"pbound\", cb=cb2, durable=\"singleton\")\n self.assertEqual(\n err.exception.description,\n \"consumer is already bound to a subscription\"\n )\n\n with self.assertRaises(nats.js.errors.Error) as err:\n await js.subscribe(\n \"pbound\", queue=\"foo\", cb=cb2, durable=\"singleton\"\n )\n self.assertEqual(\n err.exception.description,\n \"cannot create queue subscription 'foo' to consumer 'singleton'\"\n )\n\n # Wait the delivery of the messages.\n for i in range(5):\n if len(a) == 10:\n break\n await asyncio.sleep(0.2)\n self.assertEqual(len(a), 10)\n\n # Create a sync subscriber now.\n sub2 = await js.subscribe(\"pbound\", durable=\"two\")\n msg = await sub2.next_msg()\n self.assertEqual(msg.data, b'Hello World 0')\n self.assertEqual(msg.metadata.sequence.stream, 1)\n 
self.assertEqual(msg.metadata.sequence.consumer, 1)\n self.assertEqual(sub2.pending_msgs, 9)\n\n await nc.close()\n\n @async_test\n async def test_ephemeral_subscribe(self):\n nc = await nats.connect()\n js = nc.jetstream()\n\n subject = \"ephemeral\"\n await js.add_stream(name=subject, subjects=[subject])\n\n for i in range(10):\n await js.publish(subject, f'Hello World {i}'.encode())\n\n # First subscriber will create.\n sub1 = await js.subscribe(subject)\n sub2 = await js.subscribe(subject)\n\n recvd = 0\n async for msg in sub1.messages:\n recvd += 1\n await msg.ack_sync()\n\n if recvd == 10:\n break\n\n # Both should be able to process the messages at their own pace.\n self.assertEqual(sub1.pending_msgs, 0)\n self.assertEqual(sub2.pending_msgs, 10)\n\n info1 = await sub1.consumer_info()\n self.assertEqual(info1.stream_name, \"ephemeral\")\n self.assertEqual(info1.num_ack_pending, 0)\n self.assertTrue(len(info1.name) > 0)\n\n info2 = await sub2.consumer_info()\n self.assertEqual(info2.stream_name, \"ephemeral\")\n self.assertEqual(info2.num_ack_pending, 10)\n self.assertTrue(len(info2.name) > 0)\n self.assertTrue(info1.name != info2.name)\n\n\nclass AckPolicyTest(SingleJetStreamServerTestCase):\n\n @async_test\n async def test_double_acking_pull_subscribe(self):\n nc = await nats.connect()\n\n js = nc.jetstream()\n sinfo = await js.add_stream(name=\"TESTACKS\", subjects=[\"test\"])\n for i in range(0, 10):\n await js.publish(\"test\", f'{i}'.encode())\n\n # Pull Subscriber\n psub = await js.pull_subscribe(\"test\", \"durable\")\n msgs = await psub.fetch(1)\n msg = msgs[0]\n await msg.ack()\n with self.assertRaises(MsgAlreadyAckdError):\n await msg.ack()\n\n info = await psub.consumer_info()\n self.assertEqual(info.num_pending, 9)\n self.assertEqual(info.num_ack_pending, 0)\n\n msgs = await psub.fetch(1)\n msg = msgs[0]\n await msg.nak()\n with self.assertRaises(MsgAlreadyAckdError):\n await msg.ack()\n info = await psub.consumer_info()\n self.assertEqual(info.num_pending, 8)\n self.assertEqual(info.num_ack_pending, 1)\n\n msgs = await psub.fetch(1)\n msg = msgs[0]\n await msg.in_progress()\n await asyncio.sleep(0.5)\n await msg.in_progress()\n await msg.ack()\n\n info = await psub.consumer_info()\n self.assertEqual(info.num_pending, 8)\n self.assertEqual(info.num_ack_pending, 0)\n\n await nc.close()\n\n @async_test\n async def test_double_acking_subscribe(self):\n errors = []\n\n async def error_handler(e):\n errors.append(e)\n\n nc = await nats.connect(error_cb=error_handler)\n\n js = nc.jetstream()\n sinfo = await js.add_stream(name=\"TESTACKS\", subjects=[\"test\"])\n for i in range(0, 10):\n await js.publish(\"test\", f'{i}'.encode())\n\n future = asyncio.Future()\n\n async def ocb(msg):\n # Ack the first one only.\n if msg.metadata.sequence.stream == 1:\n await msg.ack()\n return\n\n await msg.nak()\n await msg.ack()\n\n # Backoff to avoid a lot of redeliveries from nak protocol,\n # could get thousands per sec otherwise.\n if msg.metadata.sequence.stream == 10:\n await asyncio.sleep(1)\n\n # Subscriber\n sub = await js.subscribe(\"test\", cb=ocb)\n await asyncio.sleep(0.5)\n info = await sub.consumer_info()\n self.assertEqual(info.num_ack_pending, 9)\n self.assertEqual(info.num_pending, 0)\n await sub.unsubscribe()\n\n self.assertTrue(len(errors) > 0)\n self.assertIsInstance(errors[0], MsgAlreadyAckdError)\n\n await nc.close()\n\n\nclass OrderedConsumerTest(SingleJetStreamServerTestCase):\n\n @async_test\n async def test_flow_control(self):\n errors = []\n\n async def 
error_handler(e):\n print(\"Error:\", e)\n errors.append(e)\n\n nc = await nats.connect(error_cb=error_handler)\n\n js = nc.jetstream()\n\n subject = \"flow\"\n await js.add_stream(name=subject, subjects=[subject])\n\n async def cb(msg):\n await msg.ack()\n\n with self.assertRaises(nats.js.errors.APIError) as err:\n sub = await js.subscribe(subject, cb=cb, flow_control=True)\n self.assertEqual(\n err.exception.description,\n \"consumer with flow control also needs heartbeats\"\n )\n\n sub = await js.subscribe(\n subject, cb=cb, flow_control=True, idle_heartbeat=0.5\n )\n\n tasks = []\n\n async def producer():\n mlen = 128 * 1024\n msg = b'A' * mlen\n\n # Send it in chunks\n i = 0\n chunksize = 256\n while i < mlen:\n chunk = None\n if mlen - i <= chunksize:\n chunk = msg[i:]\n else:\n chunk = msg[i:i + chunksize]\n i += chunksize\n task = asyncio.create_task(js.publish(subject, chunk))\n tasks.append(task)\n\n task = asyncio.create_task(producer())\n await asyncio.wait_for(task, timeout=1)\n await asyncio.gather(*tasks)\n\n for i in range(0, 5):\n info = await sub.consumer_info()\n await asyncio.sleep(0.5)\n\n await nc.close()\n\n @async_test\n async def test_ordered_consumer(self):\n errors = []\n\n async def error_handler(e):\n print(\"Error:\", e)\n errors.append(e)\n\n nc = await nats.connect(error_cb=error_handler)\n js = nc.jetstream()\n\n subject = \"osub\"\n await js.add_stream(name=subject, subjects=[subject], storage=\"memory\")\n\n msgs = []\n\n async def cb(msg):\n msgs.append(msg)\n await msg.ack()\n\n sub = await js.subscribe(\n subject,\n cb=cb,\n ordered_consumer=True,\n idle_heartbeat=0.3,\n )\n\n tasks = []\n expected_size = 1048576\n\n async def producer():\n mlen = 1024 * 1024\n msg = b'A' * mlen\n\n # Send it in chunks\n i = 0\n chunksize = 1024\n while i < mlen:\n chunk = None\n if mlen - i <= chunksize:\n chunk = msg[i:]\n else:\n chunk = msg[i:i + chunksize]\n i += chunksize\n task = asyncio.create_task(\n js.publish(subject, chunk, headers={'data': \"true\"})\n )\n await asyncio.sleep(0)\n tasks.append(task)\n\n task = asyncio.create_task(producer())\n await asyncio.wait_for(task, timeout=4)\n await asyncio.gather(*tasks)\n self.assertEqual(len(msgs), 1024)\n\n received_payload = bytearray(b'')\n for msg in msgs:\n received_payload.extend(msg.data)\n self.assertEqual(len(received_payload), expected_size)\n\n for i in range(0, 5):\n info = await sub.consumer_info()\n if info.num_pending == 0:\n break\n await asyncio.sleep(0.5)\n\n info = await sub.consumer_info()\n self.assertEqual(info.num_pending, 0)\n await nc.close()\n\n @async_long_test\n async def test_ordered_consumer_single_loss(self):\n errors = []\n\n async def error_handler(e):\n print(\"Error:\", e)\n errors.append(e)\n\n # Consumer\n nc = await nats.connect(error_cb=error_handler)\n\n # Producer\n nc2 = await nats.connect(error_cb=error_handler)\n\n js = nc.jetstream()\n js2 = nc2.jetstream()\n\n # Consumer will lose some messages.\n orig_build_message = nc._build_message\n\n def _build_message(subject, reply, data, headers):\n r = random.randint(0, 100)\n if r <= 10 and headers and headers.get(\"data\"):\n nc._build_message = orig_build_message\n return\n\n return nc.msg_class(\n subject=subject.decode(),\n reply=reply.decode(),\n data=data,\n headers=headers,\n _client=nc,\n )\n\n # Override to introduce arbitrary loss.\n nc._build_message = _build_message\n\n subject = \"osub2\"\n await js2.add_stream(\n name=subject, subjects=[subject], storage=\"memory\"\n )\n\n # Consumer callback.\n future = 
asyncio.Future()\n msgs = []\n\n async def cb(msg):\n msgs.append(msg)\n if len(msgs) >= 1024:\n if not future.done():\n future.set_result(True)\n await msg.ack()\n\n sub = await js.subscribe(\n subject,\n cb=cb,\n ordered_consumer=True,\n idle_heartbeat=0.3,\n )\n\n tasks = []\n expected_size = 1048576\n\n async def producer():\n mlen = 1024 * 1024\n msg = b'A' * mlen\n\n # Send it in chunks\n i = 0\n chunksize = 1024\n while i < mlen:\n chunk = None\n if mlen - i <= chunksize:\n chunk = msg[i:]\n else:\n chunk = msg[i:i + chunksize]\n i += chunksize\n task = asyncio.create_task(\n nc2.publish(subject, chunk, headers={'data': \"true\"})\n )\n tasks.append(task)\n\n task = asyncio.create_task(producer())\n await asyncio.wait_for(task, timeout=2)\n await asyncio.gather(*tasks)\n\n # Async publishing complete\n await nc2.flush()\n await asyncio.sleep(1)\n\n # Wait until callback receives all the messages.\n try:\n await asyncio.wait_for(future, timeout=5)\n except Exception as err:\n print(\"Test Error:\", err)\n pass\n\n self.assertEqual(len(msgs), 1024)\n\n received_payload = bytearray(b'')\n for msg in msgs:\n received_payload.extend(msg.data)\n self.assertEqual(len(received_payload), expected_size)\n\n for i in range(0, 5):\n info = await sub.consumer_info()\n if info.num_pending == 0:\n break\n await asyncio.sleep(1)\n\n info = await sub.consumer_info()\n self.assertEqual(info.num_pending, 0)\n await nc.close()\n await nc2.close()\n\n\nclass KVTest(SingleJetStreamServerTestCase):\n\n @async_test\n async def test_kv_simple(self):\n errors = []\n\n async def error_handler(e):\n print(\"Error:\", e, type(e))\n errors.append(e)\n\n nc = await nats.connect(error_cb=error_handler)\n js = nc.jetstream()\n await js.add_stream(name=\"mystream\")\n\n kv = await js.create_key_value(bucket=\"TEST\", history=5, ttl=3600)\n status = await kv.status()\n self.assertEqual(status.bucket, \"TEST\")\n self.assertEqual(status.values, 0)\n self.assertEqual(status.history, 5)\n self.assertEqual(status.ttl, 3600)\n\n seq = await kv.put(\"hello\", b'world')\n self.assertEqual(seq, 1)\n\n entry = await kv.get(\"hello\")\n self.assertEqual(\"hello\", entry.key)\n self.assertEqual(b'world', entry.value)\n\n status = await kv.status()\n self.assertEqual(status.values, 1)\n\n for i in range(0, 100):\n await kv.put(f\"hello.{i}\", b'Hello JS KV!')\n\n status = await kv.status()\n self.assertEqual(status.values, 101)\n\n await kv.delete(\"hello.1\")\n\n with self.assertRaises(KeyDeletedError) as err:\n await kv.get(\"hello.1\")\n self.assertEqual(err.exception.entry.key, 'hello.1')\n self.assertEqual(err.exception.entry.revision, 102)\n self.assertEqual(err.exception.entry.value, None)\n self.assertEqual(err.exception.op, 'DEL')\n\n await kv.purge(\"hello.5\")\n\n with self.assertRaises(KeyDeletedError) as err:\n await kv.get(\"hello.5\")\n self.assertEqual(err.exception.entry.key, 'hello.5')\n self.assertEqual(err.exception.entry.revision, 103)\n self.assertEqual(err.exception.entry.value, None)\n self.assertEqual(err.exception.op, 'PURGE')\n\n status = await kv.status()\n self.assertEqual(status.values, 102)\n\n await kv.purge(\"hello.*\")\n\n with self.assertRaises(NotFoundError):\n await kv.get(\"hello.5\")\n\n status = await kv.status()\n self.assertEqual(status.values, 2)\n\n entry = await kv.get(\"hello\")\n self.assertEqual(\"hello\", entry.key)\n self.assertEqual(b'world', entry.value)\n self.assertEqual(1, entry.revision)\n\n # Now get the the same KV via lookup.\n kv = await js.key_value(\"TEST\")\n entry = 
await kv.get(\"hello\")\n self.assertEqual(\"hello\", entry.key)\n self.assertEqual(b'world', entry.value)\n self.assertEqual(1, entry.revision)\n\n status = await kv.status()\n self.assertEqual(status.values, 2)\n\n for i in range(100, 200):\n await kv.put(f\"hello.{i}\", b'Hello JS KV!')\n\n status = await kv.status()\n self.assertEqual(status.values, 102)\n\n with self.assertRaises(NotFoundError):\n await kv.get(\"hello.5\")\n\n entry = await kv.get(\"hello.102\")\n self.assertEqual(\"hello.102\", entry.key)\n self.assertEqual(b'Hello JS KV!', entry.value)\n self.assertEqual(107, entry.revision)\n\n await nc.close()\n\n @async_test\n async def test_not_kv(self):\n errors = []\n\n async def error_handler(e):\n print(\"Error:\", e, type(e))\n errors.append(e)\n\n nc = await nats.connect(error_cb=error_handler)\n js = nc.jetstream()\n\n with self.assertRaises(BucketNotFoundError):\n await js.key_value(bucket=\"TEST2\")\n\n await js.add_stream(name=\"KV_TEST3\")\n\n with self.assertRaises(BadBucketError):\n await js.key_value(bucket=\"TEST3\")\n\n\nif __name__ == '__main__':\n import sys\n runner = unittest.TextTestRunner(stream=sys.stdout)\n unittest.main(verbosity=2, exit=False, testRunner=runner)\n",
"id": "7624937",
"language": "Python",
"matching_score": 5.930911064147949,
"max_stars_count": 0,
"path": "tests/test_js.py"
},
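KVTest in the file above drives the key-value API layered on JetStream (create_key_value, put, get, delete, purge, status). The sketch below condenses that workflow outside the test harness; it assumes a JetStream-enabled server on the default port, and it assumes KeyDeletedError is importable from nats.js.errors as used by these tests. The bucket and key names are illustrative.

import asyncio
import nats
from nats.js.errors import KeyDeletedError  # import path assumed from this client's layout


async def main():
    nc = await nats.connect("nats://127.0.0.1:4222")
    js = nc.jetstream()

    # Bucket name, history depth and TTL mirror the values used in KVTest.
    kv = await js.create_key_value(bucket="TEST", history=5, ttl=3600)

    revision = await kv.put("hello", b'world')     # returns the new revision number
    entry = await kv.get("hello")
    print(entry.key, entry.value, entry.revision)  # hello b'world' 1

    await kv.delete("hello")
    try:
        await kv.get("hello")
    except KeyDeletedError as e:
        # A delete leaves a tombstone entry with op 'DEL', as the tests assert.
        print("deleted at revision", e.entry.revision)

    status = await kv.status()
    print("bucket:", status.bucket, "values:", status.values)

    await nc.close()


if __name__ == '__main__':
    asyncio.run(main())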
{
"content": "# Copyright 2016-2021 The NATS Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport asyncio\nimport json\nimport time\nimport ssl\nimport ipaddress\nimport base64\nfrom random import shuffle\nfrom urllib.parse import urlparse, ParseResult\nimport sys\nimport logging\nfrom typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union\nfrom email.parser import BytesParser\nfrom dataclasses import dataclass\n\nimport nats.js\nfrom nats import errors\nfrom nats.protocol.parser import AUTHORIZATION_VIOLATION, PERMISSIONS_ERR, PONG, Parser, STALE_CONNECTION\nfrom nats.nuid import NUID\nfrom nats.protocol import command as prot_command\n\nfrom .subscription import DEFAULT_SUB_PENDING_BYTES_LIMIT, DEFAULT_SUB_PENDING_MSGS_LIMIT, Subscription\nfrom .errors import ErrInvalidUserCredentials, ErrStaleConnection\nfrom .msg import Msg\n\n__version__ = '2.0.0rc5'\n__lang__ = 'python3'\n_logger = logging.getLogger(__name__)\nPROTOCOL = 1\n\nINFO_OP = b'INFO'\nCONNECT_OP = b'CONNECT'\nPING_OP = b'PING'\nPONG_OP = b'PONG'\nOK_OP = b'+OK'\nERR_OP = b'-ERR'\n_CRLF_ = b'\\r\\n'\n_SPC_ = b' '\nEMPTY = \"\"\n\nPING_PROTO = PING_OP + _CRLF_\nPONG_PROTO = PONG_OP + _CRLF_\nINBOX_PREFIX = bytearray(b'_INBOX.')\nINBOX_PREFIX_LEN = len(INBOX_PREFIX) + 22 + 1\n\nDEFAULT_PENDING_SIZE = 1024 * 1024\nDEFAULT_BUFFER_SIZE = 32768\nDEFAULT_RECONNECT_TIME_WAIT = 2 # in seconds\nDEFAULT_MAX_RECONNECT_ATTEMPTS = 60\nDEFAULT_PING_INTERVAL = 120 # in seconds\nDEFAULT_MAX_OUTSTANDING_PINGS = 2\nDEFAULT_MAX_PAYLOAD_SIZE = 1048576\nDEFAULT_MAX_FLUSHER_QUEUE_SIZE = 1024\nDEFAULT_CONNECT_TIMEOUT = 2 # in seconds\nDEFAULT_DRAIN_TIMEOUT = 30 # in seconds\nMAX_CONTROL_LINE_SIZE = 1024\n\nNATS_HDR_LINE = bytearray(b'NATS/1.0\\r\\n')\nNATS_HDR_LINE_SIZE = len(NATS_HDR_LINE)\nNO_RESPONDERS_STATUS = \"503\"\nCTRL_STATUS = \"100\"\nSTATUS_MSG_LEN = 3 # e.g. 
20x, 40x, 50x\nCTRL_LEN = len(_CRLF_)\n\nCallback = Callable[[], Awaitable[None]]\nErrorCallback = Callable[[Exception], Awaitable[None]]\nJWTCallback = Callable[[], Union[bytearray, bytes]]\nCredentials = Union[str, Tuple[str, str]]\nSignatureCallback = Callable[[str], bytes]\n\n\n@dataclass\nclass Srv:\n \"\"\"\n Srv is a helper data structure to hold state of a server.\n \"\"\"\n uri: ParseResult\n reconnects: int = 0\n last_attempt: Optional[float] = None\n did_connect: bool = False\n discovered: bool = False\n tls_name: Optional[str] = None\n\n\nasync def _default_error_callback(ex: Exception) -> None:\n \"\"\"\n Provides a default way to handle async errors if the user\n does not provide one.\n \"\"\"\n _logger.error('nats: encountered error', exc_info=ex)\n\n\nclass Client:\n \"\"\"\n Asyncio based client for NATS.\n \"\"\"\n\n msg_class: Type[Msg] = Msg\n\n # FIXME: Use an enum instead.\n DISCONNECTED = 0\n CONNECTED = 1\n CLOSED = 2\n RECONNECTING = 3\n CONNECTING = 4\n DRAINING_SUBS = 5\n DRAINING_PUBS = 6\n\n def __repr__(self) -> str:\n return f\"<nats client v{__version__}>\"\n\n def __init__(self) -> None:\n self._current_server: Optional[Srv] = None\n self._server_info: Dict[str, Any] = {}\n self._server_pool: List[Srv] = []\n self._reading_task: Optional[asyncio.Task] = None\n self._ping_interval_task: Optional[asyncio.Task] = None\n self._pings_outstanding: int = 0\n self._pongs_received: int = 0\n self._pongs: List[asyncio.Future] = []\n self._bare_io_reader: Optional[asyncio.StreamReader] = None\n self._io_reader: Optional[asyncio.StreamReader] = None\n self._bare_io_writer: Optional[asyncio.StreamWriter] = None\n self._io_writer: Optional[asyncio.StreamWriter] = None\n self._err: Optional[Exception] = None\n\n # callbacks\n self._error_cb: ErrorCallback = _default_error_callback\n self._disconnected_cb: Optional[Callback] = None\n self._closed_cb: Optional[Callback] = None\n self._discovered_server_cb: Optional[Callback] = None\n self._reconnected_cb: Optional[Callback] = None\n\n self._reconnection_task: Union[asyncio.Task[None], None] = None\n self._reconnection_task_future: Optional[asyncio.Future] = None\n self._max_payload: int = DEFAULT_MAX_PAYLOAD_SIZE\n # This is the client id that the NATS server knows\n # about. Useful in debugging application errors\n # when logged with this identifier along\n # with nats server log.\n # This would make more sense if we log the server\n # connected to as well in case of cluster setup.\n self._client_id: Optional[str] = None\n self._sid: int = 0\n self._subs: Dict[int, Subscription] = {}\n self._status: int = Client.DISCONNECTED\n self._ps: Parser = Parser(self)\n self._pending: List[bytes] = []\n self._pending_data_size: int = 0\n self._flush_queue: Optional[\"asyncio.Queue[None]\"] = None\n self._flusher_task: Optional[asyncio.Task] = None\n self._hdr_parser: BytesParser = BytesParser()\n\n # New style request/response\n self._resp_map: Dict[str, asyncio.Future] = {}\n self._resp_sub_prefix: Optional[bytearray] = None\n self._nuid = NUID()\n\n # NKEYS support\n #\n # user_jwt_cb is used to fetch and return the account\n # signed JWT for this user.\n self._user_jwt_cb: Optional[JWTCallback] = None\n\n # signature_cb is used to sign a nonce from the server while\n # authenticating with nkeys. 
The user should sign the nonce and\n # return the base64 encoded signature.\n self._signature_cb: Optional[SignatureCallback] = None\n\n # user credentials file can be a tuple or single file.\n self._user_credentials: Optional[Credentials] = None\n\n # file that contains the nkeys seed and its public key as a string.\n self._nkeys_seed: Optional[str] = None\n self._public_nkey: Optional[str] = None\n\n self.options: Dict[str, Any] = {}\n self.stats = {\n 'in_msgs': 0,\n 'out_msgs': 0,\n 'in_bytes': 0,\n 'out_bytes': 0,\n 'reconnects': 0,\n 'errors_received': 0,\n }\n\n async def connect(\n self,\n servers: List[str] = [\"nats://127.0.0.1:4222\"],\n error_cb: Optional[ErrorCallback] = None,\n disconnected_cb: Optional[Callback] = None,\n closed_cb: Optional[Callback] = None,\n discovered_server_cb: Optional[Callback] = None,\n reconnected_cb: Optional[Callback] = None,\n name: Optional[str] = None,\n pedantic: bool = False,\n verbose: bool = False,\n allow_reconnect: bool = True,\n connect_timeout: int = DEFAULT_CONNECT_TIMEOUT,\n reconnect_time_wait: int = DEFAULT_RECONNECT_TIME_WAIT,\n max_reconnect_attempts: int = DEFAULT_MAX_RECONNECT_ATTEMPTS,\n ping_interval: int = DEFAULT_PING_INTERVAL,\n max_outstanding_pings: int = DEFAULT_MAX_OUTSTANDING_PINGS,\n dont_randomize: bool = False,\n flusher_queue_size: int = DEFAULT_MAX_FLUSHER_QUEUE_SIZE,\n no_echo: bool = False,\n tls: Optional[ssl.SSLContext] = None,\n tls_hostname: Optional[str] = None,\n user: Optional[str] = None,\n password: Optional[str] = None,\n token: Optional[str] = None,\n drain_timeout: int = DEFAULT_DRAIN_TIMEOUT,\n signature_cb: Optional[SignatureCallback] = None,\n user_jwt_cb: Optional[JWTCallback] = None,\n user_credentials: Optional[Credentials] = None,\n nkeys_seed: Optional[str] = None,\n ) -> None:\n \"\"\"\n Establishes a connection to NATS.\n\n :param servers: NATS Connection\n :param name: Label the connection with name (shown in NATS monitoring)\n :param error_cb: Callback to report errors.\n :param disconnected_cb: Callback to report disconnection from NATS.\n :param closed_cb: Callback to report when client stops reconnection to NATS.\n :param discovered_server_cb: Callback to report when a new server joins the cluster.\n\n Connecting setting all callbacks::\n\n import asyncio\n import nats\n\n async def main():\n async def disconnected_cb():\n print('Got disconnected!')\n\n async def reconnected_cb():\n print(f'Got reconnected to {nc.connected_url.netloc}')\n\n async def error_cb(e):\n print(f'There was an error: {e}')\n\n async def closed_cb():\n print('Connection is closed')\n\n # Connect to NATS with logging callbacks.\n nc = await nats.connect('demo.nats.io',\n error_cb=error_cb,\n reconnected_cb=reconnected_cb,\n disconnected_cb=disconnected_cb,\n closed_cb=closed_cb,\n )\n\n async def handler(msg):\n print(f'Received a message on {msg.subject} {msg.reply}: {msg.data}')\n await msg.respond(b'OK')\n\n sub = await nc.subscribe('help.please', cb=handler)\n\n resp = await nc.request('help.please', b'help')\n print('Response:', resp)\n\n await nc.close()\n\n if __name__ == '__main__':\n asyncio.run(main())\n\n Using a context manager::\n\n import asyncio\n import nats\n\n async def main():\n\n is_done = asyncio.Future()\n\n async def closed_cb():\n print('Connection to NATS is closed.')\n is_done.set_result(True)\n\n async with (await nats.connect('nats://demo.nats.io:4222', closed_cb=closed_cb)) as nc:\n print(f'Connected to NATS at {nc.connected_url.netloc}...')\n\n async def subscribe_handler(msg):\n 
subject = msg.subject\n reply = msg.reply\n data = msg.data.decode()\n print('Received a message on '{subject} {reply}': {data}'.format(\n subject=subject, reply=reply, data=data))\n\n await nc.subscribe('discover', cb=subscribe_handler)\n await nc.flush()\n\n for i in range(0, 10):\n await nc.publish('discover', b'hello world')\n await asyncio.sleep(0.1)\n\n await asyncio.wait_for(is_done, 60.0)\n\n if __name__ == '__main__':\n asyncio.run(main())\n\n \"\"\"\n\n for cb in [error_cb, disconnected_cb, closed_cb, reconnected_cb,\n discovered_server_cb]:\n if cb and not asyncio.iscoroutinefunction(cb):\n raise errors.InvalidCallbackTypeError\n\n self._setup_server_pool(servers)\n self._error_cb = error_cb or _default_error_callback\n self._closed_cb = closed_cb\n self._discovered_server_cb = discovered_server_cb\n self._reconnected_cb = reconnected_cb\n self._disconnected_cb = disconnected_cb\n\n # NKEYS support\n self._signature_cb = signature_cb\n self._user_jwt_cb = user_jwt_cb\n self._user_credentials = user_credentials\n self._nkeys_seed = nkeys_seed\n\n # Customizable options\n self.options[\"verbose\"] = verbose\n self.options[\"pedantic\"] = pedantic\n self.options[\"name\"] = name\n self.options[\"allow_reconnect\"] = allow_reconnect\n self.options[\"dont_randomize\"] = dont_randomize\n self.options[\"reconnect_time_wait\"] = reconnect_time_wait\n self.options[\"max_reconnect_attempts\"] = max_reconnect_attempts\n self.options[\"ping_interval\"] = ping_interval\n self.options[\"max_outstanding_pings\"] = max_outstanding_pings\n self.options[\"no_echo\"] = no_echo\n self.options[\"user\"] = user\n self.options[\"password\"] = password\n self.options[\"token\"] = token\n self.options[\"connect_timeout\"] = connect_timeout\n self.options[\"drain_timeout\"] = drain_timeout\n\n if tls:\n self.options['tls'] = tls\n if tls_hostname:\n self.options['tls_hostname'] = tls_hostname\n\n if self._user_credentials is not None or self._nkeys_seed is not None:\n self._setup_nkeys_connect()\n\n # Queue used to trigger flushes to the socket\n self._flush_queue = asyncio.Queue(maxsize=flusher_queue_size)\n\n if self.options[\"dont_randomize\"] is False:\n shuffle(self._server_pool)\n\n while True:\n try:\n await self._select_next_server()\n await self._process_connect_init()\n assert self._current_server, \"the current server must be set by _select_next_server\"\n self._current_server.reconnects = 0\n break\n except errors.NoServersError as e:\n if self.options[\"max_reconnect_attempts\"] < 0:\n # Never stop reconnecting\n continue\n self._err = e\n raise e\n except (OSError, errors.Error, asyncio.TimeoutError) as e:\n self._err = e\n await self._error_cb(e)\n\n # Bail on first attempt if reconnecting is disallowed.\n if not self.options[\"allow_reconnect\"]:\n raise e\n\n await self._close(Client.DISCONNECTED, False)\n if self._current_server is not None:\n self._current_server.last_attempt = time.monotonic()\n self._current_server.reconnects += 1\n\n def _setup_nkeys_connect(self) -> None:\n if self._user_credentials is not None:\n self._setup_nkeys_jwt_connect()\n else:\n self._setup_nkeys_seed_connect()\n\n def _setup_nkeys_jwt_connect(self) -> None:\n assert self._user_credentials, \"_user_credentials required\"\n import nkeys\n import os\n\n creds = self._user_credentials\n if isinstance(creds, tuple):\n assert len(creds) == 2\n\n def user_cb() -> bytearray:\n contents = None\n with open(creds[0], 'rb') as f:\n contents = bytearray(os.fstat(f.fileno()).st_size)\n f.readinto(contents) # type: 
ignore[attr-defined]\n return contents\n\n self._user_jwt_cb = user_cb\n\n def sig_cb(nonce: str) -> bytes:\n seed = None\n with open(creds[1], 'rb') as f:\n seed = bytearray(os.fstat(f.fileno()).st_size)\n f.readinto(seed) # type: ignore[attr-defined]\n kp = nkeys.from_seed(seed)\n raw_signed = kp.sign(nonce.encode())\n sig = base64.b64encode(raw_signed)\n\n # Best effort attempt to clear from memory.\n kp.wipe()\n del kp\n del seed\n return sig\n\n self._signature_cb = sig_cb\n else:\n # Define the functions to be able to sign things using nkeys.\n def user_cb() -> bytearray:\n assert isinstance(creds, str)\n user_jwt = None\n with open(creds, 'rb') as f:\n while True:\n line = bytearray(f.readline())\n if b'BEGIN NATS USER JWT' in line:\n user_jwt = bytearray(f.readline())\n break\n # Remove trailing line break but reusing same memory view.\n return user_jwt[:len(user_jwt) - 1]\n\n self._user_jwt_cb = user_cb\n\n def sig_cb(nonce: str) -> bytes:\n assert isinstance(creds, str)\n user_seed = None\n with open(creds, 'rb', buffering=0) as f:\n for line in f:\n # Detect line where the NKEY would start and end,\n # then seek and read into a fixed bytearray that\n # can be wiped.\n if b'BEGIN USER NKEY SEED' in line:\n nkey_start_pos = f.tell()\n try:\n next(f)\n except StopIteration:\n raise ErrInvalidUserCredentials\n nkey_end_pos = f.tell()\n nkey_size = nkey_end_pos - nkey_start_pos - 1\n f.seek(nkey_start_pos)\n\n # Only gather enough bytes for the user seed\n # into the pre allocated bytearray.\n user_seed = bytearray(nkey_size)\n f.readinto(user_seed) # type: ignore[attr-defined]\n kp = nkeys.from_seed(user_seed)\n raw_signed = kp.sign(nonce.encode())\n sig = base64.b64encode(raw_signed)\n\n # Delete all state related to the keys.\n kp.wipe()\n del user_seed\n del kp\n return sig\n\n self._signature_cb = sig_cb\n\n def _setup_nkeys_seed_connect(self) -> None:\n assert self._nkeys_seed, \"Client.connect must be called first\"\n import nkeys\n import os\n\n seed = None\n creds = self._nkeys_seed\n with open(creds, 'rb') as f:\n seed = bytearray(os.fstat(f.fileno()).st_size)\n f.readinto(seed) # type: ignore[attr-defined]\n kp = nkeys.from_seed(seed)\n self._public_nkey = kp.public_key.decode()\n kp.wipe()\n del kp\n del seed\n\n def sig_cb(nonce: str) -> bytes:\n seed = None\n with open(creds, 'rb') as f:\n seed = bytearray(os.fstat(f.fileno()).st_size)\n f.readinto(seed) # type: ignore[attr-defined]\n kp = nkeys.from_seed(seed)\n raw_signed = kp.sign(nonce.encode())\n sig = base64.b64encode(raw_signed)\n\n # Best effort attempt to clear from memory.\n kp.wipe()\n del kp\n del seed\n return sig\n\n self._signature_cb = sig_cb\n\n async def close(self) -> None:\n \"\"\"\n Closes the socket to which we are connected and\n sets the client to be in the CLOSED state.\n No further reconnections occur once reaching this point.\n \"\"\"\n await self._close(Client.CLOSED)\n\n async def _close(self, status: int, do_cbs: bool = True) -> None:\n if self.is_closed:\n self._status = status\n return\n self._status = Client.CLOSED\n\n # Kick the flusher once again so it breaks\n # and avoid pending futures.\n await self._flush_pending()\n\n if self._reading_task is not None and not self._reading_task.cancelled(\n ):\n self._reading_task.cancel()\n\n if self._ping_interval_task is not None and not self._ping_interval_task.cancelled(\n ):\n self._ping_interval_task.cancel()\n\n if self._flusher_task is not None and not self._flusher_task.cancelled(\n ):\n self._flusher_task.cancel()\n\n if 
self._reconnection_task is not None and not self._reconnection_task.done(\n ):\n self._reconnection_task.cancel()\n\n # Wait for the reconection task to be done which should be soon.\n try:\n if self._reconnection_task_future is not None and not self._reconnection_task_future.cancelled(\n ):\n await asyncio.wait_for(\n self._reconnection_task_future,\n self.options[\"reconnect_time_wait\"],\n )\n except (asyncio.CancelledError, asyncio.TimeoutError):\n pass\n\n # Relinquish control to allow background tasks to wrap up.\n await asyncio.sleep(0)\n\n assert self._io_writer, \"Client.connect must be called first\"\n if self._current_server is not None:\n # In case there is any pending data at this point, flush before disconnecting.\n if self._pending_data_size > 0:\n self._io_writer.writelines(self._pending[:])\n self._pending = []\n self._pending_data_size = 0\n await self._io_writer.drain()\n\n # Cleanup subscriptions since not reconnecting so no need\n # to replay the subscriptions anymore.\n for sub in self._subs.values():\n # FIXME: Should we clear the pending queue here?\n if sub._wait_for_msgs_task and not sub._wait_for_msgs_task.done():\n sub._wait_for_msgs_task.cancel()\n self._subs.clear()\n\n if self._io_writer is not None:\n self._io_writer.close()\n try:\n await self._io_writer.wait_closed()\n except Exception as e:\n await self._error_cb(e)\n\n if do_cbs:\n if self._disconnected_cb is not None:\n await self._disconnected_cb()\n if self._closed_cb is not None:\n await self._closed_cb()\n\n # Set the client_id back to None\n self._client_id = None\n\n async def drain(self) -> None:\n \"\"\"\n drain will put a connection into a drain state. All subscriptions will\n immediately be put into a drain state. Upon completion, the publishers\n will be drained and can not publish any additional messages. Upon draining\n of the publishers, the connection will be closed. 
Use the `closed_cb`\n option to know when the connection has moved from draining to closed.\n\n \"\"\"\n if self.is_draining:\n return\n if self.is_closed:\n raise errors.ConnectionClosedError\n if self.is_connecting or self.is_reconnecting:\n raise errors.ConnectionReconnectingError\n\n drain_tasks = []\n for sub in self._subs.values():\n coro = sub._drain()\n task = asyncio.get_running_loop().create_task(coro)\n drain_tasks.append(task)\n\n drain_is_done = asyncio.gather(*drain_tasks)\n\n # Start draining the subscriptions.\n # Relinquish CPU to allow drain tasks to start in the background,\n # before setting state to draining.\n await asyncio.sleep(0)\n self._status = Client.DRAINING_SUBS\n\n try:\n await asyncio.wait_for(\n drain_is_done, self.options[\"drain_timeout\"]\n )\n except asyncio.TimeoutError:\n drain_is_done.exception()\n drain_is_done.cancel()\n await self._error_cb(errors.DrainTimeoutError())\n except asyncio.CancelledError:\n pass\n finally:\n self._status = Client.DRAINING_PUBS\n await self.flush()\n await self._close(Client.CLOSED)\n\n async def publish(\n self,\n subject: str,\n payload: bytes = b'',\n reply: str = '',\n headers: Optional[Dict[str, Any]] = None\n ) -> None:\n \"\"\"\n Publishes a NATS message.\n\n :param subject: Subject to which the message will be published.\n :param payload: Message data.\n :param reply: Inbox to which a responder can respond.\n :param headers: Optional message header.\n\n ::\n\n import asyncio\n import nats\n\n async def main():\n nc = await nats.connect('demo.nats.io')\n\n # Publish as message with an inbox.\n inbox = nc.new_inbox()\n sub = await nc.subscribe('hello')\n\n # Simple publishing\n await nc.publish('hello', b'Hello World!')\n\n # Publish with a reply\n await nc.publish('hello', b'Hello World!', reply=inbox)\n\n # Publish with a reply\n await nc.publish('hello', b'With Headers', headers={'Foo':'Bar'})\n\n while True:\n try:\n msg = await sub.next_msg()\n except:\n break\n print('----------------------')\n print('Subject:', msg.subject)\n print('Reply :', msg.reply)\n print('Data :', msg.data)\n print('Headers:', msg.header)\n\n if __name__ == '__main__':\n asyncio.run(main())\n\n \"\"\"\n\n if self.is_closed:\n raise errors.ConnectionClosedError\n if self.is_draining_pubs:\n raise errors.ConnectionDrainingError\n\n payload_size = len(payload)\n if payload_size > self._max_payload:\n raise errors.MaxPayloadError\n await self._send_publish(\n subject, reply, payload, payload_size, headers\n )\n\n async def _send_publish(\n self,\n subject: str,\n reply: str,\n payload: bytes,\n payload_size: int,\n headers: Optional[Dict[str, Any]],\n ) -> None:\n \"\"\"\n Sends PUB command to the NATS server.\n \"\"\"\n if subject == \"\":\n # Avoid sending messages with empty replies.\n raise errors.BadSubjectError\n\n pub_cmd = None\n if headers is None:\n pub_cmd = prot_command.pub_cmd(subject, reply, payload)\n else:\n hdr = bytearray()\n hdr.extend(NATS_HDR_LINE)\n for k, v in headers.items():\n hdr.extend(k.encode())\n hdr.extend(b': ')\n hdr.extend(v.encode())\n hdr.extend(_CRLF_)\n hdr.extend(_CRLF_)\n pub_cmd = prot_command.hpub_cmd(subject, reply, hdr, payload)\n\n self.stats['out_msgs'] += 1\n self.stats['out_bytes'] += payload_size\n await self._send_command(pub_cmd)\n if self._flush_queue is not None and self._flush_queue.empty():\n await self._flush_pending()\n\n async def subscribe(\n self,\n subject: str,\n queue: str = \"\",\n cb: Optional[Callable[[Msg], Awaitable[None]]] = None,\n future: Optional[asyncio.Future] = 
None,\n max_msgs: int = 0,\n pending_msgs_limit: int = DEFAULT_SUB_PENDING_MSGS_LIMIT,\n pending_bytes_limit: int = DEFAULT_SUB_PENDING_BYTES_LIMIT,\n ) -> Subscription:\n \"\"\"\n subscribe registers interest in a given subject.\n\n If a callback is provided, messages will be processed asychronously.\n\n If a callback isn't provided, messages can be retrieved via an\n asynchronous iterator on the returned subscription object.\n \"\"\"\n if not subject:\n raise errors.BadSubjectError\n\n if self.is_closed:\n raise errors.ConnectionClosedError\n\n if self.is_draining:\n raise errors.ConnectionDrainingError\n\n self._sid += 1\n sid = self._sid\n\n sub = Subscription(\n self,\n sid,\n subject,\n queue=queue,\n cb=cb,\n future=future,\n max_msgs=max_msgs,\n pending_msgs_limit=pending_msgs_limit,\n pending_bytes_limit=pending_bytes_limit,\n )\n\n sub._start(self._error_cb)\n self._subs[sid] = sub\n await self._send_subscribe(sub)\n return sub\n\n def _remove_sub(self, sid: int, max_msgs: int = 0) -> None:\n self._subs.pop(sid, None)\n\n async def _send_subscribe(self, sub: Subscription) -> None:\n sub_cmd = None\n if sub._queue is None:\n sub_cmd = prot_command.sub_cmd(sub._subject, EMPTY, sub._id)\n else:\n sub_cmd = prot_command.sub_cmd(sub._subject, sub._queue, sub._id)\n await self._send_command(sub_cmd)\n await self._flush_pending()\n\n async def _init_request_sub(self) -> None:\n self._resp_map = {}\n\n self._resp_sub_prefix = INBOX_PREFIX[:]\n self._resp_sub_prefix.extend(self._nuid.next())\n self._resp_sub_prefix.extend(b'.')\n resp_mux_subject = self._resp_sub_prefix[:]\n resp_mux_subject.extend(b'*')\n await self.subscribe(\n resp_mux_subject.decode(), cb=self._request_sub_callback\n )\n\n async def _request_sub_callback(self, msg: Msg) -> None:\n token = msg.subject[INBOX_PREFIX_LEN:]\n try:\n fut = self._resp_map.get(token)\n if not fut:\n return\n fut.set_result(msg)\n self._resp_map.pop(token, None)\n except (asyncio.CancelledError, asyncio.InvalidStateError):\n # Request may have timed out already so remove the entry\n self._resp_map.pop(token, None)\n\n async def request(\n self,\n subject: str,\n payload: bytes = b'',\n timeout: float = 0.5,\n old_style: bool = False,\n headers: Dict[str, Any] = None,\n ) -> Msg:\n \"\"\"\n Implements the request/response pattern via pub/sub\n using a single wildcard subscription that handles\n the responses.\n\n \"\"\"\n if old_style:\n # FIXME: Support headers in old style requests.\n return await self._request_old_style(\n subject, payload, timeout=timeout\n )\n else:\n msg = await self._request_new_style(\n subject, payload, timeout=timeout, headers=headers\n )\n if msg.headers and msg.headers.get(nats.js.api.Header.STATUS\n ) == NO_RESPONDERS_STATUS:\n raise errors.NoRespondersError\n return msg\n\n async def _request_new_style(\n self,\n subject: str,\n payload: bytes,\n timeout: float = 1,\n headers: Dict[str, Any] = None,\n ) -> Msg:\n if self.is_draining_pubs:\n raise errors.ConnectionDrainingError\n\n if not self._resp_sub_prefix:\n await self._init_request_sub()\n assert self._resp_sub_prefix\n\n # Use a new NUID for the token inbox and then use the future.\n token = self._nuid.next()\n inbox = self._resp_sub_prefix[:]\n inbox.extend(token)\n future: asyncio.Future = asyncio.Future()\n self._resp_map[token.decode()] = future\n await self.publish(\n subject, payload, reply=inbox.decode(), headers=headers\n )\n\n # Wait for the response or give up on timeout.\n try:\n msg = await asyncio.wait_for(future, timeout)\n return msg\n except 
asyncio.TimeoutError:\n # Double check that the token is there already.\n self._resp_map.pop(token.decode())\n future.cancel()\n raise errors.TimeoutError\n\n def new_inbox(self) -> str:\n \"\"\"\n new_inbox returns a unique inbox that can be used\n for NATS requests or subscriptions::\n\n # Create unique subscription to receive direct messages.\n inbox = nc.new_inbox()\n sub = await nc.subscribe(inbox)\n nc.publish('broadcast', b'', reply=inbox)\n msg = sub.next_msg()\n \"\"\"\n next_inbox = INBOX_PREFIX[:]\n next_inbox.extend(self._nuid.next())\n return next_inbox.decode()\n\n async def _request_old_style(\n self, subject: str, payload: bytes, timeout: float = 1\n ) -> Msg:\n \"\"\"\n Implements the request/response pattern via pub/sub\n using an ephemeral subscription which will be published\n with a limited interest of 1 reply returning the response\n or raising a Timeout error.\n \"\"\"\n inbox = self.new_inbox()\n\n future: asyncio.Future[Msg] = asyncio.Future()\n sub = await self.subscribe(inbox, future=future, max_msgs=1)\n await sub.unsubscribe(limit=1)\n await self.publish(subject, payload, reply=inbox)\n\n try:\n msg = await asyncio.wait_for(future, timeout)\n if msg.headers:\n if msg.headers.get(nats.js.api.Header.STATUS\n ) == NO_RESPONDERS_STATUS:\n raise errors.NoRespondersError\n return msg\n except asyncio.TimeoutError:\n await sub.unsubscribe()\n future.cancel()\n raise errors.TimeoutError\n\n async def _send_unsubscribe(self, sid: int, limit: int = 1) -> None:\n unsub_cmd = prot_command.unsub_cmd(sid, limit)\n await self._send_command(unsub_cmd)\n await self._flush_pending()\n\n async def flush(self, timeout: int = 10) -> None:\n \"\"\"\n Sends a ping to the server expecting a pong back ensuring\n what we have written so far has made it to the server and\n also enabling measuring of roundtrip time.\n In case a pong is not returned within the allowed timeout,\n then it will raise nats.errors.TimeoutError\n \"\"\"\n if timeout <= 0:\n raise errors.BadTimeoutError\n\n if self.is_closed:\n raise errors.ConnectionClosedError\n\n future: asyncio.Future = asyncio.Future()\n try:\n await self._send_ping(future)\n await asyncio.wait_for(future, timeout)\n except asyncio.TimeoutError:\n future.cancel()\n raise errors.TimeoutError\n\n @property\n def connected_url(self) -> Optional[str]:\n if self._current_server and self.is_connected:\n return str(self._current_server.uri)\n return None\n\n @property\n def servers(self) -> List[str]:\n servers = []\n for srv in self._server_pool:\n servers.append(str(srv.uri))\n return servers\n\n @property\n def discovered_servers(self) -> List[str]:\n servers = []\n for srv in self._server_pool:\n if srv.discovered:\n servers.append(str(srv.uri))\n return servers\n\n @property\n def max_payload(self) -> int:\n \"\"\"\n Returns the max payload which we received from the servers INFO\n \"\"\"\n return self._max_payload\n\n @property\n def client_id(self) -> Optional[str]:\n \"\"\"\n Returns the client id which we received from the servers INFO\n \"\"\"\n return self._client_id\n\n @property\n def last_error(self) -> Optional[Exception]:\n \"\"\"\n Returns the last error which may have occured.\n \"\"\"\n return self._err\n\n @property\n def pending_data_size(self) -> int:\n return self._pending_data_size\n\n @property\n def is_closed(self) -> bool:\n return self._status == Client.CLOSED\n\n @property\n def is_reconnecting(self) -> bool:\n return self._status == Client.RECONNECTING\n\n @property\n def is_connected(self) -> bool:\n return 
(self._status == Client.CONNECTED) or self.is_draining\n\n @property\n def is_connecting(self) -> bool:\n return self._status == Client.CONNECTING\n\n @property\n def is_draining(self) -> bool:\n return (\n self._status == Client.DRAINING_SUBS\n or self._status == Client.DRAINING_PUBS\n )\n\n @property\n def is_draining_pubs(self) -> bool:\n return self._status == Client.DRAINING_PUBS\n\n async def _send_command(self, cmd: bytes, priority: bool = False) -> None:\n if priority:\n self._pending.insert(0, cmd)\n else:\n self._pending.append(cmd)\n self._pending_data_size += len(cmd)\n if self._pending_data_size > DEFAULT_PENDING_SIZE:\n await self._flush_pending()\n\n async def _flush_pending(self) -> None:\n assert self._flush_queue, \"Client.connect must be called first\"\n try:\n # kick the flusher!\n await self._flush_queue.put(None)\n\n if not self.is_connected:\n return\n\n except asyncio.CancelledError:\n pass\n\n def _setup_server_pool(self, connect_url: Union[str, List[str]]) -> None:\n if isinstance(connect_url, str):\n try:\n if \"nats://\" in connect_url or \"tls://\" in connect_url:\n # Closer to how the Go client handles this.\n # e.g. nats://127.0.0.1:4222\n uri = urlparse(connect_url)\n elif \":\" in connect_url:\n # Expand the scheme for the user\n # e.g. 127.0.0.1:4222\n uri = urlparse(f\"nats://{connect_url}\")\n else:\n # Just use the endpoint with the default NATS port.\n # e.g. demo.nats.io\n uri = urlparse(f\"nats://{connect_url}:4222\")\n\n # In case only endpoint with scheme was set.\n # e.g. nats://demo.nats.io or localhost:\n if uri.port is None:\n uri = urlparse(f\"nats://{uri.hostname}:4222\")\n except ValueError:\n raise errors.Error(\"nats: invalid connect url option\")\n\n if uri.hostname is None or uri.hostname == \"none\":\n raise errors.Error(\"nats: invalid hostname in connect url\")\n self._server_pool.append(Srv(uri))\n elif isinstance(connect_url, list):\n try:\n for server in connect_url:\n uri = urlparse(server)\n self._server_pool.append(Srv(uri))\n except ValueError:\n raise errors.Error(\"nats: invalid connect url option\")\n else:\n raise errors.Error(\"nats: invalid connect url option\")\n\n async def _select_next_server(self) -> None:\n \"\"\"\n Looks up in the server pool for an available server\n and attempts to connect.\n \"\"\"\n\n while True:\n if len(self._server_pool) == 0:\n self._current_server = None\n raise errors.NoServersError\n\n now = time.monotonic()\n s = self._server_pool.pop(0)\n if self.options[\"max_reconnect_attempts\"] > 0:\n if s.reconnects > self.options[\"max_reconnect_attempts\"]:\n # Discard server since already tried to reconnect too many times\n continue\n\n # Not yet exceeded max_reconnect_attempts so can still use\n # this server in the future.\n self._server_pool.append(s)\n if s.last_attempt is not None and now < s.last_attempt + self.options[\n \"reconnect_time_wait\"]:\n # Backoff connecting to server if we attempted recently.\n await asyncio.sleep(self.options[\"reconnect_time_wait\"])\n try:\n s.last_attempt = time.monotonic()\n connection_future = asyncio.open_connection(\n s.uri.hostname, s.uri.port, limit=DEFAULT_BUFFER_SIZE\n )\n r, w = await asyncio.wait_for(\n connection_future, self.options['connect_timeout']\n )\n self._current_server = s\n\n # We keep a reference to the initial transport we used when\n # establishing the connection in case we later upgrade to TLS\n # after getting the first INFO message. 
This is in order to\n # prevent the GC closing the socket after we send CONNECT\n # and replace the transport.\n #\n # See https://github.com/nats-io/asyncio-nats/issues/43\n self._bare_io_reader = self._io_reader = r\n self._bare_io_writer = self._io_writer = w\n break\n except Exception as e:\n s.last_attempt = time.monotonic()\n s.reconnects += 1\n\n self._err = e\n await self._error_cb(e)\n continue\n\n async def _process_err(self, err_msg: str) -> None:\n \"\"\"\n Processes the raw error message sent by the server\n and close connection with current server.\n \"\"\"\n assert self._error_cb, \"Client.connect must be called first\"\n if STALE_CONNECTION in err_msg:\n await self._process_op_err(errors.StaleConnectionError())\n return\n\n if AUTHORIZATION_VIOLATION in err_msg:\n self._err = errors.AuthorizationError()\n else:\n prot_err = err_msg.strip(\"'\")\n m = f\"nats: {prot_err}\"\n err = errors.Error(m)\n self._err = err\n\n if PERMISSIONS_ERR in m:\n await self._error_cb(err)\n return\n\n do_cbs = False\n if not self.is_connecting:\n do_cbs = True\n\n # FIXME: Some errors such as 'Invalid Subscription'\n # do not cause the server to close the connection.\n # For now we handle similar as other clients and close.\n asyncio.create_task(self._close(Client.CLOSED, do_cbs))\n\n async def _process_op_err(self, e: Exception) -> None:\n \"\"\"\n Process errors which occured while reading or parsing\n the protocol. If allow_reconnect is enabled it will\n try to switch the server to which it is currently connected\n otherwise it will disconnect.\n \"\"\"\n if self.is_connecting or self.is_closed or self.is_reconnecting:\n return\n\n if self.options[\"allow_reconnect\"] and self.is_connected:\n self._status = Client.RECONNECTING\n self._ps.reset()\n\n if self._reconnection_task is not None and not self._reconnection_task.cancelled(\n ):\n # Cancel the previous task in case it may still be running.\n self._reconnection_task.cancel()\n\n self._reconnection_task = asyncio.get_running_loop().create_task(\n self._attempt_reconnect()\n )\n else:\n self._process_disconnect()\n self._err = e\n await self._close(Client.CLOSED, True)\n\n async def _attempt_reconnect(self) -> None:\n assert self._current_server, \"Client.connect must be called first\"\n if self._reading_task is not None and not self._reading_task.cancelled(\n ):\n self._reading_task.cancel()\n\n if self._ping_interval_task is not None and not self._ping_interval_task.cancelled(\n ):\n self._ping_interval_task.cancel()\n\n if self._flusher_task is not None and not self._flusher_task.cancelled(\n ):\n self._flusher_task.cancel()\n\n if self._io_writer is not None:\n self._io_writer.close()\n try:\n await self._io_writer.wait_closed()\n except Exception as e:\n await self._error_cb(e)\n\n self._err = None\n if self._disconnected_cb is not None:\n await self._disconnected_cb()\n\n if self.is_closed:\n return\n\n if \"dont_randomize\" not in self.options or not self.options[\n \"dont_randomize\"]:\n shuffle(self._server_pool)\n\n # Create a future that the client can use to control waiting\n # on the reconnection attempts.\n self._reconnection_task_future = asyncio.Future()\n while True:\n try:\n # Try to establish a TCP connection to a server in\n # the cluster then send CONNECT command to it.\n await self._select_next_server()\n assert self._io_writer, \"_select_next_server must've set _io_writer\"\n await self._process_connect_init()\n\n # Consider a reconnect to be done once CONNECT was\n # processed by the server successfully.\n 
self.stats[\"reconnects\"] += 1\n\n # Reset reconnect attempts for this server\n # since have successfully connected.\n self._current_server.did_connect = True\n self._current_server.reconnects = 0\n\n # Replay all the subscriptions in case there were some.\n subs_to_remove = []\n for sid, sub in self._subs.items():\n max_msgs = 0\n if sub._max_msgs > 0:\n # If we already hit the message limit, remove the subscription and don't\n # resubscribe\n if sub._received >= sub._max_msgs:\n subs_to_remove.append(sid)\n continue\n # auto unsubscribe the number of messages we have left\n max_msgs = sub._max_msgs - sub._received\n\n sub_cmd = prot_command.sub_cmd(\n sub._subject, sub._queue, sid\n )\n self._io_writer.write(sub_cmd)\n\n if max_msgs > 0:\n unsub_cmd = prot_command.unsub_cmd(sid, max_msgs)\n self._io_writer.write(unsub_cmd)\n\n for sid in subs_to_remove:\n self._subs.pop(sid)\n\n await self._io_writer.drain()\n\n # Flush pending data before continuing in connected status.\n # FIXME: Could use future here and wait for an error result\n # to bail earlier in case there are errors in the connection.\n await self._flush_pending()\n self._status = Client.CONNECTED\n await self.flush()\n if self._reconnected_cb is not None:\n await self._reconnected_cb()\n self._reconnection_task_future = None\n break\n except errors.NoServersError as e:\n self._err = e\n await self.close()\n break\n except (OSError, errors.Error, TimeoutError) as e:\n self._err = e\n await self._error_cb(e)\n self._status = Client.RECONNECTING\n self._current_server.last_attempt = time.monotonic()\n self._current_server.reconnects += 1\n except asyncio.CancelledError:\n break\n\n if self._reconnection_task_future is not None and not self._reconnection_task_future.cancelled(\n ):\n self._reconnection_task_future.set_result(True)\n\n def _connect_command(self) -> bytes:\n '''\n Generates a JSON string with the params to be used\n when sending CONNECT to the server.\n\n ->> CONNECT {\"lang\": \"python3\"}\n\n '''\n options = {\n \"verbose\": self.options[\"verbose\"],\n \"pedantic\": self.options[\"pedantic\"],\n \"lang\": __lang__,\n \"version\": __version__,\n \"protocol\": PROTOCOL\n }\n if \"headers\" in self._server_info:\n options[\"headers\"] = self._server_info[\"headers\"]\n options[\"no_responders\"] = self._server_info[\"headers\"]\n\n if \"auth_required\" in self._server_info:\n if self._server_info[\"auth_required\"]:\n if \"nonce\" in self._server_info and self._signature_cb is not None:\n sig = self._signature_cb(self._server_info[\"nonce\"])\n options[\"sig\"] = sig.decode()\n\n if self._user_jwt_cb is not None:\n jwt = self._user_jwt_cb()\n options[\"jwt\"] = jwt.decode()\n elif self._public_nkey is not None:\n options[\"nkey\"] = self._public_nkey\n # In case there is no password, then consider handle\n # sending a token instead.\n elif self.options[\"user\"] is not None and self.options[\n \"password\"] is not None:\n options[\"user\"] = self.options[\"user\"]\n options[\"pass\"] = self.options[\"password\"]\n elif self.options[\"token\"] is not None:\n options[\"auth_token\"] = self.options[\"token\"]\n elif self._current_server and self._current_server.uri.username is not None:\n if self._current_server.uri.password is None:\n options[\"auth_token\"\n ] = self._current_server.uri.username\n else:\n options[\"user\"] = self._current_server.uri.username\n options[\"pass\"] = self._current_server.uri.password\n\n if self.options[\"name\"] is not None:\n options[\"name\"] = self.options[\"name\"]\n if 
self.options[\"no_echo\"] is not None:\n options[\"echo\"] = not self.options[\"no_echo\"]\n\n connect_opts = json.dumps(options, sort_keys=True)\n return b''.join([CONNECT_OP + _SPC_ + connect_opts.encode() + _CRLF_])\n\n async def _process_ping(self) -> None:\n \"\"\"\n Process PING sent by server.\n \"\"\"\n await self._send_command(PONG)\n await self._flush_pending()\n\n async def _process_pong(self) -> None:\n \"\"\"\n Process PONG sent by server.\n \"\"\"\n if len(self._pongs) > 0:\n future = self._pongs.pop(0)\n future.set_result(True)\n self._pongs_received += 1\n self._pings_outstanding = 0\n\n def _is_control_message(self, data, header: Dict[str,\n str]) -> Optional[str]:\n if len(data) > 0:\n return None\n status = header.get(nats.js.api.Header.STATUS)\n if status == CTRL_STATUS:\n return header.get(nats.js.api.Header.DESCRIPTION)\n return None\n\n async def _process_msg(\n self,\n sid: int,\n subject: bytes,\n reply: bytes,\n data: bytes,\n headers: bytes,\n ) -> None:\n \"\"\"\n Process MSG sent by server.\n \"\"\"\n assert self._error_cb, \"Client.connect must be called first\"\n payload_size = len(data)\n self.stats['in_msgs'] += 1\n self.stats['in_bytes'] += payload_size\n\n sub = self._subs.get(sid)\n if not sub:\n # Skip in case no subscription present.\n return\n\n sub._received += 1\n if sub._max_msgs > 0 and sub._received >= sub._max_msgs:\n # Enough messages so can throwaway subscription now.\n self._subs.pop(sid, None)\n sub._stop_processing()\n\n hdr: Optional[Dict[str, str]] = None\n if headers:\n hdr = {}\n\n # Check the rest of the headers in case there is any.\n raw_headers = headers[len(NATS_HDR_LINE):]\n try:\n parsed_hdr = self._hdr_parser.parsebytes(raw_headers)\n # Check if it is an inline status message like:\n #\n # NATS/1.0 404 No Messages\n #\n if len(parsed_hdr.items()) == 0:\n line = headers[len(NATS_HDR_LINE) - 1:]\n status = line[:STATUS_MSG_LEN]\n desc = line[STATUS_MSG_LEN + 1:len(line) - CTRL_LEN -\n CTRL_LEN]\n hdr[nats.js.api.Header.STATUS] = status.decode()\n\n # FIXME: Clean this up...\n if len(desc) > 0:\n # Heartbeat messages can have both headers and inline status,\n # check that there are no pending headers to be parsed.\n i = desc.find(_CRLF_)\n if i > 0:\n hdr[nats.js.api.Header.DESCRIPTION\n ] = desc[:i].decode()\n parsed_hdr = self._hdr_parser.parsebytes(\n desc[i + CTRL_LEN:]\n )\n for k, v in parsed_hdr.items():\n hdr[k] = v\n else:\n # Just inline status...\n hdr[nats.js.api.Header.DESCRIPTION] = desc.decode()\n else:\n for k, v in parsed_hdr.items():\n hdr[k] = v\n except Exception as e:\n await self._error_cb(e)\n return\n\n msg = self._build_message(subject, reply, data, hdr)\n if not msg:\n return\n\n # Process flow control messages in case of using a JetStream context.\n ctrl_msg = None\n fcReply = None\n if sub._jsi:\n #########################################\n # #\n # JetStream Control Messages Processing #\n # #\n #########################################\n jsi = sub._jsi\n if hdr:\n ctrl_msg = self._is_control_message(data, hdr)\n\n # Check if the hearbeat has a \"Consumer Stalled\" header, if\n # so, the value is the FC reply to send a nil message to.\n # We will send it at the end of this function.\n if ctrl_msg and ctrl_msg.startswith(\"Idle\"):\n fcReply = hdr.get(nats.js.api.Header.CONSUMER_STALLED)\n\n # OrderedConsumer: checkOrderedMsgs\n if not ctrl_msg and jsi._ordered and msg.reply:\n did_reset = None\n tokens = Msg.Metadata._get_metadata_fields(msg.reply)\n # FIXME: Support JS Domains.\n sseq = int(tokens[5])\n 
dseq = int(tokens[6])\n if dseq != jsi._dseq:\n # Pick up from where we last left.\n did_reset = await jsi.reset_ordered_consumer(jsi._sseq + 1)\n else:\n # Update our tracking\n jsi._dseq = dseq + 1\n jsi._sseq = sseq\n if did_reset:\n return\n\n # Skip processing if this is a control message.\n if not ctrl_msg:\n # Check if it is an old style request.\n if sub._future:\n if sub._future.cancelled():\n # Already gave up, nothing to do.\n return\n sub._future.set_result(msg)\n return\n\n # Let subscription wait_for_msgs coroutine process the messages,\n # but in case sending to the subscription task would block,\n # then consider it to be an slow consumer and drop the message.\n try:\n sub._pending_size += payload_size\n # allow setting pending_bytes_limit to 0 to disable\n if sub._pending_bytes_limit > 0 and sub._pending_size >= sub._pending_bytes_limit:\n # Subtract the bytes since the message will be thrown away\n # so it would not be pending data.\n sub._pending_size -= payload_size\n\n await self._error_cb(\n errors.SlowConsumerError(\n subject=msg.subject,\n reply=msg.reply,\n sid=sid,\n sub=sub\n )\n )\n return\n sub._pending_queue.put_nowait(msg)\n except asyncio.QueueFull:\n await self._error_cb(\n errors.SlowConsumerError(\n subject=msg.subject, reply=msg.reply, sid=sid, sub=sub\n )\n )\n\n # Store the ACK metadata from the message to\n # compare later on with the received heartbeat.\n if sub._jsi:\n sub._jsi.track_sequences(msg.reply)\n elif ctrl_msg.startswith(\"Flow\") and msg.reply and sub._jsi:\n # This is a flow control message.\n # We will schedule the send of the FC reply once we have delivered the\n # DATA message that was received before this flow control message, which\n # has sequence `jsi.fciseq`. However, it is possible that this message\n # has already been delivered, in that case, we need to send the FC reply now.\n if sub.delivered >= sub._jsi._fciseq:\n fcReply = msg.reply\n else:\n # Schedule a reply after the previous message is delivered.\n sub._jsi.schedule_flow_control_response(msg.reply)\n\n # Handle flow control response.\n if fcReply:\n await self.publish(fcReply)\n\n if ctrl_msg and not msg.reply and ctrl_msg.startswith(\"Idle\"):\n if sub._jsi:\n await sub._jsi.check_for_sequence_mismatch(msg)\n\n def _build_message(\n self,\n subject: bytes,\n reply: bytes,\n data: bytes,\n headers: Optional[Dict[str, str]],\n ):\n return self.msg_class(\n subject=subject.decode(),\n reply=reply.decode(),\n data=data,\n headers=headers,\n _client=self,\n )\n\n def _process_disconnect(self) -> None:\n \"\"\"\n Process disconnection from the server and set client status\n to DISCONNECTED.\n \"\"\"\n self._status = Client.DISCONNECTED\n\n def _process_info(\n self, info: Dict[str, Any], initial_connection: bool = False\n ) -> None:\n \"\"\"\n Process INFO lines sent by the server to reconfigure client\n with latest updates from cluster to enable server discovery.\n \"\"\"\n assert self._current_server, \"Client.connect must be called first\"\n if 'connect_urls' in info:\n if info['connect_urls']:\n connect_urls = []\n for connect_url in info['connect_urls']:\n scheme = ''\n if self._current_server.uri.scheme == 'tls':\n scheme = 'tls'\n else:\n scheme = 'nats'\n\n uri = urlparse(f\"{scheme}://{connect_url}\")\n srv = Srv(uri)\n srv.discovered = True\n\n # Check whether we should reuse the original hostname.\n if 'tls_required' in self._server_info and self._server_info['tls_required'] \\\n and self._host_is_ip(uri.hostname):\n srv.tls_name = 
self._current_server.uri.hostname\n\n # Filter for any similar server in the server pool already.\n should_add = True\n for s in self._server_pool:\n if uri.netloc == s.uri.netloc:\n should_add = False\n if should_add:\n connect_urls.append(srv)\n\n if self.options[\"dont_randomize\"] is not True:\n shuffle(connect_urls)\n for srv in connect_urls:\n self._server_pool.append(srv)\n\n if not initial_connection and connect_urls and self._discovered_server_cb:\n self._discovered_server_cb()\n\n def _host_is_ip(self, connect_url: Optional[str]) -> bool:\n try:\n ipaddress.ip_address(connect_url)\n return True\n except Exception:\n return False\n\n async def _process_connect_init(self) -> None:\n \"\"\"\n Process INFO received from the server and CONNECT to the server\n with authentication. It is also responsible of setting up the\n reading and ping interval tasks from the client.\n \"\"\"\n assert self._io_reader, \"must be called only from Client.connect\"\n assert self._io_writer, \"must be called only from Client.connect\"\n assert self._current_server, \"must be called only from Client.connect\"\n self._status = Client.CONNECTING\n\n connection_completed = self._io_reader.readline()\n info_line = await asyncio.wait_for(\n connection_completed, self.options[\"connect_timeout\"]\n )\n if INFO_OP not in info_line:\n raise errors.Error(\n \"nats: empty response from server when expecting INFO message\"\n )\n\n _, info = info_line.split(INFO_OP + _SPC_, 1)\n\n try:\n srv_info = json.loads(info.decode())\n except Exception:\n raise errors.Error(\"nats: info message, json parse error\")\n\n self._server_info = srv_info\n self._process_info(srv_info, initial_connection=True)\n\n if 'max_payload' in self._server_info:\n self._max_payload = self._server_info[\"max_payload\"]\n\n if 'client_id' in self._server_info:\n self._client_id = self._server_info[\"client_id\"]\n\n if 'tls_required' in self._server_info and self._server_info[\n 'tls_required']:\n ssl_context: Optional[ssl.SSLContext]\n if \"tls\" in self.options:\n ssl_context = self.options.get('tls')\n elif self._current_server.uri.scheme == 'tls':\n ssl_context = ssl.create_default_context()\n if ssl_context is None:\n raise errors.Error('nats: no ssl context provided')\n\n # Check whether to reuse the original hostname for an implicit route.\n hostname = None\n if \"tls_hostname\" in self.options:\n hostname = self.options[\"tls_hostname\"]\n elif self._current_server.tls_name is not None:\n hostname = self._current_server.tls_name\n else:\n hostname = self._current_server.uri.hostname\n\n await self._io_writer.drain() # just in case something is left\n\n # loop.start_tls was introduced in python 3.7\n # the previous method is removed in 3.9\n if sys.version_info.minor >= 7:\n # manually recreate the stream reader/writer with a tls upgraded transport\n reader = asyncio.StreamReader()\n protocol = asyncio.StreamReaderProtocol(reader)\n transport_future = asyncio.get_running_loop().start_tls(\n self._io_writer.transport,\n protocol,\n ssl_context,\n server_hostname=hostname\n )\n transport = await asyncio.wait_for(\n transport_future, self.options['connect_timeout']\n )\n writer = asyncio.StreamWriter(\n transport, protocol, reader, asyncio.get_running_loop()\n )\n self._io_reader, self._io_writer = reader, writer\n else:\n transport = self._io_writer.transport\n sock = transport.get_extra_info('socket')\n if not sock:\n # This shouldn't happen\n raise errors.Error('nats: unable to get socket')\n\n connection_future = 
asyncio.open_connection(\n limit=DEFAULT_BUFFER_SIZE,\n sock=sock,\n ssl=ssl_context,\n server_hostname=hostname,\n )\n self._io_reader, self._io_writer = await asyncio.wait_for(\n connection_future, self.options['connect_timeout']\n )\n\n # Refresh state of parser upon reconnect.\n if self.is_reconnecting:\n self._ps.reset()\n\n assert self._io_reader\n assert self._io_writer\n connect_cmd = self._connect_command()\n self._io_writer.write(connect_cmd)\n await self._io_writer.drain()\n if self.options[\"verbose\"]:\n future = self._io_reader.readline()\n next_op = await asyncio.wait_for(\n future, self.options[\"connect_timeout\"]\n )\n if OK_OP in next_op:\n # Do nothing\n pass\n elif ERR_OP in next_op:\n err_line = next_op.decode()\n _, err_msg = err_line.split(\" \", 1)\n\n # FIXME: Maybe handling could be more special here,\n # checking for errors.AuthorizationError for example.\n # await self._process_err(err_msg)\n raise errors.Error(\"nats: \" + err_msg.rstrip('\\r\\n'))\n\n self._io_writer.write(PING_PROTO)\n await self._io_writer.drain()\n\n future = self._io_reader.readline()\n next_op = await asyncio.wait_for(\n future, self.options[\"connect_timeout\"]\n )\n\n if PONG_PROTO in next_op:\n self._status = Client.CONNECTED\n elif ERR_OP in next_op:\n err_line = next_op.decode()\n _, err_msg = err_line.split(\" \", 1)\n\n # FIXME: Maybe handling could be more special here,\n # checking for ErrAuthorization for example.\n # await self._process_err(err_msg)\n raise errors.Error(\"nats: \" + err_msg.rstrip('\\r\\n'))\n\n if PONG_PROTO in next_op:\n self._status = Client.CONNECTED\n\n self._reading_task = asyncio.get_running_loop().create_task(\n self._read_loop()\n )\n self._pongs = []\n self._pings_outstanding = 0\n self._ping_interval_task = asyncio.get_running_loop().create_task(\n self._ping_interval()\n )\n\n # Task for kicking the flusher queue\n self._flusher_task = asyncio.get_running_loop().create_task(\n self._flusher()\n )\n\n async def _send_ping(self, future: asyncio.Future = None) -> None:\n assert self._io_writer, \"Client.connect must be called first\"\n if future is None:\n future = asyncio.Future()\n self._pongs.append(future)\n self._io_writer.write(PING_PROTO)\n await self._flush_pending()\n\n async def _flusher(self) -> None:\n \"\"\"\n Coroutine which continuously tries to consume pending commands\n and then flushes them to the socket.\n \"\"\"\n assert self._error_cb, \"Client.connect must be called first\"\n assert self._io_writer, \"Client.connect must be called first\"\n assert self._flush_queue, \"Client.connect must be called first\"\n while True:\n if not self.is_connected or self.is_connecting:\n break\n\n try:\n await self._flush_queue.get()\n\n if self._pending_data_size > 0:\n self._io_writer.writelines(self._pending[:])\n self._pending = []\n self._pending_data_size = 0\n await self._io_writer.drain()\n except OSError as e:\n await self._error_cb(e)\n await self._process_op_err(e)\n break\n except (asyncio.CancelledError, RuntimeError, AttributeError):\n # RuntimeError in case the event loop is closed\n break\n\n async def _ping_interval(self) -> None:\n while True:\n await asyncio.sleep(self.options[\"ping_interval\"])\n if not self.is_connected:\n continue\n try:\n self._pings_outstanding += 1\n if self._pings_outstanding > self.options[\n \"max_outstanding_pings\"]:\n await self._process_op_err(ErrStaleConnection())\n return\n await self._send_ping()\n except (asyncio.CancelledError, RuntimeError, AttributeError):\n break\n # except 
asyncio.InvalidStateError:\n # pass\n\n async def _read_loop(self) -> None:\n \"\"\"\n Coroutine which gathers bytes sent by the server\n and feeds them to the protocol parser.\n In case of error while reading, it will stop running\n and its task has to be rescheduled.\n \"\"\"\n while True:\n try:\n should_bail = self.is_closed or self.is_reconnecting\n if should_bail or self._io_reader is None:\n break\n if self.is_connected and self._io_reader.at_eof():\n await self._error_cb(errors.StaleConnectionError())\n await self._process_op_err(errors.StaleConnectionError())\n break\n\n b = await self._io_reader.read(DEFAULT_BUFFER_SIZE)\n await self._ps.parse(b)\n except errors.ProtocolError:\n await self._process_op_err(errors.ProtocolError())\n break\n except OSError as e:\n await self._process_op_err(e)\n break\n except asyncio.CancelledError:\n break\n except Exception as ex:\n _logger.error('nats: encountered error', exc_info=ex)\n break\n # except asyncio.InvalidStateError:\n # pass\n\n async def __aenter__(self) -> \"Client\":\n \"\"\"For when NATS client is used in a context manager\"\"\"\n return self\n\n async def __aexit__(self, *exc_info) -> None:\n \"\"\"Close connection to NATS when used in a context manager\"\"\"\n await self._close(Client.CLOSED, do_cbs=True)\n\n def jetstream(self, **opts) -> nats.js.JetStreamContext:\n \"\"\"\n jetstream returns a context that can be used to produce and consume\n messages from NATS JetStream.\n\n :param prefix: Default JetStream API Prefix.\n :param domain: Optional domain used by the JetStream API.\n :param timeout: Timeout for all JS API actions.\n\n ::\n\n import asyncio\n import nats\n\n async def main():\n nc = await nats.connect()\n js = nc.jetstream()\n\n await js.add_stream(name='hello', subjects=['hello'])\n ack = await js.publish('hello', b'Hello JS!')\n print(f'Ack: stream={ack.stream}, sequence={ack.seq}')\n # Ack: stream=hello, sequence=1\n await nc.close()\n\n if __name__ == '__main__':\n asyncio.run(main())\n \"\"\"\n return nats.js.JetStreamContext(self, **opts)\n\n def jsm(self, **opts) -> nats.js.JetStreamManager:\n \"\"\"JetStream context for managing JetStream via JS API\"\"\"\n return nats.js.JetStreamManager(self, **opts)\n",
"id": "2578398",
"language": "Python",
"matching_score": 4.001945972442627,
"max_stars_count": 0,
"path": "nats/aio/client.py"
},
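The client code in the entry above drops messages for a subscription whose pending queue or pending byte count exceeds its limits and reports the drop through the error callback as a SlowConsumerError. A minimal sketch of how that surfaces to a caller follows; the `pending_msgs_limit`/`pending_bytes_limit` keyword arguments and the `demo.nats.io` server address are assumptions not shown in this file.

```python
import asyncio

import nats
from nats.errors import SlowConsumerError


async def main():
    # Overflowing messages are dropped and reported via the error callback
    # (see the slow-consumer handling in the client entry above).
    async def error_cb(e):
        if isinstance(e, SlowConsumerError):
            print("slow consumer on", e.subject, "sid:", e.sid)

    nc = await nats.connect("demo.nats.io", error_cb=error_cb)

    # Assumed keyword arguments controlling the pending-size checks.
    sub = await nc.subscribe(
        "hello", pending_msgs_limit=512, pending_bytes_limit=128 * 1024
    )

    await nc.publish("hello", b"Hello World!")
    msg = await sub.next_msg()
    print(msg.data)

    await nc.close()


if __name__ == "__main__":
    asyncio.run(main())
```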
{
"content": "import asyncio\n\nimport pytest\n\nnkeys_installed = None\n\ntry:\n import nkeys\n nkeys_installed = True\nexcept ModuleNotFoundError:\n nkeys_installed = False\n\nfrom nats.aio.client import Client as NATS\nfrom nats.errors import *\nfrom nats.aio.errors import *\nfrom tests.utils import (\n async_test, TrustedServerTestCase, NkeysServerTestCase, get_config_file\n)\n\n\nclass ClientNkeysAuthTest(NkeysServerTestCase):\n\n @async_test\n async def test_nkeys_connect(self):\n if not nkeys_installed:\n pytest.skip(\"nkeys not installed\")\n\n nc = NATS()\n\n future = asyncio.Future()\n\n async def error_cb(e):\n nonlocal future\n future.set_result(True)\n\n await nc.connect(\n [\"tls://127.0.0.1:4222\"],\n error_cb=error_cb,\n connect_timeout=10,\n nkeys_seed=get_config_file(\"nkeys/foo-user.nk\"),\n allow_reconnect=False,\n )\n\n async def help_handler(msg):\n await nc.publish(msg.reply, b'OK!')\n\n await nc.subscribe(\"help\", cb=help_handler)\n await nc.flush()\n msg = await nc.request(\"help\", b'I need help')\n self.assertEqual(msg.data, b'OK!')\n\n await nc.subscribe(\"bar\", cb=help_handler)\n await nc.flush()\n\n await asyncio.wait_for(future, 1)\n\n msg = await nc.request(\"help\", b'I need help')\n self.assertEqual(msg.data, b'OK!')\n\n await nc.close()\n\n\nclass ClientJWTAuthTest(TrustedServerTestCase):\n\n @async_test\n async def test_nkeys_jwt_creds_user_connect(self):\n if not nkeys_installed:\n pytest.skip(\"nkeys not installed\")\n\n nc = NATS()\n\n async def error_cb(e):\n print(\"Async Error:\", e, type(e))\n\n await nc.connect(\n [\"tls://127.0.0.1:4222\"],\n error_cb=error_cb,\n connect_timeout=5,\n user_credentials=get_config_file(\"nkeys/foo-user.creds\"),\n allow_reconnect=False,\n )\n\n async def help_handler(msg):\n await nc.publish(msg.reply, b'OK!')\n\n await nc.subscribe(\"help\", cb=help_handler)\n await nc.flush()\n msg = await nc.request(\"help\", b'I need help')\n self.assertEqual(msg.data, b'OK!')\n await nc.close()\n\n @async_test\n async def test_nkeys_jwt_creds_user_connect_tuple(self):\n if not nkeys_installed:\n pytest.skip(\"nkeys not installed\")\n\n nc = NATS()\n\n async def error_cb(e):\n print(\"Async Error:\", e, type(e))\n\n await nc.connect(\n [\"tls://127.0.0.1:4222\"],\n error_cb=error_cb,\n connect_timeout=5,\n user_credentials=(\n get_config_file(\"nkeys/foo-user.jwt\"),\n get_config_file(\"nkeys/foo-user.nk\")\n ),\n allow_reconnect=False,\n )\n\n async def help_handler(msg):\n await nc.publish(msg.reply, b'OK!')\n\n await nc.subscribe(\"help\", cb=help_handler)\n await nc.flush()\n msg = await nc.request(\"help\", b'I need help')\n self.assertEqual(msg.data, b'OK!')\n await nc.close()\n\n @async_test\n async def test_nkeys_jwt_creds_bad_nkeys_connect(self):\n if not nkeys_installed:\n pytest.skip(\"nkeys not installed\")\n\n with self.assertRaises(InvalidUserCredentialsError):\n nc = NATS()\n await nc.connect(\n [\"tls://127.0.0.1:4222\"],\n connect_timeout=5,\n user_credentials=get_config_file(\"nkeys/bad-user.creds\"),\n allow_reconnect=False,\n )\n\n with self.assertRaises(nkeys.ErrInvalidSeed):\n nc = NATS()\n await nc.connect(\n [\"tls://127.0.0.1:4222\"],\n connect_timeout=5,\n user_credentials=get_config_file(\"nkeys/bad-user2.creds\"),\n allow_reconnect=False,\n )\n",
"id": "8042334",
"language": "Python",
"matching_score": 1.4723317623138428,
"max_stars_count": 0,
"path": "tests/test_client_nkeys.py"
},
{
"content": "import asyncio\nimport nats\n\nasync def main():\n nc = await nats.connect(\"demo.nats.io\")\n\n # Publish as message with an inbox.\n inbox = nc.new_inbox()\n sub = await nc.subscribe(\"hello\")\n\n # Simple publishing\n await nc.publish(\"hello\", b'Hello World!')\n\n # Publish with a reply\n await nc.publish(\"hello\", b'Hello World!', reply=inbox)\n \n # Publish with a reply\n await nc.publish(\"hello\", b'With Headers', headers={'Foo':'Bar'})\n\n while True:\n try:\n msg = await sub.next_msg()\n except:\n break\n print(\"----------------------\")\n print(\"Subject:\", msg.subject)\n print(\"Reply :\", msg.reply)\n print(\"Data :\", msg.data)\n print(\"Headers:\", msg.header)\n\nif __name__ == '__main__':\n asyncio.run(main())\n",
"id": "8530889",
"language": "Python",
"matching_score": 2.0920474529266357,
"max_stars_count": 0,
"path": "examples/publish.py"
},
{
"content": "import asyncio\nimport nats\nfrom nats.errors import TimeoutError\n\n\nasync def main():\n nc = await nats.connect(\"localhost\")\n\n # Create JetStream context.\n js = nc.jetstream()\n\n # Persist messages on 'foo's subject.\n await js.add_stream(name=\"sample-stream\", subjects=[\"foo\"])\n\n for i in range(0, 10):\n ack = await js.publish(\"foo\", f\"hello world: {i}\".encode())\n print(ack)\n\n # Create pull based consumer on 'foo'.\n psub = await js.pull_subscribe(\"foo\", \"psub\")\n\n # Fetch and ack messagess from consumer.\n for i in range(0, 10):\n msgs = await psub.fetch(1)\n for msg in msgs:\n print(msg)\n\n # Create single ephemeral push based subscriber.\n sub = await js.subscribe(\"foo\")\n msg = await sub.next_msg()\n await msg.ack()\n\n # Create single push based subscriber that is durable across restarts.\n sub = await js.subscribe(\"foo\", durable=\"myapp\")\n msg = await sub.next_msg()\n await msg.ack()\n\n # Create deliver group that will be have load balanced messages.\n async def qsub_a(msg):\n print(\"QSUB A:\", msg)\n await msg.ack()\n\n async def qsub_b(msg):\n print(\"QSUB B:\", msg)\n await msg.ack()\n await js.subscribe(\"foo\", \"workers\", cb=qsub_a)\n await js.subscribe(\"foo\", \"workers\", cb=qsub_b)\n\n for i in range(0, 10):\n ack = await js.publish(\"foo\", f\"hello world: {i}\".encode())\n print(\"\\t\", ack)\n\n # Create ordered consumer with flow control and heartbeats\n # that auto resumes on failures.\n osub = await js.subscribe(\"foo\", ordered_consumer=True)\n data = bytearray()\n\n while True:\n try:\n msg = await osub.next_msg()\n data.extend(msg.data)\n except TimeoutError:\n break\n print(\"All data in stream:\", len(data))\n\n await nc.close()\n\nif __name__ == '__main__':\n asyncio.run(main())\n",
"id": "4606874",
"language": "Python",
"matching_score": 2.019541025161743,
"max_stars_count": 0,
"path": "examples/jetstream.py"
},
{
"content": "# Copyright 2021 The NATS Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport json\nfrom nats.js import api\nfrom nats.errors import NoRespondersError\nfrom nats.js.errors import ServiceUnavailableError, APIError\nfrom email.parser import BytesParser\nfrom typing import TYPE_CHECKING, Any, Dict, Optional\n\nif TYPE_CHECKING:\n from nats import NATS\n\n\nclass JetStreamManager:\n \"\"\"\n JetStreamManager exposes management APIs for JetStream.\n \"\"\"\n\n def __init__(\n self,\n conn: \"NATS\",\n prefix: str = api.DefaultPrefix,\n timeout: float = 5,\n ) -> None:\n self._prefix = prefix\n self._nc = conn\n self._timeout = timeout\n self._hdr_parser = BytesParser()\n\n async def account_info(self) -> api.AccountInfo:\n resp = await self._api_request(\n f\"{self._prefix}.INFO\", b'', timeout=self._timeout\n )\n return api.AccountInfo.from_response(resp)\n\n async def find_stream_name_by_subject(self, subject: str) -> str:\n \"\"\"\n Find the stream to which a subject belongs in an account.\n \"\"\"\n\n req_sub = f\"{self._prefix}.STREAM.NAMES\"\n req_data = json.dumps({\"subject\": subject})\n info = await self._api_request(\n req_sub, req_data.encode(), timeout=self._timeout\n )\n return info['streams'][0]\n\n async def stream_info(self, name: str) -> api.StreamInfo:\n \"\"\"\n Get the latest StreamInfo by stream name.\n \"\"\"\n resp = await self._api_request(\n f\"{self._prefix}.STREAM.INFO.{name}\", timeout=self._timeout\n )\n return api.StreamInfo.from_response(resp)\n\n async def add_stream(\n self, config: api.StreamConfig = None, **params\n ) -> api.StreamInfo:\n \"\"\"\n add_stream creates a stream.\n \"\"\"\n if config is None:\n config = api.StreamConfig()\n config = config.evolve(**params)\n if config.name is None:\n raise ValueError(\"nats: stream name is required\")\n\n data = json.dumps(config.as_dict())\n resp = await self._api_request(\n f\"{self._prefix}.STREAM.CREATE.{config.name}\",\n data.encode(),\n timeout=self._timeout,\n )\n return api.StreamInfo.from_response(resp)\n\n async def delete_stream(self, name: str) -> bool:\n \"\"\"\n Delete a stream by name.\n \"\"\"\n resp = await self._api_request(\n f\"{self._prefix}.STREAM.DELETE.{name}\", timeout=self._timeout\n )\n return resp['success']\n\n async def consumer_info(\n self, stream: str, consumer: str, timeout: Optional[float] = None\n ):\n # TODO: Validate the stream and consumer names.\n if timeout is None:\n timeout = self._timeout\n resp = await self._api_request(\n f\"{self._prefix}.CONSUMER.INFO.{stream}.{consumer}\",\n b'',\n timeout=timeout\n )\n return api.ConsumerInfo.from_response(resp)\n\n async def add_consumer(\n self,\n stream: str,\n config: Optional[api.ConsumerConfig] = None,\n timeout: Optional[float] = None,\n **params,\n ) -> api.ConsumerInfo:\n if not timeout:\n timeout = self._timeout\n if config is None:\n config = api.ConsumerConfig()\n config = config.evolve(**params)\n durable_name = config.durable_name\n req = {\"stream_name\": stream, \"config\": 
config.as_dict()}\n req_data = json.dumps(req).encode()\n\n resp = None\n if durable_name is not None:\n resp = await self._api_request(\n f\"{self._prefix}.CONSUMER.DURABLE.CREATE.{stream}.{durable_name}\",\n req_data,\n timeout=timeout\n )\n else:\n resp = await self._api_request(\n f\"{self._prefix}.CONSUMER.CREATE.{stream}\",\n req_data,\n timeout=timeout\n )\n return api.ConsumerInfo.from_response(resp)\n\n async def delete_consumer(self, stream: str, consumer: str) -> bool:\n resp = await self._api_request(\n f\"{self._prefix}.CONSUMER.DELETE.{stream}.{consumer}\",\n b'',\n timeout=self._timeout\n )\n return resp['success']\n\n async def _api_request(\n self,\n req_subject: str,\n req: bytes = b'',\n timeout: float = 5,\n ) -> Dict[str, Any]:\n try:\n msg = await self._nc.request(req_subject, req, timeout=timeout)\n resp = json.loads(msg.data)\n except NoRespondersError:\n raise ServiceUnavailableError\n\n # Check for API errors.\n if 'error' in resp:\n raise APIError.from_error(resp['error'])\n\n return resp\n",
"id": "7445529",
"language": "Python",
"matching_score": 2.805701732635498,
"max_stars_count": 0,
"path": "nats/js/manager.py"
},
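JetStreamManager above wraps the JetStream management API (stream and consumer CRUD plus account info) behind async request helpers. A short usage sketch, assuming it is obtained via the client's `jsm()` method shown earlier and that a JetStream-enabled server is listening on localhost:

```python
import asyncio

import nats


async def main():
    nc = await nats.connect("localhost")
    jsm = nc.jsm()

    # Stream management: create, inspect, delete.
    await jsm.add_stream(name="events", subjects=["events.>"])
    info = await jsm.stream_info("events")
    print("stream:", info.config.name)

    # Durable consumer management on that stream.
    await jsm.add_consumer("events", durable_name="worker")
    print("consumer:", await jsm.consumer_info("events", "worker"))

    await jsm.delete_consumer("events", "worker")
    await jsm.delete_stream("events")
    await nc.close()


if __name__ == "__main__":
    asyncio.run(main())
```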
{
"content": "# Copyright 2016-2021 The NATS Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom typing import TYPE_CHECKING, Any, Dict, NoReturn, Optional\nimport nats.errors\nfrom nats.js import api\nfrom dataclasses import dataclass\n\nif TYPE_CHECKING:\n from nats.aio.msg import Msg\n\n\nclass Error(nats.errors.Error):\n \"\"\"\n An Error raised by the NATS client when using JetStream.\n \"\"\"\n\n def __init__(self, description: Optional[str] = None) -> None:\n self.description = description\n\n def __str__(self) -> str:\n desc = ''\n if self.description:\n desc = self.description\n return f\"nats: JetStream.{self.__class__.__name__} {desc}\"\n\n\n@dataclass\nclass APIError(Error):\n \"\"\"\n An Error that is the result of interacting with NATS JetStream.\n \"\"\"\n code: Optional[int]\n err_code: Optional[int]\n description: Optional[str]\n stream: Optional[str]\n seq: Optional[int]\n\n def __init__(\n self,\n code: int = None,\n description: Optional[str] = None,\n err_code: Optional[int] = None,\n stream: Optional[str] = None,\n seq: Optional[int] = None,\n ) -> None:\n self.code = code\n self.err_code = err_code\n self.description = description\n self.stream = stream\n self.seq = seq\n\n @classmethod\n def from_msg(cls, msg: \"Msg\") -> NoReturn:\n if msg.header is None:\n raise APIError\n code = msg.header[api.Header.STATUS]\n if code == api.StatusCode.SERVICE_UNAVAILABLE:\n raise ServiceUnavailableError\n else:\n desc = msg.header[api.Header.DESCRIPTION]\n raise APIError(code=int(code), description=desc)\n\n @classmethod\n def from_error(cls, err: Dict[str, Any]):\n code = err['code']\n if code == 503:\n raise ServiceUnavailableError(**err)\n elif code == 500:\n raise ServerError(**err)\n elif code == 404:\n raise NotFoundError(**err)\n elif code == 400:\n raise BadRequestError(**err)\n else:\n raise APIError(**err)\n\n def __str__(self) -> str:\n return (\n f\"nats: {type(self).__name__}: code={self.code} err_code={self.err_code} \"\n f\"description='{self.description}'\"\n )\n\n\nclass ServiceUnavailableError(APIError):\n \"\"\"\n A 503 error\n \"\"\"\n pass\n\n\nclass ServerError(APIError):\n \"\"\"\n A 500 error\n \"\"\"\n pass\n\n\nclass NotFoundError(APIError):\n \"\"\"\n A 404 error\n \"\"\"\n pass\n\n\nclass BadRequestError(APIError):\n \"\"\"\n A 400 error\n \"\"\"\n pass\n\n\nclass NoStreamResponseError(Error):\n \"\"\"\n Raised if the client gets a 503 when publishing a message.\n \"\"\"\n\n def __str__(self) -> str:\n return \"nats: no response from stream\"\n\n\nclass ConsumerSequenceMismatchError(Error):\n \"\"\"\n Async error raised by the client with idle_heartbeat mode enabled\n when one of the message sequences is not the expected one.\n \"\"\"\n\n def __init__(\n self,\n stream_resume_sequence=None,\n consumer_sequence=None,\n last_consumer_sequence=None\n ) -> None:\n self.stream_resume_sequence = stream_resume_sequence\n self.consumer_sequence = consumer_sequence\n self.last_consumer_sequence = last_consumer_sequence\n\n def __str__(self) -> str:\n 
gap = self.last_consumer_sequence - self.consumer_sequence\n return (\n f\"nats: sequence mismatch for consumer at sequence {self.consumer_sequence} \"\n f\"({gap} sequences behind), should restart consumer from stream sequence {self.stream_resume_sequence}\"\n )\n\n\nclass BucketNotFoundError(NotFoundError):\n \"\"\"\n When attempted to bind to a JetStream KeyValue that does not exist.\n \"\"\"\n pass\n\n\nclass BadBucketError(Error):\n pass\n\n\nclass KeyDeletedError(Error):\n \"\"\"\n Raised when trying to get a key that was deleted from a JetStream KeyValue store.\n \"\"\"\n\n def __init__(self, entry=None, op=None) -> None:\n self.entry = entry\n self.op = op\n\n def __str__(self) -> str:\n return \"nats: key was deleted\"\n",
"id": "10639797",
"language": "Python",
"matching_score": 2.8237056732177734,
"max_stars_count": 0,
"path": "nats/js/errors.py"
},
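The hierarchy above maps JetStream API error codes to exception classes (503 → ServiceUnavailableError, 500 → ServerError, 404 → NotFoundError, 400 → BadRequestError), so callers can branch on the class instead of inspecting numeric codes. A minimal sketch, reusing the manager helpers from the previous entry; the exact server responses are assumptions:

```python
import asyncio

import nats
from nats.js.errors import NotFoundError, ServiceUnavailableError


async def main():
    nc = await nats.connect("localhost")
    jsm = nc.jsm()
    try:
        # Expected to raise NotFoundError when the stream is missing, or
        # ServiceUnavailableError when JetStream is disabled on the server.
        print(await jsm.stream_info("no-such-stream"))
    except NotFoundError as e:
        print("missing stream:", e.description)
    except ServiceUnavailableError:
        print("JetStream is not available on this server")
    finally:
        await nc.close()


if __name__ == "__main__":
    asyncio.run(main())
```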
{
"content": "# Copyright 2021 The NATS Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom typing import TYPE_CHECKING, Optional\nfrom nats.js import api\nfrom nats.js.errors import KeyDeletedError\nfrom dataclasses import dataclass\nimport base64\n\nif TYPE_CHECKING:\n from nats.js import JetStreamContext\n\nKV_OP = \"KV-Operation\"\nKV_DEL = \"DEL\"\nKV_PURGE = \"PURGE\"\nMSG_ROLLUP_SUBJECT = \"sub\"\n\n\nclass KeyValue:\n \"\"\"\n KeyValue uses the JetStream KeyValue functionality.\n\n .. note::\n This functionality is EXPERIMENTAL and may be changed in later releases.\n\n ::\n\n import asyncio\n import nats\n\n async def main():\n nc = await nats.connect()\n js = nc.jetstream()\n\n # Create a KV\n kv = await js.create_key_value(bucket='MY_KV')\n\n # Set and retrieve a value\n await kv.put('hello', b'world')\n entry = await kv.get('hello')\n print(f'KeyValue.Entry: key={entry.key}, value={entry.value}')\n # KeyValue.Entry: key=hello, value=world\n\n await nc.close()\n\n if __name__ == '__main__':\n asyncio.run(main())\n\n \"\"\"\n\n @dataclass\n class Entry:\n \"\"\"\n An entry from a KeyValue store in JetStream.\n \"\"\"\n bucket: str\n key: str\n value: Optional[bytes]\n revision: Optional[int]\n\n @dataclass(frozen=True)\n class BucketStatus:\n \"\"\"\n BucketStatus is the status of a KeyValue bucket.\n \"\"\"\n stream_info: api.StreamInfo\n bucket: str\n\n @property\n def values(self) -> int:\n \"\"\"\n values returns the number of stored messages in the stream.\n \"\"\"\n return self.stream_info.state.messages\n\n @property\n def history(self) -> int:\n \"\"\"\n history returns the max msgs per subject.\n \"\"\"\n return self.stream_info.config.max_msgs_per_subject\n\n @property\n def ttl(self) -> Optional[float]:\n \"\"\"\n ttl returns the max age in seconds.\n \"\"\"\n if self.stream_info.config.max_age is None:\n return None\n return self.stream_info.config.max_age\n\n def __init__(\n self,\n name: str,\n stream: str,\n pre: str,\n js: \"JetStreamContext\",\n ) -> None:\n self._name = name\n self._stream = stream\n self._pre = pre\n self._js = js\n\n async def get(self, key: str) -> Entry:\n \"\"\"\n get returns the latest value for the key.\n \"\"\"\n msg = await self._js.get_last_msg(self._stream, f\"{self._pre}{key}\")\n data = None\n if msg.data:\n data = base64.b64decode(msg.data)\n\n entry = KeyValue.Entry(\n bucket=self._name,\n key=key,\n value=data,\n revision=msg.seq,\n )\n\n # Check headers to see if deleted or purged.\n if msg.headers:\n op = msg.headers.get(KV_OP, None)\n if op == KV_DEL or op == KV_PURGE:\n raise KeyDeletedError(entry, op)\n\n return entry\n\n async def put(self, key: str, value: bytes) -> int:\n \"\"\"\n put will place the new value for the key into the store\n and return the revision number.\n \"\"\"\n pa = await self._js.publish(f\"{self._pre}{key}\", value)\n return pa.seq\n\n async def update(self, key: str, value: bytes, last: int) -> int:\n \"\"\"\n update will update the value iff the latest revision matches.\n \"\"\"\n hdrs = 
{}\n hdrs[api.Header.EXPECTED_LAST_SUBJECT_SEQUENCE] = str(last)\n pa = await self._js.publish(f\"{self._pre}{key}\", value, headers=hdrs)\n return pa.seq\n\n async def delete(self, key: str) -> bool:\n \"\"\"\n delete will place a delete marker and remove all previous revisions.\n \"\"\"\n hdrs = {}\n hdrs[KV_OP] = KV_DEL\n await self._js.publish(f\"{self._pre}{key}\", headers=hdrs)\n return True\n\n async def purge(self, key: str) -> bool:\n \"\"\"\n purge will remove the key and all revisions.\n \"\"\"\n hdrs = {}\n hdrs[KV_OP] = KV_PURGE\n hdrs[api.Header.ROLLUP] = MSG_ROLLUP_SUBJECT\n await self._js.publish(f\"{self._pre}{key}\", headers=hdrs)\n return True\n\n async def status(self) -> BucketStatus:\n \"\"\"\n status retrieves the status and configuration of a bucket.\n \"\"\"\n info = await self._js.stream_info(self._stream)\n return KeyValue.BucketStatus(stream_info=info, bucket=self._name)\n",
"id": "3208695",
"language": "Python",
"matching_score": 3.5745439529418945,
"max_stars_count": 0,
"path": "nats/js/kv.py"
},
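The KeyValue docstring above (and the examples/kv.py entry that follows) already covers `put`/`get`; the remaining operations are sketched here, assuming the same `create_key_value` helper and a local JetStream-enabled server:

```python
import asyncio

import nats
from nats.js.errors import KeyDeletedError


async def main():
    nc = await nats.connect("localhost")
    js = nc.jetstream()
    kv = await js.create_key_value(bucket="MY_KV")

    # put returns a revision; update only succeeds against that revision.
    rev = await kv.put("hello", b"world")
    await kv.update("hello", b"world, again", last=rev)

    # delete leaves a tombstone, so a later get raises KeyDeletedError.
    await kv.delete("hello")
    try:
        await kv.get("hello")
    except KeyDeletedError as e:
        print("key deleted, op:", e.op)

    status = await kv.status()
    print("bucket:", status.bucket, "values stored:", status.values)

    await nc.close()


if __name__ == "__main__":
    asyncio.run(main())
```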
{
"content": "import asyncio\nimport nats\n\nasync def main():\n nc = await nats.connect()\n js = nc.jetstream()\n\n # Create a KV\n kv = await js.create_key_value(bucket='MY_KV')\n\n # Set and retrieve a value\n await kv.put('hello', b'world')\n entry = await kv.get('hello')\n print(f'KeyValue.Entry: key={entry.key}, value={entry.value}')\n # KeyValue.Entry: key=hello, value=world\n\n await nc.close()\n\nif __name__ == '__main__':\n asyncio.run(main())\n",
"id": "3755347",
"language": "Python",
"matching_score": 0.29996994137763977,
"max_stars_count": 0,
"path": "examples/kv.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n pygments.lexers._csound_builtins\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.\n :license: BSD, see LICENSE for details.\n\"\"\"\n\n# Opcodes in Csound 6.13.0 using:\n# python3 -c \"\n# import re\n# from subprocess import Popen, PIPE\n# output = Popen(['csound', '--list-opcodes0'], stderr=PIPE, text=True).communicate()[1]\n# opcodes = output[re.search(r'^\\$', output, re.M).end() : re.search(r'^\\d+ opcodes\\$', output, re.M).start()].split()\n# output = Popen(['csound', '--list-opcodes2'], stderr=PIPE, text=True).communicate()[1]\n# all_opcodes = output[re.search(r'^\\$', output, re.M).end() : re.search(r'^\\d+ opcodes\\$', output, re.M).start()].split()\n# deprecated_opcodes = [opcode for opcode in all_opcodes if opcode not in opcodes]\n# # Remove opcodes that csound.py treats as keywords.\n# keyword_opcodes = [\n# 'cggoto', # https://csound.com/docs/manual/cggoto.html\n# 'cigoto', # https://csound.com/docs/manual/cigoto.html\n# 'cingoto', # (undocumented)\n# 'ckgoto', # https://csound.com/docs/manual/ckgoto.html\n# 'cngoto', # https://csound.com/docs/manual/cngoto.html\n# 'cnkgoto', # (undocumented)\n# 'endin', # https://csound.com/docs/manual/endin.html\n# 'endop', # https://csound.com/docs/manual/endop.html\n# 'goto', # https://csound.com/docs/manual/goto.html\n# 'igoto', # https://csound.com/docs/manual/igoto.html\n# 'instr', # https://csound.com/docs/manual/instr.html\n# 'kgoto', # https://csound.com/docs/manual/kgoto.html\n# 'loop_ge', # https://csound.com/docs/manual/loop_ge.html\n# 'loop_gt', # https://csound.com/docs/manual/loop_gt.html\n# 'loop_le', # https://csound.com/docs/manual/loop_le.html\n# 'loop_lt', # https://csound.com/docs/manual/loop_lt.html\n# 'opcode', # https://csound.com/docs/manual/opcode.html\n# 'reinit', # https://csound.com/docs/manual/reinit.html\n# 'return', # https://csound.com/docs/manual/return.html\n# 'rireturn', # https://csound.com/docs/manual/rireturn.html\n# 'rigoto', # https://csound.com/docs/manual/rigoto.html\n# 'tigoto', # https://csound.com/docs/manual/tigoto.html\n# 'timout' # https://csound.com/docs/manual/timout.html\n# ]\n# opcodes = [opcode for opcode in opcodes if opcode not in keyword_opcodes]\n# newline = '\\n'\n# print(f'''OPCODES = set(\\'''\n# {newline.join(opcodes)}\n# \\'''.split())\n#\n# DEPRECATED_OPCODES = set(\\'''\n# {newline.join(deprecated_opcodes)}\n# \\'''.split())\n# ''')\n# \"\n\nOPCODES = 
set('''\nATSadd\nATSaddnz\nATSbufread\nATScross\nATSinfo\nATSinterpread\nATSpartialtap\nATSread\nATSreadnz\nATSsinnoi\nFLbox\nFLbutBank\nFLbutton\nFLcloseButton\nFLcolor\nFLcolor2\nFLcount\nFLexecButton\nFLgetsnap\nFLgroup\nFLgroupEnd\nFLgroup_end\nFLhide\nFLhvsBox\nFLhvsBoxSetValue\nFLjoy\nFLkeyIn\nFLknob\nFLlabel\nFLloadsnap\nFLmouse\nFLpack\nFLpackEnd\nFLpack_end\nFLpanel\nFLpanelEnd\nFLpanel_end\nFLprintk\nFLprintk2\nFLroller\nFLrun\nFLsavesnap\nFLscroll\nFLscrollEnd\nFLscroll_end\nFLsetAlign\nFLsetBox\nFLsetColor\nFLsetColor2\nFLsetFont\nFLsetPosition\nFLsetSize\nFLsetSnapGroup\nFLsetText\nFLsetTextColor\nFLsetTextSize\nFLsetTextType\nFLsetVal\nFLsetVal_i\nFLsetVali\nFLsetsnap\nFLshow\nFLslidBnk\nFLslidBnk2\nFLslidBnk2Set\nFLslidBnk2Setk\nFLslidBnkGetHandle\nFLslidBnkSet\nFLslidBnkSetk\nFLslider\nFLtabs\nFLtabsEnd\nFLtabs_end\nFLtext\nFLupdate\nFLvalue\nFLvkeybd\nFLvslidBnk\nFLvslidBnk2\nFLxyin\nJackoAudioIn\nJackoAudioInConnect\nJackoAudioOut\nJackoAudioOutConnect\nJackoFreewheel\nJackoInfo\nJackoInit\nJackoMidiInConnect\nJackoMidiOut\nJackoMidiOutConnect\nJackoNoteOut\nJackoOn\nJackoTransport\nK35_hpf\nK35_lpf\nMixerClear\nMixerGetLevel\nMixerReceive\nMixerSend\nMixerSetLevel\nMixerSetLevel_i\nOSCbundle\nOSCcount\nOSCinit\nOSCinitM\nOSClisten\nOSCraw\nOSCsend\nOSCsend_lo\nS\nSTKBandedWG\nSTKBeeThree\nSTKBlowBotl\nSTKBlowHole\nSTKBowed\nSTKBrass\nSTKClarinet\nSTKDrummer\nSTKFMVoices\nSTKFlute\nSTKHevyMetl\nSTKMandolin\nSTKModalBar\nSTKMoog\nSTKPercFlut\nSTKPlucked\nSTKResonate\nSTKRhodey\nSTKSaxofony\nSTKShakers\nSTKSimple\nSTKSitar\nSTKStifKarp\nSTKTubeBell\nSTKVoicForm\nSTKWhistle\nSTKWurley\na\nabs\nactive\nadsr\nadsyn\nadsynt\nadsynt2\naftouch\nalpass\nalwayson\nampdb\nampdbfs\nampmidi\nampmidicurve\nampmidid\nareson\naresonk\natone\natonek\natonex\nbabo\nbalance\nbalance2\nbamboo\nbarmodel\nbbcutm\nbbcuts\nbeadsynt\nbeosc\nbetarand\nbexprnd\nbformdec1\nbformenc1\nbinit\nbiquad\nbiquada\nbirnd\nbpf\nbpfcos\nbqrez\nbutbp\nbutbr\nbuthp\nbutlp\nbutterbp\nbutterbr\nbutterhp\nbutterlp\nbutton\nbuzz\nc2r\ncabasa\ncauchy\ncauchyi\ncbrt\nceil\ncell\ncent\ncentroid\nceps\ncepsinv\nchanctrl\nchanged2\nchani\nchano\nchebyshevpoly\ncheckbox\nchn_S\nchn_a\nchn_k\nchnclear\nchnexport\nchnget\nchngetks\nchnmix\nchnparams\nchnset\nchnsetks\nchuap\nclear\nclfilt\nclip\nclockoff\nclockon\ncmp\ncmplxprod\ncomb\ncombinv\ncompilecsd\ncompileorc\ncompilestr\ncompress\ncompress2\nconnect\ncontrol\nconvle\nconvolve\ncopya2ftab\ncopyf2array\ncos\ncosh\ncosinv\ncosseg\ncossegb\ncossegr\ncps2pch\ncpsmidi\ncpsmidib\ncpsmidinn\ncpsoct\ncpspch\ncpstmid\ncpstun\ncpstuni\ncpsxpch\ncpumeter\ncpuprc\ncross2\ncrossfm\ncrossfmi\ncrossfmpm\ncrossfmpmi\ncrosspm\ncrosspmi\ncrunch\nctlchn\nctrl14\nctrl21\nctrl7\nctrlinit\ncuserrnd\ndam\ndate\ndates\ndb\ndbamp\ndbfsamp\ndcblock\ndcblock2\ndconv\ndct\ndctinv\ndeinterleave\ndelay\ndelay1\ndelayk\ndelayr\ndelayw\ndeltap\ndeltap3\ndeltapi\ndeltapn\ndeltapx\ndeltapxw\ndenorm\ndiff\ndiode_ladder\ndirectory\ndiskgrain\ndiskin\ndiskin2\ndispfft\ndisplay\ndistort\ndistort1\ndivz\ndoppler\ndot\ndownsamp\ndripwater\ndssiactivate\ndssiaudio\ndssictls\ndssiinit\ndssilist\ndumpk\ndumpk2\ndumpk3\ndumpk4\nduserrnd\ndust\ndust2\nenvlpx\nenvlpxr\nephasor\neqfil\nevalstr\nevent\nevent_i\nexciter\nexitnow\nexp\nexpcurve\nexpon\nexprand\nexprandi\nexpseg\nexpsega\nexpsegb\nexpsegba\nexpsegr\nfareylen\nfareyleni\nfaustaudio\nfaustcompile\nfaustctl\nfaustdsp\nfaustgen\nfaustplay\nfft\nfftinv\nficlose\nfilebit\nfilelen\nfilenchnls\nfilepeak\nfilescal\nfilesr\nfilevalid\nfillarray\nfilter2\nfin\
nfini\nfink\nfiopen\nflanger\nflashtxt\nflooper\nflooper2\nfloor\nfluidAllOut\nfluidCCi\nfluidCCk\nfluidControl\nfluidEngine\nfluidInfo\nfluidLoad\nfluidNote\nfluidOut\nfluidProgramSelect\nfluidSetInterpMethod\nfmanal\nfmax\nfmb3\nfmbell\nfmin\nfmmetal\nfmod\nfmpercfl\nfmrhode\nfmvoice\nfmwurlie\nfof\nfof2\nfofilter\nfog\nfold\nfollow\nfollow2\nfoscil\nfoscili\nfout\nfouti\nfoutir\nfoutk\nfprintks\nfprints\nfrac\nfractalnoise\nframebuffer\nfreeverb\nftaudio\nftchnls\nftconv\nftcps\nftfree\nftgen\nftgenonce\nftgentmp\nftlen\nftload\nftloadk\nftlptim\nftmorf\nftom\nftprint\nftresize\nftresizei\nftsamplebank\nftsave\nftsavek\nftslice\nftsr\ngain\ngainslider\ngauss\ngaussi\ngausstrig\ngbuzz\ngenarray\ngenarray_i\ngendy\ngendyc\ngendyx\ngetcfg\ngetcol\ngetftargs\ngetrow\ngetrowlin\ngetseed\ngogobel\ngrain\ngrain2\ngrain3\ngranule\ngtf\nguiro\nharmon\nharmon2\nharmon3\nharmon4\nhdf5read\nhdf5write\nhilbert\nhilbert2\nhrtfearly\nhrtfmove\nhrtfmove2\nhrtfreverb\nhrtfstat\nhsboscil\nhvs1\nhvs2\nhvs3\nhypot\ni\nihold\nimagecreate\nimagefree\nimagegetpixel\nimageload\nimagesave\nimagesetpixel\nimagesize\nin\nin32\ninch\ninh\ninit\ninitc14\ninitc21\ninitc7\ninleta\ninletf\ninletk\ninletkid\ninletv\nino\ninq\ninrg\nins\ninsglobal\ninsremot\nint\ninteg\ninterleave\ninterp\ninvalue\ninx\ninz\njacktransport\njitter\njitter2\njoystick\njspline\nk\nla_i_add_mc\nla_i_add_mr\nla_i_add_vc\nla_i_add_vr\nla_i_assign_mc\nla_i_assign_mr\nla_i_assign_t\nla_i_assign_vc\nla_i_assign_vr\nla_i_conjugate_mc\nla_i_conjugate_mr\nla_i_conjugate_vc\nla_i_conjugate_vr\nla_i_distance_vc\nla_i_distance_vr\nla_i_divide_mc\nla_i_divide_mr\nla_i_divide_vc\nla_i_divide_vr\nla_i_dot_mc\nla_i_dot_mc_vc\nla_i_dot_mr\nla_i_dot_mr_vr\nla_i_dot_vc\nla_i_dot_vr\nla_i_get_mc\nla_i_get_mr\nla_i_get_vc\nla_i_get_vr\nla_i_invert_mc\nla_i_invert_mr\nla_i_lower_solve_mc\nla_i_lower_solve_mr\nla_i_lu_det_mc\nla_i_lu_det_mr\nla_i_lu_factor_mc\nla_i_lu_factor_mr\nla_i_lu_solve_mc\nla_i_lu_solve_mr\nla_i_mc_create\nla_i_mc_set\nla_i_mr_create\nla_i_mr_set\nla_i_multiply_mc\nla_i_multiply_mr\nla_i_multiply_vc\nla_i_multiply_vr\nla_i_norm1_mc\nla_i_norm1_mr\nla_i_norm1_vc\nla_i_norm1_vr\nla_i_norm_euclid_mc\nla_i_norm_euclid_mr\nla_i_norm_euclid_vc\nla_i_norm_euclid_vr\nla_i_norm_inf_mc\nla_i_norm_inf_mr\nla_i_norm_inf_vc\nla_i_norm_inf_vr\nla_i_norm_max_mc\nla_i_norm_max_mr\nla_i_print_mc\nla_i_print_mr\nla_i_print_vc\nla_i_print_vr\nla_i_qr_eigen_mc\nla_i_qr_eigen_mr\nla_i_qr_factor_mc\nla_i_qr_factor_mr\nla_i_qr_sym_eigen_mc\nla_i_qr_sym_eigen_mr\nla_i_random_mc\nla_i_random_mr\nla_i_random_vc\nla_i_random_vr\nla_i_size_mc\nla_i_size_mr\nla_i_size_vc\nla_i_size_vr\nla_i_subtract_mc\nla_i_subtract_mr\nla_i_subtract_vc\nla_i_subtract_vr\nla_i_t_assign\nla_i_trace_mc\nla_i_trace_mr\nla_i_transpose_mc\nla_i_transpose_mr\nla_i_upper_solve_mc\nla_i_upper_solve_mr\nla_i_vc_create\nla_i_vc_set\nla_i_vr_create\nla_i_vr_set\nla_k_a_assign\nla_k_add_mc\nla_k_add_mr\nla_k_add_vc\nla_k_add_vr\nla_k_assign_a\nla_k_assign_f\nla_k_assign_mc\nla_k_assign_mr\nla_k_assign_t\nla_k_assign_vc\nla_k_assign_vr\nla_k_conjugate_mc\nla_k_conjugate_mr\nla_k_conjugate_vc\nla_k_conjugate_vr\nla_k_current_f\nla_k_current_vr\nla_k_distance_vc\nla_k_distance_vr\nla_k_divide_mc\nla_k_divide_mr\nla_k_divide_vc\nla_k_divide_vr\nla_k_dot_mc\nla_k_dot_mc_vc\nla_k_dot_mr\nla_k_dot_mr_vr\nla_k_dot_vc\nla_k_dot_vr\nla_k_f_assign\nla_k_get_mc\nla_k_get_mr\nla_k_get_vc\nla_k_get_vr\nla_k_invert_mc\nla_k_invert_mr\nla_k_lower_solve_mc\nla_k_lower_solve_mr\nla_k_lu_det_mc\nla_k_lu_det_mr\nla
_k_lu_factor_mc\nla_k_lu_factor_mr\nla_k_lu_solve_mc\nla_k_lu_solve_mr\nla_k_mc_set\nla_k_mr_set\nla_k_multiply_mc\nla_k_multiply_mr\nla_k_multiply_vc\nla_k_multiply_vr\nla_k_norm1_mc\nla_k_norm1_mr\nla_k_norm1_vc\nla_k_norm1_vr\nla_k_norm_euclid_mc\nla_k_norm_euclid_mr\nla_k_norm_euclid_vc\nla_k_norm_euclid_vr\nla_k_norm_inf_mc\nla_k_norm_inf_mr\nla_k_norm_inf_vc\nla_k_norm_inf_vr\nla_k_norm_max_mc\nla_k_norm_max_mr\nla_k_qr_eigen_mc\nla_k_qr_eigen_mr\nla_k_qr_factor_mc\nla_k_qr_factor_mr\nla_k_qr_sym_eigen_mc\nla_k_qr_sym_eigen_mr\nla_k_random_mc\nla_k_random_mr\nla_k_random_vc\nla_k_random_vr\nla_k_subtract_mc\nla_k_subtract_mr\nla_k_subtract_vc\nla_k_subtract_vr\nla_k_t_assign\nla_k_trace_mc\nla_k_trace_mr\nla_k_upper_solve_mc\nla_k_upper_solve_mr\nla_k_vc_set\nla_k_vr_set\nlenarray\nlfo\nlimit\nlimit1\nlincos\nline\nlinen\nlinenr\nlineto\nlink_beat_force\nlink_beat_get\nlink_beat_request\nlink_create\nlink_enable\nlink_is_enabled\nlink_metro\nlink_peers\nlink_tempo_get\nlink_tempo_set\nlinlin\nlinrand\nlinseg\nlinsegb\nlinsegr\nliveconv\nlocsend\nlocsig\nlog\nlog10\nlog2\nlogbtwo\nlogcurve\nloopseg\nloopsegp\nlooptseg\nloopxseg\nlorenz\nloscil\nloscil3\nloscil3phs\nloscilphs\nloscilx\nlowpass2\nlowres\nlowresx\nlpf18\nlpform\nlpfreson\nlphasor\nlpinterp\nlposcil\nlposcil3\nlposcila\nlposcilsa\nlposcilsa2\nlpread\nlpreson\nlpshold\nlpsholdp\nlpslot\nlua_exec\nlua_iaopcall\nlua_iaopcall_off\nlua_ikopcall\nlua_ikopcall_off\nlua_iopcall\nlua_iopcall_off\nlua_opdef\nmac\nmaca\nmadsr\nmags\nmandel\nmandol\nmaparray\nmaparray_i\nmarimba\nmassign\nmax\nmax_k\nmaxabs\nmaxabsaccum\nmaxaccum\nmaxalloc\nmaxarray\nmclock\nmdelay\nmedian\nmediank\nmetro\nmfb\nmidglobal\nmidiarp\nmidic14\nmidic21\nmidic7\nmidichannelaftertouch\nmidichn\nmidicontrolchange\nmidictrl\nmididefault\nmidifilestatus\nmidiin\nmidinoteoff\nmidinoteoncps\nmidinoteonkey\nmidinoteonoct\nmidinoteonpch\nmidion\nmidion2\nmidiout\nmidiout_i\nmidipgm\nmidipitchbend\nmidipolyaftertouch\nmidiprogramchange\nmiditempo\nmidremot\nmin\nminabs\nminabsaccum\nminaccum\nminarray\nmincer\nmirror\nmode\nmodmatrix\nmonitor\nmoog\nmoogladder\nmoogladder2\nmoogvcf\nmoogvcf2\nmoscil\nmp3bitrate\nmp3in\nmp3len\nmp3nchnls\nmp3scal\nmp3sr\nmpulse\nmrtmsg\nmtof\nmton\nmultitap\nmute\nmvchpf\nmvclpf1\nmvclpf2\nmvclpf3\nmvclpf4\nmxadsr\nnchnls_hw\nnestedap\nnlalp\nnlfilt\nnlfilt2\nnoise\nnoteoff\nnoteon\nnoteondur\nnoteondur2\nnotnum\nnreverb\nnrpn\nnsamp\nnstance\nnstrnum\nnstrstr\nntof\nntom\nntrpol\nnxtpow2\noctave\noctcps\noctmidi\noctmidib\noctmidinn\noctpch\nolabuffer\noscbnk\noscil\noscil1\noscil1i\noscil3\noscili\noscilikt\nosciliktp\noscilikts\nosciln\noscils\noscilx\nout\nout32\noutc\noutch\nouth\noutiat\noutic\noutic14\noutipat\noutipb\noutipc\noutkat\noutkc\noutkc14\noutkpat\noutkpb\noutkpc\noutleta\noutletf\noutletk\noutletkid\noutletv\nouto\noutq\noutq1\noutq2\noutq3\noutq4\noutrg\nouts\nouts1\nouts2\noutvalue\noutx\noutz\np\np5gconnect\np5gdata\npan\npan2\npareq\npart2txt\npartials\npartikkel\npartikkelget\npartikkelset\npartikkelsync\npassign\npaulstretch\npcauchy\npchbend\npchmidi\npchmidib\npchmidinn\npchoct\npchtom\npconvolve\npcount\npdclip\npdhalf\npdhalfy\npeak\npgmassign\npgmchn\nphaser1\nphaser2\nphasor\nphasorbnk\nphs\npindex\npinker\npinkish\npitch\npitchac\npitchamdf\nplanet\nplaterev\nplltrack\npluck\npoisson\npol2rect\npolyaft\npolynomial\nport\nportk\nposcil\nposcil3\npow\npowershape\npowoftwo\npows\nprealloc\nprepiano\nprint\nprint_type\nprintarray\nprintf\nprintf_i\nprintk\nprintk2\nprintks\nprintks2\nprints\nproduct\npset\
nptable\nptable3\nptablei\nptablew\nptrack\nputs\npvadd\npvbufread\npvcross\npvinterp\npvoc\npvread\npvs2array\npvs2tab\npvsadsyn\npvsanal\npvsarp\npvsbandp\npvsbandr\npvsbin\npvsblur\npvsbuffer\npvsbufread\npvsbufread2\npvscale\npvscent\npvsceps\npvscross\npvsdemix\npvsdiskin\npvsdisp\npvsenvftw\npvsfilter\npvsfread\npvsfreeze\npvsfromarray\npvsftr\npvsftw\npvsfwrite\npvsgain\npvshift\npvsifd\npvsin\npvsinfo\npvsinit\npvslock\npvsmaska\npvsmix\npvsmooth\npvsmorph\npvsosc\npvsout\npvspitch\npvstanal\npvstencil\npvstrace\npvsvoc\npvswarp\npvsynth\npwd\npyassign\npyassigni\npyassignt\npycall\npycall1\npycall1i\npycall1t\npycall2\npycall2i\npycall2t\npycall3\npycall3i\npycall3t\npycall4\npycall4i\npycall4t\npycall5\npycall5i\npycall5t\npycall6\npycall6i\npycall6t\npycall7\npycall7i\npycall7t\npycall8\npycall8i\npycall8t\npycalli\npycalln\npycallni\npycallt\npyeval\npyevali\npyevalt\npyexec\npyexeci\npyexect\npyinit\npylassign\npylassigni\npylassignt\npylcall\npylcall1\npylcall1i\npylcall1t\npylcall2\npylcall2i\npylcall2t\npylcall3\npylcall3i\npylcall3t\npylcall4\npylcall4i\npylcall4t\npylcall5\npylcall5i\npylcall5t\npylcall6\npylcall6i\npylcall6t\npylcall7\npylcall7i\npylcall7t\npylcall8\npylcall8i\npylcall8t\npylcalli\npylcalln\npylcallni\npylcallt\npyleval\npylevali\npylevalt\npylexec\npylexeci\npylexect\npylrun\npylruni\npylrunt\npyrun\npyruni\npyrunt\nqinf\nqnan\nr2c\nrand\nrandh\nrandi\nrandom\nrandomh\nrandomi\nrbjeq\nreadclock\nreadf\nreadfi\nreadk\nreadk2\nreadk3\nreadk4\nreadks\nreadscore\nreadscratch\nrect2pol\nrelease\nremoteport\nremove\nrepluck\nreshapearray\nreson\nresonk\nresonr\nresonx\nresonxk\nresony\nresonz\nresyn\nreverb\nreverb2\nreverbsc\nrewindscore\nrezzy\nrfft\nrifft\nrms\nrnd\nrnd31\nround\nrspline\nrtclock\ns16b14\ns32b14\nsamphold\nsandpaper\nsc_lag\nsc_lagud\nsc_phasor\nsc_trig\nscale\nscalearray\nscanhammer\nscans\nscantable\nscanu\nschedkwhen\nschedkwhennamed\nschedule\nschedwhen\nscoreline\nscoreline_i\nseed\nsekere\nselect\nsemitone\nsense\nsensekey\nseqtime\nseqtime2\nserialBegin\nserialEnd\nserialFlush\nserialPrint\nserialRead\nserialWrite\nserialWrite_i\nsetcol\nsetctrl\nsetksmps\nsetrow\nsetscorepos\nsfilist\nsfinstr\nsfinstr3\nsfinstr3m\nsfinstrm\nsfload\nsflooper\nsfpassign\nsfplay\nsfplay3\nsfplay3m\nsfplaym\nsfplist\nsfpreset\nshaker\nshiftin\nshiftout\nsignum\nsin\nsinh\nsininv\nsinsyn\nsleighbells\nslicearray\nslicearray_i\nslider16\nslider16f\nslider16table\nslider16tablef\nslider32\nslider32f\nslider32table\nslider32tablef\nslider64\nslider64f\nslider64table\nslider64tablef\nslider8\nslider8f\nslider8table\nslider8tablef\nsliderKawai\nsndloop\nsndwarp\nsndwarpst\nsockrecv\nsockrecvs\nsocksend\nsocksends\nsorta\nsortd\nsoundin\nspace\nspat3d\nspat3di\nspat3dt\nspdist\nsplitrig\nsprintf\nsprintfk\nspsend\nsqrt\nsquinewave\nstatevar\nstix\nstrcat\nstrcatk\nstrchar\nstrchark\nstrcmp\nstrcmpk\nstrcpy\nstrcpyk\nstrecv\nstreson\nstrfromurl\nstrget\nstrindex\nstrindexk\nstring2array\nstrlen\nstrlenk\nstrlower\nstrlowerk\nstrrindex\nstrrindexk\nstrset\nstrsub\nstrsubk\nstrtod\nstrtodk\nstrtol\nstrtolk\nstrupper\nstrupperk\nstsend\nsubinstr\nsubinstrinit\nsum\nsumarray\nsvfilter\nsyncgrain\nsyncloop\nsyncphasor\nsystem\nsystem_i\ntab\ntab2array\ntab2pvs\ntab_i\ntabifd\ntable\ntable3\ntable3kt\ntablecopy\ntablefilter\ntablefilteri\ntablegpw\ntablei\ntableicopy\ntableigpw\ntableikt\ntableimix\ntablekt\ntablemix\ntableng\ntablera\ntableseg\ntableshuffle\ntableshufflei\ntablew\ntablewa\ntablewkt\ntablexkt\ntablexseg\ntabmorph\ntabmorpha\ntabmorphak\ntabmorphi\nta
bplay\ntabrec\ntabrowlin\ntabsum\ntabw\ntabw_i\ntambourine\ntan\ntanh\ntaninv\ntaninv2\ntbvcf\ntempest\ntempo\ntemposcal\ntempoval\ntimedseq\ntimeinstk\ntimeinsts\ntimek\ntimes\ntival\ntlineto\ntone\ntonek\ntonex\ntradsyn\ntrandom\ntranseg\ntransegb\ntransegr\ntrcross\ntrfilter\ntrhighest\ntrigger\ntrigseq\ntrim\ntrim_i\ntrirand\ntrlowest\ntrmix\ntrscale\ntrshift\ntrsplit\nturnoff\nturnoff2\nturnon\ntvconv\nunirand\nunwrap\nupsamp\nurandom\nurd\nvactrol\nvadd\nvadd_i\nvaddv\nvaddv_i\nvaget\nvalpass\nvaset\nvbap\nvbapg\nvbapgmove\nvbaplsinit\nvbapmove\nvbapz\nvbapzmove\nvcella\nvco\nvco2\nvco2ft\nvco2ift\nvco2init\nvcomb\nvcopy\nvcopy_i\nvdel_k\nvdelay\nvdelay3\nvdelayk\nvdelayx\nvdelayxq\nvdelayxs\nvdelayxw\nvdelayxwq\nvdelayxws\nvdivv\nvdivv_i\nvecdelay\nveloc\nvexp\nvexp_i\nvexpseg\nvexpv\nvexpv_i\nvibes\nvibr\nvibrato\nvincr\nvlimit\nvlinseg\nvlowres\nvmap\nvmirror\nvmult\nvmult_i\nvmultv\nvmultv_i\nvoice\nvosim\nvphaseseg\nvport\nvpow\nvpow_i\nvpowv\nvpowv_i\nvpvoc\nvrandh\nvrandi\nvsubv\nvsubv_i\nvtaba\nvtabi\nvtabk\nvtable1k\nvtablea\nvtablei\nvtablek\nvtablewa\nvtablewi\nvtablewk\nvtabwa\nvtabwi\nvtabwk\nvwrap\nwaveset\nwebsocket\nweibull\nwgbow\nwgbowedbar\nwgbrass\nwgclar\nwgflute\nwgpluck\nwgpluck2\nwguide1\nwguide2\nwiiconnect\nwiidata\nwiirange\nwiisend\nwindow\nwrap\nwritescratch\nwterrain\nxadsr\nxin\nxout\nxscanmap\nxscans\nxscansmap\nxscanu\nxtratim\nxyscale\nzacl\nzakinit\nzamod\nzar\nzarg\nzaw\nzawm\nzdf_1pole\nzdf_1pole_mode\nzdf_2pole\nzdf_2pole_mode\nzdf_ladder\nzfilter2\nzir\nziw\nziwm\nzkcl\nzkmod\nzkr\nzkw\nzkwm\n'''.split())\n\nDEPRECATED_OPCODES = set('''\narray\nbformdec\nbformenc\nchanged\ncopy2ftab\ncopy2ttab\nhrtfer\nktableseg\nlentab\nmaxtab\nmintab\npop\npop_f\nptableiw\npush\npush_f\nscalet\nsndload\nsoundout\nsoundouts\nspecaddm\nspecdiff\nspecdisp\nspecfilt\nspechist\nspecptrk\nspecscal\nspecsum\nspectrum\nstack\nsumtab\ntabgen\ntableiw\ntabmap\ntabmap_i\ntabslice\ntb0\ntb0_init\ntb1\ntb10\ntb10_init\ntb11\ntb11_init\ntb12\ntb12_init\ntb13\ntb13_init\ntb14\ntb14_init\ntb15\ntb15_init\ntb1_init\ntb2\ntb2_init\ntb3\ntb3_init\ntb4\ntb4_init\ntb5\ntb5_init\ntb6\ntb6_init\ntb7\ntb7_init\ntb8\ntb8_init\ntb9\ntb9_init\nvbap16\nvbap4\nvbap4move\nvbap8\nvbap8move\nxyin\n'''.split())\n",
"id": "6246720",
"language": "Python",
"matching_score": 0.5208208560943604,
"max_stars_count": 6989,
"path": "pygments/lexers/_csound_builtins.py"
},
{
"content": "\"\"\"Simple Word Counter over the internet!\"\"\"\n",
"id": "10907736",
"language": "Python",
"matching_score": 0.53050696849823,
"max_stars_count": 0,
"path": "simplewc/__init__.py"
},
{
"content": "\"\"\"This is example program using WordCount gRPC service\"\"\"\nfrom typing import Iterable\n\nimport grpc\n\nfrom simplewc.config import INSECURE_HOST, INSECURE_PORT\nfrom simplewc.protos.wc_pb2 import WordCount, WordCountRequest\nfrom simplewc.protos.wc_pb2_grpc import WordCountServiceStub\n\n\ndef print_responses(responses: Iterable[WordCount]):\n \"\"\"Print the stream of response\"\"\"\n responses = list(responses)\n if not responses:\n print(\"\\tNo word found\")\n for r in responses:\n print(f\"\\tAt {r.uri}, word {r.word} appears {r.count} time(s)\")\n\n\ndef run():\n \"\"\"Run Example Program\"\"\"\n channel = grpc.insecure_channel(f\"{INSECURE_HOST}:{INSECURE_PORT}\")\n stub = WordCountServiceStub(channel)\n responses = stub.CountWords(\n WordCountRequest(\n uri=\"https://virtusize.jp\", words=[\"fit\", \"size\", \"virtusize\"]\n )\n )\n print(\"Try to find 3 different words in a URL\")\n print_responses(responses)\n\n print(\"\\n\\nTry to find nothing\")\n responses = stub.CountWords(\n WordCountRequest(uri=\"https://virtusize.jp\", words=[])\n )\n print_responses(responses)\n\n print(\"\\n\\nInaccessible host: non existing https://virtusize.co.jp\")\n try:\n responses = stub.CountWords(\n WordCountRequest(uri=\"https://virtusize.co.jp\", words=[])\n )\n list(responses)\n except grpc.RpcError as e:\n print(\"\\tRPC Error\", e)\n\n print(\"\\n\\nInaccessible host: 127.0.0.1\")\n try:\n responses = stub.CountWords(\n WordCountRequest(uri=\"https://127.0.0.1\", words=[])\n )\n list(responses)\n except grpc.RpcError as e:\n print(\"\\tRPC Error\", e)\n\n print(\"\\n\\nInaccessible host: file:///etc/apt/sources.list\")\n try:\n responses = stub.CountWords(\n WordCountRequest(\n uri=\"file:///etc/apt/sources.list\", words=[\"round\"]\n )\n )\n list(responses)\n except grpc.RpcError as e:\n print(\"\\tRPC Error\", e)\n\n\nif __name__ == \"__main__\":\n run()\n",
"id": "8015682",
"language": "Python",
"matching_score": 2.1118924617767334,
"max_stars_count": 0,
"path": "simplewc/example_client.py"
},
{
"content": "# Generated by the protocol buffer compiler. DO NOT EDIT!\n# source: wc.proto\n\nimport sys\n\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\n\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode(\"latin1\"))\n\n# @@protoc_insertion_point(imports)\n\n_sym_db = _symbol_database.Default()\n\nDESCRIPTOR = _descriptor.FileDescriptor(\n name=\"wc.proto\",\n package=\"\",\n syntax=\"proto3\",\n serialized_options=None,\n serialized_pb=_b(\n '\\n\\x08wc.proto\".\\n\\x10WordCountRequest\\x12\\x0b\\n\\x03uri\\x18\\x01 \\x01(\\t\\x12\\r\\n\\x05words\\x18\\x02 \\x03(\\t\"5\\n\\tWordCount\\x12\\x0c\\n\\x04word\\x18\\x01 \\x01(\\t\\x12\\x0b\\n\\x03uri\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x63ount\\x18\\x03 \\x01(\\r2m\\n\\x10WordCountService\\x12-\\n\\nCountWords\\x12\\x11.WordCountRequest\\x1a\\n.WordCount0\\x01\\x12*\\n\\tCountWord\\x12\\x11.WordCountRequest\\x1a\\n.WordCountb\\x06proto3'\n ),\n)\n\n_WORDCOUNTREQUEST = _descriptor.Descriptor(\n name=\"WordCountRequest\",\n full_name=\"WordCountRequest\",\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name=\"uri\",\n full_name=\"WordCountRequest.uri\",\n index=0,\n number=1,\n type=9,\n cpp_type=9,\n label=1,\n has_default_value=False,\n default_value=_b(\"\").decode(\"utf-8\"),\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR,\n ),\n _descriptor.FieldDescriptor(\n name=\"words\",\n full_name=\"WordCountRequest.words\",\n index=1,\n number=2,\n type=9,\n cpp_type=9,\n label=3,\n has_default_value=False,\n default_value=[],\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR,\n ),\n ],\n extensions=[],\n nested_types=[],\n enum_types=[],\n serialized_options=None,\n is_extendable=False,\n syntax=\"proto3\",\n extension_ranges=[],\n oneofs=[],\n serialized_start=12,\n serialized_end=58,\n)\n\n_WORDCOUNT = _descriptor.Descriptor(\n name=\"WordCount\",\n full_name=\"WordCount\",\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(\n name=\"word\",\n full_name=\"WordCount.word\",\n index=0,\n number=1,\n type=9,\n cpp_type=9,\n label=1,\n has_default_value=False,\n default_value=_b(\"\").decode(\"utf-8\"),\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR,\n ),\n _descriptor.FieldDescriptor(\n name=\"uri\",\n full_name=\"WordCount.uri\",\n index=1,\n number=2,\n type=9,\n cpp_type=9,\n label=1,\n has_default_value=False,\n default_value=_b(\"\").decode(\"utf-8\"),\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR,\n ),\n _descriptor.FieldDescriptor(\n name=\"count\",\n full_name=\"WordCount.count\",\n index=2,\n number=3,\n type=13,\n cpp_type=3,\n label=1,\n has_default_value=False,\n default_value=0,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n serialized_options=None,\n file=DESCRIPTOR,\n ),\n ],\n extensions=[],\n nested_types=[],\n enum_types=[],\n 
serialized_options=None,\n is_extendable=False,\n syntax=\"proto3\",\n extension_ranges=[],\n oneofs=[],\n serialized_start=60,\n serialized_end=113,\n)\n\nDESCRIPTOR.message_types_by_name[\"WordCountRequest\"] = _WORDCOUNTREQUEST\nDESCRIPTOR.message_types_by_name[\"WordCount\"] = _WORDCOUNT\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\n\nWordCountRequest = _reflection.GeneratedProtocolMessageType(\n \"WordCountRequest\",\n (_message.Message,),\n dict(\n DESCRIPTOR=_WORDCOUNTREQUEST,\n __module__=\"wc_pb2\"\n # @@protoc_insertion_point(class_scope:WordCountRequest)\n ),\n)\n_sym_db.RegisterMessage(WordCountRequest)\n\nWordCount = _reflection.GeneratedProtocolMessageType(\n \"WordCount\",\n (_message.Message,),\n dict(\n DESCRIPTOR=_WORDCOUNT,\n __module__=\"wc_pb2\"\n # @@protoc_insertion_point(class_scope:WordCount)\n ),\n)\n_sym_db.RegisterMessage(WordCount)\n\n_WORDCOUNTSERVICE = _descriptor.ServiceDescriptor(\n name=\"WordCountService\",\n full_name=\"WordCountService\",\n file=DESCRIPTOR,\n index=0,\n serialized_options=None,\n serialized_start=115,\n serialized_end=224,\n methods=[\n _descriptor.MethodDescriptor(\n name=\"CountWords\",\n full_name=\"WordCountService.CountWords\",\n index=0,\n containing_service=None,\n input_type=_WORDCOUNTREQUEST,\n output_type=_WORDCOUNT,\n serialized_options=None,\n ),\n _descriptor.MethodDescriptor(\n name=\"CountWord\",\n full_name=\"WordCountService.CountWord\",\n index=1,\n containing_service=None,\n input_type=_WORDCOUNTREQUEST,\n output_type=_WORDCOUNT,\n serialized_options=None,\n ),\n ],\n)\n_sym_db.RegisterServiceDescriptor(_WORDCOUNTSERVICE)\n\nDESCRIPTOR.services_by_name[\"WordCountService\"] = _WORDCOUNTSERVICE\n\n# @@protoc_insertion_point(module_scope)\n",
"id": "5252468",
"language": "Python",
"matching_score": 3.0544240474700928,
"max_stars_count": 0,
"path": "simplewc/protos/wc_pb2.py"
},
{
"content": "# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!\nimport grpc\n\nfrom . import wc_pb2 as wc__pb2\n\n\nclass WordCountServiceStub(object):\n \"\"\"WordCountService services word counting based on WordCountRequest message.\n * Note on caching:\n - The implementation of this service may contain internal caching on\n HTML document.\n - Request multiple word count in a single uri rather than calling\n any services multiple times.\n \"\"\"\n\n def __init__(self, channel):\n \"\"\"Constructor.\n\n Args:\n channel: A grpc.Channel.\n \"\"\"\n self.CountWords = channel.unary_stream(\n \"/WordCountService/CountWords\",\n request_serializer=wc__pb2.WordCountRequest.SerializeToString,\n response_deserializer=wc__pb2.WordCount.FromString,\n )\n\n\nclass WordCountServiceServicer(object):\n \"\"\"WordCountService services word counting based on WordCountRequest message.\n * Note on caching:\n - The implementation of this service may contain internal caching on\n HTML document.\n - Request multiple word count in a single uri rather than calling\n any services multiple times.\n \"\"\"\n\n def CountWords(self, request, context):\n \"\"\"Service each word's occurrence in a certain uri.\n If error happens, it will cut a stream and send gRPC error code with\n detailed message instead of WordCount stream\n \"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details(\"Method not implemented!\")\n raise NotImplementedError(\"Method not implemented!\")\n\n\ndef add_WordCountServiceServicer_to_server(servicer, server):\n rpc_method_handlers = {\n \"CountWords\": grpc.unary_stream_rpc_method_handler(\n servicer.CountWords,\n request_deserializer=wc__pb2.WordCountRequest.FromString,\n response_serializer=wc__pb2.WordCount.SerializeToString,\n )\n }\n generic_handler = grpc.method_handlers_generic_handler(\n \"WordCountService\", rpc_method_handlers\n )\n server.add_generic_rpc_handlers((generic_handler,))\n",
"id": "5244137",
"language": "Python",
"matching_score": 4.34065580368042,
"max_stars_count": 0,
"path": "simplewc/protos/wc_pb2_grpc.py"
},
{
"content": "import time\nfrom concurrent import futures\n\nimport grpc\n\nfrom simplewc import config, exceptions\nfrom simplewc.model import HTMLDocumentModel\nfrom simplewc.protos import wc_pb2_grpc\nfrom simplewc.protos.wc_pb2 import WordCount, WordCountRequest\nfrom simplewc.protos.wc_pb2_grpc import WordCountServiceServicer\nfrom simplewc.storage import get_mongo_db, get_redis_cache\n\n_ONE_DAY_IN_SECONDS = 60 * 60 * 24\n\n\nclass WordCountServicer(WordCountServiceServicer):\n \"\"\"gRPC servicer for WordCountService\"\"\"\n\n def CountWords(self, request: WordCountRequest, context):\n \"\"\"\n API for Word Count\n :param request: gRPC request of `WordCountRequest`\n :param context: gRPC context\n :return:\n * stream of `WordCount`. in a form of Generator\n :exception: cut stream, then `return` grpc error code and grpc error msg\n \"\"\"\n try:\n uri, words = request.uri, request.words\n model = HTMLDocumentModel(uri, get_mongo_db(), get_redis_cache())\n\n for word in words:\n yield WordCount(\n uri=uri, word=word, count=model.count_word(word)\n )\n return\n\n except exceptions.NotAllowedScheme:\n msg = f\"You can only access {config.ALLOWED_PROTOCOLS} protocol\"\n context.set_details(msg)\n context.set_code(grpc.StatusCode.PERMISSION_DENIED)\n return\n\n except exceptions.AccessLocalURI:\n msg = \"You cannot access Local URI\"\n context.set_details(msg)\n context.set_code(grpc.StatusCode.PERMISSION_DENIED)\n return\n\n except exceptions.TooBigResource:\n msg = (\n f\"You can only access less than\"\n f\"{config.MAX_CONTENT_SIZE} bytes document\"\n )\n context.set_details(msg)\n context.set_code(grpc.StatusCode.INVALID_ARGUMENT)\n return\n\n except exceptions.NotReacheableLocation:\n msg = \"We could not reach a server of requested URI\"\n context.set_details(msg)\n context.set_code(grpc.StatusCode.INTERNAL)\n\n except Exception as e:\n msg = \"Internal error occurred\"\n print(e)\n context.set_details(msg)\n context.set_code(grpc.StatusCode.INTERNAL)\n return\n\n\ndef serve_insecure(host_port: str):\n \"\"\"Open Insecure service of `WordCountServicer`\"\"\"\n server = grpc.server(\n futures.ThreadPoolExecutor(max_workers=config.MAX_GRPC_SERVER_THREADS)\n )\n wc_pb2_grpc.add_WordCountServiceServicer_to_server(\n WordCountServicer(), server\n )\n server.add_insecure_port(host_port)\n server.start()\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n server.stop(0)\n",
"id": "6601474",
"language": "Python",
"matching_score": 4.074703216552734,
"max_stars_count": 0,
"path": "simplewc/servicer.py"
},
{
"content": "\"\"\"Represent Data Model Layer\"\"\"\nimport socket\nfrom collections import Counter\nfrom ipaddress import IPv6Address, ip_address\nfrom string import punctuation\nfrom typing import Generator, Union\nfrom urllib.parse import urlparse\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom simplewc.config import ALLOWED_PROTOCOLS, MAX_CONTENT_SIZE\nfrom simplewc.exceptions import (\n AccessLocalURI,\n NotAllowedScheme,\n NotInDocumentStorage,\n NotInResultCacheQuery,\n NotReacheableLocation,\n TooBigResource,\n)\nfrom simplewc.storage import DocumentStorage, QueryCache\n\n\ndef raise_if_not_safe(uri: str):\n \"\"\"\n Check if given URI is pointing publicly available resource\n :param uri: URI to user requested resource\n :return: None\n :raises: `NotAllowedScheme` for unexpected protocol and `AccessLocalURI`\n for local resources\n \"\"\"\n up = urlparse(uri)\n if up.scheme not in ALLOWED_PROTOCOLS:\n raise NotAllowedScheme\n try:\n ip = ip_address(socket.gethostbyname(up.netloc))\n except socket.gaierror:\n raise NotReacheableLocation\n\n if ip.is_link_local:\n raise AccessLocalURI(\"Access to %s is to local resource\" % uri)\n if isinstance(ip, IPv6Address) and ip.is_site_local:\n raise AccessLocalURI(\"Access to %s is to local resource\" % uri)\n if not ip.is_global or ip.is_loopback or ip.is_reserved:\n raise AccessLocalURI(\"Access to %s is to non public resource\" % uri)\n\n\ndef tokenize_html_to_words(\n content: Union[str, bytes]\n) -> Generator[str, None, None]:\n \"\"\"\n Parse HTML content and split into word tokens.\n * Words are case insensitive. IOW, Always lower case.\n * Punctuations in the beginning and the end are stripped out\n - e.g., \"fit.\" is treated as \"fit\"\n * HTML tags are considered as white spaces between words\n - e.g., \"fit</p>\" is considered as \"fit\" and \"</p>\".\n * Tedious work including decoding/encoding is done by BeautifulSoup\n :param content: HTML document\n :return: Each token we find in html document\n \"\"\"\n soup = BeautifulSoup(content, \"html.parser\")\n for tok in soup.prettify().split():\n yield tok.strip(punctuation).lower()\n return\n\n\ndef retrieve_html(uri: str) -> bytes:\n \"\"\"\n Retrieve HTML document in given uri\n :param uri: URI to the HTML document\n :return: HTML document response's content\n \"\"\"\n\n # TODO(KMilhan): Implement retry\n rqg = requests.get(uri, stream=True)\n if int(rqg.headers[\"Content-length\"]) < MAX_CONTENT_SIZE:\n return rqg.content\n\n raise TooBigResource(\n \"%s is too big file to parse\" % rqg.headers[\"Content-length\"]\n )\n\n\nclass HTMLDocumentModel:\n \"\"\"Represents HTML Document and its behaviors\"\"\"\n\n def __init__(\n self, uri: str, doc_store: DocumentStorage, query_cache: QueryCache\n ):\n \"\"\"\n Create HTMLDocumentModel\n :param uri: Where the HTML Document is serviced\n :param doc_store: Document Storage for cache service\n :param query_cache: Query result cache\n \"\"\"\n raise_if_not_safe(uri)\n self.uri = uri\n self.doc_store = doc_store\n self.query_cache = query_cache\n self.get_html = retrieve_html # Define how we retrieve HTML document\n self._local_counter_cache: Counter = None # Local HTML document cache\n\n def count_word(self, word: str) -> int:\n \"\"\"\n Facade for counting word\n :param word: Count the given `word`'s appearance in this HTML document\n :return: An appearance of `word` in this HTML document\n \"\"\"\n word = word.lower()\n try:\n # Try to use cache first. 
And do not extend TTL\n return self.query_cache.get(self.uri, word)\n except NotInResultCacheQuery:\n # Try to use document storage. Update query cache\n self.query_cache.store(\n self.uri, word, self.local_counter_cache[word]\n )\n return self.local_counter_cache[word]\n\n @property\n def local_counter_cache(self) -> Counter:\n \"\"\"\n Returns counter(internal form of HTML document) cache\n * If we can find html doc in Document storage, load it\n * else, get it over the internet and store it in Document storage\n :return: Locally loaded cache of HTML document\n \"\"\"\n if self._local_counter_cache:\n # If we already cached it, early return\n return self._local_counter_cache\n try:\n # Try to use document storage\n self._local_counter_cache = self.doc_store.get(self.uri)\n except NotInDocumentStorage:\n # We failed to query document storage.\n\n # Then, actually access web\n self._local_counter_cache = Counter(\n tokenize_html_to_words(self.get_html(self.uri))\n )\n # Store the result\n self.doc_store.store(self.uri, self._local_counter_cache)\n\n return self._local_counter_cache\n",
"id": "11353869",
"language": "Python",
"matching_score": 4.347553253173828,
"max_stars_count": 0,
"path": "simplewc/model.py"
},
{
"content": "\"\"\"Exceptions for WordCounter\"\"\"\n\n\nclass AccessLocalURI(PermissionError):\n \"\"\"\n User requested Local URI\n * Local file (file:///etc/resolv.conf.d/resolv.conf)\n * Intranet URI (https://cluster.local/dashboard)\n \"\"\"\n\n\nclass NotAllowedScheme(AccessLocalURI):\n \"\"\"\n User requested not expected protocol.\n * Only `https` and `http` are allowed\n * The other protocols are considered \"potentially\" local\n \"\"\"\n\n\nclass NotReacheableLocation(IOError):\n \"\"\"No routing possible\"\"\"\n\n\nclass TooBigResource(IOError):\n \"\"\"User requested resource is too big to count a word\"\"\"\n\n\nclass NotInResultCacheQuery(KeyError):\n \"\"\"We don't have such result in cache\"\"\"\n\n\nclass NotInDocumentStorage(KeyError):\n \"\"\"We don't have such result in document storage\"\"\"\n\n\nclass CannotAccessToRedis(ConnectionError):\n \"\"\"Cannot connect to Redis\"\"\"\n\n\nclass CannotAccessToMongo(ConnectionError):\n \"\"\"Cannot connect to MongoDB\"\"\"\n",
"id": "8503632",
"language": "Python",
"matching_score": 3.2996630668640137,
"max_stars_count": 0,
"path": "simplewc/exceptions.py"
},
{
"content": "\"\"\"Data storage layer\"\"\"\nimport sys\nfrom abc import ABC\nfrom collections import Counter, defaultdict\nfrom datetime import datetime\n\nimport redis\nfrom pymongo import MongoClient\nfrom pymongo.errors import OperationFailure\n\nfrom simplewc.config import (\n CACHE_EXPIRE,\n MONGO_COLLECTION,\n MONGO_DB,\n MONGO_HOST,\n MONGO_PORT,\n MONGO_TTL,\n REDIS_DB,\n REDIS_HOST,\n REDIS_PORT,\n)\nfrom simplewc.exceptions import (\n CannotAccessToMongo,\n CannotAccessToRedis,\n NotInDocumentStorage,\n NotInResultCacheQuery,\n)\n\n\nclass DocumentStorage(ABC):\n \"\"\"Where we store HTML Document\"\"\"\n\n def __init__(self, host: str, auth=None):\n \"\"\"\n :param host: access information\n :param auth: authentication information\n \"\"\"\n self.host = host\n self.auth = auth\n\n def store(self, uri: str, counter: Counter):\n \"\"\"Store html document\"\"\"\n raise NotImplementedError\n\n def get(self, uri: str):\n \"\"\"Get stored html document\"\"\"\n raise NotImplementedError\n\n\nclass QueryCache(ABC):\n \"\"\"Where we store recent result\"\"\"\n\n def __init__(self, host: str, auth=None):\n \"\"\"\n :param host: access information\n :param auth: authentication information\n \"\"\"\n self.host = host\n self.auth = auth\n\n def store(self, uri: str, word: str, count: int):\n \"\"\"Store recent result\"\"\"\n raise NotImplementedError\n\n def get(self, uri: str, word: str) -> int:\n \"\"\"Get stored recent result\"\"\"\n raise NotImplementedError\n\n\nclass MockDocumentStorage(DocumentStorage):\n \"\"\"Pure in-memory mocking document storage for testing purpose\"\"\"\n\n def __init__(self, host: str):\n super(MockDocumentStorage, self).__init__(host)\n self.mock_db = dict()\n\n def store(self, uri: str, counter: Counter):\n self.mock_db[uri] = counter\n\n def get(self, uri: str):\n if uri in self.mock_db:\n return self.mock_db[uri]\n\n raise NotInDocumentStorage\n\n\nclass MockQueryCache(QueryCache):\n \"\"\"Pure in-memory mocking query storage for testing purpose\"\"\"\n\n def __init__(self, host: str):\n super(MockQueryCache, self).__init__(host)\n self.mock_cache = defaultdict(int)\n\n def get(self, uri: str, word: str) -> int:\n if (uri, word) in self.mock_cache:\n return self.mock_cache[(uri, word)]\n\n raise NotInResultCacheQuery\n\n def store(self, uri: str, word: str, count: int):\n self.mock_cache[(uri, word)] = count\n\n\nclass RedisQueryCache(QueryCache):\n \"\"\"Redis as a LRU query cache\"\"\"\n\n def __init__(self, host: str, port: int, db: int, **redis_opt):\n \"\"\"\n Instantiate RedisQueryCache\n :param host: Redis host\n :param port: Redis port\n :param db: Redis DB\n :param redis_opt: Redis additional options such as cert and password\n \"\"\"\n super(RedisQueryCache, self).__init__(host)\n self._pool = redis.ConnectionPool(host=host, port=port, db=db)\n self.redis = redis.Redis(connection_pool=self._pool, **redis_opt)\n self.expire = CACHE_EXPIRE\n\n try:\n self.redis.exists(\"wc_test_val\")\n except ConnectionError:\n raise CannotAccessToRedis\n\n def get(self, uri: str, word: str) -> int:\n \"\"\"\n Get ResultCache from Redis Server\n :param uri: Where HTML document originates\n :param word: A word to count\n :return: Occurrence of `word` in HTML doc at `uri`\n :raise: NotInResultCacheQuery in case we didn't hit cache\n \"\"\"\n cache = self.redis.hget(uri, word)\n if cache is not None:\n return int(cache)\n\n raise NotInResultCacheQuery\n\n def store(self, uri: str, word: str, count: int):\n \"\"\"\n Store cache in RedisQueryCache server. 
The first time it stores document(uri), it sets lifespan of cache.\n Thus, after certain time, all caches in a document must be refreshed together\n :param uri: Where HTML document originates\n :param word: A word to count\n :param count: Occurrence of `word` in HTML doc at `uri` to save\n :return:\n \"\"\"\n if self.redis.exists(uri):\n self.redis.hset(uri, word, str(count))\n return\n\n self.redis.hset(uri, word, str(count))\n self.redis.expire(uri, self.expire)\n\n\nclass MongoDocumentStorage(DocumentStorage):\n \"\"\"MongoDB as a document storage\"\"\"\n\n def __init__(\n self,\n host: str,\n port: int,\n mongo_db_name: str,\n mongo_collection: str,\n mongo_ttl: int,\n **mongo_opt,\n ):\n \"\"\"\n :param host: MongoDB host\n :param port: MongoDB port\n :param mongo_db_name: MongoDB database name\n :param mongo_collection: MongoDB collection name for HTML documents\n :param mongo_ttl: MongoDB document's life span\n :param mongo_opt: Additional option (auth for example) for MongoDB connection\n \"\"\"\n super(MongoDocumentStorage, self).__init__(host)\n self._mongo = MongoClient(host, port, **mongo_opt)\n try:\n self._mongo.server_info()\n self._mongo_db = self._mongo.get_database(mongo_db_name)\n self.collection = self._mongo_db.get_collection(mongo_collection)\n except ConnectionError:\n raise CannotAccessToMongo\n\n try:\n self.collection.create_index(\"added\", expireAfterSeconds=mongo_ttl)\n except OperationFailure:\n print(\n \"Warning: TTL value for MongoDB document set with different value\",\n file=sys.stderr,\n )\n\n @classmethod\n def to_mongo_key(cls, key: str) -> str:\n \"\"\"MongoDB does not support `$` and `.` in key. Converting it to unicode is MongoDB's official recommendation\"\"\"\n return key.replace(\"$\", \"$\").replace(\".\", \".\")\n\n @classmethod\n def to_plain_key(cls, key: str) -> str:\n \"\"\"Revert escaped MongoDB key string to original string\"\"\"\n return key.replace(\"$\", \"$\").replace(\".\", \".\")\n\n @classmethod\n def to_mongo_hash(cls, counter: Counter) -> dict:\n \"\"\"MongoDB does not support `$` and '.' 
in hash key\"\"\"\n return {cls.to_mongo_key(key): counter[key] for key in counter}\n\n @classmethod\n def to_counter(cls, mongo_hash: dict) -> Counter:\n \"\"\"Cast hash from MongoDB data and return it as Python `Counter` object\"\"\"\n c = Counter()\n for key in mongo_hash:\n c[cls.to_plain_key(key)] = mongo_hash[key]\n return c\n\n def store(self, uri: str, counter: Counter):\n \"\"\"\n Save (word-counted) HTML document into MongoDB\n :param uri: Where HTML document originates\n :param counter: HTML document in a form of Counter{Word:str, Occurrence:int}\n :return: None\n \"\"\"\n uri = self.to_mongo_key(uri)\n self.collection.insert_one(\n {\n \"added\": datetime.utcnow(),\n \"uri\": uri,\n \"counter\": self.to_mongo_hash(counter),\n }\n )\n\n def get(self, uri: str) -> Counter:\n \"\"\"\n Get (word-counted) HTML document from MongoDB\n :param uri: Where HTML document originates\n :return: HTML document in a form of Counter{Word:str, Occurrence:int}\n :raise: NotInDocumentStorage when we can't find it in MongoDB\n \"\"\"\n uri = self.to_mongo_key(uri)\n doc = self.collection.find_one({\"uri\": uri})\n if doc:\n return self.to_counter(doc[\"counter\"])\n\n raise NotInDocumentStorage\n\n\n_RQC = None\n_MDS = None\n\n\ndef get_redis_cache():\n \"\"\"Get singleton redis cache instance\"\"\"\n global _RQC\n if _RQC is not None:\n return _RQC\n _RQC = RedisQueryCache(REDIS_HOST, REDIS_PORT, REDIS_DB)\n return _RQC\n\n\ndef get_mongo_db():\n \"\"\"Get singleton mongodb document storage instance\"\"\"\n global _MDS\n if _MDS:\n return _MDS\n _MDS = MongoDocumentStorage(\n MONGO_HOST, MONGO_PORT, MONGO_DB, MONGO_COLLECTION, MONGO_TTL\n )\n return _MDS\n",
"id": "6794112",
"language": "Python",
"matching_score": 3.8055708408355713,
"max_stars_count": 0,
"path": "simplewc/storage.py"
},
{
"content": "\"\"\"Configuration for wordcounter\"\"\"\n\nALLOWED_PROTOCOLS = (\"http\", \"https\")\nMAX_CONTENT_SIZE = 2 ** (10 + 10 + 4) # 16.0 MiB\nMAX_GRPC_SERVER_THREADS = 16\nINSECURE_HOST = \"localhost\"\nINSECURE_PORT = 50001\n\nREDIS_HOST = \"localhost\"\nREDIS_PORT = 6379\nREDIS_DB = 0\nCACHE_EXPIRE = \"600\"\n\nMONGO_HOST = \"localhost\"\nMONGO_PORT = 27017\nMONGO_DB = \"wc_doc_cache\"\nMONGO_COLLECTION = \"wc_doc_collection\"\nMONGO_TTL = 3600\n",
"id": "10214427",
"language": "Python",
"matching_score": 0.7160211205482483,
"max_stars_count": 0,
"path": "simplewc/config.py"
},
{
"content": "\"\"\"Call from CLI\"\"\"\n\n# Start word count service\nfrom simplewc.servicer import serve_insecure\n\n# Test purpose server\nserve_insecure(\"[::]:50001\")\n",
"id": "3763592",
"language": "Python",
"matching_score": 0.3580569922924042,
"max_stars_count": 0,
"path": "simplewc/__main__.py"
},
{
"content": "from collections import Counter\nfrom pathlib import Path\n\nfrom simplewc.model import (\n HTMLDocumentModel,\n retrieve_html,\n tokenize_html_to_words,\n)\n\nhere = Path(__file__).absolute().parent\n\n\ndef test_tokenize_html_to_words():\n \"\"\"Test tokenizing\"\"\"\n with open(here / \"virtusize.html.bytes\", \"rb\") as f:\n \"\"\"testfile has 993 tokens\"\"\"\n assert len(list(tokenize_html_to_words(f.read()))) == 993\n\n assert list(tokenize_html_to_words(\"some str\")) == [\"some\", \"str\"]\n assert list(tokenize_html_to_words(b\"some bytes\")) == [\"some\", \"bytes\"]\n assert list(\n tokenize_html_to_words(b\"some \" b\"\\xed\\x95\\x9c\\xea\\xb5\\xad\\xec\\x96\\xb4\")\n ) == [\"some\", \"한국어\"]\n\n assert list(tokenize_html_to_words(\"fit.</p>\")) == [\"fit\"]\n assert list(tokenize_html_to_words(\"fit</p>\")) == [\"fit\"]\n assert list(tokenize_html_to_words(\"FIT!</p>\")) == [\"fit\"]\n\n\ndef test_get():\n \"\"\"Test web access\"\"\"\n assert retrieve_html(\"https://virtusize.jp\")\n\n\ndef test_creation(mock_doc_storage, mock_query_cache):\n assert (\n HTMLDocumentModel(\n \"https://virtusize.jp\", mock_doc_storage, mock_query_cache\n )\n is not None\n )\n\n\ndef test_mock_query_cache_creation(mock_doc_storage, mock_query_cache):\n model = HTMLDocumentModel(\n \"https://virtusize.jp\", mock_doc_storage, mock_query_cache\n )\n\n fit_query = model.count_word(\"fit\")\n assert fit_query is not None\n assert isinstance(fit_query, int)\n assert (\n model.query_cache.mock_cache[(\"https://virtusize.jp\", \"fit\")]\n == fit_query\n )\n\n\ndef test_mock_document_storage_creation(mock_doc_storage, mock_query_cache):\n model = HTMLDocumentModel(\n \"https://virtusize.jp\", mock_doc_storage, mock_query_cache\n )\n model.count_word(\"fit\")\n assert mock_doc_storage.mock_db[\"https://virtusize.jp\"]\n\n\ndef test_query(mock_doc_storage, mock_query_cache):\n with open(here / \"virtusize.html.bytes\", \"rb\") as f:\n # Ground truth counter\n gtc = Counter(tokenize_html_to_words(f.read()))\n\n model = HTMLDocumentModel(\n \"https://virtusize.jp\", mock_doc_storage, mock_query_cache\n )\n with open(here / \"virtusize.html.bytes\", \"rb\") as f:\n \"\"\"Monkey patch `get_html` method to read local file\"\"\"\n\n model.get_html = lambda x: bytes(f.read())\n assert model.count_word(\"fit\") == gtc[\"fit\"]\n assert (\n model.count_word(\"DOES_NOT_EXIST_STRING\")\n == gtc[\"DOES_NOT_EXIST_STRING\"]\n )\n",
"id": "986056",
"language": "Python",
"matching_score": 2.7546701431274414,
"max_stars_count": 0,
"path": "test/test_model.py"
},
{
"content": "import pytest\n\nfrom simplewc.exceptions import AccessLocalURI, NotAllowedScheme\nfrom simplewc.model import HTMLDocumentModel, raise_if_not_safe\nfrom simplewc.storage import MockDocumentStorage, MockQueryCache\n\nLINK_TO_VERY_BIG_RESOURCE = (\n \"http://ftp.riken.jp/Linux/ubuntu-releases/18.04\"\n \".2/ubuntu-18.04.2-desktop-amd64.iso\"\n)\n\n\ndef test_raise_if_local():\n with pytest.raises(NotAllowedScheme):\n raise_if_not_safe(\"ftp://google.com/index.html\")\n\n with pytest.raises(NotAllowedScheme):\n raise_if_not_safe(\"file:///etc/resolv.conf.d/resolv.conf\")\n\n with pytest.raises(AccessLocalURI):\n raise_if_not_safe(\"https://127.0.0.1/index.html\")\n\n mock_doc_storage = MockDocumentStorage(\"\")\n mock_query_cache = MockQueryCache(\"\")\n\n with pytest.raises(NotAllowedScheme):\n HTMLDocumentModel(\n \"ftp://google.com/index.html\", mock_doc_storage, mock_query_cache\n )\n\n with pytest.raises(NotAllowedScheme):\n HTMLDocumentModel(\n \"file:///etc/resolv.conf.d/resolv.conf\",\n mock_doc_storage,\n mock_query_cache,\n )\n\n with pytest.raises(AccessLocalURI):\n HTMLDocumentModel(\n \"https://127.0.0.1/index.html\", mock_doc_storage, mock_query_cache\n )\n",
"id": "4925781",
"language": "Python",
"matching_score": 4.054433822631836,
"max_stars_count": 0,
"path": "test/test_security.py"
},
{
"content": "import pytest\n\nfrom simplewc.storage import MockDocumentStorage, MockQueryCache\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_doc_storage():\n yield MockDocumentStorage(\"\")\n\n\n@pytest.fixture(scope=\"function\")\ndef mock_query_cache():\n yield MockQueryCache(\"\")\n\n\ndef mds():\n \"\"\"Get mock document storage\"\"\"\n return MockDocumentStorage(\"\")\n\n\ndef mqc():\n \"\"\"Get mock query cache\"\"\"\n return MockQueryCache(\"\")\n",
"id": "570118",
"language": "Python",
"matching_score": 2.4074928760528564,
"max_stars_count": 0,
"path": "test/conftest.py"
}
] | 2.591825 |
funivan | [
{
"content": "# -*- coding: UTF-8 -*-\nfrom __future__ import with_statement\n\n__kupfer_name__ = _(\"Custom urls\")\n__kupfer_sources__ = (\"CustomUrlSource\", )\n__description__ = _(\"Navigate to custom urls from list\")\n__version__ = \"0.0.1\"\n__author__ = \"<NAME> <<EMAIL>>\"\n\nimport gtk\n\nimport os.path\nimport subprocess\nimport re\n\n\nfrom kupfer import config\nfrom kupfer import plugin_support\n\nfrom kupfer.obj import objects\nfrom kupfer.obj.base import Source\nfrom kupfer.obj.helplib import FilesystemWatchMixin\n\nfrom kupfer.objects import UrlLeaf, Leaf, Source\n\nCONFIG_FILENAME = 'custom_urls.cfg'\n\n'''\nAllow user to define own urls.\nExample of configuration file (default file located in ~/.config/kupfer/custom_urls.cfg'):\n\nblog = http://funivan.com\nadmin = http://localhost/admin\ntest = http://test\n\n\n'''\n\n__kupfer_settings__ = plugin_support.PluginSettings(\n {\n \"key\": \"change_file_location\",\n \"label\": _(\"Change file location\"),\n \"type\": bool,\n \"value\": False,\n },\n {\n \"key\" : \"path_to_file\",\n \"label\": _(\"Path to custom config files ( delimiter ; )\"),\n \"type\": str,\n \"value\": \"\",\n },\n)\n\n\n\n\nclass CustomUrlSource (Source, FilesystemWatchMixin):\n appleaf_content_id = \"custom-url\"\n\n def __init__(self):\n Source.__init__(self, name=_('CustomUrlSource'))\n self.loaded = False\n self.items = []\n __kupfer_settings__.connect(\"plugin-setting-changed\", self._refresh_settings);\n\n def _refresh_settings(self, settings, key, value):\n self.loaded = False\n\n def is_dynamic(self):\n return True;\n \n def get_items(self):\n \n self.output_debug(\"Items\")\n if self.loaded: \n self.output_debug('load from cache')\n return self.items\n\n filesToLoad=[]\n\n if __kupfer_settings__[\"change_file_location\"]: \n filesConfig = __kupfer_settings__[\"path_to_file\"]\n filesToLoad = filesConfig.split(';')\n else:\n filesToLoad.append(config.get_config_file(CONFIG_FILENAME))\n \n\n self.items = []\n\n for filePath in filesToLoad:\n if not os.path.isfile(filePath):\n self.output_debug('File does not exist', filePath)\n continue\n \n self.output_debug('loading sources', filePath) \n ins = open( filePath, \"r\" )\n \n for line in ins:\n matchObj = re.match( r'([^=]+)=(.*)$', line.strip(), re.M|re.I)\n if matchObj:\n self.items.append(UrlLeaf(matchObj.group(2).strip(), matchObj.group(1).strip()))\n\n ins.close() \n\n self.output_debug(self.items);\n self.output_debug('mark_for_update');\n self.loaded = True\n return self.items\n \n \n def get_description(self):\n return _(__description__)\n\n def get_gicon(self):\n return self.get_leaf_repr() and self.get_leaf_repr().get_gicon()\n \n def get_icon_name(self):\n return \"web-browser\"\n\n def provides(self):\n yield UrlLeaf\n",
"id": "3822439",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "custom_urls.py"
}
] | 0 |
sfaleron | [
{
"content": "\n\"\"\"Functions that support adding types and building configuration in code.\nAlso see the main ConfigCont class.\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom .types import *\n",
"id": "3448746",
"language": "Python",
"matching_score": 0.7775363326072693,
"max_stars_count": 0,
"path": "api.py"
},
{
"content": "\n\"\"\"All that is needed for common usage.\"\"\"\n\nfrom __future__ import absolute_import\n\n# set up the validators (side effects)\nfrom . import validators\n\nfrom .parser import *\n",
"id": "10729023",
"language": "Python",
"matching_score": 0.8317263126373291,
"max_stars_count": 0,
"path": "__init__.py"
},
{
"content": "\n# ==============================================================================\n# == Copyright 2016 <NAME> ==\n# == ==\n# == Licensed under the Apache License, Version 2.0 (the \"License\"); ==\n# == you may not use this file except in compliance with the License. ==\n# == You may obtain a copy of the License at ==\n# == ==\n# == http://www.apache.org/licenses/LICENSE-2.0 ==\n# == ==\n# == Unless required by applicable law or agreed to in writing, software ==\n# == distributed under the License is distributed on an \"AS IS\" BASIS, ==\n# == WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ==\n# == See the License for the specific language governing permissions and ==\n# == limitations under the License. ==\n# ==============================================================================\n\nfrom math import sin, cos, pi\n\nfrom elements import *\nfrom options import *\n\nCX, CY = CVS_WIDTH/2, CVS_HEIGHT/2\n\ndef do_start_tile(shape):\n if shape == 'q':\n r = TILE_SIDE * 2**.5 / 2\n n = 4\n da = pi/4\n else:\n r = TILE_SIDE / 3**.5\n n = 3\n da = pi/2\n\n return [Point(r*cos(a)+CX, r*sin(a)+CY) for a in [2*pi/n*i-da for i in range(n)]]\n\n# the principle used is the same for both shapes: perpendicular lines are extended some distance.\n# for the square, one from each end; for the triangle, one from the midpoint.\n\ndef extend_line(pt, m, dsq):\n sols = {}\n\n if m is None:\n sols[ 1] = Point(pt.x, pt.y + dsq**.5)\n sols[-1] = Point(pt.x, pt.y - dsq**.5)\n\n else:\n tmp = (dsq / (1+m**2))**.5\n\n x = pt.x + tmp\n\n y = m*(x - pt.x) + pt.y\n\n sols[ 1] = Point(x, y)\n\n x = pt.x - tmp\n\n y = m*(x - pt.x) + pt.y\n\n sols[-1] = Point(x, y)\n\n return sols\n\ndef make_tile(tile, edge, shape):\n p1, p2 = edge\n\n other = (set(tile.vertices) - set(edge)).pop()\n\n parent_side = edge.what_side(other)\n\n # note the slope computed is for the perpendicular\n\n denom = p1.y - p2.y\n\n m = None if abs(denom) < EPS else (p2.x-p1.x) / denom\n\n if shape == 'q':\n sols3 = extend_line(p2, m, TILE_SIDE**2)\n\n if edge.what_side(sols3[1]) != parent_side:\n p3 = sols3[ 1]\n else:\n p3 = sols3[-1]\n\n p4 = Point(p1.x+p3.x-p2.x, p1.y+p3.y-p2.y)\n\n return (p4, p3, p2, p1)\n\n else:\n sols3 = extend_line(centroid(p1, p2), m, .75*TILE_SIDE**2)\n\n p3 = sols3[1] if edge.what_side(sols3[1]) != parent_side else sols3[-1]\n\n return (p3, p2, p1)\n",
"id": "1317692",
"language": "Python",
"matching_score": 3.1089389324188232,
"max_stars_count": 0,
"path": "src/shapes.py"
},
{
"content": "\n# ==============================================================================\n# == Copyright 2016 <NAME> ==\n# == ==\n# == Licensed under the Apache License, Version 2.0 (the \"License\"); ==\n# == you may not use this file except in compliance with the License. ==\n# == You may obtain a copy of the License at ==\n# == ==\n# == http://www.apache.org/licenses/LICENSE-2.0 ==\n# == ==\n# == Unless required by applicable law or agreed to in writing, software ==\n# == distributed under the License is distributed on an \"AS IS\" BASIS, ==\n# == WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ==\n# == See the License for the specific language governing permissions and ==\n# == limitations under the License. ==\n# ==============================================================================\n\nCVS_WIDTH = 1000\nCVS_HEIGHT = 1000\n\nVIEW_WIDTH = 500\nVIEW_HEIGHT = 500\n\nTILE_SIDE = 50\n\nFILL = 'peru'\nEDGE = 'black'\n\nCURSOR = 'saddlebrown'\nHILIGHT_ERR = 'orangered'\nHILIGHT_USR = 'sandybrown'\nHI_N_ACTIVE = 'slategrey'\nSELECTED = 'rosybrown'\n\nCSR_SCL = .7\n\nEPS = 1e-6\nFIXEDPT = 20\n\nSCENE_DIR = 'scenes'\nSCENE_EXT = 'fp'\nSCENE_DESC = 'Foundation Plans'\n\nAPP_TITLE = 'Foundation Planner'\n",
"id": "7470353",
"language": "Python",
"matching_score": 1.4920165538787842,
"max_stars_count": 0,
"path": "src/options.py"
},
{
"content": "\n# ==============================================================================\n# == Copyright 2016 <NAME> ==\n# == ==\n# == Licensed under the Apache License, Version 2.0 (the \"License\"); ==\n# == you may not use this file except in compliance with the License. ==\n# == You may obtain a copy of the License at ==\n# == ==\n# == http://www.apache.org/licenses/LICENSE-2.0 ==\n# == ==\n# == Unless required by applicable law or agreed to in writing, software ==\n# == distributed under the License is distributed on an \"AS IS\" BASIS, ==\n# == WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ==\n# == See the License for the specific language governing permissions and ==\n# == limitations under the License. ==\n# ==============================================================================\n\n# getting these straight is nuttier than I thought. A point/click interface\n# might have been less pain to get going, after all.\ndef do_key(host, e):\n\n host.clear_highlights()\n\n ks = e.keysym\n\n if len(ks) == 1:\n ks = ks.lower()\n\n kc = e.keycode\n ksn = e.keysym_num\n\n shifty = bool(e.state&1)\n\n msgbits = ['key:']\n\n if shifty:\n msgbits.append('shift')\n\n msgbits.append(ks)\n host.log(*msgbits)\n\n state = host.state\n\n # new tile\n # hold shift to keep active tile\n # hold alt key to add debug info to messages\n # otherwise new tile becomes active\n if ks in ('q', 't'):\n newtile = host.new_tile(ks, bool(e.state&4))\n\n if newtile and not shifty:\n host.activate_tile(newtile)\n\n elif ks == 'l':\n host.load_scene()\n\n elif ks == 's':\n host.save_scene()\n\n # edges, ccw\n # hold shift to rotate scene fifteen degrees\n elif ks == 'Left':\n host.prev_edge()\n\n # edges, cw\n # hold shift to rotate scene fifteen degrees\n elif ks == 'Right':\n host.next_edge()\n\n # set active tile to tile across active edge\n # not detected on windows with numlock on\n elif ks.endswith('Insert'):\n\n tile = state['tile']\n\n edge = tile[state['edge']]\n\n newtile = edge.tile2 if edge.tile1 == tile else edge.tile1\n\n if newtile:\n host.activate_tile(newtile)\n\n # center view on active tile\n # hold shift to recenter canvas on the active tile\n elif ks == 'c':\n pass\n\n # exit program\n elif ks == 'Escape':\n f = host.exitfxn\n if callable(f):\n f()\n\n # (un)select active tile\n elif ks == 'space':\n state['tile'].select_toggle()\n\n # delete the tile across active edge,\n # shift deletes selected tiles\n elif ks == 'Delete':\n if shifty:\n host.do_delete_many()\n else:\n host.do_delete_single()\n\n # undo, hold shift to redo\n elif ks == 'BackSpace':\n pass\n\n # show help window\n elif ks in ('h', 'question', 'F1'):\n pass\n\n elif ks in ('plus', 'KP_Add'):\n state['tile'][state['edge']].highlight_toggle()\n\n # fill-in-the-blank debuging/test function, possibly ignored\n # center key on keypad, num lock needs to be off on windows\n elif ks in ('KP_Begin', 'Clear'):\n host.trythis(e.state)\n",
"id": "858980",
"language": "Python",
"matching_score": 2.757202386856079,
"max_stars_count": 0,
"path": "src/commands.py"
},
{
"content": "\n# ==============================================================================\n# == Copyright 2016 <NAME> ==\n# == ==\n# == Licensed under the Apache License, Version 2.0 (the \"License\"); ==\n# == you may not use this file except in compliance with the License. ==\n# == You may obtain a copy of the License at ==\n# == ==\n# == http://www.apache.org/licenses/LICENSE-2.0 ==\n# == ==\n# == Unless required by applicable law or agreed to in writing, software ==\n# == distributed under the License is distributed on an \"AS IS\" BASIS, ==\n# == WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ==\n# == See the License for the specific language governing permissions and ==\n# == limitations under the License. ==\n# ==============================================================================\n\nfrom collections import namedtuple\n\nfrom math import copysign\n\nsign = lambda x: int(copysign(1.0, x))\n\nfrom options import *\n\n_Point = namedtuple('Point', ( 'x', 'y'))\n_Edge = namedtuple( 'Edge', ('p1', 'p2'))\n\ndef scale_to_ints(pt):\n return (int(round(pt.x*2**FIXEDPT, 0)), int(round(pt.y*2**FIXEDPT, 0)))\n\ndef centroid(*pts):\n xs, ys = zip(*pts)\n return Point(sum(xs) / len(pts), sum(ys) / len(pts))\n\ndef scale_sort(p, q):\n\n px, py = scale_to_ints(p)\n qx, qy = scale_to_ints(q)\n\n return (\n ((qx, px) if qx < px else (px, qx)) +\n ((qy, py) if qy < py else (py, qy)) )\n\nclass Point(_Point):\n def __eq__(self, other):\n return scale_to_ints(self) == scale_to_ints(other)\n\n# edge may be a plain pair of Points\ndef edge_print(e):\n return '%.1f,%.1f - %.1f,%.1f' % sum(e, ())\n\nclass Edge(_Edge):\n def __new__(cls, cvs, p1, p2, host):\n self = super(Edge, cls).__new__(cls, p1, p2)\n\n self.active = False\n self.highlighted = False\n\n self.tile1 = None\n self.tile2 = None\n\n self.host = host\n self.cvs = cvs\n\n self.id_ = cvs.create_line(*self, width=3, fill=EDGE, state='hidden')\n\n return self\n\n @staticmethod\n def signature(p1, p2):\n return frozenset(map(scale_to_ints, (p1, p2)))\n\n def update(self):\n active, highlighted = self.active, self.highlighted\n\n if not (active or highlighted):\n self.cvs.itemconfigure(self.id_, state='hidden')\n return\n\n fill = (HI_N_ACTIVE if highlighted else EDGE) if active else HILIGHT_USR\n\n self.cvs.itemconfigure(self.id_, fill=fill, state='normal')\n self.cvs.tag_raise(self.id_)\n\n def activate(self):\n self.active = True\n self.update()\n\n def deactivate(self):\n self.active = False\n self.update()\n\n def highlight_toggle(self):\n self.highlighted = not self.highlighted\n self.update()\n\n def add_tile(self, tile):\n if not self.tile1:\n self.tile1 = tile\n else:\n if not self.tile2:\n self.tile2 = tile\n else:\n raise RuntimeError('Edge is full!')\n\n def remove_tile(self, tile):\n if self.tile1 == tile:\n self.tile1 = self.tile2\n self.tile2 = None\n else:\n if self.tile2 == tile:\n self.tile2 = None\n else:\n raise RuntimeError('Tile not found!')\n\n # only possible when both are None, and the edge is no longer used\n if self.tile1 == self.tile2:\n self.cvs.delete(self.id_)\n self.host.edge_cleanup(self)\n\n def what_side(self, pt):\n p1, p2 = self\n side = (pt.x*p1.y) + (pt.y*p2.x) + (p1.x*p2.y) - (p1.y*p2.x) - (pt.y*p1.x) - (pt.x*p2.y)\n\n if abs(side) < EPS:\n side = 0.0\n\n return sign(side)\n\n # assumes edges are not duplicated (parallel edges that are colinear do not intersect)\n # returns true if edges intersect\n # other edge may be a plain pair of Points\n def intersect_check(self, 
other, debuggery):\n\n host = self.host\n\n if debuggery:\n log = host.log\n log('DEBUGERRY BEGINS')\n log(edge_print(self))\n log(edge_print(other))\n\n p1, p2 = self\n q1, q2 = other\n\n denom = p2.x - p1.x\n\n mp = None if abs(denom) < EPS else (p2.y-p1.y) / denom\n\n denom = q2.x - q1.x\n\n mq = None if abs(denom) < EPS else (q2.y-q1.y) / denom\n\n # if the edges share an endpoint, that's an okay intersection\n if p1 == q1 or p1 == q2 or p2 == q1 or p2 == q2:\n if debuggery:\n log('early exit a')\n log('DEBUGERRY ENDS')\n return False\n\n # both edges are vertical\n if mp is None and mq is None:\n if debuggery:\n log('early exit b')\n log('DEBUGERRY ENDS')\n return False\n\n # neither side is vertical, but they are parallel\n if not mp is None and not mq is None:\n if abs(mp - mq) < EPS:\n if debuggery:\n log('early exit c')\n log('DEBUGERRY ENDS')\n return False\n\n # the lines defined by each edge do intersect, but perhaps not on the edges\n # only one coordinate is necessary\n\n x = ( ( (p2.y*p1.x - p1.y*p2.x)*(q2.x - q1.x) + (q1.y*q2.x - q2.y*q1.x)*(p2.x - p1.x) ) /\n ( (p2.y - p1.y)*(q2.x - q1.x) + (q1.y - q2.y)*(p2.x - p1.x) ) )\n\n y = ( ( (p2.y*p1.x - p1.y*p2.x)*(q1.y - q2.y) + (q1.y*q2.x - q2.y*q1.x)*(p1.y - p2.y) ) /\n ( (p1.x - p2.x)*(q1.y - q2.y) + (q2.x - q1.x)*(p1.y - p2.y) ) )\n\n px = Point(x, y)\n if debuggery:\n log('%.1f,%.1f' % px)\n\n p1x, p2x, p1y, p2y = scale_sort(p1, p2)\n q1x, q2x, q1y, q2y = scale_sort(q1, q2)\n\n x, y = scale_to_ints(px)\n\n if debuggery:\n log(p1x, p1y, p2x, p2y)\n log(q1x, q1y, q2x, q2y)\n log(x, y)\n\n if p1x <= x <= p2x and q1x <= x <= q2x and p1y <= y <= p2y and q1y <= y <= q2y:\n host.highlight_edges(self, other, px)\n if debuggery:\n log('noncompliant intersection detected')\n log('DEBUGERRY ENDS')\n return True\n\n if debuggery:\n log('deemed okay')\n log('DEBUGERRY ENDS')\n\n return False\n\nclass Tile(tuple):\n def __new__(cls, cvs, vertices, edges, host):\n self = super(Tile, cls).__new__(cls, edges)\n\n self.cvs = cvs\n self.host = host\n self.group = None\n self.selected = False\n self.vertices = vertices\n\n for e in self:\n e.add_tile(self)\n\n self.id_ = id1 = cvs.create_polygon(*vertices, **dict(fill=FILL, outline=EDGE, activestipple='gray12'))\n\n # scaled copy, to represent the cursor\n cx, cy = centroid(*vertices)\n\n csrvertices = [Point(CSR_SCL*(x-cx)+cx, CSR_SCL*(y-cy)+cy) for x,y in vertices]\n\n self.csrid = id2 = cvs.create_polygon(*csrvertices, **dict(fill=CURSOR, state='hidden', activestipple='gray12'))\n\n cvs.tag_bind(id1, '<ButtonPress-1>', self.onActivate)\n cvs.tag_bind(id2, '<ButtonPress-1>', self.onActivate)\n\n cvs.tag_bind(id1, '<ButtonPress-3>', self.select_toggle)\n cvs.tag_bind(id2, '<ButtonPress-3>', self.select_toggle)\n\n return self\n\n def select_toggle(self, e=None):\n self.selected = not self.selected\n self.cvs.itemconfigure(self.id_, fill=SELECTED if self.selected else FILL)\n\n def onActivate(self, e):\n state = self.host.state\n\n state['tile'].deactivate()\n state['tile'][state['edge']].deactivate()\n\n state['tile'] = self\n state['edge'] = 0\n\n self[0].activate()\n\n self.activate()\n\n def activate(self):\n self.cvs.itemconfigure(self.csrid, state='normal')\n\n def deactivate(self):\n self.cvs.itemconfigure(self.csrid, state='hidden')\n\n def delete(self):\n self.cvs.delete(self.csrid)\n self.cvs.delete(self.id_)\n\n for e in self:\n e.remove_tile(self)\n\n__all__ = ('Point', 'Edge', 'Tile', 'centroid', 'edge_print')\n",
"id": "5123522",
"language": "Python",
"matching_score": 2.9027233123779297,
"max_stars_count": 0,
"path": "src/elements.py"
},
{
"content": "\n# ==============================================================================\n# == Copyright 2016 <NAME> ==\n# == ==\n# == Licensed under the Apache License, Version 2.0 (the \"License\"); ==\n# == you may not use this file except in compliance with the License. ==\n# == You may obtain a copy of the License at ==\n# == ==\n# == http://www.apache.org/licenses/LICENSE-2.0 ==\n# == ==\n# == Unless required by applicable law or agreed to in writing, software ==\n# == distributed under the License is distributed on an \"AS IS\" BASIS, ==\n# == WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ==\n# == See the License for the specific language governing permissions and ==\n# == limitations under the License. ==\n# ==============================================================================\n\nimport Tkinter as tk\nimport Pmw\n\nfrom commands import do_key\nfrom elements import *\nfrom options import *\nfrom shapes import *\nfrom ayeoh import *\n\nimport tkFileDialog\n\nfrom time import strftime, localtime, time\n\nimport inspect, sys, ctypes\n\nimport os.path as osp\n\nfileDlgOpts = dict(initialdir=SCENE_DIR, defaultextension='.'+SCENE_EXT, filetypes=((SCENE_DESC, '*.'+SCENE_EXT),('All Files', '*')))\n\ndef make_rawedges(vertices):\n return [(vertices[i], vertices[i+1]) for i in range(len(vertices)-1)] + [(vertices[-1], vertices[0])]\n\nclass StatusItem(tk.Frame):\n def __init__(self, parent, label, **kwargs):\n tk.Frame.__init__(self, parent, **kwargs)\n lbl = tk.Label(self, text=label, font='sans 10')\n lbl.grid(row=0, column=0)\n self.field = tk.Label(self, text='--', font='sans 10')\n self.field.grid(row=0, column=1)\n\n def update(self, val):\n self.field['text'] = str(val)\n\nclass PolyTiles(tk.Frame):\n def buildGUI(self, parent):\n self.scrfr = \\\n scrfr = Pmw.ScrolledCanvas(parent, usehullsize=True,\n hull_width = VIEW_WIDTH, hull_height = VIEW_HEIGHT,\n canvas_width = CVS_WIDTH, canvas_height = CVS_HEIGHT )\n\n self.cvs = \\\n cvs = scrfr.component('canvas')\n\n cvs.bind('<KeyRelease>', lambda e: do_key(self, e))\n cvs.focus_set()\n\n scrfr.grid(row=0, column=0, sticky='nsew')\n\n self.status = \\\n status = {}\n\n fr = tk.Frame(parent)\n\n status[ 'q'] = w = StatusItem(fr, 'Squares:')\n w.grid(row=0, column=0, sticky='w')\n\n status[ 't'] = w = StatusItem(fr, 'Triangles:', padx=6)\n w.grid(row=0, column=1, sticky='w')\n\n status['ee'] = w = StatusItem(fr, 'Exposed Edges:', padx=6)\n w.grid(row=0, column=2, sticky='w')\n\n status[ 'g'] = w = StatusItem(fr, 'Group:')\n w.grid(row=0, column=3, sticky='e')\n\n fr.grid(row=1, column=0, sticky='ew')\n\n fr.columnconfigure(2, weight=1)\n\n def trythis(self, modifiers=0):\n pass\n\n def save_scene(self, fn=None):\n if fn is None:\n fn = tkFileDialog.asksaveasfilename(**fileDlgOpts)\n self.present()\n\n if not fn:\n self.log('save aborted')\n return\n\n self.log('save: ' + fn)\n\n tiles = self.state['tiles']\n\n with open(fn, 'wb') as f:\n writefile(f, zip(['t']*len(tiles), tiles))\n\n def itemproc(self, id_, *args, **kwargs):\n if id_ == 't':\n vertices = args[0]\n rawedges = make_rawedges(vertices)\n self.add_tile(vertices, rawedges)\n\n def load_scene(self, fn=None):\n if fn is None:\n fn = tkFileDialog.askopenfilename(**fileDlgOpts)\n self.present()\n\n if not fn:\n self.log('load aborted')\n return\n\n self.log('load: ' + fn)\n state = self.state\n\n for t in state['tiles']:\n t.delete()\n\n state.update(tiles=set(), edges={}, q=0, t=0)\n\n with open(fn, 'rb') as f:\n readfile(f, 
self.itemproc)\n\n tile = state['tiles'].copy().pop()\n state['tile'] = tile\n state['edge'] = 0\n\n tile[0].activate()\n\n tile.activate()\n\n def new_tile(self, shape, debuggery):\n state = self.state\n\n tile = state['tile']\n\n edge = tile[state['edge']]\n\n if edge.tile2:\n self.log('edge full!')\n return None\n\n vertices = make_tile(tile, edge, shape)\n rawedges = make_rawedges(vertices)\n\n for e1 in rawedges:\n if not Edge.signature(*e1) in state['edges']:\n for e2 in state['edges'].itervalues():\n if e2.intersect_check(e1, debuggery):\n self.log('overlap!')\n return None\n\n newtile = self.add_tile(vertices, rawedges)\n\n self.cvs.tag_raise(edge.id_)\n\n return newtile\n\n def add_tile(self, vertices, rawedges):\n st = self.state\n\n tile = Tile(self.cvs, vertices, self.make_edges(rawedges), self)\n st['tiles'].add(tile)\n\n shape = 'q' if len(vertices) == 4 else 't'\n\n st[shape] += 1\n self.status[shape].update(st[shape])\n\n self.scrfr.resizescrollregion()\n\n return tile\n\n def activate_tile(self, newtile):\n state = self.state\n\n tile = state['tile']\n\n edge = tile[state['edge']]\n\n state['tile'] = newtile\n\n tile.deactivate()\n newtile.activate()\n\n for i in range(len(newtile)):\n if newtile[i] == edge:\n state['edge'] = i\n\n def add_edge(self, p1, p2):\n\n edges = self.state['edges']\n sign = Edge.signature(p1, p2)\n\n # order of points may be reversed\n if sign in edges:\n e = edges[sign]\n\n else:\n e = Edge(self.cvs, p1, p2, self)\n edges[sign] = e\n\n return e\n\n def edge_cleanup(self, e):\n\n edges = self.state['edges']\n sign = Edge.signature(*e)\n\n edges.pop(sign)\n\n def prev_edge(self):\n state = self.state\n\n edges = state['tile']\n i = state['edge']\n\n edges[i].deactivate()\n\n state['edge'] = \\\n i = (i-1) % len(edges)\n\n edges[i].activate()\n\n def next_edge(self):\n state = self.state\n\n edges = state['tile']\n i = state['edge']\n\n edges[i].deactivate()\n\n state['edge'] = \\\n i = (i+1) % len(edges)\n\n edges[i].activate()\n\n def make_edges(self, rawedges):\n return [self.add_edge(*i) for i in rawedges]\n\n def do_delete_single(self):\n state = self.state\n tile = state['tile']\n edge = tile[state['edge']]\n\n rmtile = edge.tile2 if edge.tile1 == tile else edge.tile1\n\n if rmtile:\n if len(tile) == 4:\n shape = 'q'\n else:\n shape = 't'\n\n state[shape] -= 1\n\n self.status[shape].update(state[shape])\n\n state['tiles'].remove(rmtile)\n rmtile.delete()\n\n else:\n self.log('no tile to delete')\n\n def do_delete_many(self):\n state = self.state\n\n alltiles = state['tiles']\n rmtiles = [t for t in alltiles if t.selected]\n\n if len(alltiles) == len(rmtiles):\n self.log('at least one tile must remain!')\n return\n\n oh_noes = False\n\n for tile in rmtiles:\n if tile == state['tile']:\n self.log('oh noes!')\n oh_noes = True\n tile[state['edge']].deactivate()\n\n if len(tile) == 4:\n shape = 'q'\n else:\n shape = 't'\n\n state[shape] -= 1\n\n alltiles.remove(tile)\n tile.delete()\n\n if oh_noes:\n tile = alltiles.copy().pop()\n state['tile'] = tile\n state['edge'] = 0\n\n tile[0].activate()\n\n tile.activate()\n\n self.status['q'].update(state['q'])\n self.status['t'].update(state['t'])\n\n # e1, e2 may be a plain pair of Points\n def highlight_edges(self, e1, e2, pt):\n\n cvs = self.cvs\n hilites = self.highlights\n\n if hilites[0]:\n cvs.delete(hilites[0])\n cvs.delete(hilites[1])\n cvs.delets(hilites[2])\n\n hilites[0] = cvs.create_line(*e1, width=3, fill=HILIGHT_ERR)\n hilites[1] = cvs.create_line(*e2, width=3, fill=HILIGHT_ERR)\n hilites[2] 
= cvs.create_oval(pt.x-4, pt.y-4, pt.x+4, pt.y+4, width=0, fill=HILIGHT_ERR)\n\n def clear_highlights(self):\n hilites = self.highlights\n\n if hilites[0] is None:\n return\n\n self.cvs.delete(hilites[0])\n self.cvs.delete(hilites[1])\n self.cvs.delete(hilites[2])\n\n hilites[0] = hilites[1] = hilites[2] = None\n\n def log(self, *msgitems):\n msg = ' '.join(map(str, msgitems))\n\n s = time()-self.t0\n h, s = divmod(s, 3600)\n m, s = divmod(s, 60)\n\n # as per https://docs.python.org/2/library/inspect.html#the-interpreter-stack\n caller = inspect.stack()[1][0]\n try:\n info = inspect.getframeinfo(caller)\n finally:\n del caller\n\n logentry = '%d:%02d:%02.1f [%s/%s:%d] %s' % ( h, m, s,\n osp.basename(info.filename), info.function, info.lineno, msg )\n\n print logentry\n\n # set the input focus to the window containing the instance\n # only implemented on windows so far\n def present(self):\n if sys.platform == 'win32':\n ctypes.windll.user32.SetFocus(self.native_toplevel)\n\n\n # shape0 is \"q\", \"t\" or \"l\" to load from a file\n def __init__(self, parent, shape0, exitfxn=None):\n tk.Frame.__init__(self)\n\n self.t0 = time()\n self.log('PolygonTiles starting up..')\n with open(osp.join('lib', 'release'), 'rb') as f:\n self.log('Release: ' + f.read().rstrip())\n self.log('The timestamp is: ' + strftime('%Y.%m.%d.%H.%M', localtime(self.t0)))\n self.log('Platform reported as: ' + sys.platform)\n self.log('initial tile: ' + shape0)\n\n self.native_toplevel = self.winfo_toplevel().winfo_id()\n\n self.buildGUI(parent)\n\n self.exitfxn = exitfxn\n\n self.rowconfigure(0, weight=1)\n self.columnconfigure(0, weight=1)\n\n self.highlights = [None, None, None]\n\n self.state = {\n 'tiles' : set(), 'edges' : {}, 'q': 0,\n 'tile' : None, 'edge' : 0, 't': 0,\n }\n\n if shape0 == 'l':\n self.load_scene()\n\n else:\n vertices = do_start_tile(shape0)\n rawedges = make_rawedges(vertices)\n\n self.state['tile'] = tile0 = self.add_tile(vertices, rawedges)\n\n tile0[0].activate()\n\n tile0.activate()\n",
"id": "11895308",
"language": "Python",
"matching_score": 4.096843719482422,
"max_stars_count": 0,
"path": "src/polytiles.py"
},
{
"content": "# ==============================================================================\n# == Copyright 2016 <NAME> ==\n# == ==\n# == Licensed under the Apache License, Version 2.0 (the \"License\"); ==\n# == you may not use this file except in compliance with the License. ==\n# == You may obtain a copy of the License at ==\n# == ==\n# == http://www.apache.org/licenses/LICENSE-2.0 ==\n# == ==\n# == Unless required by applicable law or agreed to in writing, software ==\n# == distributed under the License is distributed on an \"AS IS\" BASIS, ==\n# == WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ==\n# == See the License for the specific language governing permissions and ==\n# == limitations under the License. ==\n# ==============================================================================\n\nimport os\nimport os.path as osp\n\nfrom src import PolyTiles, SCENE_DIR, APP_TITLE\n\nd = osp.dirname(__file__)\nif d:\n os.chdir(d)\n\nif not osp.exists(SCENE_DIR):\n os.mkdir(SCENE_DIR)\n\n\nimport Tkinter as tk\n\nimport Pmw\n\nchoice = set()\n\ndef set_choice(e):\n ch = e.keysym\n top = e.widget\n\n choice.add(ch)\n top.quit()\n\ndef buildGUI(top):\n\n top['borderwidth'] = 8\n\n lbl = tk.Label(top, text='Choose starting tile:', font='Sans 14 underline')\n lbl.grid(row=0, column=0, columnspan=2, sticky='ew')\n\n lbl = tk.Label(top, text='Q: square', font='Sans 12')\n lbl.grid(row=1, column=0, sticky='ew')\n\n lbl = tk.Label(top, text='T: triangle', font='Sans 12')\n lbl.grid(row=1, column=1, sticky='ew')\n\n lbl = tk.Label(top, text='L: load scene', font='Sans 12')\n lbl.grid(row=2, column=0, columnspan=2, sticky='ew')\n\n top.resizable(False, False)\n\n top.bind('<Escape>', lambda e: top.quit())\n top.bind('q', set_choice)\n top.bind('t', set_choice)\n top.bind('l', set_choice)\n\ntop = tk.Tk()\ntop.title('Starting Tile')\nbuildGUI(top)\ntop.mainloop()\ntop.destroy()\n\nif choice:\n top = Pmw.initialise(tk.Tk())\n top.title(APP_TITLE)\n app = PolyTiles(top, choice.pop(), top.quit)\n app.grid(row=0, column=0, sticky='nsew')\n\n top.rowconfigure(0, weight=1)\n top.columnconfigure(0, weight=1)\n\n # if the user selected load, but aborted, then just quit\n if app.state['tiles']:\n app.present()\n top.mainloop()\n",
"id": "3041492",
"language": "Python",
"matching_score": 1.7282347679138184,
"max_stars_count": 0,
"path": "polytiles.pyw"
},
{
"content": "\nimport os.path as osp\nimport subprocess\nimport zipfile\nimport os\n\nif os.name != 'nt':\n print \"Bundling probably isn't even necessary/useful on this system, but\"\n print \"we'll try it your way. It's your fault if it blows up, though!\"\n print 'Resuming after a short delay..'\n import time\n time.sleep(2)\n\nos.chdir(osp.join(osp.dirname(__file__), '..'))\n\nif not osp.exists('bundles'):\n os.mkdir('bundles')\n\nfn = osp.join(osp.dirname(__file__), '..', 'lib', 'release')\n\nwith open(fn, 'rb') as f:\n n = int(f.read().rstrip().split()[0])\n\nbase = osp.join('bundles', 'PolygonTiles_r' + str(n))\nlogfn = base + '.log'\nzipfn = base + '.zip'\n\nif osp.exists(zipfn):\n print 'Bundle %s exists. If replacement is desired, you must remove it.' % (zipfn,)\n raise SystemExit(1)\n\nprint 'calling pyinstaller. output is saved to: ' + logfn\n\nwith open(logfn, 'w') as f:\n subprocess.check_call(['pyinstaller', '--noconfirm', 'bundle.spec'], stderr=f)\n\nprint 'pyinstaller complete'\n\nprint 'tidying up..'\n\nsrc = osp.join('dist', 'polytiles')\n\nprint 'hiding the mess..'\n\nokfiles = set(['LICENSE', 'README.rst'])\n\nos.mkdir(osp.join(src, 'messy'))\n\nfor file_ in os.listdir(src):\n path = osp.join(src, file_)\n if osp.isdir(path):\n continue\n if file_ in okfiles:\n continue\n\n os.rename(path, osp.join(src, 'messy', file_))\n\nos.rename(osp.join(src, 'tcl'), osp.join(src, 'messy', 'tcl'))\nos.rename(osp.join(src, 'tk'), osp.join(src, 'messy', 'tk'))\n\nprint 'mess hidden'\n\nprint 'cleanup complete'\n\n# no messages, as it's safe and fast\nwith open(osp.join(src, 'polytiles.bat'), 'w') as f:\n f.write('@echo off\\n')\n f.write('cd %~dp0\\n')\n f.write('messy\\\\pgt_exec.exe\\n')\n\nprint 'creating zipfile..'\n\nzipfd = zipfile.ZipFile(zipfn, 'w')\n\nfor path, dirs, files in os.walk(src):\n for fn in files:\n pn = osp.join(path, fn)\n zipfd.write(pn, osp.join('PolygonTiles_r'+str(n), osp.relpath(pn, 'dist/polytiles/')))\n\nzipfd.close()\n\nprint 'zipfile complete'\n\nprint \"that's all! enjoy!\"\n",
"id": "7690341",
"language": "Python",
"matching_score": 1.727198839187622,
"max_stars_count": 0,
"path": "devbin/bundle.py"
},
{
"content": "\n# to restore the previous release specification, simply checkout lib/release\n# from the git repository.\n\n# the release specification is logged when the program launches, so these\n# details can help with troubleshooting. If you make significant changes\n# to the code and release the result, others will be able to track down any\n# problems much more effectively by referencing this.\nTAG = 'by sfaleron'\n\nimport os.path as osp\n\nfrom time import strftime, localtime, time\n\nfn = osp.join(osp.dirname(__file__), '..', 'lib', 'release')\n\nwith open(fn, 'rb') as f:\n n = int(f.read().rstrip().split()[0]) + 1\n\nspec = '%d %s %s' % (n, strftime('%Y%m%d', localtime(time())), TAG)\n\nprint 'new release specification:'\nprint spec\n\nwith open(fn, 'wb') as f:\n f.write(spec+'\\n')\n",
"id": "6982655",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "devbin/inc_release.py"
},
{
"content": "\n\"\"\"Direct access is not intended; instead use the api module.\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom .errors import *\n\nfrom collections import OrderedDict\n\ntry:\n basestring\nexcept NameError:\n basestring = str\n\n\n# \"l\" is implicit\ncontainers = {}\n\ntypes = OrderedDict()\n\ndef add_container(key, constructor, additem=None):\n \"\"\"Add a container type. The container is considered immutable if additem is omitted or None.\n Raises ConfigTypeError if the type is invalid or additem isn't a method.\"\"\"\n if '_' in key:\n raise ConfigTypeError('container name may not contain underscores')\n\n if key in ('l',):\n raise ConfigTypeError('may not override a reserved container')\n\n if additem:\n cont = constructor()\n if not hasattr(cont, additem) or not callable(getattr(cont, additem)):\n raise ConfigTypeError('\"%s\" not a method of the container' % (additem,))\n\n containers[key] = (constructor, additem)\n\ndef has_container(s):\n \"\"\"True if passed a recognized container \"\"\"\n return s == 'l' or s in containers\n\ndef add_type(s, validator):\n \"\"\"Add a type. May override most existing types.\n Raises ConfigValueError if type is reserved or invalid,\n and ConfigTypeError if it's not a string.\n The validator is not, umm, validated.\"\"\"\n if not isinstance(s, basestring):\n raise ConfigTypeError('type must be a string')\n\n if '_' in s:\n raise ConfigValueError('type name may not contain underscores')\n\n if s in ():\n raise ConfigTypeError('may not override a reserved type')\n\n types[s] = validator\n\ndef get_type(s):\n \"\"\"For chaining/inheritence.\n Raises ConfigKeyError if the type is not recognized.\"\"\"\n if not s in types:\n raise ConfigKeyError('unrecognized item type \"%s\"' % (s,))\n\n return types[s]\n\nadd_container('t', tuple)\nadd_container('f', frozenset )\nadd_container('s', set, 'add')\n\n\nclass TypeHelper(object):\n def __init__(self, s):\n if s is None:\n cont_type = None\n item_type = None\n else:\n if not isinstance(s, basestring):\n raise ConfigTypeError('type must be a string')\n\n if '_' in s:\n cont_type, item_type = s.split('_')\n else:\n cont_type = None\n item_type = s\n\n if cont_type and not cont_type in containers and cont_type != 'l':\n raise ConfigTypeError('unrecognized container type \"%s\"' % (cont_type,))\n\n if not item_type in types and item_type not in ('str', None):\n raise ConfigTypeError('unrecognized item type \"%s\"' % (item_type,))\n\n self.cont_type = cont_type\n self.item_type = item_type\n\n if cont_type and cont_type != 'l':\n self.constructor, self.additem = containers[cont_type]\n\n def is_container(self):\n return bool(self.cont_type)\n\n # always called on finalized containers\n def add_to_container(self, cc, key, value):\n if self.cont_type != 'l':\n if self.additem is None:\n raise ConfigTypeError('container \"%s\" is immutable' % (key,))\n else:\n getattr(cc[key], self.additem)(value)\n else:\n cc[key].append(value)\n\n def finalize_container(self, cc, key):\n if self.cont_type != 'l':\n OrderedDict.__setitem__(cc, key, self.constructor(cc.is_pending[key]))\n else:\n OrderedDict.__setitem__(cc, key, cc.is_pending[key])\n\n del cc.is_pending[key]\n\ndef get_helper(s):\n return TypeHelper(s)\n\ndef is_container(s):\n \"\"\"True if type is a container.\n ConfigTypeError is raised if the type is not recognized.\"\"\"\n if s == '_child':\n return False\n\n return get_helper(s).is_container()\n\ndef has_type(s):\n \"\"\"Checks that the container (if present) and item components are 
both recognized.\n Raises ConfigTypeError if argument isn't a string.\"\"\"\n if not isinstance(s, basestring):\n raise ConfigTypeError('type must be a string')\n\n try:\n get_helper(s)\n\n except ConfigTypeError:\n return False\n\n else:\n return True\n\ndef validate(value, whatis):\n if whatis is None:\n return value\n else:\n return types[whatis](value)\n\n\n__all__ = ('has_type', 'add_type', 'get_type', 'is_container', 'has_container', 'add_container')\n",
"id": "7151760",
"language": "Python",
"matching_score": 4.249503135681152,
"max_stars_count": 0,
"path": "types.py"
},
{
"content": "\nfrom __future__ import absolute_import\n\nfrom .types import *\n# not included with a wildcard import\nfrom .types import validate, get_helper\n\nfrom .errors import *\n\nfrom collections import OrderedDict\n\n\nclass ConfigCont(OrderedDict):\n def __init__(self, parent=None):\n self.whatis = {}\n self.is_pending = {}\n self.set_parent(parent)\n\n OrderedDict.__init__(self)\n\n def add_child(self, key, cc):\n \"\"\"Adds a subsection to the configuration.\n Raises ConfigTypeError if object is not a ConfigCont,\n and ConfigKeyError if the key is in use.\"\"\"\n if not isinstance(cc, ConfigCont):\n raise ConfigTypeError('not a configuration container')\n\n if key in self.whatis:\n raise ConfigKeyError('cannot overwrite existing element ' + key)\n\n self.whatis[key] = '_child'\n OrderedDict.__setitem__(self, key, cc)\n\n def has_key(self, key):\n \"\"\"True if the element is declared, whether or not it's been defined\"\"\"\n return key in self.whatis\n\n # element must be declared\n def _add_to_or_set_element(self, key, valin, cont_expected):\n if self.has_key(key):\n helper = get_helper(self.whatis[key])\n\n valout = validate(valin, helper.item_type)\n\n if helper.is_container():\n if not cont_expected:\n raise ConfigKeyError('cannot overwrite existing container')\n\n if key in self:\n helper.add_to_container(self, key, valout)\n else:\n self.is_pending[key].append(valout)\n else:\n if cont_expected:\n raise ConfigKeyError('configuration element is not a container')\n\n OrderedDict.__setitem__(self, key, valout)\n\n else:\n raise ConfigKeyError('configuration element is not declared')\n\n def __setitem__(self, key, value):\n \"\"\"Item must be be declared. ConfigKeyError is raised if it isn't, it's a container,\n or it's a configuration container. ConfigValueError is raised if validation fails.\"\"\"\n if not self.has_key(key):\n raise ConfigKeyError('configuration element is not declared')\n\n if self.whatis[key] == '_child':\n raise ConfigKeyError('cannot overwrite existing configuation container ' + key)\n\n self._add_to_or_set_element(key, value, False)\n\n def add_to_container(self, key, value):\n \"\"\"Container must be be declared. ConfigKeyError is raised if it isn't, or it isn't a container.\n ConfigValueError is raised if validation fails.\"\"\"\n\n self._add_to_or_set_element(key, value, True)\n\n def add_container(self, key, whatis):\n \"\"\"Declare and define a new container. Raises ConfigKeyError if key is in use,\n ConfigTypeError if it isn't a container type.\"\"\"\n\n if key in self.whatis:\n raise ConfigKeyError('cannot overwrite existing container or item ' + key)\n\n if not is_container(whatis):\n raise ConfigTypeError('type %s is not a container' % (whatis,))\n\n # start out as a list, convert to final type when its items are defined\n self.is_pending[key] = []\n self.whatis[key] = whatis\n\n def finalize_container(self, key):\n \"\"\"Finalize a container; containers become their intended type. ConfigKeyError is\n raised if the key is not found among the pending containers.\"\"\"\n if not key in self.is_pending:\n raise ConfigKeyError('pending container %s not found' % (key,))\n\n get_helper(self.whatis[key]).finalize_container(self, key)\n\n def add_item(self, key, whatis):\n \"\"\"Declares a new item. 
Raises ConfigKeyError if key is in use,\n ConfigTypeError if it isn't an item type.\"\"\"\n\n if key in self.whatis:\n raise ConfigKeyError('cannot overwrite existing container or item ' + key)\n\n if is_container(whatis):\n raise ConfigTypeError('%s is not an item' % (key,))\n\n self.whatis[key] = whatis\n\n def add_untyped(self, key, value):\n \"\"\"Convenience method for expanding the configuration at runtime.\n Raises ConfigKeyError if key is in use.\"\"\"\n\n if key in self.whatis:\n raise ConfigKeyError('cannot overwrite existing container or item ' + key)\n\n self.whatis[key] = None\n self[key] = value\n\n def set_parent(self, parent):\n \"\"\"Supports pruning and grafting of configuration trees.\n Raises ConfigTypeError if object is not a ConfigCont or None.\"\"\"\n if not (parent is None or isinstance(parent, ConfigCont)):\n raise ConfigTypeError('not a configuration container')\n\n self.parent = parent\n\n def check_completeness(self, path='root'):\n \"\"\"Raises ConfigIncompleteError if any elements are not defined. Recurses into\n child containere.\"\"\"\n s = set(self.whatis) - set(self)\n\n if s:\n raise ConfigIncompleteError('undefined elements remain: %s: %s' % (path, ','.join(s)))\n\n for k,v in self.iteritems():\n if self.whatis[k] == '_child':\n v.check_completeness('%s:%s' % (path, k))\n\n__all__ = ('ConfigCont',)\n",
"id": "9940685",
"language": "Python",
"matching_score": 2.6357669830322266,
"max_stars_count": 0,
"path": "configcont.py"
},
{
"content": "\n\"\"\"Direct access is not intended; instead use the api module.\"\"\"\n\n# validators return the parsed/converted value on success, and raises\n# ConfigValueError otherwise.\n\nfrom __future__ import absolute_import\n\nfrom .errors import ConfigTypeError, ConfigValueError\n\nfrom .types import add_type\n\ntry:\n basestring\nexcept NameError:\n basestring = str\n\ntry:\n intTypes = (int, long)\nexcept NameError:\n intTypes = int\n\n\ndef validate_string(s):\n \"\"\"stringiness is obligatory\"\"\"\n if not isinstance(s, basestring):\n raise ConfigTypeError('not a string')\n\n return s\n\ndef no_validation(s):\n \"\"\"anything goes\"\"\"\n return s\n\ndef validate_boolean(s):\n \"\"\"value may be \"true\" or \"false\", evaluates to Python boolean values True and False\"\"\"\n\n if isinstance(s, bool):\n return s\n\n if s == 'true':\n return True\n\n if s == 'false':\n return False\n\n if isinstance(s, basestring):\n raise ConfigValueError('boolean value must be \"true\" or \"false\".')\n else:\n raise ConfigTypeError('boolean value must be \"true\" or \"false\".')\n\n\nimport re\nnotdigit_r = re.compile('[^0-9]')\n\ndef validate_integer(s):\n \"\"\"digits and negative sign only. will not convert a decimal value!\"\"\"\n\n if isinstance(s, intTypes):\n return s\n\n if not isinstance(s, basestring):\n raise ConfigTypeError('integer value may contain only digits and a leading negative sign')\n\n negate = s.startswith('-')\n if negate:\n s = s[1:]\n\n if notdigit_r.search(s):\n raise ConfigValueError('integer value may contain only digits and a leading negative sign')\n\n try:\n return (-1 if negate else 1) * int(s)\n\n except ValueError as e:\n raise ConfigValueError(e)\n\n\ndef validate_float(s):\n \"\"\"passed straight into float() built-in function\"\"\"\n try:\n return float(s)\n\n except TypeError as e:\n raise ConfigTypeError(e)\n\n except ValueError as e:\n raise ConfigValueError(e)\n\n\nimport os.path as osp\nimport os, tempfile\n\n# path validation checks for any exception. typeerror and ioerror are observed,\n# but it isn't clear to me what may be expected in general.\n\ndef validate_path(s):\n \"\"\"leading part of path must exist, and the last element be a valid directory\n entry. 
symbolic links are followed\"\"\"\n\n if not isinstance(s, basestring):\n raise ConfigTypeError('not a string')\n\n base, fn = osp.split(s)\n\n if base:\n try:\n if not osp.exists(base):\n raise ConfigValueError('directory not found')\n except:\n raise ConfigValueError('invalid directory')\n\n tmpd = tempfile.mkdtemp()\n\n try:\n tmpf = osp.join(tmpd, fn)\n open(tmpf, 'w').close()\n os.remove(tmpf)\n\n except:\n raise ConfigValueError('invalid filename')\n\n finally:\n os.rmdir(tmpd)\n\n return s\n\n\ndef validate_existing_path(s):\n \"\"\"an existing directory entry (file, directory, or perhaps something else).\"\"\"\n validate_path(s)\n\n if not osp.exists(s):\n raise ConfigValueError('path not found')\n\n return s\n\n\ndef validate_existing_dir(s):\n \"\"\"an existing directory.\"\"\"\n validate_existing_path(s)\n\n if not osp.isdir(s):\n raise ConfigValueError('path is not a directory')\n\n return s\n\n\ndef validate_existing_file(s):\n \"\"\"an existing file.\"\"\"\n validate_existing_path(s)\n\n if not osp.isfile(s):\n raise ConfigValueError('path is not a file')\n\n return s\n\n\nadd_type('notype', no_validation)\nadd_type('str', validate_string)\nadd_type('bool', validate_boolean)\nadd_type('int', validate_integer)\nadd_type('fp', validate_float)\n\nadd_type('path', validate_path)\nadd_type('epath', validate_existing_path)\nadd_type('dir', validate_existing_dir)\nadd_type('file', validate_existing_file)\n\nfrom .factories import *\n\nadd_type('userport', Range('user tcp/ip port', 'nonpriviledged port numbers, 1024-65535', 1024, 65535))\n\nadd_type('platmixd', PlatformMixer('an existing directory', 'directories', validate_existing_dir))\nadd_type('platmixf', PlatformMixer('an existing file', 'files', validate_existing_file))\n",
"id": "3843116",
"language": "Python",
"matching_score": 4.381564617156982,
"max_stars_count": 0,
"path": "validators.py"
},
{
"content": "\n\"\"\"Validator factories\"\"\"\n\nfrom __future__ import absolute_import\n\nfrom .errors import ConfigTypeError, ConfigValueError\n\nfrom .validators import validate_integer\n\ntry:\n basestring\nexcept NameError:\n basestring = str\n\ntry:\n intTypes = (int, long)\nexcept NameError:\n intTypes = int\n\n\nclass Range(object):\n \"\"\"Accepts integers between two values, inclusively.\n Numbers may be equal and order is not significant.\"\"\"\n def __init__(self, name, desc, n1, n2):\n\n self.__doc__ = desc\n\n if not isinstance(n1, intTypes) or not isinstance(n2, intTypes):\n raise ConfigTypeError('integers are required')\n\n if n2 < n1:\n n1, n2 = n2, n1\n\n self.min_ = n1\n self.max_ = n2\n\n self.errmsg = '%s value must be within %d and %d, inclusively' % (name, n1, n2)\n\n def __call__(self, s):\n n = validate_integer(s)\n\n if self.min_ <= n <= self.max_:\n return n\n else:\n raise ConfigValueError(self.errmsg)\n\nfrom collections import Iterable, Hashable\n\nclass Choice(object):\n \"\"\"Select from a list of predetermined strings (or other hashable).\n Raises ConfigTypeError if the iterable of strings (or other hasables) isn't.\"\"\"\n def __init__(self, name, choices):\n\n if not isinstance(choices, Iterable):\n raise ConfigTypeError('finite, nonempty iterable of strings (or other hasables) required')\n\n for i in choices:\n if not isinstance(i, Hashable):\n raise ConfigTypeError('finite, nonempty iterable of strings (or other hasables) required')\n\n self.choices = choices = tuple(choices)\n\n if len(choices) == 0:\n raise ConfigTypeError('finite, nonempty iterable of strings (or other hasables) required')\n elif len(choices) == 1:\n choicemsg = '\"%s\"' % (choices[0],)\n elif len(choices) == 2:\n choicemsg = '\"%s\" or \"%s\"' % choices\n else:\n choicemsg = ', '.join(['\"%s\"' % (i,) for i in choices[:-1]]) + ', or \"%s\"' % choices[-1:]\n\n self.errmsg = '%s value must be %s.' % (name, choicemsg)\n\n self.__doc__ = '%s; valid choices are: %s.' % (name, choicemsg)\n\n def __call__(self, s):\n if not isinstance(s, Hashable):\n raise ConfigTypeError(self.errmsg)\n\n if s in self.choices:\n return s\n else:\n raise ConfigValueError(self.errmsg)\n\nimport sys\n\nfrom collections import Sequence\n\nclass PlatformMixer(object):\n \"\"\"Choice of values, depending on whether the platform isn't windows, or is.\"\"\"\n def __init__(self, longdesc, plural, validator):\n self.mixedthing = plural\n self.validator = validator\n self.__doc__ = \\\n \"\"\"%s, chosen from a pair, depending on platform.\n Takes two newline delimited %s and returns the first when\n not run under windows, otherwise returns the second.\"\"\" % (longdesc, plural)\n\n def __call__(self, s):\n if not isinstance(s, basestring):\n if isinstance(s, Sequence):\n l = s\n else:\n raise ConfigValueError('requires two newline delimited %s (string), or a length-two sequence' % (self.mixedthing,))\n else:\n l = s.strip().splitlines()\n\n if len(l) != 2:\n raise ConfigValueError('requires two newline delimited %s (string), or a length-two sequence' % (self.mixedthing,))\n else:\n return self.validator(l[int(sys.platform == 'win32')])\n\nfrom .types import get_type, add_type\n\n__all__ = ('Range', 'Choice', 'PlatformMixer', 'get_type', 'add_type')\n",
"id": "7478174",
"language": "Python",
"matching_score": 1.516021490097046,
"max_stars_count": 0,
"path": "factories.py"
},
{
"content": "import ConfigNG\n\nis_special = lambda k,v: (k.startswith('__') and \\\n k.endswith('__')) or type(v) == type(__builtins__)\n\ndef do_module(m):\n return [k for k,v in m.__dict__.iteritems() if not is_special(k,v)]\n\nfd = open('ConfigNG.rst', 'w')\n\n\nfd.write( \"\"\"\nConfigNG Package\n================\n\n:mod:`ConfigNG` Package\n-----------------------\n\n.. automodule:: ConfigNG.__init__\n\n\"\"\" )\n\n\nfor f in do_module(ConfigNG):\n if f.endswith('Error'):\n fd.write( '.. autoclass:: %s\\n' % (f,) )\n fd.write( ' :show-inheritance:\\n\\n' )\n else:\n fd.write( '.. autofunction:: %s\\n\\n' % (f,) )\n\n\nfd.write( \"\"\"\n:mod:`api` Module\n-----------------\n\n.. automodule:: ConfigNG.api\n\n\"\"\")\n\nfor f in do_module(ConfigNG.api):\n fd.write( '.. autofunction:: %s\\n\\n' % (f,) )\n\n\nfd.write( \"\"\"\n:mod:`configcont` Module\n------------------------\n\n.. automodule:: ConfigNG.configcont\n\n.. autoclass:: ConfigCont\n :members:\n :special-members:\n :show-inheritance:\n\n\"\"\")\n\n\nfd.write( \"\"\"\n:mod:`factories` Module\n-----------------------\n\n.. automodule:: ConfigNG.factories\n :members:\n :show-inheritance:\n\n\n:mod:`errors` Module\n--------------------\n\n.. automodule:: ConfigNG.errors\n :members:\n :undoc-members:\n :show-inheritance:\n\n\nIncluded validators\n-------------------\nConfigValueError or ConfigTypeError is raised if the input is invalid.\n\n\"\"\" )\n\nfrom restgrid import make_grid\n\nfixds = lambda s:' '.join(s.splitlines())\n\nfd.write( make_grid( [('type', 'description')] + [(k, fixds(v.__doc__)) for k,v in ConfigNG.types.types.iteritems()] ) + '\\n')\n\nfd.close()\n",
"id": "9257858",
"language": "Python",
"matching_score": 1.3450332880020142,
"max_stars_count": 0,
"path": "doc/make_rst.py"
},
{
"content": "\n\"\"\"Direct access is not intended; instead use the toplevel package.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom .errors import *\n\nfrom .configcont import ConfigCont\n\nfrom minisup import do_string\n\nimport os.path as osp\n\nimport re\n\n# detects a declaration or assignment\nitem_r = re.compile('([^:+=\\\\s][^:+=]*)(:|[+]?=)(.*)')\n\n# detects a command\ncmd_r = re.compile('%([\\\\S]*)\\\\s+(.*)')\n\n\ntry:\n import localsettings as ls\n global_cfg = ls.get_global_cfg()\n\nexcept ImportError:\n global_cfg = ''\n\nclass ConfigParserError(Exception):\n def __init__(self, *args):\n e, n, fn = args\n self.args = ('%s:%d:%s' % (fn, n, e),)\n\nclass ConfigIOError(ConfigBaseError):\n pass\n\ndef add_sign(f, sign):\n bits = osp.splitext(f)\n return '%s_%s%s' % ( bits[0], sign, bits[1] )\n\ndef findfile(f, dirs):\n dirs = list(dirs)\n if global_cfg:\n dirs.append(global_cfg)\n\n for i in dirs:\n path = osp.join(i, f)\n\n if osp.exists( path ):\n print(osp.normpath(path))\n return path\n\n print(dirs)\n raise ConfigIOError('File \"%s\" not found in include directories!' % (f,))\n\n# python can be a little weird about nonscalar default function parameters\nclass ClearContext(dict):\n def __init__(self, fname):\n dict.__init__( self, { \\\n 'config' : ConfigCont(None),\n 'incdirs' : ['.'],\n 'fname' : fname,\n 'n' : 0 }\n )\n\ndef get_config(file_):\n \"\"\"Takes a filename or stream; returns a configuraton container.\n Raises ConfigParserError if a descendent of ConfigBaseError is raised while processing.\"\"\"\n return config_recursable(file_, **ClearContext(file_))\n\ndef config_recursable(file_, config, incdirs, fname='stream', n=0):\n\n if type(file_) == type(''):\n fd = open( findfile(file_, incdirs), 'r' )\n fname = file_\n else:\n fd = file_\n\n conts = []\n\n while True:\n ln = fd.readline()\n\n # end of file\n if not ln:\n break\n\n ln = ln.strip()\n\n # end of block\n if ln == '}':\n break\n\n n += 1\n\n if not ln:\n continue\n\n if ln.startswith('#'):\n continue\n\n try:\n m = cmd_r.match(ln)\n\n if m:\n # commands\n cmd, args = m.groups()\n\n if cmd == 'include':\n config_recursable(args, config, incdirs)\n\n elif cmd == 'dict' or cmd == 'odict':\n if args.endswith('{'):\n key = args[:-1].strip()\n d = config_recursable( fd, ConfigCont(config), incdirs, fname, n )\n else:\n idx = args.find('=')\n\n if idx == -1:\n raise ConfigParserError('Syntax Error', n, fname)\n\n key = args[:idx].strip()\n d = config_recursable( args[idx+1:].strip(), ConfigCont(config), incdirs )\n\n config.add_child(key, d)\n\n elif cmd == 'includedir':\n incdirs.append(args)\n\n else:\n raise ConfigParserError('Unrecognized command', n, fname)\n\n else:\n # declarations and definitions\n m = item_r.match(ln)\n\n if not m:\n raise ConfigParserError('Syntax Error', n, fname)\n\n a, op, b = [i.strip() for i in m.groups()]\n\n if op == ':':\n key, whatis = a, b\n\n if not has_type(whatis):\n raise ConfigParserError('Unsupported type \"%s\"' % (whatis,), n, fname)\n\n try:\n if is_container(whatis):\n config.add_container(key, whatis)\n conts.append(key)\n else:\n config.add_item(key, whatis)\n\n except ConfigTypeError as e:\n raise ConfigParserError('Unrecognized type \"%s\"' % (whatis,), n, fname)\n\n else:\n key, val = a, b\n\n if not config.has_key(key):\n raise ConfigParserError('Unrecognized key \"%s\"' % (key,), n, fname)\n\n val, inc = do_string(fd, val)\n n += inc\n\n if op == '=':\n config[key] = val\n\n else:\n config.add_to_container(key, 
val)\n\n except ConfigBaseError as e:\n raise ConfigParserError(e[0], n, fname)\n\n if file_ is not fd:\n fd.close()\n\n for cont in conts:\n config.finalize_container(cont)\n\n return config\n\n__all__ = ('get_config', 'ConfigParserError')\n\n# cyclical imports, so it goes at the end\nfrom .api import *\n",
"id": "2219620",
"language": "Python",
"matching_score": 3.279400110244751,
"max_stars_count": 0,
"path": "parser.py"
},
{
"content": "\nclass ConfigBaseError(Exception):\n pass\n\nclass ConfigKeyError(ConfigBaseError, KeyError):\n pass\n\nclass ConfigTypeError(ConfigBaseError, TypeError):\n pass\n\nclass ConfigValueError(ConfigBaseError, ValueError):\n pass\n\nclass ConfigIncompleteError(Exception):\n pass\n",
"id": "2315526",
"language": "Python",
"matching_score": 0.6240891218185425,
"max_stars_count": 0,
"path": "errors.py"
},
{
"content": "#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe Composable class wraps a callable, providing methods for composition.\nComposable instances take/return exactly one argument/value.\"\"\"\n\nfrom functools import update_wrapper\n\n\ndef strip(f):\n \"\"\"\n Extract the function wrapped by the Composable instance. Can be\n overridden, see the Composable docstring for details.\"\"\"\n\n if hasattr(f, '_stripped'):\n return f._stripped\n else:\n return f\n\n\ndef compose_base(*f):\n \"\"\"Composition without processing. Bare functions in, bare function out.\n \"\"\"\n\n def builder(seq):\n if len(seq) == 1:\n return 'f[{:d}](t)'.format(seq[0])\n else:\n return 'f[{:d}]({})'.format(seq[0], builder(seq[1:]))\n\n if len(f) == 0:\n return lambda t: t\n elif len(f) == 1:\n return f[0]\n else:\n return eval('lambda t: ' + builder(range(len(f))), dict(f=f), {})\n\n\ndef compose(*f):\n \"\"\"Returns a new Composable instance wrapping the composition, after\n stripping (as needed) the arguments.\n\n The type of the result will match the outermost function, if it is a\n instance of Composable or a class derived from it, unless the property\n _new is customized, the result of which depends on the customization.\n\n Note that any such intermediate customization will be ignored. To\n respect the customization compose in stages such that the desired\n customization is outermost. The result will still pass through\n strip() unless that is also suitably customized.\"\"\"\n\n instMkr = f[0]._new if hasattr(f[0], '_new') else (\n type(f[0]) if isinstance(f[0], Composable) else Composable )\n\n return instMkr(compose_base(*map(strip, f)))\n\n\n# Could mixin with a class with callable instances\nclass ComposableBase:\n \"\"\"\n Provides methods for composition, both \"wrapped\" other(self(t))\n and \"nested\" self(other(t)).\n\n Composition can be customized by overloading the method _compose(). The\n default behavior is to use the module-level compose() function.\n\n The or operator is overloaded to implement \"piping\" in the manner familiar\n to shell users. Non-callable arguments at the front of the pipeline are\n passed to the function and evaluated in the expected manner.\"\"\"\n\n\n # maybe leaving this out would be better? Just overload __new__ instead.\n # explicit is better than implicit, I think.\n\n @property\n def _new(self):\n \"\"\"Customizable instantiation. Overload if derived class has a sufficiently\n distinct constructor signature. 
Otherwise, the type of the composition will\n match the type of the outermost function, and the constructor (or factory)\n will invoked as if it was the constructor of Composable.\"\"\"\n\n return self.__class__\n\n @staticmethod\n def _compose(f, g):\n return compose(f, g)\n\n def nest(self, other):\n \"\"\"self(other(t))\"\"\"\n return self._compose(self, other)\n\n def wrap(self, other):\n \"\"\"other(self(t))\"\"\"\n return self._compose(other, self)\n\n def __or__(self, other):\n \"\"\"other(self(t))\"\"\"\n return self._compose(other, self)\n\n def __ror__(self, other):\n \"\"\"self(other(t))\"\"\"\n if callable(other):\n return self._compose(self, other)\n else:\n return self(other)\n\n\n\nclass Composable(ComposableBase):\n \"\"\"Wraps a callable passed to the constructor, or the identity function,\n if none is provided. Docstrings and such are passed through via\n functools.update_wrapper(). The callable is passed through strip(), so\n classes do not nest, unless stripping is customized to enable it.\n\n The provided callable can be accessed via the strip() function. Customi-\n zation of how instances are stripped can be done by overloading the\n attribute \"_stripped\".\n\n TypeError is raised if the object passed is not callable. Ability to\n accept a single argument is not validated.\n\n >>> from sfacomposable import Composable, strip\n >>> f=Composable(lambda x:x+1)\n >>> g=Composable(lambda x:4*x)\n >>> @Composable\n ... def h(x):\n ... return 2**x\n ...\n >>> h(g(f(1)))\n 256\n >>> f(g(h(1)))\n 9\n >>> (f|g|h)(1)\n 256\n >>> 1|f|g|h\n 256\n >>> class Foo(Composable):\n ... pass\n ...\n >>> class Bar(Composable):\n ... _new = Foo\n ...\n >>> hFoo=Foo(h)\n >>> hBar=Bar(h)\n >>> type(strip(h))\n <class 'function'>\n >>> type(strip(hFoo))\n <class 'function'>\n >>> 1|f|g|hFoo\n 256\n >>> 1|f|g|hBar\n 256\n >>> type(f|g|hFoo).__name__\n 'Foo'\n >>> type(f|g|hBar).__name__\n 'Foo'\n \"\"\"\n\n @property\n def _stripped(self):\n \"\"\"Fixed attributes are allowed. Conserves space moreso than computation,\n however. To disable, delete or assign self to the attribute.\"\"\"\n\n return self.__wrapped__\n\n def __init__(self, f=None):\n if f is None:\n f = lambda x: x\n\n if not callable(f):\n raise TypeError('Pass a callable (that can be invoked as f(arg)), or pass nothing for the identity function.')\n\n update_wrapper(self, strip(f))\n\n def __call__(self, t):\n return self.__wrapped__(t)\n",
"id": "8528705",
"language": "Python",
"matching_score": 1.2647454738616943,
"max_stars_count": 0,
"path": "src/sfacomposable/base.py"
},
{
"content": "from __future__ import print_function\n\nimport pickle\n\nfrom opts import GETSET\n\ndef run(*args):\n m = args[0]\n print(m.info)\n constructor = m.generator\n\n args = (42,)+args[1:]\n\n # seeding at or after instantiation\n rng1 = constructor(*args)\n rng2 = constructor()\n print(rng1==rng2, rng2==rng1)\n rng2.seed(*args)\n print(rng1==rng2, rng2==rng1)\n\n if GETSET:\n print(rng2)\n\n # copy constructor\n rng3 = constructor(rng2)\n print(rng2==rng3); rng3.discard(1)\n print(rng2==rng3, rng3==rng2)\n\n # generator characteristics\n print('bits:', rng1.bits)\n print('range:', rng1.min(), rng2.max())\n print('base-two logarithm of period:', rng1.period_pow2())\n print('base-two logarithm of stream count:', rng1.streams_pow2())\n\n # get some random ints or floats, compare\n print(rng1.next_as_float(), rng2())\n print(rng2==rng2, rng1==rng2)\n\n rng1.backstep(1); rng2.backstep(1)\n\n print(rng1(), rng2.next_as_float())\n print(rng2==rng2, rng2==rng1)\n\n print(rng1(1000), rng2(1000))\n print(rng2==rng2, rng2==rng1)\n\n # subtract returns how much to advance rhs.. so is that equal\n # to backtracking lhs? note that these take unsigned arguments!\n\n rng2.seed(*args)\n one2 = rng1-rng2\n two1 = rng2-rng1\n period = 2**rng1.period_pow2()\n\n print(one2,two1,period)\n\n if GETSET:\n state = rng1.get_state()\n else:\n rng3 = constructor(rng1)\n\n rng2.advance(one2)\n print(rng1==rng2, rng2==rng1)\n\n rng2.seed(*args)\n\n if GETSET:\n rng1.set_state(state)\n else:\n rng1 = rng3\n\n one2 = rng1-rng2\n two1 = rng2-rng1\n print(one2, two1)\n\n #if \n # rng1.backstep(period-two1)\n # print(rng1==rng2, rng2==rng1)\n #except OverflowError:\n # print('period overflows C type')\n",
"id": "11711516",
"language": "Python",
"matching_score": 2.1703712940216064,
"max_stars_count": 0,
"path": "python/support/pcgtests.py"
},
{
"content": "from . [PCGx] import *\n\ndiscard_after_use = generator()\n\ninfo = dict(\n name = '[PCGx]',\n bits = cvar.bits,\n largest = discard_after_use.max(),\n period_pow2 = discard_after_use.period_pow2(),\n streams_pow2 = discard_after_use.streams_pow2(),\n incomplete = [INCOMPLETE]\n)\n\ndel discard_after_use\n\n__all__ = ('generator', 'info',\n ( 'Almost' if [INCOMPLETE] else '') + 'Random' )\n",
"id": "8643099",
"language": "Python",
"matching_score": 0.9162953495979309,
"max_stars_count": 0,
"path": "python/templates/__init__.py"
},
{
"content": "\nfrom __future__ import print_function\n\nimport os.path as osp\nimport subprocess\nimport shutil\nimport shlex\nimport sys\nimport os\n\nfrom importlib import import_module\n\nsys.path.append('support')\nd=osp.abspath(osp.dirname(__file__))\nsys.path.append(d)\nprint(d)\n\nfrom opts import *\nimport pcgtests as test\n\nwith open(osp.join('support', 'hasstreams.txt'), 'r') as f:\n hasstreams = set()\n f.readline()\n\n for line in f:\n hasstreams.add(line.strip())\n\ntemplates = os.listdir('templates')\n\nif osp.exists('pcgrng'):\n shutil.rmtree('pcgrng')\n\nos.mkdir('pcgrng')\n\nwith open(osp.join('pcgrng', '__init__.py'), 'w') as fmain:\n fmain.write('generators = {}\\n')\n\nos.chdir('pcgrng')\n\nwith open('../generators.txt', 'r') as fgens:\n masked = False\n for line in fgens:\n generator = line.strip()\n if not generator or generator.startswith('#'):\n continue\n\n if generator in ('-', '+'):\n masked = (generator == '-')\n continue\n\n if masked:\n continue\n\n substs['PCGx'] = generator\n\n substs['SWIGOPTS'] = SWIGOPTS\n\n if generator in hasstreams:\n substs['SWIGOPTS'] += ' -DSTREAMS'\n\n if GETSET:\n substs['SWIGOPTS'] += ' -DGETSET'\n\n os.mkdir(generator)\n\n with open('__init__.py', 'a') as fmain:\n fmain.write(\"from . import {0}\\ngenerators['{0}']={0}\\n\".format(generator))\n\n for filename in templates:\n with open(osp.join('../templates', filename), 'r') as fin:\n with open(osp.join(generator, filename), 'w') as fout:\n s = fin.read()\n for key, value in substs.items():\n s = s.replace('[{}]'.format(key), value)\n\n fout.write(s)\n\n print(substs)\n print('\\n', generator)\n\n with open(osp.join(generator, 'make'), 'r') as fmake:\n append = False\n cmds = []\n\n for line in fmake:\n cmd = line.strip()\n\n if not cmd:\n continue\n\n if append:\n cmds[-1] += cmd\n else:\n cmds.append(cmd)\n\n append = cmd.endswith('\\\\')\n\n for cmd in cmds:\n subprocess.check_call(shlex.split(cmd))\n\n m = import_module('pcgrng.'+generator)\n\n test.run(m)\n\n if generator in hasstreams:\n test.run(m, 137)\n\n for filename in templates:\n if not filename.startswith('__'):\n os.remove(osp.join(generator, filename))\n\n for filename in ['pcggen_wrap.'+i for i in ('cxx', 'o')]:\n os.remove(filename)\n",
"id": "1784485",
"language": "Python",
"matching_score": 1.0261282920837402,
"max_stars_count": 0,
"path": "python/build.py"
},
{
"content": "\nwith open('opts.cfg', 'r') as f:\n exec(f.read())\n\nif not PYHEADERS:\n try:\n import sysconfig\n PYHEADERS = sysconfig.get_path('include')\n except ImportError:\n import sys\n PYHEADERS = '/usr/include/python{0}.{1}'.format(*sys.version_info)\n\n# set include directory to a useful default if it evaluates as false\nsubsts = dict(\n INCOMPLETE = 'False' if GETSET else 'True',\n SWIG = SWIG, CXX = CXX, CXXOPTS = CXXOPTS,\n PCGHEADERS = PCGHEADERS,\n PYHEADERS = PYHEADERS)\n",
"id": "11377547",
"language": "Python",
"matching_score": 0.7693873643875122,
"max_stars_count": 0,
"path": "python/support/opts.py"
},
{
"content": "from distutils.core import setup, Extension\n\npcg64_module = Extension('_pcg64',\n sources=['pcg64_wrap.cxx'],\n include_dirs=['../../../include']\n)\n\nsetup(\n name='pcg64',\n author='<NAME>',\n description=\"Wrapper of the PGC PRNG C++ library by <NAME>\",\n ext_modules=[pcg64_module],\n py_modules=('pcg64',)\n)\n",
"id": "6058623",
"language": "Python",
"matching_score": 0.19180789589881897,
"max_stars_count": 0,
"path": "python/setup.py"
},
{
"content": "\nfrom __future__ import print_function\n\n### Copyright 2017 <NAME>\n###\n### Licensed under the Apache License, Version 2.0 (the \"License\");\n### you may not use this file except in compliance with the License.\n### You may obtain a copy of the License at\n###\n### http://www.apache.org/licenses/LICENSE-2.0\n###\n### Unless required by applicable law or agreed to in writing, software\n### distributed under the License is distributed on an \"AS IS\" BASIS,\n### WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n### See the License for the specific language governing permissions and\n### limitations under the License.\n############################################################################\n############################################################################\n\n\n# sorry, just not a fan of Bash/Makefile programming\n# also, not tested in Python v3, but that'll get fixed soon\n\n# expects to be run from project's root\n\n# decent command line validation, but could still blow up messily\n# if the input SVG isn't Just So.\n\nSVG = 'src/sign.svg'\n\nimport sys\n\nfrom string import Template\n\nimport xml.etree.ElementTree as ET\n\n\nTMPLS = {\n 'outer' : [ Template(s) if s else None for s in (\n 'inkscape -Cj -i $id -y 255 --export-pdf=tmp/${name}1.pdf $svg',\n 'mutool poster -x 2 -y 2 tmp/${name}1.pdf tmp/tiles.pdf',\n 'pdftk tmp/tiles.pdf burst output tmp/tile%d1.pdf',\n 'pdftk tmp/tile21.pdf background src/copyleft.pdf output tmp/tile22.pdf',\n '',\n 'pdftk tmp/tile14.pdf tmp/tile25.pdf tmp/tile34.pdf tmp/tile44.pdf cat output output/${name}.pdf',\n 'pdfnup --nup 2x2 --outfile tmp/${name}2.pdf tmp/tile11.pdf tmp/tile22.pdf tmp/tile31.pdf tmp/tile41.pdf',\n 'convert tmp/${name}2.pdf -background white -alpha remove -geometry 440x340 output/${name}.png',\n 'zip -jn .png output/sign.zip output/${name}.pdf output/${name}.png src/assembly.pdf',\n ) ],\n\n 'inner' : [ Template(s) for s in (\n 'pdftk tmp/tile${i}${j}.pdf rotate 1east output tmp/tile${i}${k}.pdf',\n 'pdf2ps tmp/tile${i}${k}.pdf tmp/tile${i}${k}.ps',\n 'pstops -p letter \"@0.9(0.425in,0.55in)\" tmp/tile${i}${k}.ps tmp/tile${i}${m}.ps',\n 'ps2pdf tmp/tile${i}${m}.ps tmp/tile${i}${m}.pdf',\n 'pdftk tmp/tile${i}${m}.pdf rotate 1west output tmp/tile${i}${n}.pdf'\n ) ],\n\n 'inner_why_not_this_work' : [ Template(s) for s in (\n './pdfScale.sh -s 0.9 tmp/tile${i}${j}.pdf tmp/tile${i}${k}.pdf',\n ) ],\n}\n\ndef nonsplz(s):\n t = ''\n for c in s[::-1]:\n if c == '}':\n break\n else:\n t = c+t\n\n return t\n\n\nif __name__ == '__main__':\n\n argv = sys.argv[1:]\n\n sides_in = argv[:2]\n sides = []\n\n while sides_in:\n if sides_in[-1] in ('front', 'back'):\n sides.append(sides_in[-1])\n\n sides_in.pop()\n\n if len(sides) == 2 and sides[0] == sides[1]:\n sides.pop()\n\n if not sides:\n sides = ('front', 'back')\n\n tree = ET.parse(SVG)\n root = tree.getroot()\n\n IDs = [ e.attrib['id'] for e in root.iter() if nonsplz(e.tag) == 'g' ]\n\n layers = dict(list(zip(('back', 'front'), IDs)))\n kwargs = dict(svg=SVG)\n\n print('mkdir -p tmp')\n\n for name in sides:\n kwargs['name'] = name\n kwargs[ 'id'] = layers[name]\n for tplo in TMPLS['outer']:\n if tplo:\n print(tplo.substitute(**kwargs))\n else:\n for i,j,k,m,n in zip(\n (1,2,3,4), (1,2,1,1), (2,3,2,2), (3,4,3,3), (4,5,4,4) ):\n for tpli in TMPLS['inner']:\n print(tpli.substitute(i=i, j=j, k=k, m=m, n=n))\n\n\n print('rm -f tmp/*')\n",
"id": "2837715",
"language": "Python",
"matching_score": 1.0128669738769531,
"max_stars_count": 0,
"path": "build.py"
},
{
"content": "\n# ==============================================================================\n# == Copyright 2016 <NAME> ==\n# == ==\n# == Licensed under the Apache License, Version 2.0 (the \"License\"); ==\n# == you may not use this file except in compliance with the License. ==\n# == You may obtain a copy of the License at ==\n# == ==\n# == http://www.apache.org/licenses/LICENSE-2.0 ==\n# == ==\n# == Unless required by applicable law or agreed to in writing, software ==\n# == distributed under the License is distributed on an \"AS IS\" BASIS, ==\n# == WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ==\n# == See the License for the specific language governing permissions and ==\n# == limitations under the License. ==\n# ==============================================================================\n\nfrom elements import Point\n\nimport struct\n\ndef raw2hex(s):\n return ''.join(['%02x' % (ord(c),) for c in s])\n\ndef hex2raw(s):\n return ''.join([chr(int(s[2*i:2*(i+1)], 16)) for i in range(len(s)/2)])\n\n\ndef tile2hex(t):\n return raw2hex(struct.pack('<%df' %(2*len(t),), *sum(t.vertices, ())))\n\ndef hex2vertices(s):\n nums = struct.unpack('<%df' % (len(s)/8,), hex2raw(s))\n return [Point(*nums[2*i:2*(i+1)]) for i in range(len(nums)/2)]\n\n\ndef readfile(fd, cb):\n for ln in fd:\n s = ln.rstrip()\n\n if not s:\n continue\n\n if s.startswith('#'):\n continue\n\n id_, args = s.split(' ', 1)\n\n if id_ == 't':\n cb('t', hex2vertices(s[2:]))\n\n\ndef writefile(fd, items):\n for id_, args in items:\n if id_ == 't':\n fd.write('t %s\\n' % (tile2hex(args),))\n\n\n__all__ = ('writefile', 'readfile')\n",
"id": "11622160",
"language": "Python",
"matching_score": 0.220509335398674,
"max_stars_count": 0,
"path": "src/ayeoh.py"
}
] | 1.492017 |
jawsnfl | [
{
"content": "# Copyright 2017 Netflix, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport boto.sns\nimport grizzly_util\n\nimport eventlet\nfrom eventlet.timeout import Timeout\nfrom eventlet.green.urllib import request\nfrom eventlet.green.urllib import error\nimport eventlet.green.ssl as ssl\n\nimport json\nimport _thread\nimport time\nimport string\nimport urllib.parse\nfrom datetime import datetime\nfrom collections import Counter\nimport random\n\n\nclass GrizzlyConfiguration():\n '''\n This class is called to configure and conduct an application layer\n DoS test.\n\n More information on how to configure the tool can be found on:\n https://github.com/netflix-skunkworks/repulsive-grizzly\n '''\n def __init__(self):\n # Read in config file\n self.conf = \"\"\n with open(\"commands.json\") as config:\n self.conf = json.loads(config.read())\n\n self.status_code = []\n\n # If setup to use Kraken, we should ensure sns_region and sns_topic\n try:\n if self.conf[\"use_with_kraken\"]:\n self.use_kraken = True\n self.sns = boto.sns.connect_to_region(self.conf[\"sns_region\"])\n self.topic = self.conf[\"sns_topic\"]\n else:\n self.use_kraken = False\n except:\n print(\"Could not set sns_region or sns_topic, did you specify them?\")\n exit(1)\n\n # Check if we should perform a sanity check\n try:\n if self.conf[\"perform_sanity_check\"]:\n self.perform_sanity_check = True\n else:\n self.perform_sanity_check = False\n except:\n print(\"Could not determine if we should do sanity check\")\n exit(1)\n\n # If setup to use Kraken, we should ensure sns_region and sns_topic\n try:\n if self.conf[\"use_with_kraken\"]:\n self.instance_id = grizzly_util.get_node_number(\"all\")\n else:\n self.instance_id = 1\n except:\n print(\"Could not set instance, do you have AWS credentials \"\n \"on the host you are running Repulsive Grizzly?\")\n exit(1)\n\n self.cookie_list = []\n self.headers_list = []\n\n def payload_generator(self, size=50, chars=string.ascii_uppercase + string.digits):\n '''\n Payload generator can be used by supplying a placehodler $$1$$\n and overwritten for your specific use case\n\n NOTE: This is not currently used or implemented\n '''\n\n return ''.join(random.choice(chars) for _ in range(size))\n\n def load_commands(self, command_file):\n '''\n Loads all commands into self object\n '''\n\n # Make sure there is a hostname defined, otherwise we can't set header\n try:\n self.verb = self.conf[\"verb\"]\n except:\n print(\"Could not resolve HTTP Verb for attack, exiting\")\n exit(1)\n\n # Configure proxy if enabled\n try:\n if self.conf[\"proxy\"]:\n self.proxy = True\n self.proxy_config = self.conf[\"proxy_config\"]\n else:\n self.proxy = False\n except:\n print(\"Proxy should be set to True/False in the commands.json\")\n exit(1)\n\n # Grab the sanity check url\n try:\n self.sanity_check_url = self.conf[\"sanity_check_url\"]\n except:\n print(\"No sanity check url provided, how do we know we are healthy?\")\n exit(1)\n\n # Make sure there is a hostname defined, otherwise we can't 
set header\n try:\n self.host = self.conf[\"hostname\"]\n except:\n print(\"Could not resolve hostname for attack, exiting\")\n exit(1)\n\n # Load post data if provided and verb is either post, put or patch\n try:\n if self.verb.lower() in [\"post\", \"put\", \"patch\"] and self.conf[\"post_data\"]:\n if self.conf[\"post_data\"]:\n with open(\"post_data/{}\".format(str(self.conf[\"post_data\"]))) as post_data:\n self.post_data = post_data.read().replace('\\n', '')\n else:\n self.post_data = \"\"\n except:\n print(\"Could not resolve post data, did you specify the correct filename?\")\n raise\n\n # If configured to use cookies, load the cookies from json into string?\n try:\n if self.conf[\"use_auth\"]:\n self.auth_store_name = self.conf[\"auth_store_name\"]\n with open(\"./authentication/{}\".format(self.auth_store_name)) as auth_objects:\n self.auth_objects = json.loads(auth_objects.read())\n else:\n self.auth_objects = []\n except Exception as e:\n print(\"Could not resolve cookie store for attack, exiting\")\n print(e)\n exit(1)\n\n # You can set one_url_per_agent to true to have each agent\n # hit all URLs or moduls to fix one URL per attack agent.\n # Otherwise this defaults to all urls per each agent\n try:\n if self.conf[\"urls\"] and self.conf[\"one_url_per_agent\"]:\n self.urls = [self.conf[\"urls\"][int(self.instance_id) % len(self.conf[\"urls\"])]]\n elif self.conf[\"urls\"]:\n self.urls = self.conf[\"urls\"]\n except Exception as e:\n print(\"Could not assign one url per agent, exiting!\")\n print(e)\n exit(1)\n\n # Load headers into a dict object\n if self.conf[\"headers\"]:\n self.header_store_name = self.conf[\"headers\"]\n with open(\"./headers/{}\".format(self.header_store_name)) as config:\n self.headers = json.loads(config.read())\n else:\n print(\"no headers specified, using default headers.\")\n with open(\"./headers/{}\".format(\"default\")) as config:\n self.headers = json.loads(config.read())\n\n # If we need to replace auth objects, let's load them and build a map\n if len(self.auth_objects) > 0:\n # This method generates a random sample with a deterministic seed\n # to ensure each instances uses the same cookies\n try:\n random_sample = random\n random_sample.seed(self.conf[\"auth_store_count\"])\n if len(self.auth_objects) != 0:\n self.auth_objects = random_sample.sample(self.auth_objects, (self.conf[\"auth_store_count\"]))\n else:\n self.auth_objects = []\n except:\n print(\"Did you specify the number of objects (auth_store_count) \"\n \"for your authentication store?\")\n exit(1)\n\n # The following code blocks compute all possible requests depending\n # on how many auth objects were provided.\n self.computed_requests = {}\n self.computed_requests[\"urls\"] = []\n self.computed_requests[\"headers\"] = []\n self.computed_requests[\"post_data\"] = []\n temp_hash = {}\n\n # Compute a list of URLs with associated auth objects if identified\n for url in self.urls:\n if \"$$AUTH$$\" in url:\n for auth_object in self.auth_objects:\n self.computed_requests[\"urls\"].append(url.replace(\"$$AUTH$$\", auth_object))\n else:\n self.computed_requests[\"urls\"].append(url)\n\n # Compute a list of headers with associated auth objects if identified\n auth_headers = False\n for header in self.headers.values():\n if \"$$AUTH$$\" in header:\n auth_headers = True\n\n if auth_headers:\n for i in range(len(self.auth_objects)):\n print(i)\n temp_hash = {}\n for key, value in self.headers.items():\n if \"$$AUTH$$\" in value:\n temp_hash.update({key: value.replace(\"$$AUTH$$\", 
self.auth_objects[i])})\n else:\n temp_hash.update({key: value})\n\n self.computed_requests[\"headers\"].append(temp_hash)\n else:\n self.computed_requests[\"headers\"] = [self.headers]\n\n # Compute a list of post_data samples with associated auth objects if identified\n if self.post_data:\n if \"$$AUTH$$\" in self.post_data:\n auth_headers = True\n if auth_headers:\n for i in range(len(self.auth_objects)):\n self.computed_requests[\"post_data\"].append(self.post_data.replace(\"$$AUTH$$\", self.auth_objects[i]))\n else:\n self.computed_requests[\"post_data\"] = [self.post_data]\n else:\n self.computed_requests = {}\n self.computed_requests[\"urls\"] = []\n self.computed_requests[\"headers\"] = []\n self.computed_requests[\"post_data\"] = []\n temp_hash = {}\n self.computed_requests[\"urls\"] = self.urls\n self.computed_requests[\"headers\"] = [self.headers]\n self.computed_requests[\"post_data\"] = [self.post_data]\n\n def generate_request(self, verb, url, headers, post_data=None):\n try:\n # import pdb; pdb.set_trace()\n req = request.Request(url,\n data=post_data.encode(\"utf-8\") if post_data is not None else None,\n headers=headers,\n method=verb)\n if self.proxy:\n req.set_proxy(self.proxy_config, urllib.parse.urlparse(url).scheme)\n response = request.urlopen(req, timeout=60, context=self.create_ctx())\n else:\n response = request.urlopen(req, timeout=60, context=self.create_ctx())\n self.status_code.append(int(response.code))\n except error.HTTPError as e:\n self.status_code.append(int(e.code))\n except error.URLError as e:\n self.sns_logger(status_codes={}, exception=str(e.reason), subject=\"Grizzly Error\")\n except Exception:\n import traceback\n self.sns_logger(status_codes={}, exception=str(traceback.format_exc()), subject=\"Grizzly Error\")\n print(('generic exception: ' + traceback.format_exc()))\n\n def countdown(self, start_time):\n '''\n This method sleeps until the start_time is triggered.\n This is used to keep attack agents in sync so they start\n their tests at the same time.\n '''\n print((\"Executing Test on \"\n \"{} with {} threads \"\n \"via {} url(s) for \"\n \"{} seconds\".format(self.conf[\"hostname\"],\n str(self.conf[\"threads\"]),\n self.urls,\n str(self.conf[\"ttl\"]))))\n\n now = datetime.now()\n timestamp = start_time.split(':')\n start_attack = now.replace(hour=int(timestamp[0]), minute=int(\n timestamp[1]), second=int(timestamp[2]))\n t = int((start_attack - now).total_seconds())\n print((\"Attack starts at: {} in {} seconds\".format(start_time, t)))\n\n while start_attack > now:\n now = datetime.now()\n timestamp = start_time.split(':')\n start_attack = now.replace(hour=int(timestamp[0]), minute=int(\n timestamp[1]), second=int(timestamp[2]))\n mins, secs = divmod(t, 60)\n timeformat = '{:02d}:{:02d}'.format(mins, secs)\n print(timeformat)\n time.sleep(1)\n t -= 1\n print('Attack Executing!\\n\\n')\n\n def sns_logger(self, status_codes={}, exception=None, subject=\"Grizzly Log\", url=\"\"):\n '''\n This method logs messages to an SNS queue and/or prints them to console.\n '''\n timestamp = '%s' % datetime.now()\n agent = self.instance_id\n if url == \"\":\n url = self.urls\n\n if status_codes:\n message = json.dumps({\"agent\": agent,\n \"timestamp\": timestamp,\n \"status_codes\": status_codes,\n \"elb\": url})\n if self.use_kraken:\n self.sns.publish(message=message, subject=subject, topic=self.topic)\n print(message)\n # I am not handling exceptions yet, but this is for future\n if exception:\n message = json.dumps({\"agent\": agent,\n 
\"timestamp\": timestamp,\n \"url\": url,\n \"exception\": exception})\n if self.use_kraken:\n self.sns.publish(message=message, subject=subject, topic=self.topic)\n print(message)\n\n def status_counter(self, thread_name):\n '''\n This provides status updates to the SNS queue every 5 seconds\n '''\n while True:\n time.sleep(5)\n status_codes = Counter(self.status_code)\n self.sns_logger(status_codes)\n self.status_code = []\n\n def create_ctx(self):\n '''\n This method sets the right ssl context to disable hostname checking\n and certificate validation.\n '''\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n return ctx\n\n def sanity_check(self, client, computed_requests):\n '''\n This method checks that the sanity_check_url provides a 200 status code.\n If the sanity check fails, the application exists.\n '''\n req = request.Request(client, headers=self.computed_requests[\"headers\"][0])\n\n response = request.urlopen(req, timeout=60, context=self.create_ctx())\n if response.code != 200:\n self.sns_logger(status_codes={},\n exception=str(response.code),\n subject=\"Grizzly Sanity Check Failed\",\n url=client)\n raise\n else:\n self.sns_logger(status_codes={},\n exception=str(response.code),\n subject=\"Grizzly Sanity Check Passed\",\n url=client)\n print('Sanity check passed: 200 OK')\n return True\n\n\nif __name__ == \"__main__\":\n # Initialize class and load command file\n grizzly_config = GrizzlyConfiguration()\n grizzly_config.load_commands(\"commands.json\")\n\n # Set threadpool\n pool = eventlet.GreenPool(int(grizzly_config.conf[\"threads\"]))\n\n # Start time is when to start in seconds from time\n try:\n # First publish a message telling the system the test is beginning\n print('Test is starting')\n _thread.start_new_thread(grizzly_config.status_counter, (\"thread_1\",))\n except:\n # If we can't publish a message or start our thread, exit\n raise\n\n # Block execution of attack until start_time is triggered\n try:\n start_time = grizzly_config.conf[\"start_time\"]\n grizzly_config.countdown(start_time)\n except:\n raise\n\n # Perform a sanity check\n if grizzly_config.sanity_check:\n grizzly_config.sanity_check(grizzly_config.sanity_check_url, grizzly_config.computed_requests)\n\n # Set time interval for attack\n timeout = Timeout(int(grizzly_config.conf[\"ttl\"]), False)\n\n # Conduct attack until timeout is triggered, then exit gracefully\n try:\n while True:\n for url in grizzly_config.computed_requests[\"urls\"]: # and not kill switch\n if grizzly_config.verb != \"GET\":\n for headers in grizzly_config.computed_requests[\"headers\"]: # and not kill switch\n for post_data in grizzly_config.computed_requests[\"post_data\"]:\n pool.spawn(grizzly_config.generate_request,\n grizzly_config.conf[\"verb\"].upper(),\n url,\n headers,\n post_data)\n else:\n for headers in grizzly_config.computed_requests[\"headers\"]: # and not kill switch\n pool.spawn(grizzly_config.generate_request, grizzly_config.conf[\"verb\"].upper(),\n url,\n headers)\n finally:\n timeout.cancel()\n",
"id": "3505669",
"language": "Python",
"matching_score": 3.809093952178955,
"max_stars_count": 184,
"path": "grizzly.py"
},
{
"content": "#!/usr/bin/env python\n# Copyright 2017 Netflix, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUSAGE:\n grizzly_util.py test <command_file> <region>\n grizzly_util.py sendmsg <arn> <subject> <message>...\n\"\"\"\n\nimport boto3\nimport logging\n\nlog = logging.getLogger(\"mock_grizzly\")\nlogging.basicConfig()\nlog.setLevel(logging.DEBUG)\n\n\ndef get_node_number(region):\n\n region = \"all\"\n\n d = boto3.session.Session().client(\"dynamodb\", region_name=\"us-west-2\")\n v = d.update_item(TableName=\"repulsive_grizzly\", Key={ \"key\": {\"S\": \"counter\"}, \"region\": {\"S\": region}}, UpdateExpression=\"SET node_number = node_number + :c\", ExpressionAttributeValues={\":c\": {\"N\": \"1\"}}, ReturnValues=\"ALL_OLD\")\n\n return int(v[\"Attributes\"][\"node_number\"][\"N\"])\n\n\ndef killswitch():\n d = boto3.session.Session().client(\"dynamodb\", region_name=\"us-west-2\")\n v = d.get_item(TableName=\"repulsive_grizzly\", Key={\"key\": {\"S\": \"kill_switch\"}, \"region\": {\"S\": \"all\"}})\n\n item = v.get(\"Item\")\n if not item:\n log.critical(\"Can't find kill switch\")\n return True\n\n switch = bool(item[\"shutdown\"][\"BOOL\"])\n\n return switch\n\n\ndef get_uuid():\n import uuid\n return uuid.uuid4()\n\n\ndef send_message(arn, subject, message):\n\n region = arn.split(\":\")[3]\n log.debug(\"sending to '{}' subject '{}' message '{}'\".format(arn, subject, message))\n\n sns = boto3.session.Session().client(\"sns\", region_name=region)\n sns.publish(TopicArn=arn, Subject=subject, Message=message)\n\n\ndef main(args):\n\n if args.get(\"test\"):\n command_file = args[\"<command_file>\"]\n region = args[\"<region>\"]\n\n log.debug(\"command file is {}\".format(command_file))\n log.debug(\"node number is {}\".format(get_node_number(region)))\n log.debug(\"kill switch is {}\".format(killswitch()))\n log.debug(\"uuid is {}\".format(get_uuid()))\n\n elif args.get(\"sendmsg\"):\n arn = args[\"<arn>\"]\n subject = args[\"<subject>\"]\n message = \" \".join(args[\"<message>\"])\n send_message(arn, subject, message)\n\nif __name__ == \"__main__\":\n from docopt import docopt\n main(docopt(__doc__))\n",
"id": "3645806",
"language": "Python",
"matching_score": 0.4076929986476898,
"max_stars_count": 184,
"path": "grizzly_util.py"
},
{
"content": "from setuptools import setup\nimport sys\nimport os\nimport re\n\nIS_PY_2 = (sys.version_info[0] <= 2)\n\n\ndef read_readme():\n with open('README.md') as f:\n return f.read()\n\ndef read_version():\n # importing gpustat causes an ImportError :-)\n __PATH__ = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(__PATH__, 'gpustat.py')) as f:\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n f.read(), re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find __version__ string\")\n\n\ninstall_requires = [\n 'six',\n 'nvidia-ml-py>=7.352.0' if IS_PY_2 else \\\n 'nvidia-ml-py3>=7.352.0',\n 'psutil',\n 'blessings>=1.6',\n]\n\ntests_requires = [\n 'mock>=2.0.0',\n 'nose',\n 'nose-cover3'\n]\n\nsetup(\n name='gpustat',\n version=read_version(),\n license='MIT',\n description='An utility to monitor NVIDIA GPU status and usage',\n long_description=read_readme(),\n url='https://github.com/wookayin/gpustat',\n author='<NAME>',\n author_email='<EMAIL>',\n keywords='nvidia-smi gpu cuda monitoring gpustat',\n classifiers=[\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Topic :: System :: Monitoring',\n ],\n #packages=['gpustat'],\n py_modules=['gpustat'],\n install_requires=install_requires,\n extras_require={'test': tests_requires},\n tests_require=tests_requires,\n test_suite='nose.collector',\n entry_points={\n 'console_scripts': ['gpustat=gpustat:main'],\n },\n include_package_data=True,\n zip_safe=False,\n)\n",
"id": "193331",
"language": "Python",
"matching_score": 1.4678518772125244,
"max_stars_count": 2,
"path": "setup.py"
},
{
"content": "import unittest\nimport os\nimport shutil\nimport tempfile\nimport requests_mock\nimport glob\nfrom instagram_scraper import InstagramScraper\nfrom instagram_scraper.constants import *\n\nclass InstagramTests(unittest.TestCase):\n\n def setUp(self):\n fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures')\n\n fixture_files = glob.glob(os.path.join(fixtures_path, '*'))\n\n for file_path in fixture_files:\n basename = os.path.splitext(os.path.basename(file_path))[0]\n self.__dict__[basename] = open(file_path).read()\n\n # This is a max id of the last item in response_first_page.json.\n self.max_id = \"1369793132326237681_50955533\"\n\n self.test_dir = tempfile.mkdtemp()\n\n args = {\n 'usernames': ['test'],\n 'destination': self.test_dir,\n 'login_user': None,\n 'login_pass': None,\n 'quiet': True,\n 'maximum': 0,\n 'retain_username': False,\n 'media_metadata': False,\n 'media_types': ['image', 'video', 'story'],\n 'latest': False\n }\n\n self.scraper = InstagramScraper(**args)\n\n def tearDown(self):\n shutil.rmtree(self.test_dir)\n\n def test_scrape(self):\n with requests_mock.Mocker() as m:\n m.get(BASE_URL + self.scraper.usernames[0], text=self.response_user_metadata)\n m.get(MEDIA_URL.format(self.scraper.usernames[0]), text=self.response_first_page)\n m.get(MEDIA_URL.format(self.scraper.usernames[0]) + '?max_id=' + self.max_id,\n text=self.response_second_page)\n m.get('https://fake-url.com/photo1.jpg', text=\"image1\")\n m.get('https://fake-url.com/photo2.jpg', text=\"image2\")\n m.get('https://fake-url.com/photo3.jpg', text=\"image3\")\n\n self.scraper.scrape()\n\n # First page has photo1 and photo2, while second page has photo3. If photo3\n # is opened, generator successfully traversed both pages.\n self.assertEqual(open(os.path.join(self.test_dir, 'photo3.jpg')).read(),\n \"image3\")\n\n def test_scrape_hashtag(self):\n with requests_mock.Mocker() as m:\n m.get(QUERY_HASHTAG.format(self.scraper.usernames[0], ''), text=self.response_query_hashtag_first_page, status_code=200)\n m.get(QUERY_HASHTAG.format(self.scraper.usernames[0], 'J0'), text=self.response_query_hashtag_second_page, status_code=200)\n\n m.get('https://fake-url.com/photo4.jpg', text=\"image4\")\n\n self.scraper.scrape_hashtag()\n\n self.assertEqual(open(os.path.join(self.test_dir, 'photo4.jpg')).read(), \"image4\")",
"id": "4321871",
"language": "Python",
"matching_score": 2.426628351211548,
"max_stars_count": 6,
"path": "instagram_scraper/tests/test_instagram.py"
},
{
"content": "BASE_URL = 'https://www.instagram.com/'\nLOGIN_URL = BASE_URL + 'accounts/login/ajax/'\nLOGOUT_URL = BASE_URL + 'accounts/logout/'\nMEDIA_URL = BASE_URL + '{0}/media'\nCHROME_WIN_UA = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36'\n\nSTORIES_URL = 'https://i.instagram.com/api/v1/feed/user/{0}/reel_media/'\nSTORIES_UA = 'Instagram 9.5.2 (iPhone7,2; iPhone OS 9_3_3; en_US; en-US; scale=2.00; 750x1334) AppleWebKit/420+'\nSTORIES_COOKIE = 'ds_user_id={0}; sessionid={1};'\n\nTAGS_URL = BASE_URL + 'explore/tags/{0}/?__a=1'\nLOCATIONS_URL = BASE_URL + 'explore/locations/{0}/?__a=1'\nVIEW_MEDIA_URL = BASE_URL + 'p/{0}/?__a=1'\nSEARCH_URL = BASE_URL + 'web/search/topsearch/?context=blended&query={0}'\n\nQUERY_COMMENTS = BASE_URL + 'graphql/query/?query_id=17852405266163336&shortcode={0}&first=100&after={1}'\nQUERY_HASHTAG = BASE_URL + 'graphql/query/?query_id=17882293912014529&tag_name={0}&first=100&after={1}'\nQUERY_LOCATION = BASE_URL + 'graphql/query/?query_id=17881432870018455&id={0}&first=100&after={1}'\n",
"id": "268041",
"language": "Python",
"matching_score": 1.471980094909668,
"max_stars_count": 6,
"path": "instagram_scraper/constants.py"
}
] | 1.47198 |
torsina | [
{
"content": "import subprocess\nimport os\nimport platform\nimport sys\n\n_dir = \"\"\n_make = \"\"\nif platform.system() == \"Linux\":\n _dir = \"../build\"\n _make = \"make\"\nelse:\n _dir = \"./cmake-build-debug\"\n _make = \"mingw32-make\"\n\npipes = subprocess.Popen([_make, \"docs\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=_dir)\nstd_out, std_err = pipes.communicate()\n\nprint(str(std_out, 'utf-8'))\nprint(\"Current directory :\", os.getcwd())\nif len(std_err) != 0 and \" WARNING: \" in str(std_err):\n print(str(std_err, 'utf-8'))\n sys.exit(-1)\nsys.exit(0)",
"id": "3595982",
"language": "Python",
"matching_score": 0,
"max_stars_count": 2,
"path": "test_docs.py"
}
] | 0 |
mjmilazzo | [
{
"content": "import os\nimport markdown\n\n\"\"\"\nAll text files in working directory are\nmarkdown files. This script converts them\nto html files for the GitHub personal page.\n\nUse this template to generate markdown text:\nhttps://dillinger.io/\n\"\"\"\n\nwd = os.getcwd()\n\nmdfiles = [f for f in os.listdir(wd) if os.path.isfile(os.path.join(wd, f))]\nmdfiles = [f for f in mdfiles if f.endswith('.txt')]\n\n\"\"\"\nAll text files within working directory are markdown files,\nwith the naming convention \"....._md.txt\", where prefix\nis the name of the desired html file output.\n\"\"\"\n\nfor f in mdfiles:\n fname = f.split(\"_md.txt\")[0]\n \n with open(f, \"r\", encoding=\"utf-8\") as input_file:\n text = input_file.read()\n\n html = markdown.markdown(text)\n\n with open(\"{}.html\".format(fname), \"w\", encoding=\"utf-8\", errors=\"xmlcharrefreplace\") as output_file:\n output_file.write(html)",
"id": "5264929",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "html generation.py"
}
] | 0 |
houzaj | [
{
"content": "# coding=utf-8\nimport logging\nimport re\nimport json\nfrom pathlib import Path\n\nimport settings\n\n\nclass Generator(object):\n\n def __init__(self):\n self.content = []\n\n def populate(self):\n global data\n with open('./config.json', 'r') as f:\n data = json.load(f)\n \n for ele in data:\n name, depth, src = ele['section'], ele['depth'], ele['src']\n self.populate_section(name, depth)\n \n if ele['src'] is not None: # is File\n lang = ele['src'].split('.')\n lang = lang[len(lang) - 1]\n self.populate_file(name, lang, src, depth)\n\n return '\\n'.join(self.content)\n\n def populate_section(self, caption, depth):\n \"\"\"\n 输出段落\n :param caption: 标题\n :param depth: 遍历深度\n :return: str\n \"\"\"\n line = ''.join([' ' * depth, '\\\\', 'sub' * depth, 'section{%s}' % caption])\n self.content.append(line)\n\n def populate_file(self, caption, suffix, path, depth):\n \"\"\"\n 输出文件\n :param caption: 标题\n :param suffix: 后缀名\n :param path: 相对路径\n :param depth: 遍历深度\n :return: list\n \"\"\"\n if suffix.lower() in settings.INPUT_TYPE:\n line = ''.join([' ' * depth, '\\\\input{%s}' % path])\n self.content.append(line)\n if suffix.lower() in settings.CODE_TYPE:\n line = ''.join([' ' * depth, '\\\\inputminted{%s}{%s}' % (suffix, path)])\n self.content.append(line)\n",
"id": "10335877",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "core.py"
},
{
"content": "# coding=utf-8\n\n# 使用`\\input`的后缀名\nINPUT_TYPE = [\n 'tex',\n]\n\n# 使用`\\inputminted`进行高亮的后缀名\nCODE_TYPE = [\n 'c',\n 'cpp',\n 'c++',\n 'java',\n 'txt',\n]\n",
"id": "6728369",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "settings.py"
}
] | 0 |
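core.py reads ./config.json as a list of objects with 'section', 'depth' and 'src' keys (src may be null for a bare heading). A hedged sketch of driving the generator with such a file; the section names and paths are invented for illustration, and core.py plus settings.py are assumed to be importable from the working directory:

    import json
    from core import Generator  # module shown above

    config = [
        {"section": "Data Structures", "depth": 0, "src": None},
        {"section": "Segment Tree", "depth": 1, "src": "code/segment_tree.cpp"},
        {"section": "Notes", "depth": 1, "src": "notes/intro.tex"},
    ]
    with open("./config.json", "w", encoding="utf-8") as f:
        json.dump(config, f, ensure_ascii=False, indent=2)

    print(Generator().populate())
    # \section{Data Structures}
    # \subsection{Segment Tree}
    # \inputminted{cpp}{code/segment_tree.cpp}
    # \subsection{Notes}
    # \input{notes/intro.tex}
    # (subsection and include lines are indented according to their depth)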
fredimaihub | [
{
"content": "from flask import Flask, Response, jsonify, render_template\nfrom base64 import b64encode\n\nfrom dotenv import load_dotenv, find_dotenv\nload_dotenv(find_dotenv())\n\nimport requests\nimport json\nimport os\nimport random\n\nSPOTIFY_CLIENT_ID = os.getenv(\"SPOTIFY_CLIENT_ID\")\nSPOTIFY_SECRET_ID = os.getenv(\"SPOTIFY_SECRET_ID\")\nSPOTIFY_REFRESH_TOKEN = os.getenv(\"SPOTIFY_REFRESH_TOKEN\")\n\n# scope user-read-currently-playing/user-read-recently-played\nSPOTIFY_URL_REFRESH_TOKEN = \"https://accounts.spotify.com/api/token\"\nSPOTIFY_URL_NOW_PLAYING = \"https://api.spotify.com/v1/me/player/currently-playing\"\nSPOTIFY_URL_RECENTLY_PLAY = \"https://api.spotify.com/v1/me/player/recently-played?limit=10\"\n\n\napp = Flask(__name__)\n\n\ndef getAuth():\n return b64encode(f\"{SPOTIFY_CLIENT_ID}:{SPOTIFY_SECRET_ID}\".encode()).decode(\"ascii\")\n\n\ndef refreshToken():\n data = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": SPOTIFY_REFRESH_TOKEN,\n }\n\n headers = {\"Authorization\": \"Basic {}\".format(getAuth())}\n\n response = requests.post(SPOTIFY_URL_REFRESH_TOKEN, data=data, headers=headers)\n return response.json()[\"access_token\"]\n\ndef recentlyPlayed():\n token = refreshToken()\n headers = {\"Authorization\": f\"Bearer {token}\"}\n response = requests.get(SPOTIFY_URL_RECENTLY_PLAY, headers=headers)\n\n if response.status_code == 204:\n return {}\n\n return response.json()\n\ndef nowPlaying():\n\n token = refreshToken()\n\n headers = {\"Authorization\": f\"Bearer {token}\"}\n\n response = requests.get(SPOTIFY_URL_NOW_PLAYING, headers=headers)\n\n if response.status_code == 204:\n return {}\n\n return response.json()\n\ndef barGen(barCount):\n barCSS = \"\"\n left = 1\n for i in range(1, barCount + 1):\n anim = random.randint(1000, 1350)\n barCSS += \".bar:nth-child({}) {{ left: {}px; animation-duration: {}ms; }}\".format(\n i, left, anim\n )\n left += 4\n\n return barCSS\n\ndef loadImageB64(url):\n resposne = requests.get(url)\n return b64encode(resposne.content).decode(\"ascii\")\n\ndef makeSVG(data):\n barCount = 85\n contentBar = \"\".join([\"<div class='bar'></div>\" for i in range(barCount)])\n barCSS = barGen(barCount)\n\n if data == {}:\n content_bar = \"\"\n recent_plays = recentlyPlayed()\n size_recent_play = len(recent_plays[\"items\"])\n idx = random.randint(0, size_recent_play - 1)\n item = recent_plays[\"items\"][idx][\"track\"]\n else:\n item = data[\"item\"]\n\n img = loadImageB64(item[\"album\"][\"images\"][1][\"url\"])\n artistName = item[\"artists\"][0][\"name\"].replace(\"&\", \"&\")\n songName = item[\"name\"].replace(\"&\", \"&\")\n\n dataDict = {\n \"content_bar\": contentBar,\n \"css_bar\": barCSS,\n \"artist_name\": artistName,\n \"song_name\": songName,\n \"img\": img,\n }\n\n return render_template(\"spotify.html.j2\", **dataDict)\n\n@app.route(\"/\", defaults={\"path\": \"\"})\n@app.route(\"/<path:path>\")\ndef catch_all(path):\n\n data = nowPlaying()\n svg = makeSVG(data)\n\n resp = Response(svg, mimetype=\"image/svg+xml\")\n resp.headers[\"Cache-Control\"] = \"s-maxage=1\"\n\n return resp\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n",
"id": "12219437",
"language": "Python",
"matching_score": 0,
"max_stars_count": 40,
"path": "api/spotify-playing.py"
}
] | 0 |
Square-face | [
{
"content": "import discord, asyncio\nfrom discord.ext import commands, menus\nfrom typing import Union, List\n\n\nclass Paginator(menus.Menu):\n \"\"\"Navigation menue with reactions\n\n Navigate a menu by reaction to reactions. A list of pages is passed and a\n start page. The start page is 0 by default. When the start function is ran\n and there is no specified message a new one will be sent. If there is one\n the reactions will be added to that message.\n There exists 5 buttons and 2 more planed.\n\n The 5 existing ones are:\n FIRST: Witch navigates to the first page in the list of pages\n BACK: Witch navigates to the previous page in the list of pages\n STOP: Witch stops the paginator from listening to reactions\n NEXT: Witch moves to the next page in the list of pages\n LAST: Witch moves to the last page in the list of pages.\n\n planed buttons:\n NUMBER: Choice a page to go to by sending a number in chat.\n INFO: Show info on what all the buttons do.\n\n Args:\n ----\n page: Optional[:class:`int`]\n The currently selected page. First page is 1. Defaults to 1\n\n\n Kwargs:\n -------\n user: Optional[:class:`discord.User`]\n The user who can edit this paginator.\n Can be None if anyone should be able to edit the paginator. Defaults\n to None.\n users: Optional[List[:class:`discord.User`]]\n A list of user who can edit this paginator. If user arg was\n specified this arg will be ignored. Can be None if anyone should be\n able to edit the paginator. Defaults to None.\n timeout: Optional[:class:`float`]\n The amount of time in seconds the user has between each reaction.\n If more time has passed the paginator will stop listen for reactions.\n Defaults to 120.0.\n delete_message_after: Optional[:class:`bool`]\n If the paginator should be deleted when the exit/stop button is\n pressed or the timeout is reached. Defaults to False.\n clear_reactions_after: Optional[:class:`bool`]\n If the paginator should clear all reactions when the exit/stop\n button is pressed or the timeout is reached. Defaults to True.\n check_embeds: Optional[:class:`bool`]\n If wther to verify embed permissions. Should not be active if the\n paginator doesn't contain any embeds. Defaults to True.\n message: Optional[:class:`discord.Message`]\n The message this paginator is active on. Set to None if a new\n message should be created. Defaults to None\n replace_footer: Optional[:class:`bool`]\n If the paginator should try to replace embed footers to\n \"Page: {current_page}/{total_pages}\" when using them. Note that no\n embeds with a aledy set footer will have the replaced. This only\n applyes to embeds without footers. 
Defaults to True.\n \"\"\"\n\n def __init__(self, *, user:discord.User=None, users:List[discord.User]=None, page:int=0, pages:List[Union[discord.Embed, str]]=None, timeout:float=180.0, delete_message_after:bool=False, clear_reactions_after:bool=True, check_embeds:bool=True, message:discord.Message=None, replace_footer:bool=True):\n \"\"\"Init\n\n Defining all variables for paginator.\n \"\"\"\n\n self.pages = pages or []\n self.page = page\n self.timeout = timeout\n self.delete_message_after = delete_message_after\n self.clear_reactions_after = clear_reactions_after\n self.replace_footer = replace_footer\n self.check_embeds = check_embeds\n self._can_remove_reactions = False\n self._Menu__tasks = []\n self._running = True\n self.message = message\n self.ctx = None\n self.bot = None\n self._author_id = None\n self._buttons = self.__class__.get_buttons()\n self._lock = asyncio.Lock()\n self._event = asyncio.Event()\n\n\n\n def add_page(self, page, index:int=None):\n \"\"\"Add a new page\n\n Add a new page to this paginator.\n\n args\n ----\n page: List[:class:`discord.Embed`, :class:`str`]\n The page that will be added to the paginator.\n index: Optional[:class:`int`]\n The position for this page, if None the page will be added to the end.\n If the index is to large it will be treted as if it was None.\n Defaults to None.\n \"\"\"\n\n if index is None:\n self.pages.append(page)\n return\n\n if index > len(self.pages)-1:\n self.pages.append(page)\n return\n\n if index < 0:\n self.pages.insert(0, page)\n return\n\n self.pages.insert(index, page)\n print(self.pages)\n\n\n def remove_page(self, index:int):\n \"\"\" Remove a page.\n\n Remove a page from the page list. If the index is under 0, the first\n page will be removed. If it is over the number of pages, the last page\n will be removed. If there are no pages nothing will be deleted.\n\n args\n ----\n index: :class:`int`\n the index of the page to be removed.\n \"\"\"\n\n if len(self.pages) == 0:\n # if there are no pages, don't remove anything.\n return\n\n if index < 0:\n # if the index is below 0, remove first page\n self.pages.pop(0)\n return\n\n if index > len(self.pages)-1:\n # if index is over the ucrrent amount of pages, remove last page\n self.pages.pop(len(self.pages)-1)\n return\n\n # remove page with index specified\n self.pages.pop(index)\n\n\n\n\n def __fix_embed(self, embed, page):\n \"\"\"Replace footer if posible\n\n Check if the embed has a footer, if it doesn't have,\n add one that says \"Page: {current_page+1}/{total_pages}\"\n\n args\n ----\n embed: :class:`discord.Embed`\n the embed object that will have its footer replaced if posible.\n page: :class:`int`\n the page for this embed.\n \"\"\"\n if embed.footer:\n return embed\n\n current_page = page\n total_pages = len(self.pages)\n\n embed.set_footer(text=f\"Page: {current_page+1}/{total_pages}\")\n return embed\n\n\n def _get_message(self):\n \"\"\"generate message edit coroutine\n\n Generates a coroutine to edit the paginator message. 
If replace_footer\n is active and the current page is a embed a attempt at setting a new one\n will be made.\n \"\"\"\n\n page = self.pages[self.page]\n # get current page\n\n if isinstance(page, discord.Embed):\n # the page is a embed\n\n if self.replace_footer:\n # replace footer is active, try to replace the embed footer\n page = self.__fix_embed(page, self.page)\n\n # return message edit coroutine for embed page\n return self.message.edit(content=None, embed=page)\n\n # return message edit coroutine for text only page\n return self.message.edit(content=page, embed=None)\n\n\n\n\n async def send_initial_message(self, ctx, channel):\n \"\"\"Start the paginator.\n\n Initiate the paginator if a message variable isn't alredy set.\n\n args\n ----\n ctx: :class:`discord.Context`\n the context for this paginator.\n channel: :class:`discord.TextChannel`\n the channel to send the initial message in.\n \"\"\"\n\n page = self.pages[self.page]\n # get current page\n\n if isinstance(page, discord.Embed):\n # the page is a embed\n if self.replace_footer:\n # the footer should be replaced if posible\n page = self.__fix_embed(page, self.page)\n\n # send and return message object for embed page\n return await channel.send(embed=page)\n\n # send and return message object for text only page\n return await channel.send(page)\n\n\n\n\n @menus.button('\\U000023ea')\n async def first(self, payload):\n \"\"\"Go to first page in paginator.\n\n Set current page to 0 and update message.\n \"\"\"\n\n # reset page\n self.page = 0\n\n return await self._get_message()\n\n\n @menus.button('\\U000025c0')\n async def back(self, payload):\n \"\"\"Go back one page in paginator.\n\n Set current page to one less than what it currently is and update message.\n If current page is 0 or lower, don't do anything.\n \"\"\"\n if self.page <= 0:\n # if page is 0 or below, don't do anything\n return\n\n # lower current page by 1\n self.page -= 1\n\n return await self._get_message()\n\n\n @menus.button('\\U000023f9')\n async def kill(self, payload):\n \"\"\"Stop paginator.\n\n Stop listening for reactions and if clear_reactions_after is active all\n reactions will be removed. If delete_message_after is active the message\n will be removed.\n \"\"\"\n\n # kill paginator\n self.stop()\n\n\n @menus.button('\\U000025b6')\n async def forward(self, payload):\n \"\"\"Go forward one page in paginator.\n\n Increase the current page by one and update message. If the current page is the total number of pages or above don't do anything.\n \"\"\"\n if self.page >= len(self.pages)-1:\n # if current page is above or equal to total number of pages,\n # don't do anything.\n return\n\n # increase current page by 1\n self.page += 1\n\n return await self._get_message()\n\n\n @menus.button('\\U000023e9')\n async def last(self, payload):\n \"\"\"Go to last page in paginator.\n\n Set current page to last page and update message.\n \"\"\"\n # set current page to total number of pages.\n self.page = len(self.pages)-1\n\n return await self._get_message()\n",
"id": "5879497",
"language": "Python",
"matching_score": 2.19801926612854,
"max_stars_count": 0,
"path": "utils/paginator.py"
},
{
"content": "'''Owner commands\n\nCommands that can only be accessed by the bot owner(s).\n'''\n\n\nimport discord, datetime, asyncio\nfrom discord.ext import commands\nfrom typing import Optional\n\n\n# symbols\nSTOP = \"\\U0001f6d1\" # 🔁\nRELOAD = \"\\U0001f501\" # 🛑\n\n\nclass Owner(commands.Cog):\n \"\"\"Owner commands\n\n Commands that can only be invoked by bot owner(s).\n \"\"\"\n\n def __init__(self, bot):\n \"\"\"Init\n\n Initiate Cog variables\n\n Args:\n ----\n bot: :class:`commands.Bot`\n The bot object this Cog is part of.\n \"\"\"\n self.bot = bot\n\n def reload_cogs(self, cogs, ctx):\n '''Reload a list of cogs/modules\n\n Go through a list of cogs and reload the.\n For the ones that fails, save the error message\n and set success variable to false.\n The success variable is set to false when a cog has failed to reload.\n\n There might be a error if the cog failed to load last time around or if the cog hasn't been loaded from before.\n The function will ignore all\n\n Args:\n ----\n cogs: List[:class:`str`]\n A list of all the cogs that shall be reloaded.\n '''\n success, responses = True, []\n\n for cog in cogs:\n # go through each cog in cog list\n try:\n\n # try to load and unload the cog\n try:\n self.bot.unload_extension(cog)\n\n except commands.errors.ExtensionNotLoaded:\n pass\n\n # load the cog\n self.bot.load_extension(cog)\n\n except Exception as e:\n # the cog errored when loading\n\n # set success variable to false and add error message in response\n success = False\n responses.append(f\"{self.bot.smart_emojis.get_emoji('no', ctx.channel)} {cog}:```cmd\\n{e}```\")\n\n else:\n responses.append(f\"{self.bot.smart_emojis.get_emoji('yes', ctx.channel)} {cog}\")\n\n\n # return response\n return responses, success\n\n async def retry_cogs(self, ctx:commands.Context, msg:discord.Message, cogs:list):\n '''Reload a list of cogs more than once\n\n This function tries to load a list of cogs, and if loading one fails the developer can react with the repeat reaction to try loading the cogs again.\n This can be done as many times as necessary to fix the issue\n\n Args:\n -----\n ctx: :class:`commands.Context`\n The current context\n msg: :class:`discord.Message`\n The active message\n cogs: List[:class:`str`]\n A list of the cogs to check for\n '''\n\n # refresh reactions\n await msg.clear_reactions()\n await msg.add_reaction(RELOAD)\n await msg.add_reaction(STOP)\n\n def validate_request(reaction, user) -> bool:\n '''Validating reaction\n\n Validate that the detected reaction was done by the author,\n was one of two options and on the right message.\n\n Args:\n -----\n reaction: :class:`discord.Reaction`\n The reaction object returned from reaction\n user: :class:`discord.User`\n The user who reacted\n '''\n valid_response = False\n \n if user == ctx.author:\n if reaction.emoji in [RELOAD, STOP]:\n if reaction.message == msg:\n valid_response = True\n \n return valid_response\n\n while True:\n try:\n # wait 2 minutes for a reaction on the message\n reaction, _ = await self.bot.wait_for(\"reaction_add\", timeout=120.0, check=validate_request)\n\n except asyncio.TimeoutError:\n # 2 minutes passed without a reaction\n return await msg.clear_reactions()\n\n else:\n # a reaction was detected\n\n if reaction.emoji == STOP:\n return await msg.clear_reactions()\n\n # reload the cogs and edit message\n await msg.edit(content=self.bot.smart_emojis.get_emoji(\"loading\", ctx.channel))\n response, success = self.reload_cogs(cogs)\n await msg.edit(content=\"\\n\".join(response))\n\n if 
success:\n return await msg.clear_reactions()\n\n @commands.command(hidden=True, brief=\"Some owner(s) only information about the bot.\", aliases=[\"owner\"])\n @commands.is_owner()\n async def dev(self, ctx:commands.Context):\n '''Developer bot info\n\n Shows information about the bot that only the developer has to know about.\n '''\n\n # create embed\n embed = discord.Embed(\n title=f\"{self.bot.user.name} statistics\",\n description=f\"{self.bot.user.name} is currently in `{len(self.bot.guilds)} guilds(s)` and can see `{len(self.bot.users)} user(s)`.\\nThe bot has a total of `{len(self.bot.commands)} command(s)` in `{len(self.bot.extensions)} module(s)`\",\n color=0xFF0000,\n timestamp=datetime.datetime.utcnow()\n ).set_author(\n name=self.bot.user.name,\n icon_url=self.bot.user.avatar_url\n )\n\n # send embed\n return await ctx.send(embed=embed)\n\n\n @commands.command(hidden=True, brief=\"Reload/load one or more modules.\")\n @commands.is_owner()\n async def reload(self, ctx:commands.Context, *, cogs: Optional[str]):\n '''Use this command to reload one or more bot cogs/modules.\n\n Leave the `cogs` argument empty to reload all currently loaded cogs.\n Use the cog name and cog path (for example \"bot.cogs.owner\") to reload a\n specific cog. Use multiple cog paths to reload a specified list of cogs\n (for example \"bot.cogs.owner bot.cogs.info\") and remember to separate\n the cog names with spaces.\n '''\n\n\n if cogs:\n # if one or more cogs are specified\n # split them where there is spaces\n cogs = cogs.split()\n\n else:\n # if no cogs where specified reload the\n cogs = list(self.bot.extensions)\n\n # reload the cogs\n response, success = self.reload_cogs(cogs, ctx)\n\n if success:\n # all the cogs was successfully reloaded\n return await ctx.send(\"\\n\".join(response))\n\n # one or more cogs failed to reload correctly\n msg = await ctx.send(\"\\n\".join(response))\n\n # ask retry question\n await self.retry_cogs(ctx, msg, cogs)\n\n @commands.group(hidden=True, aliases=[\"bl\"], brief=\"View and edit the blacklists for this bot\")\n @commands.is_owner()\n async def blacklist(self, ctx):\n if ctx.invoked_subcommand:\n return\n\n await ctx.send(\"This is a decoy command\")\n\n @blacklist.group(hidden=True, aliases=[\"u\"], brief=\"Show blacklists for users\")\n @commands.is_owner()\n async def user(self, ctx):\n \"\"\"Show all blacklisted users\n\n Display every user who is currently blacklisted and their individual reasons.\n \"\"\"\n\n if ctx.invoked_subcommand:\n return\n\n blacklists = {}\n\n for entry in self.bot.cache.blacklist.data:\n # go trough blacklisted users\n\n if entry[2] == \"server\":\n continue\n\n try:\n user = await self.bot.fetch_user(entry[1])\n except discord.errors.NotFound:\n user = entry[1]\n blacklists[user]=entry[3]\n\n await ctx.send(blacklists)\n\n @user.command(hidden=True, name=\"blacklist\", aliases=[\"bl\", \"add\", \"a\"], brief=\"blacklist a user\")\n @commands.is_owner()\n async def add(self, ctx, subject_id:int, reason:str=\"No reason specified.\"):\n \"\"\"Blacklist a user\n\n Add a user or a server to the bot's blacklist.\n \"\"\"\n\n if subject_id in [d[1] for d in self.bot.cache.blacklist.data]:\n # fetch user from discord\n user = await self.bot.fetch_user(subject_id)\n\n # create response embed\n embed = discord.Embed(\n title=f\"{user.name} is already blacklisted\",\n description=\"This user is already blacklisted.\",\n color=0xFF0000\n ).set_author(\n name=user.__str__(),\n icon_url=user.avatar_url\n )\n\n # send response\n return await 
ctx.send(embed=embed)\n\n\n # check if subject is user or server or invalid\n try:\n # attempt to fetch user from discord\n user = await self.bot.fetch_user(subject_id)\n\n except discord.errors.NotFound:\n # failed to fetch user, not a userid\n # invalid user\n return await ctx.send(f\"Invalid user id.\")\n\n\n # add user to database\n await self.bot.cache.blacklist.add(user.id, \"user\", reason)\n\n # create response embed\n embed = discord.Embed(\n title=f'{user.name} is now blacklisted.',\n description=reason,\n color=0x00FF00\n ).set_author(\n name=user.__str__(),\n icon_url=user.avatar_url\n )\n\n # send response\n return await ctx.send(embed=embed)\n\n\n\n @blacklist.command(hidden=True, aliases=[\"ubl\", \"remove\", \"r\"], brief=\"UnBlacklist a user or server\")\n @commands.is_owner()\n async def unblacklist(self, ctx, subject):\n await ctx.send(f\"Unblacklisted \\\"{subject}\\\"\")\n\ndef setup(bot):\n bot.add_cog(Owner(bot))\n",
"id": "995924",
"language": "Python",
"matching_score": 4.311875820159912,
"max_stars_count": 0,
"path": "bot/cogs/owner.py"
},
{
"content": "import discord, difflib, datetime\nfrom discord.ext import commands\n\nclass System(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n @commands.Cog.listener(\"on_message_edit\")\n async def edit_command(self, before, after):\n del before\n\n await self.bot.process_commands(after)\n\n\n\n\n @commands.Cog.listener(\"on_command_error\")\n async def command_not_found(self, ctx, error):\n \"\"\"Atempt at invoking a command that doesn't exist\n\n A user tried to invoke a command that doesn't exist.\n Try to find commands with matching names to the atempted commands.\n For example if the user misspelled a command name.\n \"\"\"\n if not isinstance(error, commands.CommandNotFound):\n # if the error isn't a CommandNotFound error\n return\n\n # get the atempted command name\n command = ctx.message.content.split()[0][len(ctx.prefix):]\n\n # get list of all commands\n command_list = self.bot.get_normal_commands(await self.bot.is_owner(ctx.author))\n\n # the matches from command list to atempted command\n matches = difflib.get_close_matches(command, command_list, 5, 0.6)\n guessed_commands=[]\n\n for match in matches:\n # get actual command name for match\n guessed_commands.append(self.bot.get_command(match).qualified_name)\n\n # remove any dupes\n guessed_commands=list(set(guessed_commands))\n\n\n if len(matches) > 0:\n # if one or more matches was found, return them\n await ctx.send(f\":warning: **Command Not Found!**\\nDid you mean:\\n- `\"+'`\\n- `'.join(guessed_commands)+\"`\")\n\n\n @commands.Cog.listener(\"on_command_error\")\n async def error_log(self, ctx, error):\n \"\"\"A command has failed because of a error\n\n A command has been used and for some reason didn't finish properly.\n The user will be notified and the error message will be loged.\n \"\"\"\n\n # get channel\n channel = self.get_config_channel(self.bot.config.logging.Errors)\n\n if not channel:\n # invalid channel\n return\n \n\n if isinstance(error, commands.CommandNotFound):\n # error is alredy checked for, ignore\n return\n\n\n # create log embed\n embed = discord.Embed(\n title=\"Command Error\",\n description=ctx.message.content,\n color=0xFF0000,\n timestamp=ctx.message.created_at\n ).add_field(\n name=\"Message\",\n value=f\"[Jump to message]({ctx.message.jump_url})\"\n )\n\n await channel.send(self.bot.config.channel.error).send(embed=embed)\n return await ctx.send(\"Uh oh, I don't feel so good.\")\n\n\n def get_config_channel(self, id: int=None):\n \"\"\"Get a channel from id\n\n Take a channel id and make sure it is a valid id, a channel the bot has\n access to and that the bot has necessary permissions to send embeds in\n the channel.\n\n args\n ----\n id: :class:`int`\n The id for the channel. Can be None but that will automatically\n return None. 
Defaults to None.\n\n returns\n -------\n :class:`discord.TextChannel`\n The channel object retreaved from the id.\n :class:`NoneType`\n If the channel was invalid or didn't exist.\n \"\"\"\n\n\n if id is None:\n # if the id is None\n return None\n\n # get channel object from id\n channel = self.bot.get_channel(id)\n\n if not channel:\n # the channe was None\n # the channel is invalid\n return None\n\n if channel.guild.me.permissions_in(channel).embed_links and channel.guild.me.permissions_in(channel).send_messages:\n # the bot has send messages permission as well as embed links permission\n return channel\n\n print(f\"Missing permissions in {channel.name} for logging\")\n return None\n\n\n\n\n @commands.Cog.listener(\"on_command\")\n async def command_log(self, ctx):\n \"\"\"A command was invoked\n\n This function gets called each time a user invokes a command on the bot.\n The function generates a log embed to send to a bot owner only channel\n for logging\n \"\"\"\n\n # get channel\n channel = self.get_config_channel(self.bot.config.logging.Commands)\n\n if not channel:\n # invalid channel\n return\n\n\n # generate embed\n embed = discord.Embed(\n title=f'A command was used!',\n description = ctx.message.content,\n color=0x00FF00,\n timestamp=datetime.datetime.utcnow()\n ).set_author(\n name=ctx.author,\n icon_url=ctx.author.avatar_url\n ).add_field(\n name=\"User\",\n value=f'Name: `{ctx.author.name}`\\nDiscriminator: `{ctx.author.discriminator}`\\nID: `{ctx.author.id}`',\n inline=False\n ).add_field(\n name=\"Server\",\n value=f'Name: `{ctx.guild.name}`\\nID: `{ctx.guild.id}`\\nOwner: `{ctx.guild.owner}`',\n inline=True\n ).add_field(\n name=\"Channel\",\n value=f'Name: `{ctx.channel.name}`\\nMention: {ctx.channel.mention}\\nID: `{ctx.channel.id}`',\n inline=True,\n )\n\n # send logging embed\n await channel.send(embed=embed)\n\n\n\n\n\n @commands.Cog.listener(\"on_guild_join\")\n async def guild_join_log(self, guild):\n \"\"\"A guild was joined\n\n This function gets called each time the bot is added to a guild.\n The function generates a log embed to send to a bot owner only channel\n for logging\n \"\"\"\n\n # get channel object\n channel = self.get_config_channel(self.bot.config.logging.Servers)\n\n if not channel:\n # invalid channel\n return\n\n # generate embed\n embed = discord.Embed(\n title=f'I was added to a server!',\n description=f' Name: `{guild.name}`\\n \\\n ID: `{guild.id}`\\n \\\n Owner: `{guild.owner}`',\n color=0x00FF00,\n timestamp=datetime.datetime.utcnow()\n ).set_footer(\n text=guild.owner,\n icon_url=guild.owner.avatar_url\n ).set_thumbnail(\n url=guild.icon_url\n )\n\n # send embed\n await channel.send(embed=embed)\n\n\n\n\n @commands.Cog.listener(\"on_guild_remove\")\n async def guild_leave_log(self, guild):\n \"\"\"A guild was left\n\n This function gets called each time the bot gets removed from a guild.\n The function generates a log embed to send to a bot owner only channel\n for logging\n \"\"\"\n\n # get channel object\n channel = self.get_config_channel(self.bot.config.logging.Servers)\n\n if not channel:\n # invalid channel\n return\n\n # generate embed\n embed = discord.Embed(\n title=f'I was removed from a server',\n description=f' Name: `{guild.name}`\\n \\\n ID: `{guild.id}`\\n \\\n Owner: `{guild.owner}`',\n color=0xFF0000,\n timestamp=datetime.datetime.utcnow()\n ).set_footer(\n text=guild.owner,\n icon_url=guild.owner.avatar_url\n ).set_thumbnail(\n url=guild.icon_url\n )\n\n # send embed\n await channel.send(embed=embed)\n\n\n\n\ndef 
setup(bot):\n bot.add_cog(System(bot))\n",
"id": "6412374",
"language": "Python",
"matching_score": 2.969534397125244,
"max_stars_count": 0,
"path": "bot/cogs/system.py"
},
{
"content": "import discord\nfrom discord.ext import commands\nfrom utils.paginator import Paginator\nfrom typing import Union\n\n\nclass MyHelp(commands.HelpCommand):\n '''Show information about all commands, the commands in a module or a specific command.\n Uses a paginator so you can navigate without reinvoking the help command.'''\n\n async def send_bot_help(self, mapping):\n '''Show all commands and what modules they are in.\n \n Go trough each cog/module and its corresponding commands.\n Make a embed showing all the cogs and their commands.\n '''\n\n # defining variables\n ctx = self.context\n bot = self.context.bot\n prefix = self.clean_prefix\n\n # create embed\n embed = discord.Embed(\n color=bot.config.Color,\n title=f\"{bot.user.name} help\",\n description=bot.description\n )\n\n for extension in bot.cogs.values():\n # go through all extensions\n \n if extension.qualified_name == \"Jishaku\":\n # ignore the extension if it was Jishaku\n continue\n\n # a list of all the available commands in this extensions\n commands = []\n\n for cmd in extension.walk_commands():\n # go through all commands\n\n if cmd.hidden and not await bot.is_owner(ctx.author):\n # if the command is hidden and the author is not a bot owner\n continue\n \n if cmd.parent:\n # if the command is a subcommand, ignore it\n continue\n\n # add the command to command list\n commands.append(f\"`{cmd.name:12}` - {cmd.brief}\")\n\n if len(commands) == 0:\n # ignore this extension if it didn't have any commands\n continue\n\n # add the extension to embed with all of its commands\n embed.add_field(\n name=extension.qualified_name,\n value='\\n'.join(commands),\n inline=False\n )\n\n # send help embed\n return await ctx.send(embed=embed)\n\n async def send_cog_help(self, cog):\n '''Send information about a module\n \n Show a cogs description and its command.\n The commands should also have their brief description as well\n as their signatures\n '''\n \n # defining variables\n ctx = self.context\n bot = self.context.bot\n prefix = self.clean_prefix\n is_owner = await bot.is_owner(ctx.author)\n \n if cog.qualified_name == \"Jishaku\" and not is_owner:\n # ignore all commands from Jishaku cog if the author is\n # not a bot owner\n await ctx.send(f'No command called \"{cog.qualified_name}\" found.')\n return\n \n # creating help embed\n embed = discord.Embed(\n color=bot.config.Color,\n title=f\"{cog.qualified_name} help\",\n description=cog.description\n )\n \n # lift of available commands \n command_list = []\n \n for cmd in cog.get_commands():\n # go through all the commands in this cog\n if cmd.hidden and not await bot.is_owner(ctx.author):\n # ignore the command if it is hidden\n continue\n \n \n aliases = [cmd.qualified_name]\n # list of all the names this command has\n # the original name will always be added first\n \n for alias in cmd.aliases:\n # go trough its aliases and add them\n aliases.append(alias)\n \n # add the command to command list\n command_list.append(f\"`{prefix}[{'|'.join(aliases)}]`\\n - {cmd.brief}\\n\")\n \n \n if len(command_list) == 0:\n # if no commands existed for this cog,\n # ignore it and send error message\n await ctx.send(f'No command called \"{cog.qualified_name}\" found.')\n return\n \n # add the commands to help embed\n embed.add_field(\n name=\"Commands\",\n value=\"\\n\".join(command_list)\n )\n \n # send embed\n await ctx.send(embed=embed)\n return\n \n \n \n \n def generate_command_embed(self, command) -> discord.Embed:\n \"\"\"Generate embed object for command\n \n Generate a help 
embed for a command. Works with both commands and\n groups. Info that will be shown includes, command name, command\n description, syntax, command aliases, its module and if possible,\n its subcommands\n \n args\n ----\n command: Union[:class:`commands.Command`, :class:`commands.Group`]\n the command to generate embed for\n \n returns\n -------\n :class:`discord.Embed`\n the embed object for this command\n \"\"\"\n \n # defining variables\n ctx = self.context\n bot = ctx.bot\n prefix = self.clean_prefix\n \n # generate syntax\n base_command = f\"{prefix}{command.qualified_name}\"\n \n if not command.signature:\n # if it has no variables that has to be passed\n # just use the prefix + command WITHOUT a space after.\n syntax = f\"`{base_command}`\"\n else:\n # if the command has a signature,\n # use prefix + command + \" \" + signature\n # the space is important for the formating\n syntax = f\"`{base_command} {command.signature}`\"\n \n # create base embed\n embed=discord.Embed(\n title=f\"{command.name} help\",\n description=f\"{command.help or 'This command has no description!'}\",\n color=bot.config.Color\n ).add_field(\n name=\"Syntax\",\n value=syntax\n )\n \n # what module this command is from\n embed.add_field(\n name = \"Module\",\n value = command.cog.qualified_name.capitalize(),\n inline=False\n )\n \n # if the command is a subcommand, show its parent(s)\n if command.parent:\n embed.add_field(\n name=\"Parent(s)\",\n value=f'`{command.full_parent_name.replace(\" \", \"` > `\")}`'\n )\n\n # --adding the aliases--\n \n # default text\n aliases=\"No aliases.\"\n\n # if the command has no aliases, set the value to \"No aliases.\"\n if len(command.aliases) > 0:\n \n # the command has aliases\n aliases=f\"`{'` | `'.join(list(command.aliases))}`\"\n \n # add field\n embed.add_field(\n name = \"Aliase(s)\", \n value = aliases\n )\n \n \n # --adding the subcommands\n if isinstance(command, commands.Group):\n # this is only done if the command is a group\n # normal commands can't have subcommands\n \n # default text\n sub = \"No subcommands\"\n \n # if the command has no subcommands,\n # set the value to \"No subcommands.\"\n if len(command.commands) > 0:\n \n # the command has subcommands\n \n # generate list with all subcommand names\n subcommands = [cmd.name for cmd in list(command.commands)]\n sub=f\"`{'` | `'.join(subcommands)}`\"\n \n # add the field\n embed.add_field(\n name = \"Subcommand(s)\",\n value = sub\n )\n\n # return compleated embed\n return embed\n\n\n\n\n def _is_valid(self, command: Union[commands.Command, commands.Group], is_owner:bool=False):\n \"\"\"Is a command good to show to anyone\n \n Checks if the supplied command is hidden\n \n args\n ----\n command: Union[:class:`discord.Command`, :class:`discord.Group`]\n the command to check\n is_owner: Optional[:class:`bool`]\n if the author is a bot owner. Defaults to False.\n \n returns\n -------\n :class:`bool`\n if the command is valid or not\n \"\"\"\n\n if is_owner:\n # if the user is a owner all other commands will be valid\n return True\n \n # if the command is not hidden, return True\n return not command.hidden\n\n\n\n\n async def generate_embeds_for_normal_command(self, command):\n \"\"\"Generate a list of embeds for all the commands for this bot.\n \n A list of embeds for each and every command in the bot. This includes all\n commands with subcommands or so called \"groups\", they will have a special\n area for listing their subcommands. 
The function will also return the index\n for the command requested.\n \n args\n ----\n commands: Union[:class:`discord.Command`, :class:`discord.Group`]\n \n returns\n -------\n List[:class:`discord.Embed`]:\n A list with a embed for each command this bot has.\n \n :class:`int`\n The index in the embed list for the requested command.\n \"\"\"\n \n # define variables\n embeds = []\n index = None\n ctx = self.context\n bot = ctx.bot\n attempted_command = ctx.message.content.split()[1]\n is_owner = await bot.is_owner(ctx.author)\n \n if command.cog.qualified_name == \"Jishaku\" and not is_owner:\n await ctx.send(f'No command called \"{attempted_command}\" found.')\n return\n \n for cog in bot.cogs.values():\n # go trough each cog this bot currently has active\n \n for cmd in cog.walk_commands():\n # go through each command in this cog\n \n if self._is_valid(cmd, is_owner) and not cmd.parent:\n # if command is valid, create embed\n embed=self.generate_command_embed(cmd)\n embeds.append(embed)\n \n if cmd == command:\n # if this command is the requested one, set the index\n index = embeds.index(embed)\n\n return embeds, index\n\n \n async def generate_embeds_for_subcommand(self, command):\n \"\"\"Generate a list of embeds for all the subcommands for this commands parent\n \n A list of embeds for each and every command in this commands parent. This includes all\n commands with subcommands or so called \"groups\", they will have a special\n area for listing their subcommands. The function will also return the index\n for the command requested.\n \n args\n ----\n commands: Union[:class:`discord.Command`, :class:`discord.Group`]\n \n returns\n -------\n List[:class:`discord.Embed`]:\n A list with a embed for each command this bot has.\n \n :class:`int`\n The index in the embed list for the requested command.\n \"\"\"\n \n # define variables\n embeds = []\n index = None\n ctx = self.context\n bot = ctx.bot\n attempted_command = \" \".join(ctx.message.content.split()[1])\n is_owner = await bot.is_owner(ctx.author)\n \n if command.cog.qualified_name == \"Jishaku\" and not is_owner:\n await ctx.send(f'No command called \"{attempted_command}\" found.')\n return\n \n for cmd in command.parent.commands:\n # go through each command in this cog\n \n if self._is_valid(cmd, is_owner):\n # if command is valid, create embed\n embed=self.generate_command_embed(cmd)\n embeds.append(embed)\n \n if cmd == command:\n # if this command is the requested one, set the index\n index = embeds.index(embed)\n\n return embeds, index\n \n \n \n \n async def send_command_help(self, command):\n \"\"\"Show info on a command.\n\n Make a paginator with all the commands and the first page is the requested command.\n \"\"\"\n # get variables\n ctx = self.context\n bot = ctx.bot\n attempted_command = \" \".join(ctx.message.content.split()[1:])\n is_owner = await bot.is_owner(ctx.author)\n \n \n # return error if the command is hidden\n if command.hidden and not is_owner:\n await ctx.send(f'No command called \"{attempted_command}\" found.')\n return\n\n if command.parent:\n embeds, index = await self.generate_embeds_for_subcommand(command)\n else:\n embeds, index = await self.generate_embeds_for_normal_command(command)\n \n \n paginator = Paginator(page=index, pages=embeds)\n await paginator.start(ctx)\n \n async def send_group_help(self, command):\n \"\"\"Show info on a command.\n\n Make a paginator with all the commands and the first page is the requested command.\n \"\"\"\n # get variables\n ctx = self.context\n bot = ctx.bot\n 
attempted_command = \" \".join(ctx.message.content.split()[1:])\n is_owner = await bot.is_owner(ctx.author)\n \n \n # return error if the command is hidden\n if command.hidden and not is_owner:\n await ctx.send(f'No command called \"{attempted_command}\" found.')\n return\n \n if command.parent:\n embeds, index = await self.generate_embeds_for_subcommand(command)\n else:\n embeds, index = await self.generate_embeds_for_normal_command(command)\n \n \n paginator = Paginator(page=index, pages=embeds)\n await paginator.start(ctx)\n\n\ndef setup(bot):\n bot._original_help_command = bot.help_command\n\n bot.help_command = MyHelp(command_attrs={\"brief\":\"Get information about my commands!\"})\n bot.help_command.add_check(commands.bot_has_permissions(embed_links=True, send_messages=True))\n bot.help_command.cog = bot.cogs[\"Info\"]\n",
"id": "3247785",
"language": "Python",
"matching_score": 3.4709510803222656,
"max_stars_count": 0,
"path": "bot/cogs/help.py"
},
{
"content": "import datetime, os\nfrom discord.ext.commands import Bot\nfrom discord import Intents\n\nclass InviteTracker(Bot):\n '''Bot subclass.\n\n Base class for the bot.\n Contains more variables and usefull functions.\n\n Args:\n ----\n config: :class:`utils.config.Config`\n The config class for the config.yml file that stores data necessary to start the bot.\n '''\n\n def __init__(self, config):\n \"\"\"Creating variables for bot subclass.\n\n Assign all the values for the bot subclass and init bot instance.\n\n Args:\n ----\n config: :class:`utils.config.Config`\n The config class for the config.yml file that stores data necessary to start the bot.\n \"\"\"\n self.config=config\n self.start_time=datetime.datetime.utcnow()\n\n intents = Intents.default()\n intents.members = True\n super().__init__(\n command_prefix=self.config.Prefix,\n case_sensitive=False,\n intents=intents,\n description=self.config.Description\n )\n\n\n def ignite(self, token):\n '''Start bot\n\n Start the bot using its token.\n The token should be set in the config.yml file.\n This function should only be run once on startup.\n\n Args:\n -----\n token: :class:`str`\n The bot token. Set the Token value in config.yml.\n\n '''\n self.token = token\n self.run(self.token)\n\n\n async def on_ready(self):\n '''Bot has connected to discord\n\n Sets start time to currently UTC datetime and prints out startup message.\n '''\n\n self.start_time=datetime.datetime.utcnow()\n\n print(f\"{self.user.name} is now online!\")\n\n\n\n def load_extensions(self, extensions:list = [\"jishaku\", \"bot.cogs.owner\", \"bot.cogs.info\", \"bot.cogs.system\", \"bot.cogs.help\"]):\n '''Load bot extensions\n\n Load a list of bot extensions.\n Most if not all extensions must have \"bot.cogs.\" before the file name as bot/cogs/ is the folder most of the extensions are in.\n The one exception is the jishaku extension as it is not a file in this project but a imported library.\n\n\n Args:\n ----\n extensions: :class:`list`\n A list of all the extension that should be loaded.\n '''\n\n if \"jishaku\" in extensions:\n os.environ[\"JISHAKU_NO_UNDERSCORE\"] = \"True\"\n\n for extension in extensions:\n # go through extensions to load them\n\n precentage = round(((extensions.index(extension)+1)/len(extensions))*100)\n\n start = f\"{precentage:3}% - \"\n\n try:\n # attemt to load extension\n self.load_extension(extension)\n\n except Exception as e:\n # send error if loading extension failed\n print(f\"{start}Failed to load extension: {extension}\\n{e}\\n\")\n\n else:\n print(f\"{start}Loaded extension: {extension}\")\n return\n\n def get_normal_commands(self, is_owner:bool=False) -> list:\n \"\"\"Get a list of all available commands.\n\n Generates a list of all the commands and groups that are not owner only\n and from the jishaku cog. 
Not that this function won't return any subcommands.\n\n args\n ----\n is_owner: :class:`bool`\n if the user who is to see these commands is a bot owner.\n\n\n returns\n -------\n List[Union[:class:`commands.command`, :class:`commands.group`]]\n \"\"\"\n\n command_list = []\n\n for cog in self.cogs.values():\n # go through all cogs and the commands inside of each cog\n\n \n if cog.qualified_name == \"Jishaku\":\n # ignore the jishaku cog\n continue\n\n for cmd in cog.walk_commands():\n \n if cmd.parent:\n # ignore all commands that has a parent command\n continue\n\n if not cmd.hidden or is_owner:\n # add the command if it is not hidden or the user is a bot owner.\n command_list.append(cmd)\n\n\n return command_list\n\n\n def get_subcommands(self, is_owner:bool=False) -> list:\n \"\"\"Get a list of all available subcommands.\n\n Generates a list of all the commands and groups that are not owner only\n and from the jishaku cog. Not that this function won't return any subcommands.\n\n args\n ----\n is_owner: :class:`bool`\n if the user who is to see these commands is a bot owner.\n\n\n returns\n -------\n List[Union[:class:`commands.command`, :class:`commands.group`]]\n \"\"\"\n\n command_list = []\n\n for cog in self.cogs.values():\n\n # ignore the jishaku cog\n if cog.qualified_name == \"Jishaku\":\n continue\n\n for cmd in cog.walk_commands():\n \n if not cmd.parent:\n # ignore all commands that doesn't have a parent command.\n continue\n\n if not cmd.hidden or is_owner:\n # add the command if it is not hidden and the user is not a bot owner.\n command_list.append(cmd)\n\n\n return command_list\n",
"id": "6664446",
"language": "Python",
"matching_score": 3.345223903656006,
"max_stars_count": 0,
"path": "bot/main.py"
},
{
"content": "'''Start the bot.\n\nStart the bot and all files that has to be started before it.\n'''\n\nfrom utils.config import Config\nfrom utils.db_manager import Cache, DataBase\nfrom utils.emojis import Emojis\nimport bot.main as Bot\n\nif __name__ == \"__main__\":\n \n # initiating config instance\n config = Config()\n \n # initiating bot instance, set config and load extensions\n bot = Bot.InviteTracker(config)\n bot.load_extensions()\n \n # intiate modules\n DataBase(bot)\n Cache(bot, bot.db)\n Emojis(bot)\n\n \n # connect database and run bot\n bot.ignite(config.Token)",
"id": "3507825",
"language": "Python",
"matching_score": 1.6982884407043457,
"max_stars_count": 0,
"path": "launcher.py"
},
{
"content": "import asyncio\nimport aiomysql as mysql\nfrom discord.ext.commands import Bot\nfrom typing import Optional\n\nclass SubCache(object):\n \n class InvalidEnumValue(Exception):\n pass\n \n def __init__(self, bot, table):\n self.bot = bot\n self.db = bot.db\n self.parent = bot.cache\n self.table = table\n self.data = []\n \n async def fetch(self):\n \"\"\"Fetch all data from table for this subcache\"\"\"\n \n self.data = []\n \n \n cursor = await self.db.execute(f\"SELECT * FROM {self.table}\")\n results = await cursor.fetchall()\n \n for result in results:\n self.data.append(result)\n\nclass blacklist(SubCache):\n async def add(self, id:int, type:str, reason:Optional[str]=\"No reason specified.\"):\n \"\"\"Add a user or server to blacklist.\n \n The type can only be \"user\" or \"guild\"\n \n args\n ----\n id: :class:`int`\n The id of the server or user to be blacklisted.\n type: :class:`str`\n If its a server or a user that is going to be blacklisted.\n Can only be \"user\" or \"guild\"\n reason: Optional[:class:`str`]\n The reason this server or user is blacklisted.\n Defaults to \"No reason specified.\"\n \n raises\n -------\n :Exception:`InvalidEnumValue`\n the type is not \"user\" or \"guild\"\n \"\"\"\n \n if not type.lower() in [\"user\", \"guild\"]:\n raise self.InvalidEnumValue(f'Type can only be \"user\" or \"guild\" and not \"{type}\"')\n \n if id in [d[1] for d in self.data]:\n return\n \n await self.db.execute(\"INSERT INTO blacklist (id, type, reason) VALUES (%s, %s, %s)\", (id, type, reason), commit=True)\n cursor = await self.db.execute(\"SELECT index_id FROM blacklist WHERE id = %s\", (id,))\n \n index = (await cursor.fetchall())[0][0]\n self.data.append((index, id, type, reason))\n\n\nclass Cache():\n \"\"\"Cache manager\n \n A class to store cache data.\n Has multiple functions to make data managing easy.\n \"\"\"\n def __init__(self, bot:Bot, db):\n self.bot = bot # the bot object\n self.bot.cache = self\n self.db = db # the database\n self.blacklist = blacklist(bot, \"blacklist\")\n\nclass DataBase():\n \"\"\"Database manager.\n \n Connected to database and has some functions with prewritten\n queries that is used a lot.\n \"\"\"\n \n class NotConnected(Exception):\n \"\"\"Database hasn't been connected yet.\n \n If the database connection takes a while to setup or failed\n all together.\n \"\"\"\n pass\n \n class ConnectionFailed(Exception):\n \"\"\"if connecting to database failed\"\"\"\n \n pass\n \n \n def __init__(self, bot):\n self.bot = bot\n self.bot.db = self\n self.connected = False\n self.db = None\n self.loop = asyncio.get_event_loop()\n task = self.loop.create_task(self.connect())\n if not self.loop.run_until_complete(task):\n raise self.ConnectionFailed(\"Failed to connect to db\")\n \n async def connect(self):\n \"\"\"Connect database manager to database.\n \n Use config data to connect to database.\n \"\"\"\n \n # creating asyncio loop\n loop = asyncio.get_event_loop()\n config = self.bot.config\n\n\n # Attempting to connect to database\n try:\n self.db = await mysql.create_pool(\n host = config.db.Host,\n port = config.db.Port,\n user = config.db.User,\n password = <PASSWORD>,\n db = config.db.DBName,\n loop = loop\n )\n except Exception as e:\n print(e)\n return False\n else:\n # connection was successfully established\n \n print(\"\\nSuccessfully connected to database!\")\n self.connected = True\n \n # return database object\n return True\n \n \n async def execute(self, query:str, args:tuple=(), *, commit:bool=False):\n \"\"\"Run a query to 
the database\n \n Execute a SQL query to the connected MariaDB server.\n Arguments can also be used but they have to be a tuple.\n \n args\n ----\n query: :class:`str`\n The query string to be executed.\n args: :class:`tuple`\n The arguments to be used with the query.\n \n kwargs\n ------\n commit: :class:`bool`\n If a commit should be run afterwards to commit any changes made.\n \n returns\n -------\n :class:``\n \"\"\"\n \n if not self.connected:\n # if the database hasn't been connected yet, raise error\n raise self.NotConnected(\"Database connection has not been established yet.\")\n \n async with self.db.acquire() as con:\n # get pool connection object\n \n async with con.cursor() as cursor:\n # get cursor object for this pool\n \n \n # execute query and save result\n await cursor.execute(query, args)\n \n \n if commit:\n # commit to database if specified\n await con.commit()\n \n # return cursor\n return cursor",
"id": "9309327",
"language": "Python",
"matching_score": 2.3276069164276123,
"max_stars_count": 0,
"path": "utils/db_manager.py"
},
{
"content": "'''Config manager.\n\nManage the config file and request attributes.\n'''\n\nimport yaml\n\nfrom yaml import Loader\n\n\nclass MissingKey(Exception):\n pass\n\nclass MissingValue(Exception):\n pass\n\n\nclass Sub():\n pass\n\n\n\nclass Config():\n \"\"\"A Config manager\n\n A class for managing the bots config file.\n The Config file contains things as token and website info.\n This class is to help getting info and making sure the file has\n the right syntax.\n \"\"\"\n\n\n\n def __init__(self, filename:str =\"config.yml\"):\n \"\"\"Sett variables for config instance\n\n Args:\n filename (str, optional): The name of the config file. Defaults to \"config.yml\".\n \"\"\"\n\n self.filename = filename\n self.file = open(self.filename, \"r\")\n self.stream = yaml.load(self.file.read(), Loader=Loader)\n\n self.CheckConfig()\n\n # bot\n self.Token = self.stream[\"Token\"]\n self.Secret = self.stream[\"Secret\"]\n self.Prefix = self.stream[\"Prefix\"]\n self.Color = self.stream[\"Color\"]\n self.Description = self.stream[\"Description\"]\n \n # logging\n self.logging = Sub()\n self.logging.Servers = self.stream[\"Servers\"]\n self.logging.Commands = self.stream[\"Commands\"]\n self.logging.Errors = self.stream[\"Errors\"]\n self.logging.DMs = self.stream[\"DMs\"]\n self.logging.Website = self.stream[\"Website\"]\n self.logging.Events = self.stream[\"Events\"]\n\n # database\n self.db = Sub()\n self.db.Host = self.stream[\"Host\"]\n self.db.Port = self.stream[\"Port\"]\n self.db.User = self.stream[\"User\"]\n self.db.Password = self.stream[\"Password\"]\n self.db.DBName = self.stream[\"DBName\"]\n\n # Dashbaord\n self.Dashboard = Sub()\n self.Dashboard.Url = self.stream[\"URL\"]\n self.Dashboard.Port = self.stream[\"PORT\"]\n\n # emojis\n self.emojis = {\n \"yes\": self.stream[\"confirm\"], \"no\": self.stream[\"deny\"],\n \"voice\": self.stream[\"voice_channels\"], \"text\": self.stream[\"text_channels\"],\n \"loading\": self.stream[\"loading\"]\n }\n\n \n def CheckConfig(self) -> bool: \n \"\"\"Check the config file\n \n Make sure all values and \n \"\"\"\n\n file = open(self.filename, \"r\")\n stream = yaml.load(file.read(), Loader=Loader)\n\n args = [\n \"Token\", \"Prefix\", \"Color\", \"Description\", \"Servers\", \"Commands\", \"Errors\", \"DMs\",\n \"Website\", \"Events\", \"Host\", \"Port\", \"User\", \"Password\", \"confirm\", \"deny\", \"loading\",\n \"voice_channels\", \"text_channels\"\n ] # all keys that has to be in config file\n can_be_empty = [\n \"Color\", \"Servers\", \"Commands\", \"Errors\", \"DMs\", \"Website\", \"Events\", \"deny\", \"confirm\",\n \"loading\", \"voice_channels\", \"text_channels\"\n ] # they keys that can still be 0 or None\n\n for arg in args:\n if not arg in stream.keys():\n # the entire key is gone\n raise MissingKey(f\"The '{arg}' key is missing in config file({self.filename}). Make sure you are using a up-to-date file.\")\n\n if arg in can_be_empty:\n # the argument is allowed to be None\n continue\n\n if not stream[arg]:\n # there is no value for this argument\n raise MissingValue(f\"No value for '{arg}' has been set. Make sure all values in the config file({self.filename}) is set right and restart the bot.\")\n \n return True\n",
"id": "8924794",
"language": "Python",
"matching_score": 1.7979673147201538,
"max_stars_count": 0,
"path": "utils/config.py"
},
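Judging from utils/config.py, config.yml must contain every key listed in CheckConfig (plus Secret, DBName, URL and PORT, which __init__ also reads), with only the can_be_empty keys allowed to stay blank. A hedged sketch of a minimal, entirely made-up config that should pass validation:

    from utils.config import Config  # class shown above

    # Placeholder config.yml inferred from Config.CheckConfig(); all values are fake.
    yaml_text = """
    Token: "example-token"
    Secret: "example-secret"
    Prefix: "!"
    Color: 0xFF0000
    Description: "Example bot"
    Servers: 0
    Commands: 0
    Errors: 0
    DMs: 0
    Website: 0
    Events: 0
    Host: "localhost"
    Port: 3306
    User: "bot"
    Password: "example-password"
    DBName: "botdb"
    URL: "http://localhost"
    PORT: 8080
    confirm: ""
    deny: ""
    loading: ""
    voice_channels: ""
    text_channels: ""
    """
    with open("config.yml", "w") as f:
        f.write(yaml_text)

    config = Config()  # validates the file and exposes e.g. config.Prefix, config.db.Host
    print(config.Prefix, config.db.Host)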
{
"content": "import discord, datetime, humanize\nfrom discord.ext import commands\n\n\n\nclass Info(commands.Cog):\n \"\"\"Most commands that shows information are found in this module.\"\"\"\n \n def __init__(self, bot):\n self.bot = bot\n \n @commands.command(name=\"bot_info\", aliases=[\"bi\", \"botinfo\"], brief=\"General information about me!\")\n @commands.bot_has_permissions(use_external_emojis=True, embed_links=True, send_messages=True)\n async def ABCbot_info(self, ctx):\n '''Give information and statistics about the bot.\n Information given includes:\n - `Server count`\n - `User count`\n - `Start time`\n '''\n \n # how long the bot has been online\n online_time = humanize.naturaltime(datetime.datetime.utcnow()-self.bot.start_time)[:-4]\n \n # number of text and voice channels\n text_channels=[]\n voice_channels=[]\n \n # get Voice and Text Channel amount.\n for channel in self.bot.get_all_channels():\n if channel.type == discord.ChannelType.text:\n text_channels.append(channel)\n continue\n \n if channel.type == discord.ChannelType.voice:\n voice_channels.append(channel)\n continue\n \n \n # creating info embed\n embed = discord.Embed(\n title=f\"{self.bot.user.name} Statistics and Information\",\n description=f\"{self.bot.user.name} has been online for `{online_time}` and is currently in `{len(self.bot.guilds)} server(s)` and can see `{len(self.bot.users)} user(s)`.\",\n color=0xFF0000,\n timestamp=datetime.datetime.utcnow()\n ).set_author(\n name=self.bot.user.name,\n icon_url=self.bot.user.avatar_url\n ).add_field(\n name=f\"Channels: {len(text_channels)+len(voice_channels)}\",\n value=f\"<:Text_Channel:778350926468743228> Text Channels: `{len(text_channels)}`\\n<:Voice_Channel:778351389415440395> Voice Channels: `{len(voice_channels)}`\"\n )\n \n # send embed\n return await ctx.send(embed=embed)\n\ndef setup(bot):\n bot.add_cog(Info(bot))",
"id": "899548",
"language": "Python",
"matching_score": 1.2608824968338013,
"max_stars_count": 0,
"path": "bot/cogs/info.py"
},
{
"content": "\"\"\"Automatic Emoji permission check\nAutomaticaly check if a amoji is availible or not.\n\"\"\"\n\nclass UnavailiblEmoji(Exception):\n \"\"\"Emoji doesn't exist\n \n This exception is called if a emoji is unavailible.\n \"\"\"\n pass\n\nclass Emojis():\n \"\"\"Manage bot emojis\n\n Manage the availible emojis the bot has access to.\n ⚠️ means no emoji is set\n ❗❗ means the bot is missing necessary permissions to use the emoji\n ❗ means the bot does not have access to the emoji\n\n Args:\n -----\n bot: :class:`commands.Bot`\n The bot\n \"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n self.bot.smart_emojis = self\n self.emojis = bot.config.emojis\n\n def get_emoji(self, emoji:str, location):\n \"\"\"Get emoji if availible\n\n Use this function to get a emoji instead of getting it\n directly from config as it can result in problems.\n \"\"\"\n\n if self.emojis[emoji] == \"\": # the emoji is not set\n return \"⚠️\" # emoji isn't configured\n \n emojis = [e.__str__() for e in self.bot.emojis] # list of emoji names\n if not self.emojis[emoji] in emojis: # the emoji is not availible to bot\n return \"❗\" # emoji doesn't exist\n \n\n if location.guild is None: # the emoji can be used no matter what\n return self.emojis[emoji]\n \n # If the bot does not have 'use_external_emojis',\n # does the emoji exist in the guild emojis\n if not location.guild.me.permissions_in(location).use_external_emojis:\n if not self.emojis[emoji] in location.guild.emojis:\n return \"‼️\" # missing perms\n \n # return emoji\n return self.emojis[emoji]",
"id": "9445674",
"language": "Python",
"matching_score": 0.2275027483701706,
"max_stars_count": 0,
"path": "utils/emojis.py"
}
] | 2.262813 |
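A minimal sketch of how the Paginator class from utils/paginator.py above might be driven from a command. This assumes the pre-2.0 discord.py plus discord-ext-menus setup these cogs are written against; the command name and page contents are made up:

    import discord
    from discord.ext import commands
    from utils.paginator import Paginator  # class shown above

    bot = commands.Bot(command_prefix="!")

    @bot.command()
    async def demo(ctx):
        # Pages can be plain strings or discord.Embed objects.
        pages = [discord.Embed(title=f"Page {i}", description="Example content") for i in range(1, 4)]
        paginator = Paginator(pages=pages, page=0)
        await paginator.start(ctx)  # sends the message and starts listening for reactions

    # bot.run("TOKEN")  # token intentionally left out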
ciffelia | [
{
"content": "import os\nimport requests\n\n\nurl = os.environ[\"POST_URL\"]\naccess_client_id = os.environ[\"ACCESS_CLIENT_ID\"]\naccess_client_secret = os.environ[\"ACCESS_CLIENT_SECRET\"]\n\n\ndef post(data):\n headers = {\n \"CF-Access-Client-Id\": access_client_id,\n \"CF-Access-Client-Secret\": access_client_secret,\n }\n\n try:\n requests.post(url, headers=headers, data=data, timeout=5.0)\n except requests.exceptions.RequestException as err:\n print(f\"HTTP request error: {err}\")\n",
"id": "9242438",
"language": "Python",
"matching_score": 0.3105328679084778,
"max_stars_count": 0,
"path": "raspberry-pi/storage.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\nimport struct\nfrom collections import namedtuple\nfrom bluepy.btle import Scanner\n\n\n# Test company ID\nmyCompanyId = \"ffff\"\n\nAdvertisePayload = struct.Struct(\"<fffHH\")\n\nMeasurementValue = namedtuple(\n \"MeasurementValue\", \"temperature humidity pressure co2 tvoc\"\n)\n\n\ndef scan(timeout):\n scanner = Scanner()\n devices = scanner.scan(timeout)\n\n for device in devices:\n # Ad Type 0x09: Complete Local Name\n deviceName = device.getValueText(0x09)\n if deviceName != \"airpapyrus\":\n continue\n\n # Ad Type 0xFF: Manufacturer Specific Data\n adData = device.getValueText(0xFF)\n if adData is None:\n continue\n\n companyId = adData[0:4]\n if companyId != myCompanyId:\n continue\n\n return parseAirpapyrusAdvertise(adData)\n\n return None, None\n\n\ndef parseAirpapyrusAdvertise(advertise):\n seq = advertise[4:6]\n payload = bytes.fromhex(advertise[6:])\n\n measurementValue = MeasurementValue._make(AdvertisePayload.unpack(payload))\n\n return seq, measurementValue\n",
"id": "989841",
"language": "Python",
"matching_score": 1.4807558059692383,
"max_stars_count": 0,
"path": "raspberry-pi/ble.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\nfrom datetime import datetime\nfrom time import sleep\nimport ble\nimport display\nimport epaper\nimport storage\n\n\ndef main():\n while True:\n _, measurementValue = ble.scan(5.0)\n\n image = display.generate_image(measurementValue)\n epaper.draw(image)\n\n if measurementValue is not None:\n storage.post(measurementValue._asdict())\n\n sleep_for_sec = max(0, 55 - datetime.now().second)\n sleep(sleep_for_sec)\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "3857240",
"language": "Python",
"matching_score": 1.531514048576355,
"max_stars_count": 0,
"path": "raspberry-pi/main.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\nfrom PIL import ImageOps\nfrom waveshare_epd import epd2in7\n\nepd = epd2in7.EPD()\n\n\ndef draw(image):\n # なぜか左右反転して表示されるので\n mirror_image = ImageOps.mirror(image)\n\n buffer = epd.getbuffer_4Gray(mirror_image)\n\n try:\n epd.Init_4Gray()\n epd.display_4Gray(buffer)\n\n except KeyboardInterrupt:\n print(\"Shutting down e-Paper...\")\n epd.sleep()\n exit()\n\n epd.sleep()\n",
"id": "6761938",
"language": "Python",
"matching_score": 0.8709350228309631,
"max_stars_count": 0,
"path": "raspberry-pi/epaper.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\nfrom enum import IntEnum\nfrom PIL import Image, ImageDraw, ImageFont\nfrom datetime import datetime\n\n\nWIDTH = 264\nHEIGHT = 176\n\n\nclass Color(IntEnum):\n WHITE = 0xFF\n LIGHT_GRAY = 0xC0\n DARK_GRAY = 0x80\n BLACK = 0x00\n\n\ndummy_image = Image.new(\"L\", (0, 0))\ndummy_draw = ImageDraw.Draw(dummy_image)\n\nfont_20 = ImageFont.truetype(\"fonts/x14y20pxScoreDozer.ttf\", 20)\nfont_40 = ImageFont.truetype(\"fonts/x14y20pxScoreDozer.ttf\", 40)\n\n\ndef text_to_image(text, font, color=Color.BLACK):\n width, height = dummy_draw.textsize(text, font)\n\n image = Image.new(\"LA\", (width, height))\n draw = ImageDraw.Draw(image)\n\n draw.text((0, 0), text, (color, 255), font)\n\n return image\n\n\ndef generate_label(value, unit, line_spacing=0):\n value_image = text_to_image(value, font_40)\n unit_image = text_to_image(unit, font_20)\n\n if len(unit) == 1:\n # Single line\n width = value_image.width + unit_image.width\n height = value_image.height\n else:\n # Two lines\n width = value_image.width\n height = value_image.height + line_spacing + unit_image.height\n\n image = Image.new(\"LA\", (width, height))\n\n # Upper left\n image.paste(value_image)\n\n # Bottom right\n image.paste(\n unit_image,\n box=(image.width - unit_image.width, image.height - unit_image.height),\n )\n\n return image\n\n\ndef paste_center(image1, image2, xy):\n x = xy[0] - image2.width // 2\n y = xy[1] - image2.height // 2\n\n alpha_band = image2.split()[-1]\n\n image1.paste(image2, mask=alpha_band, box=(x, y))\n\n\ndef draw_measurement_value(image, value):\n temperature_text = str(round(value.temperature, 1))\n humidity_text = str(round(value.humidity, 1))\n pressure_text = str(round(value.pressure / 100)) # Pa to hPa\n co2_text = str(round(value.co2))\n\n temperature_image = generate_label(temperature_text, \"℃\")\n humidity_image = generate_label(humidity_text, \"%\")\n pressure_image = generate_label(pressure_text, \"hPa\")\n co2_image = generate_label(co2_text, \"ppm\")\n\n x1 = WIDTH // 4\n x2 = x1 * 3\n y1 = (HEIGHT - 36) // 4\n y2 = y1 * 3\n\n paste_center(image, temperature_image, (x1, y1))\n paste_center(image, humidity_image, (x2, y1))\n paste_center(image, pressure_image, (x1, y2))\n paste_center(image, co2_image, (x2, y2))\n\n\ndef draw_no_data(image):\n no_data_image = text_to_image(\"No data\", font_40)\n\n x = WIDTH // 2\n y = (HEIGHT - 36) // 2\n\n paste_center(image, no_data_image, (x, y))\n\n\ndef draw_datetime(image, dt):\n dt_text = dt.strftime(\"%Y/%m/%d (%a) %H:%M\")\n dt_image = text_to_image(dt_text, font_20, Color.WHITE)\n\n paste_center(image, dt_image, (WIDTH // 2, HEIGHT - 18))\n\n\ndef generate_image(value):\n image = Image.open(f\"assets/background.png\")\n\n if value is None:\n draw_no_data(image)\n else:\n draw_measurement_value(image, value)\n\n draw_datetime(image, datetime.now())\n\n return image\n",
"id": "1209289",
"language": "Python",
"matching_score": 5.0367817878723145,
"max_stars_count": 0,
"path": "raspberry-pi/display.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\nfrom enum import IntEnum\nfrom PIL import Image, ImageDraw\n\n\nWIDTH = 264\nHEIGHT = 176\n\n\nclass Color(IntEnum):\n WHITE = 0xFF\n LIGHT_GRAY = 0xC0\n DARK_GRAY = 0x80\n BLACK = 0x00\n\n\ndef generate_icon(icon_name, size):\n image = Image.open(f\"{icon_name}.png\").convert(\"RGBA\")\n resized_image = image.resize(size, Image.NEAREST)\n\n pixdata = resized_image.load()\n\n for x in range(size[0]):\n for y in range(size[1]):\n if pixdata[x, y][3] >= 128:\n pixdata[x, y] = (\n Color.LIGHT_GRAY,\n Color.LIGHT_GRAY,\n Color.LIGHT_GRAY,\n 255,\n )\n else:\n pixdata[x, y] = (0, 0, 0, 0)\n\n return resized_image\n\n\ndef generate_background():\n image = Image.new(\"L\", (WIDTH, HEIGHT), Color.WHITE)\n draw = ImageDraw.Draw(image)\n\n temp_icon = generate_icon(\"temperature\", (70, 70))\n image.paste(temp_icon, mask=temp_icon, box=(0, 0))\n\n humidity_icon = generate_icon(\"humidity\", (70, 70))\n image.paste(humidity_icon, mask=humidity_icon, box=(WIDTH // 2, 0))\n\n pressure_icon = generate_icon(\"pressure\", (60, 60))\n image.paste(pressure_icon, mask=pressure_icon, box=(5, (HEIGHT - 36) // 2 + 5))\n\n co2_icon = generate_icon(\"co2\", (70, 70))\n image.paste(co2_icon, mask=co2_icon, box=(WIDTH // 2 + 5, (HEIGHT - 36) // 2))\n\n # 縦の区切り線\n draw.line([(WIDTH / 2, 0), (WIDTH / 2, HEIGHT)], width=2, fill=Color.DARK_GRAY)\n\n # 横の区切り線\n draw.line(\n [(0, (HEIGHT - 36) / 2), (WIDTH, (HEIGHT - 36) / 2)],\n width=2,\n fill=Color.DARK_GRAY,\n )\n\n # 下部の日時表示の背景\n draw.rectangle([(0, HEIGHT - 36), (WIDTH, HEIGHT)], fill=Color.BLACK)\n\n return image\n\n\ndef main():\n image = generate_background()\n image.save(\"background.png\")\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "4632671",
"language": "Python",
"matching_score": 2.370361089706421,
"max_stars_count": 0,
"path": "raspberry-pi/assets/generate-background.py"
}
] | 1.506135 |
sagaragarwal94-archives | [
{
"content": "import sys\n\nfrom flask import Flask, render_template\nfrom flask_frozen import Freezer\n\nDEBUG = True\n\napp = Flask(__name__)\nfreezer = Freezer(app)\n\n@app.route(\"/\")\ndef index():\n\treturn render_template('index.html')\n\nif __name__ == '__main__':\n if len(sys.argv) > 1 and sys.argv[1] == \"build\":\n\t\tfreezer.freeze()\n\n else:\n app.run(port=8000)\n",
"id": "2012430",
"language": "Python",
"matching_score": 2.0257670879364014,
"max_stars_count": 4,
"path": "sitebuilder.py"
},
{
"content": "from flask import Flask, render_template, url_for, request, redirect, json\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bootstrap import Bootstrap\n\napp = Flask(__name__)\napp.config.from_pyfile('config.py')\ndb = SQLAlchemy(app)\nbootstrap = Bootstrap(app)\n\n\nclass Categories(db.Model):\n __tablename__ = \"category\"\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(120), unique=True)\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return '<Name %r>' % self.name\n\nclass Items(db.Model):\n __tablename__ = \"item\"\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(120), unique=True)\n desc = db.Column(db.String(500), unique=False)\n cat_name = db.Column(db.String(120), unique = False)\n\n def __init__(self, name, desc, cat_name):\n self.name = name\n self.cat_name = cat_name\n self.desc = desc\n\n def __repr__(self):\n return '<Name %r>' % self.name\n\n\n\n@app.route('/')\t\ndef index():\n\tcategories = Categories.query.all()\n\titems = Items.query.order_by(Items.id.desc()).limit(10).all()[::-1]\n\tlast_show = {}\n\tfor item in items:\n\t\tlast_show[item.name] = item.cat_name\n\treturn render_template(\"base.html\", categories = categories, last_show = last_show, latest = True)\n\n\n@app.route('/catalog/edit', methods = ['GET', 'POST'])\ndef add_item():\n\tif request.method == 'POST':\n\t\tname = request.form['name']\n\t\tdesc = request.form['desc']\n\t\tcategory_name = request.form['category']\n\t\titem_add= Items(name, desc, category_name)\n\t\tdb.session.add(item_add)\n\t\tdb.session.commit()\n\t\treturn redirect(url_for('index'))\n\tcategories = Categories.query.all()\n\treturn render_template(\"edit.html\", name = \"\", value = \"\", categories = categories)\n\n@app.route('/catalog/<category>/items')\ndef catalog(category):\n\titem_name = Items.query.filter_by(cat_name = category).all()\n\tcategories = Categories.query.all()\n\treturn render_template(\"base.html\", categories = categories, category= category, item_name = item_name, latest = False)\n\n\n@app.route('/catalog/<category>/<name>')\ndef describe(category, name):\n\titem_name = Items.query.filter_by(cat_name = category).first()\n\treturn render_template(\"describe.html\", desc= item_name.desc, name = item_name.name)\n\n@app.route('/catalog/<item_name>/delete', methods = ['POST','GET'])\ndef delete(item_name):\n\tif request.method == 'POST':\n\t\titem_name = Items.query.filter_by(name = item_name).first()\n\t\tdb.session.delete(item_name)\n\t\tdb.session.commit()\n\t\treturn redirect(url_for('index'))\n\treturn render_template(\"delete.html\")\n\n@app.route('/catalog/<item_name>/edit', methods = ['POST','GET'])\ndef edit(item_name):\n\titem_name = Items.query.filter_by(name = item_name).first()\n\tcategories = Categories.query.all()\n\tif request.method == 'POST':\n\t\titem_name.name = request.form['name']\n\t\titem_name.desc = request.form['desc']\n\t\titem_name.category_name = request.form['category']\n\t\tdb.session.commit()\n\t\treturn redirect(url_for('index'))\n\treturn render_template(\"edit.html\", name = item_name.name, desc = item_name.desc,categories = categories)\n\n@app.route('/catalog.json')\ndef catalog_json():\n\ttest =[]\n\tcategories = Categories.query.all()\n\tfor category in categories:\n\t\ttest_item=[]\n\t\tcounter = 0\n\t\titem_name = Items.query.filter_by(cat_name = category.name).all()\n\t\tfor item in item_name:\n\t\t\tcounter = counter + 1\n\t\t\ttest_item.append({\"cat_id\":category.id, 
\"description\": item.desc, \"id\": counter, \"title\": item.name })\n\t\ttest.append({\"id\": category.id, \"name\": category.name, \"Item\": test_item})\n\tresult={}\n\tresult[\"Category\"] = test\n\tresponse = app.response_class(\n response=json.dumps(result, indent=4, sort_keys=True),\n status=200,\n mimetype='application/json'\n )\n\treturn response\n\nif __name__ == '__main__':\n\tapp.run(host='127.0.0.1', port=8000, debug=True)\n",
"id": "3427284",
"language": "Python",
"matching_score": 1,
"max_stars_count": 4,
"path": "app.py"
},
{
"content": "SQLALCHEMY_DATABASE_URI = \"postgresql://crud:iamtony@localhost/crud\"",
"id": "4223386",
"language": "Python",
"matching_score": 0.038268815726041794,
"max_stars_count": 4,
"path": "config.py"
}
] | 1 |
samirak93 | [
{
"content": "from sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import metrics\n\nimport pandas as pd\nimport numpy as np\n\n\ndef get_logreg_output(features_df, target_df, active_norm):\n non_num_features = [col for col, dt in features_df.dtypes.items() if dt == object]\n likely_cat = {}\n for var in features_df.columns:\n likely_cat[var] = features_df[var].nunique() <= 100\n likely_cat = [k for k, v in likely_cat.items() if v is True]\n non_num_features = list(set(non_num_features + likely_cat))\n\n if list(non_num_features):\n lb_results_df = pd.DataFrame(pd.get_dummies(features_df[non_num_features]))\n features_df = features_df.drop(columns=non_num_features)\n features_df = pd.concat([features_df, lb_results_df], axis=1)\n\n X_train, X_test, y_train, y_test = train_test_split(features_df, target_df, test_size=0.2, random_state=40)\n\n if active_norm == 1:\n X_train = pd.DataFrame(StandardScaler().fit_transform(X_train))\n X_test = pd.DataFrame(StandardScaler().fit_transform(X_test))\n else:\n X_train = X_train\n X_test = X_test\n\n logreg = LogisticRegression(class_weight='balanced', n_jobs=-1, solver='lbfgs', max_iter=500)\n logreg.fit(X_train, y_train)\n y_pred = logreg.predict(X_test)\n accuracy_score = np.round(logreg.score(X_test, y_test), 2)\n class_report = metrics.classification_report(y_test, y_pred, output_dict=True)\n class_report_df = pd.DataFrame(class_report)\n class_report_df.columns = class_report_df.columns.str.upper()\n class_report_df.index = class_report_df.index.str.upper()\n class_report_df = class_report_df.round(3).transpose().\\\n reset_index().rename(columns={'index': \"\"})\n\n confusion_matrix = metrics.confusion_matrix(y_test, y_pred)\n\n confusion_df = pd.DataFrame(\n confusion_matrix,\n columns=sorted(target_df.unique()),\n index=sorted(target_df.unique()))\n confusion_df.index.name = 'Actual'\n confusion_df.columns.name = 'Prediction'\n\n confusion_df = confusion_df.stack().rename(\"value\").reset_index()\n logit_roc_auc = np.round(metrics.roc_auc_score(y_test, logreg.predict(X_test)),3)\n fpr, tpr, thresholds = metrics.roc_curve(y_test, logreg.predict_proba(X_test)[:, 1])\n\n return accuracy_score, class_report_df, confusion_df, logit_roc_auc, fpr, tpr, thresholds\n",
"id": "5759838",
"language": "Python",
"matching_score": 5.5367021560668945,
"max_stars_count": 4,
"path": "code/logistic_regression.py"
},
{
"content": "from sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn import metrics\n\nimport pandas as pd\nimport numpy as np\n\ndef get_classify_output(features_df, target_df, active_norm):\n\n non_num_features = [col for col, dt in features_df.dtypes.items() if dt == object]\n likely_cat = {}\n for var in features_df.columns:\n likely_cat[var] = features_df[var].nunique() <= 100\n likely_cat = [k for k, v in likely_cat.items() if v is True]\n non_num_features = list(set(non_num_features + likely_cat))\n\n if list(non_num_features):\n lb_results_df = pd.DataFrame(pd.get_dummies(features_df[non_num_features]))\n features_df = features_df.drop(columns=non_num_features)\n features_df = pd.concat([features_df, lb_results_df], axis=1)\n\n X_train, X_test, y_train, y_test = train_test_split(features_df, target_df, test_size=0.3, random_state=40)\n\n if active_norm == 1:\n X_train = pd.DataFrame(StandardScaler().fit_transform(X_train))\n X_test = pd.DataFrame(StandardScaler().fit_transform(X_test))\n else:\n X_train = X_train\n X_test = X_test\n\n random_forest = RandomForestClassifier(n_estimators=400, max_depth=10, random_state=0,\n class_weight='balanced', n_jobs=-1)\n random_forest.fit(X_train, y_train)\n \n y_pred = random_forest.predict(X_test)\n accuracy_score = np.round(random_forest.score(X_test, y_test), 2)\n class_report = metrics.classification_report(y_test, y_pred, output_dict=True)\n class_report_df = pd.DataFrame(class_report)\n class_report_df.columns = class_report_df.columns.str.upper()\n class_report_df.index = class_report_df.index.str.upper()\n class_report_df = class_report_df.round(3).transpose().reset_index().rename(columns={'index': \"\"})\n\n confusion_matrix = metrics.confusion_matrix(y_test, y_pred)\n\n confusion_df = pd.DataFrame(\n confusion_matrix,\n columns=sorted(list(set(y_test))),\n index=sorted(list(set(y_test))))\n confusion_df.index.name = 'Actual'\n confusion_df.columns.name = 'Prediction'\n\n confusion_df = confusion_df.stack().rename(\"value\").reset_index()\n \n rf_feature_labels = features_df.columns.values.tolist()\n rf_feature_importance = random_forest.feature_importances_.tolist()\n\n return accuracy_score, class_report_df, confusion_df, rf_feature_labels, rf_feature_importance",
"id": "10929666",
"language": "Python",
"matching_score": 3.8251869678497314,
"max_stars_count": 4,
"path": "code/classification.py"
},
{
"content": "from bokeh.palettes import RdBu\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler, LabelBinarizer, OneHotEncoder, LabelEncoder\nfrom sklearn import metrics\n\nimport pandas as pd\nimport numpy as np\nimport bisect\n\nfrom numpy import arange\nfrom itertools import chain\nfrom collections import defaultdict\n\ndef get_bounds(nlabels):\n bottom = list(chain.from_iterable([[ii]*nlabels for ii in range(nlabels)]))\n top = list(chain.from_iterable([[ii+1]*nlabels for ii in range(nlabels)]))\n left = list(chain.from_iterable([list(range(nlabels)) for ii in range(nlabels)]))\n right = list(chain.from_iterable([list(range(1,nlabels+1)) for ii in range(nlabels)]))\n return top, bottom, left, right\n\ndef get_colors(corr_array, colors, min, max):\n ccorr = arange(min, max, 1/(len(colors)/2))\n color = []\n for value in corr_array:\n ind = bisect.bisect_left(ccorr, value)\n color.append(colors[ind-1])\n\n return color\n\ndef get_corr_plot(df):\n\n corr = df.corr()\n colors = list(reversed(RdBu[9]))\n labels = df.columns\n nlabels = len(labels)\n top, bottom, left, right = get_bounds(nlabels)\n color_list = get_colors(corr.values.flatten(), colors, -1, 1)\n\n return top, bottom, left, right, labels, nlabels, color_list, corr.values.flatten()\n\ndef get_regression_plot(features_df, target_df, active_norm):\n\n non_num_features = [col for col, dt in features_df.dtypes.items() if dt == object]\n likely_cat = {}\n for var in features_df.columns:\n likely_cat[var] = features_df[var].nunique() <= 100\n likely_cat = [k for k, v in likely_cat.items() if v is True]\n non_num_features = list(set(non_num_features + likely_cat))\n\n if list(non_num_features):\n lb_results_df = pd.DataFrame(pd.get_dummies(features_df[non_num_features]))\n features_df = features_df.drop(columns=non_num_features)\n features_df = pd.concat([features_df, lb_results_df], axis=1)\n\n X_train, X_test, y_train, y_test = train_test_split(features_df, target_df, test_size=0.2, random_state=40)\n\n if active_norm == 1:\n X_train = StandardScaler().fit_transform(X_train)\n X_test = StandardScaler().fit_transform(X_test)\n else:\n X_train = X_train\n X_test = X_test\n\n regressor = LinearRegression(normalize=True, n_jobs=-1)\n regressor.fit(X_train, y_train)\n\n y_pred = regressor.predict(X_test)\n\n residual = y_test - y_pred\n\n r2 = metrics.r2_score(y_test, y_pred)\n slope = regressor.coef_[0]\n intercept = regressor.intercept_\n\n text = [\"R^2 - %02f\" % r2]\n MAE = np.round(metrics.mean_absolute_error(y_test, y_pred),2)\n MSE = np.round(metrics.mean_squared_error(y_test, y_pred),2)\n RMSE = np.round(np.sqrt(metrics.mean_squared_error(y_test, y_pred)),2)\n\n return y_test, y_pred, text, MAE, RMSE, residual, slope, intercept",
"id": "7014563",
"language": "Python",
"matching_score": 3.1282684803009033,
"max_stars_count": 4,
"path": "code/regression.py"
},
{
"content": "#Used Random Forest Regressor\n#Accuracy 67%\n#MAPE= ~4400 viewers\n\n## <NAME> NBA Hackathon 2018 - Business Analytics\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import StandardScaler\n\n\n\n#Train and test model with output.csv file\ndf=pd.read_csv('output.csv')\n\ndata=pd.DataFrame(df)\ndata_set=data.fillna(0)\n\nfeatures=data_set.iloc[:,3:-1]\n\ndataset_X=np.array(features)\n\nonehotencoder = OneHotEncoder(categorical_features = [0,1,10,12,13,18])\nX = onehotencoder.fit_transform(features).toarray()\nX = X[:, 1:]\n\nviews_target=data_set['Rounded Viewers'].values\nfeature_list = list(features.columns)\n\n\nbball=RandomForestRegressor(random_state=42,n_estimators=1500,max_features=None,max_depth=None)\n\nX_trainset, X_testset, y_trainset, y_testset = train_test_split(X, views_target, test_size=0, random_state=42)\n\nsc = StandardScaler()\nX_trainset = sc.fit_transform(X_trainset)\n\n\n#Test with test_data.csv\ntdf=pd.read_csv('test_data.csv')\n\ntest_data=pd.DataFrame(tdf)\ntest_dataset=test_data.fillna(0)\n\n\nfeature_difference = set(data) - set(test_dataset)\n\nfeature_difference_df = pd.DataFrame(data=np.zeros((test_dataset.shape[0], len(feature_difference))),\n columns=list(feature_difference))\n\ntest_dummy = test_dataset.join(feature_difference_df)\n\ntest_features=np.array(test_dummy.iloc[:,3:-1])\n\n\ntest_onehotencoder = OneHotEncoder(categorical_features = [0,1,10,12,13,18])\nX_test = onehotencoder.fit_transform(test_features).toarray()\nX_test = X_test[:, 1:]\n\nX_testset = sc.transform(X_test)\n\nviews_target=data_set['Rounded Viewers'].values\nfeature_list = list(features.columns)\n\nbball.fit(X_trainset,y_trainset)\n\noutput=[]\npredForest=bball.predict(X_testset)\n\noutput=predForest\n\ntest_set_new=pd.DataFrame(pd.read_csv('test_set.csv'))\n\ntest_set_new['Total_Viewers']=np.round(output,0)\n\nprint test_set_new['Total_Viewers']\n\ntest_set_new.to_csv('test_set.csv')\n\n\n",
"id": "2568539",
"language": "Python",
"matching_score": 0.5212468504905701,
"max_stars_count": 1,
"path": "Random_Forest_Reg.py"
},
{
"content": "# -*- coding: utf-8 -*-\n# Version 1.0\n# Date: Jan 2 2020\n\nfrom bokeh.plotting import figure, curdoc\nfrom bokeh.models import ColumnDataSource, HoverTool, ColorBar, LinearColorMapper, Legend, BasicTickFormatter, \\\n LegendItem, Span, BasicTicker, LabelSet, Panel, Tabs\nfrom bokeh.models.widgets import DataTable, Select, TableColumn, Slider, MultiSelect, RadioButtonGroup, Div, Button, \\\n CheckboxGroup, PreText, Paragraph, FileInput, TextAreaInput, HTMLTemplateFormatter\nfrom bokeh.layouts import column, row, widgetbox\nfrom bokeh.palettes import Spectral6, Set1, Category20, RdBu, RdBu3, Oranges, Blues\nfrom bokeh.transform import linear_cmap, transform\nfrom bokeh.models.ranges import FactorRange\nfrom bokeh.transform import factor_cmap\nfrom bokeh.models.tickers import FixedTicker, SingleIntervalTicker\nfrom bokeh import events\nfrom bokeh.models.callbacks import CustomJS\n\nfrom math import pi\n\nfrom collections import OrderedDict\n\nimport pandas as pd\nimport numpy as np\n\nfrom code.clustering import get_elbow_plot, get_tsne, clustering_data\nfrom code.regression import get_corr_plot, get_regression_plot, get_colors\nfrom code.logistic_regression import get_logreg_output\nfrom code.classification import get_classify_output\nfrom code.data_sources import load_data_sources\n\nimport warnings\nimport os\nimport io\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n\"\"\"\nCODE\n\n\"\"\"\n\n\nclass plot_attributes(object):\n \"\"\"[summary]\n\n Arguments:\n object {figure} -- Unformatted plot\n\n Returns:\n [figure] -- Formatted plot\n \"\"\"\n\n def plot_format(self, plot):\n\n plot.background_fill_color = self.background_fill_color\n plot.border_fill_color = self.border_fill_color\n plot.xaxis.formatter = self.x_axis_format\n plot.yaxis.formatter = self.y_axis_format\n plot.title.align = self.title_align\n plot.title.text_font = self.text_font\n plot.axis.axis_label_text_font = self.axis_label_text_font\n plot.axis.axis_label_text_font_size = self.axis_label_text_font_size\n plot.title.text_font_size = self.text_font_size\n\n return plot\n\n\nclass landing_page():\n def __init__(self):\n self.note = None\n\n def landing_note(self):\n self.note = Div(text=\"\"\"<br><br> Machine Learning Tool: <br> This is a tool to get hands-on experience \n with Machine Learning concepts like Regression, Classification, Clustering. </br></br>\n \n <li>The tool was built to make it as a medium to get hands-on visual experience to different aspect of \n data science like exploring/visualizing different data types, building models to make predictions, \n evaluating the models.</li> </br>\n <li>At this point, model optimization/selection is not an option since datasets are pre-built. \n This could be implemented as a future update.</li>\n <br><br></br></br>\n <b>Disclaimer:</b> As a data scientist, this is not the <i>only</i> way to learn/practice data science concepts. 
\n For someone with relatively less experience in coding/data-science concepts, this is a method to \n facilitate interest and give a brief idea about the concepts.\n </br></br></br>\"\"\",\n style={'font-size': '14pt', 'color': 'black',\"font\":'Font Awesome\\ 5 Free'},\n width=1200, sizing_mode='stretch_both', css_classes=['div_landing'])\n\n\n self.alert_loading = Div(text='', css_classes=['hidden'], visible=False)\n\n self.alert_loading.js_on_change('text', self.callback_notification)\n\n tab_landing = Panel(child=column(self.note),\n title=\"Home\")\n return tab_landing\n\n\nclass eda_plots(plot_attributes):\n\n def __init__(self):\n\n self.active_df = None\n self.table_eda = None\n self.explore_data_select = None\n self.button_eda_plot = None\n self.slider_bins = None\n self.log_x_cb = None\n self.log_y_cb = None\n self.log_hist_cb = None\n self.button_hist_plot = None\n self.plots = None\n self.hover_scatter = None\n self.eda_df = None\n self.button_count_plot = None\n self.plot_count_plot = None\n self.reset_data_eda()\n\n def reset_data_eda(self):\n self.source_scatter.data = dict(x=[], y=[], color=[])\n self.source_histogram.data = dict(top=[], left=[], right=[])\n self.source_count_plot.data = dict(x=[], y=[])\n self.source_eda.data = {}\n self.table_eda.columns = []\n self.select_x_axis.options = [\"None\"]\n self.select_y_axis.options = [\"None\"]\n self.select_color.options = ['None']\n self.select_hist.options = [\"None\"]\n self.select_count_plot.options = [\"None\"]\n self.select_x_axis.value = \"None\"\n self.select_y_axis.value = \"None\"\n self.select_color.value = 'None'\n self.select_hist.value = \"None\"\n self.select_count_plot.value = \"None\"\n self.plot_scatter.xaxis.axis_label = ''\n self.plot_scatter.yaxis.axis_label = ''\n self.plot_hist.xaxis.axis_label = ''\n self.plot_hist.yaxis.axis_label = ''\n self.plot_count_plot.xaxis.axis_label = ''\n self.plot_count_plot.yaxis.axis_label = ''\n self.data_source_eda.text = \"\"\n\n def create_eda_figure(self):\n active_df = self.explore_data_select.value\n select_x_axis = self.select_x_axis.value\n select_y_axis = self.select_y_axis.value\n\n if active_df != \"Select dataset\":\n ticker_x_dict, ticker_y_dict = {}, {}\n xs, ys = [], []\n if select_x_axis != \"None\" and select_y_axis != \"None\":\n if self.log_x_cb.active:\n if self.log_x_cb.active[0] == 0:\n xs = np.log(self.eda_df[select_x_axis].values + 1)\n else:\n xs = self.eda_df[select_x_axis].values\n\n if self.log_y_cb.active:\n if self.log_y_cb.active[0] == 0:\n ys = np.log(self.eda_df[select_y_axis].values + 1)\n else:\n ys = self.eda_df[select_y_axis].values\n\n self.plot_scatter.xaxis.axis_label = select_x_axis\n self.plot_scatter.yaxis.axis_label = select_y_axis\n\n color_dict = {}\n select_color = self.select_color.value\n\n if select_color != \"None\":\n color_factors = self.eda_df[select_color].unique().tolist()\n for i in range(0, len(color_factors)):\n color_dict[str(color_factors[i])] = Category20[20][i]\n\n scat_color = pd.Series(\n self.eda_df[select_color].astype(str)).map(color_dict)\n self.source_scatter.data = dict(x=xs, y=ys, color=scat_color)\n else:\n scat_color = ['dodgerblue'] * len(xs)\n self.source_scatter.data = dict(x=xs, y=ys, color=scat_color)\n\n def create_hist_figure(self):\n active_df = self.explore_data_select.value\n\n if active_df != \"Select dataset\":\n hist, edges = [], []\n if self.select_hist.value != 'None':\n self.plot_hist.xaxis.axis_label = self.select_hist.value\n self.plot_hist.yaxis.axis_label = 'Count'\n\n if 
self.log_hist_cb.active:\n if self.log_hist_cb.active[0] == 0:\n log_hist = np.log(\n self.eda_df[self.select_hist.value].values + 1)\n else:\n log_hist = self.eda_df[self.select_hist.value].values\n\n hist, edges = np.histogram(\n log_hist, bins=self.slider_bins.value)\n\n self.source_histogram.data = dict(\n top=hist, left=edges[:-1], right=edges[1:])\n\n def create_count_figure(self):\n active_df = self.explore_data_select.value\n\n if active_df != \"Select dataset\":\n count_column, count_value = [], []\n if self.select_count_plot.value != 'None':\n self.plot_count_plot.xaxis.axis_label = self.select_count_plot.value\n self.plot_count_plot.yaxis.axis_label = 'Count'\n\n count_df = self.eda_df[self.select_count_plot.value].value_counts(\n ).to_frame()\n\n count_column, count_value = count_df.index.tolist(\n ), count_df[self.select_count_plot.value].values.tolist()\n count_column = [str(i) for i in count_column]\n self.plot_count_plot.x_range.factors = list(count_column)\n self.source_count_plot.data = dict(\n x=list(count_column), y=list(count_value))\n\n def eda_table(self, attr, old, new):\n active_df = self.explore_data_select.value\n data_source_text = load_data_sources()\n\n if active_df != \"Select dataset\":\n self.reset_data_eda()\n self.file_path = str(self.cwd + self.data_path + str(self.eda_data_source.get(active_df)))\n self.eda_df = pd.read_csv(self.file_path)\n self.eda_df = self.eda_df.fillna(self.eda_df.mean())\n self.eda_df.columns = [x.upper() for x in self.eda_df.columns]\n\n self.source_eda.data = dict(self.eda_df)\n self.table_eda.columns = [TableColumn(\n field=cols, title=cols, width=90) for cols in self.eda_df.columns]\n\n filter_objects = {}\n filter_numeric = {}\n likely_cat = {}\n for var in self.eda_df.columns:\n filter_objects[var] = self.eda_df[var].dtype == np.float64 or self.eda_df[var].dtype == np.int64\n filter_numeric[var] = str(self.eda_df[var].dtype) == 'object' or self.eda_df[var].nunique() <= 20\n likely_cat[var] = self.eda_df[var].nunique() <= 20\n\n filter_objects = [\n k for k, v in filter_objects.items() if v is True]\n self.select_x_axis.options = [\"None\"] + filter_objects\n self.select_y_axis.options = [\"None\"] + filter_objects\n self.select_hist.options = [\"None\"] + filter_objects\n\n likely_cat = [k for k, v in likely_cat.items() if v is True]\n self.select_color.options = ['None'] + likely_cat\n\n filter_numeric = [\n k for k, v in filter_numeric.items() if v is True]\n self.select_count_plot.options = [\"None\"] + filter_numeric\n \n data_source_df = data_source_text[data_source_text['Name'] == active_df]\n data_text = \"<center>Data Source</center>\\n\\n<b>Title:</b> \"+data_source_df['Dataset'].tolist()[0] + \"<br><br>\" + \\\n \"<b>Source Link:</b> <a href=\"+ data_source_df['Link'].tolist()[0] +\"\"\" target=\"_blank\">\"\"\"+data_source_df['Link'].tolist()[0]+\"</a><br>\" + \\\n \"<b>Description:</b>\" + data_source_df['Description'].tolist()[0] + \"<br><br>\"\n self.data_source_eda.text = data_text\n else:\n self.reset_data_eda()\n\n def eda_button_enable(self, attr, old, new):\n\n if (self.select_x_axis.value != 'None') and (self.select_y_axis.value != \"None\"):\n self.button_eda_plot.disabled = False\n else:\n self.button_eda_plot.disabled = True\n\n if self.select_hist.value != \"None\":\n self.button_hist_plot.disabled = False\n else:\n self.button_hist_plot.disabled = True\n\n if self.select_count_plot.value != \"None\":\n self.button_count_plot.disabled = False\n else:\n self.button_count_plot.disabled = True\n\n def 
exploration_plots(self):\n\n df_exploration = pd.DataFrame()\n self.source_eda = ColumnDataSource(data=dict(df_exploration))\n eda_columns = [TableColumn(field=cols, title=cols) for cols in df_exploration.columns]\n self.table_eda = DataTable(source=self.source_eda, columns=eda_columns, width=1200,\n height=300, fit_columns=True)\n\n x_scat, y_scat, scat_color = [], [], []\n self.source_scatter = ColumnDataSource(\n data=dict(x=x_scat, y=y_scat, color=scat_color))\n self.hover_scatter = HoverTool(\n tooltips=[(\"X\", \"@x{1.11}\"),\n (\"Y\", \"@y{1.11}\")])\n\n self.plot_scatter = figure(title=\"Scatter Plot\", plot_height=600, plot_width=800,\n tools=['pan,box_zoom,reset'] + [self.hover_scatter])\n self.plot_scatter.scatter(x='x', y='y', size=10, line_color=\"white\", alpha=0.6,\n hover_color='white', hover_alpha=0.5, source=self.source_scatter, fill_color='color')\n self.plot_scatter = self.plot_format(self.plot_scatter)\n self.plot_scatter.min_border_left = 75\n self.plot_scatter.min_border_bottom = 75\n\n hist, edges = [], []\n\n self.source_histogram = ColumnDataSource(\n data=dict(top=hist, left=edges[:-1], right=edges[1:]))\n\n hover_hist = HoverTool(\n tooltips=[(\"X\", \"@left{1.11} ~ @right{1.11}\"),\n (\"Count\", \"@top{int}\")])\n self.plot_hist = figure(title='Histogram', plot_height=600, plot_width=800,\n tools=['pan,box_zoom,reset'] + [hover_hist])\n self.plot_hist.quad(top='top', bottom=0, left='left', right='right', source=self.source_histogram,\n fill_color='dodgerblue', line_color=\"white\", fill_alpha=0.8)\n self.plot_hist = self.plot_format(self.plot_hist)\n self.plot_hist.min_border_left = 50\n self.plot_hist.min_border_bottom = 50\n\n count_column, count_value = [], []\n\n self.source_count_plot = ColumnDataSource(\n data=dict(x=count_column, y=count_value))\n\n hover_count_plot = HoverTool(tooltips=[(\"Category:\", \"@x\"),\n (\"Count:\", \"@y{int}\")])\n self.plot_count_plot = figure(title=\"Count Plot\", plot_height=600, plot_width=800,\n tools=['pan,box_zoom,reset']+[hover_count_plot], x_range=[])\n self.plot_count_plot.vbar(x='x', top='y', width=0.9, source=self.source_count_plot,\n fill_color='dodgerblue',\n line_color=\"white\", fill_alpha=0.8)\n self.plot_count_plot.background_fill_color = self.background_fill_color\n self.plot_count_plot.border_fill_color = self.border_fill_color\n self.plot_count_plot.title.align = self.title_align\n self.plot_count_plot.title.text_font = self.text_font\n self.plot_count_plot.axis.axis_label_text_font = self.axis_label_text_font\n self.plot_count_plot.axis.axis_label_text_font_size = self.axis_label_text_font_size\n self.plot_count_plot.title.text_font_size = self.text_font_size\n self.plot_count_plot.min_border_top = 50\n self.plot_count_plot.min_border_bottom = 75\n self.plot_count_plot.xaxis.major_label_orientation = pi / 4\n\n self.explore_data_select = Select(title=\"Dataset:\", value=\"Select dataset\",\n options=[\"Select dataset\"] + list(self.eda_data_source.keys()))\n self.select_x_axis = Select(\n title=\"X-Axis:\", value=\"None\", options=[\"None\"])\n self.select_y_axis = Select(\n title=\"Y-Axis:\", value=\"None\", options=[\"None\"])\n self.select_color = Select(\n title=\"Color:\", value=\"None\", options=[\"None\"])\n self.button_eda_plot = Button(label=\"Draw Plot\")\n self.button_eda_plot.disabled = True\n\n self.select_hist = Select(\n title=\"Histogram Value:\", value=\"None\", options=[\"None\"])\n self.slider_bins = Slider(title=\"Histogram Bins\", value=20, start=5.0, end=50, step=1,\n 
callback_policy='mouseup', css_classes=['custom_slider'])\n\n self.log_x_cb = CheckboxGroup(\n labels=[\"Log transform: x-axis\"], active=[])\n self.log_y_cb = CheckboxGroup(\n labels=[\"Log transform: y-axis\"], active=[])\n self.log_hist_cb = CheckboxGroup(\n labels=[\"Log transform axis\"], active=[])\n\n self.button_hist_plot = Button(label=\"Draw Histogram\")\n self.button_hist_plot.disabled = True\n\n self.select_count_plot = Select(\n title=\"Count Plot Value:\", value=\"None\", options=[\"None\"])\n self.button_count_plot = Button(label=\"Draw Count Plot\")\n self.button_count_plot.disabled = True\n\n self.select_x_axis.on_change('value', self.eda_button_enable)\n self.select_y_axis.on_change('value', self.eda_button_enable)\n self.select_hist.on_change('value', self.eda_button_enable)\n self.select_count_plot.on_change('value', self.eda_button_enable)\n self.explore_data_select.on_change(\"value\", self.eda_table)\n self.button_eda_plot.on_click(self.create_eda_figure)\n self.button_hist_plot.on_click(self.create_hist_figure)\n self.button_count_plot.on_click(self.create_count_figure)\n\n \n self.data_source_eda = Div(text='', width = 800, height = 200, css_classes=['itemconfiguration'])\n\n tab_eda = Panel(child=column(row(self.explore_data_select, self.data_source_eda), self.table_eda,\n row(column(self.select_x_axis, self.log_x_cb, self.select_y_axis, self.log_y_cb,\n self.select_color, self.button_eda_plot), self.plot_scatter),\n row(column(self.select_hist, self.log_hist_cb, self.slider_bins,\n self.button_hist_plot), self.plot_hist),\n row(column(self.select_count_plot,\n self.button_count_plot), self.plot_count_plot)),\n title=\"Data Exploration\")\n return tab_eda\n\n\nclass linear_regression(plot_attributes):\n\n \"\"\"\n\n Linear Regression Tab\n\n \"\"\"\n\n def __init__(self):\n self.color_bar = None\n self.plot_hist_resid = None\n self.reg_target_ms = None\n self.source_corr = None\n self.plot_corr = None\n self.table_reg = None\n self.button_reg = None\n self.hline = None\n self.hover_corr = None\n self.hover_reg = None\n self.hover_resid = None\n self.hover_resid_hist = None\n self.legend_reg = None\n self.plot_reg = None\n self.plot_resid = None\n self.reg_data_select = None\n self.reg_features_ms = None\n self.reg_scatter = None\n self.active_df = None\n self.reg_df = None\n self.normalize_linreg = None\n self.reset_data_reg()\n\n def reset_data_reg(self):\n self.source_reg.data = {}\n self.source_reg_scat.data = dict(actual=[], predict=[])\n self.source_reg_resid.data = dict(predict=[], residual=[])\n self.source_hist_resid.data = dict(top=[], bottom=[], right=[])\n self.legend_reg.items = []\n self.table_reg.columns = []\n self.color_bar_reg.scale_alpha = 0\n self.color_bar_reg.major_label_text_alpha = 0\n self.reg_features_ms.options = [\"ALL\"]\n self.reg_features_ms.value = [\"ALL\"]\n self.reg_target_ms.options = ['SELECT TARGET']\n self.reg_target_ms.value = 'SELECT TARGET'\n self.button_logreg.disabled = True\n top, bottom, left, right, labels, nlabels, color_list, corr = get_corr_plot(\n pd.DataFrame())\n self.corr_plot(top, bottom, left, right, labels,\n nlabels, color_list, corr)\n\n def corr_plot(self, top, bottom, left, right, labels, nlabels, color_list, corr):\n\n self.source_corr.data = dict(\n top=top, bottom=bottom, left=left, right=right, color=color_list, corr=corr)\n self.plot_corr.x_range.start, self.plot_corr.x_range.end = 0, nlabels\n self.plot_corr.y_range.start, self.plot_corr.y_range.end = 0, nlabels\n ticks = [tick + 0.5 for tick in 
list(range(nlabels))]\n\n tick_dict = OrderedDict([[tick, labels[ii]]\n for ii, tick in enumerate(ticks)])\n\n self.color_bar_reg.scale_alpha = 1\n self.color_bar_reg.major_label_text_alpha = 1\n self.plot_corr.xaxis.ticker = ticks\n self.plot_corr.yaxis.ticker = ticks\n self.plot_corr.xaxis.major_label_overrides = tick_dict\n self.plot_corr.yaxis.major_label_overrides = tick_dict\n\n def reg_plot(self):\n\n features = self.reg_features_ms.value\n label = self.reg_target_ms.value\n active_norm = self.normalize_linreg.active\n\n if label != \"SELECT TARGET\":\n if 'ALL' in features:\n df_columns = self.reg_df.columns.values.tolist()\n df_columns.remove(label)\n features_df = self.reg_df.loc[:, df_columns]\n\n else:\n if label in features:\n features.remove(label)\n features_df = self.reg_df.loc[:, features]\n\n else:\n features_df = self.reg_df.loc[:, features]\n\n target_df = self.reg_df.loc[:, label]\n\n actual_reg, predict_reg, text, MAE, RMSE, residual, \\\n slope, intercept = get_regression_plot(\n features_df, target_df, active_norm)\n\n self.plot_reg.x_range.start, self.plot_reg.x_range.end = actual_reg.min(), actual_reg.max()\n self.plot_reg.y_range.start, self.plot_reg.y_range.end = predict_reg.min(), predict_reg.max()\n\n self.plot_resid.x_range.start, self.plot_resid.x_range.end = predict_reg.min(), predict_reg.max()\n self.plot_resid.y_range.start, self.plot_resid.y_range.end = residual.min(), residual.max()\n\n self.source_reg_scat.data = dict(\n actual=list(actual_reg), predict=list(predict_reg))\n self.source_reg_resid.data = dict(\n predict=list(predict_reg), residual=list(residual))\n self.legend_reg.items = [LegendItem(label=text[0], renderers=[self.reg_scatter]),\n LegendItem(label=\"MAE - \" + str(MAE),\n renderers=[self.reg_scatter]),\n LegendItem(label=\"RMSE - \" + str(RMSE), renderers=[self.reg_scatter])]\n\n vhist, vedges = np.histogram(residual, bins=50)\n vmax = max(vhist) * 1.1\n\n self.plot_hist_resid.x_range.start, self.plot_hist_resid.x_range.end = 0, vmax\n self.plot_hist_resid.y_range.start, self.plot_hist_resid.y_range.end = residual.min(), residual.max()\n\n self.hline.line_alpha = 0.5\n self.source_hist_resid.data = dict(\n top=vedges[1:], bottom=vedges[:-1], right=vhist)\n\n self.error_count += 1\n self.alert_reg.text = str(self.error_count)+\" Regression Completed\"\n\n def create_figure_reg(self, attr, old, new):\n self.active_df = self.reg_data_select.value\n\n if self.active_df != \"Select dataset\":\n self.reset_data_reg()\n self.file_path = str(\n self.cwd + self.data_path + str(self.regression_data_source.get(self.active_df)))\n\n self.reg_df = pd.read_csv(self.file_path)\n self.reg_df = self.reg_df.fillna(self.reg_df.mean())\n self.reg_df.columns = [x.upper() for x in self.reg_df.columns]\n self.source_reg.data = dict(self.reg_df)\n self.table_reg.columns = [TableColumn(\n field=cols, title=cols, width=90) for cols in self.reg_df.columns]\n\n self.reg_features_ms.options = ['ALL'] + list(self.reg_df.columns)\n\n likely_target = {}\n for var in self.reg_df.columns:\n likely_target[var] = self.reg_df[var].nunique() > self.reg_df.shape[0]*0.1\n likely_target = [k for k, v in likely_target.items() if v is True]\n self.reg_target_ms.options = [\n 'SELECT TARGET'] + list(likely_target)\n\n top, bottom, left, right, labels, nlabels, color_list, corr = get_corr_plot(self.reg_df)\n self.corr_plot(top, bottom, left, right, labels, nlabels, color_list, corr)\n\n self.button_reg.disabled = True\n else:\n self.reset_data_reg()\n\n def button_enable(self, 
attr, old, new):\n if self.reg_target_ms.value != 'SELECT TARGET':\n self.button_reg.disabled = False\n else:\n self.button_reg.disabled = True\n\n def lin_reg(self):\n df_reg = pd.DataFrame()\n self.source_reg = ColumnDataSource(data=dict(df_reg))\n reg_columns = [TableColumn(field=cols, title=cols)\n for cols in df_reg.columns]\n self.table_reg = DataTable(source=self.source_reg, columns=reg_columns, width=1200, height=300,\n fit_columns=True)\n\n top, bottom, left, right, color, corr = [], [], [], [], [], []\n self.source_corr = ColumnDataSource(data=dict(top=top, bottom=bottom, left=left, right=right, \n color=color, corr=corr))\n\n self.hover_corr = HoverTool(tooltips=[(\"Correlation\", \"@corr{1.11}\")])\n\n self.plot_corr = figure(plot_width=750, plot_height=650, title=\"Correlation Matrix\",\n toolbar_location='left', tools=[self.hover_corr])\n\n self.plot_corr.quad(top='top', bottom='bottom', left='left', right='right', color='color', \n line_color='white', source=self.source_corr)\n self.plot_corr = self.plot_format(self.plot_corr)\n self.plot_corr.xgrid.grid_line_color = None\n self.plot_corr.ygrid.grid_line_color = None\n self.plot_corr.xaxis.major_label_orientation = pi / 4\n self.plot_corr.min_border_left = 110\n self.plot_corr.min_border_bottom = 110\n self.plot_corr.y_range.flipped = True\n\n corr_colors = list(reversed(RdBu[9]))\n self.reg_mapper = LinearColorMapper(\n palette=corr_colors, low=-1, high=1)\n\n self.color_bar_reg = ColorBar(color_mapper=self.reg_mapper, location=(0, 0),\n ticker=BasicTicker(\n desired_num_ticks=len(corr_colors)),\n scale_alpha=0, major_label_text_alpha=0)\n self.plot_corr.add_layout(self.color_bar_reg, 'right')\n self.color_bar_reg.background_fill_color = 'whitesmoke'\n\n actual_reg, predict_reg = [], []\n self.source_reg_scat = ColumnDataSource(\n data=dict(actual=actual_reg, predict=predict_reg))\n\n self.hover_reg = HoverTool(tooltips=[(\"Actual\", \"@actual{int}\"),\n (\"Predicted\", \"@predict{int}\")])\n\n self.plot_reg = figure(plot_height=500, plot_width=900,\n tools=['pan,box_zoom,reset,wheel_zoom'] + [self.hover_reg])\n\n self.reg_scatter = self.plot_reg.scatter(x='actual', y='predict', size=7, line_color=\"white\", alpha=0.6,\n hover_color='white',\n hover_alpha=0.5, source=self.source_reg_scat,\n fill_color='dodgerblue', )\n\n self.legend_reg = Legend(items=[LegendItem(label=\"\", renderers=[\n self.reg_scatter])], location='bottom_right')\n self.plot_reg.add_layout(self.legend_reg)\n self.plot_reg = self.plot_format(self.plot_reg)\n self.plot_reg.xaxis.axis_label = \"Actual Value\"\n self.plot_reg.yaxis.axis_label = \"Predicted Value\"\n\n residual, predict_reg = [], []\n self.source_reg_resid = ColumnDataSource(\n data=dict(predict=predict_reg, residual=residual))\n\n self.hover_resid = HoverTool(tooltips=[(\"Predicted\", \"@predict{int}\"),\n (\"Residual\", \"@residual{int}\")],\n names=['resid'])\n\n self.plot_resid = figure(plot_height=500, plot_width=700,\n tools=['pan,box_zoom,reset,wheel_zoom'] + [self.hover_resid])\n\n self.hline = Span(location=0, dimension='width', line_color='black', line_width=3,\n line_alpha=0, line_dash=\"dashed\")\n self.plot_resid.renderers.extend([self.hline])\n\n self.plot_resid.scatter(x='predict', y='residual', size=7, line_color=\"white\", alpha=0.6, hover_color='white',\n hover_alpha=0.5, source=self.source_reg_resid, fill_color='dodgerblue', name='resid')\n self.plot_resid = self.plot_format(self.plot_resid)\n self.plot_resid.xaxis.axis_label = \"Predicted Value\"\n 
self.plot_resid.yaxis.axis_label = \"Residual Value\"\n\n vhist, vedges = [], []\n\n self.source_hist_resid = ColumnDataSource(\n data=dict(top=vedges[1:], bottom=vedges[:-1], right=vhist))\n self.hover_resid_hist = HoverTool(tooltips=[(\"Count\", \"@right{int}\")])\n self.plot_hist_resid = figure(toolbar_location=None, plot_width=200, plot_height=self.plot_resid.plot_height,\n y_range=self.plot_resid.y_range, min_border=10, y_axis_location=\"right\",\n tools=[self.hover_resid_hist] + ['pan'])\n self.plot_hist_resid.quad(left=0, bottom='bottom', top='top', right='right', color=\"dodgerblue\",\n line_color=\"white\", source=self.source_hist_resid)\n\n self.plot_hist_resid.ygrid.grid_line_color = None\n self.plot_hist_resid.xaxis.major_label_orientation = np.pi / 4\n self.plot_hist_resid = self.plot_format(self.plot_hist_resid)\n\n self.reg_data_select = Select(title=\"Dataset:\", value=\"Select dataset\",\n options=[\"Select dataset\"] + list(self.regression_data_source.keys()))\n self.reg_features_ms = MultiSelect(\n title=\"Select features:\", value=[\"ALL\"], options=[\"ALL\"])\n self.normalize_linreg = RadioButtonGroup(\n labels=[\"Actual Data\", \"Normalize Data\"], active=0)\n\n self.reg_target_ms = Select(title=\"Select target for regression:\", value=\"SELECT TARGET\",\n options=[\"SELECT TARGET\"])\n self.button_reg = Button(label=\"Calculate regression\")\n self.button_reg.disabled = True\n\n self.reg_data_select.on_change(\"value\", self.create_figure_reg)\n self.reg_target_ms.on_change('value', self.button_enable)\n self.button_reg.on_click(self.reg_plot)\n\n self.div_whitespace = Div(text=\"\"\"\"\"\", height=100)\n\n self.alert_reg = Div(text='', css_classes=['hidden'], visible=False)\n\n self.alert_reg.js_on_change('text', self.callback_notification)\n\n tab_reg = Panel(child=column(self.reg_data_select, self.table_reg, self.plot_corr,\n row(column(self.reg_features_ms, self.normalize_linreg,\n self.reg_target_ms, self.button_reg),\n column(self.plot_reg, row(self.plot_resid, self.plot_hist_resid),\n self.alert_reg, self.div_whitespace))),\n title=\"Linear Regression\")\n\n return tab_reg\n\n\nclass logistic_regression(plot_attributes):\n \"\"\"\n Tab for Logistic Regression\n\n \"\"\"\n\n def __init__(self):\n\n self.active_df = None\n self.logreg_df = None\n self.legend_roc = None\n self.roc_line = None\n self.hover_logreg_cm = None\n self.color_bar_logreg_cm = None\n self.table_class_rep = None\n self.button_logreg = None\n self.hover_logreg_roc = None\n self.labels_logreg_cm = None\n self.logreg_roc_plot = None\n self.normalize_logreg = None\n self.div_report_title = None\n self.reset_data_logreg()\n\n def reset_data_logreg(self):\n self.source_logreg.data = {}\n self.source_class_rep_logreg.data = {}\n self.source_logreg_cm.data = dict(Actual=[], Prediction=[], value=[])\n self.source_logreg_roc.data = dict(fpr_roc=[], tpr_roc=[])\n self.source_logreg_const_roc.data = dict(\n const_roc_x=[], const_roc_y=[])\n self.table_logreg.columns = []\n self.table_class_rep_logreg.columns = []\n self.legend_roc.items = []\n self.color_bar_logreg_cm.scale_alpha = 0\n self.color_bar_logreg_cm.major_label_text_alpha = 0\n self.logreg_features_ms.options = [\"ALL\"]\n self.logreg_features_ms.value = [\"ALL\"]\n self.logreg_target_ms.options = ['SELECT TARGET']\n self.logreg_target_ms.value = 'SELECT TARGET'\n self.button_logreg.disabled = True\n\n def logreg_button_enable(self, attr, old, new):\n\n if self.logreg_target_ms.value != 'SELECT TARGET':\n self.button_logreg.disabled = 
False\n else:\n self.button_logreg.disabled = True\n\n def create_figure_logreg(self, attr, old, new):\n self.active_df = self.logreg_data_select.value\n\n if self.active_df != \"Select dataset\":\n self.reset_data_logreg()\n\n self.file_path = str(self.cwd + self.data_path +\n str(self.logreg_data_source.get(self.active_df)))\n\n logreg_df = pd.read_csv(self.file_path)\n logreg_df = logreg_df.fillna(logreg_df.mean())\n logreg_df.columns = [x.upper() for x in logreg_df.columns]\n self.logreg_df = logreg_df\n\n self.source_logreg.data = dict(logreg_df)\n self.table_logreg.columns = [TableColumn(field=cols, title=cols, width=90) for cols in\n self.logreg_df.columns]\n\n self.logreg_features_ms.options = [\n \"ALL\"] + logreg_df.columns.values.tolist()\n\n likely_cat = {}\n for var in logreg_df.columns:\n likely_cat[var] = logreg_df[var].nunique() == 2 and set(\n logreg_df[var].unique()) == set([0, 1])\n likely_cat = [k for k, v in likely_cat.items() if v is True]\n\n self.logreg_target_ms.options = ['SELECT TARGET'] + likely_cat\n self.button_logreg.disabled = True\n else:\n self.reset_data_logreg()\n\n def logreg_plot(self):\n features = self.logreg_features_ms.value\n label = self.logreg_target_ms.value\n logreg_df = self.logreg_df\n active_norm = self.normalize_logreg.active\n\n if label != \"SELECT TARGET\":\n if 'ALL' in features:\n df_columns = logreg_df.columns.values.tolist()\n df_columns.remove(label)\n features_df = logreg_df.loc[:, df_columns]\n else:\n if label in features:\n features.remove(label)\n features_df = logreg_df.loc[:, features]\n else:\n features_df = logreg_df.loc[:, features]\n\n target_df = logreg_df.loc[:, label]\n\n accuracy_score, class_report_df, confusion_df, \\\n logit_roc_auc, fpr, tpr, thresholds = get_logreg_output(\n features_df, target_df, active_norm)\n\n self.source_class_rep_logreg.data = dict(class_report_df)\n self.table_class_rep_logreg.columns = [TableColumn(field=cols, title=cols, width=90) for cols in\n class_report_df.columns]\n self.table_class_rep_logreg.index_position = None\n\n self.logreg_cm_mapper.low, self.logreg_cm_mapper.high = confusion_df.value.values.min(\n ), confusion_df.value.values.max()\n self.color_bar_logreg_cm.scale_alpha = 1\n self.color_bar_logreg_cm.major_label_text_alpha = 1\n\n self.logreg_cm_plot.x_range.start, self.logreg_cm_plot.x_range.end = confusion_df.Actual.min(), \\\n confusion_df.Actual.max()\n self.logreg_cm_plot.y_range.start, self.logreg_cm_plot.y_range.end = confusion_df.Prediction.min(), \\\n confusion_df.Prediction.max()\n\n self.logreg_cm_plot.xaxis.ticker = sorted(target_df.unique())\n self.logreg_cm_plot.yaxis.ticker = sorted(target_df.unique())\n self.logreg_cm_plot.xaxis.axis_label = \"Actual\"\n self.logreg_cm_plot.yaxis.axis_label = \"Predicted\"\n\n self.source_logreg_cm.data = confusion_df\n self.source_logreg_roc.data = dict(fpr_roc=fpr, tpr_roc=tpr)\n self.logreg_roc_plot.xaxis.axis_label = \"False Positive Rate\"\n self.logreg_roc_plot.yaxis.axis_label = \"True Positive Rate\"\n self.legend_roc.items = [LegendItem(label=\"Logistic Regression (area = \" + str(logit_roc_auc) + \")\",\n renderers=[self.roc_line])]\n self.source_logreg_const_roc.data = dict(\n const_roc_x=[0, 1], const_roc_y=[0, 1])\n\n self.error_count += 1\n self.alert_logreg.text = str(\n self.error_count)+\" Logistic Regression Completed\"\n\n def logreg(self):\n\n df_logreg = pd.DataFrame()\n self.source_logreg = ColumnDataSource(data=dict(df_logreg))\n logreg_columns = [TableColumn(field=cols, title=cols)\n for cols in 
df_logreg.columns]\n self.table_logreg = DataTable(source=self.source_logreg, columns=logreg_columns, width=1200, height=300,\n fit_columns=True)\n\n df_class_report = pd.DataFrame()\n self.source_class_rep_logreg = ColumnDataSource(\n data=dict(df_class_report))\n class_rep_columns_logreg = [TableColumn(\n field=cols, title=cols) for cols in df_class_report.columns]\n self.table_class_rep_logreg = DataTable(source=self.source_class_rep_logreg, columns=class_rep_columns_logreg,\n width=600, height=200, fit_columns=True)\n\n logreg_cm_colors = list(reversed(Blues[9]))\n actual_cm, predicted_cm, value_cm = [], [], []\n self.source_logreg_cm = ColumnDataSource(\n data=dict(Actual=actual_cm, Prediction=predicted_cm, value=value_cm))\n\n self.logreg_cm_mapper = LinearColorMapper(\n palette=logreg_cm_colors, low=0, high=100)\n\n self.labels_logreg_cm = LabelSet(x='Actual', y='Prediction', text='value', level='overlay', x_offset=0,\n y_offset=-10,\n source=self.source_logreg_cm, render_mode='canvas', text_align='center',\n text_font='times',\n text_color='#FF0000', text_font_style='bold', text_font_size='16px')\n\n self.hover_logreg_cm = HoverTool(tooltips=[(\"Actual\", \"@Actual\"),\n (\"Predicted\", \"@Prediction\"),\n (\"Value\", \"@value\")])\n self.logreg_cm_plot = figure(plot_width=400, plot_height=300, title=\"Confusion Matrix\", toolbar_location=None,\n tools=[self.hover_logreg_cm], x_axis_location=\"above\")\n\n self.logreg_cm_plot.rect(x=\"Actual\", y=\"Prediction\", width=.9, height=.9, source=self.source_logreg_cm,\n line_color='black', fill_color=transform('value', self.logreg_cm_mapper))\n self.logreg_cm_plot.y_range.flipped = True\n\n self.color_bar_logreg_cm = ColorBar(color_mapper=self.logreg_cm_mapper, location=(0, 0),\n ticker=BasicTicker(\n desired_num_ticks=len(logreg_cm_colors)),\n scale_alpha=0, major_label_text_alpha=0)\n\n self.logreg_cm_plot.add_layout(self.color_bar_logreg_cm, 'right')\n self.color_bar_logreg_cm.background_fill_color = \"whitesmoke\"\n\n self.logreg_cm_plot = self.plot_format(self.logreg_cm_plot)\n self.logreg_cm_plot.add_layout(self.labels_logreg_cm)\n self.logreg_cm_plot.min_border_left = 50\n self.logreg_cm_plot.min_border_top = 50\n\n self.hover_logreg_roc = HoverTool(tooltips=[(\"False Positive Rate\", \"@fpr_roc\"),\n (\"True Positive Rate\", \"@tpr_roc\")],\n names=['roc'])\n\n fpr_roc, tpr_roc = [], []\n\n self.source_logreg_roc = ColumnDataSource(\n data=dict(fpr_roc=fpr_roc, tpr_roc=tpr_roc))\n\n const_roc_x, const_roc_y = [], []\n self.source_logreg_const_roc = ColumnDataSource(\n data=dict(const_roc_x=const_roc_x, const_roc_y=const_roc_y))\n\n self.logreg_roc_plot = figure(plot_width=500, plot_height=500, title=\"ROC AUC\", toolbar_location=None,\n tools=[self.hover_logreg_roc], x_range=(-0.04, 1.04), y_range=(-0.04, 1.04))\n\n self.roc_line = self.logreg_roc_plot.line(x=\"fpr_roc\", y=\"tpr_roc\", line_width=4, source=self.source_logreg_roc,\n line_color='dodgerblue', name='roc')\n self.logreg_roc_plot.line(x=\"const_roc_x\", y=\"const_roc_y\", line_width=2, line_dash='dashed',\n source=self.source_logreg_const_roc, line_color='orangered')\n self.legend_roc = Legend(items=[LegendItem(label=\"\", renderers=[\n self.roc_line])], location='bottom_right')\n self.logreg_roc_plot.add_layout(self.legend_roc)\n self.logreg_roc_plot = self.plot_format(self.logreg_roc_plot)\n self.logreg_roc_plot.min_border_left = 50\n self.logreg_roc_plot.min_border_bottom = 50\n\n self.logreg_data_select = Select(title=\"Dataset:\", value=\"Select dataset\",\n 
options=[\"Select dataset\"] + list(self.logreg_data_source.keys()))\n self.logreg_features_ms = MultiSelect(\n title=\"Select features:\", value=[\"ALL\"], options=[\"ALL\"])\n self.normalize_logreg = RadioButtonGroup(\n labels=[\"Actual Data\", \"Normalize Data\"], active=0)\n\n self.logreg_target_ms = Select(title=\"Select target for Logistic regression:\", value=\"SELECT TARGET\",\n options=[\"SELECT TARGET\"])\n self.button_logreg = Button(label=\"Calculate regression\")\n self.button_logreg.disabled = True\n\n self.logreg_data_select.on_change(\"value\", self.create_figure_logreg)\n self.logreg_target_ms.on_change('value', self.logreg_button_enable)\n self.button_logreg.on_click(self.logreg_plot)\n\n self.div_report_title = Div(\n text=\"\"\"<center>Classification Report</center>\"\"\", width=600)\n\n self.alert_logreg = Div(text='', css_classes=['hidden'], visible=False)\n\n self.alert_logreg.js_on_change('text', self.callback_notification)\n\n tab_logreg = Panel(child=column(self.logreg_data_select, self.table_logreg,\n row(column(self.logreg_features_ms, self.normalize_logreg,\n self.logreg_target_ms, self.button_logreg),\n column(self.div_report_title, self.table_class_rep_logreg, self.logreg_cm_plot,\n self.logreg_roc_plot, self.alert_logreg))),\n title=\"Logistic Regression\")\n\n return tab_logreg\n\n\nclass classification(plot_attributes):\n\n def __init__(self):\n self.source_classify = None\n\n def create_figure_classify(self, attr, old, new):\n self.active_df = self.classify_data_select.value\n\n if self.active_df != \"Select dataset\":\n self.file_path = str(\n self.cwd + self.data_path + str(self.classify_data_source.get(self.active_df)))\n classify_df = pd.read_csv(self.file_path)\n classify_df = classify_df.fillna(classify_df.mean())\n classify_df.columns = [x.upper() for x in classify_df.columns]\n self.classify_df = classify_df\n\n self.source_classify.data = dict(classify_df)\n self.table_classify.columns = [TableColumn(field=cols, title=cols, width=90) for cols in\n self.classify_df.columns]\n\n self.classify_features_ms.options = [\n \"ALL\"] + classify_df.columns.values.tolist()\n\n likely_cat = {}\n for var in classify_df.columns:\n likely_cat[var] = classify_df[var].nunique() <= 20\n likely_cat = [k for k, v in likely_cat.items() if v is True]\n\n self.classify_target_ms.options = ['SELECT TARGET'] + likely_cat\n\n self.button_classify.disabled = True\n\n else:\n self.source_classify.data = {}\n self.table_classify.columns = []\n self.classify_features_ms.options = [\"ALL\"]\n self.classify_features_ms.value = [\"ALL\"]\n self.classify_target_ms.options = ['SELECT TARGET']\n self.classify_target_ms.value = 'SELECT TARGET'\n self.button_classify.disabled = True\n self.source_classify_cm.data = {}\n self.source_classify_fi.data = {}\n self.source_class_rep_classify.data = {}\n\n def classify_button_enable(self, attr, old, new):\n if self.classify_target_ms.value != \"SELECT TARGET\":\n self.button_classify.disabled = False\n else:\n self.button_classify.disabled = True\n\n def classify_plot(self):\n features = self.classify_features_ms.value\n label = self.classify_target_ms.value\n classify_df = self.classify_df\n active_norm = self.normalize_classify.active\n\n if label != \"SELECT TARGET\":\n if 'ALL' in features:\n df_columns = classify_df.columns.values.tolist()\n df_columns.remove(label)\n features_df = classify_df.loc[:, df_columns]\n else:\n if label in features:\n features.remove(label)\n features_df = classify_df.loc[:, features]\n else:\n features_df = 
classify_df.loc[:, features]\n\n target_df = classify_df.loc[:, label]\n\n accuracy_score, class_report_df, confusion_df, \\\n rf_feature_labels, rf_feature_importance = get_classify_output(\n features_df, target_df, active_norm)\n\n self.source_class_rep_classify.data = dict(class_report_df)\n self.table_class_rep_classify.columns = [TableColumn(field=cols, title=cols, width=90) for cols in\n class_report_df.columns]\n self.table_class_rep_classify.index_position = None\n\n self.classify_cm_mapper.low, self.classify_cm_mapper.high = confusion_df.value.values.min(), \\\n confusion_df.value.values.max()\n self.color_bar_classify_cm.scale_alpha = 1\n self.color_bar_classify_cm.major_label_text_alpha = 1\n\n if str(confusion_df['Actual'].dtype) == 'object' or str(confusion_df['Prediction'].dtype) == 'object':\n\n self.classify_cm_plot.xaxis.ticker = list(\n set(pd.Categorical(confusion_df['Actual']).codes))\n ticker_x_dict = dict(\n enumerate(pd.Categorical(confusion_df['Actual']).categories))\n confusion_df['Actual'] = pd.Categorical(\n confusion_df['Actual']).codes\n self.classify_cm_plot.xaxis.major_label_overrides = ticker_x_dict\n self.classify_cm_plot.xaxis.major_label_orientation = pi / 4\n\n self.classify_cm_plot.yaxis.ticker = list(\n set(pd.Categorical(confusion_df['Prediction']).codes))\n ticker_y_dict = dict(enumerate(pd.Categorical(\n confusion_df['Prediction']).categories))\n confusion_df['Prediction'] = pd.Categorical(\n confusion_df['Prediction']).codes\n self.classify_cm_plot.yaxis.major_label_overrides = ticker_y_dict\n\n else:\n self.classify_cm_plot.x_range.start, self.classify_cm_plot.x_range.end = confusion_df.Actual.min(), \\\n confusion_df.Actual.max()\n self.classify_cm_plot.y_range.start, self.classify_cm_plot.y_range.end = confusion_df.Prediction.min(), \\\n confusion_df.Prediction.max()\n self.classify_cm_plot.xaxis.ticker = sorted(target_df.unique())\n self.classify_cm_plot.yaxis.ticker = sorted(target_df.unique())\n\n self.classify_cm_plot.xaxis.axis_label = \"Actual\"\n self.classify_cm_plot.yaxis.axis_label = \"Predicted\"\n\n self.source_classify_cm.data = confusion_df\n rf_df = pd.DataFrame(dict({'rf_features': rf_feature_labels,\n 'rf_importance': rf_feature_importance})).nlargest(15, \"rf_importance\")\n self.source_classify_fi.data = dict(rf_df)\n self.classify_fi_plot.x_range.factors = rf_df['rf_features'].values.tolist(\n )\n\n self.error_count += 1\n self.alert_classify.text = str(\n self.error_count)+\" Classification completed\"\n\n def classify(self):\n df_classify = pd.DataFrame()\n self.source_classify = ColumnDataSource(data=dict(df_classify))\n classify_columns = [TableColumn(field=cols, title=cols)\n for cols in df_classify.columns]\n self.table_classify = DataTable(source=self.source_classify, columns=classify_columns, width=1200, height=300,\n fit_columns=True)\n\n df_class_report = pd.DataFrame()\n self.source_class_rep_classify = ColumnDataSource(\n data=dict(df_class_report))\n class_rep_columns_classify = [TableColumn(\n field=cols, title=cols) for cols in df_class_report.columns]\n self.table_class_rep_classify = DataTable(source=self.source_class_rep_classify, columns=class_rep_columns_classify, width=600, height=200,\n fit_columns=True)\n\n classify_cm_colors = list(reversed(Blues[9]))\n actual_cm, predicted_cm, value_cm = [], [], []\n self.source_classify_cm = ColumnDataSource(data=dict(Actual=actual_cm, Prediction=predicted_cm,\n value=value_cm))\n\n self.classify_cm_mapper = LinearColorMapper(\n palette=classify_cm_colors, low=0, 
high=100)\n self.labels_classify_cm = LabelSet(x='Actual', y='Prediction', text='value', level='overlay', x_offset=0,\n y_offset=-10,\n source=self.source_classify_cm, render_mode='canvas', text_align='center',\n text_font='times',\n text_color='#FF0000', text_font_style='bold', text_font_size='16px')\n\n self.hover_classify_cm = HoverTool(tooltips=[(\"Actual\", \"@Actual\"),\n (\"Predicted\", \"@Prediction\"),\n (\"Value\", \"@value\")])\n self.classify_cm_plot = figure(plot_width=600, plot_height=550, title=\"Confusion Matrix\", toolbar_location=None,\n tools=[self.hover_logreg_cm], x_axis_location=\"above\")\n self.classify_cm_plot.rect(x=\"Actual\", y=\"Prediction\", width=.9, height=.9, source=self.source_classify_cm,\n line_color='black', fill_color=transform('value', self.classify_cm_mapper))\n self.classify_cm_plot.y_range.flipped = True\n\n self.color_bar_classify_cm = ColorBar(color_mapper=self.classify_cm_mapper, location=(0, 0),\n ticker=BasicTicker(\n desired_num_ticks=len(classify_cm_colors)),\n scale_alpha=0, major_label_text_alpha=0)\n\n self.classify_cm_plot.add_layout(self.color_bar_classify_cm, 'right')\n self.color_bar_classify_cm.background_fill_color = \"whitesmoke\"\n self.classify_cm_plot = self.plot_format(self.classify_cm_plot)\n self.classify_cm_plot.add_layout(self.labels_classify_cm)\n self.classify_cm_plot.min_border_left = 100\n self.classify_cm_plot.min_border_top = 100\n self.classify_cm_plot.min_border_bottom = 50\n\n rf_features = []\n rf_importance = []\n\n self.hover_classify_fi = HoverTool(tooltips=[(\"Feature\", \"@rf_features\"),\n (\"Importance Score\", \"@rf_importance{0.02f}\")])\n self.source_classify_fi = ColumnDataSource(\n data=dict(rf_features=rf_features, rf_importance=rf_importance))\n self.classify_fi_plot = figure(x_range=[], plot_width=600, plot_height=600, toolbar_location=None,\n title=\"Feature Importance\", tools=[self.hover_classify_fi])\n self.classify_fi_plot.vbar(x='rf_features', top='rf_importance', bottom=0, width=0.9,\n source=self.source_classify_fi, line_color='white', fill_color='dodgerblue')\n self.classify_fi_plot.background_fill_color = self.background_fill_color\n self.classify_fi_plot.border_fill_color = self.border_fill_color\n self.classify_fi_plot.yaxis.formatter = self.x_axis_format\n self.classify_fi_plot.title.align = self.title_align\n self.classify_fi_plot.title.text_font = self.text_font\n self.classify_fi_plot.axis.axis_label_text_font = self.axis_label_text_font\n self.classify_fi_plot.axis.axis_label_text_font_size = '8pt'\n self.classify_fi_plot.title.text_font_size = self.text_font_size\n self.classify_fi_plot.xaxis.major_label_orientation = pi / 4\n self.classify_fi_plot.min_border_left = 50\n self.classify_fi_plot.min_border_bottom = 100\n\n self.classify_data_select = Select(title=\"Dataset:\", value=\"Select dataset\",\n options=[\"Select dataset\"] + list(self.classify_data_source.keys()))\n self.classify_features_ms = MultiSelect(\n title=\"Select features:\", value=[\"ALL\"], options=[\"ALL\"])\n self.normalize_classify = RadioButtonGroup(\n labels=[\"Actual Data\", \"Normalize Data\"], active=0)\n\n self.classify_target_ms = Select(title=\"Select target for Classification:\", value=\"SELECT TARGET\",\n options=[\"SELECT TARGET\"])\n self.button_classify = Button(label=\"Perform classification\")\n self.button_classify.disabled = True\n\n self.classify_data_select.on_change(\n 'value', self.create_figure_classify)\n self.classify_target_ms.on_change(\"value\", self.classify_button_enable)\n 
self.button_classify.on_click(self.classify_plot)\n\n self.div_report_title = Div(\n text=\"\"\"<center>Classification Report</center>\"\"\", width=600)\n\n self.alert_classify = Div(text='', css_classes=[\n 'hidden'], visible=False)\n\n self.alert_classify.js_on_change('text', self.callback_notification)\n\n tab_classify = Panel(child=column(self.classify_data_select, self.table_classify,\n row(column(self.classify_features_ms, self.normalize_classify, self.classify_target_ms,\n self.button_classify),\n column(self.div_report_title, self.table_class_rep_classify, column(self.classify_cm_plot, self.classify_fi_plot, self.alert_classify)))),\n title=\"Classification\")\n\n return tab_classify\n\n\nclass clustering(plot_attributes):\n \"\"\"\n\n Tab for Clustering\n\n \"\"\"\n\n def __init__(self):\n self.source_clustering = None\n self.clust_df = None\n self.source_clust = None\n self.mapper = None\n self.clust_scat = None\n self.clust_slider = None\n self.button_cluster = None\n self.clus_data_select = None\n self.clust_features_ms = None\n self.clust_norm_rbg = None\n self.hover_clust = None\n self.table_clustering = None\n\n def cluster_plot(self):\n\n active_features = self.clust_features_ms.value\n active_norm = self.clust_norm_rbg.active\n active_clust_no = self.clust_slider.value\n\n source_clust_data = clustering_data(self.clust_df, active_features, active_norm, active_clust_no,\n self.clustering_data_source, self.mapper, self.clust_scat)\n self.source_clust.data = source_clust_data\n self.error_count += 1\n self.alert_cluster.text = str(self.error_count)+\" Clustering Completed\"\n\n def clustering_plot(self, attr, old, new):\n self.active_df = str(self.clus_data_select.value)\n\n if self.active_df != \"Select dataset\":\n self.button_cluster.disabled = False\n self.file_path = str(\n self.cwd + self.data_path + str(self.clustering_data_source.get(self.active_df)))\n\n clust_df = pd.read_csv(self.file_path)\n clust_df = clust_df.fillna(clust_df.mean())\n clust_df.columns = [x.upper() for x in clust_df.columns]\n\n self.clust_df = clust_df\n\n self.source_clustering.data = dict(clust_df)\n self.table_clustering.columns = [TableColumn(field=cols, title=cols, width=90) for cols in\n self.clust_df.columns]\n self.clust_features_ms.options = ['ALL'] + list(clust_df.columns)\n\n else:\n self.button_cluster.disabled = True\n\n def cluster(self):\n df_clustering = pd.DataFrame()\n self.source_clustering = ColumnDataSource(data=dict(df_clustering))\n clust_columns = [TableColumn(field=cols, title=cols)\n for cols in df_clustering.columns]\n self.table_clustering = DataTable(source=self.source_clustering, columns=clust_columns, width=1200, height=300,\n fit_columns=True)\n\n tsne_x, tsne_y, cluster_col = [0], [0], [0]\n self.source_clust = ColumnDataSource(\n data=dict(x=tsne_x, y=tsne_y, cluster=cluster_col))\n\n self.hover_clust = HoverTool(tooltips=[(\"User\", \"$index\"),\n (\"Cluster\", \"@cluster\")])\n self.mapper = linear_cmap(field_name='cluster', palette=Set1[9], low=min(\n cluster_col), high=max(cluster_col))\n self.clust_scat = figure(plot_height=600, plot_width=850, tools=[\n 'pan,box_zoom,reset,tap'] + [self.hover_clust])\n self.clust_scat.scatter(\n \"x\", 'y', source=self.source_clust, color=self.mapper, size=10, legend='cluster')\n self.clust_scat.axis.major_tick_line_color = None\n self.clust_scat.axis.minor_tick_line_color = None\n self.clust_scat.xaxis.axis_label = \"Dimension 1\"\n self.clust_scat.yaxis.axis_label = \"Dimension 2\"\n self.clust_scat.title.text_font_size 
= '12pt'\n self.clust_scat.min_border_top = 40\n self.clust_scat = self.plot_format(self.clust_scat)\n\n self.clus_data_select = Select(title=\"Dataset:\", value=\"Select dataset\",\n options=[\"Select dataset\"] + list(self.clustering_data_source.keys()))\n self.clust_features_ms = MultiSelect(\n title=\"Select features for clustering:\", value=[\"ALL\"], options=[\"ALL\"])\n self.clust_norm_rbg = RadioButtonGroup(\n labels=[\"Actual Data\", \"Normalize Data\"], active=0)\n self.clust_slider = Slider(title=\"Total Clusters\", value=5, start=1, end=20, step=1,\n callback_policy='mouseup', css_classes=['custom_slider'])\n self.button_cluster = Button(\n label=\"Calculate and plot clusters\", css_classes=['button'])\n self.button_cluster.disabled = True\n\n self.clus_data_select.on_change(\"value\", self.clustering_plot)\n self.button_cluster.on_click(self.cluster_plot)\n\n self.alert_cluster = Div(text='', css_classes=[\n 'hidden'], visible=False)\n\n self.alert_cluster.js_on_change('text', self.callback_notification)\n\n tab_cluster = Panel(child=column(self.clus_data_select, self.table_clustering,\n row(column(self.clust_features_ms, self.clust_norm_rbg, self.clust_slider,\n self.button_cluster),\n column(self.clust_scat), self.alert_cluster)), title=\"Clustering\")\n\n return tab_cluster\n\n\nclass main_tool(landing_page, eda_plots, linear_regression, logistic_regression, clustering, classification):\n\n \"\"\"\n Add datasets in each dictionary based on the algorithm\n eda_data_source: Can be any dataset for exploratory analysis\n clustering_data_source: Dataset for clustering algorithm\n regression_data_source: Dataset for linear regression algorithm\n logreg_data_source: Dataset for logistic regression algorithm\n classify_data_source: Dataset for multilabel classification algorithm\n\n \"\"\"\n\n def __init__(self):\n self.cwd = str(os.getcwd())\n self.data_path = \"/ML/Data/\"\n self.eda_data_source = {\"Credit Card (Clustering)\": \"CC GENERAL.csv\",\n \"House Sales (Lin. Reg.)\": \"HOUSING PRICE.csv\",\n \"Diabetes (Log. 
Reg.)\": \"DIABETES.csv\",\n \"Glass Type (Classification)\": \"GLASS.csv\",\n \"Census Income (Classification)\": \"CENSUS_INCOME.csv\"}\n self.clustering_data_source = {\"Credit Card\": \"CC GENERAL.csv\"}\n self.regression_data_source = {\"House Sales\": \"HOUSING PRICE.csv\"}\n self.logreg_data_source = {\"Diabetes\": \"DIABETES.csv\"}\n self.classify_data_source = {\"Glass Type\": \"GLASS.csv\", \"Mobile Prices\": \"MOBILE.csv\",\n \"Census Income\": \"CENSUS_INCOME.csv\"}\n\n self.background_fill_color = 'whitesmoke'\n self.border_fill_color = 'whitesmoke'\n self.x_axis_format = BasicTickFormatter(use_scientific=False)\n self.y_axis_format = BasicTickFormatter(use_scientific=False)\n self.title_align = 'center'\n self.text_font_size = '12pt'\n self.text_font = 'times'\n self.axis_label_text_font = 'times'\n self.axis_label_text_font_size = \"12pt\"\n self.error_count = 0\n self.callback_notification = CustomJS(args={}, \n code=\"\"\"var x = document.getElementById(\"toast\")\n x.className = \"show\";\n s = cb_obj.text\n document.getElementById(\"desc\").innerHTML = s.substr(s.indexOf(' ')+1);\n setTimeout(function(){ x.className = x.className.replace(\"show\", \"\"); }, 5000);\"\"\")\n self.reg_data_select = None\n\n def run_tool(self):\n landing_tab = self.landing_note()\n eda_tab = self.exploration_plots()\n linreg_tab = self.lin_reg()\n logreg_tab = self.logreg()\n cluster_tab = self.cluster()\n classify_tab = self.classify()\n\n tabs = Tabs(tabs=[landing_tab, eda_tab, linreg_tab, logreg_tab, classify_tab, cluster_tab],\n tabs_location='above', sizing_mode='scale_both', active=0)\n\n return tabs\n\n\ntabs = main_tool().run_tool()\n\ncurdoc().add_root(tabs)\ncurdoc().title = \"ML APP\"\n",
"id": "9799284",
"language": "Python",
"matching_score": 4.573009490966797,
"max_stars_count": 4,
"path": "main.py"
},
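The main.py entry above wires every tab with the same Bokeh pattern: a Select swaps the data behind a ColumnDataSource, a callback repopulates a DataTable, and a Button stays disabled until a valid choice is made. Below is a minimal, self-contained sketch of that pattern only; the dataset name, widget labels and the print placeholder are invented for illustration.

import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import Button, ColumnDataSource, Select
from bokeh.models.widgets import DataTable, TableColumn

# Hypothetical stand-in for the *_data_source dictionaries used above.
datasets = {"Demo": pd.DataFrame({"A": [1, 2, 3], "B": [4.0, 5.0, 6.0]})}

source = ColumnDataSource(data=dict())
table = DataTable(source=source, columns=[], width=400, height=150)
select = Select(title="Dataset:", value="Select dataset",
                options=["Select dataset"] + list(datasets.keys()))
button = Button(label="Run", disabled=True)

def on_select(attr, old, new):
    # Repopulate the table and enable the button only for a real dataset.
    if select.value in datasets:
        df = datasets[select.value]
        source.data = dict(df)
        table.columns = [TableColumn(field=c, title=c) for c in df.columns]
        button.disabled = False
    else:
        source.data, table.columns, button.disabled = {}, [], True

select.on_change("value", on_select)
button.on_click(lambda: print("compute on", select.value))

curdoc().add_root(column(select, table, button))
# Served the same way as the app above: bokeh serve --show app.py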
{
"content": "from sklearn.cluster import KMeans\nfrom sklearn.manifold import TSNE\nfrom sklearn.preprocessing import StandardScaler\n\nimport pandas as pd\nimport numpy as np\n\nfrom yellowbrick.cluster import SilhouetteVisualizer, KElbowVisualizer\n\nfrom bokeh.models.widgets import TableColumn\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\nfrom sklearn.exceptions import ConvergenceWarning\nConvergenceWarning('ignore')\n\n\ndef get_elbow_plot(X):\n\n output_text = \"\"\n try:\n model = KMeans(random_state=40,)\n elbow_score = KElbowVisualizer(model, k=(1, 30))\n elbow_score.fit(X)\n elbow_value = elbow_score.elbow_value_\n model = KMeans(elbow_value, random_state=42)\n silhoutte_score = SilhouetteVisualizer(model, colors='yellowbrick')\n silhoutte_score.fit(X)\n\n output_text = \"\"\"The optimal number of clusters is \"\"\" + \\\n str(silhoutte_score.n_clusters_) + \"\"\" and the silhouette score is \"\"\" + \\\n str(np.round(silhoutte_score.silhouette_score_, 2))\n except ValueError as e:\n print(e)\n\n return output_text\n\n\ndef get_tsne(df, c_no, mapper):\n\n clust_norm_data_df = pd.DataFrame(df)\n source_clust_data = dict()\n\n try:\n kmeans = KMeans(n_clusters=c_no, random_state=40).fit(clust_norm_data_df)\n clust_norm_data_df['Cluster'] = kmeans.predict(df)\n tsne = TSNE(n_components=2, perplexity=40, n_iter=300, random_state=100)\n tsne_results = tsne.fit_transform(clust_norm_data_df.iloc[:, :-1])\n\n clust_norm_data_df['tsne-2d-one'] = tsne_results[:, 0]\n clust_norm_data_df['tsne-2d-two'] = tsne_results[:, 1]\n mapper['transform'].low = min(clust_norm_data_df['Cluster'].values)\n mapper['transform'].high = max(clust_norm_data_df['Cluster'].values)\n\n source_clust_data = dict(x=clust_norm_data_df['tsne-2d-one'], y=clust_norm_data_df['tsne-2d-two'],\n cluster=clust_norm_data_df['Cluster'])\n except ValueError as e:\n print (e)\n\n return source_clust_data\n\n\ndef clustering_data(clust_df, active_features, active_norm, active_clust_no,\n clustering_data_source, mapper, clust_scat):\n\n if 'ALL' in active_features:\n clust_df = clust_df\n else:\n clust_df = clust_df.loc[:, active_features]\n\n clust_df = pd.get_dummies(clust_df, drop_first=True)\n if active_norm == 1:\n clust_norm_data = pd.DataFrame(StandardScaler().fit_transform(clust_df.values))\n else:\n clust_norm_data = clust_df\n\n if clust_norm_data.shape[1] == 1:\n clust_norm_data = clust_norm_data.values.reshape(-1, 1)\n\n output_text = get_elbow_plot(clust_norm_data)\n clust_scat.title.text = output_text\n\n source_clust_data = get_tsne(clust_norm_data, active_clust_no, mapper)\n return source_clust_data\n",
"id": "10710038",
"language": "Python",
"matching_score": 0.9683631658554077,
"max_stars_count": 4,
"path": "code/clustering.py"
},
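A minimal sklearn-only sketch of what get_tsne() in the clustering module above produces for the scatter source: KMeans cluster labels plus a 2-D t-SNE projection. The toy DataFrame and the small perplexity value are assumptions chosen for a six-row example.

import pandas as pd
from sklearn.cluster import KMeans
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler

# Hypothetical numeric data standing in for the clustering dataset.
df = pd.DataFrame({"F1": [1.0, 1.1, 5.0, 5.2, 9.0, 9.1],
                   "F2": [0.5, 0.4, 3.0, 3.1, 7.0, 7.2]})

X = StandardScaler().fit_transform(df)          # the active_norm == 1 branch
labels = KMeans(n_clusters=3, random_state=40).fit_predict(X)
xy = TSNE(n_components=2, perplexity=2.0, random_state=100).fit_transform(X)

# Shape of the dict that feeds source_clust.data in the Clustering tab.
source_clust_data = dict(x=xy[:, 0], y=xy[:, 1], cluster=labels)
print(source_clust_data["cluster"])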
{
"content": "import numpy as np\nimport pandas as pd\nfrom bokeh.io import curdoc\nfrom bokeh.layouts import row, widgetbox,column\nfrom bokeh.models import ColumnDataSource,LabelSet,PointDrawTool,CustomJS\nfrom bokeh.models.widgets import Slider,Paragraph,Button,CheckboxButtonGroup\nfrom bokeh.plotting import figure\nfrom scipy.spatial import ConvexHull\n\ndf=pd.read_csv('data/data.csv')\nheaders = [\"x\", \"y\", \"team_id\", \"player_id\",\"time\"]\nall_team = pd.DataFrame(df, columns=headers)\n\nteam_att =all_team[all_team.team_id==1]\nteam_def =all_team[all_team.team_id==2]\nball=all_team[all_team.team_id==3]\ni=0\n\nt1 = np.vstack((team_att[team_att.time==i].x, team_att[team_att.time==i].y)).T\nt2 = np.vstack((team_def[team_def.time == i].x, team_def[team_def.time == i].y)).T\n\nhull=ConvexHull(t1)\nhull2= ConvexHull(t2)\nxc = t1[hull.vertices, 0]\nyc = t1[hull.vertices, 1]\n\nax = t2[hull2.vertices, 0]\nay = t2[hull2.vertices, 1]\n\n\nplayer_id=['1','2','3','4','5','6','7','8','9','10','11','1','2','3','4','5','6','7','8','9','10','11',' ']\nc=['dodgerblue','dodgerblue','dodgerblue','dodgerblue','dodgerblue','dodgerblue','dodgerblue','dodgerblue','dodgerblue','dodgerblue','dodgerblue','orangered','orangered','orangered',\n 'orangered','orangered','orangered','orangered','orangered','orangered','orangered','orangered','gold']\n\n\nsource = ColumnDataSource(data=dict(x=x, y=y,player_id=player_id,color=c))\nsource2 = ColumnDataSource(data=dict(xc=xc,yc=yc))\nsource3 = ColumnDataSource(data=dict(ax=ax,ay=ay))\n\n\nplot = figure(name='base',plot_height=600, plot_width=800, title=\"Game Animation\",\n tools=\"reset,save\",\n x_range=[-52.5,52.5], y_range=[-34, 34],toolbar_location=\"below\")\nplot.image_url(url=[\"myapp/static/images/base.png\"],x=-52.5,y=-34,w=105,h=68,anchor=\"bottom_left\")\n\nplot.xgrid.grid_line_color = None\nplot.ygrid.grid_line_color = None\nplot.axis.visible=False\n\nst=plot.scatter('x','y', source=source,size=20,fill_color='color')\n\nlabels=LabelSet(x='x', y='y', text='player_id', level='glyph',\n x_offset=-5, y_offset=-7, source=source, render_mode='canvas',text_color='white',text_font_size=\"10pt\")\n\n\nfreq = Slider(title=\"Game Time\", value=0, start=all_team.time.unique().min(), end=all_team.time.unique().max(), step=1)\n\n\ndef update_data(attrname, old, new):\n\n\n k = freq.value\n # shadow_draw(k)\n\n x = all_team[all_team.time == k].x\n y = all_team[all_team.time == k].y\n source.data = dict(x=x, y=y,player_id=player_id,color=c)\n\n t1 = np.vstack((team_att[team_att.time==k].x, team_att[team_att.time==k].y)).T\n t2 = np.vstack((team_def[team_def.time==k].x, team_def[team_def.time==k].y)).T\n\n hull = ConvexHull(t1)\n hull2= ConvexHull(t2)\n xc = t1[hull.vertices, 0]\n yc = t1[hull.vertices, 1]\n\n ax = t2[hull2.vertices, 0]\n ay = t2[hull2.vertices, 1]\n \n source2.data=dict(xc=xc,yc=yc)\n source3.data=dict(ax=ax,ay=ay)\n\n\nfor w in [freq]:\n w.on_change('value', update_data)\n\nplot.add_layout(labels)\n\nbutton = Button(label='► Play', width=60)\n\ndef animate_update():\n year = freq.value + 1\n if year > all_team.time.max():\n year = all_team.time[0]\n freq.value = year\n \ndef animate():\n if button.label == '► Play':\n button.label = '❚❚ Pause'\n curdoc().add_periodic_callback(animate_update, 50)\n else:\n button.label = '► Play'\n curdoc().remove_periodic_callback(animate_update)\n\nbutton.on_click(animate)\n\nteam_Blue=plot.patch('xc', 'yc', source=source2, alpha=0, line_width=3, fill_color='dodgerblue')\nteam_red = plot.patch('ax', 
'ay',source=source3, alpha=0, line_width=3,fill_color='orangered')\n\ncheckbox_blue=CheckboxButtonGroup(labels=[\"Team Blue\"],button_type = \"primary\")\ncheckbox_red=CheckboxButtonGroup(labels=[\"Team Red\"],button_type = \"primary\")\n\ncheckbox_blue.callback = CustomJS(args=dict(l0=team_Blue, checkbox=checkbox_blue), code=\"\"\"\nl0.visible = 0 in checkbox.active;\nl0.glyph.fill_alpha = 0.3;\n\"\"\")\ncheckbox_red.callback = CustomJS(args=dict(l0=team_red, checkbox=checkbox_red), code=\"\"\"\nl0.visible = 0 in checkbox.active;\nl0.glyph.fill_alpha = 0.3;\n\"\"\")\n\np = Paragraph(text=\"\"\"Select team to plot convex hull\"\"\",\nwidth=200)\n\ninputs = widgetbox(freq,button,p,checkbox_blue,checkbox_red)\n\nlayout = column(row(inputs,plot))\n\ncurdoc().add_root(layout)\n\ncurdoc().title = \"Game Animation\"\n",
"id": "1485688",
"language": "Python",
"matching_score": 3.621819257736206,
"max_stars_count": 4,
"path": "blog_images/images/blog2/main.py"
},
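The animation script above builds its initial ColumnDataSource from x and y, which are only assigned later inside update_data; the sketch below shows one way to prepare that first frame and one convex hull, mirroring update_data for frame 0. The random tracking data is invented.

import numpy as np
import pandas as pd
from scipy.spatial import ConvexHull

# Hypothetical single-frame tracking data: 11 + 11 players and the ball.
all_team = pd.DataFrame({"x": np.random.uniform(-50, 50, 23),
                         "y": np.random.uniform(-30, 30, 23),
                         "team_id": [1] * 11 + [2] * 11 + [3],
                         "time": 0})

i = 0
frame = all_team[all_team.time == i]
x, y = frame.x, frame.y                                  # scatter coordinates
t1 = frame[frame.team_id == 1][["x", "y"]].to_numpy()    # attacking team
hull = ConvexHull(t1)
xc, yc = t1[hull.vertices, 0], t1[hull.vertices, 1]      # patch coordinates
print(len(xc), "hull vertices for the blue team")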
{
"content": "# -*- coding: utf-8 -*-\nfrom bokeh.plotting import figure, output_file, show, save\nfrom bokeh.models import ColumnDataSource, Label, LabelSet, Range1d, Title\nfrom bokeh.io import push_notebook, show, output_notebook\nfrom bokeh.layouts import row, gridplot\n\nfrom statistics import mean\n\noutput_notebook()\n\n\nclass plot_packing():\n \"\"\"\n Plot the player location on the pitch and highlight the defending team players\n that might have been calculated in packing.\n\n Parameters\n ----------\n passer_team_df : DataFrame\n DataFrame with the passing team coordinates\n Column name with `id` or `_id` are considered player ids (Only 1 column with such name).\n packing_df : DataFrame\n Resulting DataFrame from packing module (should not be altered).\n Column name with `id` or `_id` are considered player ids (Only 1 column with such name).\n col_label_x : String\n The column label for defending team's X coordinate in defending_team_xy\n col_label_y : String\n The column label for defending team's Y coordinate in defending_team_xy\n packing_rate : Float\n Resulting output from packing module (should not be altered)\n pass_pressure : Int\n Defending players who are closer to sender/receiver but not involved in packing\n sender_xy : ndarray\n Sender XY coordinates as numpy array\n receiver_xy : ndarray\n Receiver XY coordinates as numpy array\n x_range : [start, end] list\n List of range of x-axis of the pitch, Eg: `[0, 100]` or `[-5250, 5250]`\n y_range : [start, end] list\n List of range of y-axis of the pitch, Eg: `[0, 100]`or `[3400, -3400]`\n path_to_save : \n A path to save the output html file. Path should end with a `/`\n pass_frame : String, Optional, default None\n Identifier to display pass event time on plot\n bcg_img : String, default None\n Path to background image\n file_name : String, default `packing`\n Filename to save the plot\n\n Returns\n ----------\n Defending players who have been calcuated in packing will be marked in a green border.\n show() :\n Plot is shown on the browser. If module is run on jupyter notebook,\n plot will be shown in the notebook. \n save() : \n Plot saved locally to path specified under `path_to_save`.\n Note: If `file_name` is not changed every time module is run, plots\n will be overwritten.\n \"\"\"\n\n def __init__(\n self,\n passer_team_df,\n packing_df,\n col_label_x,\n col_label_y,\n packing_rate,\n pass_pressure,\n sender_xy,\n receiver_xy,\n x_range,\n y_range,\n path_to_save,\n pass_frame=None,\n bcg_img=None,\n file_name='packing'\n ):\n self.passer_team_df = passer_team_df\n self.packing_df = packing_df\n self.col_label_x = col_label_x\n self.col_label_y = col_label_y\n self.packing_rate = packing_rate\n self.pass_pressure = pass_pressure\n self.sender_xy = sender_xy\n self.receiver_xy = receiver_xy\n self.bcg_img = bcg_img\n self.x_range = x_range\n self.y_range = y_range\n self.pass_frame = pass_frame\n self.file_name = file_name\n self.path_to_save = path_to_save\n output_file(path_to_save + self.file_name +\n \".html\", title=self.file_name + \" plot\")\n\n def save_plots(self, plot):\n\n show(plot)\n save(plot)\n print(\n f\"Plot successfully saved at {self.path_to_save+self.file_name+'.html'}\")\n\n def plot(self):\n\n # Plot the visualization\n pass_team_cols = self.passer_team_df.columns.tolist()\n defend_team_cols = self.packing_df.columns.tolist()\n if len(pass_team_cols) < 3:\n raise ValueError(\n f\"Expect minimum 3 columns in 'passer_team_df'. 
Please provide a valid dataframe with a column for x and y and player_id each.\")\n\n pass_team_cols_x = [i for i in pass_team_cols if '_x' in i or 'x' == i]\n pass_team_cols_y = [i for i in pass_team_cols if '_y' in i or 'y' == i]\n pass_team_cols_id = [\n i for i in pass_team_cols if '_id' in i or 'id' == i or 'player' in i]\n\n defend_team_cols_id = [\n i for i in defend_team_cols if '_id' in i or 'id' == i or 'player' in i]\n\n if len(pass_team_cols_x) < 1:\n raise ValueError(\n f\"The column name for passing team x does not contain 'x' or '_x'. Please provide a valid name with 'x' or '_x'\")\n if len(pass_team_cols_y) < 1:\n raise ValueError(\n f\"The column name for passing team y does not contain 'y' or '_y'. Please provide a valid name with 'y' or '_y'\")\n if len(pass_team_cols_id) > 1:\n raise ValueError(\n f\"There are multiple columns containing either 'id' or '_id' in passer_team_df dataframe. Please provide a single column with 'id' or '_id' as column name\")\n if len(defend_team_cols_id) > 1:\n raise ValueError(\n f\"There are multiple columns containing either 'id' or '_id' in packing_df dataframe. Please provide a single column with 'id' or '_id' as column name\")\n\n fig_height, fig_width = 900, 550\n fig_title = \"Packing rate: {} Pass pressure: {} \\n\".format(\n self.packing_rate, self.pass_pressure)\n\n plot = figure(name='base', plot_height=550, plot_width=850,\n tools=\"save, wheel_zoom, reset, pan\", toolbar_location=\"right\",\n x_range=self.x_range, y_range=self.y_range,)\n\n plot.add_layout(\n Title(text=f\"Pass pressure: {self.pass_pressure}\", text_font_size=\"10pt\", align='center'), 'above')\n plot.add_layout(\n Title(text=f\"Packing rate: {self.packing_rate}\", text_font_size=\"10pt\", align='center'), 'above')\n\n if self.bcg_img != None:\n image_min_x, image_min_y, image_max_x, image_max_y = min(self.x_range), max(self.y_range), \\\n (abs(self.x_range[0]) + abs(self.x_range[1])\n ), (abs(self.y_range[0]) + abs(self.y_range[1]))\n\n plot.image_url(url=[self.path_to_save+self.bcg_img], x=image_min_x, y=image_min_y,\n w=image_max_x, h=image_max_y, anchor=\"bottom_left\")\n\n plot.line([self.sender_xy[0], self.receiver_xy[0]], [self.sender_xy[1], self.receiver_xy[1]],\n line_color=\"dodgerblue\", line_alpha=0.5, line_width=4, line_dash='dashed',)\n\n source_pass_team = ColumnDataSource(self.passer_team_df)\n plot.scatter(x=pass_team_cols_x[0], y=pass_team_cols_y[0], source=source_pass_team,\n size=17, fill_color='dodgerblue', fill_alpha=0.7)\n\n if pass_team_cols_id:\n labels_pass_team = LabelSet(x=pass_team_cols_x[0], y=pass_team_cols_y[0], text=pass_team_cols_id[0],\n x_offset=-4.5, y_offset=-6, source=source_pass_team, render_mode='canvas', text_font_size='8pt',\n text_color='white')\n plot.add_layout(labels_pass_team)\n\n source_sender = ColumnDataSource(\n data={'x': [self.sender_xy[0]], 'y': [self.sender_xy[1]], 'id': ['S']})\n plot.scatter('x', 'y', source=source_sender,\n size=17, fill_color='dodgerblue', line_color='red', line_width=3)\n labels_sender = LabelSet(x='x', y='y', text='id',\n x_offset=-4.5, y_offset=5, source=source_sender, render_mode='canvas')\n plot.add_layout(labels_sender)\n\n source_receiver = ColumnDataSource(\n data={'x': [self.receiver_xy[0]], 'y': [self.receiver_xy[1]], 'id': ['R']})\n plot.scatter('x', 'y', source=source_receiver,\n size=17, fill_color='dodgerblue', line_color='red', line_width=3)\n labels_receiver = LabelSet(x='x', y='y', text='id',\n x_offset=-4.5, y_offset=5, source=source_receiver, render_mode='canvas')\n 
plot.add_layout(labels_receiver)\n\n colors = {-1: 'green', 1: 'green', 0.5: 'green', 0: 'orangered'}\n alpha = {-1: 1, 1: 1, 0.5: 1, 0: 0.5}\n radius = {-1: 1000, 1: 1000, 0.5: 1000, 0: 0}\n\n for i in range(len(self.packing_df[self.col_label_x].tolist())):\n x = self.packing_df.iloc[i][self.col_label_x]\n y = self.packing_df.iloc[i][self.col_label_y]\n id = self.packing_df.iloc[i][defend_team_cols_id]\n edge_col = colors[self.packing_df.iloc[i]['packing_rate']]\n fill_alpha = alpha[self.packing_df.iloc[i]['packing_rate']]\n circle_radius = radius[self.packing_df.iloc[i]['packing_rate']]\n\n source_def_team = ColumnDataSource(\n data={'x': [x], 'y': [y], 'id': [id], 'edge_col': [edge_col], 'fill_alpha': [fill_alpha], 'radius': [circle_radius]})\n\n plot.scatter('x', 'y',\n size=17, fill_color='orangered', line_color='edge_col', line_width=3, source=source_def_team,\n fill_alpha='fill_alpha')\n\n if defend_team_cols_id:\n labels_pass_team = LabelSet(x='x', y='y', text='id',\n x_offset=-4.5, y_offset=-6, source=source_def_team, render_mode='canvas',\n text_font_size='8pt', text_color='white')\n plot.add_layout(labels_pass_team)\n\n plot.axis.visible = False\n plot.xgrid.grid_line_color = None\n plot.ygrid.grid_line_color = None\n plot.title.align = 'center'\n plot.toolbar.autohide = True\n plot.min_border = 40\n if self.pass_frame != None:\n caption1 = Label(text=f\"Pass Frame: {str(self.pass_frame)}\",\n text_font_size=\"8pt\",\n x=min(self.x_range) +\n (0.01*(self.x_range[1]-self.x_range[0])),\n y=self.y_range[0] +\n (0.01*(self.y_range[1]-self.y_range[0])))\n plot.add_layout(caption1)\n\n self.save_plots(plot)\n",
"id": "4171935",
"language": "Python",
"matching_score": 2.9606170654296875,
"max_stars_count": 17,
"path": "football_packing/plot_packing.py"
},
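A hypothetical usage sketch of the plot_packing class above, following its docstring: every coordinate, player id and the output path are made up, and the packing_rate values in packing_df are restricted to the {-1, 0, 0.5, 1} set that its colour map expects.

import numpy as np
import pandas as pd
from football_packing import plot_packing

passer_team_df = pd.DataFrame({"x": [0.30, 0.45, 0.62],
                               "y": [0.50, 0.35, 0.58],
                               "player_id": [7, 8, 9]})
packing_df = pd.DataFrame({"x": [0.50, 0.55],
                           "y": [0.45, 0.60],
                           "player_id": [4, 5],
                           "packing_rate": [1, 0]})

plot = plot_packing(passer_team_df=passer_team_df, packing_df=packing_df,
                    col_label_x="x", col_label_y="y",
                    packing_rate=1.0, pass_pressure=0,
                    sender_xy=np.array([0.30, 0.50]),
                    receiver_xy=np.array([0.62, 0.58]),
                    x_range=[0, 1], y_range=[1, 0],
                    path_to_save="./", file_name="example")
plot.plot()  # shows the figure and saves ./example.html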
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n* Find packing for real-time metrica data\n\n* Owner: <NAME>\n* Version: V1.0\n* Last Updated: May-14-2020\n\"\"\"\nimport os\nimport sys\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom scipy.spatial import distance\nfrom collections import defaultdict\n\nimport itertools\nimport random\n\n# Import custom packages\n\nfrom football_packing import packing\nfrom football_packing import plot_packing\n\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\n\n\nclass metrica:\n def __init__(\n self,\n path_play_df: str,\n path_event_df: str,\n game_id: list\n ):\n self.path_play_df = path_play_df\n self.path_event_df = path_event_df\n self.game_id = game_id\n self.defend_side = \"\"\n self.goal_center = {'left': (0, 0.5), 'right': (1, 0.5)}\n\n def get_defend_side(self, defending_team_xy, passing_team_xy):\n \"\"\"\n * Process to identify which side the defending team defends\n \"\"\"\n\n total_defend_left = defending_team_xy[defending_team_xy['x']\n <= 0.2]['x'].count()\n total_defend_right = defending_team_xy[defending_team_xy['x']\n >= 0.8]['x'].count()\n total_passer_left = passing_team_xy[passing_team_xy['x']\n <= 0.21]['x'].count()\n\n # 2. When only one end of pitch has a defender/gk\n\n if (((total_defend_left == 0) and (total_defend_right > 0)) or ((total_defend_left > 0) and (total_defend_right == 0))):\n\n if (total_defend_right > 0):\n self.defend_side = 'right'\n else:\n self.defend_side = 'left'\n\n # 1. When both end of pitch has a last man\n\n elif (total_defend_left > 0) and (total_defend_right > 0):\n\n # 1.1 When last man is on left and no attacking player near him\n if (total_defend_left > 0) and (total_passer_left == 0):\n self.defend_side = 'left'\n\n else:\n # 1.2\n self.defend_side = 'right'\n\n def process_data(self):\n\n game_events = pd.read_csv(self.path_event_df)\n play_df = pd.read_csv(self.path_play_df, dtype={\n 'frame': 'int', 'player': 'str', 'game_id': 'str'})\n\n play_df = play_df[play_df['game_id'] == self.game_id]\n event_type = ['PASS']\n\n game_events = game_events[game_events['Type'].isin(event_type)]\n\n game_events.loc[:, 'From'] = game_events['From'].str.replace(\n 'Player', '')\n game_events.loc[:, 'To'] = game_events['To'].str.replace(\n 'Player', '')\n\n random_index = random.choice(game_events.index.values)\n random_game_events = game_events[game_events.index == random_index]\n random_end_frame = random_game_events['End Frame'].values[0]\n random_sender = random_game_events['From'].values[0]\n random_receiver = random_game_events['To'].values[0]\n random_passing_team = random_game_events['Team'].values[0]\n\n random_play_end_df = play_df[play_df['frame']\n == random_end_frame].reset_index(drop=True)\n\n sender_xy = random_play_end_df[random_play_end_df['player'] == random_sender][[\n 'x', 'y']].values[0]\n receiver_xy = random_play_end_df[random_play_end_df['player'] == random_receiver][[\n 'x', 'y']].values[0]\n\n if random_passing_team == 'Away':\n passing_team_xy = random_play_end_df[(random_play_end_df['team'] == 'away') &\n (random_play_end_df['player']\n != random_sender)\n & (random_play_end_df['player'] != random_receiver)][[\n 'x', 'y', 'player']].dropna()\n defending_team_xy = random_play_end_df[random_play_end_df['team'] == 'home'][[\n 'x', 'y', 'player']].dropna().set_index('player', drop=False)\n else:\n passing_team_xy = random_play_end_df[(random_play_end_df['team'] == 'home') &\n (random_play_end_df['player']\n != 
random_sender)\n & (random_play_end_df['player'] != random_receiver)][[\n 'x', 'y', 'player']].dropna()\n defending_team_xy = random_play_end_df[random_play_end_df['team'] == 'away'][[\n 'x', 'y', 'player']].dropna().set_index('player', drop=False)\n\n defending_team_xy = defending_team_xy.rename(\n columns={'player': 'player_id'})\n\n self.get_defend_side(defending_team_xy, passing_team_xy)\n\n pack = packing(sender_xy, receiver_xy,\n defending_team_xy, col_label_x='x', col_label_y='y', defend_side=self.defend_side)\n self.packing_df, self.packing_rate, self.pass_pressure = pack.get_packing()\n\n plot = plot_packing(passer_team_df=passing_team_xy, packing_df=self.packing_df,\n col_label_x='x', col_label_y='y',\n packing_rate=self.packing_rate, pass_pressure=self.pass_pressure,\n sender_xy=sender_xy, receiver_xy=receiver_xy,\n x_range=[0, 1], y_range=[1, 0], path_to_save=dir_path+'/',\n pass_frame=random_end_frame, file_name='metrica',\n bcg_img='/images/pitch/pitch.jpg')\n\n plot.plot()\n\n\nif __name__ == '__main__':\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n\n \"\"\"\n Path to the game level data -\n Use this Tidy data - https://drive.google.com/drive/folders/1BGLHbe7DB_NGZxitjJAQxu2-N-B4Zk3s\n Credit - <NAME>\n \"\"\"\n path_game_df = sys.argv[1]\n # Path to the event level data\n path_events_df = sys.argv[2]\n\n game_id = '1'\n metric = metrica(path_game_df, path_events_df, game_id)\n metric.process_data()\n",
"id": "11279877",
"language": "Python",
"matching_score": 4.516384124755859,
"max_stars_count": 17,
"path": "examples/metrica.py"
},
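A hypothetical invocation of the example above; it reads the tidy tracking CSV and the event CSV paths from sys.argv, so both paths below are placeholders.

python examples/metrica.py path/to/tidy_tracking.csv path/to/events.csv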
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\nFind packing for Belgian football data (https://github.com/JanVanHaaren/mlsa18-pass-prediction)\n\nOwner: <NAME>\nVersion: V2.0\nLast Updated: May-20-2020\n\n\"\"\"\n#import modules\nimport numpy as np\nimport pandas as pd\n\nfrom football_packing import packing\nfrom football_packing import plot_packing\n\nimport os\nimport math\n\npd.set_option('display.max_rows', None)\npd.set_option('display.max_columns', None)\n\n\nclass belgium:\n \"\"\"\n Find the packing for a given play\n\n Arguments:\n df {string} -- source for the data\n play_id {string} -- Id for the given pass - taken from index of df\n defend_side {string} -- Side of the defending team\n pass_direction {string} -- Direction of pass - Forward, Back or Side\n pass_pressure {int} -- Total players that put pressure on sender & receiver (excluding players involved\n in packing)\n goal_center -- Center of goal, based on defend_side\n\n Returns:\n packing_rate {int} -- packing rate for that given pass option (Eg: player A --> B)\n plot (figure) -- matplotlib figure with player location and packing rate\n \"\"\"\n\n def __init__(\n self,\n df,\n play_id\n ):\n self.df = df\n self.play_id = play_id\n self.defend_side = \"\"\n self.pass_direction = \"\"\n self.pass_pressure = 0\n self.packing_rate = 0\n self.goal_center = {'left': (-5250, 0), 'right': (5250, 0)}\n\n def get_pass_direction(self, sender: np.array, receiver: np.array, goal: np.array):\n \"\"\"\n Get the direction of the pass.\n\n Arguments:\n sender {np.array} -- XY location of the sender\n receiver {np.array} -- XY location of the receiver\n goal {np.array} -- XY location of the goal\n\n Returns:\n direction -- Forward/Back/Side\n \"\"\"\n\n # Distance of 3 sides sender-receiver-goal triangle\n\n d_sr = np.linalg.norm(sender-receiver)\n d_sg = np.linalg.norm(sender-goal)\n d_rg = np.linalg.norm(receiver-goal)\n\n angle_s = math.degrees(\n math.acos((d_sr**2 + d_sg**2 - d_rg**2)/(2.0 * d_sr * d_sg)))\n angle_g = math.degrees(\n math.acos((d_rg**2 + d_sg**2 - d_sr**2)/(2.0 * d_rg * d_sg)))\n angle_r = math.degrees(\n math.acos((d_sr**2 + d_rg**2 - d_sg**2)/(2.0 * d_sr * d_rg)))\n\n if (d_rg < d_sg) and ((angle_r >= 90) or (angle_r >= angle_g)):\n return \"Forward\"\n elif (d_rg > d_sg) and ((angle_s >= 90) or (angle_r >= angle_g)):\n return \"Back\"\n else:\n return \"Side\"\n\n def process_data(self):\n \"\"\"\n Process the source data to get it in the necessary format\n \"\"\"\n\n self.df.loc[:, 'home_away'] = self.df['sender_id'].apply(\n lambda x: 1 if x < 15 else 0)\n\n # Get pass sender and receiver location\n\n self.df.loc[:, 'sender_x'] = self.df.apply(\n lambda x: x.iloc[x['sender_id'].astype(int)+3], axis=1)\n self.df.loc[:, 'sender_y'] = self.df.apply(\n lambda x: x.iloc[x['sender_id'].astype(int)+31], axis=1)\n self.df.loc[:, 'receiver_x'] = self.df.apply(\n lambda x: x.iloc[x['receiver_id'].astype(int)+3], axis=1)\n self.df.loc[:, 'receiver_y'] = self.df.apply(\n lambda x: x.iloc[x['receiver_id'].astype(int)+31], axis=1)\n\n # Get passing and defender team location\n\n self.df.loc[:, 'passer_team_x'] = self.df.apply(lambda x: x.iloc[4:18].dropna(\n ).values.tolist() if x['home_away'] == 1 else x.iloc[18:32].dropna().values.tolist(), axis=1)\n self.df.loc[:, 'passer_team_y'] = self.df.apply(lambda x: x.iloc[32:46].dropna(\n ).values.tolist() if x['home_away'] == 1 else x.iloc[46:60].dropna().values.tolist(), axis=1)\n self.df.loc[:, 'defender_team_x'] = self.df.apply(lambda x: x.iloc[4:18].dropna(\n ).values.tolist() 
if x['home_away'] == 0 else x.iloc[18:32].dropna().values.tolist(), axis=1)\n self.df.loc[:, 'defender_team_y'] = self.df.apply(lambda x: x.iloc[32:46].dropna(\n ).values.tolist() if x['home_away'] == 0 else x.iloc[46:60].dropna().values.tolist(), axis=1)\n\n # Get passing and receiving team player ids\n passer_team_ids = self.df.apply(lambda x: x.iloc[4:18].dropna(\n ).index.tolist() if x['home_away'] == 1 else x.iloc[18:32].dropna().index.tolist(), axis=1).values.tolist()[0]\n self.df.loc[:, 'passer_team_id'] = pd.Series(\n [[int(i.replace('x_', '')) for i in passer_team_ids]])\n\n defend_team_ids = self.df.apply(lambda x: x.iloc[4:18].dropna(\n ).index.tolist() if x['home_away'] == 0 else x.iloc[18:32].dropna().index.tolist(), axis=1).values.tolist()[0]\n self.df.loc[:, 'defend_team_id'] = pd.Series(\n [[int(i.replace('x_', '')) for i in defend_team_ids]])\n\n team_x = {'defender_team_x': self.df['defender_team_x'].tolist()[0],\n \"passer_team_x\": self.df['passer_team_x'].tolist()[0]}\n team_x_df = pd.DataFrame.from_dict(team_x, orient='index').fillna(0).T\n team_x_df['defender_pitch'] = np.where(\n team_x_df['defender_team_x'] <= 0, 'left', 'right')\n team_x_df['passer_pitch'] = np.where(\n team_x_df['passer_team_x'] <= 0, 'left', 'right')\n\n \"\"\"\n Process to identify which side the defending team defends\n \"\"\"\n\n total_defend_left = team_x_df[team_x_df['defender_team_x']\n <= -2000]['defender_team_x'].count()\n total_defend_right = team_x_df[team_x_df['defender_team_x']\n >= 2000]['defender_team_x'].count()\n total_passer_left = team_x_df[team_x_df['passer_team_x']\n <= -2100]['passer_team_x'].count()\n\n # 1. When only one end of pitch has a defender/gk\n\n if (((total_defend_left == 0) and (total_defend_right > 0)) or ((total_defend_left > 0) and (total_defend_right == 0))):\n\n if (total_defend_right > 0):\n self.defend_side = 'right'\n else:\n self.defend_side = 'left'\n\n # 2. 
When both end of pitch has a last man\n\n elif (total_defend_left > 0) and (total_defend_right > 0):\n\n # 2.1 When last man is on left and no attacking player near him\n if (total_defend_left > 0) and (total_passer_left == 0):\n self.defend_side = 'left'\n\n else:\n # 2.2\n self.defend_side = 'right'\n\n pass_sr = {'sender': list(zip(self.df['sender_x'], self.df['sender_y'])),\n 'receiver': list(zip(self.df['receiver_x'], self.df['receiver_y']))}\n\n # Get pass direction based on defending team side\n self.pass_direction = self.get_pass_direction(np.array(pass_sr['sender']), np.array(\n pass_sr['receiver']), np.array(self.goal_center[self.defend_side]))\n\n def_team_xy = {'defender_team_x': self.df['defender_team_x'].tolist()[0],\n \"defender_team_y\": self.df['defender_team_y'].tolist()[0],\n 'passer_team_x': self.df['passer_team_x'].tolist()[0],\n \"passer_team_y\": self.df['passer_team_y'].tolist()[0],\n \"defend_team_id\": self.df['defend_team_id'].tolist()[0]}\n\n self.def_team_xy_df = pd.DataFrame.from_dict(\n def_team_xy, orient='index').fillna(0).T.set_index('defend_team_id', drop=False)\n\n self.def_team_xy_df.loc[:, 'sender'] = np.where(((self.def_team_xy_df['passer_team_x'] == self.df['sender_x'].values[0])\n & (self.def_team_xy_df['passer_team_y'] == self.df['sender_y'].values[0])), 1, 0)\n self.def_team_xy_df.loc[:, 'receiver'] = np.where(((self.def_team_xy_df['passer_team_x'] == self.df['receiver_x'].values[0])\n & (self.def_team_xy_df['passer_team_y'] == self.df['receiver_y'].values[0])), 1, 0)\n # self.def_team_xy_df.set_index('defend_team_id')\n\n def packing_calculate(self):\n\n sender_xy = self.df[['sender_x', 'sender_y']].values[0]\n receiver_xy = self.df[['receiver_x', 'receiver_y']].values[0]\n\n pack = packing(sender_xy, receiver_xy, self.def_team_xy_df,\n col_label_x='defender_team_x', col_label_y='defender_team_y',\n defend_side=self.defend_side)\n self.packing_df, self.packing_rate, self.pass_pressure = pack.get_packing()\n\n passing_team_xy = pd.DataFrame({'passer_team_x': self.df['passer_team_x'].tolist()[0],\n 'passer_team_y': self.df['passer_team_y'].tolist()[0],\n 'passer_team_id': self.df['passer_team_id'].tolist()[0]})\n\n plot = plot_packing(passer_team_df=passing_team_xy, packing_df=self.packing_df,\n col_label_x='defender_team_x', col_label_y='defender_team_y',\n packing_rate=self.packing_rate, pass_pressure=self.pass_pressure,\n sender_xy=sender_xy, receiver_xy=receiver_xy,\n x_range=[-5250, 5250], y_range=[3400, -3400],\n path_to_save=dir_path+'/', pass_frame=self.play_id, file_name='belgium',\n bcg_img='/images/pitch/pitch.jpg')\n plot.plot()\n\n def execute_pack(self):\n # Looping functions\n steps = (\n self.process_data(),\n self.packing_calculate(),\n )\n\n for step in steps:\n step\n\n\nif __name__ == '__main__':\n\n dir_path = os.path.dirname(os.path.realpath(__file__))\n df = pd.read_csv(dir_path+\"/data/passes.csv\")\n df.loc[:, 'pass_success'] = np.where(((df['sender_id'] <= 14) & (df['receiver_id'] <= 14))\n | ((df['sender_id'] > 14) & (df['receiver_id'] > 14)), 1, 0)\n\n df = df.loc[(df['pass_success'] == 1) & (df['sender_id'] !=\n df['receiver_id']), :].copy().reset_index(drop=True)\n df = df.sample(1).copy()\n play_id = str(df.index.values[0])\n df = df.reset_index(drop=True)\n pack_belgium = belgium(df, play_id)\n pack_belgium.execute_pack()\n",
"id": "12462379",
"language": "Python",
"matching_score": 4.74912691116333,
"max_stars_count": 17,
"path": "examples/belgium.py"
},
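A worked sketch of the law-of-cosines test that get_pass_direction() above applies when classifying a pass; the coordinates are made up, in the same centimetre scale as the Belgian data (pitch x in [-5250, 5250]).

import math
import numpy as np

sender = np.array([0.0, 0.0])          # hypothetical sender
receiver = np.array([2000.0, 500.0])   # hypothetical receiver
goal = np.array([5250.0, 0.0])         # goal centre when defend_side == 'right'

d_sr = np.linalg.norm(sender - receiver)
d_sg = np.linalg.norm(sender - goal)
d_rg = np.linalg.norm(receiver - goal)

# Angles at the receiver and goal corners of the sender-receiver-goal triangle.
angle_r = math.degrees(math.acos((d_sr**2 + d_rg**2 - d_sg**2) / (2.0 * d_sr * d_rg)))
angle_g = math.degrees(math.acos((d_rg**2 + d_sg**2 - d_sr**2) / (2.0 * d_rg * d_sg)))

# Receiver closer to goal and "facing" it enough -> forward pass.
print("Forward" if d_rg < d_sg and (angle_r >= 90 or angle_r >= angle_g) else "Back/Side")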
{
"content": "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport numpy as np\n\nfrom scipy.spatial import distance\n\nimport math\nfrom sklearn import preprocessing\n\n\nclass calculate_packing:\n\n def __init__(self):\n self.packing_rate = 0\n\n def get_pass_direction(self, sender, receiver, goal, defend_side):\n \"\"\"\n Get Pass Direction\n\n Returns:\n Forward/Back/Side\n \"\"\"\n if defend_side == 'left':\n goal_sender = [0, sender[1]]\n goal_receiver = [0, receiver[1]]\n elif defend_side == 'right':\n goal_sender = [1, sender[1]]\n goal_receiver = [1, receiver[1]]\n\n # Distance of 3 sides sender-receiver-goal triangle\n d_sg = np.round(np.linalg.norm(sender-goal_sender), 5)\n d_rg = np.round(np.linalg.norm(\n receiver-goal_receiver), 5)\n\n if (d_rg < d_sg) and (np.abs(d_rg-d_sg) > 0.03):\n return 'Forward'\n elif (d_rg > d_sg) and (np.abs(d_rg-d_sg) > 0.03):\n return 'Back'\n else:\n return 'Side'\n\n def method_1(self, box_a, box_b, box_c, box_d, df_method1, col_label_x, col_label_y, rect_thresh=0.010):\n \"\"\"\n Method 1 :\n Draw a rectangle box between sender and receiver to see if any player\n is inside the bounding box. A rect_thresh of 0.01 is used to consider players on the\n edge of the box.\n\n Parameters\n ----------\n box_a : ndarray\n A ndarray of ['sender_x', 'sender_y']\n box_b : ndarray\n A ndarray of ['sender_x', 'receiver_y']\n box_c : ndarray\n A ndarray of ['receiver_x', 'receiver_y']\n box_d : ndarray\n A ndarray of ['receiver_x', 'sender_y']\n df_method1 : DataFrame\n A copy of defending_team_xy dataframe\n col_label_x : String\n The column label for defending team's X coordinate in `defending_team_xy`\n col_label_y : String\n The column label for defending team's Y coordinate in `defending_team_xy`\n rect_thresh : Float, default 0.015\n A threshold to check if any player is outside/on the edge of the box within\n the threshold distance\n\n Returns\n ----------\n df_method1 : DataFrame\n A copy of original DataFrame with 1/0 for Method 1 and the following new columns :\n `triangle_area` : Float, `rect_length` : Float, `rect_width` : Float, `method_1` : Binary\n \"\"\"\n def area_triangle(s1, s2, s3):\n\n s = (s1 + s2 + s3) / 2.0\n area = (s*(s-s1)*(s-s2)*(s-s3)) ** 0.5\n\n if area == np.nan:\n return 0\n else:\n return np.round(area, 5)\n\n def checkBoundary(df):\n method_1 = 0\n point_def = df[[col_label_x, col_label_y]].values.tolist()\n\n p_a = np.round(np.linalg.norm(point_def-box_a), 5)\n p_b = np.round(np.linalg.norm(point_def-box_b), 5)\n p_c = np.round(np.linalg.norm(point_def-box_c), 5)\n p_d = np.round(np.linalg.norm(point_def-box_d), 5)\n\n area_rect = np.round(ab*bc, 5)\n area_ab = area_triangle(p_a, p_b, ab)\n area_bc = area_triangle(p_b, p_c, bc)\n area_cd = area_triangle(p_c, p_d, cd)\n area_da = area_triangle(p_d, p_a, da)\n\n # Check if player xy lies inside the bounding box\n # rect_thresh = 0.010 is for normalized data\n\n if ((area_ab + area_bc + area_cd + area_da) - area_rect) <= rect_thresh:\n method_1 = 1\n else:\n method_1 = 0\n\n return pd.to_numeric(pd.Series({'triangle_area': (area_ab + area_bc + area_cd + area_da),\n 'rect_length': ab, 'rect_width': bc,\n 'area_diff': ((area_ab + area_bc + area_cd + area_da) - area_rect),\n 'method_1': method_1}),\n downcast='integer')\n\n # rectangle edges\n ab = np.round(np.linalg.norm(box_a-box_b), 5)\n bc = np.round(np.linalg.norm(box_b-box_c), 5)\n cd = np.round(np.linalg.norm(box_c-box_d), 5)\n da = np.round(np.linalg.norm(box_d-box_a), 5)\n\n df_method1[['triangle_area', 'rect_length', 
'rect_width', 'area_diff', 'method_1']\n ] = df_method1.apply(checkBoundary, axis=1)\n\n return df_method1\n\n def method_2(self, sender_xy, receiver_xy, df_method2, col_label_x, col_label_y, method2_radius=0.12):\n \"\"\"\n Method 2 :\n Check if player is within a certain distance to line of pass, so that\n the pass can potentially be intersected (assuming the speed of pass is not a factor).\n\n For a given defender, assume the defender xy to be center of circle. Find the perpendicular\n distance from player xy to the line of pass. If the distance is <= method2_radius, then method_2\n returns as 1, else 0.\n\n Parameters\n ----------\n sender_xy : ndarray\n A ndarray of ['sender_x', 'sender_y']\n receiver_xy : ndarray\n A ndarray of ['receiver_x', 'receiver_y']\n df_method2 : DataFrame\n A copy of defending_team_xy dataframe, updated from `Method 1`\n radius : Float, default 0.150\n search radius for find if player can potentially intersect the pass\n by being within a given distance\n\n Returns\n ----------\n df_method2 : DataFrame\n A copy of original DataFrame with 1/0 for Method 2 and the following new columns :\n `method2_dist` : Distance of player to line of pass,\n `method_2` : Binary, (1/0)\n \"\"\"\n\n def check_intersection(df):\n \"\"\"\n If rectangle from method_1 is big enough ((rect_length > 0.01) or (rect_width > 0.01)),\n take a diagonal (non player) side of the rectangle and find the perpendicular distance\n between it and the line of pass. If a defending player is within that distance to the\n line of pass, then method_2 = 1.\n\n If rectangle is small, then use method2_radius to check if a defending player\n is within that distance to the line of pass.\n \"\"\"\n method_2 = 0\n\n # Defender point\n center = df[[col_label_x, col_label_y]].values\n dist_dl = np.round(np.abs(np.cross(receiver_xy-sender_xy, sender_xy-center)) /\n np.linalg.norm(receiver_xy-sender_xy), 5)\n\n # Box diagonal\n box_diagonal = np.array([sender_xy[0], receiver_xy[1]])\n dist_box_line = np.round(np.abs(np.cross(receiver_xy-sender_xy, sender_xy-box_diagonal)) /\n np.linalg.norm(receiver_xy-sender_xy), 5)\n\n rect_length = df['rect_length']\n rect_width = df['rect_width']\n\n if (rect_length <= 0.07) or (rect_width <= 0.07):\n if (dist_dl <= method2_radius):\n method_2 = 1\n else:\n method_2 = 0\n elif dist_dl <= dist_box_line:\n method_2 = 1\n else:\n method_2 = 0\n\n return pd.to_numeric(pd.Series({'method2_dist': dist_dl,\n 'method_2': method_2}),\n downcast='integer')\n\n df_method2[['method2_dist', 'method_2']] = df_method2.apply(\n check_intersection, axis=1)\n\n return df_method2\n\n def method_3(self, sender_xy, receiver_xy, df_method3, col_label_x, col_label_y):\n \"\"\"\n Method 3 :\n Check defender angle with respect to sender & receiver.\n One of the draw back of `method_2` is that defender can be close to line to pass\n but still be beyond the sender or receiver (one of angle b/w defender & sender/receiver > 90).\n This method checks this condition.\n\n Parameters\n ----------\n sender_xy : ndarray\n A ndarray of ['sender_x', 'sender_y']\n receiver_xy : ndarray\n A ndarray of ['receiver_x', 'receiver_y']\n df_method3 : DataFrame\n A copy of defending_team_xy dataframe, updated from `Method 2`\n\n Returns\n ----------\n df_method3 : DataFrame\n A copy of original DataFrame with 1/0 for Method 3 and the following new columns :\n `method3_angle_s` : Angle between defender & sender,\n `method3_angle_r` : Angle between defender & receiver,\n `method_3` : Binary, (1/0)\n \"\"\"\n def 
check_angles(df):\n method_3 = 0\n center = df[[col_label_x, col_label_y]].values\n\n # Distance between sender, receiver & defender combination\n d_sr = np.linalg.norm(sender_xy-receiver_xy)\n d_sd = np.linalg.norm(sender_xy-center)\n d_rd = np.linalg.norm(receiver_xy-center)\n\n angle_s = np.round(math.degrees(\n math.acos((d_sr**2 + d_sd**2 - d_rd**2)/(2.0 * d_sr * d_sd))))\n angle_r = np.round(math.degrees(\n math.acos((d_sr**2 + d_rd**2 - d_sd**2)/(2.0 * d_sr * d_rd))))\n\n if (angle_s <= 105) & (angle_r <= 105):\n method_3 = 1\n else:\n method_3 = 0\n\n return pd.to_numeric(pd.Series({'method3_angle_s': angle_s,\n 'method3_angle_r': angle_r,\n 'method_3': method_3}),\n downcast='integer')\n\n df_method3[['method3_angle_s', 'method3_angle_r', 'method_3']\n ] = df_method3.apply(check_angles, axis=1)\n\n return df_method3\n\n def update_method_1(self, df_update):\n \"\"\"\n Method 1 Update :\n For special cases where bounding box from `Method 1` is almost a line i.e: either width/length <= 0.07 units\n (both sender and receiver are in similar X or Y coordinate).\n In this case, update the value of method_1 value to 1 if both method_2 and method_3 are 1.\n\n Parameters\n ----------\n df_update : DataFrame\n The copy of DataFrame after Methods 1,2 & 3.\n\n Returns\n ----------\n df_update : DataFrame\n Final Dataframe with updated 1/0 for Method 1\n \"\"\"\n rect_length = df_update['rect_length'].unique()[0]\n rect_width = df_update['rect_width'].unique()[0]\n\n if (rect_length <= 0.07) or (rect_width <= 0.07):\n df_update.loc[:, 'method_1_update'] = np.where(((df_update['method_1'] == 0) &\n (df_update['method_2'] == 1) &\n (df_update['method_3'] == 1)), 1, df_update['method_1'])\n else:\n df_update.loc[:, 'method_1_update'] = df_update['method_1']\n\n return df_update\n\n def get_pass_pressure(self, sender_xy, receiver_xy, defending_team_xy, col_label_x, col_label_y):\n \"\"\"\n For defender who are not in the packing rate, if they are close (<=0.05 units) to the\n sender/receiver, they're considered to have an influence on the pass by increasing the\n pressure of the pass.\n\n Parameters\n ----------\n sender_xy : ndarray\n Sender XY coordinates as numpy array\n receiver_xy : ndarray\n Receiver XY coordinates as numpy array\n defending_team_xy : DataFrame\n DataFrame with the defending team coordinates\n col_label_x : String\n The column label for defending team's X coordinate in `defending_team_xy`\n col_label_y : String\n The column label for defending team's Y coordinate in `defending_team_xy`\n\n Returns\n ----------\n total_pressure : Int\n Total count of defenders applying pressure on the sender & receiver, but not involved in\n packing rate.\n \"\"\"\n defend_xy = defending_team_xy[defending_team_xy['packing_rate'] == 0][[\n col_label_x, col_label_y]].values\n sender_def_cdist = distance.cdist(sender_xy, defend_xy)\n receiver_def_cdist = distance.cdist(receiver_xy, defend_xy)\n\n sender_ids = np.array(\n np.where(sender_def_cdist[0] <= 0.05)).tolist()[0]\n receiver_ids = np.array(\n np.where(receiver_def_cdist[0] <= 0.05)).tolist()[0]\n\n pass_pressure_players = list(\n set(sender_ids).symmetric_difference(set(receiver_ids)))\n total_pressure = len(pass_pressure_players)\n\n return total_pressure\n\n\nclass packing:\n \"\"\"\n Find the packing for a given pass\n\n Parameters\n ----------\n sender_xy : ndarray\n Sender XY coordinates as numpy array\n receiver_xy : ndarray\n Receiver XY coordinates as numpy array\n defending_team_xy : DataFrame\n DataFrame with the defending team 
coordinates\n Do not include any passing team XY or other columns as it'll have an impact on \n plotting function.\n col_label_x : String\n The column label for defending team's X coordinate in `defending_team_xy`\n col_label_y : String\n The column label for defending team's Y coordinate in `defending_team_xy`\n defend_side : String\n The side of the defending team on the football pitch. Left/Right, `not case sensitive`\n goal_center : Dict\n Center of goal selected based on defend_side\n {'left': [0, 0.5], 'right': [1, 0.5]}\n\n Returns\n ----------\n packing_df : DataFrame\n Returns a dataframe with the following new columns along with existing columns\n that was provided.\n New Columns :\n [`triangle_area`, `rect_length`, `rect_width`, `area_diff`, `method_1`, `method2_dist`,\n `method_2`, `method3_angle_s`, `method3_angle_r`, `method_3`, `method_1_update`,\n `packing_rate`, `col_label_x`, `col_label_y`]\n packing_rate : Float\n Packing rate for that given pass scenario\n Packing rate will be multiplied by a factor based on the pass type:\n 1.0 : Forward Pass\n -1.0 : Back Pass\n 0.5 : Side pass\n pass_pressure : Integer\n Defending players who are closer to sender/receiver but not involved in\n packing. Indicator to see if players take high risk pass.\n For eg: packing rate could be lower but pass pressure can be higher if pass\n sender/receiver are heavily marked.\n\n \"\"\"\n\n def __init__(\n self,\n sender_xy: np.array,\n receiver_xy: np.array,\n defending_team_xy: pd.DataFrame,\n col_label_x: str,\n col_label_y: str,\n defend_side: str,\n ):\n self.sender_xy = np.asarray(sender_xy)\n self.receiver_xy = np.asarray(receiver_xy)\n self.defending_team_xy = defending_team_xy.copy()\n self.col_label_x = col_label_x\n self.col_label_y = col_label_y\n self.defend_side = defend_side.lower()\n self.goal_center = {'left': [0, 0.5], 'right': [1, 0.5]}\n self.pass_pressure = None\n\n def get_packing(self):\n\n self.defending_team_xy_copy = self.defending_team_xy.copy()\n if self.sender_xy.size == 0:\n raise RuntimeError(\n \"Sender coordinates are empty. A valid array with [x, y] should be provided\")\n\n if self.receiver_xy.size == 0:\n raise RuntimeError(\n \"Receiver coordinates are empty. A valid array with [x, y] should be provided\")\n\n if self.defending_team_xy_copy.size == 0:\n raise RuntimeError(\n \"Defending team coordinates are empty. A valid dataframe with [x, y] should be provided for at least 1 player\")\n\n if not isinstance(self.defending_team_xy_copy, pd.DataFrame):\n raise RuntimeError(\n \"Defending team coordinates should be a dataframe with x and y values.\")\n\n defend_xy_cols = self.defending_team_xy_copy.columns.tolist()\n\n if self.col_label_x not in defend_xy_cols or self.col_label_x not in defend_xy_cols:\n raise RuntimeError(\n f\"Either {self.col_label_x} or {self.col_label_y} is not a column in defending_team_xy. 
Please provide valid column names\")\n\n self.goal_xy = self.goal_center[self.defend_side]\n\n if max(self.defending_team_xy_copy[[self.col_label_x]].values) > 1 or \\\n max(self.defending_team_xy_copy[[self.col_label_y]].values) > 1:\n concat_location = np.concatenate([\n self.defending_team_xy_copy[[\n self.col_label_x, self.col_label_y]].values,\n self.sender_xy.reshape(1, -1),\n self.receiver_xy.reshape(1, -1)\n ])\n min_max_scaler = preprocessing.MinMaxScaler()\n defending_team_xy_scaled = min_max_scaler.fit_transform(\n concat_location)\n self.defending_team_xy_copy.drop(\n [self.col_label_x, self.col_label_y], axis=1, inplace=True)\n self.defending_team_xy_copy[self.col_label_x], self.defending_team_xy_copy[\n self.col_label_y] = defending_team_xy_scaled[:-2, 0], defending_team_xy_scaled[:-2, 1]\n self.sender_xy = defending_team_xy_scaled[-2]\n self.receiver_xy = defending_team_xy_scaled[-1]\n\n box_a = np.asarray(self.sender_xy) # sender\n box_b = np.asarray(\n [self.sender_xy[0], self.receiver_xy[1]])\n box_c = np.asarray(list(self.receiver_xy)) # receiver\n box_d = np.asarray(\n [self.receiver_xy[0], self.sender_xy[1]])\n\n cp = calculate_packing()\n\n self.pass_direction = cp.get_pass_direction(\n self.sender_xy, self.receiver_xy, self.goal_xy, self.defend_side)\n\n self.packing_df = cp.method_1(\n box_a, box_b, box_c, box_d, self.defending_team_xy_copy.copy(), col_label_x=self.col_label_x,\n col_label_y=self.col_label_y)\n\n self.packing_df = cp.method_2(\n self.sender_xy, self.receiver_xy, self.packing_df, col_label_x=self.col_label_x, col_label_y=self.col_label_y)\n\n self.packing_df = cp.method_3(\n self.sender_xy, self.receiver_xy, self.packing_df, col_label_x=self.col_label_x, col_label_y=self.col_label_y)\n\n self.packing_df = cp.update_method_1(self.packing_df)\n\n self.packing_df['packing_rate'] = np.where(\n self.packing_df[[\"method_1_update\", \"method_2\", \"method_3\"]].sum(axis=1) == 3, 1, 0)\n\n # If back pass, multiple packing by -1\n if self.pass_direction == 'Back':\n self.packing_df.loc[:,\n 'packing_rate'] = self.packing_df.loc[:, 'packing_rate']*-1.0\n elif self.pass_direction == 'Side':\n self.packing_df.loc[:,\n 'packing_rate'] = self.packing_df.loc[:, 'packing_rate']*0.5\n\n self.packing_rate = self.packing_df['packing_rate'].sum()\n\n self.pass_pressure = cp.get_pass_pressure(self.sender_xy.reshape(1, -1), self.receiver_xy.reshape(1, -1),\n self.packing_df, self.col_label_x,\n self.col_label_y,)\n\n if max(self.defending_team_xy[[self.col_label_x]].values) > 1 or \\\n max(self.defending_team_xy[[self.col_label_y]].values) > 1:\n\n defending_team_xy_unscaled = min_max_scaler.inverse_transform(\n self.packing_df[[self.col_label_x, self.col_label_y]].values)\n self.packing_df.drop(\n [self.col_label_x, self.col_label_y], axis=1, inplace=True)\n self.packing_df[self.col_label_x], self.packing_df[\n self.col_label_y] = defending_team_xy_unscaled[:, 0], defending_team_xy_unscaled[:, 1]\n\n return self.packing_df, self.packing_rate, self.pass_pressure\n",
"id": "9325979",
"language": "Python",
"matching_score": 0.22578896582126617,
"max_stars_count": 17,
"path": "football_packing/packing.py"
},
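The `packing.py` entry above exposes a `packing` class whose `get_packing()` returns the per-defender DataFrame, the summed packing rate and the pass-pressure count. A minimal usage sketch based on the class docstring; the coordinates and the `x`/`y` column names are made up for illustration, and the pitch is assumed to already be normalised to the 0–1 range so the internal scaling branch is skipped:

```python
import numpy as np
import pandas as pd

from football_packing import packing

# Hypothetical pass on a pitch normalised to [0, 1] x [0, 1].
sender = np.array([0.40, 0.50])     # pass sender (x, y)
receiver = np.array([0.70, 0.55])   # pass receiver (x, y)

# Only the coordinate columns are passed in, as the docstring asks.
defenders = pd.DataFrame({
    "x": [0.50, 0.60, 0.65, 0.80],
    "y": [0.48, 0.52, 0.40, 0.50],
})

pack = packing(
    sender_xy=sender,
    receiver_xy=receiver,
    defending_team_xy=defenders,
    col_label_x="x",
    col_label_y="y",
    defend_side="right",            # defenders protect the right-hand goal
)

packing_df, packing_rate, pass_pressure = pack.get_packing()
print(packing_df[["x", "y", "packing_rate"]])
print("packing rate:", packing_rate, "pass pressure:", pass_pressure)
```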
{
"content": "import pandas as pd\nfrom pandas.tseries.holiday import USFederalHolidayCalendar as calendar\n\n#prepare columns for test data\n\ndf=pd.read_csv('test_set.csv')\n\ndata=pd.DataFrame(df)\nmapping = {'CLE': 1, 'POR': 2,'GSW': 3,'ORL': 4,'IND': 5,'BOS': 6,'TOR': 7,'MIL': 8,'MEM': 9,\n 'PHI': 10,'PHX': 11,'LAL': 12,'ATL': 13,'CHI': 14,'SAC': 15,'BKN': 16,'DET': 17,'OKC': 18,\n 'MIA': 19,'UTA': 20,'NOP': 21,'NYK': 22,'SAS': 23,'DEN': 24,'LAC': 25,'HOU': 26,'MIN': 27,'WAS': 28,'CHA': 29,'DAL': 30}\nupdated=data.replace({'Home_Team': mapping,'Away_Team':mapping})\nupdated['home_team_score']=0\nupdated['away_team_score']=0\nupdated['wins_home']=0\nupdated['wins_away']=0\nupdated['loss_home']=0\nupdated['loss_away']=0\nupdated['largest_lead_home']=0\nupdated['largest_lead_away']=0\nupdated['result_win']=0\nupdated['ASG_Count']=0\nupdated['day']= pd.to_datetime(updated['Game_Date']).dt.dayofweek\n\narray=[]\nfor x,y in zip(updated.Home_Team,updated.Away_Team):\n if ((x==1)&(y==3)) | ((x==3)&(y==1)):\n (array.append(int('1')))\n elif ((x==6)&(y==12)) | ((x==12)&(y==6)):\n array.append(int('1'))\n elif ((x == 17) & (y == 12)) | ((x == 12) & (y == 17)):\n array.append(int('1'))\n elif ((x == 10) & (y == 6)) | ((x == 6) & (y == 10)):\n array.append(int('2'))\n elif ((x == 6) & (y == 22)) | ((x == 22) & (y == 6)):\n array.append(int('2'))\n elif ((x == 16) & (y == 22)) | ((x == 22) & (y == 16)):\n array.append(int('2'))\n elif ((x == 17) & (y == 14)) | ((x == 14) & (y == 17)):\n array.append(int('2'))\n elif ((x == 1) & (y == 14)) | ((x == 14) & (y == 1)):\n array.append(int('2'))\n elif ((x == 19) & (y == 14)) | ((x == 14) & (y == 19)):\n array.append(int('2'))\n elif ((x == 22) & (y == 14)) | ((x == 14) & (y == 22)):\n array.append(int('2'))\n elif ((x == 6) & (y == 17)) | ((x == 17) & (y == 6)):\n array.append(int('2'))\n elif ((x == 22) & (y == 19)) | ((x == 19) & (y == 22)):\n array.append(int('2'))\n elif ((x == 22) & (y == 5)) | ((x == 5) & (y == 22)):\n array.append(int('2'))\n elif ((x == 12) & (y == 25)) | ((x == 25) & (y == 12)):\n array.append(int('3'))\n elif ((x == 30) & (y == 26)) | ((x == 26) & (y == 30)):\n array.append(int('3'))\n elif ((x == 23) & (y == 26)) | ((x == 26) & (y == 23)):\n array.append(int('3'))\n elif ((x == 20) & (y == 26)) | ((x == 26) & (y == 20)):\n array.append(int('3'))\n elif ((x == 12) & (y == 23)) | ((x == 23) & (y == 12)):\n array.append(int('3'))\n elif ((x == 11) & (y == 23)) | ((x == 23) & (y == 11)):\n array.append(int('3'))\n else:\n array.append(int(0))\nupdated[\"rivalry\"]=array\n\nhome_team_rank=[]\ntwitter_followers_home=[]\n\nfor x in (updated.Home_Team):\n if ((x==1)):\n home_team_rank.append(int(11))\n twitter_followers_home.append(int(2100000))\n elif (x==2):\n home_team_rank.append(int(17))\n twitter_followers_home.append(int(823000))\n elif (x==3):\n home_team_rank.append(int(3))\n twitter_followers_home.append(int(3500000))\n elif (x==4):\n home_team_rank.append(int(19))\n twitter_followers_home.append(int(1500000))\n elif (x==5):\n home_team_rank.append(int(24))\n twitter_followers_home.append(int(930000))\n elif (x==6):\n home_team_rank.append(int(5))\n twitter_followers_home.append(int(2300000))\n elif (x==7):\n home_team_rank.append(int(13))\n twitter_followers_home.append(int(1400000))\n elif (x==8):\n home_team_rank.append(int(27))\n twitter_followers_home.append(int(695000))\n elif (x==9):\n home_team_rank.append(int(26))\n twitter_followers_home.append(int(766000))\n elif (x==10):\n home_team_rank.append(int(25))\n 
twitter_followers_home.append(int(925000))\n elif (x==11):\n home_team_rank.append(int(14))\n twitter_followers_home.append(int(753000))\n elif (x==12):\n home_team_rank.append(int(2))\n twitter_followers_home.append(int(6170000))\n elif (x==13):\n home_team_rank.append(int(23))\n twitter_followers_home.append(int(991000))\n elif (x==14):\n home_team_rank.append(int(4))\n twitter_followers_home.append(int(3600000))\n elif (x==15):\n home_team_rank.append(int(15))\n twitter_followers_home.append(int(714000))\n elif (x==16):\n home_team_rank.append(int(7))\n twitter_followers_home.append(int(755000))\n elif (x==17):\n home_team_rank.append(int(21))\n twitter_followers_home.append(int(710000))\n elif (x==18):\n home_team_rank.append(int(16))\n twitter_followers_home.append(int(1800000))\n elif (x==19):\n home_team_rank.append(int(10))\n twitter_followers_home.append(int(4090000))\n elif (x==20):\n home_team_rank.append(int(20))\n twitter_followers_home.append(int(632000))\n elif (x==21):\n home_team_rank.append(int(30))\n twitter_followers_home.append(int(659000))\n elif (x==22):\n home_team_rank.append(int(1))\n twitter_followers_home.append(int(1780000))\n elif (x==23):\n home_team_rank.append(int(12))\n twitter_followers_home.append(int(2300000))\n elif (x==24):\n home_team_rank.append(int(22))\n twitter_followers_home.append(int(634000))\n elif (x==25):\n home_team_rank.append(int(6))\n twitter_followers_home.append(int(1100000))\n elif (x==26):\n home_team_rank.append(int(8))\n twitter_followers_home.append(int(1710000))\n elif (x==27):\n home_team_rank.append(int(29))\n twitter_followers_home.append(int(645000))\n elif (x == 28):\n home_team_rank.append(int(18))\n twitter_followers_home.append(int(662000))\n elif (x==29):\n home_team_rank.append(int(28))\n twitter_followers_home.append(int(726000))\n elif (x==30):\n twitter_followers_home.append(int(1200000))\n home_team_rank.append(int(9))\n\n\ntwitter_followers_away=[]\naway_team_rank=[]\n\nfor x in (updated.Away_Team):\n if ((x==1)):\n away_team_rank.append(int(11))\n twitter_followers_away.append(int(2100000))\n elif (x==2):\n away_team_rank.append(int(17))\n twitter_followers_away.append(int(823000))\n elif (x==3):\n away_team_rank.append(int(3))\n twitter_followers_away.append(int(3500000))\n elif (x==4):\n away_team_rank.append(int(19))\n twitter_followers_away.append(int(1500000))\n elif (x==5):\n away_team_rank.append(int(24))\n twitter_followers_away.append(int(930000))\n elif (x==6):\n away_team_rank.append(int(5))\n twitter_followers_away.append(int(2300000))\n elif (x==7):\n away_team_rank.append(int(13))\n twitter_followers_away.append(int(1400000))\n elif (x==8):\n away_team_rank.append(int(27))\n twitter_followers_away.append(int(695000))\n elif (x==9):\n away_team_rank.append(int(26))\n twitter_followers_away.append(int(766000))\n elif (x==10):\n away_team_rank.append(int(25))\n twitter_followers_away.append(int(925000))\n elif (x==11):\n away_team_rank.append(int(14))\n twitter_followers_away.append(int(753000))\n elif (x==12):\n away_team_rank.append(int(2))\n twitter_followers_away.append(int(6170000))\n elif (x==13):\n away_team_rank.append(int(23))\n twitter_followers_away.append(int(991000))\n elif (x==14):\n away_team_rank.append(int(4))\n twitter_followers_away.append(int(3600000))\n elif (x==15):\n away_team_rank.append(int(15))\n twitter_followers_away.append(int(714000))\n elif (x==16):\n away_team_rank.append(int(7))\n twitter_followers_away.append(int(755000))\n elif (x==17):\n 
away_team_rank.append(int(21))\n twitter_followers_away.append(int(710000))\n elif (x==18):\n away_team_rank.append(int(16))\n twitter_followers_away.append(int(1800000))\n elif (x==19):\n away_team_rank.append(int(10))\n twitter_followers_away.append(int(4090000))\n elif (x==20):\n away_team_rank.append(int(20))\n twitter_followers_away.append(int(632000))\n elif (x==21):\n away_team_rank.append(int(30))\n twitter_followers_away.append(int(659000))\n elif (x==22):\n away_team_rank.append(int(1))\n twitter_followers_away.append(int(1780000))\n elif (x==23):\n away_team_rank.append(int(12))\n twitter_followers_away.append(int(2300000))\n elif (x==24):\n away_team_rank.append(int(22))\n twitter_followers_away.append(int(634000))\n elif (x==25):\n away_team_rank.append(int(6))\n twitter_followers_away.append(int(1100000))\n elif (x==26):\n away_team_rank.append(int(8))\n twitter_followers_away.append(int(1710000))\n elif (x==27):\n away_team_rank.append(int(29))\n twitter_followers_away.append(int(645000))\n elif (x == 28):\n away_team_rank.append(int(18))\n twitter_followers_away.append(int(662000))\n elif (x==29):\n away_team_rank.append(int(28))\n twitter_followers_away.append(int(726000))\n elif (x==30):\n twitter_followers_away.append(int(1200000))\n away_team_rank.append(int(9))\n\n\nupdated['Home_Team_Twitter']=twitter_followers_home\nupdated['Away_Team_Twitter']=twitter_followers_away\n\nupdated['Home_Team_Rank']=home_team_rank\nupdated['Away_Team_Rank']=away_team_rank\n\nupdated['Game_Date'] = pd.to_datetime(updated.Game_Date)\ncal = calendar()\nholidays = cal.holidays(start=updated.Game_Date.min(), end=updated.Game_Date.max())\n\nupdated['Holiday'] = updated['Game_Date'].isin(holidays)\n\nholiday=[]\n\nfor holi in updated.Holiday:\n if holi==True:\n holiday.append(int(1))\n elif holi!=True:\n holiday.append(int(0))\nupdated['Holiday'] = holiday\ndf_dummy=updated.pop('Total_Viewers')\n\nupdated['Total_Viewers']=df_dummy\n\nupdated.to_csv('test_data.csv')\n\nprint updated.shape",
"id": "6097721",
"language": "Python",
"matching_score": 4.2844038009643555,
"max_stars_count": 1,
"path": "prep_test_data.py"
},
{
"content": "import pandas as pd\nimport numpy as np\nfrom pandas.tseries.holiday import USFederalHolidayCalendar as calendar\n\n### filtering training dataset\ndf=pd.read_csv('training_set.csv')\n\ndata=pd.DataFrame(df)\n\n\npivoted_data=data.pivot_table('Rounded Viewers',['Season', 'Game_ID','Game_Date','Away_Team','Home_Team'],'Country')\nmean_view=[]\nmean_view=data.groupby('Game_ID')['Rounded Viewers'].sum().tolist()\n\npivoted_data['Rounded Viewers']=mean_view\npivoted_data.reset_index(drop=False,inplace=True)\n\n#Getting through Game_Data.csv to get scores, largest lead\n\ngd_df=pd.read_csv('game_data.csv')\ngame_data=pd.DataFrame(gd_df)\n\nhome_team_score=[]\ngame_data_ID=game_data.Game_ID[::2].tolist()\nhome_team_score=game_data.Final_Score[::2].tolist()\naway_team_score=game_data.Final_Score[1::2].tolist()\n\nwins_home=game_data.Wins_Entering_Gm[::2].tolist()\nwins_away=game_data.Wins_Entering_Gm[1::2].tolist()\n\nloss_home=game_data.Losses_Entering_Gm[::2].tolist()\nloss_away=game_data.Losses_Entering_Gm[1::2].tolist()\n\nlargest_lead_home=game_data.Largest_Lead[::2].tolist()\nlargest_lead_away=game_data.Largest_Lead[1::2].tolist()\n\nscore=pd.DataFrame()\nscore['Game_ID']=game_data.Game_ID.unique().tolist()\nscore['home_team_score']=home_team_score\nscore['away_team_score']=away_team_score\n\nscore['wins_home']=wins_home\nscore['wins_away']=wins_away\n\nscore['loss_home']=loss_home\nscore['loss_away']=loss_away\n\nscore['largest_lead_home']=largest_lead_home\nscore['largest_lead_away']=largest_lead_away\n\n\n#pivoted data joined with team scores\njoined_data=pd.merge(pivoted_data,score,on=['Game_ID','Game_ID'],how='left')\n\nresult_win=[]\n\n#1=Home win, 2=Away win\n\njoined_data['result_win'] = np.where(joined_data['home_team_score'] > joined_data['away_team_score'], '1', '2')\n\n\n#Filter player_data.csv\npd_df=pd.read_csv('player_data.csv')\n\nplayer_data=pd.DataFrame(pd_df)\nasg_player=player_data[player_data.ASG_Team!='None']\n\nasg_count=asg_player.groupby('Game_ID')['ASG_Team'].count().tolist()\n\njoined_asg_data=pd.DataFrame()\njoined_asg_data['Game_ID']=asg_player.Game_ID.unique().tolist()\njoined_asg_data['ASG_Count']=asg_count\n\n\nasg_data=pd.merge(joined_data,joined_asg_data,on=['Game_ID','Game_ID'],how='left')\n\n\n#Map team names to numbers\n\nmapping = {'CLE': 1, 'POR': 2,'GSW': 3,'ORL': 4,'IND': 5,'BOS': 6,'TOR': 7,'MIL': 8,'MEM': 9,\n 'PHI': 10,'PHX': 11,'LAL': 12,'ATL': 13,'CHI': 14,'SAC': 15,'BKN': 16,'DET': 17,'OKC': 18,\n 'MIA': 19,'UTA': 20,'NOP': 21,'NYK': 22,'SAS': 23,'DEN': 24,'LAC': 25,'HOU': 26,'MIN': 27,'WAS': 28,'CHA': 29,'DAL': 30}\nupdated=asg_data.replace({'Home_Team': mapping,'Away_Team':mapping})\n\n#Drop columns for each country data\nupdated.drop(updated.iloc[:, 5:-11],axis=1,inplace=True)\ndf_dummy=updated.pop('Rounded Viewers')\n\n#Get day of week for matchday\n\nupdated['day']= pd.to_datetime(updated['Game_Date']).dt.dayofweek\n\n#Get Rivalry Data\narray=[]\n\nfor x,y in zip(updated.Home_Team,updated.Away_Team):\n if ((x==1)&(y==3)) | ((x==3)&(y==1)):\n (array.append(int('1')))\n elif ((x==6)&(y==12)) | ((x==12)&(y==6)):\n array.append(int('1'))\n elif ((x == 17) & (y == 12)) | ((x == 12) & (y == 17)):\n array.append(int('1'))\n elif ((x == 10) & (y == 6)) | ((x == 6) & (y == 10)):\n array.append(int('2'))\n elif ((x == 6) & (y == 22)) | ((x == 22) & (y == 6)):\n array.append(int('2'))\n elif ((x == 16) & (y == 22)) | ((x == 22) & (y == 16)):\n array.append(int('2'))\n elif ((x == 17) & (y == 14)) | ((x == 14) & (y == 17)):\n 
array.append(int('2'))\n elif ((x == 1) & (y == 14)) | ((x == 14) & (y == 1)):\n array.append(int('2'))\n elif ((x == 19) & (y == 14)) | ((x == 14) & (y == 19)):\n array.append(int('2'))\n elif ((x == 22) & (y == 14)) | ((x == 14) & (y == 22)):\n array.append(int('2'))\n elif ((x == 6) & (y == 17)) | ((x == 17) & (y == 6)):\n array.append(int('2'))\n elif ((x == 22) & (y == 19)) | ((x == 19) & (y == 22)):\n array.append(int('2'))\n elif ((x == 22) & (y == 5)) | ((x == 5) & (y == 22)):\n array.append(int('2'))\n elif ((x == 12) & (y == 25)) | ((x == 25) & (y == 12)):\n array.append(int('3'))\n elif ((x == 30) & (y == 26)) | ((x == 26) & (y == 30)):\n array.append(int('3'))\n elif ((x == 23) & (y == 26)) | ((x == 26) & (y == 23)):\n array.append(int('3'))\n elif ((x == 20) & (y == 26)) | ((x == 26) & (y == 20)):\n array.append(int('3'))\n elif ((x == 12) & (y == 23)) | ((x == 23) & (y == 12)):\n array.append(int('3'))\n elif ((x == 11) & (y == 23)) | ((x == 23) & (y == 11)):\n array.append(int('3'))\n else:\n array.append(int(0))\nupdated[\"rivalry\"]=array\n\n\n#Team social media rank and twitter followers\nhome_team_rank=[]\ntwitter_followers_home=[]\n\nfor x in (updated.Home_Team):\n if ((x==1)):\n home_team_rank.append(int(11))\n twitter_followers_home.append(int(2100000))\n elif (x==2):\n home_team_rank.append(int(17))\n twitter_followers_home.append(int(823000))\n elif (x==3):\n home_team_rank.append(int(3))\n twitter_followers_home.append(int(3500000))\n elif (x==4):\n home_team_rank.append(int(19))\n twitter_followers_home.append(int(1500000))\n elif (x==5):\n home_team_rank.append(int(24))\n twitter_followers_home.append(int(930000))\n elif (x==6):\n home_team_rank.append(int(5))\n twitter_followers_home.append(int(2300000))\n elif (x==7):\n home_team_rank.append(int(13))\n twitter_followers_home.append(int(1400000))\n elif (x==8):\n home_team_rank.append(int(27))\n twitter_followers_home.append(int(695000))\n elif (x==9):\n home_team_rank.append(int(26))\n twitter_followers_home.append(int(766000))\n elif (x==10):\n home_team_rank.append(int(25))\n twitter_followers_home.append(int(925000))\n elif (x==11):\n home_team_rank.append(int(14))\n twitter_followers_home.append(int(753000))\n elif (x==12):\n home_team_rank.append(int(2))\n twitter_followers_home.append(int(6170000))\n elif (x==13):\n home_team_rank.append(int(23))\n twitter_followers_home.append(int(991000))\n elif (x==14):\n home_team_rank.append(int(4))\n twitter_followers_home.append(int(3600000))\n elif (x==15):\n home_team_rank.append(int(15))\n twitter_followers_home.append(int(714000))\n elif (x==16):\n home_team_rank.append(int(7))\n twitter_followers_home.append(int(755000))\n elif (x==17):\n home_team_rank.append(int(21))\n twitter_followers_home.append(int(710000))\n elif (x==18):\n home_team_rank.append(int(16))\n twitter_followers_home.append(int(1800000))\n elif (x==19):\n home_team_rank.append(int(10))\n twitter_followers_home.append(int(4090000))\n elif (x==20):\n home_team_rank.append(int(20))\n twitter_followers_home.append(int(632000))\n elif (x==21):\n home_team_rank.append(int(30))\n twitter_followers_home.append(int(659000))\n elif (x==22):\n home_team_rank.append(int(1))\n twitter_followers_home.append(int(1780000))\n elif (x==23):\n home_team_rank.append(int(12))\n twitter_followers_home.append(int(2300000))\n elif (x==24):\n home_team_rank.append(int(22))\n twitter_followers_home.append(int(634000))\n elif (x==25):\n home_team_rank.append(int(6))\n twitter_followers_home.append(int(1100000))\n elif 
(x==26):\n home_team_rank.append(int(8))\n twitter_followers_home.append(int(1710000))\n elif (x==27):\n home_team_rank.append(int(29))\n twitter_followers_home.append(int(645000))\n elif (x == 28):\n home_team_rank.append(int(18))\n twitter_followers_home.append(int(662000))\n elif (x==29):\n home_team_rank.append(int(28))\n twitter_followers_home.append(int(726000))\n elif (x==30):\n twitter_followers_home.append(int(1200000))\n home_team_rank.append(int(9))\n\n#Same for away teams\n\ntwitter_followers_away=[]\naway_team_rank=[]\n\nfor x in (updated.Away_Team):\n if ((x==1)):\n away_team_rank.append(int(11))\n twitter_followers_away.append(int(2100000))\n elif (x==2):\n away_team_rank.append(int(17))\n twitter_followers_away.append(int(823000))\n elif (x==3):\n away_team_rank.append(int(3))\n twitter_followers_away.append(int(3500000))\n elif (x==4):\n away_team_rank.append(int(19))\n twitter_followers_away.append(int(1500000))\n elif (x==5):\n away_team_rank.append(int(24))\n twitter_followers_away.append(int(930000))\n elif (x==6):\n away_team_rank.append(int(5))\n twitter_followers_away.append(int(2300000))\n elif (x==7):\n away_team_rank.append(int(13))\n twitter_followers_away.append(int(1400000))\n elif (x==8):\n away_team_rank.append(int(27))\n twitter_followers_away.append(int(695000))\n elif (x==9):\n away_team_rank.append(int(26))\n twitter_followers_away.append(int(766000))\n elif (x==10):\n away_team_rank.append(int(25))\n twitter_followers_away.append(int(925000))\n elif (x==11):\n away_team_rank.append(int(14))\n twitter_followers_away.append(int(753000))\n elif (x==12):\n away_team_rank.append(int(2))\n twitter_followers_away.append(int(6170000))\n elif (x==13):\n away_team_rank.append(int(23))\n twitter_followers_away.append(int(991000))\n elif (x==14):\n away_team_rank.append(int(4))\n twitter_followers_away.append(int(3600000))\n elif (x==15):\n away_team_rank.append(int(15))\n twitter_followers_away.append(int(714000))\n elif (x==16):\n away_team_rank.append(int(7))\n twitter_followers_away.append(int(755000))\n elif (x==17):\n away_team_rank.append(int(21))\n twitter_followers_away.append(int(710000))\n elif (x==18):\n away_team_rank.append(int(17))\n twitter_followers_away.append(int(1800000))\n elif (x==19):\n away_team_rank.append(int(10))\n twitter_followers_away.append(int(4090000))\n elif (x==20):\n away_team_rank.append(int(20))\n twitter_followers_away.append(int(632000))\n elif (x==21):\n away_team_rank.append(int(30))\n twitter_followers_away.append(int(659000))\n elif (x==22):\n away_team_rank.append(int(1))\n twitter_followers_away.append(int(1780000))\n elif (x==23):\n away_team_rank.append(int(12))\n twitter_followers_away.append(int(2300000))\n elif (x==24):\n away_team_rank.append(int(22))\n twitter_followers_away.append(int(634000))\n elif (x==25):\n away_team_rank.append(int(6))\n twitter_followers_away.append(int(1100000))\n elif (x==26):\n away_team_rank.append(int(8))\n twitter_followers_away.append(int(1710000))\n elif (x==27):\n away_team_rank.append(int(29))\n twitter_followers_away.append(int(645000))\n elif (x == 28):\n away_team_rank.append(int(18))\n twitter_followers_away.append(int(662000))\n elif (x==29):\n away_team_rank.append(int(28))\n twitter_followers_away.append(int(726000))\n elif (x==30):\n twitter_followers_away.append(int(1200000))\n 
away_team_rank.append(int(9))\n\nupdated['Home_Team_Twitter']=twitter_followers_home\nupdated['Away_Team_Twitter']=twitter_followers_away\n\nupdated['Home_Team_Rank']=home_team_rank\nupdated['Away_Team_Rank']=away_team_rank\n\n#Get American Holiday data\n\nupdated['Game_Date'] = pd.to_datetime(updated.Game_Date)\ncal = calendar()\nholidays = cal.holidays(start=updated.Game_Date.min(), end=updated.Game_Date.max())\n\nupdated['Holiday'] = updated['Game_Date'].isin(holidays)\n\nholiday=[]\n\nfor holi in updated.Holiday:\n if holi==True:\n holiday.append(int(1))\n elif holi!=True:\n holiday.append(int(0))\nupdated['Holiday'] = holiday\n\n\nupdated['Rounded Viewers']=df_dummy\ncomplete=updated.fillna(0)\n\nprint len(complete.columns)\n\n\ncomplete.to_csv('output.csv')\n",
"id": "12436938",
"language": "Python",
"matching_score": 0.19124940037727356,
"max_stars_count": 1,
"path": "data_processing.py"
},
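Both `prep_test_data.py` and `data_processing.py` above build the team-rank and Twitter-follower features with long `if/elif` chains that repeat the same constants, and the two copies have already drifted apart (team 18's away rank is 16 in one script and 17 in the other). A hedged sketch of one way to keep a single lookup table and apply it with `Series.map`; the numbers are copied from the chains above and the column names follow the same conventions:

```python
import pandas as pd

# One shared lookup: numeric team code -> (social-media rank, Twitter followers).
# Values are copied from the if/elif chains in the two scripts above.
TEAM_STATS = {
    1: (11, 2100000),  2: (17, 823000),   3: (3, 3500000),
    4: (19, 1500000),  5: (24, 930000),   6: (5, 2300000),
    7: (13, 1400000),  8: (27, 695000),   9: (26, 766000),
    10: (25, 925000),  11: (14, 753000),  12: (2, 6170000),
    13: (23, 991000),  14: (4, 3600000),  15: (15, 714000),
    16: (7, 755000),   17: (21, 710000),  18: (16, 1800000),
    19: (10, 4090000), 20: (20, 632000),  21: (30, 659000),
    22: (1, 1780000),  23: (12, 2300000), 24: (22, 634000),
    25: (6, 1100000),  26: (8, 1710000),  27: (29, 645000),
    28: (18, 662000),  29: (28, 726000),  30: (9, 1200000),
}

RANK = {team: rank for team, (rank, _) in TEAM_STATS.items()}
TWITTER = {team: followers for team, (_, followers) in TEAM_STATS.items()}


def add_team_features(updated):
    """Attach rank / follower columns for home and away teams in one pass."""
    updated["Home_Team_Rank"] = updated["Home_Team"].map(RANK)
    updated["Away_Team_Rank"] = updated["Away_Team"].map(RANK)
    updated["Home_Team_Twitter"] = updated["Home_Team"].map(TWITTER)
    updated["Away_Team_Twitter"] = updated["Away_Team"].map(TWITTER)
    return updated


# Two example rows using the same numeric team codes as the scripts above.
print(add_team_features(pd.DataFrame({"Home_Team": [3, 12], "Away_Team": [6, 22]})))
```

A single shared mapping keeps the training and test feature pipelines consistent by construction.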
{
"content": "#---- Data Sources Dictionary----#\n# Providing sources for all datasets used in the project\n\nimport pandas as pd\nimport os\n\ndef load_data_sources():\n cwd = os.getcwd()\n data_path = \"/ML/Data/data_sources.csv\"\n file_path = str(cwd + data_path)\n data_source_df = pd.read_csv(file_path)\n \n return data_source_df\n",
"id": "1970983",
"language": "Python",
"matching_score": 0.04364868253469467,
"max_stars_count": 4,
"path": "code/data_sources.py"
},
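`data_sources.py` above resolves the CSV path by concatenating `os.getcwd()` with a hard-coded string, so it only works when the process is started from the expected directory. A hedged alternative that resolves the file relative to the module itself; the `ML/Data/data_sources.csv` layout is kept from the original, but the relative location may need adjusting to the real repository structure:

```python
import os

import pandas as pd


def load_data_sources():
    """Load data_sources.csv relative to this file instead of the CWD."""
    base_dir = os.path.dirname(os.path.abspath(__file__))
    # Assumed layout: an ML/Data folder next to this module; adjust if the
    # actual repository places the CSV elsewhere (the original lives in code/).
    file_path = os.path.join(base_dir, "ML", "Data", "data_sources.csv")
    return pd.read_csv(file_path)
```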
{
"content": "from setuptools import setup\n\n\ndef readme():\n with open('README.md') as f:\n return f.read()\n\n\nsetup(name='football_packing',\n version='0.2',\n description='Calculate the packing rate for a given pass in football (soccer)',\n long_description=readme(),\n url='https://github.com/samirak93/Football-packing',\n author='<NAME>',\n author_email='<EMAIL>',\n license='LICENSE.txt',\n packages=['football_packing'],\n install_requires=[\n 'numpy >= 1.18.1',\n 'pandas >= 1.0.3',\n 'bokeh >= 2.0.2',\n 'scipy >= 1.4.1',\n 'scikit-learn >= 0.23.1',\n ],\n keywords='soccer football analytics packing',\n include_package_data=True,\n zip_safe=False)\n",
"id": "9700433",
"language": "Python",
"matching_score": 1.223024845123291,
"max_stars_count": 17,
"path": "setup.py"
},
{
"content": "from football_packing.packing import packing\nfrom football_packing.plot_packing import plot_packing\n",
"id": "11568392",
"language": "Python",
"matching_score": 0.831843376159668,
"max_stars_count": 17,
"path": "football_packing/__init__.py"
}
] | 3.044443 |
galadriel2486 | [
{
"content": "\"\"\"\nImportação das bibliotecas.\n\"\"\"\nimport random\n\nprint('\\n\\nOlá! Esse é o jogo do Letroca.\\n\\nINSTRUÇÕES:\\n'\n '- O jogo possui 5 níveis. Quanto maior o nível, maiores'\n ' serão as palavras.\\n'\n '- As palavras estarão embaralhadas, e seu papel será ordená-las.\\n'\n '- Se você acertar a palavra correta, avançará para o próximo nível,'\n ' caso contrário, deverá tentar até acertar.\\n'\n 'Divirta-se! :)\\n')\n\n\ndef sortear(lista):\n \"\"\"\n Cria uma lista de caracteres a partir de uma palavra selecionada da lista de termos.\n\n Entrada: lista (lista/string).\n Saída: sorteado (lista/string-char).\n \"\"\"\n sorteado = random.choice(lista)\n return sorteado\n\n\ndef embaralhar(palavra):\n \"\"\"\n Embaralha a lista de caracteres da palavra sorteada e reúne-os novamente.\n\n Entrada: palavra (lista/string-char).\n Saída: embaralhado (string).\n \"\"\"\n lista_caracteres = list(palavra)\n embaralhado = ''.join(random.sample(lista_caracteres, len(lista_caracteres)))\n return embaralhado\n\n\ndef verificar(palavra, entrada):\n \"\"\"\n Confere se as tentativas do usuário estão corretas conforme a palavra sorteada.\n Se não estiver, retorna à função principal e continua a iteração.\n\n Entrada: palavra, entrada (string, input/string).\n Saída: bool.\n \"\"\"\n return palavra == entrada\n\n\ndef main():\n \"\"\"\n Função principal.\n Primeiro foram definidos os conjuntos de palavras, por nível.\n O setup do jogo é realizado dentro do loop for de modo que,\n na iteração, uma palavra nova seja sorteada a cada nível.\n O input do usuário é transformado em minúscula com a função lower,\n assim evita erros de verificação por conta da diferença entre\n maiúsculas e minúsculas.\n Após o usuário passar por todos os níveis, o jogo sai do loop\n for e exibe uma mensagem final.\n \"\"\"\n conjuntos = {1: ['amor', 'fato', 'viés', 'mito', 'caos',\n 'agir', 'ócio', 'vale', 'alva', 'ágil'],\n 2: ['sagaz', 'atroz', 'assaz', 'ânimo',\n 'saber', 'ápice', 'temor', 'fugaz', 'mundo'],\n 3: ['etéreo', 'eximir', 'sisudo', 'objeto', 'acesso',\n 'sanção', 'receio', 'mazela', 'cômico', 'vulgar'],\n 4: ['quimera', 'imersão', 'isenção', 'parcial', 'modesto',\n 'padecer', 'emotivo', 'colapso', 'inércia', 'orgulho'],\n 5: ['metódico', 'consiste', 'desfecho', 'critério', 'suscitar',\n 'sucumbir', 'portanto', 'complexo', 'emulação', 'maestria']}\n\n for nivel in range(1, len(conjuntos) + 1):\n conjunto = conjuntos[nivel]\n palavra_sorteada = sortear(conjunto)\n palavra_embaralhada = embaralhar(palavra_sorteada)\n\n print(f'Nível: {nivel}\\nA palavra é: {palavra_embaralhada}.\\n')\n\n usuario = input('Faça a sua tentativa: ').lower()\n while not verificar(palavra_sorteada, usuario):\n usuario = input('Faça a sua tentativa: ').lower()\n\n print(f'Parabéns, acertou! A palavra correta é \"{palavra_sorteada}\".\\n')\n\n print('Uhu, você passou por todos os níveis!')\n\n\nif __name__ == '__main__':\n main()\n",
"id": "10796456",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "main.py"
}
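The Letroca game above builds its scrambled prompt in `embaralhar` by sampling every character of the drawn word without replacement. The same idea in isolation; `quimera` is one of the level-4 words, and the seed is only there to make the demo repeatable:

```python
import random


def embaralhar(palavra):
    """Return the letters of `palavra` in a random order, as in main.py above."""
    letras = list(palavra)
    return ''.join(random.sample(letras, len(letras)))


random.seed(42)               # demo seed only
print(embaralhar('quimera'))  # prints some permutation of 'quimera'
```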
] | 0 |
bailb | [
{
"content": "import jsonUtils\nimport json\n\n\ndef conToClassDesc(encodejson,eList):\n originElemList=jsonUtils.ElementList()\n\n aElem=jsonUtils.Element(0,\"root\",\"rootStart\",\"obj\")\n originElemList.push_back(aElem)\n if (jsonUtils.decodeJson(encodejson,originElemList,1) == False):\n return False\n eStack = jsonUtils.ElementList()\n listCount = originElemList.count()\n for i in range(0,listCount):\n aEl=originElemList.getIndex(i)\n# print(\"i[%s] desc[%s] type[%s] name[%s] [%s]\"%(aEl._level,aEl._desc,aEl._eleType,aEl._name,i))\n if (aEl._eleType == 'objlist' or aEl._eleType == 'obj'):\n if (eStack.count() == 0):\n eStack.push(aEl)\n eList.pushParam(aEl._level,aEl._name,aEl._desc,aEl._eleType+\"begin\")\n else:\n ele=eStack.pop()\n if(ele._level == aEl._level):\n eList.pushParam(ele._level,ele._name,ele._desc,ele._eleType+\"end\")\n else:\n eStack.push(ele)\n eList.pushParam(aEl._level,aEl._name,aEl._desc,aEl._eleType+\"begin\")\n eStack.push(aEl)\n elif(aEl._eleType == 'var' or aEl._eleType == 'varlist'):\n if (eStack.count() == 0):\n eStack.push(aEl)\n else:\n ele=eStack.pop()\n if(ele._level == aEl._level):\n eList.pushParam(ele._level,ele._name,ele._desc,ele._eleType+\"end\")\n else:\n eStack.push(ele)\n eList.pushParam(aEl._level,aEl._name,aEl._desc,aEl._eleType)\n\n while (eStack.count() > 0):\n aEl=eStack.pop()\n eList.pushParam(aEl._level,aEl._name,aEl._desc,aEl._eleType+\"end\")\n",
"id": "12621725",
"language": "Python",
"matching_score": 3.9655184745788574,
"max_stars_count": 0,
"path": "JsonReflect.py"
},
{
"content": "#coding=utf-8\nimport json\n\nclass Element(object):\n def __init__(self,level,name,desc,eleType):\n self._level=level\n self._name=name\n self._desc=desc\n self._eleType=eleType \n\nclass ElementList(object):\n def __init__(self):\n self._eleList=[]\n\n def push_back(self,aElement):\n self._eleList.append(aElement)\n return self._eleList;\n def push(self,aElement):\n self._eleList.append(aElement)\n def pushParam(self,level,name,desc,eleType):\n aElem=Element(level,name,desc,eleType)\n self.push(aElem)\n def pop(self):\n if (len(self._eleList) > 0):\n return self._eleList.pop()\n else:\n return None\n def count(self):\n return len(self._eleList)\n\n def getIndex(self,index):\n if (len(self._eleList) > index):\n return self._eleList[index]\n else:\n return None\n\ndef getType(value):\n if(isinstance(value,dict)):\n return 'obj'\n elif(isinstance(value,list)):\n if(isinstance(value[0],str) or isinstance(value[0],unicode)):\n return 'varlist'\n else:\n return 'objlist'\n elif(isinstance(value,str) or isinstance(value,unicode)):\n return 'var'\n\ndef getDesc(string):\n if(isinstance(string,str) or isinstance(string,unicode)):\n return string\n else:\n return \"\"\n\ndef isListLegal(value):\n if(len(value) > 1):\n return False\n if(isinstance(value[0],list) or isinstance(value[0],dict) or isinstance(value[0],str) or isinstance(value[0],unicode)):\n return True\n else:\n return False\n\ndef decodeJson(jsonStr,elementList,i):\n if(isinstance(jsonStr,dict)):\n for item in jsonStr:\n#print(getDesc(jsonStr[item])+\" item : [\"+item + \"] getType[ \"+ getType(jsonStr[item]) + \"] type %s %s\"%(type(jsonStr[item]),i))\n desc=getDesc(jsonStr[item])\n valueType=getType(jsonStr[item])\n if(valueType==\"varlist\"):\n# print(\"desc:==============\"+jsonStr[item][0]);\n desc=jsonStr[item][0];\n aElem=Element(i,item,desc,valueType)\n elementList.push_back(aElem)\n\n if(isinstance(jsonStr[item],dict)):\n #print(\"This json=======================\")\n decodeJson(jsonStr[item],elementList,i=i+1)\n elif(isinstance(jsonStr[item],list)):\n if(False == isListLegal(jsonStr[item])):\n print(\"This json wasn't right[\"+item+\"]\")\n return False\n decodeJson(jsonStr[item],elementList,i)\n elif(isinstance(jsonStr[item],str) or isinstance(jsonStr[item],unicode)):\n# print(\"jsonStr[%s]\"%jsonStr[item])\n pass\n else:\n print(\"ERROR type:%s\" % type(jsonStr[item]))\n pass\n elif(isinstance(jsonStr,list)):\n length=len(jsonStr)\n for j in range(0, length):\n if(isinstance(jsonStr[j],dict)):\n decodeJson(jsonStr[j],elementList,i=i+1)\n else:\n pass\n # print(\"type [%s]\"%type(jsonStr[j]))\n else:\n print(\"jsonStr is not json object!\")\n return True\n",
"id": "11615293",
"language": "Python",
"matching_score": 1.8757530450820923,
"max_stars_count": 0,
"path": "jsonUtils.py"
},
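`jsonUtils.decodeJson` above flattens a schema-like JSON document into an `ElementList` of `(level, name, desc, type)` records, classifying values as `obj`, `objlist`, `var` or `varlist`. A small sketch of how it could be driven (the module tests for `unicode`, so it targets Python 2, and it must be run where `jsonUtils.py` is importable); the schema below is invented for illustration:

```python
# Python 2 sketch; the schema is an invented example, not taken from the repo.
import json

import jsonUtils

schema = json.loads('''{
    "person": {
        "name": "full name",
        "age": "[int] age in years",
        "tags": ["[string] free-form labels"]
    }
}''')

elements = jsonUtils.ElementList()
jsonUtils.decodeJson(schema, elements, 1)

for i in range(elements.count()):
    el = elements.getIndex(i)
    print("level=%d type=%-8s name=%-6s desc=%s"
          % (el._level, el._eleType, el._name, el._desc))
```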
{
"content": "import jsonUtils\nimport json\nimport JsonReflect\n\ndef tab(num):\n space=' '\n while(num):\n num=num-1\n space+=' '\n return space\n\ndef getValueType(desc):\n if not (desc.startswith(\"[\")):\n return \"string\";\n elif (desc.startswith(\"[int]\")):\n return \"int\";\n elif (desc.startswith(\"[long]\")):\n return \"long\";\n elif (desc.startswith(\"[bool]\")):\n return \"bool\";\n elif (desc.startswith(\"[float]\")):\n return \"float\"\n else:\n return desc.split(\"[\")[1].split(\"]\")[0];\n\ndef eleToCode(element):\n valueType = getValueType(element._desc);\n if (element._eleType == \"var\" or element._eleType == \"obj\"):\n return (tab(1)+valueType+\" \"+element._name+\"; //\"+element._desc);\n elif (element._eleType == \"objlist\" or element._eleType == \"varlist\"):\n return (tab(1)+\"std::list<\"+valueType+\"> \"+element._name+\"; //\"+element._desc); \n\ndef conToClassEx(eList):\n classStack = jsonUtils.ElementList();\n listCount = eList.count();\n if(listCount <= 0):\n print(\"eListCount shouldn't be lt 0\");\n return None;\n statckTopLevel=-1;\n codeString=\"\";\n for i in range(0,listCount):\n aEl=eList.getIndex(i);\n if (aEl._level >= statckTopLevel):\n classStack.push(aEl);\n statckTopLevel = aEl._level;\n #print(\"level[%s] desc[%s] type[%s] name[%s] index[%s]\"%(aEl._level,aEl._desc,aEl._eleType,aEl._name,i));\n elif(aEl._level < statckTopLevel):\n keyFlag=aEl._name;\n objLevel=aEl._level;\n #print(\"struct \"+aEl._name+\"_Element {\");\n codeString +=(\" struct \"+aEl._name+\"_Element { \\n\");\n while(True):\n topEl=classStack.pop();\n if (topEl._level == objLevel):\n #print(\" };\\n\");\n codeString += (\" };\\n\\n\");\n break;\n if (topEl._level >= statckTopLevel):\n statckTopLevel = topEl._level;\n#print(\"================================== \"+aEl._name + \" \" + aEl._eleType +\" %d \"%statckTopLevel);#+eleToCode(topEl));\n codeString += (eleToCode(topEl)+\"\\n\");\n # print(\"\\tlevel[%s] desc[%s] type[%s] name[%s] index[%s]\"%(topEl._level,topEl._desc,topEl._eleType,topEl._name,i)); \n elif(topEl._level < statckTopLevel):\n statckTopLevel = topEl._level;\n #print(tab(aEl._level)+ \" level[%s] desc[%s] \\\n # type[%s] name[%s] index[%s]\"%(aEl._level,aEl._desc,aEl._eleType,aEl._name,i));\n break;\n\n objElement=jsonUtils.Element(objLevel,aEl._name,\"[\"+aEl._name+\"_Element] list\",\"obj\");\n statckTopLevel-=1;\n if (aEl._eleType == \"objend\"):\n objElement=jsonUtils.Element(objLevel,aEl._name,\"[\"+aEl._name+\"_Element] list\",\"obj\"); \n elif (aEl._eleType == \"objlistend\"):\n objElement=jsonUtils.Element(objLevel,aEl._name,\"[\"+aEl._name+\"_Element] list\",\"objlist\");\n classStack.push(objElement);\n return codeString;\n\ndef getCPPValueType(desc):\n if not (desc.startswith(\"[\")):\n return \"FIELD_TYPE_STRING\";\n elif (desc.startswith(\"[int]\")):\n return \"FIELD_TYPE_INT\";\n elif (desc.startswith(\"[long]\")):\n return \"FIELD_TYPE_INT\";\n elif (desc.startswith(\"[bool]\")):\n return \"FIELD_TYPE_BOOL\";\n elif (desc.startswith(\"[float]\")):\n return \"FIELD_TYPE_FLOAT\"\n else:\n return desc.split(\"[\")[1].split(\"]\")[0];\n\ndef conJstrToMetaInfo(eList):\n classStack = jsonUtils.ElementList();\n listCount = eList.count();\n print(\"listCount[%d]\"%listCount);\n if(listCount <= 0):\n print(\"eListCount shouldn't be lt 0\");\n return None;\n codeString = \"\";\n stackTopLevel=0;\n for i in range(0,listCount):\n aEl=eList.getIndex(i);\n#print(\"==============type:\"+aEl._eleType+ \" name:\"+aEl._name+\" level %s\"%aEl._level);\n if (i == 
0 or (aEl._level ==0 and (aEl._eleType.startswith(\"objbegin\") or aEl._eleType.startswith(\"objlistbegin\")))):\n classStack.push(aEl);\n stackTopLevel = aEl._level;\n #print(tab(aEl._level)+\"METAINFO_CREATE(\"+aEl._name+\"_Element);\");\n codeString += (tab(aEl._level)+\"METAINFO_CREATE(\"+aEl._name+\"_Element);\\n\");\n else:\n# print(\"type:\"+aEl._eleType);\n# print(\"classStack.count[%d]\"%classStack.count());\n stackTopEl=classStack.getIndex(classStack.count()-1); #just look, don't pop\n if (aEl._eleType.startswith(\"objlistbegin\") or aEl._eleType.startswith(\"objbegin\")):\n if (aEl._eleType.startswith(\"objbegin\")):\n #print(tab(aEl._level)+ \"METAINFO_CHILD_BEGIN(\"+stackTopEl._name+\"_Element,\"+aEl._name+\"_Element,\"+aEl._name+\");\"); \n codeString += (tab(aEl._level)+ \"METAINFO_CHILD_BEGIN(\"+stackTopEl._name+\"_Element,\"+aEl._name+\"_Element,\"+aEl._name+\");\\n\");\n else:\n #print(tab(aEl._level)+ \"METAINFO_CHILD_LIST_BEGIN(\"+stackTopEl._name+\"_Element,\"+aEl._name+\"_Element,\"+aEl._name+\");\");\n codeString += (tab(aEl._level)+ \"METAINFO_CHILD_LIST_BEGIN(\"+stackTopEl._name+\"_Element,\"+aEl._name+\"_Element,\"+aEl._name+\");\\n\");\n\n classStack.push(aEl)\n stackTopLevel = aEl._level;\n elif (aEl._eleType.startswith(\"varlist\")):\n #print(tab(aEl._level)+ \"METAINFO_ADD_MEMBER_LIST(\"+stackTopEl._name+\"_Element,\"+getCPPValueType(aEl._desc)+\",\"+aEl._name+\");\");\n codeString += (tab(aEl._level)+ \"METAINFO_ADD_MEMBER_LIST(\"+stackTopEl._name+\"_Element,\"+getCPPValueType(aEl._desc)+\",\"+aEl._name+\");\\n\");\n\n elif ((aEl._eleType.startswith(\"objlistend\") or aEl._eleType.startswith(\"objend\")) and i < listCount-1):\n #print(tab(aEl._level)+ \"METAINFO_CHILD_END();\\n\");\n codeString += (tab(aEl._level)+ \"METAINFO_CHILD_END();\\n\\n\");\n classStack.pop();#just pop\n elif (aEl._eleType.startswith(\"var\")):\n#print(tab(aEl._level)+ \"METAINFO_ADD_MEMBER(\"+stackTopEl._name+\"_Element,\"+getCPPValueType(aEl._desc)+\",\"+aEl._name+\");\");\n codeString += (tab(aEl._level)+ \"METAINFO_ADD_MEMBER(\"+stackTopEl._name+\"_Element,\"+getCPPValueType(aEl._desc)+\",\"+aEl._name+\");\\n\");\n\n return codeString;\ndef genCode(fileList):\n\n rawSourceContents=\"\";\n f = open('./template/CppSource.cpp','r')\n rawSourceContents = f.read()\n f.close();\n\n rawHeaderContents=\"\";\n f = open('./template/CppHeader.h','r')\n rawHeaderContents = f.read()\n f.close();\n\n for filePath in fileList:\n encodejson =json.load(open(filePath,'r'));\n eList = jsonUtils.ElementList();\n JsonReflect.conToClassDesc(encodejson,eList);\n\n structRootName = eList.getIndex(0)._name + \"_Element\";\n tarFileName = str(filePath.split('/',2)[2]).split('.',2)[0];\n metaName = tarFileName;\n\n structCode=conToClassEx(eList)\n metaDesc=conJstrToMetaInfo(eList)\n\n print(\"metaName:[\"+metaName+\"] targetName:[\"+tarFileName+\"]\");\n\n contents1= rawSourceContents.replace(\"$META_STRUCT$\",structCode);\n contents1= contents1.replace(\"$META_NAME$\",metaName);\n contents1 = contents1.replace(\"$META_INFO_DESC$\",metaDesc);\n\n print(\"targetFileName: \"+tarFileName);\n f = open(\"./source/\"+tarFileName+\".cpp\",'w')\n f.write(contents1);\n f.close();\n\n\n contents1 = \"\";\n contents1= rawHeaderContents.replace(\"$META_STRUCT$\",structCode);\n contents1= contents1.replace(\"$META_STRUCT_ROOT$\",structRootName);\n contents1 = contents1.replace(\"$META_NAME$\",metaName);\n\n f = open(\"./source/include/\"+tarFileName+\".h\",'w')\n f.write(contents1);\n f.close();\n\n\n",
"id": "9728915",
"language": "Python",
"matching_score": 2.804028272628784,
"max_stars_count": 0,
"path": "langUtils/cppUtils.py"
},
{
"content": "import jsonUtils\nimport json\nimport JsonReflect\nimport sys;\nimport os;\nsys.path.append(\"./langUtils\")\nimport cppUtils as langUtils \n\ndef eachFile(filepath):\n pathDir = os.listdir(filepath)\n fileList = [];\n for allDir in pathDir:\n filePath = os.path.join('%s/%s' % (filepath, allDir))\n fileList.append(filePath);\n print filePath \n return fileList \nlth=len(sys.argv) \nif (lth <= 1):\n langType=\"cpp\";\nelse:\n langType=sys.argv[1];\nif (langType == \"cpp\"):\n print(\"generate cpp code\");\n fileList = eachFile(\"./proto\");\n langUtils.genCode(fileList);\nelse:\n print(langType+\" not support yet \");\n\n",
"id": "8842812",
"language": "Python",
"matching_score": 0.4759007394313812,
"max_stars_count": 0,
"path": "build.py"
}
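`build.py` above walks `./proto`, and `cppUtils.genCode` turns each JSON file into a C++ struct plus META_INFO registration code, using the `[int]`/`[long]`/`[bool]`/`[float]` prefixes parsed by `getValueType` (an unprefixed description becomes a string field, and a single-element list of strings describes a list member). A hypothetical input file that fits those rules, written out so `python build.py cpp` could pick it up; the field names are invented:

```python
import json
import os

# Hypothetical ./proto/player.json input for build.py. Field descriptions use
# the "[type]" prefixes that cppUtils.getValueType understands.
player_proto = {
    "player": {
        "name": "display name",
        "score": "[int] current score",
        "alive": "[bool] still in game",
        "tags": ["[int] arbitrary numeric labels"],
        "inventory": [
            {
                "item_id": "[long] unique item id",
                "weight": "[float] weight in kg"
            }
        ]
    }
}

if not os.path.isdir("proto"):
    os.mkdir("proto")

with open("proto/player.json", "w") as f:
    json.dump(player_proto, f, indent=4)

# Then, with ./template/CppSource.cpp and ./template/CppHeader.h in place and
# a ./source/include directory created, generate the C++ code with:
#   python build.py cpp
```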
] | 2.339891 |
JayFoxRox | [
{
"content": "#!/usr/bin/env python3\n\nimport sys\nimport os\n\nfrom common import *\n\n# Open the HED file\nwith open(sys.argv[1], \"rb\") as f:\n\n # Get file size to know where it ends\n f.seek(0, os.SEEK_END) \n file_size = f.tell()\n f.seek(0)\n\n # Also open the WAD file\n with open(sys.argv[2], \"rb\") as fw:\n\n # Loop over HED entries\n while f.tell() < file_size - 7:\n print(f.tell(), file_size)\n name = read_string(f)\n #FIXME: Check for terminator?\n align(f, 4)\n offset = read32(f)\n size = read32(f)\n\n print(\"file: '%s'\" % name)\n\n fw.seek(offset)\n\n # Construct path\n file_export_path = os.path.join(\"out\", name)\n\n # Extract file\n with open(file_export_path, \"wb\") as fo:\n data = fw.read(size)\n fo.write(data)\n\n terminator = read8(f)\n assert(terminator == 0xFF)\n",
"id": "5783652",
"language": "Python",
"matching_score": 1.6480876207351685,
"max_stars_count": 13,
"path": "extract-hed-wad.py"
},
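`extract-hed-wad.py` above imports `read_string`, `read8`, `read32` and `align` from a `common` module that is not included in this dump. A guess at minimal implementations, assuming little-endian integers and NUL-terminated ASCII names; the real helpers may differ:

```python
# Assumed semantics; the original `common` module is not part of this dump.
import struct


def read8(f):
    """Read one unsigned byte."""
    return struct.unpack("<B", f.read(1))[0]


def read32(f):
    """Read one unsigned 32-bit little-endian integer."""
    return struct.unpack("<I", f.read(4))[0]


def read_string(f):
    """Read a NUL-terminated byte string and decode it as ASCII."""
    chars = bytearray()
    while True:
        b = f.read(1)
        if b in (b"", b"\x00"):
            break
        chars += b
    return chars.decode("ascii")


def align(f, n):
    """Advance the file position to the next multiple of `n` bytes."""
    pos = f.tell()
    if pos % n:
        f.seek(n - (pos % n), 1)  # seek forward relative to current position
```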
{
"content": "\nimport bpy\nimport os\nimport json\nimport sys\n\nargv = sys.argv\nargv = argv[argv.index(\"--\") + 1:]\n\n#FIXME: Parse arguments\n\ncontext = bpy.context\n\ndata_path = argv[0]\n\nprint(\"Path: '\" + data_path + \"'\")\n\n#context.space_data.show_backface_culling = True\n\n\n#create a scene\nscene = bpy.data.scenes.new(\"Import\")\ncamera_data = bpy.data.cameras.new(\"Camera\")\n\ncamera = bpy.data.objects.new(\"Camera\", camera_data)\ncamera.location = (-2.0, 3.0, 3.0)\ncamera.rotation_euler = (422.0, 0.0, 149)\nscene.objects.link(camera)\n\n# do the same for lights etc\n\nscene.update()\n\n# make a new scene with cam and lights linked\nscene.camera = camera\ncontext.screen.scene = scene\ncontext.scene.render.engine = 'BLENDER_GAME'\n\ndef fileContents(filename):\n with open(filename) as f:\n return f.read()\n\n\nenums = json.loads(fileContents(os.path.join(data_path, \"./enums.json\"))) # Load _Enums file [as json]\nobjs = json.loads(fileContents(os.path.join(data_path, \"./table.json\"))) # Load table file [as json]\ndil0 = json.loads(fileContents(os.path.join(data_path, \"./dil0.json\"))) # Load Visual Placement file [as json]\ndil1 = json.loads(fileContents(os.path.join(data_path, \"./dil1.json\"))) # Load Light Placement file [as json]\n\ndef resourceIndexFromName(name):\n return enums[name]['index']\n\ndef resourcePath(resource, extension):\n return str(resourceIndexFromName(resource['resource'])) + \"-\" + str(resource['index']) + \".\" + extension\n\ndef placementFromName(dil, name):\n for placement in dil['objects']:\n a = placement['name']\n b = name\n if a == b:\n return placement\n if a.lower() == b.lower():\n print(\"Case does not match!\")\n return placement\n print(\"Could not find '\" + name + \"'\")\n return None\n\ndef layer(index):\n lm = [False] * 20\n lm[index] = True\n return lm\n\nf = 0.001\ndef importMesh(resourceName, fileIndex):\n resourceIndex = resourceIndexFromName(resourceName)\n filename = str(resourceIndex) + \"-\" + str(fileIndex) + \".obj\"\n\n path = os.path.join(data_path, \"./converted/\" + filename)\n print(\"Loading \" + filename)\n\n # for collisions [floating vertices are kept!]:\n #bpy.ops.import_scene.obj(filepath=path, split_mode='OFF', use_split_objects=True, axis_forward='Y', axis_up='Z', use_image_search=False, filter_glob=\"*.obj;*.mtl\")\n # for visuals:\n bpy.ops.import_scene.obj(filepath=path, split_mode='ON', use_split_groups=True, axis_forward='Y', axis_up='Z', use_image_search=False, filter_glob=\"*.obj;*.mtl\")\n\n imported = bpy.context.selected_objects\n\n\n\n print(\"imported:\" + str(imported))\n #bpy.data.objects[]\n for i in imported:\n i.scale *= f\n\n return imported\n\ndef dilPlaceInstance(imported, placement):\n for i in imported:\n p = placement['position']\n xyz = (f * p[0], f * p[1], f * p[2])\n print(str(xyz))\n i.location = xyz\n r = placement['angle']\n i.rotation_mode = 'YXZ';\n i.rotation_euler = (r[0], r[2], r[1])\n s = placement['scale']\n #i.scale = (f * s[0], f * s[1], f * s[2]) # Doesn't seem to be used?!\n\ndef dilPlace(imported, obj):\n\n # Check if this is placed using DIL or directly\n try:\n dilPos = [d['data'] for d in obj if d['type'] == 'DilPos'][0] #FIXME: Turn this into a function!\n except:\n dilPos = None\n\n if dilPos == None:\n #FIXME: TODO\n print(\"Want this at standard loc [\" + str(obj) + \"]\")\n placement = [d['data'] for d in obj if d['type'] == 'Pos'][0]\n dilPlaceInstance(imported, placement)\n #\"Pos\": {\n # \"position\": [ -620.434143, 1590.591309, -200.085876 ],\n # 
\"scale\": [ 1.000000, 1.000000, 1.000000 ],\n # \"angle\": [ 0.000000, 0.000000, 0.000000 ]\n #},\n\n else:\n #FIXME: TODO\n print(\"pos: \" + str(dilPos) + \" for \" + str(obj))\n var = placementFromName(dil0, dilPos['dilName'])\n if var == None:\n #FIXME: Delete object now?!\n return\n placements = var['placements']\n for placement in placements:\n print(\"Want this at \" + str(placement))\n # FIXME: Place this object at all locations?!\n placement = placements[dilPos['dilIndex']]\n dilPlaceInstance(imported, placement)\n\n#print(objs)\n \n\n# Load lights from PBTable\nfor key in objs:\n obj = objs[key]\n if obj['type'] != \"PBTable\":\n continue\n obj = obj['data']\n\n for d in obj:\n\n if d['type'] == \"Dil\":\n visualDil = d['data']\n\n if d['type'] == \"LightingData\":\n lightDil = d['data'] # FIXME: Remove anything but \"name\" and \"index\" ?!\n lights = d['data']['lights']\n\n\nprint(\"Would be using \" + resourcePath(visualDil, \"json\") + \" as DIL Visual placement\")\nprint(\"Would be using \" + resourcePath(lightDil, \"json\") + \" as DIL Light placement\")\n\n# Place lights!\nif True:\n for light in lights:\n var = placementFromName(dil1, light['dilName'])\n if var == None:\n #FIXME: Delete object now?!\n continue\n\n print(var)\n placement = var['placements'][light['dilIndex']]\n \n # What about angle and scale?!\n p = placement['position']\n xyz = (f * p[0], f * p[1], f * p[2])\n\n bpy.ops.object.lamp_add(type='POINT', radius=1, view_align=False, location=xyz, layers=[True] * 20)\n bpy.context.object.data.name = light['dilName'] + \";\" + str(light['dilIndex']) + \";\" + light['type'] + \";\" + str(light['index'])\n bpy.context.object.data.color = light['color'] # RGB Color (FIXME: Normalize?!)\n #FIXME: unk0\n bpy.context.object.data.energy = light['brightness'] * 0.001 # Brightness [and a hacky factor I came up with]\n # bpy.context.object.data.falloff_type = 'CONSTANT' # Falloff mode\n # bpy.context.object.data.distance = 5.05 # Falloff style\n # bpy.context.object.data.use_sphere = True # Use bounding sphere for lamp influence\n\n\n# Iterate over objects and find PBObject entries\nfor key in objs:\n obj = objs[key]\n if obj['type'] != \"PBObject\":\n continue\n obj = obj['data']\n\n for d in obj:\n\n if d['type'] == \"Object\":\n print(\"Object: \" + str(obj))\n\n obj_object = d['data']\n t = obj_object['type']\n\n print(\"Parsing \" + key)\n\n # Mark bad objects\n\n bad = True\n l = 9\n\n # Visual only\n if t == \"PB_OBJECTTYPE_VISUAL\": l = 0\n\n # Collision and colliding trigger\n if t == \"PB_OBJECTTYPE_COLLISION\": l = 0\n if t == \"PB_OBJECTTYPE_TRAP\": l = 0\n\n # Playfield\n if t == \"PB_OBJECTTYPE_FLOOR\": l = 1\n\n # Lamps\n if t == \"PB_OBJECTTYPE_LAMPSET\": l = 2\n\n # Unknown\n if t == \"PB_OBJECTTYPE_UNIQUE\": l = 3\n\n # Game elements\n if t == \"PB_OBJECTTYPE_PLUNGER\": l = 4\n if t == \"PB_OBJECTTYPE_FLIPPER\": l = 4\n if t == \"PB_OBJECTTYPE_SLINGSHOT\": l = 4\n if t == \"PB_OBJECTTYPE_POPBUMPER\": l = 4\n if t == \"PB_OBJECTTYPE_SPINNER\": l = 4\n if t == \"PB_OBJECTTYPE_TARGET\": l = 4\n if t == \"PB_OBJECTTYPE_KICKER\": l = 4\n if t == \"PB_OBJECTTYPE_STOPPER\": l = 4\n\n if t == \"PB_OBJECTTYPE_DIVERTER_ONOFF\": l = 4 # FIXME: Does this also have visuals?!\n\n # Any object on layer >= 5 doesn't have a collision!\n\n # Game elements (which should be without collision)\n if t == \"PB_OBJECTTYPE_GATE\": l = 5\n if t == \"PB_OBJECTTYPE_GATE_ONEWAY\": l = 5\n if t == \"PB_OBJECTTYPE_WIRE\": l = 5\n\n # Triggers\n if t == \"PB_OBJECTTYPE_MAGNET\": l = 
6\n if t == \"PB_OBJECTTYPE_OPTO\": l = 7\n if t == \"PB_OBJECTTYPE_BALLDRAIN\": l = 8\n\n\n\n\n\n #Not rendering: PB_OBJECTTYPE_BALLDRAIN\n #Not rendering: PB_OBJECTTYPE_COLLISION\n #Not rendering: PB_OBJECTTYPE_DIVERTER_ONOFF\n #Not rendering: PB_OBJECTTYPE_FLIPPER\n #Not rendering: PB_OBJECTTYPE_FLOOR\n #Not rendering: PB_OBJECTTYPE_GATE\n #Not rendering: PB_OBJECTTYPE_GATE_ONEWAY\n #Not rendering: PB_OBJECTTYPE_KICKER\n #Not rendering: PB_OBJECTTYPE_LAMPSET\n #Not rendering: PB_OBJECTTYPE_MAGNET\n #Not rendering: PB_OBJECTTYPE_OPTO\n #Not rendering: PB_OBJECTTYPE_PLUNGER\n #Not rendering: PB_OBJECTTYPE_PLUNGER_EXIT\n #Not rendering: PB_OBJECTTYPE_POPBUMPER\n #Not rendering: PB_OBJECTTYPE_SLINGSHOT\n #Not rendering: PB_OBJECTTYPE_SPINNER\n #Not rendering: PB_OBJECTTYPE_STOPPER\n #Not rendering: PB_OBJECTTYPE_TARGET\n #Not rendering: PB_OBJECTTYPE_TRAP\n #Not rendering: PB_OBJECTTYPE_UNIQUE\n #Not rendering: PB_OBJECTTYPE_VISUAL\n #Not rendering: PB_OBJECTTYPE_WIRE\n\n\n #if t == \"PB_OBJECTTYPE_WIRE\": l = 3\n #if t == \"PB_OBJECTTYPE_WIRE\": l = 3\n #if t == \"PB_OBJECTTYPE_WIRE\": l = 3\n #if t == \"PB_OBJECTTYPE_WIRE\": l = 3\n if bad:\n print(\"Not rendering: \" + t)\n #for i in imported:\n # i.layers = [False] * 20\n # i.layers[19] = True\n #continue\n\n if d['type'] == 'Collision':\n collision = d['data']\n ct = collision['type']\n if ct == 'Sphere':\n #FIXME: collision['mode'] should be \"Manual\"\n x,y,z = collision['position']\n radius = collision['radius']\n #FIXME: Create a new sphere object\n #for i in imported:\n # i.layers[0] = False\n # i.layers[1] = True\n #continue\n #imported = \n\n bpy.ops.mesh.primitive_ico_sphere_add(subdivisions=2, size=f * radius, view_align=False, enter_editmode=False, location=(f * x, f * y, f * z))\n bpy.ops.object.shade_smooth()\n imported = bpy.context.selected_objects\n\n # FIXME: Set physics type to sphere\n\n elif ct == 'Mesh':\n imported = []\n for mesh in collision['data']:\n imported.extend(importMesh(mesh['resource'], mesh['index']))\n print(\"Imported append: \" + str(imported))\n else:\n print(\"Damnit! Unsupported collision type: '\" + ct + \"'\")\n imported = None\n\n if imported:\n print(key + \" => \" + str(imported))\n dilPlace(imported, obj)\n for i in imported:\n i.layers = layer(l + 10)\n i.hide_render = True\n i.draw_type = 'SOLID'\n if l >= 5:\n i.game.physics_type = 'NO_COLLISION'\n \n\n if d['type'] == 'Models':\n for model in d['data']: \n\n imported = importMesh(model['resource'], model['index'])\n for i in imported:\n i.layers = layer(l)\n i.game.physics_type = 'NO_COLLISION'\n\n for i in imported:\n for m in i.material_slots:\n print(\"Slot: \" + str(m))\n m.material.game_settings.alpha_blend = 'ALPHA'\n #FIXME: Only do this if the linked texture has alpha.. 
otherwise this breaks >.< \n\n #meshes = [c for c in imported if c.type == 'MESH']\n #for m in meshes:\n # m.scale *= 0.001\n\n dilPlace(imported, obj)\n\n \n\n# Resize the entire scene\n# FIXME: Should be done while creating the scene or by setting a different scale of the scene\n\n# FIXME: Set material to alpha\n\n# Only show visuals\nscene.layers = [True] * 10 + [False] * 10\n\n#print(\"dir: \" + str(scene.game_settings.gravity))\n#print(\"used: \" + str(scene.use_gravity))\n#scene.game_settings.physics_gravity = (0.0, -0.5, -10.0) # Bugs out!\n\nif False:\n cams = [c for c in context.scene.objects if c.type == 'CAMERA']\n for c in cams:\n context.scene.camera = c \n print(\"Render \", model_path, context.scene.name, c.name)\n context.scene.render.filepath = \"somepathmadeupfrommodelname\"\n bpy.ops.render.render(write_still=True)\n",
"id": "9046258",
"language": "Python",
"matching_score": 2.294398784637451,
"max_stars_count": 9,
"path": "scripts/blender.py"
},
{
"content": "# Copyright (c) 2011 The LibYuv project authors. All Rights Reserved.\n#\n# Use of this source code is governed by a BSD-style license\n# that can be found in the LICENSE file in the root of the source\n# tree. An additional intellectual property rights grant can be found\n# in the file PATENTS. All contributing project authors may\n# be found in the AUTHORS file in the root of the source tree.\n\n{\n 'targets': [\n {\n 'target_name': 'libyuv_unittest',\n 'type': 'executable',\n 'dependencies': [\n 'libyuv.gyp:libyuv',\n # The tests are based on gtest\n 'testing/gtest.gyp:gtest',\n 'testing/gtest.gyp:gtest_main',\n ],\n 'sources': [\n # headers\n 'unit_test/unit_test.h',\n\n # sources\n 'unit_test/compare_test.cc',\n 'unit_test/rotate_test.cc',\n 'unit_test/scale_test.cc',\n 'unit_test/unit_test.cc',\n ],\n 'conditions': [\n ['OS==\"linux\"', {\n 'cflags': [\n '-fexceptions',\n ],\n }],\n ], # conditions\n },\n ], # targets\n}\n\n# Local Variables:\n# tab-width:2\n# indent-tabs-mode:nil\n# End:\n# vim: set expandtab tabstop=2 shiftwidth=2:\n",
"id": "10874060",
"language": "Python",
"matching_score": 0.15801683068275452,
"max_stars_count": 0,
"path": "libyuv_test.gyp"
}
] | 1.648088 |
LUke8544 | [
{
"content": "def 打印(a):\r\n print (a)\r\n\r\ndef 系统(a):\r\n import os\r\n os.system(a)\r\n",
"id": "234409",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "__init__.py"
}
] | 0 |
Stepwisepl | [
{
"content": "from google.cloud import storage\nfrom google.cloud import translate_v2\n\nLANGUAGES_TRANSLATE_TO = {'PL', 'NO', 'ES', 'FR'}\nTMP_INPUT_FILE = '/tmp/input_file'\nTMP_OUTPUT_FILE = '/tmp/output_file'\nFILE_SUFFIX_TO_TRANSLATE = 'to_translate'\n\nstorage_client = storage.Client()\ntranslate_client = translate_v2.Client()\n\n\ndef translateFile(file_data, context):\n file_name: str = file_data['name']\n bucket_name = file_data['bucket']\n\n if not file_name.endswith(FILE_SUFFIX_TO_TRANSLATE):\n print(f'File not ending with \"{FILE_SUFFIX_TO_TRANSLATE}\" suffix: [{file_name}], ignoring.')\n return\n\n bucket = storage_client.bucket(bucket_name)\n # download file to /tmp (the only dir we have access to on google cloud functions)\n bucket.blob(file_name).download_to_filename(TMP_INPUT_FILE)\n\n #read file content into input_str\n input_str = open(TMP_INPUT_FILE).read()\n\n #write translated output into local /tmp\n with open(TMP_OUTPUT_FILE, 'w') as file:\n file.write(\"%s\\n\" % f'EN(original):\\n{input_str}')\n for language in LANGUAGES_TRANSLATE_TO:\n translated = translate_client.translate(\n input_str, target_language=language)\n file.write(\"%s\\n\" % f'{language}:\\n{translated[\"translatedText\"]}')\n\n # upload local output from /tmp into bucket\n output_file_name = f'{file_name.rpartition(FILE_SUFFIX_TO_TRANSLATE)[0]}_translated'\n bucket.blob(output_file_name).upload_from_filename(TMP_OUTPUT_FILE)\n\n\n# for local dev purposes\n# bucketTestData = {\n# \"bucket\":\"gcp-cloud-function-translator\",\n# \"name\":\"test_to_translate\"\n# }\n# translateFile(bucketTestData, None)",
"id": "8140363",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "main.py"
}
] | 0 |
mohaque0 | [
{
"content": "from conans import ConanFile, CMake\nimport os\nimport shutil\n\ndef get_requirements():\n L = \"${conan.requires}\"\n if (L == None or L == \"\"):\n return None\n return tuple(L.split(\",\"))\n\nclass GenericConan(ConanFile):\n name = \"${conan.name}\"\n version = \"${conan.version}\"\n url = \"none\"\n description = \"${conan.description}\"\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\"shared\": [True, False]}\n default_options = \"shared=False\"\n generators = \"cmake\"\n exports_sources = [\"CMakeLists.txt\", \"src/*\"]\n requires = get_requirements()\n\n # We do this in build because \"source\" is only executed by conan once forever.\n def copy_source_files_from(self, module_path):\n dst_dir = os.getcwd()\n\n print \"Copying source from %s to %s\" % (module_path, dst_dir)\n for name in os.listdir(module_path):\n srcpath = os.path.join(module_path, name)\n dstpath = os.path.join(dst_dir, name)\n\n if srcpath.endswith(\"conanfile.py\"):\n continue\n\n if os.path.exists(dstpath):\n if os.path.isdir(dstpath):\n shutil.rmtree(dstpath)\n else:\n os.remove(dstpath)\n\n if os.path.isfile(srcpath):\n shutil.copy(srcpath, dstpath)\n else:\n shutil.copytree(srcpath, dstpath)\n\n def generate_filelist(self):\n print \"Generating filelist.txt in \" + os.getcwd() + \" from \" + self.source_folder\n f = open(\"filelist.txt\", \"w\")\n\n for (path, dirs, files) in os.walk(os.path.join(self.source_folder, \"src\")):\n for filename in files:\n f.write(os.path.join(path, filename).replace(\"\\\\\", \"/\") + \"\\n\")\n\n f.close()\n\n def build(self):\n module_path = \"${conan.module_path}\"\n\n print \"Module: name=%s, version=%s, license=%s\" % (self.name, self.version, self.license)\n print \"Module Folder: \", module_path\n print \"Source Folder: \", self.source_folder\n print \"Current Folder: \", os.getcwd()\n print \"Requirements:\\n\", self.requires\n\n self.copy_source_files_from(module_path)\n self.generate_filelist()\n\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n # Explicit way:\n # self.run('cmake %s/hello %s'\n # % (self.source_folder, cmake.command_line))\n # self.run(\"cmake --build . %s\" % cmake.build_config)\n\n def package_info(self):\n if \"${conan.artifact_type}\" == \"lib\":\n self.cpp_info.libs=[\"${conan.artifact_name}\"]\n\n def package(self):\n self.copy(\"*.h\", dst=\"include\", src=\"src\")\n self.copy(\"*.hh\", dst=\"include\", src=\"src\")\n self.copy(\"*.hpp\", dst=\"include\", src=\"src\")\n self.copy(\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(\"*.dll\", dst=\"bin\", keep_path=False)\n self.copy(\"*.dylib*\", dst=\"lib\", keep_path=False)\n self.copy(\"*.so\", dst=\"lib\", keep_path=False, symlinks=True)\n self.copy(\"*.a\", dst=\"lib\", keep_path=False)\n\n",
"id": "12354568",
"language": "Python",
"matching_score": 4.3981547355651855,
"max_stars_count": 0,
"path": "plugin/src/scripts/conan/conanfile.py"
},
{
"content": "from conans import ConanFile, CMake, tools\nimport os\n\n\nclass UtilConan(ConanFile):\n name = \"Util\"\n version = \"0.1\"\n license = \"MIT\"\n url = \"<EMAIL>\"\n description = \"C++ Utility Library\"\n settings = {\"os\": None, \"compiler\": None, \"build_type\": None, \"arch\": [\"x86_64\"]}\n options = {\"shared\": [False]}\n default_options = \"shared=False\"\n generators = [\"cmake\", \"txt\"]\n exports_sources=[\"src/*\", \"CMakeLists.txt\", \"cmake/*\"]\n\n def generate_filelist(self):\n print(\"Generating filelist.txt\")\n f = open(\"filelist.txt\", \"w\")\n\n for (path, dirs, files) in os.walk(\"src\"):\n for filename in files:\n f.write(os.path.join(path, filename).replace(\"\\\\\", \"/\") + \"\\n\")\n\n f.close()\t\n\n def build(self):\n self.generate_filelist()\n\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def package(self):\n self.copy(\"*.h\", dst=\"include\", src=\"src\")\n self.copy(\"*.hpp\", dst=\"include\", src=\"src\")\n self.copy(\"*.a\", dst=\"lib\", keep_path=False)\n self.copy(\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(\"*.dll\", dst=\"bin\", keep_path=False)\n self.copy(\"*.so\", dst=\"bin\", keep_path=False)\n self.copy(\"*.dylib\", dst=\"bin\", keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = [\"Util\"]\n",
"id": "11111159",
"language": "Python",
"matching_score": 2.193930149078369,
"max_stars_count": 0,
"path": "conanfile.py"
},
{
"content": "from conans import ConanFile, CMake, tools\nimport os\n\n\nclass TestUtilConan(ConanFile):\n settings = {\"os\": None, \"compiler\": None, \"build_type\": None, \"arch\": [\"x86_64\"]}\n generators = [\"cmake\", \"txt\"]\n requires = \"catch2/2.2.2@bincrafters/stable\"\n\n def generate_testlist(self):\n print(\"Working Directory: \" + os.getcwd())\n print(\"Source Folder: \" + self.source_folder)\n print(\"Generating testlist.txt in \" + os.getcwd() + \" from \" + self.source_folder)\n f = open(\"testlist.txt\", \"w\")\n\n for (path, dirs, files) in os.walk(self.source_folder):\n if (path.startswith(\"build\") or path.startswith(\"./build\") or \"CMakeFiles\" in path):\n continue\n\n for filename in files:\n if (filename.endswith(\"cpp\") or filename.endswith(\"hpp\")):\n f.write(os.path.join(path, filename).replace(\"\\\\\", \"/\") + \"\\n\")\n\n f.close()\n\n def build(self):\n self.generate_testlist()\n\n cmake = CMake(self)\n cmake.configure()\n cmake.build()\n\n def imports(self):\n self.copy(\"*.dll\", dst=\"bin\", src=\"bin\")\n self.copy(\"*.dylib*\", dst=\"bin\", src=\"lib\")\n self.copy('*.so*', dst='bin', src='lib')\n\n def test(self):\n if not tools.cross_building(self.settings):\n os.chdir(\"bin\")\n self.run(\".%stests\" % os.sep)\n",
"id": "1335991",
"language": "Python",
"matching_score": 0.297473669052124,
"max_stars_count": 0,
"path": "test/conanfile.py"
},
{
"content": "#!/usr/bin/env python\n\nimport sys\nimport os\nimport re\n\nTARGET_HEADER_FILE = \"build/includes/cradle.hpp\" \nPRAGMA_ONCE_MACRO = \"#pragma once\"\nINCLUDE_SEARCH_PATH = \"includes\"\nINCLUDE_REGEX_STRING = r'#include\\s+[\"<]([^\"\\n]*)[\">]'\n\nclass HeaderFile:\n def __init__(self, path):\n self.path = path\n self.processed = False\n\n def get_path(self):\n return self.path\n\n def mark_processed(self):\n self.processed = True\n\n def is_processed(self):\n return self.processed\n\ndef create_context():\n headers = {}\n\n for dirname, subdirs, files in os.walk(INCLUDE_SEARCH_PATH):\n for filename in files:\n header_file_path = os.path.join(dirname, filename)\n key = header_file_path[len(INCLUDE_SEARCH_PATH + \"/\"):]\n key = key.replace(\"\\\\\", \"/\")\n\n headers[key] = HeaderFile(header_file_path)\n\n return headers\n\ndef build_helper(target, context, headerFile):\n \"\"\"Process file at path source as a header. Resolves its includes against context and outputs to target.\"\"\"\n\n # Don't reprocess a file that has been processed.\n if headerFile.processed:\n return\n else:\n headerFile.mark_processed()\n\n f = open(headerFile.get_path())\n L = f.readlines()\n f.close()\n\n m = re.compile(INCLUDE_REGEX_STRING)\n\n for line in L:\n include = m.match(line)\n if include and include.group(1) in context:\n # Recursively handle includes we know about.\n build_helper(target, context, context[include.group(1)])\n elif line.startswith(PRAGMA_ONCE_MACRO):\n pass\n else:\n target.write(line)\n\ndef build(context, targetfile):\n print(\"Writing unified header to:\", targetfile)\n\n # Make the parent directory.\n target_parent_dir = os.path.dirname(targetfile)\n if not os.path.exists(target_parent_dir):\n os.makedirs(target_parent_dir)\n\n target = open(targetfile, \"w\")\n\n for key in context:\n build_helper(target, context, context[key])\n\n target.close()\n\ndef main(args):\n context = create_context()\n\n print(\"Creating unified header from:\")\n for header in sorted(context.keys()):\n print('\\t', header)\n print()\n\n build(context, TARGET_HEADER_FILE)\n \nif __name__==\"__main__\":\n main(sys.argv)\n",
"id": "7702758",
"language": "Python",
"matching_score": 0.22307690978050232,
"max_stars_count": 0,
"path": "compile.py"
}
] | 1.245702 |
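The templated recipe above (`plugin/src/scripts/conan/conanfile.py`) turns the comma-separated `${conan.requires}` placeholder into the tuple Conan expects via `get_requirements()`. A standalone sketch of that parsing step, assuming the placeholder has already been substituted; the `demo/testing` user/channel below is made up for illustration, while the `catch2` reference is the one used by the test recipe:

```python
# Standalone sketch of the get_requirements() helper from the templated conanfile.
def get_requirements(substituted):
    # Empty or missing substitution means the recipe declares no requirements.
    if substituted is None or substituted == "":
        return None
    return tuple(substituted.split(","))

print(get_requirements("Util/0.1@demo/testing,catch2/2.2.2@bincrafters/stable"))
# ('Util/0.1@demo/testing', 'catch2/2.2.2@bincrafters/stable')
print(get_requirements(""))  # None
```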
MidAtlanticPortal | [
{
"content": "from django.shortcuts import get_object_or_404, render_to_response\r\nfrom django.template import RequestContext\r\nfrom data_manager.models import *\r\nfrom portal.base.models import PortalImage\r\n\r\n# hack for POR-224, until POR-206\r\ndef wagtail_feature_image(self):\r\n image = PortalImage.objects.filter(tags__name__in=[\"theme\"]).filter(tags__name__in=[self.name]).first()\r\n return image or None\r\n\r\nTheme.wagtail_feature_image = wagtail_feature_image\r\n\r\ndef theme_query():\r\n return Theme.objects.filter(visible=True).exclude(name='companion').extra(\r\n select={\r\n 'layer_count': \"SELECT COUNT(*) FROM data_manager_layer_themes as mm LEFT JOIN data_manager_layer as l ON mm.layer_id = l.id WHERE mm.theme_id = data_manager_theme.id AND l.layer_type != 'placeholder'\"\r\n }\r\n ).order_by('order')\r\n\r\ndef theme(request, theme_slug):\r\n from django.contrib.sites.shortcuts import get_current_site\r\n site = get_current_site(request)\r\n theme = get_object_or_404(theme_query(), name=theme_slug)\r\n template = 'data_catalog/theme.html'\r\n # layers = [x.dictCache(site.pk) for x in theme.layer_set.all().exclude(layer_type='placeholder').exclude(is_sublayer=True).order_by('order')]\r\n layers = []\r\n for layer in theme.layer_set.all().exclude(layer_type='placeholder').exclude(is_sublayer=True).order_by('name'):\r\n layers.append(layer.shortDict(site.pk))\r\n\r\n return render_to_response(\r\n template,\r\n {\r\n 'theme': theme,\r\n 'layers': layers,\r\n },\r\n context_instance=RequestContext(request)\r\n );\r\n",
"id": "10079048",
"language": "Python",
"matching_score": 1.7634152173995972,
"max_stars_count": 4,
"path": "marco/portal/data_catalog/views.py"
},
{
"content": "import os\n\nfrom django.conf.urls import patterns, include, url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nfrom django.contrib import admin\n\nfrom django.views.generic.base import RedirectView, TemplateView\n\nfrom wagtail.wagtailadmin import urls as wagtailadmin_urls\nfrom wagtail.wagtailsearch import urls as wagtailsearch_urls\nfrom wagtail.wagtaildocs import urls as wagtaildocs_urls\nfrom wagtail.wagtailcore import urls as wagtail_urls\nfrom wagtail.contrib.wagtailsitemaps.views import sitemap\nfrom wagtail.wagtailimages import urls as wagtailimages_urls\n\nimport mapgroups.urls\nimport accounts.urls\nimport explore.urls\n\nadmin.autodiscover()\n\n\n# Register search signal handlers\nfrom wagtail.wagtailsearch.signal_handlers import register_signal_handlers as wagtailsearch_register_signal_handlers\nwagtailsearch_register_signal_handlers()\n\n\nurlpatterns = patterns('',\n url('^sitemap\\.xml$', sitemap),\n\n url(r'^django-admin/', include(admin.site.urls)),\n\n url(r'^rpc$', 'rpc4django.views.serve_rpc_request'),\n\n # https://github.com/omab/python-social-auth/issues/399\n # I want the psa urls to be inside the account urls, but PSA doesn't allow\n # nested namespaces. It will likely be fixed in 0.22\n url('^account/auth/', include('social_django.urls', namespace='social')),\n url(r'^account/', include(accounts.urls.urls(namespace='account'))),\n url(r'^collaborate/groups/', include(mapgroups.urls.urls(namespace='groups'))),\n url(r'^groups/', include(mapgroups.urls.urls(namespace='groups'))),\n url(r'^g/', RedirectView.as_view(url='/groups/')), # 301\n\n url(r'^admin/', include(wagtailadmin_urls)),\n url(r'^search/', 'portal.base.views.search'),\n url(r'^documents/', include(wagtaildocs_urls)),\n\n # url(r'^data-catalog/', include('portal.data_catalog.urls')),\n url(r'^data-catalog/([A-Za-z0-9_-]+)/$', 'portal.data_catalog.views.theme'),\n url(r'^data-catalog/[A-Za-z0-9_-]*/', include('explore.urls')),\n url(r'^data_manager/', include('data_manager.urls')),\n url(r'^styleguide/$', 'marco_site.views.styleguide', name='styleguide'),\n url(r'^planner/', include('visualize.urls')),\n url(r'^embed/', include('visualize.urls')),\n url(r'^visualize/', include('visualize.urls')),\n url(r'^features/', include('features.urls')),\n url(r'^scenario/', include('scenarios.urls')),\n url(r'^drawing/', include('drawing.urls')),\n url(r'^proxy/', include('mp_proxy.urls')),\n\n url(r'^join/', RedirectView.as_view(url='/account/register/')),\n\n url(r'^images/', include(wagtailimages_urls)),\n url(r'', include(wagtail_urls)),\n)\n\n\nif settings.DEBUG:\n from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n urlpatterns += staticfiles_urlpatterns()\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"id": "5628032",
"language": "Python",
"matching_score": 0.39671891927719116,
"max_stars_count": 4,
"path": "marco/marco/urls.py"
},
{
"content": "from itertools import izip_longest\nimport json\nfrom data_manager.models import Layer\n\ntry:\n import urlparse as parse\nexcept ImportError:\n from urllib import parse\n\nfrom django.db import models\nfrom django.core.exceptions import ValidationError\n\nfrom wagtail.wagtailcore.models import Orderable\nfrom wagtail.wagtailcore.fields import RichTextField\nfrom wagtail.wagtailsearch import index\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel,InlinePanel,MultiFieldPanel\nfrom modelcluster.fields import ParentalKey\n\nfrom portal.base.models import PageBase, DetailPageBase, MediaItem\n\ndef grouper(iterable, n, fillvalue=None):\n \"\"\"Collect data into fixed-length chunks or blocks.\n See: https://docs.python.org/2/library/itertools.html#recipes\n \"\"\"\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\n args = [iter(iterable)] * n\n return izip_longest(fillvalue=fillvalue, *args)\n\n# The abstract model for ocean story sections, complete with panels\nclass OceanStorySectionBase(MediaItem):\n title = models.CharField(max_length=255, blank=True)\n body = RichTextField(blank=True)\n map_state = models.TextField()\n map_legend = models.BooleanField(default=False, help_text=(\"Check to \"\n \"display the map's legend to the right of the the section text.\"))\n\n panels = [\n FieldPanel('title'),\n MultiFieldPanel(MediaItem.panels, \"media\"),\n FieldPanel('body', classname=\"full\"),\n FieldPanel('map_state'),\n FieldPanel('map_legend'),\n ]\n\n index_fields = MediaItem.index_fields + (\n 'title',\n 'body',\n )\n\n class Meta:\n abstract = True\n\n def parsed_map_state(self):\n if not self.map_state.startswith(\"http\"):\n return json.loads(self.map_state)\n\n o = parse.urlparse(self.map_state)\n data_layers = {}\n params = parse.parse_qs(o.fragment)\n\n dls = params.pop('dls[]', [])\n\n # dls[]=[true,1,54,true,0.5,42,...] 
->\n # dls[] = [(true, 1, 54), (true, 0.5, 42), ...]\n for visible, opacity, layer_id in grouper(dls, 3):\n visible = visible.lower() in ('true', '1')\n opacity = float(opacity)\n try:\n int(layer_id)\n except ValueError:\n # IDs that can't be converted to integers are features, e.g.,\n # 'drawing_aoi_13', which can't be displayed on ocean story\n # maps, so just continue.\n continue\n\n layer = Layer.objects.filter(id=layer_id)\n layer = layer.values('legend', 'show_legend', 'name', 'layer_type', 'url', 'arcgis_layers')\n\n # layer ID must be a string here\n data_layers[layer_id] = {}\n if not layer:\n continue\n layer = layer[0]\n\n data_layers[layer_id]['id'] = layer_id\n data_layers[layer_id]['name'] = layer['name']\n if layer['show_legend']:\n data_layers[layer_id]['legend'] = layer['legend']\n else:\n data_layers[layer_id]['legend'] = False\n data_layers[layer_id]['legend_source'] = 'img'\n data_layers[layer_id]['arcgis_layers'] = layer['arcgis_layers']\n if (layer['show_legend'] and (layer['legend'] == u'' or layer['legend'] == None)) and layer['layer_type'] == 'ArcRest' and '/export' in layer['url']:\n data_layers[layer_id]['legend_source'] = 'url'\n data_layers[layer_id]['legend'] = \"%s\" % layer['url'].split('/export')[0]\n\n s = {\n 'view': {\n 'center': (params.get('x', [-73.24])[0],\n params.get('y', [38.93])[0]),\n 'zoom': params.get('z', [7])[0],\n },\n 'url': self.map_state,\n 'baseLayer': params.get('basemap', ['Ocean'])[0],\n 'dataLayers': data_layers,\n }\n\n return s\n\n def clean(self):\n super(OceanStorySectionBase, self).clean()\n try:\n self.parsed_map_state()\n except Exception as e:\n raise ValidationError({'map_state': 'Invalid map state'})\n\n# The real model which combines the abstract model, an\n# Orderable helper class, and what amounts to a ForeignKey link\n# to the model we want to add sections to (OceanStory)\nclass OceanStorySection(Orderable, OceanStorySectionBase):\n page = ParentalKey('OceanStory', related_name='sections')\n\nclass OceanStories(PageBase):\n subpage_types = ['OceanStory']\n\n search_fields = (index.SearchField('description'),)\n\n def get_detail_children(self):\n return OceanStory.objects.child_of(self)\n\nclass OceanStory(DetailPageBase):\n parent_page_types = ['OceanStories']\n\n display_home_page = models.BooleanField(default=True, help_text=(\"Check to \"\n \"display this ocean story on the home page\"))\n hook = models.CharField(max_length=256, blank=True, null=True)\n explore_title = models.CharField(max_length=256, blank=True, null=True)\n explore_url = models.URLField(max_length=4096, blank=True, null=True)\n\n search_fields = DetailPageBase.search_fields + (\n index.SearchField('description'),\n index.SearchField('hook'),\n index.SearchField('get_sections_search_text'),\n )\n\n def as_json(self):\n # try:\n o = {'sections': [s.parsed_map_state() for s in self.sections.all()]}\n # except:\n # o = {'sections': []}\n return json.dumps(o)\n\n def get_siblings(self, inclusive=True):\n return self.__class__.objects.sibling_of(self, inclusive)\n\n def os_next_sibling(self):\n return self.get_next_siblings().live().filter(display_home_page=True).first() or \\\n self.get_siblings().live().filter(display_home_page=True).first()\n\n def os_prev_sibling(self):\n return self.get_prev_siblings().live().filter(display_home_page=True).first() or \\\n self.get_siblings().live().filter(display_home_page=True).last()\n\n def is_first_sibling(self):\n return len(self.get_prev_siblings().live().filter(display_home_page=True)) == 
0\n\n\nOceanStory.content_panels = DetailPageBase.content_panels + [\n FieldPanel('display_home_page'),\n MultiFieldPanel([FieldPanel('hook'), FieldPanel('explore_title'), FieldPanel('explore_url')], \"Map overlay\"),\n InlinePanel('sections', label=\"Sections\" ),\n]\n",
"id": "10703663",
"language": "Python",
"matching_score": 5.648272514343262,
"max_stars_count": 4,
"path": "marco/portal/ocean_stories/models.py"
},
{
"content": "from django.db import models\nfrom django.dispatch.dispatcher import receiver\nfrom django.db.models.signals import pre_delete\nfrom django.core.exceptions import ValidationError\nfrom django.utils.safestring import mark_safe\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailcore.fields import RichTextField\nfrom wagtail.wagtailsearch import index\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel,MultiFieldPanel\nfrom wagtail.wagtailimages.edit_handlers import ImageChooserPanel\nfrom wagtail.wagtailimages.models import AbstractImage, AbstractRendition, Image\n\n\n# Portal defines its own custom image class to replace wagtailimages.Image,\n# providing various additional data fields\n# see https://github.com/torchbox/verdant-rca/blob/staging/django-verdant/rca/models.py\nclass PortalImage(AbstractImage):\n creator = models.CharField(max_length=255, blank=True)\n creator_URL = models.URLField(blank=True)\n\n search_fields = [x for x in AbstractImage.search_fields] + [\n index.SearchField('creator'),\n ]\n\n admin_form_fields = Image.admin_form_fields + (\n 'creator',\n 'creator_URL'\n )\n\n @classmethod\n def creatable_subpage_models(cls):\n import ipdb; ipdb.set_trace()\n print(cls)\n\n# Receive the pre_delete signal and delete the file associated with the model instance.\n@receiver(pre_delete, sender=PortalImage)\ndef image_delete(sender, instance, **kwargs):\n # Pass false so FileField doesn't save the model.\n instance.file.delete(False)\n\nclass PortalRendition(AbstractRendition):\n image = models.ForeignKey(PortalImage, related_name='renditions')\n\n class Meta:\n unique_together = (\n ('image', 'filter', 'focal_point_key'),\n )\n\n# Receive the pre_delete signal and delete the file associated with the model instance.\n@receiver(pre_delete, sender=PortalRendition)\ndef rendition_delete(sender, instance, **kwargs):\n # Pass false so FileField doesn't save the model.\n instance.file.delete(False)\n\nclass PageSection(models.Model):\n class Meta:\n abstract = True\n\n index_fields = ()\n\n def get_search_text(self):\n return '\\n'.join(getattr(self, field) for field in self.index_fields)\n\n\nclass MediaItem(PageSection):\n media_position_choices = (\n ('left','left'),\n ('right','right'),\n ('full','full'),\n )\n media_image = models.ForeignKey(\n 'base.PortalImage',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n media_embed_url = models.URLField(blank=True, help_text=(mark_safe(\"The URL to a \"\n \"video that you'd like to embed, e.g., https://vimeo.com/121095661.\")))\n media_caption = models.CharField(max_length=255, blank=True)\n media_position = models.CharField(max_length=8, choices=media_position_choices, default=media_position_choices[0][0])\n\n index_fields = PageSection.index_fields + (\n 'media_caption',\n )\n\n panels = [\n ImageChooserPanel('media_image'),\n FieldPanel('media_embed_url'),\n FieldPanel('media_caption'),\n FieldPanel('media_position'),\n ]\n\n class Meta:\n abstract = True\n\n def clean(self):\n if self.media_image is not None and self.media_embed_url != '':\n raise ValidationError({'media_image': '', 'media_embed_url': 'Provide either an image or an embed URL, but not both.'})\n\nclass PageBase(Page):\n is_abstract = True\n class Meta:\n abstract = True\n\n description = RichTextField(blank=True, null=True)\n search_fields = [x for x in Page.search_fields] + [ # Inherit search_fields from Page\n index.SearchField('description'),\n ]\n\n def get_sections_search_text(self):\n return 
'\\n'.join(section.get_search_text() for section in self.sections.all())\n\n content_panels = [\n MultiFieldPanel([\n FieldPanel('title', classname=\"title\"),\n FieldPanel('description'),\n ], 'Page')\n ]\n\n def portal_next_sibling(self):\n return self.get_next_siblings().live().first() or self.get_siblings().live().first()\n\n def portal_prev_sibling(self):\n return self.get_prev_siblings().live().first() or self.get_siblings().live().last()\n\nclass DetailPageBase(PageBase):\n is_abstract = True\n class Meta:\n abstract = True\n\n feature_image = models.ForeignKey(\n PortalImage,\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n\n search_fields = (index.SearchField('description'),)\n\n subpage_types = []\n content_panels = PageBase.content_panels + [\n MultiFieldPanel([\n ImageChooserPanel('feature_image'),\n ], 'Detail')\n ]\n",
"id": "605261",
"language": "Python",
"matching_score": 3.7977452278137207,
"max_stars_count": 4,
"path": "marco/portal/base/models.py"
},
{
"content": "from django.db import models\n\nfrom wagtail.wagtailcore.fields import RichTextField\nfrom wagtail.wagtailadmin.edit_handlers import FieldPanel\nfrom wagtail.wagtailsearch import index\n\nfrom portal.base.models import PageBase\n\nclass Page(PageBase):\n\n body = RichTextField()\n search_fields = PageBase.search_fields + [\n index.SearchField('body'),\n ]\n content_panels = PageBase.content_panels + [\n FieldPanel('body', classname=\"full\"),\n ]\n",
"id": "4126631",
"language": "Python",
"matching_score": 2.068042516708374,
"max_stars_count": 4,
"path": "marco/portal/pages/models.py"
}
] | 2.068043 |
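`OceanStorySectionBase.parsed_map_state()` above decodes the `dls[]` fragment parameter by chunking it into `(visible, opacity, layer_id)` triplets with the `grouper` recipe. A small self-contained sketch of that step; the URL below is hypothetical, with layer ids 54 and 42 taken from the comment in the original code, and `zip_longest` standing in for the Python 2 `izip_longest`:

```python
# Sketch of the dls[] parsing used in parsed_map_state(); example URL is made up.
from itertools import zip_longest
from urllib import parse

def grouper(iterable, n, fillvalue=None):
    # Collect data into fixed-length chunks, as in the original recipe.
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)

url = ("http://portal.example/visualize/"
       "#x=-73.24&y=38.93&z=7"
       "&dls[]=true&dls[]=1&dls[]=54&dls[]=true&dls[]=0.5&dls[]=42")
params = parse.parse_qs(parse.urlparse(url).fragment)

for visible, opacity, layer_id in grouper(params["dls[]"], 3):
    print(visible.lower() in ("true", "1"), float(opacity), layer_id)
# True 1.0 54
# True 0.5 42
```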
wodutoit | [
{
"content": "import logging\nimport azure.functions as func\n\ndef main(req: func.HttpRequest, connectionInfoJson: str) -> func.HttpResponse:\n return func.HttpResponse(\n connectionInfoJson,\n status_code=200,\n headers={\n 'Content-type': 'application/json'\n }\n )\n",
"id": "6773014",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "src/chat/python/negotiate/__init__.py"
}
] | 0 |
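The `negotiate` handler above simply echoes its `connectionInfoJson` argument back as a JSON response. A hedged test sketch of that behaviour; the import path and the connection-info payload are assumptions for illustration, and in Azure the second argument would be supplied by an input binding rather than passed manually:

```python
# Minimal local test sketch for the negotiate handler shown above.
import json
import azure.functions as func
from negotiate import main  # the __init__.py above, assumed importable as a package

req = func.HttpRequest(method="POST", url="/api/negotiate", body=b"")
info = json.dumps({"url": "https://example.service.signalr.net", "accessToken": "<token>"})

resp = main(req, info)
assert resp.status_code == 200
assert json.loads(resp.get_body()) == json.loads(info)  # payload is echoed back verbatim
```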
Oeaps | [
{
"content": "\"\"\"\nutilities for model management in tf/keras\n\"\"\"\n\n# python imports\nimport itertools\nfrom tempfile import NamedTemporaryFile\n\n# third party imports\nimport numpy as np\nfrom tqdm import tqdm_notebook as tqdm\nimport tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow.keras.backend as K\nimport tensorflow.keras.utils\n\n\ndef stack_models(models, connecting_node_ids=None):\n \"\"\"\n stacks keras models sequentially without nesting the models into layers\n (the nominal behaviour in keras as of 1/13/2018 is to nest models)\n This preserves the layers (i.e. does not copy layers). This means that if you modify the\n original layer weights, you are automatically affecting the new stacked model.\n\n Parameters:\n models: a list of models, in order of: [input_model, second_model, ..., final_output_model]\n connecting_node_ids (optional): a list of connecting node pointers from Nth model to N+1th model\n\n Returns:\n new stacked model pointer\n \"\"\"\n\n output_tensors = models[0].outputs\n stacked_inputs = [*models[0].inputs]\n\n # go through models 1 onwards and stack with current graph\n for mi in range(1, len(models)):\n \n # prepare input nodes - a combination of \n new_input_nodes = list(models[mi].inputs)\n stacked_inputs_contrib = list(models[mi].inputs)\n\n if connecting_node_ids is None: \n conn_id = list(range(len(new_input_nodes)))\n assert len(new_input_nodes) == len(models[mi-1].outputs), \\\n 'argument count does not match'\n else:\n conn_id = connecting_node_ids[mi-1]\n\n for out_idx, ii in enumerate(conn_id):\n new_input_nodes[ii] = output_tensors[out_idx]\n stacked_inputs_contrib[ii] = None\n \n output_tensors = mod_submodel(models[mi], new_input_nodes=new_input_nodes)\n stacked_inputs = stacked_inputs + stacked_inputs_contrib\n\n stacked_inputs_ = [i for i in stacked_inputs if i is not None]\n # check for unique, but keep order:\n stacked_inputs = []\n for inp in stacked_inputs_:\n if inp not in stacked_inputs:\n stacked_inputs.append(inp)\n new_model = keras.models.Model(stacked_inputs, output_tensors)\n return new_model\n\n\ndef mod_submodel(orig_model,\n new_input_nodes=None,\n input_layers=None):\n \"\"\"\n modify (cut and/or stitch) keras submodel\n\n layer objects themselved will be untouched - the new model, even if it includes, \n say, a subset of the previous layers, those layer objects will be shared with\n the original model\n\n given an original model:\n model stitching: given new input node(s), get output tensors of having pushed these \n nodes through the model\n \n model cutting: given input layer (pointers) inside the model, the new input nodes\n will match the new input layers, hence allowing cutting the model\n\n Parameters:\n orig_model: original keras model pointer\n new_input_nodes: a pointer to a new input node replacement\n input_layers: the name of the layer in the original model to replace input nodes\n \n Returns:\n pointer to modified model\n \"\"\"\n\n def _layer_dependency_dict(orig_model):\n \"\"\"\n output: a dictionary of all layers in the orig_model\n for each layer:\n dct[layer] is a list of lists of layers.\n \"\"\"\n\n if hasattr(orig_model, 'output_layers'):\n out_layers = orig_model.output_layers\n out_node_idx = orig_model.output_layers_node_indices\n node_list = [ol._inbound_nodes[out_node_idx[i]] for i, ol in enumerate(out_layers)]\n\n else:\n out_layers = orig_model._output_layers\n \n node_list = []\n for i, ol in enumerate(orig_model._output_layers):\n node_list += ol._inbound_nodes\n node_list 
= list(set(node_list ))\n \n dct = {}\n dct_node_idx = {}\n while len(node_list) > 0:\n node = node_list.pop(0)\n node_input_layers = node.inbound_layers\n node_indices = node.node_indices\n if not isinstance(node_input_layers, (list, tuple)):\n node_input_layers = [node_input_layers]\n node_indices = [node_indices]\n \n add = True\n # if not empty. we need to check that we're not adding the same layers through the same node.\n if len(dct.setdefault(node.outbound_layer, [])) > 0:\n for li, layers in enumerate(dct[node.outbound_layer]):\n if layers == node.inbound_layers and \\\n dct_node_idx[node.outbound_layer][li] == node_indices:\n add = False\n break\n if add:\n dct[node.outbound_layer].append(node_input_layers)\n dct_node_idx.setdefault(node.outbound_layer, []).append(node_indices)\n # append is in place\n\n # add new node\n \n for li, layer in enumerate(node_input_layers):\n if hasattr(layer, '_inbound_nodes'):\n node_list.append(layer._inbound_nodes[node_indices[li]])\n \n return dct\n\n def _get_new_layer_output(layer, new_layer_outputs, inp_layers):\n \"\"\"\n (recursive) given a layer, get new outbound_nodes based on new inbound_nodes\n\n new_layer_outputs is a (reference) dictionary that we will be adding\n to within the recursion stack.\n \"\"\"\n\n if layer not in new_layer_outputs:\n\n if layer not in inp_layers:\n raise Exception('layer %s is not in inp_layers' % layer.name)\n\n # for all input layers to this layer, gather their output (our input)\n for group in inp_layers[layer]:\n input_nodes = [None] * len(group)\n for li, inp_layer in enumerate(group):\n if inp_layer in new_layer_outputs:\n input_nodes[li] = new_layer_outputs[inp_layer]\n else: # recursive call\n input_nodes[li] = _get_new_layer_output(inp_layer, new_layer_outputs, inp_layers)\n\n # layer call\n if len(input_nodes) == 1:\n new_layer_outputs[layer] = layer(*input_nodes)\n else:\n new_layer_outputs[layer] = layer(input_nodes)\n\n return new_layer_outputs[layer]\n\n\n\n # for each layer create list of input layers\n inp_layers = _layer_dependency_dict(orig_model)\n\n # get input layers\n # These layers will be 'ignored' in that they will not be called!\n # instead, the outbound nodes of the layers will be the input nodes\n # computed below or passed in\n if input_layers is None: # if none provided, search for them\n # InputLayerClass = keras.engine.topology.InputLayer\n InputLayerClass = type(tf.keras.layers.InputLayer())\n input_layers = [l for l in orig_model.layers if isinstance(l, InputLayerClass)]\n\n else:\n if not isinstance(input_layers, (tuple, list)):\n input_layers = [input_layers]\n for idx, input_layer in enumerate(input_layers):\n # if it's a string, assume it's layer name, and get the layer pointer\n if isinstance(input_layer, str):\n input_layers[idx] = orig_model.get_layer(input_layer)\n\n # process new input nodes\n if new_input_nodes is None:\n input_nodes = list(orig_model.inputs)\n else:\n input_nodes = new_input_nodes\n assert len(input_nodes) == len(input_layers), \\\n 'input_nodes (%d) and input_layers (%d) have to match' % (len(input_nodes), len(input_layers))\n\n # initialize dictionary of layer:new_output_node\n # note: the input layers are not called, instead their outbound nodes\n # are assumed to be the given input nodes. 
If we call the nodes, we can run\n # into multiple-inbound-nodes problems, or if we completely skip the layers altogether\n # we have problems with multiple inbound input layers into subsequent layers\n new_layer_outputs = {}\n for i, input_layer in enumerate(input_layers):\n new_layer_outputs[input_layer] = input_nodes[i]\n\n # recursively go back from output layers and request new input nodes\n output_layers = []\n for layer in orig_model.layers:\n if hasattr(layer, '_inbound_nodes'):\n for i in range(len(layer._inbound_nodes)):\n if layer.get_output_at(i) in orig_model.outputs:\n output_layers.append(layer)\n break\n assert len(output_layers) == len(orig_model.outputs), \"Number of output layers don't match\"\n\n outputs = [None] * len(output_layers)\n for li, output_layer in enumerate(output_layers):\n outputs[li] = _get_new_layer_output(output_layer, new_layer_outputs, inp_layers)\n\n return outputs\n\n\ndef reset_weights(model, session=None):\n \"\"\"\n reset weights of model with the appropriate initializer.\n Note: only uses \"kernel_initializer\" and \"bias_initializer\"\n does not close session.\n\n Reference:\n https://www.codementor.io/nitinsurya/how-to-re-initialize-keras-model-weights-et41zre2g\n\n Parameters:\n model: keras model to reset\n session (optional): the current session\n \"\"\"\n\n if session is None:\n session = K.get_session()\n\n for layer in model.layers: \n reset = False\n if hasattr(layer, 'kernel_initializer'):\n layer.kernel.initializer.run(session=session)\n reset = True\n \n if hasattr(layer, 'bias_initializer'):\n layer.bias.initializer.run(session=session)\n reset = True\n \n if not reset:\n print('Could not find initializer for layer %s. skipping', layer.name)\n\n\ndef copy_weights(src_model, dst_model):\n \"\"\"\n copy weights from the src keras model to the dst keras model via layer names\n\n Parameters:\n src_model: source keras model to copy from\n dst_model: destination keras model to copy to\n \"\"\"\n\n for layer in tqdm(dst_model.layers):\n try:\n wts = src_model.get_layer(layer.name).get_weights()\n layer.set_weights(wts)\n except:\n print('Could not copy weights of %s' % layer.name)\n continue\n\n\ndef robust_multi_gpu(model, gpus, verbose=True):\n \"\"\"\n re-work keras model for multi-gpus if number of gpus is > 1\n\n Parameters:\n model: keras Model\n gpus: list of gpus to split to (e.g. [1, 4, 6]), or count of gpus available (e.g. 3)\n Note: if given int, assume that is the count of gpus, \n so if you want a single specific gpu, this function will not do that.\n verbose: whether to display what happened (default: True)\n \n Returns:\n keras model\n \"\"\"\n\n islist = isinstance(gpus, (list, tuple))\n if (islist and len(gpus) > 1) or (not islist and gpus > 1):\n count = gpus if not islist else len(gpus)\n print(\"Returning multi-gpu (%d) model\" % count)\n return keras.utils.multi_gpu_model(model, count)\n\n else:\n print(\"Returning keras model back (single gpu found)\")\n return model\n\n\ndef diagram(model):\n outfile = NamedTemporaryFile().name + '.png'\n tf.keras.utils.plot_model(model, to_file=outfile, show_shapes=True)\n\n from IPython.display import Image\n Image(outfile, width=100)\n",
"id": "1611304",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "neurite/tf/utils/model.py"
},
{
"content": "\"\"\"\ntensorflow/keras utilities for the neurite project\n\nIf you use this code, please cite \n<NAME>, <NAME>, <NAME>\nAnatomical Priors in Convolutional Networks for Unsupervised Biomedical Segmentation, \nCVPR 2018\n\nor for the transformation/interpolation related functions:\n\nUnsupervised Learning for Fast Probabilistic Diffeomorphic Registration\n<NAME>, <NAME>, <NAME>, <NAME>\nMICCAI 2018.\n\nContact: adalca [at] csail [dot] mit [dot] edu\nLicense: GPLv3\n\"\"\"\n\n# python imports\nimport itertools\n\n# third party imports\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow.keras.backend as K\n\n# local imports\nimport pystrum.pynd.ndutils as nd\nimport neurite as ne\nimport neurite.py.utils\n\n\ndef interpn(vol, loc, interp_method='linear', fill_value=None):\n \"\"\"\n N-D gridded interpolation in tensorflow\n\n vol can have more dimensions than loc[i], in which case loc[i] acts as a slice \n for the first dimensions\n\n Parameters:\n vol: volume with size vol_shape or [*vol_shape, nb_features]\n loc: a N-long list of N-D Tensors (the interpolation locations) for the new grid\n each tensor has to have the same size (but not nec. same size as vol)\n or a tensor of size [*new_vol_shape, D]\n interp_method: interpolation type 'linear' (default) or 'nearest'\n fill_value: value to use for points outside the domain. If None, the nearest\n neighbors will be used (default).\n\n Returns:\n new interpolated volume of the same size as the entries in loc\n\n TODO:\n enable optional orig_grid - the original grid points.\n check out tf.contrib.resampler, only seems to work for 2D data\n \"\"\"\n \n if isinstance(loc, (list, tuple)):\n loc = tf.stack(loc, -1)\n nb_dims = loc.shape[-1]\n\n if len(vol.shape) not in [nb_dims, nb_dims+1]:\n raise Exception(\"Number of loc Tensors %d does not match volume dimension %d\"\n % (nb_dims, len(vol.shape[:-1])))\n\n if nb_dims > len(vol.shape):\n raise Exception(\"Loc dimension %d does not match volume dimension %d\"\n % (nb_dims, len(vol.shape)))\n\n if len(vol.shape) == nb_dims:\n vol = K.expand_dims(vol, -1)\n\n # flatten and float location Tensors\n if loc.dtype != tf.float32:\n loc = tf.cast(loc, 'float32')\n \n\n if isinstance(vol.shape, (tf.compat.v1.Dimension, tf.TensorShape)):\n volshape = vol.shape.as_list()\n else:\n volshape = vol.shape\n\n max_loc = [d - 1 for d in vol.get_shape().as_list()]\n\n # interpolate\n if interp_method == 'linear':\n # get floor. \n # This has to remain a tf.float32 since we will be using loc1 in a float32 op\n loc0 = tf.floor(loc)\n\n # clip values\n clipped_loc = [tf.clip_by_value(loc[...,d], 0, max_loc[d]) for d in range(nb_dims)]\n loc0lst = [tf.clip_by_value(loc0[...,d], 0, max_loc[d]) for d in range(nb_dims)]\n\n # get other end of point cube\n loc1 = [tf.clip_by_value(loc0lst[d] + 1, 0, max_loc[d]) for d in range(nb_dims)]\n locs = [[tf.cast(f, 'int32') for f in loc0lst], [tf.cast(f, 'int32') for f in loc1]]\n\n # compute the difference between the upper value and the original value\n # differences are basically 1 - (pt - floor(pt))\n # because: floor(pt) + 1 - pt = 1 + (floor(pt) - pt) = 1 - (pt - floor(pt))\n diff_loc1 = [loc1[d] - clipped_loc[d] for d in range(nb_dims)]\n diff_loc0 = [1 - d for d in diff_loc1]\n weights_loc = [diff_loc1, diff_loc0] # note reverse ordering since weights are inverse of diff.\n\n # go through all the cube corners, indexed by a ND binary vector \n # e.g. 
[0, 0] means this \"first\" corner in a 2-D \"cube\"\n cube_pts = list(itertools.product([0, 1], repeat=nb_dims))\n interp_vol = 0\n \n for c in cube_pts:\n \n # get nd values\n # note re: indices above volumes via https://github.com/tensorflow/tensorflow/issues/15091\n # It works on GPU because we do not perform index validation checking on GPU -- it's too\n # expensive. Instead we fill the output with zero for the corresponding value. The CPU\n # version caught the bad index and returned the appropriate error.\n subs = [locs[c[d]][d] for d in range(nb_dims)]\n\n # tf stacking is slow for large volumes, so we will use sub2ind and use single indexing.\n # indices = tf.stack(subs, axis=-1)\n # vol_val = tf.gather_nd(vol, indices)\n # faster way to gather than gather_nd, because the latter needs tf.stack which is slow :(\n idx = sub2ind2d(vol.shape[:-1], subs)\n vol_val = tf.gather(tf.reshape(vol, [-1, volshape[-1]]), idx)\n\n # get the weight of this cube_pt based on the distance\n # if c[d] is 0 --> want weight = 1 - (pt - floor[pt]) = diff_loc1\n # if c[d] is 1 --> want weight = pt - floor[pt] = diff_loc0\n wts_lst = [weights_loc[c[d]][d] for d in range(nb_dims)]\n # tf stacking is slow, we will use prod_n()\n # wlm = tf.stack(wts_lst, axis=0)\n # wt = tf.reduce_prod(wlm, axis=0)\n wt = prod_n(wts_lst)\n wt = K.expand_dims(wt, -1)\n \n # compute final weighted value for each cube corner\n interp_vol += wt * vol_val\n \n else:\n assert interp_method == 'nearest', 'method should be linear or nearest, got: %s' % interp_method\n roundloc = tf.cast(tf.round(loc), 'int32')\n roundloc = [tf.clip_by_value(roundloc[...,d], 0, max_loc[d]) for d in range(nb_dims)]\n\n # get values\n # tf stacking is slow. replace with gather\n # roundloc = tf.stack(roundloc, axis=-1)\n # interp_vol = tf.gather_nd(vol, roundloc)\n idx = sub2ind2d(vol.shape[:-1], roundloc)\n interp_vol = tf.gather(tf.reshape(vol, [-1, vol.shape[-1]]), idx) \n\n if fill_value is not None:\n out_type = interp_vol.dtype\n fill_value = tf.constant(fill_value, dtype=out_type)\n below = [tf.less(loc[...,d], 0) for d in range(nb_dims)]\n above = [tf.greater(loc[...,d], max_loc[d]) for d in range(nb_dims)]\n out_of_bounds = tf.reduce_any(tf.stack(below + above, axis=-1), axis=-1, keepdims=True)\n interp_vol *= tf.cast(tf.logical_not(out_of_bounds), dtype=out_type)\n interp_vol += tf.cast(out_of_bounds, dtype=out_type) * fill_value\n\n return interp_vol\n\n\ndef resize(vol, zoom_factor, interp_method='linear'):\n \"\"\"\n if zoom_factor is a list, it will determine the ndims, in which case vol has to be of length ndims of ndims + 1\n\n if zoom_factor is an integer, then vol must be of length ndims + 1\n\n \"\"\"\n\n if isinstance(zoom_factor, (list, tuple)):\n ndims = len(zoom_factor)\n vol_shape = vol.shape[:ndims]\n \n assert len(vol_shape) in (ndims, ndims+1), \\\n \"zoom_factor length %d does not match ndims %d\" % (len(vol_shape), ndims)\n\n else:\n vol_shape = vol.shape[:-1]\n ndims = len(vol_shape)\n zoom_factor = [zoom_factor] * ndims\n if not isinstance(vol_shape[0], int):\n vol_shape = vol_shape.as_list()\n\n new_shape = [vol_shape[f] * zoom_factor[f] for f in range(ndims)]\n new_shape = [int(f) for f in new_shape]\n\n lin = [tf.linspace(0., vol_shape[d]-1., new_shape[d]) for d in range(ndims)]\n grid = ne.utils.ndgrid(*lin)\n\n return ne.utils.interpn(vol, grid, interp_method=interp_method)\n\n\nzoom = resize\n\n\n###############################################################################\n# volumetric / axis 
operations\n###############################################################################\n\ndef tf_map_fn_axis(fn, elems, axis, **kwargs):\n \"\"\"\n apply tf.map_fn along a specific axis\n \n Parameters:\n fn: function to apply\n elems:\n if elems is a Tensor, axis is an int\n if elems is a list, axis is a list of same length\n axis: axis to apply along\n kwargs: other arguments for tf.map_fn\n\n \"\"\"\n \n # determine lists\n islist = isinstance(elems, (tuple, list))\n if not islist:\n elems = [elems]\n assert not isinstance(axis, (tuple, list)), 'axis cannot be list if elements are not list'\n axis = [axis]\n \n \n elems_perm = []\n for xi, x in enumerate(elems):\n a = axis[xi]\n s = len(x.get_shape().as_list())\n if a == -1: a = s - 1\n\n # move channels to front, so x will be [axis, ...]\n perm = [a] + list(range(0, a)) + list(range(a + 1, s))\n elems_perm.append(K.permute_dimensions(x, perm))\n\n # compute sptial deformation regularization for this channel\n if not islist:\n elems_perm = elems_perm[0]\n \n x_perm_trf = tf.map_fn(fn, elems_perm, **kwargs)\n if not islist:\n x_perm_trf = [x_perm_trf]\n \n\n # move in_channels back to end\n elems_trf = []\n for xi, x in enumerate(x_perm_trf):\n a = axis[xi]\n s = len(x.get_shape().as_list())\n if a == -1: a = s - 1\n \n perm = list(range(1, a + 1)) + [0] + list(range(a + 1, s))\n elems_trf.append(K.permute_dimensions(x, perm))\n \n if not islist:\n elems_trf = elems_trf[0]\n \n return elems_trf\n\n\ndef volshape_to_ndgrid(volshape, **kwargs):\n \"\"\"\n compute Tensor ndgrid from a volume size\n\n Parameters:\n volshape: the volume size\n **args: \"name\" (optional)\n\n Returns:\n A list of Tensors\n\n See Also:\n ndgrid\n \"\"\"\n \n isint = [float(d).is_integer() for d in volshape]\n if not all(isint):\n raise ValueError(\"volshape needs to be a list of integers\")\n\n linvec = [tf.range(0, d) for d in volshape]\n return ndgrid(*linvec, **kwargs)\n\n\ndef volshape_to_meshgrid(volshape, **kwargs):\n \"\"\"\n compute Tensor meshgrid from a volume size\n\n Warning: this uses the tf.meshgrid convention, of 'xy' indexing.\n to use `ij` indexing, use the ndgrid equivalent\n\n Parameters:\n volshape: the volume size\n **args: \"name\" (optional)\n\n Returns:\n A list of Tensors\n\n See Also:\n tf.meshgrid, meshgrid, ndgrid, volshape_to_ndgrid\n \"\"\"\n \n isint = [float(d).is_integer() for d in volshape]\n if not all(isint):\n raise ValueError(\"volshape needs to be a list of integers\")\n\n linvec = [tf.range(0, d) for d in volshape]\n return meshgrid(*linvec, **kwargs)\n\n\ndef ndgrid(*args, **kwargs):\n \"\"\"\n broadcast Tensors on an N-D grid with ij indexing\n uses meshgrid with ij indexing\n\n Parameters:\n *args: Tensors with rank 1\n **args: \"name\" (optional)\n\n Returns:\n A list of Tensors\n \n \"\"\"\n return meshgrid(*args, indexing='ij', **kwargs)\n\n\ndef meshgrid(*args, **kwargs):\n \"\"\"\n \n meshgrid code that builds on (copies) tensorflow's meshgrid but dramatically\n improves runtime by changing the last step to tiling instead of multiplication.\n https://github.com/tensorflow/tensorflow/blob/c19e29306ce1777456b2dbb3a14f511edf7883a8/tensorflow/python/ops/array_ops.py#L1921\n \n Broadcasts parameters for evaluation on an N-D grid.\n Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`\n of N-D coordinate arrays for evaluating expressions on an N-D grid.\n Notes:\n `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.\n When the `indexing` argument is set to 'xy' (the 
default), the broadcasting\n instructions for the first two dimensions are swapped.\n Examples:\n Calling `X, Y = meshgrid(x, y)` with the tensors\n ```python\n x = [1, 2, 3]\n y = [4, 5, 6]\n X, Y = meshgrid(x, y)\n # X = [[1, 2, 3],\n # [1, 2, 3],\n # [1, 2, 3]]\n # Y = [[4, 4, 4],\n # [5, 5, 5],\n # [6, 6, 6]]\n ```\n Args:\n *args: `Tensor`s with rank 1.\n **kwargs:\n - indexing: Either 'xy' or 'ij' (optional, default: 'xy').\n - name: A name for the operation (optional).\n Returns:\n outputs: A list of N `Tensor`s with rank N.\n Raises:\n TypeError: When no keyword arguments (kwargs) are passed.\n ValueError: When indexing keyword argument is not one of `xy` or `ij`.\n \"\"\"\n\n indexing = kwargs.pop(\"indexing\", \"xy\")\n name = kwargs.pop(\"name\", \"meshgrid\")\n if kwargs:\n key = list(kwargs.keys())[0]\n raise TypeError(\"'{}' is an invalid keyword argument \"\n \"for this function\".format(key))\n\n if indexing not in (\"xy\", \"ij\"):\n raise ValueError(\"indexing parameter must be either 'xy' or 'ij'\")\n\n # with ops.name_scope(name, \"meshgrid\", args) as name:\n ndim = len(args)\n s0 = (1,) * ndim\n\n # Prepare reshape by inserting dimensions with size 1 where needed\n output = []\n for i, x in enumerate(args):\n output.append(tf.reshape(tf.stack(x), (s0[:i] + (-1,) + s0[i + 1::])))\n # Create parameters for broadcasting each tensor to the full size\n shapes = [tf.size(x) for x in args]\n sz = [x.get_shape().as_list()[0] for x in args]\n\n # output_dtype = tf.convert_to_tensor(args[0]).dtype.base_dtype\n if indexing == \"xy\" and ndim > 1:\n output[0] = tf.reshape(output[0], (1, -1) + (1,) * (ndim - 2))\n output[1] = tf.reshape(output[1], (-1, 1) + (1,) * (ndim - 2))\n shapes[0], shapes[1] = shapes[1], shapes[0]\n sz[0], sz[1] = sz[1], sz[0]\n\n # This is the part of the implementation from tf that is slow. 
\n # We replace it below to get a ~6x speedup (essentially using tile instead of * tf.ones())\n # TODO(nolivia): improve performance with a broadcast \n # mult_fact = tf.ones(shapes, output_dtype)\n # return [x * mult_fact for x in output]\n for i in range(len(output)): \n stack_sz = [*sz[:i], 1, *sz[(i+1):]]\n if indexing == 'xy' and ndim > 1 and i < 2:\n stack_sz[0], stack_sz[1] = stack_sz[1], stack_sz[0]\n output[i] = tf.tile(output[i], tf.stack(stack_sz))\n return output\n\n\ndef flatten(v):\n \"\"\"\n flatten Tensor v\n \n Parameters:\n v: Tensor to be flattened\n \n Returns:\n flat Tensor\n \"\"\"\n\n return tf.reshape(v, [-1])\n \n \n###############################################################################\n# simple math functions, often used as activations \n###############################################################################\n\ndef softmax(x, axis=-1, alpha=1):\n \"\"\"\n building on keras implementation, with additional alpha parameter\n\n Softmax activation function.\n # Arguments\n x : Tensor.\n axis: Integer, axis along which the softmax normalization is applied.\n alpha: a value to multiply all x\n # Returns\n Tensor, output of softmax transformation.\n # Raises\n ValueError: In case `dim(x) == 1`.\n \"\"\"\n x = alpha * x\n ndim = K.ndim(x)\n if ndim == 2:\n return K.softmax(x)\n elif ndim > 2:\n e = K.exp(x - K.max(x, axis=axis, keepdims=True))\n s = K.sum(e, axis=axis, keepdims=True)\n return e / s\n else:\n raise ValueError('Cannot apply softmax to a tensor that is 1D')\n\n\ndef logtanh(x, a=1):\n \"\"\"\n log * tanh\n\n See Also: arcsinh\n \"\"\"\n return K.tanh(x) * K.log(2 + a * abs(x))\n\n\ndef arcsinh(x, alpha=1):\n \"\"\"\n asignh\n\n See Also: logtanh\n \"\"\"\n return tf.asinh(x * alpha) / alpha\n\n\ndef logistic(x, x0=0., alpha=1., L=1.):\n \"\"\"\n returns L/(1+exp(-alpha * (x-x0)))\n \"\"\"\n assert L > 0, 'L (height of logistic) should be > 0'\n assert alpha > 0, 'alpha (slope) of logistic should be > 0'\n \n return L / (1 + tf.exp(-alpha * (x-x0)))\n\n\ndef sigmoid(x):\n return logistic(x, x0=0., alpha=1., L=1.)\n\n\ndef logistic_fixed_ends(x, start=-1., end=1., L=1., **kwargs):\n \"\"\"\n f is logistic with fixed ends, so that f(start) = 0, and f(end) = L.\n this is currently done a bit heuristically: it's a sigmoid, with a linear function added to correct the ends.\n \"\"\"\n assert end > start, 'End of fixed points should be greater than start'\n # tf.assert_greater(end, start, message='assert')\n \n # clip to start and end\n x = tf.clip_by_value(x, start, end)\n \n # logistic function\n xv = logistic(x, L=L, **kwargs)\n \n # ends of linear corrective function\n sv = logistic(start, L=L, **kwargs)\n ev = logistic(end, L=L, **kwargs)\n \n # corrective function\n df = end - start\n linear_corr = (end-x)/df * (- sv) + (x-start)/df * (-ev + L)\n \n # return fixed logistic\n return xv + linear_corr\n\n\ndef sigmoid_fixed_ends(x, start=-1., end=1., L=1., **kwargs):\n return logistic_fixed_ends(x, start=-1., end=1., L=1., x0=0., alpha=1.)\n\n\ndef soft_round(x, alpha=25):\n fx = tf.floor(x)\n xd = x - fx\n return fx + logistic_fixed_ends(xd, start=0., end=1., x0=0.5, alpha=alpha)\n\n\ndef soft_delta(x, x0=0., alpha=100, reg='l1'):\n \"\"\"\n recommended defaults:\n alpha = 100 for l1\n alpha = 1000 for l2\n \"\"\"\n if reg == 'l1':\n xa = tf.abs(x - x0)\n else:\n assert reg == 'l2'\n xa = tf.square(x - x0)\n return (1 - logistic(xa, alpha=alpha)) * 2\n\n\ndef odd_shifted_relu(x, shift=-0.5, scale=2.0):\n \"\"\"\n Odd shifted ReLu\n Essentially in 
x > 0, it is a shifted ReLu, and in x < 0 it's a negative mirror. \n \"\"\"\n\n shift = float(shift)\n scale = float(scale)\n return scale * K.relu(x - shift) - scale * K.relu(- x - shift)\n\n\n###############################################################################\n# other\n###############################################################################\n\ndef perlin_vol(vol_shape, min_scale=0, max_scale=None, interp_method='linear', wt_type='monotonic'):\n \"\"\"\n generate perlin noise ND volume \n\n rough algorithm:\n \n vol = zeros\n for scale in scales:\n rand = generate random uniform noise at given scale\n vol += wt * upsampled rand to vol_shape \n \n\n Parameters\n ----------\n vol_shape: list indicating input shape.\n min_scale: higher min_scale = less high frequency noise\n the minimum rescale vol_shape/(2**min_scale), min_scale of 0 (default) \n means start by not rescaling, and go down.\n max_scale: maximum scale, if None computes such that smallest volume shape is [1]\n interp_order: interpolation (upscale) order, as used in ne.utils.zoom\n wt_type: the weight type between volumes. default: monotonically decreasing with image size.\n options: 'monotonic', 'random'\n \n https://github.com/adalca/matlib/blob/master/matlib/visual/perlin.m\n loosely inspired from http://nullprogram.com/blog/2007/11/20\n \"\"\"\n\n # input handling\n assert wt_type in ['monotonic', 'random'], \\\n \"wt_type should be in 'monotonic', 'random', got: %s\" % wt_type\n\n if max_scale is None:\n max_width = np.max(vol_shape)\n max_scale = np.ceil(np.log2(max_width)).astype('int')\n\n # decide on scales:\n scale_shapes = []\n wts = []\n for i in range(min_scale, max_scale + 1):\n scale_shapes.append(np.ceil([f / (2**i) for f in vol_shape]).astype('int'))\n \n # determine weight\n if wt_type == 'monotonic':\n wts.append(i + 1) # larger images (so more high frequencies) get lower weight\n else:\n wts.append(K.random_uniform([1])[0])\n\n wts = K.stack(wts)/K.sum(wts)\n wts = tf.cast(wts, tf.float32)\n\n\n # get perlin volume\n vol = K.zeros(vol_shape)\n for sci, sc in enumerate(scale_shapes):\n\n # get a small random volume\n rand_vol = K.random_uniform(sc)\n \n # interpolated rand volume to upper side\n reshape_factor = [vol_shape[d]/sc[d] for d in range(len(vol_shape))]\n interp_vol = zoom(rand_vol, reshape_factor, interp_method=interp_method)[..., 0]\n\n # add to existing volume\n vol = vol + wts[sci] * interp_vol\n \n return vol\n\n\ndef gaussian_kernel(sigma, windowsize=None, indexing='ij'):\n \"\"\"\n sigma will be a number of a list of numbers.\n\n # some guidance from my MATLAB file \n https://github.com/adalca/mivt/blob/master/src/gaussFilt.m\n\n Parameters:\n sigma: scalar or list of scalars\n windowsize (optional): scalar or list of scalars indicating the shape of the kernel\n \n Returns:\n ND kernel the same dimensiosn as the number of sigmas.\n\n Todo: could use MultivariateNormalDiag\n \"\"\"\n\n if not isinstance(sigma, (list, tuple)):\n sigma = [sigma]\n sigma = [np.maximum(f, np.finfo(float).eps) for f in sigma]\n\n nb_dims = len(sigma)\n\n # compute windowsize\n if windowsize is None:\n windowsize = [np.round(f * 3) * 2 + 1 for f in sigma]\n\n if len(sigma) != len(windowsize):\n raise ValueError('sigma and windowsize should have the same length.'\n 'Got vectors: ' + str(sigma) + 'and' + str(windowsize))\n\n # ok, let's get to work.\n mid = [(w - 1)/2 for w in windowsize]\n\n # list of volume ndgrid\n # N-long list, each entry of shape volshape\n mesh = 
volshape_to_meshgrid(windowsize, indexing=indexing) \n mesh = [tf.cast(f, 'float32') for f in mesh]\n\n # compute independent gaussians\n diff = [mesh[f] - mid[f] for f in range(len(windowsize))]\n exp_term = [- K.square(diff[f])/(2 * (sigma[f]**2)) for f in range(nb_dims)]\n norms = [exp_term[f] - np.log(sigma[f] * np.sqrt(2 * np.pi)) for f in range(nb_dims)]\n\n # add an all-ones entry and transform into a large matrix\n norms_matrix = tf.stack(norms, axis=-1) # *volshape x N\n g = K.sum(norms_matrix, -1) # volshape\n g = tf.exp(g)\n g /= tf.reduce_sum(g)\n\n return g\n\n\ndef sub2ind2d(siz, subs, **kwargs):\n \"\"\"\n assumes column-order major\n \"\"\"\n # subs is a list\n assert len(siz) == len(subs), \\\n 'found inconsistent siz and subs: %d %d' % (len(siz), len(subs))\n\n k = np.cumprod(siz[::-1])\n\n ndx = subs[-1]\n for i, v in enumerate(subs[:-1][::-1]):\n ndx = ndx + v * k[i]\n\n return ndx\n\n\ndef prod_n(lst):\n \"\"\"\n Alternative to tf.stacking and prod, since tf.stacking can be slow\n \"\"\"\n prod = lst[0]\n for p in lst[1:]:\n prod *= p\n return prod\n\n\n###############################################################################\n# functions from external source\n###############################################################################\n\ndef batch_gather(reference, indices):\n \"\"\"\n C+P From Keras pull request https://github.com/keras-team/keras/pull/6377/files\n \n Batchwise gathering of row indices.\n\n The numpy equivalent is `reference[np.arange(batch_size), indices]`, where\n `batch_size` is the first dimension of the reference tensor.\n\n # Arguments\n reference: A tensor with ndim >= 2 of shape.\n (batch_size, dim1, dim2, ..., dimN)\n indices: A 1d integer tensor of shape (batch_size) satisfying\n 0 <= i < dim2 for each element i.\n\n # Returns\n The selected tensor with shape (batch_size, dim2, ..., dimN).\n\n # Examples\n 1. If reference is `[[3, 5, 7], [11, 13, 17]]` and indices is `[2, 1]`\n then the result is `[7, 13]`.\n\n 2. If reference is\n ```\n [[[2, 3], [4, 5], [6, 7]],\n [[10, 11], [12, 13], [16, 17]]]\n ```\n and indices is `[2, 1]` then the result is `[[6, 7], [12, 13]]`.\n \"\"\"\n batch_size = K.shape(reference)[0]\n indices = tf.stack([tf.range(batch_size), indices], axis=1)\n return tf.gather_nd(reference, indices)\n",
"id": "4017399",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "neurite/tf/utils/utils.py"
}
] | 0 |
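In `neurite/tf/utils/utils.py` above, `sub2ind2d()` converts per-dimension subscripts into flat indices so that `tf.gather` on a reshaped volume can replace the slower `tf.gather_nd`. A small NumPy check of the same arithmetic (NumPy is used here only to keep the sketch dependency-free; the shape and subscripts are arbitrary examples), showing that the result matches C-order raveling, which is what the `tf.reshape(vol, [-1, channels])` + `tf.gather` pattern in `interpn()` relies on:

```python
# Check of the index arithmetic behind sub2ind2d() from the code above.
import numpy as np

def sub2ind2d(siz, subs):
    k = np.cumprod(siz[::-1])
    ndx = subs[-1]
    for i, v in enumerate(subs[:-1][::-1]):
        ndx = ndx + v * k[i]
    return ndx

shape = (2, 3, 4)
subs = (np.array([0, 1, 1]), np.array([2, 0, 2]), np.array([3, 1, 0]))

print(sub2ind2d(shape, subs))                        # [11 13 20]
print(np.ravel_multi_index(subs, shape, order="C"))  # [11 13 20]
```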
xinzha623 | [
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tensorflow_transform.mappers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport tensorflow as tf\nfrom tensorflow_transform import mappers\n\nimport unittest\nfrom tensorflow.python.framework import test_util\n\n\nclass MappersTest(test_util.TensorFlowTestCase):\n\n def assertSparseOutput(self, expected_indices, expected_values,\n expected_shape, actual_sparse_tensor, close_values):\n with tf.Session() as sess:\n sess.run(tf.tables_initializer())\n actual = actual_sparse_tensor.eval()\n self.assertAllEqual(expected_indices, actual.indices)\n self.assertAllEqual(expected_shape, actual.dense_shape)\n if close_values:\n self.assertAllClose(expected_values, actual.values)\n else:\n self.assertAllEqual(expected_values, actual.values)\n\n def testSegmentIndices(self):\n with tf.Session():\n self.assertAllEqual(\n mappers.segment_indices(tf.constant([0, 0, 1, 2, 2, 2], tf.int64),\n name='test_name').eval(),\n [0, 1, 0, 0, 1, 2])\n self.assertAllEqual(\n mappers.segment_indices(tf.constant([], tf.int64)).eval(),\n [])\n\n def testSegmentIndicesSkipOne(self):\n input_tensor = tf.constant([0, 0, 2, 2])\n with tf.Session():\n self.assertAllEqual([0, 1, 0, 1],\n mappers.segment_indices(input_tensor).eval())\n\n def testNGramsEmpty(self):\n output_tensor = mappers.ngrams(tf.string_split(tf.constant([''])),\n (1, 5), '')\n with tf.Session():\n output = output_tensor.eval()\n self.assertEqual((0, 2), output.indices.shape)\n self.assertAllEqual([1, 0], output.dense_shape)\n self.assertEqual(0, len(output.values))\n\n def testNGrams(self):\n string_tensor = tf.constant(['abc', 'def', 'fghijklm', 'z', ''])\n tokenized_tensor = tf.string_split(string_tensor, delimiter='')\n output_tensor = mappers.ngrams(\n tokens=tokenized_tensor,\n ngram_range=(1, 5),\n separator='')\n self.assertSparseOutput(\n expected_indices=[\n [0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [0, 5],\n [1, 0], [1, 1], [1, 2], [1, 3], [1, 4], [1, 5],\n [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [2, 6], [2, 7],\n [2, 8], [2, 9], [2, 10], [2, 11], [2, 12], [2, 13], [2, 14],\n [2, 15], [2, 16], [2, 17], [2, 18], [2, 19], [2, 20], [2, 21],\n [2, 22], [2, 23], [2, 24], [2, 25], [2, 26], [2, 27], [2, 28],\n [2, 29], [3, 0]],\n expected_values=[\n 'a', 'ab', 'abc', 'b', 'bc', 'c',\n 'd', 'de', 'def', 'e', 'ef', 'f',\n 'f', 'fg', 'fgh', 'fghi', 'fghij', 'g', 'gh', 'ghi', 'ghij',\n 'ghijk', 'h', 'hi', 'hij', 'hijk', 'hijkl', 'i', 'ij', 'ijk',\n 'ijkl', 'ijklm', 'j', 'jk', 'jkl', 'jklm', 'k', 'kl', 'klm', 'l',\n 'lm', 'm', 'z'],\n expected_shape=[5, 30],\n actual_sparse_tensor=output_tensor,\n close_values=False)\n\n def testNGramsMinSizeNotOne(self):\n string_tensor = tf.constant(['abc', 'def', 'fghijklm', 'z', ''])\n tokenized_tensor = tf.string_split(string_tensor, delimiter='')\n output_tensor = mappers.ngrams(\n tokens=tokenized_tensor,\n 
ngram_range=(2, 5),\n separator='')\n self.assertSparseOutput(\n expected_indices=[\n [0, 0], [0, 1], [0, 2],\n [1, 0], [1, 1], [1, 2],\n [2, 0], [2, 1], [2, 2], [2, 3], [2, 4], [2, 5], [2, 6], [2, 7],\n [2, 8], [2, 9], [2, 10], [2, 11], [2, 12], [2, 13], [2, 14],\n [2, 15], [2, 16], [2, 17], [2, 18], [2, 19], [2, 20], [2, 21]],\n expected_values=[\n 'ab', 'abc', 'bc',\n 'de', 'def', 'ef',\n 'fg', 'fgh', 'fghi', 'fghij', 'gh', 'ghi', 'ghij', 'ghijk',\n 'hi', 'hij', 'hijk', 'hijkl', 'ij', 'ijk', 'ijkl', 'ijklm',\n 'jk', 'jkl', 'jklm', 'kl', 'klm', 'lm'],\n expected_shape=[5, 22],\n actual_sparse_tensor=output_tensor,\n close_values=False)\n\n def testNGramsWithSpaceSeparator(self):\n string_tensor = tf.constant(['One was Johnny', 'Two was a rat'])\n tokenized_tensor = tf.string_split(string_tensor, delimiter=' ')\n output_tensor = mappers.ngrams(\n tokens=tokenized_tensor,\n ngram_range=(1, 2),\n separator=' ')\n with tf.Session():\n output = output_tensor.eval()\n self.assertAllEqual(\n output.indices,\n [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],\n [1, 0], [1, 1], [1, 2], [1, 3], [1, 4], [1, 5], [1, 6]])\n self.assertAllEqual(output.values, [\n 'One', 'One was', 'was', 'was Johnny', 'Johnny',\n 'Two', 'Two was', 'was', 'was a', 'a', 'a rat', 'rat'])\n self.assertAllEqual(output.dense_shape, [2, 7])\n\n def testNGramsBadSizes(self):\n string_tensor = tf.constant(['abc', 'def', 'fghijklm', 'z', ''])\n tokenized_tensor = tf.string_split(string_tensor, delimiter='')\n with self.assertRaisesRegexp(ValueError, 'Invalid ngram_range'):\n mappers.ngrams(tokenized_tensor, (0, 5), separator='')\n with self.assertRaisesRegexp(ValueError, 'Invalid ngram_range'):\n mappers.ngrams(tokenized_tensor, (6, 5), separator='')\n\n def testTermFrequency(self):\n input_tensor = tf.SparseTensor(\n [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1]],\n [1, 2, 0, 0, 0, 3, 0],\n [2, 5])\n self.assertSparseOutput(\n expected_indices=[[0, 0], [0, 1], [0, 2], [1, 0], [1, 3]],\n expected_values=[(3/5), (1/5), (1/5), (1/2), (1/2)],\n expected_shape=[2, 4],\n actual_sparse_tensor=mappers._to_term_frequency(input_tensor, 4),\n close_values=True)\n\n def testTermFrequencyUnusedTerm(self):\n input_tensor = tf.SparseTensor(\n [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4], [1, 0], [1, 1]],\n [4, 2, 0, 0, 0, 3, 0],\n [2, 5])\n self.assertSparseOutput(\n expected_indices=[[0, 0], [0, 2], [0, 4], [1, 0], [1, 3]],\n expected_values=[(3/5), (1/5), (1/5), (1/2), (1/2)],\n expected_shape=[2, 5],\n actual_sparse_tensor=mappers._to_term_frequency(input_tensor, 5),\n close_values=True)\n\n def testCountDocsWithTerm(self):\n input_tensor = tf.SparseTensor(\n [[0, 0], [0, 1], [0, 2], [1, 0], [1, 3]],\n [(3/5), (1/5), (1/5), (1/2), (1/2)],\n [2, 4])\n output_tensor = mappers._count_docs_with_term(input_tensor)\n with tf.Session():\n output = output_tensor.eval()\n self.assertAllEqual([[2, 1, 1, 1]], output)\n\n def testCountDocsWithTermUnusedTerm(self):\n input_tensor = tf.SparseTensor(\n [[0, 0], [0, 2], [1, 0], [1, 3]],\n [(3/5), (1/5), (1/2), (1/2)],\n [2, 4])\n output_tensor = mappers._count_docs_with_term(input_tensor)\n with tf.Session():\n output = output_tensor.eval()\n self.assertAllEqual([[2, 0, 1, 1]], output)\n\n def testToTFIDF(self):\n term_freq = tf.SparseTensor(\n [[0, 0], [0, 1], [0, 2], [1, 0], [1, 3]],\n [(3/5), (1/5), (1/5), (1/2), (1/2)],\n [2, 4])\n reduced_term_freq = tf.constant([[2, 1, 1, 1]])\n output_tensor = mappers._to_tfidf(term_freq, reduced_term_freq, 2, True)\n log_3_over_2 = 1.4054651\n 
self.assertSparseOutput(\n expected_indices=[[0, 0], [0, 1], [0, 2], [1, 0], [1, 3]],\n expected_values=[(3/5), (1/5)*log_3_over_2, (1/5)*log_3_over_2,\n (1/2), (1/2)*log_3_over_2],\n expected_shape=[2, 4],\n actual_sparse_tensor=output_tensor,\n close_values=True)\n\n def testToTFIDFNotSmooth(self):\n term_freq = tf.SparseTensor(\n [[0, 0], [0, 1], [0, 2], [1, 0], [1, 3]],\n [(3/5), (1/5), (1/5), (1/2), (1/2)],\n [2, 4])\n reduced_term_freq = tf.constant([[2, 1, 1, 1]])\n output_tensor = mappers._to_tfidf(term_freq, reduced_term_freq, 2, False)\n log_2_over_1 = 1.6931471\n self.assertSparseOutput(\n expected_indices=[[0, 0], [0, 1], [0, 2], [1, 0], [1, 3]],\n expected_values=[(3/5), (1/5)*log_2_over_1, (1/5)*log_2_over_1,\n (1/2), (1/2)*log_2_over_1],\n expected_shape=[2, 4],\n actual_sparse_tensor=output_tensor,\n close_values=True)\n\n def testSplitTFIDF(self):\n tfidfs = tf.SparseTensor(\n [[0, 0], [0, 1], [2, 1], [2, 2]],\n [0.23104906, 0.19178806, 0.14384104, 0.34657359],\n [3, 4])\n\n out_index, out_weight = mappers._split_tfidfs_to_outputs(tfidfs)\n self.assertSparseOutput(\n expected_indices=[[0, 0], [0, 1], [2, 0], [2, 1]],\n expected_values=[0, 1, 1, 2],\n expected_shape=[3, 2],\n actual_sparse_tensor=out_index,\n close_values=False)\n self.assertSparseOutput(\n expected_indices=[[0, 0], [0, 1], [2, 0], [2, 1]],\n expected_values=[0.23104906, 0.19178806, 0.14384104, 0.34657359],\n expected_shape=[3, 2],\n actual_sparse_tensor=out_weight,\n close_values=True)\n\n def testSplitTFIDFWithEmptyInput(self):\n with tf.Graph().as_default():\n tfidf = tf.SparseTensor(\n values=tf.constant([], shape=[0], dtype=tf.float32),\n indices=tf.constant([], shape=[0, 2], dtype=tf.int64),\n dense_shape=[2, 0])\n\n _, weights = mappers._split_tfidfs_to_outputs(tfidf)\n\n with self.test_session() as sess:\n weights_shape = sess.run(weights.dense_shape)\n self.assertAllEqual(weights_shape, [2, 0])\n\n def testHashStringsNoKeyDenseInput(self):\n strings = tf.constant(['Car', 'Bus', 'Tree'])\n expected_output = [8, 4, 5]\n\n hash_buckets = 11\n hashed_strings = mappers.hash_strings(strings, hash_buckets)\n with self.test_session() as sess:\n output = sess.run(hashed_strings)\n self.assertAllEqual(expected_output, output)\n\n def testHashStringsNoKeySparseInput(self):\n strings = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],\n values=['Dog', 'Cat', ''],\n dense_shape=[2, 2])\n hash_buckets = 17\n expected_indices = [[0, 0], [0, 1], [1, 0]]\n expected_values = [12, 4, 11]\n expected_shape = [2, 2]\n hashed_strings = mappers.hash_strings(strings, hash_buckets)\n self.assertSparseOutput(\n expected_indices=expected_indices,\n expected_values=expected_values,\n expected_shape=expected_shape,\n actual_sparse_tensor=hashed_strings,\n close_values=False)\n\n def testHashStringsWithKeyDenseInput(self):\n strings = tf.constant(['Cake', 'Pie', 'Sundae'])\n expected_output = [6, 5, 6]\n hash_buckets = 11\n hashed_strings = mappers.hash_strings(strings, hash_buckets, key=[123, 456])\n with self.test_session() as sess:\n output = sess.run(hashed_strings)\n self.assertAllEqual(expected_output, output)\n\n def testHashStringsWithKeySparseInput(self):\n strings = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0], [2, 0]],\n values=['$$$', '%^#', '&$!#@', '$$$'],\n dense_shape=[3, 2])\n hash_buckets = 173\n expected_indices = [[0, 0], [0, 1], [1, 0], [2, 0]]\n expected_values = [16, 156, 9, 16]\n expected_shape = [3, 2]\n hashed_strings = mappers.hash_strings(strings, hash_buckets, key=[321, 555])\n 
self.assertSparseOutput(\n expected_indices=expected_indices,\n expected_values=expected_values,\n expected_shape=expected_shape,\n actual_sparse_tensor=hashed_strings,\n close_values=False)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "766418",
"language": "Python",
"matching_score": 3.0786898136138916,
"max_stars_count": 0,
"path": "tensorflow_transform/mappers_test.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tensorflow_transform.impl_helper.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom tensorflow_transform import analyzers\nfrom tensorflow_transform import api\nfrom tensorflow_transform import impl_helper\nfrom tensorflow_transform import mappers\nfrom tensorflow_transform.tf_metadata import dataset_schema as sch\nimport unittest\nfrom tensorflow.contrib import lookup\nfrom tensorflow.python.framework import test_util\n\n\nclass ImplHelperTest(test_util.TensorFlowTestCase):\n\n def assertSparseValuesEqual(self, a, b):\n self.assertAllEqual(a.indices, b.indices)\n self.assertAllEqual(a.values, b.values)\n self.assertAllEqual(a.dense_shape, b.dense_shape)\n\n def toSchema(self, feature_spec):\n return sch.from_feature_spec(feature_spec)\n\n def testInferFeatureSchema(self):\n d = tf.placeholder(tf.int64, None)\n tensors = {\n 'a': tf.placeholder(tf.float32, (None,)),\n 'b': tf.placeholder(tf.string, (1, 2, 3)),\n 'c': tf.placeholder(tf.int64, None),\n 'd': d\n }\n d_column_schema = sch.ColumnSchema(tf.int64, [1, 2, 3],\n sch.FixedColumnRepresentation())\n api.set_column_schema(d, d_column_schema)\n schema = impl_helper.infer_feature_schema(tensors)\n expected_schema = sch.Schema(column_schemas={\n 'a': sch.ColumnSchema(tf.float32, [],\n sch.FixedColumnRepresentation()),\n 'b': sch.ColumnSchema(tf.string, [2, 3],\n sch.FixedColumnRepresentation()),\n 'c': sch.ColumnSchema(tf.int64, None,\n sch.FixedColumnRepresentation()),\n 'd': sch.ColumnSchema(tf.int64, [1, 2, 3],\n sch.FixedColumnRepresentation())\n })\n self.assertEqual(schema, expected_schema)\n\n def testInferFeatureSchemaBadRank(self):\n tensors = {\n 'a': tf.placeholder(tf.float32, ()),\n }\n with self.assertRaises(ValueError):\n impl_helper.infer_feature_schema(tensors)\n\n def testMakeFeedDict(self):\n tensors = {\n 'a': tf.placeholder(tf.int64),\n 'b': tf.placeholder(tf.float32),\n 'c': tf.placeholder(tf.float32),\n 'd': tf.placeholder(tf.float32),\n 'e': tf.sparse_placeholder(tf.string),\n 'f': tf.sparse_placeholder(tf.float32)\n }\n schema = self.toSchema({\n 'a': tf.FixedLenFeature(None, tf.int64),\n 'b': tf.FixedLenFeature([], tf.float32),\n 'c': tf.FixedLenFeature([1], tf.float32),\n 'd': tf.FixedLenFeature([2, 2], tf.float32),\n 'e': tf.VarLenFeature(tf.string),\n 'f': tf.SparseFeature('idx', 'val', tf.float32, 10)\n })\n\n # Feed some dense and sparse values.\n instances = [{\n 'a': 100,\n 'b': 1.0,\n 'c': [2.0],\n 'd': [[1.0, 2.0], [3.0, 4.0]],\n 'e': ['doe', 'a', 'deer'],\n 'f': ([2, 4, 8], [10.0, 20.0, 30.0])\n }, {\n 'a': 100,\n 'b': 2.0,\n 'c': [4.0],\n 'd': [[5.0, 6.0], [7.0, 8.0]],\n 'e': ['a', 'female', 'deer'],\n 'f': ([], [])\n }]\n\n feed_dict = impl_helper.make_feed_dict(tensors, schema, instances)\n self.assertSetEqual(set(six.iterkeys(feed_dict)),\n 
set(six.itervalues(tensors)))\n self.assertAllEqual(feed_dict[tensors['a']], [100, 100])\n self.assertAllEqual(feed_dict[tensors['b']], [1.0, 2.0])\n self.assertAllEqual(feed_dict[tensors['c']], [[2.0], [4.0]])\n self.assertAllEqual(feed_dict[tensors['d']], [[[1.0, 2.0], [3.0, 4.0]],\n [[5.0, 6.0], [7.0, 8.0]]])\n self.assertSparseValuesEqual(feed_dict[tensors['e']], tf.SparseTensorValue(\n indices=[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)],\n values=['doe', 'a', 'deer', 'a', 'female', 'deer'],\n dense_shape=(2, 3)))\n self.assertSparseValuesEqual(feed_dict[tensors['f']], tf.SparseTensorValue(\n indices=[(0, 2), (0, 4), (0, 8)], values=[10.0, 20.0, 30.0],\n dense_shape=(2, 10)))\n\n # Feed numpy versions of everything.\n instances = [{\n 'a': np.int64(100),\n 'b': np.array(1.0, np.float32),\n 'c': np.array([2.0], np.float32),\n 'd': np.array([[1.0, 2.0], [3.0, 4.0]], np.float32),\n 'e': ['doe', 'a', 'deer'],\n 'f': (np.array([2, 4, 8]), np.array([10.0, 20.0, 30.0])),\n }, {\n 'a': np.int64(100),\n 'b': np.array(2.0, np.float32),\n 'c': np.array([4.0], np.float32),\n 'd': np.array([[5.0, 6.0], [7.0, 8.0]], np.float32),\n 'e': ['a', 'female', 'deer'],\n 'f': (np.array([], np.int32), np.array([], np.float32))\n }]\n\n feed_dict = impl_helper.make_feed_dict(tensors, schema, instances)\n self.assertSetEqual(set(six.iterkeys(feed_dict)),\n set(six.itervalues(tensors)))\n self.assertAllEqual(feed_dict[tensors['a']], [100, 100])\n self.assertAllEqual(feed_dict[tensors['b']], [1.0, 2.0])\n self.assertAllEqual(feed_dict[tensors['c']], [[2.0], [4.0]])\n self.assertAllEqual(feed_dict[tensors['d']], [[[1.0, 2.0], [3.0, 4.0]],\n [[5.0, 6.0], [7.0, 8.0]]])\n self.assertSparseValuesEqual(feed_dict[tensors['e']], tf.SparseTensorValue(\n indices=[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)],\n values=['doe', 'a', 'deer', 'a', 'female', 'deer'],\n dense_shape=(2, 3)))\n self.assertSparseValuesEqual(feed_dict[tensors['f']], tf.SparseTensorValue(\n indices=[(0, 2), (0, 4), (0, 8)], values=[10.0, 20.0, 30.0],\n dense_shape=(2, 10)))\n\n # Feed some empty sparse values\n instances = [{\n 'a': 100,\n 'b': 5.0,\n 'c': [1.0],\n 'd': [[1.0, 2.0], [3.0, 4.0]],\n 'e': [],\n 'f': ([], [])\n }]\n feed_dict = impl_helper.make_feed_dict(tensors, schema, instances)\n self.assertSparseValuesEqual(feed_dict[tensors['e']], tf.SparseTensorValue(\n indices=np.empty([0, 2], np.int64), values=[], dense_shape=(1, 0)))\n self.assertSparseValuesEqual(feed_dict[tensors['f']], tf.SparseTensorValue(\n indices=np.empty([0, 2], np.int64), values=[], dense_shape=(1, 10)))\n\n def testMakeFeedDictError(self):\n # Missing features.\n tensors = {\n 'a': tf.placeholder(tf.int64),\n 'b': tf.placeholder(tf.int64)\n }\n schema = self.toSchema({\n 'a': tf.FixedLenFeature([1], tf.int64),\n 'b': tf.FixedLenFeature([1], tf.int64)\n })\n instances = [{'a': 100}]\n with self.assertRaises(KeyError):\n impl_helper.make_feed_dict(tensors, schema, instances)\n\n def testMalformedSparseFeatures(self):\n tensors = {\n 'a': tf.sparse_placeholder(tf.int64),\n }\n\n # Invalid indices.\n schema = self.toSchema({\n 'a': tf.SparseFeature('idx', 'val', tf.float32, 10)\n })\n instances = [{'a': ([-1, 2], [1.0, 2.0])}]\n with self.assertRaisesRegexp(\n ValueError, 'has index .* out of range'):\n impl_helper.make_feed_dict(tensors, schema, instances)\n\n instances = [{'a': ([11, 1], [1.0, 2.0])}]\n with self.assertRaisesRegexp(\n ValueError, 'has index .* out of range'):\n impl_helper.make_feed_dict(tensors, schema, instances)\n\n # Indices and values of 
different lengths.\n schema = self.toSchema({\n 'a': tf.SparseFeature('idx', 'val', tf.float32, 10)\n })\n instances = [{'a': ([1, 2], [1])}]\n with self.assertRaisesRegexp(\n ValueError, 'indices and values of different lengths'):\n impl_helper.make_feed_dict(tensors, schema, instances)\n\n # Tuple of the wrong length.\n instances = [{'a': ([1], [2], [3])}]\n with self.assertRaisesRegexp(\n ValueError, 'too many values to unpack'):\n impl_helper.make_feed_dict(tensors, schema, instances)\n\n def testMakeOutputDict(self):\n schema = self.toSchema({\n 'a': tf.FixedLenFeature(None, tf.int64),\n 'b': tf.FixedLenFeature([], tf.float32),\n 'c': tf.FixedLenFeature([1], tf.float32),\n 'd': tf.FixedLenFeature([2, 2], tf.float32),\n 'e': tf.VarLenFeature(tf.string),\n 'f': tf.SparseFeature('idx', 'val', tf.float32, 10)\n })\n\n fetches = {\n 'a': np.array([100, 200]),\n 'b': np.array([10.0, 20.0]),\n 'c': np.array([[40.0], [80.0]]),\n 'd': np.array([[[1.0, 2.0], [3.0, 4.0]],\n [[5.0, 6.0], [7.0, 8.0]]]),\n 'e': tf.SparseTensorValue(\n indices=np.array([(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]),\n values=np.array(['doe', 'a', 'deer', 'a', 'female', 'deer']),\n dense_shape=(2, 3)),\n 'f': tf.SparseTensorValue(\n indices=np.array([(0, 2), (0, 4), (0, 8), (1, 4), (1, 8)]),\n values=np.array([10.0, 20.0, 30.0, 40.0, 50.0]),\n dense_shape=(2, 20))\n }\n\n instance_dicts = impl_helper.to_instance_dicts(schema, fetches)\n self.assertEqual(2, len(instance_dicts))\n self.assertSetEqual(set(six.iterkeys(instance_dicts[0])),\n set(['a', 'b', 'c', 'd', 'e', 'f']))\n self.assertAllEqual(instance_dicts[0]['a'], 100)\n self.assertAllEqual(instance_dicts[0]['b'], 10.0)\n self.assertAllEqual(instance_dicts[0]['c'], [40.0])\n self.assertAllEqual(instance_dicts[0]['d'], [[1.0, 2.0], [3.0, 4.0]])\n self.assertAllEqual(instance_dicts[0]['e'], ['doe', 'a', 'deer'])\n self.assertEqual(len(instance_dicts[0]['f']), 2)\n self.assertAllEqual(instance_dicts[0]['f'][0], [2, 4, 8])\n self.assertAllEqual(instance_dicts[0]['f'][1], [10.0, 20.0, 30.0])\n self.assertAllEqual(instance_dicts[1]['a'], 200)\n self.assertAllEqual(instance_dicts[1]['b'], 20.0)\n self.assertAllEqual(instance_dicts[1]['c'], [80.0])\n self.assertAllEqual(instance_dicts[1]['d'], [[5.0, 6.0], [7.0, 8.0]])\n self.assertAllEqual(instance_dicts[1]['e'], ['a', 'female', 'deer'])\n self.assertEqual(len(instance_dicts[1]['f']), 2)\n self.assertAllEqual(instance_dicts[1]['f'][0], [4, 8])\n self.assertAllEqual(instance_dicts[1]['f'][1], [40.0, 50.0])\n\n def testMakeOutputDictErrorSparse(self):\n schema = self.toSchema({'a': tf.VarLenFeature(tf.string)})\n\n # SparseTensor that cannot be represented as VarLenFeature.\n fetches = {\n 'a': tf.SparseTensorValue(indices=np.array([(0, 2), (0, 4), (0, 8)]),\n values=np.array([10.0, 20.0, 30.0]),\n dense_shape=(1, 20))\n }\n with self.assertRaisesRegexp(\n ValueError, 'cannot be decoded by ListColumnRepresentation'):\n impl_helper.to_instance_dicts(schema, fetches)\n\n # SparseTensor of invalid rank.\n fetches = {\n 'a': tf.SparseTensorValue(\n indices=np.array([(0, 0, 1), (0, 0, 2), (0, 0, 3)]),\n values=np.array([10.0, 20.0, 30.0]),\n dense_shape=(1, 10, 10))\n }\n with self.assertRaisesRegexp(\n ValueError, 'cannot be decoded by ListColumnRepresentation'):\n impl_helper.to_instance_dicts(schema, fetches)\n\n # SparseTensor with indices that are out of order.\n fetches = {\n 'a': tf.SparseTensorValue(indices=np.array([(0, 2), (2, 4), (1, 8)]),\n values=np.array([10.0, 20.0, 30.0]),\n dense_shape=(3, 20))\n }\n with 
self.assertRaisesRegexp(\n ValueError, 'Encountered out-of-order sparse index'):\n impl_helper.to_instance_dicts(schema, fetches)\n\n # SparseTensors with different batch dimension sizes.\n schema = self.toSchema({\n 'a': tf.VarLenFeature(tf.string),\n 'b': tf.VarLenFeature(tf.string)\n })\n fetches = {\n 'a': tf.SparseTensorValue(indices=np.array([(0, 0)]),\n values=np.array([10.0]),\n dense_shape=(1, 20)),\n 'b': tf.SparseTensorValue(indices=np.array([(0, 0)]),\n values=np.array([10.0]),\n dense_shape=(2, 20))\n }\n with self.assertRaisesRegexp(\n ValueError,\n r'Inconsistent batch sizes: \"\\w\" had batch dimension \\d, \"\\w\" had batch'\n r' dimension \\d'):\n impl_helper.to_instance_dicts(schema, fetches)\n\n def testMakeOutputDictErrorDense(self):\n schema = self.toSchema({\n 'a': tf.FixedLenFeature((), tf.string),\n 'b': tf.FixedLenFeature((), tf.string)\n })\n # Tensors with different batch dimension sizes.\n fetches = {\n 'a': np.array([1]),\n 'b': np.array([1, 2])\n }\n with self.assertRaisesRegexp(\n ValueError,\n r'Inconsistent batch sizes: \"\\w\" had batch dimension \\d, \"\\w\" had batch'\n r' dimension \\d'):\n impl_helper.to_instance_dicts(schema, fetches)\n\n def testCreatePhasesWithApplyFunctionWithOverlappingInputsAndOutputs(self):\n string_placeholder = tf.placeholder(tf.string, shape=(None,))\n def degenerate_function(x):\n \"\"\"A function whose input tensors and output tensors overlap.\"\"\"\n return x\n api.apply_function(degenerate_function, string_placeholder)\n\n phases = impl_helper.create_phases()\n self.assertEqual(len(phases), 0)\n\n def testCreatePhasesWithMultipleLevelsOfAnalyzers(self):\n # Create graph similar to calling scale_to_0_1 except involving multiple\n # interleavings of analyzers and transforms.\n float_placeholder = tf.placeholder(tf.float32, shape=(None,))\n scaled_to_0 = float_placeholder - analyzers.min(float_placeholder)\n scaled_to_0 / analyzers.max(scaled_to_0) # pylint: disable=expression-not-assigned\n\n phases = impl_helper.create_phases()\n self.assertEqual(len(phases), 2)\n self.assertEqual(len(phases[0].analyzers), 1)\n self.assertEqual(len(phases[1].analyzers), 1)\n\n def testCreatePhasesWithTable(self):\n # Create a graph with table that can only be run after the first analyzer\n # has run. 
Note converting an integerized string into a float doesn't make\n # much sense, but is a legal tensorflow computation.\n string_placeholder = tf.placeholder(tf.string, shape=(None,))\n integerized = mappers.string_to_int(string_placeholder)\n integerized = tf.to_float(integerized)\n integerized / analyzers.max(integerized) # pylint: disable=expression-not-assigned\n\n phases = impl_helper.create_phases()\n self.assertEqual(len(phases), 2)\n self.assertEqual(len(phases[0].analyzers), 1)\n self.assertEqual(len(phases[1].analyzers), 1)\n self.assertEqual(len(phases[0].table_initializers), 0)\n self.assertEqual(len(phases[1].table_initializers), 1)\n\n def testCreatePhasesWithUnwrappedTable(self):\n # Create a graph with a table that is not wrapped in `apply_function`.\n string_placeholder = tf.placeholder(tf.string, shape=(None,))\n table = lookup.index_table_from_tensor(['a', 'b'])\n table.lookup(string_placeholder)\n\n with self.assertRaisesRegexp(ValueError, 'Found table initializers'):\n impl_helper.create_phases()\n\n def testCreatePhasesWithControlFlowOpsWrappedInApplyFunction(self):\n int_placeholder = tf.placeholder(tf.int64, shape=(None,))\n int_placeholder_minus_10 = api.apply_function(_subtract_ten,\n int_placeholder)\n # We need to call an analyzer after the loop because only the transitive\n # parents of analyzers are inspected by create_phases\n mappers.scale_to_0_1(int_placeholder_minus_10)\n\n phases = impl_helper.create_phases()\n self.assertEqual(len(phases), 1)\n # tft.scale_to_0_1 uses a single analyzer: analyzers._min_and_max.\n self.assertEqual(len(phases[0].analyzers), 1)\n\n def testCreatePhasesWithControlFlowOpsNotWrappedInApplyFunction(self):\n int_placeholder = tf.placeholder(tf.int64, shape=(None,))\n int_placeholder_minus_10 = _subtract_ten(int_placeholder)\n # We need to call an analyzer after the loop because only the transitive\n # parents of analyzers are inspected by create_phases\n mappers.scale_to_0_1(int_placeholder_minus_10)\n\n with self.assertRaisesRegexp(ValueError, 'Cycle detected'):\n impl_helper.create_phases()\n\n def testCopyTensorsCopiesProducesDifferentTensors(self):\n tensors = {\n 'dense': tf.placeholder(tf.int64, (None,), name='my_dense_input'),\n 'sparse': tf.sparse_placeholder(tf.int64, name='my_sparse_input')\n }\n copied_tensors = impl_helper.copy_tensors(tensors)\n\n self.assertNotEqual(tensors['dense'],\n copied_tensors['dense'])\n self.assertNotEqual(tensors['sparse'].indices,\n copied_tensors['sparse'].indices)\n self.assertNotEqual(tensors['sparse'].values,\n copied_tensors['sparse'].values)\n self.assertNotEqual(tensors['sparse'].dense_shape,\n copied_tensors['sparse'].dense_shape)\n\n def testCopyTensorsProducesEquivalentTensors(self):\n tensors = {\n 'dense': tf.placeholder(tf.int64, (None,), name='my_dense_input'),\n 'sparse': tf.sparse_placeholder(tf.int64, name='my_sparse_input')\n }\n copied_tensors = impl_helper.copy_tensors(tensors)\n\n with tf.Session() as session:\n dense_value = [1, 2]\n sparse_value = tf.SparseTensorValue(\n indices=[[0, 0], [0, 2], [1, 1]],\n values=[3, 4, 5],\n dense_shape=[2, 3])\n sample_tensors = session.run(copied_tensors, feed_dict={\n tensors['dense']: dense_value,\n tensors['sparse']: sparse_value\n })\n self.assertAllEqual(sample_tensors['dense'], dense_value)\n self.assertAllEqual(sample_tensors['sparse'].indices,\n sparse_value.indices)\n self.assertAllEqual(sample_tensors['sparse'].values,\n sparse_value.values)\n self.assertAllEqual(sample_tensors['sparse'].dense_shape,\n 
sparse_value.dense_shape)\n\n\ndef _subtract_ten(x):\n \"\"\"Subtracts 10 from x using control flow ops.\n\n This function is equivalent to \"x - 10\" but uses a tf.while_loop, in order\n to test the use of functions that involve control flow ops.\n\n Args:\n x: A tensor of integral type.\n\n Returns:\n A tensor representing x - 10.\n \"\"\"\n def stop_condition(counter, x_minus_counter):\n del x_minus_counter # unused\n return tf.less(counter, 10)\n def iteration(counter, x_minus_counter):\n return tf.add(counter, 1), tf.add(x_minus_counter, -1)\n initial_values = [tf.constant(0), x]\n return tf.while_loop(stop_condition, iteration, initial_values)[1]\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "4440145",
"language": "Python",
"matching_score": 3.3384954929351807,
"max_stars_count": 0,
"path": "tensorflow_transform/impl_helper_test.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"The core public API of TFTransform. Provide functions to transform tensors.\n\nThe core tf.Transform API provides a way for the user to construct a function\nthat accepts and returns `Tensor`s. This function is built by composing regular\nfunctions built from TensorFlow ops, as well as special functions we refer to as\n`Analyzer`s. `Analyzer`s behave similarly to TensorFlow ops but require a full\npass over the whole dataset to compute their output value.\n\nThe user-defined preprocessing function should accept and return `Tensor`s that\nare batches from the dataset, whose batch size may vary. For example the\nfollowing preprocessing function centers the input 'x' while returning 'y'\nunchanged.\n\nimport tensorflow_transform as tft\n\ndef preprocessing_fn(inputs):\n x = inputs['x']\n y = inputs['y']\n\n # Apply the `mean` analyzer to obtain the mean x.\n x_mean = tft.mean(x)\n\n # Subtract the mean.\n x_centered = x - mean\n\n # Return a new dictionary containing x_centered, and y unchanged\n return {\n 'x_centered': x_centered,\n 'y': y\n }\n\nThis user-defined function then must be run using an implementation based on\nsome distributed computation framework. The canonical implementation uses\nApache Beam as the underlying framework. 
See beam/impl.py for how to use the\nBeam implementation.\n\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow_transform import analyzers\n\nFUNCTION_APPLICATION_COLLECTION = 'tft_function_applications'\n\n\nclass FunctionApplication(object):\n \"\"\"Contains data to help tf.Transform keep track of function applications.\"\"\"\n\n def __init__(self, fn, args):\n def _decompose_tensors(tensor_list):\n result = []\n for tensor in tensor_list:\n if isinstance(tensor, tf.SparseTensor):\n result.append(tensor.indices)\n result.append(tensor.values)\n result.append(tensor.dense_shape)\n else:\n result.append(tensor)\n return result\n\n def _copy_tensor(tensor):\n if isinstance(tensor, tf.SparseTensor):\n return tf.SparseTensor(\n tf.identity(tensor.indices),\n tf.identity(tensor.values),\n tf.identity(tensor.dense_shape))\n else:\n return tf.identity(tensor)\n\n # Apply fn to its args, keeping track of any table initializers that are\n # added while fn is running, and also checking that no analyzers are added\n # while fn is running.\n all_table_initializers = tf.get_collection_ref(\n tf.GraphKeys.TABLE_INITIALIZERS)\n all_analyzers = tf.get_collection_ref(analyzers.ANALYZER_COLLECTION)\n original_num_table_initializers = len(all_table_initializers)\n original_num_analyzers = len(all_analyzers)\n output = fn(*args)\n if len(all_analyzers) != original_num_analyzers:\n raise ValueError(\n 'One or more `Analyzer`s were created while inside '\n 'FunctionApplication.__init__')\n\n # Set inputs and outputs of this op, flattening inputs and outputs into a\n # list of tensors, but storing outputs in the original format for the return\n # value of `apply_function`.\n self._table_initializers = all_table_initializers[\n original_num_table_initializers:]\n self._inputs = _decompose_tensors(args)\n # When traversing the graph, there isn't a clean way to handle `Map`s whose\n # inputs and outputs overlap. Therefore we apply tf.identity to all outputs\n # to ensure the outputs and inputs don't overlap.\n if isinstance(output, tuple):\n self._user_output = [_copy_tensor(tensor) for tensor in output]\n self._outputs = _decompose_tensors(self._user_output)\n else:\n self._user_output = _copy_tensor(output)\n self._outputs = _decompose_tensors([self._user_output])\n\n tf.add_to_collection(FUNCTION_APPLICATION_COLLECTION, self)\n\n @property\n def user_output(self):\n \"\"\"Outputs in the same format as the original return value of fn.\"\"\"\n return self._user_output\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def outputs(self):\n return self._outputs\n\n @property\n def table_initializers(self):\n return self._table_initializers\n\n\ndef apply_function(fn, *args):\n \"\"\"Apply a function to its args in a way that tf.Transform can track.\n\n Functions that involve tables or control flow ops must be wrapped in\n apply_function. E.g.\n\n def preprocessing_fn(inputs):\n ...\n label = inputs['label']\n ...\n def _convert_label(x):\n table = lookup.index_table_from_tensor(['bad', 'good'])\n return table.lookup(x)\n\n label = api.apply_function(_convert_label, x) # Works\n label = _convert_label(x) # Doesn't work.\n\n The reason this function is needed is so that tf.Transform knows to treat the\n wrapped function as a single unit, and not try to trace the control flow in\n TensorFlow by analyzing the ops that make up the function. 
This function\n does not need to be used when calling helper functions in mappers.py as those\n functions already do the wrapping themselves.\n\n Args:\n fn: The function to apply\n *args: The arguments to apply `fn` to.\n\n Returns:\n The results of applying fn.\n \"\"\"\n return FunctionApplication(fn, args).user_output\n\n\n_TF_METADATA_TENSORS_COLLECTION = 'tft_metadata_tensors'\n_TF_METADATA_COLUMN_SCHEMAS_COLLECTION = 'tft_metadata_schemas'\n\n\ndef set_column_schema(tensor, column_schema):\n \"\"\"Sets the schema of a `Tensor` or `SparseTensor`.\"\"\"\n tf.add_to_collection(_TF_METADATA_TENSORS_COLLECTION, tensor)\n tf.add_to_collection(_TF_METADATA_COLUMN_SCHEMAS_COLLECTION, column_schema)\n\n\ndef get_column_schemas():\n \"\"\"Gets a dict from `Tensor` or `SparseTensor`s to `ColumnSchema`s.\"\"\"\n return dict(zip(\n tf.get_collection(_TF_METADATA_TENSORS_COLLECTION),\n tf.get_collection(_TF_METADATA_COLUMN_SCHEMAS_COLLECTION)))\n",
"id": "7014133",
"language": "Python",
"matching_score": 1.879988193511963,
"max_stars_count": 0,
"path": "tensorflow_transform/api.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Functions that involve a full pass over the dataset.\n\nThis module contains functions that are used in the preprocessing function, to\ndefine a full pass operation such as computing the sum, min, max or unique\nvalues of a tensor over the entire dataset. This is implemented by a reduction\noperation in the Beam implementation.\n\nFrom the user's point of view, an analyzer appears as a regular TensorFlow\nfunction, i.e. it accepts and returns tensors. However it is represented in\nthe graph as a `Analyzer` which is not a TensorFlow op, but a placeholder for\nthe computation that takes place outside of TensorFlow.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\n\nimport numpy as np\nimport tensorflow as tf\n\n\nANALYZER_COLLECTION = 'tft_analyzers'\nVOCAB_FILENAME_PREFIX = 'vocab_'\nVOCAB_FREQUENCY_FILENAME_PREFIX = 'vocab_frequency_'\n\n\nclass Analyzer(object):\n \"\"\"An operation-like class for full-pass analyses of data.\n\n An Analyzer is like a tf.Operation except that it requires computation over\n the full dataset. E.g. sum(my_tensor) will compute the sum of the value of\n my_tensor over all instances in the dataset. The Analyzer class contains the\n inputs to this computation, and placeholders which will later be converted to\n constants during a call to AnalyzeDataset.\n\n Args:\n inputs: The inputs to the analyzer.\n output_dtype_shape_and_is_asset: List of tuples of `(DType, Shape, bool)`\n for each output. A tf.placeholder with the given DType and Shape will be\n constructed to represent the output of the analyzer, and this placeholder\n will eventually be replaced by the actual value of the analyzer. The\n boolean value states whether this Tensor represents an asset filename or\n not.\n spec: A description of the computation to be done.\n name: Similar to a TF op name. 
Used to define a unique scope for this\n analyzer, which can be used for debugging info.\n\n Raises:\n ValueError: If the inputs are not all `Tensor`s.\n \"\"\"\n\n def __init__(self, inputs, output_dtype_shape_and_is_asset, spec, name):\n for tensor in inputs:\n if not isinstance(tensor, tf.Tensor):\n raise ValueError('Analyzers can only accept `Tensor`s as inputs')\n self._inputs = inputs\n self._outputs = []\n self._output_is_asset_map = {}\n with tf.name_scope(name) as scope:\n self._name = scope\n for dtype, shape, is_asset in output_dtype_shape_and_is_asset:\n output_tensor = tf.placeholder(dtype, shape)\n if is_asset and output_tensor.dtype != tf.string:\n raise ValueError(('Tensor {} cannot represent an asset, because it '\n 'is not a string.').format(output_tensor.name))\n self._outputs.append(output_tensor)\n self._output_is_asset_map[output_tensor] = is_asset\n self._spec = spec\n tf.add_to_collection(ANALYZER_COLLECTION, self)\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def outputs(self):\n return self._outputs\n\n @property\n def spec(self):\n return self._spec\n\n @property\n def name(self):\n return self._name\n\n def output_is_asset(self, output_tensor):\n return self._output_is_asset_map[output_tensor]\n\n\nclass CombinerSpec(object):\n \"\"\"Analyze using combiner function.\n\n This object mirrors a beam.CombineFn, that will receive a beam PCollection\n representing the batched input tensors.\n \"\"\"\n\n def create_accumulator(self):\n \"\"\"Return a fresh, empty accumulator.\n\n Returns: An empty accumulator. This can be an Python value.\n \"\"\"\n raise NotImplementedError\n\n def add_input(self, accumulator, batch_values):\n \"\"\"Return result of folding a batch of inputs into accumulator.\n\n Args:\n accumulator: the current accumulator\n batch_values: A list of ndarrays representing the values of the inputs for\n a batch, which should be added to the accumulator.\n\n Returns: An accumulator that includes the batch of inputs.\n \"\"\"\n raise NotImplementedError\n\n def merge_accumulators(self, accumulators):\n \"\"\"Merges several accumulators to a single accumulator value.\n\n Args:\n accumulators: the accumulators to merge\n\n Returns: The sole merged accumulator.\n \"\"\"\n raise NotImplementedError\n\n def extract_output(self, accumulator):\n \"\"\"Return result of converting accumulator into the output value.\n\n Args:\n accumulator: the final accumulator value. Should be a list of ndarrays.\n\n Returns: A list of ndarrays representing the result of this combiner.\n \"\"\"\n raise NotImplementedError\n\n\ndef combine_analyzer(inputs, output_dtypes, output_shapes, combiner_spec, name):\n \"\"\"Applies the combiner over the whole dataset.\n\n Args:\n inputs: A list of input `Tensor`s or `SparseTensor`s.\n output_dtypes: The list of dtypes of the output of the analyzer.\n output_shapes: The list of dtypes of the output of the analyzer. Must have\n the same length as output_dtypes.\n combiner_spec: A subclass of CombinerSpec.\n name: Similar to a TF op name. Used to define a unique scope for this\n analyzer, which can be used for debugging info.\n\n Returns:\n A list of `Tensor`s representing the combined values. These will have\n `dtype` and `shape` given by `output_dtypes` and `output_shapes`. 
These\n dtypes and shapes must be compatible with the combiner_spec.\n\n Raises:\n ValueError: If output_dtypes and output_shapes have different lengths.\n \"\"\"\n if len(output_dtypes) != len(output_shapes):\n raise ValueError('output_dtypes (%r) and output_shapes (%r) had different'\n ' lengths' % output_dtypes, output_shapes)\n return Analyzer(\n inputs,\n [(output_dtype, output_shape, False)\n for output_dtype, output_shape in zip(output_dtypes, output_shapes)],\n combiner_spec,\n name).outputs\n\n\nclass _NumPyCombinerSpec(CombinerSpec):\n \"\"\"Combines the PCollection only on the 0th dimension using nparray.\"\"\"\n\n def __init__(self, fn, reduce_instance_dims):\n self._fn = fn\n self._reduce_instance_dims = reduce_instance_dims\n\n def create_accumulator(self):\n return None\n\n def add_input(self, accumulator, batch_values):\n if self._reduce_instance_dims:\n reduced_values = [self._fn(batch_value) for batch_value in batch_values]\n else:\n reduced_values = [self._fn(batch_value, axis=0)\n for batch_value in batch_values]\n if accumulator is None:\n return reduced_values\n else:\n return [\n self._fn((sub_accumulator, reduced_value), axis=0)\n for sub_accumulator, reduced_value\n in zip(accumulator, reduced_values)]\n\n def merge_accumulators(self, accumulators):\n # numpy's sum, min, max, etc functions operate on array-like objects, but\n # not arbitrary iterables. Convert the provided accumulators into a list\n return [\n self._fn(list(sub_accumulators), axis=0)\n for sub_accumulators in zip(*accumulators)]\n\n def extract_output(self, accumulator):\n return accumulator\n\n\ndef _numeric_combine(inputs, fn, reduce_instance_dims=True, name=None):\n \"\"\"Apply a reduction, defined by a numpy function to multiple inputs.\n\n Args:\n inputs: A list of tensors, which will be indpendently reduced.\n fn: A function to reduce tensors across instances/batches, to get a single\n output.\n reduce_instance_dims: By default collapses the batch and instance dimensions\n to arrive at a single scalar output. If False, only collapses the batch\n dimension and outputs a vector of the same shape as the input.\n name: (Optional) A name for this operation.\n\n Returns:\n A list of tensors with the same length as `inputs`, representing the\n input tensors that have been reduced by `fn` across instances and\n batches.\n \"\"\"\n for x in inputs:\n if not isinstance(x, tf.Tensor):\n raise TypeError('Expected a Tensor, but got %r' % x)\n\n if reduce_instance_dims:\n # If reducing over all dimensions, result is scalar.\n shapes = [() for _ in inputs]\n else:\n # If reducing over batch dimensions, with known shape, the result will be\n # the same shape as the input, but without the batch. If reducing over\n # batch dimensions, with unknown shape, the result will also have unknown\n # shape.\n shapes = [x.shape.as_list()[1:] if x.shape.dims is not None else None\n for x in inputs]\n return combine_analyzer(\n inputs,\n [x.dtype for x in inputs],\n shapes,\n _NumPyCombinerSpec(fn, reduce_instance_dims),\n name if name is not None else fn.__name__)\n\n\ndef min(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n \"\"\"Computes the minimum of the values of a `Tensor` over the whole dataset.\n\n Args:\n x: A `Tensor`.\n reduce_instance_dims: By default collapses the batch and instance dimensions\n to arrive at a single scalar output. 
If False, only collapses the batch\n dimension and outputs a `Tensor` of the same shape as the input.\n name: (Optional) A name for this operation.\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n return _numeric_combine([x], np.min, reduce_instance_dims, name)[0]\n\n\ndef max(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n \"\"\"Computes the maximum of the values of a `Tensor` over the whole dataset.\n\n Args:\n x: A `Tensor`.\n reduce_instance_dims: By default collapses the batch and instance dimensions\n to arrive at a single scalar output. If False, only collapses the batch\n dimension and outputs a vector of the same shape as the input.\n name: (Optional) A name for this operation.\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n return _numeric_combine([x], np.max, reduce_instance_dims, name)[0]\n\n\ndef _min_and_max(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n with tf.name_scope(name, 'min_and_max'):\n # Unary minus op doesn't support tf.int64, so use 0 - x instead of -x.\n minus_x_min, x_max = _numeric_combine( # pylint: disable=unbalanced-tuple-unpacking\n [0 - x, x], np.max, reduce_instance_dims)\n return 0 - minus_x_min, x_max\n\n\ndef sum(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n \"\"\"Computes the sum of the values of a `Tensor` over the whole dataset.\n\n Args:\n x: A `Tensor`.\n reduce_instance_dims: By default collapses the batch and instance dimensions\n to arrive at a single scalar output. If False, only collapses the batch\n dimension and outputs a vector of the same shape as the input.\n name: (Optional) A name for this operation.\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n return _numeric_combine([x], np.sum, reduce_instance_dims, name)[0]\n\n\ndef size(x, reduce_instance_dims=True, name=None):\n \"\"\"Computes the total size of instances in a `Tensor` over the whole dataset.\n\n Args:\n x: A `Tensor`.\n reduce_instance_dims: By default collapses the batch and instance dimensions\n to arrive at a single scalar output. If False, only collapses the batch\n dimension and outputs a vector of the same shape as the input.\n name: (Optional) A name for this operation.\n\n Returns:\n A `Tensor`. Has the same type as `x`.\n \"\"\"\n with tf.name_scope(name, 'size'):\n # Note: Calling `sum` defined in this module, not the builtin.\n return sum(tf.ones_like(x), reduce_instance_dims)\n\n\ndef mean(x, reduce_instance_dims=True, name=None):\n \"\"\"Computes the mean of the values of a `Tensor` over the whole dataset.\n\n Args:\n x: A `Tensor`.\n reduce_instance_dims: By default collapses the batch and instance dimensions\n to arrive at a single scalar output. If False, only collapses the batch\n dimension and outputs a vector of the same shape as the input.\n name: (Optional) A name for this operation.\n\n Returns:\n A `Tensor` containing the mean. If `x` is floating point, the mean will\n have the same type as `x`. 
If `x` is integral, the output is cast to float32\n for int8 and int16 and float64 for int32 and int64 (similar to the behavior\n of tf.truediv).\n \"\"\"\n with tf.name_scope(name, 'mean'):\n # For now _numeric_combine will return a tuple with as many elements as the\n # input tuple.\n x_count, x_sum = _numeric_combine( # pylint: disable=unbalanced-tuple-unpacking\n [tf.ones_like(x), x], np.sum, reduce_instance_dims)\n return tf.divide(x_sum, x_count)\n\n\ndef var(x, reduce_instance_dims=True, name=None):\n \"\"\"Computes the variance of the values of a `Tensor` over the whole dataset.\n\n Uses the biased variance (0 delta degrees of freedom), as given by\n (x - mean(x))**2 / length(x).\n\n Args:\n x: A `Tensor`.\n reduce_instance_dims: By default collapses the batch and instance dimensions\n to arrive at a single scalar output. If False, only collapses the batch\n dimension and outputs a vector of the same shape as the input.\n name: (Optional) A name for this operation.\n\n Returns:\n A `Tensor` containing the variance. If `x` is floating point, the variance\n will have the same type as `x`. If `x` is integral, the output is cast to\n float32 for int8 and int16 and float64 for int32 and int64 (similar to the\n behavior of tf.truediv).\n \"\"\"\n with tf.name_scope(name, 'var'):\n # Note: Calling `mean`, `sum`, and `size` as defined in this module, not the\n # builtins.\n x_mean = mean(x, reduce_instance_dims)\n # x_mean will be float32 or float64, depending on type of x.\n squared_deviations = tf.square(tf.cast(x, x_mean.dtype) - x_mean)\n return mean(squared_deviations, reduce_instance_dims)\n\n\ndef _mean_and_var(x, reduce_instance_dims=True, name=None):\n \"\"\"More efficient combined `mean` and `var`. See `var`.\"\"\"\n with tf.name_scope(name, 'mean_and_var'):\n # Note: Calling `mean`, `sum`, and `size` as defined in this module, not the\n # builtins.\n x_mean = mean(x, reduce_instance_dims)\n # x_mean will be float32 or float64, depending on type of x.\n squared_deviations = tf.square(tf.cast(x, x_mean.dtype) - x_mean)\n x_var = mean(squared_deviations, reduce_instance_dims)\n return x_mean, x_var\n\n\nclass _UniquesSpec(object):\n \"\"\"Operation to compute unique values.\"\"\"\n\n def __init__(self, top_k, frequency_threshold,\n vocab_filename, store_frequency):\n self._top_k = top_k\n self._frequency_threshold = frequency_threshold\n self._vocab_filename = vocab_filename\n self._store_frequency = store_frequency\n\n @property\n def top_k(self):\n return self._top_k\n\n @property\n def frequency_threshold(self):\n return self._frequency_threshold\n\n @property\n def vocab_filename(self):\n return self._vocab_filename\n\n @property\n def store_frequency(self):\n return self._store_frequency\n\n\ndef sanitized_vocab_filename(filename=None, prefix=None):\n \"\"\"Generates a sanitized filename either from the given filename or the scope.\n\n If filename is specified, provide a sanitized version of the given filename.\n Otherwise generate a filename from the current scope. 
Note that it is the\n callers responsibility to ensure that filenames are unique across calls within\n a given preprocessing function.\n\n Args:\n filename: A filename with non-alpha characters replaced with underscores and\n spaces to hyphens.\n prefix: Prefix to use for the name of the vocab file, if filename\n is not given.\n\n Returns:\n A valid filename.\n\n Raises:\n ValueError: If neither filename and prefix are specified, or if both\n are specified.\n \"\"\"\n if filename is None and prefix is None:\n raise ValueError('Both filename and prefix cannot be None.')\n\n if filename is not None and prefix is not None:\n raise ValueError('Only one of filename or prefix can be specified.')\n\n if filename is None:\n filename = prefix + tf.get_default_graph().get_name_scope()\n # Replace non-alpha characters (excluding whitespaces) with '_'.\n filename = re.sub(r'[^\\w\\s-]', '_', filename).strip()\n # Replace whitespaces with '-'.\n return re.sub(r'[-\\s]+', '-', filename)\n\n\ndef uniques(x, top_k=None, frequency_threshold=None,\n vocab_filename=None, store_frequency=False, name=None):\n r\"\"\"Computes the unique values of a `Tensor` over the whole dataset.\n\n Computes The unique values taken by `x`, which can be a `Tensor` or\n `SparseTensor` of any size. The unique values will be aggregated over all\n dimensions of `x` and all instances.\n\n In case one of the tokens contains the '\\n' or '\\r' characters or is empty it\n will be discarded since we are currently writing the vocabularies as text\n files. This behavior will likely be fixed/improved in the future.\n\n The unique values are sorted by decreasing frequency and then decreasing\n lexicographical order.\n\n For large datasets it is highly recommended to either set frequency_threshold\n or top_k to control the size of the output, and also the run time of this\n operation.\n\n Args:\n x: An input `Tensor` or `SparseTensor` with dtype tf.string.\n top_k: Limit the generated vocabulary to the first `top_k` elements. If set\n to None, the full vocabulary is generated.\n frequency_threshold: Limit the generated vocabulary only to elements whose\n absolute frequency is >= to the supplied threshold. If set to None, the\n full vocabulary is generated. Absolute frequency means the number of\n occurences of the element in the dataset, as opposed to the proportion of\n instances that contain that element.\n vocab_filename: The file name for the vocabulary file. If none, the\n \"uniques\" scope name in the context of this graph will be used as the file\n name. If not None, should be unique within a given preprocessing function.\n NOTE To make your pipelines resilient to implementation details please\n set `vocab_filename` when you are using the vocab_filename on a downstream\n component.\n store_frequency: If True, frequency of the words is stored in the\n vocabulary file. 
Each line in the file will be of the form\n 'frequency word\\n'.\n name: (Optional) A name for this operation.\n\n Returns:\n The path name for the vocabulary file containing the unique values of `x`.\n\n Raises:\n ValueError: If `top_k` or `frequency_threshold` is negative.\n \"\"\"\n if top_k is not None:\n top_k = int(top_k)\n if top_k < 0:\n raise ValueError('top_k must be non-negative, but got: %r' % top_k)\n\n if frequency_threshold is not None:\n frequency_threshold = int(frequency_threshold)\n if frequency_threshold < 0:\n raise ValueError(\n 'frequency_threshold must be non-negative, but got: %r' %\n frequency_threshold)\n elif frequency_threshold <= 1:\n tf.logging.warn(\n 'frequency_threshold %d <= 1 is a no-op, use None instead.',\n frequency_threshold)\n\n if isinstance(x, tf.SparseTensor):\n x = x.values\n\n if x.dtype != tf.string:\n raise ValueError('expected tf.string but got %r' % x.dtype)\n\n with tf.name_scope(name, 'uniques'):\n if vocab_filename is not None:\n prefix = None\n elif store_frequency:\n prefix = VOCAB_FREQUENCY_FILENAME_PREFIX\n else:\n prefix = VOCAB_FILENAME_PREFIX\n\n # Make the file name path safe.\n vocab_filename = sanitized_vocab_filename(vocab_filename, prefix=prefix)\n\n spec = _UniquesSpec(top_k, frequency_threshold, vocab_filename,\n store_frequency)\n return Analyzer([x], [(tf.string, [], True)], spec, 'uniques').outputs[0]\n\n\nclass _QuantilesSpec(object):\n \"\"\"Operation to compute quantile boundaries.\"\"\"\n\n def __init__(self, epsilon, num_buckets):\n self._epsilon = epsilon\n self._num_buckets = num_buckets\n\n @property\n def epsilon(self):\n return self._epsilon\n\n @property\n def num_buckets(self):\n return self._num_buckets\n\n @property\n def bucket_dtype(self):\n return tf.float32\n\n\ndef quantiles(x, num_buckets, epsilon, name=None):\n \"\"\"Computes the quantile boundaries of a `Tensor` over the whole dataset.\n\n quantile boundaries are computed using approximate quantiles,\n and error tolerance is specified using `epsilon`. The boundaries divide the\n input tensor into approximately equal `num_buckets` parts.\n See go/squawd for details, and how to control the error due to approximation.\n\n Args:\n x: An input `Tensor` or `SparseTensor`.\n num_buckets: Values in the `x` are divided into approximately\n equal-sized buckets, where the number of buckets is num_buckets.\n This is a hint. The actual number of buckets computed can be\n less or more than the requested number. Use the generated metadata to\n find the computed number of buckets.\n epsilon: Error tolerance, typically a small fraction close to zero\n (e.g. 0.01). Higher values of epsilon increase the quantile approximation,\n and hence result in more unequal buckets, but could improve performance,\n and resource consumption. Some measured results on memory consumption:\n For epsilon = 0.001, the amount of memory for each buffer to hold the\n summary for 1 trillion input values is ~25000 bytes. If epsilon is\n relaxed to 0.01, the buffer size drops to ~2000 bytes for the same input\n size. If we use a strict epsilon value of 0, the buffer size is same size\n as the input, because the intermediate stages have to remember every input\n and the quantile boundaries can be found only after an equivalent to a\n full sorting of input. The buffer size also determines the amount of work\n in the different stages of the beam pipeline, in general, larger epsilon\n results in fewer and smaller stages, and less time. 
For more performance\n trade-offs see also http://web.cs.ucla.edu/~weiwang/paper/SSDBM07_2.pdf\n name: (Optional) A name for this operation.\n\n Returns:\n The bucket boundaries represented as a list, with num_bucket-1 elements\n See bucket_dtype() above for type of bucket boundaries.\n \"\"\"\n\n with tf.name_scope(name, 'quantiles'):\n spec = _QuantilesSpec(epsilon, num_buckets)\n quantile_boundaries = Analyzer(\n [x], [(spec.bucket_dtype, [1, None], False)], spec,\n 'quantiles').outputs[0]\n\n # The Analyzer returns a 2d matrix of 1*num_buckets. Below, we remove\n # the first dimension and return the boundaries as a simple 1d list.\n return quantile_boundaries[0:1]\n\n\nclass _CovarianceCombinerSpec(CombinerSpec):\n \"\"\"Combines the PCollection to compute the biased covariance matrix.\"\"\"\n\n def __init__(self, dtype=tf.float64):\n \"\"\"Store the dtype for np arrays/matrices for precision.\"\"\"\n self._output_dtype = dtype\n self._np_dtype = dtype.as_numpy_dtype\n\n def create_accumulator(self):\n \"\"\"Create an accumulator with all zero entries.\"\"\"\n return None\n\n def add_input(self, accumulator, batch_values):\n \"\"\"Compute sum of input cross-terms, sum of inputs, and count.\n\n The cross terms for a numeric 1d array x are given by the set:\n {z_ij = x_i * x_j for all indices i and j}. This is stored as a 2d array.\n Since next_input is an array of 1d numeric arrays (i.e. a 2d array),\n matmul(transpose(next_input), next_input) will automatically sum up\n the cross terms of each 1d array in next_input.\n\n Args:\n accumulator: running sum of cross terms, input vectors, and count\n batch_values: entries from the pipeline, which must be single element list\n containing a 2d array\n representing multiple 1d arrays\n\n Returns:\n An accumulator with next_input considered in its running list of\n sum_product, sum_vectors, and count of input rows.\n \"\"\"\n # Expect a single input representing the batch for the input tensor.\n batch_value, = batch_values\n\n assert len(np.shape(batch_value)) == 2\n\n batch_cross_terms = np.matmul(\n np.transpose(batch_value),\n batch_value\n ).astype(self._np_dtype)\n\n batch_sum = np.array(np.sum(batch_value, axis=0), self._np_dtype)\n batch_count = np.shape(batch_value)[0]\n\n if accumulator is None:\n return [batch_cross_terms, batch_sum, batch_count]\n else:\n sum_product, sum_vectors, count = accumulator\n return [sum_product + batch_cross_terms,\n sum_vectors + batch_sum,\n count + batch_count]\n\n def merge_accumulators(self, accumulators):\n \"\"\"Sums values in each accumulator entry.\"\"\"\n # Because each accumulator contains multiple arrays of different dimensions,\n # the np.sum operation must be explicitly used across the entries within\n # each accumulator. np.sum(list(accumulators)) does not work.\n\n sum_product = np.sum(\n [accumulator[0] for accumulator in accumulators], axis=0)\n sum_vectors = np.sum(\n [accumulator[1] for accumulator in accumulators], axis=0)\n count = np.sum([accumulator[2] for accumulator in accumulators], axis=0)\n return [sum_product, sum_vectors, count]\n\n def extract_output(self, accumulator):\n \"\"\"Run covariance logic on sum_product, sum of input vectors, and count.\n\n The formula used to compute the covariance is cov(x) = E(xx^T) - uu^T,\n where x is the original input to the combiner, and u = mean(x).\n E(xx^T) is computed by dividing sum of cross terms (index 0) by count\n (index 2). 
u is computed by taking the sum of rows (index 1) and dividing by\n the count (index 2).\n\n Args:\n accumulator: final accumulator as a list of the sum of cross-terms matrix,\n sum of input vectors, and count.\n\n Returns:\n A list containing a single 2d ndarray, the covariance matrix.\n \"\"\"\n\n sum_product, sum_vectors, count = accumulator\n expected_cross_terms = sum_product / count\n expected_terms = sum_vectors / count\n\n return [expected_cross_terms - np.outer(expected_terms, expected_terms)]\n\n\ndef covariance(x, dtype, name=None):\n \"\"\"Computes the covariance matrix over the whole dataset.\n\n The covariance matrix M is defined as follows:\n Let x[:j] be a tensor of the jth element of all input vectors in x, and let\n u_j = mean(x[:j]). The entry M[i,j] = E[(x[:i] - u_i)(x[:j] - u_j)].\n Notice that the diagonal entries correspond to variances of individual\n elements in the vector, i.e. M[i,i] corresponds to the variance of x[:i].\n\n Args:\n x: A rank-2 `Tensor`, 0th dim are rows, 1st dim are indices in each input\n vector.\n dtype: numpy dtype of entries in the returned matrix.\n name: (Optional) A name for this operation.\n\n Raises:\n ValueError: if input is not a rank-2 Tensor.\n\n Returns:\n A rank-2 (matrix) covariance `Tensor`\n \"\"\"\n\n if not isinstance(x, tf.Tensor):\n raise TypeError('Expected a Tensor, but got %r' % x)\n\n x.shape.assert_has_rank(2)\n\n input_dim = x.shape.as_list()[1]\n shape = (input_dim, input_dim)\n\n spec = _CovarianceCombinerSpec(dtype)\n return combine_analyzer(\n [x], [dtype], [shape], spec,\n name if name is not None else 'covariance')[0]\n\n\nclass _PCACombinerSpec(_CovarianceCombinerSpec):\n\n def __init__(self, output_dim=None, dtype=tf.float64):\n \"\"\"Store pca output dimension, and dtype for precision.\"\"\"\n super(_PCACombinerSpec, self).__init__(dtype=dtype)\n self._output_dim = output_dim\n\n def extract_output(self, accumulator):\n \"\"\"Compute PCA the accumulated data using the biased covariance matrix.\n\n Following the covariance computation in _CovarianceCombinerSpec,\n this method runs eigenvalue decomposition on the covariance matrix,\n sorts eigenvalues in decreasing order, and returns the first output_dim\n corresponding eigenvectors (principal components) as a matrix.\n\n Args:\n accumulator: final accumulator as a list of the sum of cross-terms matrix,\n sum of input vectors, and count.\n\n Returns:\n A list containing a matrix of shape (input_dim, output_dim).\n \"\"\"\n sum_product, sum_vectors, count = accumulator\n expected_cross_terms = sum_product / count\n expected_terms = sum_vectors / count\n cov = expected_cross_terms - np.outer(expected_terms, expected_terms)\n vals, vecs = np.linalg.eigh(cov)\n sorted_vecs = vecs[:, np.argsort(vals)[::-1]]\n if self._output_dim is None:\n return [sorted_vecs]\n else:\n return [sorted_vecs[:, :self._output_dim]]\n\n\ndef pca(x, output_dim, dtype, name=None):\n \"\"\"Computes pca on the dataset using biased covariance.\n\n The pca analyzer computes output_dim orthonormal vectors that capture\n directions/axes corresponding to the highest variances in the input vectors of\n x. The output vectors are returned as a rank-2 tensor with shape\n (input_dim, output_dim), where the 0th dimension are the components of each\n output vector, and the 1st dimension are the output vectors representing\n orthogonal directions in the input space, sorted in order of decreasing\n variances.\n\n The output rank-2 tensor (matrix) serves a useful transform purpose. 
Formally,\n the matrix can be used downstream in the transform step by multiplying it to\n the input tensor x. This transform reduces the dimension of input vectors to\n output_dim in a way that retains the maximal variance.\n\n NOTE: To properly use PCA, input vector components should be converted to\n similar units of measurement such that the vectors represent a Euclidean\n space. If no such conversion is available (e.g. one element represents time,\n another element distance), the canonical approach is to first apply a\n transformation to the input data to normalize numerical variances, i.e.\n tft.scale_to_z_score(). Normalization allows PCA to choose output axes that\n help decorrelate input axes.\n\n Below are a couple intuitive examples of PCA.\n\n Consider a simple 2-dimensional example:\n\n Input x is a series of vectors [e, e] where e is Gaussian with mean 0,\n variance 1. The two components are perfectly correlated, and the resulting\n covariance matrix is\n [[1 1],\n [1 1]].\n Applying PCA with output_dim = 1 would discover the first principal component\n [1 / sqrt(2), 1 / sqrt(2)]. When multipled to the original example, each\n vector [e, e] would be mapped to a scalar sqrt(2) * e. The second principal\n component would be [-1 / sqrt(2), 1 / sqrt(2)] and would map [e, e] to 0,\n which indicates that the second component captures no variance at all. This\n agrees with our intuition since we know that the two axes in the input are\n perfectly correlated and can be fully explained by a single scalar e.\n\n Consider a 3-dimensional example:\n\n Input x is a series of vectors [a, a, b], where a is a zero-mean, unit\n variance Gaussian. b is a zero-mean, variance 4 Gaussian and is independent of\n a. The first principal component of the unnormalized vector would be [0, 0, 1]\n since b has a much larger variance than any linear combination of the first\n two components. This would map [a, a, b] onto b, asserting that the axis with\n highest energy is the third component. While this may be the desired\n output if a and b correspond to the same units, it is not statistically\n desireable when the units are irreconciliable. In such a case, one should\n first normalize each component to unit variance first, i.e. b := b / 2.\n The first principal component of a normalized vector would yield\n [1 / sqrt(2), 1 / sqrt(2), 0], and would map [a, a, b] to sqrt(2) * a. The\n second component would be [0, 0, 1] and map [a, a, b] to b. As can be seen,\n the benefit of normalization is that PCA would capture highly correlated\n components first and collapse them into a lower dimension.\n\n Args:\n x: A rank-2 `Tensor`, 0th dim are rows, 1st dim are indices in row vectors.\n output_dim: The PCA output dimension (number of eigenvectors to return).\n dtype: numpy dtype of entries in the returned matrix.\n name: (Optional) A name for this operation.\n\n Raises:\n ValueError: if input is not a rank-2 Tensor.\n\n Returns:\n A 2D `Tensor` (matrix) M of shape (input_dim, output_dim).\n \"\"\"\n\n if not isinstance(x, tf.Tensor):\n raise TypeError('Expected a Tensor, but got %r' % x)\n\n x.shape.assert_has_rank(2)\n\n input_dim = x.shape.as_list()[1]\n shape = (input_dim, output_dim)\n\n spec = _PCACombinerSpec(output_dim, dtype)\n return combine_analyzer(\n [x], [dtype], [shape], spec,\n name if name is not None else 'pca')[0]\n",
"id": "3074471",
"language": "Python",
"matching_score": 5.718520641326904,
"max_stars_count": 0,
"path": "tensorflow_transform/analyzers.py"
},
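The _CovarianceCombinerSpec and _PCACombinerSpec classes in tensorflow_transform/analyzers.py above accumulate the sum of cross terms, the sum of input rows, and the row count, then form cov = E[x x^T] - u u^T and keep the eigenvectors with the largest eigenvalues. A minimal NumPy sketch of that accumulation; the helper names are illustrative and not part of the library:

import numpy as np

def add_batch(acc, batch):
    # batch: 2-D array whose rows are input vectors (as in add_input above)
    cross = np.matmul(batch.T, batch)      # sum of cross terms x_i * x_j
    sums = batch.sum(axis=0)               # sum of input vectors
    count = batch.shape[0]                 # number of rows seen
    if acc is None:
        return [cross, sums, count]
    return [acc[0] + cross, acc[1] + sums, acc[2] + count]

def extract_pca(acc, output_dim):
    cross, sums, count = acc
    mean = sums / count
    cov = cross / count - np.outer(mean, mean)   # biased covariance E[xx^T] - uu^T
    vals, vecs = np.linalg.eigh(cov)
    return vecs[:, np.argsort(vals)[::-1]][:, :output_dim]

acc = None
for batch in (np.random.randn(100, 3), np.random.randn(50, 3)):
    acc = add_batch(acc, batch)
components = extract_pca(acc, output_dim=2)      # shape (3, 2)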
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Beam implementations of tf.Transform canonical analyzers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\n\n\nimport apache_beam as beam\n\nfrom apache_beam.typehints import Any\nfrom apache_beam.typehints import KV\nfrom apache_beam.typehints import List\nfrom apache_beam.typehints import with_input_types\nfrom apache_beam.typehints import with_output_types\n\nimport numpy as np\nimport six\nimport tensorflow as tf\nfrom tensorflow_transform import analyzers\nfrom tensorflow_transform.beam import common\nfrom tensorflow.contrib.boosted_trees.python.ops import quantile_ops\nfrom tensorflow.python.ops import resources\n\n_DEFAULT_TENSORFLOW_CONFIG_BY_RUNNER = {\n # We rely on Beam to manage concurrency, i.e. we expect it to run one\n # session per CPU--so we don't want to proliferate TF threads.\n # Nonetheless we provide 4 threads per session for TF ops, 2 inter-\n # and 2 intra-thread. In many cases only 2 of these will be runnable\n # at any given time. This approach oversubscribes a bit to make sure\n # the CPUs are really saturated.\n #\n beam.runners.DataflowRunner:\n tf.ConfigProto(\n use_per_session_threads=True,\n inter_op_parallelism_threads=2,\n intra_op_parallelism_threads=2).SerializeToString(),\n\n}\n\n\ndef _maybe_deserialize_tf_config(serialized_tf_config):\n if serialized_tf_config is None:\n return None\n\n result = tf.ConfigProto()\n result.ParseFromString(serialized_tf_config)\n return result\n\n\n@with_input_types(List[np.ndarray])\n@with_output_types(List[Any])\nclass _AnalyzerImpl(beam.PTransform):\n \"\"\"PTransform that implements a given analyzer.\n\n _AnalyzerImpl accepts a PCollection where each element is a list of ndarrays.\n Each element in this list contains a batch of values for the corresponding\n input tensor of the analyzer. _AnalyzerImpl returns a PCollection containing a\n single element which is a list of values. 
Each element should be convertible\n to an ndarray via np.asarray, and the converted value will be the\n corresponding output tensor of the analyzer.\n\n _AnalyzerImpl dispatches to an implementation transform, with the same\n signature as _AnalyzerImpl.\n \"\"\"\n\n def __init__(self, spec, temp_assets_dir):\n self._spec = spec\n self._temp_assets_dir = temp_assets_dir\n\n def expand(self, pcoll):\n # pylint: disable=protected-access\n if isinstance(self._spec, analyzers._UniquesSpec):\n return pcoll | _UniquesAnalyzerImpl(self._spec, self._temp_assets_dir)\n elif isinstance(self._spec, analyzers._QuantilesSpec):\n return pcoll | _QuantilesAnalyzerImpl(self._spec)\n elif isinstance(self._spec, analyzers.CombinerSpec):\n return pcoll | beam.CombineGlobally(\n _CombineFnWrapper(self._spec)).without_defaults()\n else:\n raise NotImplementedError(self._spec.__class__)\n\n\ndef _flatten_value_to_list(batch_values):\n \"\"\"Converts an N-D dense or sparse batch to a 1-D list.\"\"\"\n # Ravel for flattening and tolist so that we go to native Python types\n # for more efficient followup processing.\n #\n batch_value, = batch_values\n return batch_value.ravel().tolist()\n\n\n@with_input_types(List[np.ndarray])\n@with_output_types(List[Any])\nclass _UniquesAnalyzerImpl(beam.PTransform):\n \"\"\"Saves the unique elements in a PCollection of batches.\"\"\"\n\n def __init__(self, spec, temp_assets_dir):\n assert isinstance(spec, analyzers._UniquesSpec) # pylint: disable=protected-access\n self._spec = spec\n self._temp_assets_dir = temp_assets_dir\n\n def expand(self, pcoll):\n top_k = self._spec.top_k\n frequency_threshold = self._spec.frequency_threshold\n assert top_k is None or top_k >= 0\n assert frequency_threshold is None or frequency_threshold >= 0\n\n # Creates a PCollection of (count, element) pairs, then iterates over\n # this to create a single element PCollection containing this list of\n # pairs in sorted order by decreasing counts (and by values for equal\n # counts).\n counts = (\n pcoll\n | 'FlattenValueToList' >> beam.Map(_flatten_value_to_list)\n | 'CountWithinList' >>\n # Specification of with_output_types allows for combiner optimizations.\n (beam.FlatMap(lambda lst: six.iteritems(collections.Counter(lst))).\n with_output_types(KV[common.PRIMITIVE_TYPE, int]))\n | 'CountGlobally' >> beam.CombinePerKey(sum))\n\n counts = (\n counts\n | 'FilterProblematicStrings' >> beam.Filter(\n lambda kv: kv[0] and '\\n' not in kv[0] and '\\r' not in kv[0])\n | 'SwapElementsAndCounts' >> beam.KvSwap())\n\n # Filter is cheaper than TopK computation and the two commute, so\n # filter first.\n if frequency_threshold is not None:\n counts |= ('FilterByFrequencyThreshold(%s)' % frequency_threshold >>\n beam.Filter(lambda kv: kv[0] >= frequency_threshold))\n\n if top_k is not None:\n counts = (counts\n | 'Top(%s)' % top_k\n >> beam.transforms.combiners.Top.Largest(top_k)\n | 'FlattenList' >> beam.FlatMap(lambda lst: lst))\n\n # Performance optimization to obviate reading from finely sharded files\n # via AsIter. 
By breaking fusion, we allow sharded files' sizes to be\n # automatically computed (when possible), so we end up reading from fewer\n # and larger files.\n counts |= 'Reshard' >> beam.transforms.Reshuffle() # pylint: disable=no-value-for-parameter\n\n # Using AsIter instead of AsList below in order to reduce max memory\n # usage (due to AsList caching).\n def order_by_decreasing_counts(ignored, counts_iter, store_frequency):\n \"\"\"Sort the vocabulary by frequency count.\"\"\"\n del ignored\n counts = list(counts_iter)\n if not counts:\n counts = [(1, '49d0cd50-04bb-48c0-bc6f-5b575dce351a')]\n counts.sort(reverse=True) # Largest first.\n\n # Log vocabulary size to metrics. Note we can call\n # beam.metrics.Metrics.distribution here because this function only gets\n # called once, so there is no need to amortize the cost of calling the\n # constructor by putting in a DoFn initializer.\n vocab_size_distribution = beam.metrics.Metrics.distribution(\n common.METRICS_NAMESPACE, 'vocabulary_size')\n vocab_size_distribution.update(len(counts))\n\n if store_frequency:\n # Returns ['count1 element1', ... ]\n return ['{} {}'.format(count, element) for count, element in counts]\n else:\n return [element for _, element in counts]\n\n vocabulary_file = os.path.join(self._temp_assets_dir,\n self._spec.vocab_filename)\n vocab_is_written = (\n pcoll.pipeline\n | 'Prepare' >> beam.Create([None])\n | 'OrderByDecreasingCounts' >> beam.FlatMap(\n order_by_decreasing_counts,\n counts_iter=beam.pvalue.AsIter(counts),\n store_frequency=self._spec.store_frequency)\n | 'WriteToFile' >> beam.io.WriteToText(vocabulary_file,\n shard_name_template=''))\n # Return the vocabulary path.\n wait_for_vocabulary_transform = (\n pcoll.pipeline\n | 'CreatePath' >> beam.Create([[vocabulary_file]])\n # Ensure that the analysis returns only after the file is written.\n | 'WaitForVocabularyFile' >> beam.Map(\n lambda x, y: x, y=beam.pvalue.AsIter(vocab_is_written)))\n return wait_for_vocabulary_transform\n\n\n@with_input_types(List[np.ndarray])\n@with_output_types(List[Any])\nclass _ComputeQuantiles(beam.CombineFn):\n \"\"\"Computes quantiles on the PCollection.\n\n This implementation is based on go/squawd.\n For additional details on the algorithm, such as streaming and summary,\n see also http://web.cs.ucla.edu/~weiwang/paper/SSDBM07_2.pdf\n \"\"\"\n\n def __init__(self, num_quantiles, epsilon, serialized_tf_config=None):\n self._num_quantiles = num_quantiles\n self._epsilon = epsilon\n self._serialized_tf_config = serialized_tf_config\n\n # _stamp_token is used to commit the state of the qaccumulator. In\n # this case, the qaccumulator state is completely returned and stored\n # as part of quantile_state/summary in the combiner fn (i.e the summary is\n # extracted and stored outside the qaccumulator). So we don't use\n # the timestamp mechanism to signify progress in the qaccumulator state.\n self._stamp_token = 0\n # Represents an empty summary. 
This could be changed to a tf.constant\n # implemented by the quantile ops library.\n self._empty_summary = None\n\n # Create a new session with a new graph for quantile ops.\n self._session = tf.Session(\n graph=tf.Graph(),\n config=_maybe_deserialize_tf_config(serialized_tf_config))\n with self._session.graph.as_default():\n with self._session.as_default():\n self._qaccumulator = quantile_ops.QuantileAccumulator(\n init_stamp_token=self._stamp_token,\n num_quantiles=self._num_quantiles,\n epsilon=self._epsilon,\n name='qaccumulator')\n resources.initialize_resources(resources.shared_resources()).run()\n\n def __reduce__(self):\n return _ComputeQuantiles, (self._num_quantiles,\n self._epsilon, self._serialized_tf_config)\n\n def create_accumulator(self):\n return self._empty_summary\n\n def add_input(self, summary, next_input):\n batch_value_as_list = _flatten_value_to_list(next_input)\n with self._session.graph.as_default():\n update = self._qaccumulator.add_summary(\n stamp_token=self._stamp_token,\n column=[batch_value_as_list],\n # All weights are equal, and the weight vector is the\n # same length as the input.\n example_weights=([[1] * len(batch_value_as_list)]))\n\n if summary is not self._empty_summary:\n self._session.run(\n self._qaccumulator.add_prebuilt_summary(\n stamp_token=self._stamp_token,\n summary=tf.constant(summary)))\n\n self._session.run(update)\n\n # After the flush_summary, qaccumulator will not contain any\n # uncommitted information that represents the input. Instead all the\n # digested information is returned as 'summary'. Many such summaries\n # will be combined by merge_accumulators().\n return self._session.run(\n self._qaccumulator.flush_summary(\n stamp_token=self._stamp_token,\n next_stamp_token=self._stamp_token))\n\n def merge_accumulators(self, summaries):\n if summaries is self._empty_summary:\n return self._empty_summary\n\n with self._session.graph.as_default():\n summary_placeholder = tf.placeholder(tf.string)\n add_summary = self._qaccumulator.add_prebuilt_summary(\n stamp_token=self._stamp_token,\n summary=summary_placeholder)\n for summary in summaries:\n self._session.run(add_summary, {summary_placeholder: summary})\n\n # Compute new summary.\n # All relevant state about the input is captured by 'summary'\n # (see comment at the end of add_input()).\n return self._session.run(\n self._qaccumulator.flush_summary(\n stamp_token=self._stamp_token,\n next_stamp_token=self._stamp_token))\n\n def extract_output(self, summary):\n if summary is self._empty_summary:\n return [[[]]]\n\n # All relevant state about the input is captured by 'summary'\n # (see comment in add_input() and merge_accumulators()).\n with self._session.graph.as_default():\n self._session.run(\n self._qaccumulator.add_prebuilt_summary(\n stamp_token=self._stamp_token, summary=tf.constant(summary)))\n self._session.run(\n self._qaccumulator.flush(\n stamp_token=self._stamp_token,\n next_stamp_token=self._stamp_token))\n are_ready_flush, buckets = (\n self._qaccumulator.get_buckets(stamp_token=self._stamp_token))\n buckets, _ = self._session.run([buckets, are_ready_flush])\n\n # Quantile boundaries is a list of the form\n # [np.ndarrary(min, <internal-boundaries>, max)]\n # The approximate quantile library can return less or more than requested\n # number of buckets. 
The max value can be same as the last internal\n # boundary, due to removal of duplicates.\n # Below, the min and/or max quantile boundaries are trimmed depending\n # on the actual boundaries returned by the library.\n if buckets.size >= (self._num_quantiles + 1):\n # Trim min/max.\n buckets = buckets[1:-1]\n elif buckets.size == self._num_quantiles:\n # Trim min only.\n buckets = buckets[1:]\n else:\n # Do not trim min/max, these are part of requested boundaries.\n pass\n\n return [[buckets]]\n\n\n@with_input_types(List[np.ndarray])\n@with_output_types(List[Any])\nclass _QuantilesAnalyzerImpl(beam.PTransform):\n \"\"\"Computes the quantile buckets in a PCollection of batches.\"\"\"\n\n def __init__(self, spec):\n assert isinstance(spec, analyzers._QuantilesSpec) # pylint: disable=protected-access\n self._spec = spec\n\n def expand(self, pcoll):\n serialized_tf_config = _DEFAULT_TENSORFLOW_CONFIG_BY_RUNNER.get(\n pcoll.pipeline.runner)\n return (pcoll\n | 'ComputeQuantiles' >> beam.CombineGlobally(\n _ComputeQuantiles(\n num_quantiles=self._spec.num_buckets,\n epsilon=self._spec.epsilon,\n serialized_tf_config=serialized_tf_config)))\n\n\n@with_input_types(List[np.ndarray])\n@with_output_types(List[Any])\nclass _CombineFnWrapper(beam.CombineFn):\n \"\"\"Class to wrap a analyzers._CombinerSpec as a beam.CombineFn.\"\"\"\n\n def __init__(self, spec):\n self._spec = spec\n\n def create_accumulator(self):\n return self._spec.create_accumulator()\n\n def add_input(self, accumulator, next_input):\n return self._spec.add_input(accumulator, next_input)\n\n def merge_accumulators(self, accumulators):\n return self._spec.merge_accumulators(accumulators)\n\n def extract_output(self, accumulator):\n return self._spec.extract_output(accumulator)\n",
"id": "6194009",
"language": "Python",
"matching_score": 4.082073211669922,
"max_stars_count": 0,
"path": "tensorflow_transform/beam/analyzer_impls.py"
},
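_CombineFnWrapper in tensorflow_transform/beam/analyzer_impls.py above simply forwards the four standard combiner methods to Beam, and _AnalyzerImpl applies it with beam.CombineGlobally(...).without_defaults(). A self-contained sketch of the same pattern with a toy mean combiner; the class and values are illustrative, not from the library:

import apache_beam as beam

class MeanCombineFn(beam.CombineFn):
    # The same four methods that _CombineFnWrapper delegates to a CombinerSpec.
    def create_accumulator(self):
        return (0.0, 0)                        # (running sum, element count)

    def add_input(self, acc, element):
        return (acc[0] + element, acc[1] + 1)

    def merge_accumulators(self, accumulators):
        sums, counts = zip(*accumulators)
        return (sum(sums), sum(counts))

    def extract_output(self, acc):
        return acc[0] / acc[1] if acc[1] else float('nan')

with beam.Pipeline() as p:
    _ = (p
         | beam.Create([1.0, 2.0, 3.0, 4.0])
         | beam.CombineGlobally(MeanCombineFn())   # yields a single element: 2.5
         | beam.Map(print))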
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for tensorflow_transform.beam.analyzer_impls.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport apache_beam as beam\nfrom apache_beam.testing.util import assert_that\nfrom apache_beam.testing.util import equal_to\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_transform import analyzers\nfrom tensorflow_transform.beam import analyzer_impls as impl\n\nimport unittest\nfrom tensorflow.python.framework import test_util\n\n\nclass AnalyzerImplsTest(test_util.TensorFlowTestCase):\n\n def assertCombine(self, combine_fn, shards, expected, check_np_type=False):\n \"\"\"Tests the provided combiner.\n\n Args:\n combine_fn: A beam.ComineFn to exercise.\n shards: A list of next_inputs to add via the combiner.\n expected: The expected output from extract_output.\n check_np_type: check strict equivalence of output numpy type.\n\n Exercises create_accumulator, add_input, merge_accumulators,\n and extract_output.\n \"\"\"\n accumulators = [\n combine_fn.add_input(combine_fn.create_accumulator(), shard)\n for shard in shards]\n final_accumulator = combine_fn.merge_accumulators(accumulators)\n extracted = combine_fn.extract_output(final_accumulator)\n # Extract output 0 since all analyzers have a single output\n extracted = extracted[0]\n if check_np_type:\n # This is currently applicable only for quantile buckets, which conains a\n # single element list of numpy array; the numpy array contains the bucket\n # boundaries.\n self.assertEqual(len(expected), 1)\n self.assertEqual(len(extracted), 1)\n self.assertEqual(expected[0].dtype, extracted[0].dtype)\n self.assertAllEqual(expected, extracted)\n\n def testCombineOnBatchSimple(self):\n batch_1 = [np.ones((2, 6))]\n batch_2 = [np.ones((1, 6))]\n out = [3 for _ in range(6)]\n analyzer = impl._CombineFnWrapper(\n analyzers._NumPyCombinerSpec(np.sum, reduce_instance_dims=False))\n self.assertCombine(analyzer, [batch_1, batch_2], out)\n\n def testCombineOnBatchAllEmptyRow(self):\n analyzer = impl._CombineFnWrapper(\n analyzers._NumPyCombinerSpec(np.sum, reduce_instance_dims=False))\n self.assertCombine(analyzer, [[[[]]], [[[]]], [[[]]]], [])\n\n def testCombineOnBatchLotsOfData(self):\n shards = [[np.ones((1, 3))] for _ in range(2000)]\n out = [1 for _ in range(3)]\n analyzer = impl._CombineFnWrapper(\n analyzers._NumPyCombinerSpec(np.min, reduce_instance_dims=False))\n self.assertCombine(analyzer, shards, out)\n\n def testCombineOnBatchWithBeamPipeline(self):\n # Test with a real Beam pipeline instead of calling the Combiner methods\n # directly. 
This surfaces bugs that only occur within a Beam pipeline, e.g.\n # due to Beam passing iterators to merge_accumulators instead of lists.\n with beam.Pipeline() as p:\n batch_1 = [np.ones((2, 6), dtype=np.int)]\n batch_2 = [np.ones((1, 6), dtype=np.int)]\n expected_output = np.ones(6) * 3\n def assert_equals_expected(outputs):\n output, = outputs # Expect exactly one analyzer output\n return np.array_equal(output, expected_output)\n\n analyzer = impl._CombineFnWrapper(\n analyzers._NumPyCombinerSpec(np.sum, reduce_instance_dims=False))\n assert_that(p\n | beam.Create([batch_1, batch_2])\n | beam.CombineGlobally(analyzer)\n | beam.Map(assert_equals_expected),\n equal_to([True]))\n\n def _test_compute_quantiles_single_batch_helper(self, nptype):\n batch_1 = [np.linspace(1, 100, 100, nptype)]\n analyzer = impl._ComputeQuantiles(num_quantiles=3, epsilon=0.00001)\n out = np.array([[35, 68]], dtype=np.float32)\n self.assertCombine(analyzer, np.array([batch_1]), out, check_np_type=True)\n\n def testComputeQuantilesSingleBatch(self):\n self._test_compute_quantiles_single_batch_helper(np.double)\n self._test_compute_quantiles_single_batch_helper(np.float32)\n self._test_compute_quantiles_single_batch_helper(np.float64)\n self._test_compute_quantiles_single_batch_helper(np.int32)\n self._test_compute_quantiles_single_batch_helper(np.int64)\n\n def _test_compute_quantiles_multipe_batch_helper(self, nptype):\n batch_1 = [np.linspace(1, 100, 100, dtype=nptype)]\n batch_2 = [np.linspace(101, 200, 100, dtype=nptype)]\n batch_3 = [np.linspace(201, 300, 100, dtype=nptype)]\n analyzer = impl._ComputeQuantiles(num_quantiles=5, epsilon=0.00001)\n out = np.array([[61, 121, 181, 241]], dtype=np.float32)\n self.assertCombine(\n analyzer, np.array([batch_1, batch_2, batch_3]), out,\n check_np_type=True)\n\n def testComputeQuantilesMultipleBatch(self):\n self._test_compute_quantiles_multipe_batch_helper(np.double)\n self._test_compute_quantiles_multipe_batch_helper(np.float32)\n self._test_compute_quantiles_multipe_batch_helper(np.float64)\n self._test_compute_quantiles_multipe_batch_helper(np.int32)\n self._test_compute_quantiles_multipe_batch_helper(np.int64)\n\n def testCovarianceEmpty(self):\n \"\"\"Test empty array of inputs.\"\"\"\n analyzer = analyzers._CovarianceCombinerSpec(dtype=tf.float64)\n shards = [[[[]]], [[[]]]]\n out = np.empty((0, 0))\n self.assertCombine(analyzer, shards, out)\n\n def testCovarianceWithZeroAxis(self):\n \"\"\"Test an example with one zero variance axis.\"\"\"\n analyzer = analyzers._CovarianceCombinerSpec(dtype=tf.float64)\n shards = [\n [[[0, 0, 1]]],\n [[[4, 0, 1], [2, -1, 1]]],\n [[[2, 1, 1]]]\n ]\n out = np.array([[2, 0, 0], [0, 0.5, 0], [0, 0, 0]])\n self.assertCombine(analyzer, shards, out)\n\n def testCovarianceWithLargeNumbers(self):\n \"\"\"Test floating point precision with very large doubles.\"\"\"\n analyzer = analyzers._CovarianceCombinerSpec(dtype=tf.float64)\n shards = [\n [[[2e15, 0], [1e15, 0]]],\n [[[-2e15, 0], [-1e15, 0]]]\n ]\n out = np.array([[2.5e30, 0], [0, 0]])\n self.assertCombine(analyzer, shards, out)\n\n def testPCAWithZeroAxis(self):\n \"\"\"Test a PCA example with one zero variance axis.\"\"\"\n analyzer = analyzers._PCACombinerSpec(output_dim=2, dtype=tf.float64)\n shards = [\n [[[0, 0, 1]]],\n [[[4, 0, 1], [2, -1, 1]]],\n [[[2, 1, 1]]]\n ]\n out = np.array([[1, 0], [0, 1], [0, 0]])\n self.assertCombine(analyzer, shards, out)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "3247000",
"language": "Python",
"matching_score": 1.6958743333816528,
"max_stars_count": 0,
"path": "tensorflow_transform/beam/analyzer_impls_test.py"
},
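The covariance expectations in analyzer_impls_test.py above can be cross-checked with NumPy's biased covariance (bias=True divides by N, matching the combiner). A quick check of testCovarianceWithZeroAxis using its four input rows:

import numpy as np

# The four input rows from testCovarianceWithZeroAxis, flattened across shards.
rows = np.array([[0, 0, 1], [4, 0, 1], [2, -1, 1], [2, 1, 1]], dtype=float)
print(np.cov(rows, rowvar=False, bias=True))
# [[2.   0.   0. ]
#  [0.   0.5  0. ]
#  [0.   0.   0. ]]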
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for transform_fn_io.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\n\nimport apache_beam as beam\nfrom apache_beam.testing import util as beam_test_util\n\nimport tensorflow as tf\nfrom tensorflow_transform.beam.tft_beam_io import beam_metadata_io\nfrom tensorflow_transform.beam.tft_beam_io import transform_fn_io\nfrom tensorflow_transform.tf_metadata import dataset_metadata\nfrom tensorflow_transform.tf_metadata import dataset_schema\nfrom tensorflow_transform.tf_metadata import futures\nfrom tensorflow_transform.tf_metadata import metadata_io\n\nimport unittest\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.lib.io import file_io\n\n_TEST_METADATA = dataset_metadata.DatasetMetadata({\n 'fixed_column': dataset_schema.ColumnSchema(\n tf.string, (1, 3, 2), dataset_schema.FixedColumnRepresentation()),\n 'fixed_column_with_default': dataset_schema.ColumnSchema(\n tf.float32, (1, 3, 2), dataset_schema.FixedColumnRepresentation(123.4)),\n 'list_columm': dataset_schema.ColumnSchema(\n tf.float32, (None,), dataset_schema.ListColumnRepresentation())\n})\n\n_TEST_METADATA_WITH_FUTURES = dataset_metadata.DatasetMetadata({\n 'fixed_column': dataset_schema.ColumnSchema(\n tf.string, (1, 3, 2), dataset_schema.FixedColumnRepresentation()),\n 'fixed_column_with_default': dataset_schema.ColumnSchema(\n tf.float32, (1, futures.Future('a'), 2),\n dataset_schema.FixedColumnRepresentation(123.4)),\n 'list_columm': dataset_schema.ColumnSchema(\n tf.float32, (None,), dataset_schema.ListColumnRepresentation())\n})\n\n\nclass BeamMetadataIoTest(test_util.TensorFlowTestCase):\n\n def assertMetadataEqual(self, a, b):\n # Use extra assertEqual for schemas, since full metadata assertEqual error\n # message is not conducive to debugging.\n self.assertEqual(a.schema.column_schemas, b.schema.column_schemas)\n self.assertEqual(a, b)\n\n def testReadTransformFn(self):\n path = self.get_temp_dir()\n # NOTE: we don't need to create or write to the transform_fn directory since\n # ReadTransformFn never inspects this directory.\n transform_fn_dir = os.path.join(path, transform_fn_io.TRANSFORM_FN_DIR)\n transformed_metadata_dir = os.path.join(\n path, transform_fn_io.TRANSFORMED_METADATA_DIR)\n metadata_io.write_metadata(_TEST_METADATA, transformed_metadata_dir)\n\n with beam.Pipeline() as pipeline:\n saved_model_dir_pcoll, metadata = (\n pipeline | transform_fn_io.ReadTransformFn(path))\n beam_test_util.assert_that(\n saved_model_dir_pcoll, beam_test_util.equal_to([transform_fn_dir]),\n label='AssertSavedModelDir')\n # NOTE: metadata is currently read in a non-deferred manner.\n self.assertEqual(metadata, _TEST_METADATA)\n\n def testWriteTransformFn(self):\n path = os.path.join(self.get_temp_dir(), 'output')\n\n with beam.Pipeline() as pipeline:\n # Create an empty directory for the source 
saved model dir.\n saved_model_dir = os.path.join(self.get_temp_dir(), 'source')\n file_io.recursive_create_dir(saved_model_dir)\n saved_model_dir_pcoll = (\n pipeline | 'CreateSavedModelDir' >> beam.Create([saved_model_dir]))\n metadata = beam_metadata_io.BeamDatasetMetadata(\n _TEST_METADATA_WITH_FUTURES,\n {\n 'a': pipeline | 'CreateA' >> beam.Create([3]),\n })\n\n _ = ((saved_model_dir_pcoll, metadata)\n | transform_fn_io.WriteTransformFn(path))\n\n transformed_metadata_dir = os.path.join(\n path, transform_fn_io.TRANSFORMED_METADATA_DIR)\n metadata = metadata_io.read_metadata(transformed_metadata_dir)\n self.assertEqual(metadata, _TEST_METADATA)\n\n transform_fn_dir = os.path.join(path, transform_fn_io.TRANSFORM_FN_DIR)\n self.assertTrue(file_io.file_exists(transform_fn_dir))\n self.assertTrue(file_io.is_directory(transform_fn_dir))\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "1624190",
"language": "Python",
"matching_score": 6.3817901611328125,
"max_stars_count": 0,
"path": "tensorflow_transform/beam/tft_beam_io/transform_fn_io_test.py"
},
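transform_fn_io_test.py above persists metadata with metadata_io.write_metadata and reads it back with metadata_io.read_metadata. A minimal sketch of that round trip, assuming a writable temp directory and a hypothetical one-column metadata object:

import tempfile
import tensorflow as tf
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import metadata_io

metadata = dataset_metadata.DatasetMetadata({
    'x': dataset_schema.ColumnSchema(
        tf.float32, [1], dataset_schema.FixedColumnRepresentation(0.0)),
})
path = tempfile.mkdtemp()
metadata_io.write_metadata(metadata, path)           # writes the known metadata versions under `path`
print(metadata_io.read_metadata(path) == metadata)   # expected: True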
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for beam_metadata_io.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport apache_beam as beam\n\nimport tensorflow as tf\nfrom tensorflow_transform.beam.tft_beam_io import beam_metadata_io\nfrom tensorflow_transform.tf_metadata import dataset_metadata\nfrom tensorflow_transform.tf_metadata import dataset_schema\nfrom tensorflow_transform.tf_metadata import futures\nfrom tensorflow_transform.tf_metadata import metadata_io\n\nimport unittest\nfrom tensorflow.python.framework import test_util\n\n_TEST_METADATA = dataset_metadata.DatasetMetadata({\n 'fixed_column': dataset_schema.ColumnSchema(\n tf.string, (1, 3, 2), dataset_schema.FixedColumnRepresentation()),\n 'fixed_column_with_default': dataset_schema.ColumnSchema(\n tf.float32, (1, 3, 2), dataset_schema.FixedColumnRepresentation(123.4)),\n 'list_columm': dataset_schema.ColumnSchema(\n dataset_schema.IntDomain(tf.int64, min_value=-1, max_value=5),\n (None,), dataset_schema.ListColumnRepresentation())\n})\n\n_TEST_METADATA_WITH_FUTURES = dataset_metadata.DatasetMetadata({\n 'fixed_column': dataset_schema.ColumnSchema(\n tf.string, (1, 3, 2), dataset_schema.FixedColumnRepresentation()),\n 'fixed_column_with_default': dataset_schema.ColumnSchema(\n tf.float32, (1, futures.Future('a'), 2),\n dataset_schema.FixedColumnRepresentation(123.4)),\n 'list_columm': dataset_schema.ColumnSchema(\n dataset_schema.IntDomain(\n tf.int64, min_value=-1, max_value=futures.Future('b')),\n (None,), dataset_schema.ListColumnRepresentation())\n})\n\n\nclass BeamMetadataIoTest(test_util.TensorFlowTestCase):\n\n def assertMetadataEqual(self, a, b):\n # Use extra assertEqual for schemas, since full metadata assertEqual error\n # message is not conducive to debugging.\n self.assertEqual(a.schema.column_schemas, b.schema.column_schemas)\n self.assertEqual(a, b)\n\n def testWriteMetadataNonDeferred(self):\n # Write properties as metadata to disk.\n with beam.Pipeline() as pipeline:\n path = self.get_temp_dir()\n _ = (_TEST_METADATA\n | beam_metadata_io.WriteMetadata(path, pipeline))\n # Load from disk and check that it is as expected.\n metadata = metadata_io.read_metadata(path)\n self.assertMetadataEqual(metadata, _TEST_METADATA)\n\n def testWriteMetadataDeferredProperties(self):\n # Write deferred properties as metadata to disk.\n with beam.Pipeline() as pipeline:\n path = self.get_temp_dir()\n\n # Combine test metadata with a dict of PCollections resolving futures.\n metadata = beam_metadata_io.BeamDatasetMetadata(\n _TEST_METADATA_WITH_FUTURES,\n {\n 'a': pipeline | 'CreateA' >> beam.Create([3]),\n 'b': pipeline | 'CreateB' >> beam.Create([5])\n })\n\n _ = metadata | beam_metadata_io.WriteMetadata(path, pipeline)\n # Load from disk and check that it is as expected.\n metadata = metadata_io.read_metadata(path)\n self.assertMetadataEqual(metadata, 
_TEST_METADATA)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "4828346",
"language": "Python",
"matching_score": 2.8356077671051025,
"max_stars_count": 0,
"path": "tensorflow_transform/beam/tft_beam_io/beam_metadata_io_test.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for dataset_metadata.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pickle\n\nimport tensorflow as tf\n\nfrom tensorflow_transform.tf_metadata import dataset_schema as sch\nfrom tensorflow_transform.tf_metadata import futures\nfrom tensorflow_transform.tf_metadata import test_common\nimport unittest\n\n\nclass DatasetSchemaTest(unittest.TestCase):\n\n def test_feature_spec_roundtrip(self):\n schema = sch.from_feature_spec(test_common.test_feature_spec)\n generated_feature_spec = schema.as_feature_spec()\n self.assertEqual(test_common.test_feature_spec, generated_feature_spec)\n\n def test_feature_spec_unsupported_dtype(self):\n schema = sch.Schema()\n schema.column_schemas['fixed_float_with_default'] = (\n sch.ColumnSchema(tf.float64, [1], sch.FixedColumnRepresentation(0.0)))\n\n with self.assertRaisesRegexp(ValueError,\n 'tf.Example parser supports only types '\n r'\\[tf.string, tf.int64, tf.float32, tf.bool\\]'\n ', so it is invalid to generate a feature_spec'\n ' with type tf.float64.'):\n schema.as_feature_spec()\n\n\n def test_sequence_feature_not_supported(self):\n feature_spec = {\n # FixedLenSequenceFeatures\n 'fixed_seq_bool':\n tf.FixedLenSequenceFeature(shape=[10], dtype=tf.bool),\n 'fixed_seq_bool_allow_missing':\n tf.FixedLenSequenceFeature(\n shape=[5], dtype=tf.bool, allow_missing=True),\n 'fixed_seq_int':\n tf.FixedLenSequenceFeature(shape=[5], dtype=tf.int64),\n 'fixed_seq_float':\n tf.FixedLenSequenceFeature(shape=[5], dtype=tf.float32),\n 'fixed_seq_string':\n tf.FixedLenSequenceFeature(shape=[5], dtype=tf.string),\n }\n\n with self.assertRaisesRegexp(ValueError,\n 'DatasetSchema does not support '\n 'FixedLenSequenceFeature yet.'):\n sch.from_feature_spec(feature_spec)\n\n def test_manually_create_schema(self):\n schema = test_common.get_manually_created_schema()\n generated_feature_spec = schema.as_feature_spec()\n self.assertEqual(test_common.test_feature_spec, generated_feature_spec)\n\n def test_domain_picklable(self):\n domain = sch._dtype_to_domain(tf.float32)\n domain_new = pickle.loads(pickle.dumps(domain))\n\n self.assertEqual(type(domain), type(domain_new))\n self.assertEqual(domain.dtype, domain_new.dtype)\n\n def test_infer_column_schema_from_tensor(self):\n dense = tf.constant([[1., 2.], [3., 4.]], dtype=tf.float32, shape=[2, 2])\n column_schema = sch.infer_column_schema_from_tensor(dense)\n expected_column_schema = sch.ColumnSchema(\n tf.float32, [2], sch.FixedColumnRepresentation())\n self.assertEqual(expected_column_schema, column_schema)\n\n varlen = tf.sparse_placeholder(tf.string)\n column_schema = sch.infer_column_schema_from_tensor(varlen)\n expected_column_schema = sch.ColumnSchema(\n tf.string, [None], sch.ListColumnRepresentation())\n self.assertEqual(expected_column_schema, column_schema)\n\n def test_schema_equality(self):\n schema1 = 
sch.Schema(column_schemas={\n 'fixed_bool_with_default': sch.ColumnSchema(\n tf.bool, [1], sch.FixedColumnRepresentation(False)),\n 'var_float': sch.ColumnSchema(\n tf.float32, None, sch.ListColumnRepresentation())\n })\n schema2 = sch.Schema(column_schemas={\n 'fixed_bool_with_default': sch.ColumnSchema(\n tf.bool, [1], sch.FixedColumnRepresentation(False)),\n 'var_float': sch.ColumnSchema(\n tf.float32, None, sch.ListColumnRepresentation())\n })\n schema3 = sch.Schema(column_schemas={\n 'fixed_bool_with_default': sch.ColumnSchema(\n tf.bool, [1], sch.FixedColumnRepresentation(False)),\n 'var_float': sch.ColumnSchema(\n tf.float64, None, sch.ListColumnRepresentation())\n })\n schema4 = sch.Schema(column_schemas={\n 'fixed_bool_with_default': sch.ColumnSchema(\n tf.bool, [1], sch.FixedColumnRepresentation(False))\n })\n\n self.assertEqual(schema1, schema2)\n self.assertNotEqual(schema1, schema3)\n self.assertNotEqual(schema1, schema4)\n\n def test_column_schema_equality(self):\n c1 = sch.ColumnSchema(\n tf.bool, [1], sch.FixedColumnRepresentation(False))\n c2 = sch.ColumnSchema(\n tf.bool, [1], sch.FixedColumnRepresentation(False))\n c3 = sch.ColumnSchema(\n tf.bool, [1], sch.FixedColumnRepresentation())\n c4 = sch.ColumnSchema(\n tf.bool, [2], sch.FixedColumnRepresentation())\n\n self.assertEqual(c1, c2)\n self.assertNotEqual(c1, c3)\n self.assertNotEqual(c3, c4)\n\n def test_domain_equality(self):\n d1 = sch._dtype_to_domain(tf.int64)\n d2 = sch._dtype_to_domain(tf.int64)\n d3 = sch._dtype_to_domain(tf.int32)\n d4 = sch._dtype_to_domain(tf.bool)\n\n self.assertEqual(d1, d2)\n self.assertNotEqual(d1, d3)\n self.assertNotEqual(d3, d4)\n\n def test_int_domain_defaults(self):\n self.assertFalse(sch.IntDomain(tf.int64).is_categorical)\n self.assertTrue(sch.IntDomain(tf.int64, is_categorical=True).is_categorical)\n self.assertEqual(tf.int64.min, sch.IntDomain(tf.int64).min_value)\n self.assertEqual(-3,\n sch.IntDomain(tf.int64, min_value=-3).min_value)\n self.assertEqual(tf.int64.max, sch.IntDomain(tf.int64).max_value)\n self.assertEqual(3, sch.IntDomain(tf.int64, max_value=3).max_value)\n\n def test_axis_equality(self):\n a1 = sch.Axis(0)\n a2 = sch.Axis(0)\n a3 = sch.Axis(None)\n\n self.assertEqual(a1, a2)\n self.assertNotEqual(a1, a3)\n\n def test_column_representation_equality(self):\n fixed1 = sch.FixedColumnRepresentation(1.1)\n fixed2 = sch.FixedColumnRepresentation(1.1)\n fixed3 = sch.FixedColumnRepresentation()\n\n list1 = sch.ListColumnRepresentation()\n list2 = sch.ListColumnRepresentation()\n\n sparse1 = sch.SparseColumnRepresentation(\n 'val', [sch.SparseIndexField('idx1', False),\n sch.SparseIndexField('idx2', True)])\n sparse2 = sch.SparseColumnRepresentation(\n 'val', [sch.SparseIndexField('idx1', False),\n sch.SparseIndexField('idx2', True)])\n sparse3 = sch.SparseColumnRepresentation(\n 'val', [sch.SparseIndexField('idx1', False),\n sch.SparseIndexField('idx2', False)])\n\n self.assertEqual(fixed1, fixed2)\n self.assertNotEqual(fixed1, fixed3)\n self.assertNotEqual(fixed1, list1)\n self.assertNotEqual(fixed1, sparse1)\n\n self.assertEqual(list1, list2)\n self.assertNotEqual(list1, sparse1)\n\n self.assertEqual(sparse1, sparse2)\n self.assertNotEqual(sparse1, sparse3)\n\n def test_sparse_index_field_equality(self):\n f1 = sch.SparseIndexField('foo', False)\n f2 = sch.SparseIndexField('foo', False)\n f3 = sch.SparseIndexField('bar', False)\n\n self.assertEqual(f1, f2)\n self.assertNotEqual(f2, f3)\n\n def test_schema_with_futures(self):\n schema = sch.Schema()\n\n 
schema.column_schemas['fixed_bool_without_default'] = (\n sch.ColumnSchema(\n tf.bool,\n [5, futures.Future('foo_dim_1'), 7, futures.Future('foo_dim_3')],\n sch.FixedColumnRepresentation()))\n\n schema.column_schemas['fixed_int_with_default'] = (\n sch.ColumnSchema(tf.int64, [1], sch.FixedColumnRepresentation(\n default_value=futures.Future('bar_int_default'))))\n\n schema.column_schemas['fixed_categorical_int_with_range'] = (\n sch.ColumnSchema(sch.IntDomain(tf.int64,\n futures.Future('baz_int_min'),\n futures.Future('baz_int_max'),\n is_categorical=True),\n [1],\n sch.FixedColumnRepresentation(default_value=0)))\n\n self.assertFalse(schema.all_futures_resolved())\n schema.substitute_futures({'foo_dim_1': 6, 'foo_dim_3': 8,\n 'bar_int_default': 12,\n 'baz_int_min': 3, 'baz_int_max': 4})\n self.assertTrue(schema.all_futures_resolved())\n\n expected_schema = sch.Schema()\n\n expected_schema.column_schemas['fixed_bool_without_default'] = (\n sch.ColumnSchema(tf.bool, [5, 6, 7, 8],\n sch.FixedColumnRepresentation()))\n\n expected_schema.column_schemas['fixed_int_with_default'] = (\n sch.ColumnSchema(tf.int64, [1],\n sch.FixedColumnRepresentation(default_value=12)))\n\n expected_schema.column_schemas['fixed_categorical_int_with_range'] = (\n sch.ColumnSchema(\n sch.IntDomain(tf.int64, 3, 4, is_categorical=True), [1],\n sch.FixedColumnRepresentation(default_value=0)))\n\n self.assertEqual(expected_schema, schema)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "11162940",
"language": "Python",
"matching_score": 4.473969459533691,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/dataset_schema_test.py"
},
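dataset_schema_test.py above exercises deferred shapes through futures.Future and substitute_futures. A minimal sketch of that mechanism, with hypothetical column and future names:

import tensorflow as tf
from tensorflow_transform.tf_metadata import dataset_schema as sch
from tensorflow_transform.tf_metadata import futures

schema = sch.Schema()
schema.column_schemas['embedding'] = sch.ColumnSchema(
    tf.float32, [futures.Future('embedding_dim')],
    sch.FixedColumnRepresentation())
print(schema.all_futures_resolved())              # False: the dimension is still a Future
schema.substitute_futures({'embedding_dim': 16})
print(schema.all_futures_resolved())              # True: the axis is now 16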
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Common data and utilities for tf_metadata tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\nfrom tensorflow_transform.tf_metadata import dataset_schema as sch\n\n\ntest_feature_spec = {\n # FixedLenFeatures\n 'fixed_bool_with_default': tf.FixedLenFeature(\n shape=[1], dtype=tf.bool, default_value=False),\n 'fixed_bool_without_default': tf.FixedLenFeature(\n shape=[5], dtype=tf.bool),\n 'fixed_int_with_default': tf.FixedLenFeature(\n shape=[1], dtype=tf.int64, default_value=0),\n 'fixed_categorical_int_with_range': tf.FixedLenFeature(\n shape=[1], dtype=tf.int64, default_value=0),\n 'fixed_categorical_int_with_vocab': tf.FixedLenFeature(\n shape=[1], dtype=tf.int64, default_value=0),\n 'fixed_int_without_default': tf.FixedLenFeature(\n shape=[5], dtype=tf.int64),\n 'fixed_float_with_default': tf.FixedLenFeature(\n shape=[1], dtype=tf.float32, default_value=0.0),\n 'fixed_float_without_default': tf.FixedLenFeature(\n shape=[5], dtype=tf.float32),\n 'fixed_string_with_default': tf.FixedLenFeature(\n shape=[1], dtype=tf.string, default_value='default'),\n 'fixed_string_without_default': tf.FixedLenFeature(\n shape=[5], dtype=tf.string),\n '3d_fixed_int_without_default': tf.FixedLenFeature(\n shape=[5, 6, 7], dtype=tf.int64),\n\n # VarLenFeatures\n 'var_bool': tf.VarLenFeature(dtype=tf.bool),\n 'var_int': tf.VarLenFeature(dtype=tf.int64),\n 'var_float': tf.VarLenFeature(dtype=tf.float32),\n 'var_string': tf.VarLenFeature(dtype=tf.string),\n\n # SparseFeatures\n 'sparse_bool': tf.SparseFeature(\n index_key='sparse_bool_index', value_key='sparse_bool_value',\n dtype=tf.bool, size=15, already_sorted=True),\n 'sparse_int': tf.SparseFeature(\n index_key='sparse_int_index', value_key='sparse_int_value',\n dtype=tf.int64, size=150, already_sorted=False),\n 'sparse_float': tf.SparseFeature(\n index_key='sparse_float_index', value_key='sparse_float_value',\n dtype=tf.float32, size=1500),\n 'sparse_string': tf.SparseFeature(\n index_key='sparse_string_index', value_key='sparse_string_value',\n dtype=tf.string, size=15000, already_sorted=True),\n}\n\n\ndef get_test_schema():\n return sch.from_feature_spec(test_feature_spec)\n\n\ndef get_manually_created_schema():\n \"\"\"Provide a test schema built from scratch using the Schema classes.\"\"\"\n schema = sch.Schema()\n\n # FixedLenFeatures\n schema.column_schemas['fixed_bool_with_default'] = (\n sch.ColumnSchema(tf.bool, [1], sch.FixedColumnRepresentation(\n default_value=False)))\n\n schema.column_schemas['fixed_bool_without_default'] = (\n sch.ColumnSchema(tf.bool, [5], sch.FixedColumnRepresentation()))\n\n schema.column_schemas['fixed_int_with_default'] = (\n sch.ColumnSchema(tf.int64, [1], sch.FixedColumnRepresentation(\n default_value=0)))\n\n schema.column_schemas['fixed_categorical_int_with_range'] = (\n 
sch.ColumnSchema(sch.IntDomain(tf.int64, -5, 10, True), [1],\n sch.FixedColumnRepresentation(0)))\n\n schema.column_schemas['fixed_categorical_int_with_vocab'] = (\n sch.ColumnSchema(sch.IntDomain(tf.int64, vocabulary_file='test_filename'),\n [1],\n sch.FixedColumnRepresentation(0)))\n\n schema.column_schemas['fixed_int_without_default'] = (\n sch.ColumnSchema(tf.int64, [5], sch.FixedColumnRepresentation()))\n\n schema.column_schemas['fixed_float_with_default'] = (\n sch.ColumnSchema(tf.float32, [1], sch.FixedColumnRepresentation(\n default_value=0.0)))\n\n schema.column_schemas['fixed_float_without_default'] = (\n sch.ColumnSchema(tf.float32, [5], sch.FixedColumnRepresentation()))\n\n schema.column_schemas['fixed_string_with_default'] = (\n sch.ColumnSchema(tf.string, [1],\n sch.FixedColumnRepresentation(default_value='default')))\n\n schema.column_schemas['fixed_string_without_default'] = (\n sch.ColumnSchema(tf.string, [5], sch.FixedColumnRepresentation()))\n\n schema.column_schemas['3d_fixed_int_without_default'] = (\n sch.ColumnSchema(tf.int64, [5, 6, 7], sch.FixedColumnRepresentation()))\n\n # VarLenFeatures\n schema.column_schemas['var_bool'] = (\n sch.ColumnSchema(tf.bool, None, sch.ListColumnRepresentation()))\n\n schema.column_schemas['var_int'] = (\n sch.ColumnSchema(tf.int64, None, sch.ListColumnRepresentation()))\n\n schema.column_schemas['var_float'] = (\n sch.ColumnSchema(tf.float32, None, sch.ListColumnRepresentation()))\n\n schema.column_schemas['var_string'] = (\n sch.ColumnSchema(tf.string, None, sch.ListColumnRepresentation()))\n\n # SparseFeatures\n schema.column_schemas['sparse_bool'] = (\n sch.ColumnSchema(\n tf.bool, [15],\n sch.SparseColumnRepresentation('sparse_bool_value',\n [sch.SparseIndexField(\n 'sparse_bool_index', True)])))\n\n schema.column_schemas['sparse_int'] = (\n sch.ColumnSchema(\n tf.int64, [150],\n sch.SparseColumnRepresentation('sparse_int_value',\n [sch.SparseIndexField(\n 'sparse_int_index', False)])))\n\n schema.column_schemas['sparse_float'] = (\n sch.ColumnSchema(\n tf.float32, [1500],\n sch.SparseColumnRepresentation('sparse_float_value',\n [sch.SparseIndexField(\n 'sparse_float_index',\n False)])))\n\n schema.column_schemas['sparse_string'] = (\n sch.ColumnSchema(\n tf.string, [15000],\n sch.SparseColumnRepresentation('sparse_string_value',\n [sch.SparseIndexField(\n 'sparse_string_index',\n True)])))\n\n return schema\n",
"id": "340463",
"language": "Python",
"matching_score": 2.4717440605163574,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/test_common.py"
},
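test_common.py above pairs a tf.Example feature spec with the manually built Schema it should correspond to. A short sketch of the round trip that correspondence implies, using a small hypothetical spec:

import tensorflow as tf
from tensorflow_transform.tf_metadata import dataset_schema as sch

spec = {
    'age': tf.FixedLenFeature(shape=[1], dtype=tf.int64, default_value=0),
    'tags': tf.VarLenFeature(dtype=tf.string),
    'weights': tf.SparseFeature(index_key='weights_index',
                                value_key='weights_value',
                                dtype=tf.float32, size=10),
}
schema = sch.from_feature_spec(spec)
print(schema.as_feature_spec() == spec)    # expected: True, the spec round-trips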
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Reader for v1 JSON to `Schema`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport six\nimport tensorflow as tf\n\nfrom tensorflow_transform.tf_metadata import dataset_schema as sch\n\n\ndef from_schema_json(schema_json):\n \"\"\"Translate a v1 JSON schema into a `Schema`.\"\"\"\n schema_dict = json.loads(schema_json)\n feature_column_schemas = {\n feature_dict['name']: _from_feature_dict(feature_dict)\n for feature_dict in schema_dict.get('feature', [])\n }\n sparse_feature_column_schemas = {\n sparse_feature_dict['name']: _from_sparse_feature_dict(\n sparse_feature_dict)\n for sparse_feature_dict in schema_dict.get('sparseFeature', [])\n }\n overlapping_keys = set(six.iterkeys(feature_column_schemas)).intersection(\n six.iterkeys(sparse_feature_column_schemas))\n if overlapping_keys:\n raise ValueError('Keys of dense and sparse features overlapped. '\n 'overlapping keys: %s' % overlapping_keys)\n feature_column_schemas.update(sparse_feature_column_schemas)\n return sch.Schema(feature_column_schemas)\n\n\ndef _from_feature_dict(feature_dict):\n \"\"\"Translate a JSON feature dict into a `ColumnSchema`.\"\"\"\n domain = _from_domain_dict(feature_dict['domain'])\n\n axes = []\n if 'fixedShape' in feature_dict:\n for axis in feature_dict['fixedShape'].get('axis', []):\n # int() is needed because protobuf JSON encodes int64 as string\n axes.append(sch.Axis(int(axis.get('size'))))\n elif 'valueCount' in feature_dict:\n # Value_count always means a 1-D feature of unknown size.\n # We don't support value_count.min and value_count.max yet.\n axes.append(sch.Axis(None))\n\n tf_options = feature_dict['parsingOptions']['tfOptions']\n if tf_options.get('fixedLenFeature') is not None:\n default_value = None\n try:\n # int() is needed because protobuf JSON encodes int64 as string\n default_value = int(tf_options['fixedLenFeature']['intDefaultValue'])\n except KeyError:\n try:\n default_value = tf_options['fixedLenFeature']['stringDefaultValue']\n except KeyError:\n try:\n default_value = tf_options['fixedLenFeature']['floatDefaultValue']\n except KeyError:\n pass\n representation = sch.FixedColumnRepresentation(default_value)\n elif tf_options.get('varLenFeature') is not None:\n representation = sch.ListColumnRepresentation()\n else:\n raise ValueError('Could not interpret tfOptions: {}'.format(tf_options))\n\n return sch.ColumnSchema(domain, axes, representation)\n\n\ndef _from_sparse_feature_dict(feature_dict):\n \"\"\"Translate a JSON sparse feature dict into a ColumnSchema.\"\"\"\n # assume there is only one value column\n value_feature = feature_dict['valueFeature'][0]\n domain = _from_domain_dict(value_feature['domain'])\n\n index_feature_dicts = feature_dict['indexFeature']\n\n # int() is needed because protobuf JSON encodes int64 as string\n axes = 
[sch.Axis(int(index_feature_dict['size']))\n for index_feature_dict in index_feature_dicts]\n\n value_field_name = value_feature['name']\n index_fields = [sch.SparseIndexField(index_feature_dict['name'],\n index_feature_dict['isSorted'])\n for index_feature_dict in index_feature_dicts]\n\n representation = sch.SparseColumnRepresentation(value_field_name,\n index_fields)\n\n return sch.ColumnSchema(domain, axes, representation)\n\n\ndef _from_domain_dict(domain):\n \"\"\"Translate a JSON domain dict into a Domain.\"\"\"\n if domain.get('ints') is not None:\n def maybe_to_int(s):\n return int(s) if s is not None else None\n return sch.IntDomain(\n tf.int64,\n maybe_to_int(domain['ints'].get('min')),\n maybe_to_int(domain['ints'].get('max')),\n domain['ints'].get('isCategorical'),\n domain['ints'].get('vocabularyFile', ''))\n if domain.get('floats') is not None:\n return sch.FloatDomain(tf.float32)\n if domain.get('strings') is not None:\n return sch.StringDomain(tf.string)\n if domain.get('bools') is not None:\n return sch.BoolDomain(tf.bool)\n raise ValueError('Unknown domain: {}'.format(domain))\n",
"id": "574449",
"language": "Python",
"matching_score": 5.142185688018799,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/v1_json/schema_io_v1_json_reader.py"
},
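schema_io_v1_json_reader.py above looks up keys such as 'fixedShape', 'parsingOptions' and 'tfOptions' when translating v1 JSON. A small sketch that feeds a hand-written fragment (hypothetical feature name and bounds) through from_schema_json:

import json
from tensorflow_transform.tf_metadata.v1_json import schema_io_v1_json_reader

schema_json = json.dumps({
    'feature': [{
        'name': 'age',
        'domain': {'ints': {'min': '0', 'max': '150', 'isCategorical': False}},
        'fixedShape': {'axis': [{'size': '1'}]},
        'parsingOptions': {'tfOptions': {'fixedLenFeature': {'intDefaultValue': '0'}}},
    }],
})
schema = schema_io_v1_json_reader.from_schema_json(schema_json)
print(schema.as_feature_spec())
# roughly: {'age': FixedLenFeature(shape=[1], dtype=tf.int64, default_value=0)}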
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Writer for `Schema` to v1 JSON.\"\"\"\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\n\nimport six\nimport tensorflow as tf\n\nfrom tensorflow_transform.tf_metadata import dataset_schema\n\n\n_FEATURE_TYPE_INT = 'INT'\n_FEATURE_TYPE_FLOAT = 'FLOAT'\n_FEATURE_TYPE_BYTES = 'BYTES'\n\n\ndef to_schema_json(schema):\n \"\"\"Converts in-memory `Schema` representation to v1 Schema JSON.\"\"\"\n\n result = {'feature': _get_features(schema),\n 'sparseFeature': _get_sparse_features(schema)}\n\n # not populated yet: Schema.string_domain\n\n return json.dumps(result, indent=2, separators=(',', ': '), sort_keys=True)\n\n\ndef _get_features(schema):\n result = []\n for name, column_schema in sorted(six.iteritems(schema.column_schemas)):\n if not isinstance(column_schema.representation,\n dataset_schema.SparseColumnRepresentation):\n result.append(_column_schema_to_dict_dense(name, column_schema))\n return result\n\n\ndef _get_sparse_features(schema):\n result = []\n for name, column_schema in sorted(six.iteritems(schema.column_schemas)):\n if isinstance(column_schema.representation,\n dataset_schema.SparseColumnRepresentation):\n result.append(_column_schema_to_dict_sparse(name, column_schema))\n return result\n\n\ndef _column_schema_to_dict_dense(name, column_schema):\n \"\"\"Translate a ColumnSchema for a dense column into JSON feature dict.\"\"\"\n representation = column_schema.representation\n\n result = {}\n result['name'] = name\n # Note result['deprecated'] is not populated in v1.\n # Note result['comment'] is not populated in v1.\n # Note result['presence'] is not populated in v1.\n\n if column_schema.is_fixed_size():\n axes = []\n for axis in column_schema.axes:\n # str() is needed to match protobuf JSON encoding of int64 as string\n axes.append({'size': str(axis.size)})\n result['fixedShape'] = {'axis': axes}\n else:\n # This is a 1-d variable length feature. 
We don't track max and min, so\n # just provide an empty value_count.\n result['valueCount'] = {}\n\n result['type'] = _to_feature_type_enum(column_schema.domain.dtype)\n result['domain'] = _to_domain(column_schema.domain)\n\n tf_options = _get_tf_options(representation, result['type'])\n result['parsingOptions'] = {'tfOptions': tf_options}\n\n return result\n\n\ndef _column_schema_to_dict_sparse(name, column_schema):\n \"\"\"Translate a ColumnSchema for a sparse column into JSON feature dict.\"\"\"\n representation = column_schema.representation\n\n result = {}\n result['name'] = name\n # Note result['deprecated'] is not populated in v1.\n # Note result['comment'] is not populated in v1.\n # Note result['presence'] is not populated in v1.\n\n index_feature_list = []\n # Note axes and index_fields must be in the same order.\n for (axis, index_field) in zip(\n column_schema.axes, representation.index_fields):\n\n # str() is needed to match protobuf JSON encoding of int64 as string\n index_feature_list.append({'name': index_field.name,\n 'size': str(axis.size),\n 'isSorted': index_field.is_sorted})\n\n result['indexFeature'] = index_feature_list\n result['valueFeature'] = [{'name': representation.value_field_name,\n 'type': _to_feature_type_enum(\n column_schema.domain.dtype),\n 'domain': _to_domain(column_schema.domain)}]\n\n return result\n\n\ndef _to_feature_type_enum(dtype):\n if dtype.is_integer:\n return _FEATURE_TYPE_INT\n if dtype.is_floating:\n return _FEATURE_TYPE_FLOAT\n if dtype == tf.string:\n return _FEATURE_TYPE_BYTES\n if dtype == tf.bool:\n return _FEATURE_TYPE_INT\n return 'TYPE_UNKNOWN'\n\n\ndef _to_domain(domain):\n \"\"\"Translates a Domain object into a JSON dict.\"\"\"\n result = {}\n # Domain names and bounds are not populated yet\n if isinstance(domain, dataset_schema.IntDomain):\n result['ints'] = {\n 'min': str(domain.min_value),\n 'max': str(domain.max_value),\n 'isCategorical': domain.is_categorical,\n 'vocabularyFile': domain.vocabulary_file\n }\n elif isinstance(domain, dataset_schema.FloatDomain):\n result['floats'] = {}\n elif isinstance(domain, dataset_schema.StringDomain):\n result['strings'] = {}\n elif isinstance(domain, dataset_schema.BoolDomain):\n result['bools'] = {}\n return result\n\n\ndef _get_tf_options(representation, type_string):\n \"\"\"Translate a ColumnRepresentation into JSON string for tf_options.\"\"\"\n tf_options = {}\n if isinstance(representation, dataset_schema.FixedColumnRepresentation):\n if representation.default_value is None:\n fixed_len_options = {}\n else:\n if type_string == 'BYTES':\n fixed_len_options = {'stringDefaultValue':\n representation.default_value}\n elif type_string == 'INT':\n int_default = int(representation.default_value)\n # str() is needed to match protobuf JSON encoding of int64 as string\n fixed_len_options = {'intDefaultValue': str(int_default)}\n elif type_string == 'FLOAT':\n fixed_len_options = {'floatDefaultValue':\n representation.default_value}\n else:\n raise ValueError(\"v1 Schema can't represent default value {} \"\n \"for type {}\".format(\n representation.default_value, type_string))\n tf_options['fixedLenFeature'] = fixed_len_options\n return tf_options\n\n if isinstance(representation, dataset_schema.ListColumnRepresentation):\n tf_options['varLenFeature'] = {}\n return tf_options\n\n raise TypeError('Cannot represent {} using the Feature representation; '\n 'the SparseFeature representation should have been '\n 'chosen.'.format(representation))\n\n",
"id": "3990379",
"language": "Python",
"matching_score": 1.3082679510116577,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/v1_json/schema_io_v1_json_writer.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Serialization strategy mapping `Schema` to v1 JSON.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom tensorflow_transform.tf_metadata import version_api\nfrom tensorflow_transform.tf_metadata.v1_json import schema_io_v1_json_reader\nfrom tensorflow_transform.tf_metadata.v1_json import schema_io_v1_json_writer\n\nfrom tensorflow.python.lib.io import file_io\n\n\nclass SchemaIOv1JSON(version_api.SchemaIO):\n \"\"\"Serialization strategy for the v1 Schema as JSON.\"\"\"\n\n def write(self, schema, path):\n \"\"\"Writes a v1 `Schema` to disk as JSON.\n\n The function converts the in-memory Schema representation to the v1 Schema\n JSON representation, and writes it to the specified path.\n\n Args:\n schema: The Schema to write.\n path: the filename to write to.\n \"\"\"\n schema_as_json = schema_io_v1_json_writer.to_schema_json(schema)\n\n basedir = os.path.dirname(path)\n if not file_io.file_exists(basedir):\n file_io.recursive_create_dir(basedir)\n\n file_io.write_string_to_file(path + \".json\", schema_as_json)\n\n def read(self, path):\n \"\"\"Reads a v1 JSON schema from disk.\"\"\"\n # Ensure that the Schema file exists\n if not file_io.file_exists(path + \".json\"):\n raise IOError(\"v1 Schema file does not exist at: %s\" % path)\n\n file_content = file_io.FileIO(path + \".json\", \"r\").read()\n return schema_io_v1_json_reader.from_schema_json(file_content)\n",
"id": "2146676",
"language": "Python",
"matching_score": 1.941970944404602,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/v1_json/schema_io_v1_json.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities to read and write metadata in standardized versioned formats.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow_transform.tf_metadata import dataset_metadata\nfrom tensorflow_transform.tf_metadata import metadata_directory\nfrom tensorflow_transform.tf_metadata import version_api\nfrom tensorflow_transform.tf_metadata.v1_json import schema_io_v1_json\n\n# The _all_versions dict registers metadata versions that this library knows\n# about. Typically all known versions will be written, and the most recent\n# known version available in a given directory will be parsed.\n_V1_JSON = version_api.MetadataVersion('v1', 'json',\n schema_io_v1_json.SchemaIOv1JSON(),\n None, None, None, None)\n_all_versions = {'1_JSON': _V1_JSON}.items() # make immutable\n\n\ndef read_metadata(paths, versions=_all_versions):\n \"\"\"Load metadata from multiple paths into a new DatasetMetadata.\"\"\"\n dm = dataset_metadata.DatasetMetadata()\n if isinstance(paths, list):\n _read_merge_all(dm, paths, versions)\n else:\n _read_merge(dm, paths, versions)\n return dm\n\n\ndef write_metadata(metadata, path, versions=_all_versions):\n \"\"\"Write all known versions, for forward compatibility.\n\n Args:\n metadata: A `DatasetMetadata` to write.\n path: a path to a directory where metadata should be written.\n versions: a dict of {version_id: MetadataVersion}; defaults to all known\n versions.\n \"\"\"\n basedir = metadata_directory.DatasetMetadataDirectory(path)\n for _, version in versions:\n vdir = basedir.version_dir(version)\n version.write(metadata, vdir)\n\n\ndef _read_merge_all(metadata, paths, versions=_all_versions):\n \"\"\"Load metadata from multiple paths into a DatasetMetadata.\n\n Args:\n metadata: A `DatasetMetadata` to update.\n paths: a list of file paths, each pointing to a metadata directory\n having the prescribed structure. Each one may provide different\n metadata versions.\n versions: a dict of {version_id: MetadataVersion}; defaults to all known\n versions.\n \"\"\"\n for path in paths:\n _read_merge(metadata, path, versions)\n\n\ndef _read_merge(metadata, path, versions=_all_versions):\n \"\"\"Load metadata from a path into a DatasetMetadata.\n\n Args:\n metadata: A `DatasetMetadata` to update.\n path: A metadata directory having the prescribed structure. Each one may\n provide different metadata versions.\n versions: a dict of {version_id: MetadataVersion}; defaults to all known\n versions.\n \"\"\"\n basedir = metadata_directory.DatasetMetadataDirectory(path)\n\n (_, version), = versions\n vdir = basedir.version_dir(version)\n other = version.read(vdir)\n metadata.merge(other)\n",
"id": "4920421",
"language": "Python",
"matching_score": 2.393329381942749,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/metadata_io.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for dataset_metadata.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tempfile\n\n\nfrom tensorflow_transform.tf_metadata import dataset_metadata\nfrom tensorflow_transform.tf_metadata import metadata_io\nfrom tensorflow_transform.tf_metadata import version_api\nfrom tensorflow_transform.tf_metadata.vtest import schema_io_vtest\nimport unittest\n\n\n_VTEST = version_api.MetadataVersion('vTest', None,\n schema_io_vtest.SchemaIOvTest(),\n None, None, None, None)\n_test_versions = {'test': _VTEST}.items() # make immutable\n\n\nclass DatasetMetadataTest(unittest.TestCase):\n\n def test_write_and_read(self):\n basedir = tempfile.mkdtemp()\n original_schema = schema_io_vtest.TestSchema(\n {'test_feature_1': 'bogus 1', 'test_feature_2': 'bogus 2'})\n original = dataset_metadata.DatasetMetadata(schema=original_schema)\n\n metadata_io.write_metadata(original, basedir, versions=_test_versions)\n reloaded = metadata_io.read_metadata(basedir, versions=_test_versions)\n\n self.assertTrue('test_feature_1' in reloaded.schema.column_schemas)\n self.assertTrue('test_feature_2' in reloaded.schema.column_schemas)\n self.assertEqual(2, len(reloaded.schema.column_schemas))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "8685034",
"language": "Python",
"matching_score": 2.6095781326293945,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/metadata_io_test.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Serialization strategy mapping `Schema` to v1 protos.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow_transform.tf_metadata import dataset_schema\nfrom tensorflow_transform.tf_metadata import version_api\n\n\nclass SchemaIOvTest(version_api.SchemaIO):\n\n def write(self, schema, path):\n with open(path + \".test\", \"w\") as f:\n f.write(\"\\n\".join(schema.column_schemas.keys()))\n\n def read(self, path):\n with open(path + \".test\") as f:\n all_feature_names = f.read().splitlines()\n return TestSchema(all_feature_names)\n\n\nclass TestSchema(dataset_schema.Schema):\n\n def __init__(self, feature_names):\n features = {feature_name: \"Bogus FeatureSchema for %s\" % feature_name\n for feature_name in feature_names}\n super(TestSchema, self).__init__(features)\n\n",
"id": "6000335",
"language": "Python",
"matching_score": 0.9109023809432983,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/vtest/schema_io_vtest.py"
},
{
"content": "# coding=utf-8\n#\n# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tensorflow-transform ExampleProtoCoder tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport pickle\nimport sys\n\n# Note that this needs to happen before any non-python imports, so we do it\n# pretty early on.\nif any(arg == '--proto_implementation_type=python' for arg in sys.argv):\n os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'python'\nelif any(arg == '--proto_implementation_type=cpp' for arg in sys.argv):\n os.environ['PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION'] = 'cpp'\nelif any(arg.startswith('--proto_implementation_type') for arg in sys.argv):\n raise ValueError('Unexpected value for --proto_implementation_type')\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow_transform.coders import example_proto_coder\nfrom tensorflow_transform.tf_metadata import dataset_schema\n\nfrom google.protobuf.internal import api_implementation\nfrom google.protobuf import text_format\nimport unittest\n\n\n\nclass ExampleProtoCoderTest(unittest.TestCase):\n\n _INPUT_SCHEMA = dataset_schema.from_feature_spec({\n 'scalar_feature_1': tf.FixedLenFeature(shape=[], dtype=tf.int64),\n 'scalar_feature_2': tf.FixedLenFeature(shape=[], dtype=tf.int64),\n 'scalar_feature_3': tf.FixedLenFeature(shape=[], dtype=tf.float32),\n 'varlen_feature_1': tf.VarLenFeature(dtype=tf.float32),\n 'varlen_feature_2': tf.VarLenFeature(dtype=tf.string),\n '1d_vector_feature': tf.FixedLenFeature(shape=[1], dtype=tf.string),\n '2d_vector_feature': tf.FixedLenFeature(shape=[2, 2], dtype=tf.float32),\n 'sparse_feature': tf.SparseFeature('idx', 'value', tf.float32, 10),\n })\n\n\n def _assert_encode_decode(self, coder, expected_proto_text, expected_decoded):\n example = tf.train.Example()\n text_format.Merge(expected_proto_text, example)\n data = example.SerializeToString()\n\n # Assert the data is decoded into the expected format.\n decoded = coder.decode(data)\n np.testing.assert_equal(expected_decoded, decoded)\n\n # Assert the decoded data can be encoded back into the original proto.\n encoded = coder.encode(decoded)\n parsed_example = tf.train.Example()\n parsed_example.ParseFromString(encoded)\n self.assertEqual(example, parsed_example)\n\n # Assert the data can be decoded from the encoded string.\n decoded_again = coder.decode(encoded)\n np.testing.assert_equal(expected_decoded, decoded_again)\n\n def _assert_decode_encode(self, coder, expected_proto_text, expected_decoded):\n example = tf.train.Example()\n text_format.Merge(expected_proto_text, example)\n\n # Assert the expected decoded data can be encoded into the expected proto.\n encoded = coder.encode(expected_decoded)\n parsed_example = tf.train.Example()\n parsed_example.ParseFromString(encoded)\n self.assertEqual(example, parsed_example)\n\n # Assert the encoded data can be decoded into the original input.\n decoded = 
coder.decode(encoded)\n np.testing.assert_equal(expected_decoded, decoded)\n\n # Assert the decoded data can be encoded back into the expected proto.\n encoded_again = coder.encode(decoded)\n parsed_example_again = tf.train.Example()\n parsed_example_again.ParseFromString(encoded_again)\n np.testing.assert_equal(example, parsed_example_again)\n\n def test_example_proto_coder(self):\n # We use a single coder and invoke multiple encodes and decodes on it to\n # make sure that cache consistency is implemented properly.\n coder = example_proto_coder.ExampleProtoCoder(self._INPUT_SCHEMA)\n\n # Python types.\n example_proto_text = \"\"\"\n features {\n feature { key: \"scalar_feature_1\" value { int64_list { value: [ 12 ] } } }\n feature { key: \"varlen_feature_1\"\n value { float_list { value: [ 89.0 ] } } }\n feature { key: \"scalar_feature_2\" value { int64_list { value: [ 12 ] } } }\n feature { key: \"scalar_feature_3\"\n value { float_list { value: [ 1.0 ] } } }\n feature { key: \"1d_vector_feature\"\n value { bytes_list { value: [ 'this is a ,text' ] } } }\n feature { key: \"2d_vector_feature\"\n value { float_list { value: [ 1.0, 2.0, 3.0, 4.0 ] } } }\n feature { key: \"varlen_feature_2\"\n value { bytes_list { value: [ 'female' ] } } }\n feature { key: \"value\" value { float_list { value: [ 12.0, 20.0 ] } } }\n feature { key: \"idx\" value { int64_list { value: [ 1, 4 ] } } }\n }\n \"\"\"\n expected_decoded = {\n 'scalar_feature_1': 12,\n 'scalar_feature_2': 12,\n 'scalar_feature_3': 1.0,\n 'varlen_feature_1': [89.0],\n '1d_vector_feature': ['this is a ,text'],\n '2d_vector_feature': [[1.0, 2.0], [3.0, 4.0]],\n 'varlen_feature_2': ['female'],\n 'sparse_feature': ([1, 4], [12.0, 20.0])\n }\n self._assert_encode_decode(coder, example_proto_text, expected_decoded)\n self._assert_decode_encode(coder, example_proto_text, expected_decoded)\n\n # Numpy types (with different values from above).\n example_proto_text = \"\"\"\n features {\n feature { key: \"scalar_feature_1\" value { int64_list { value: [ 13 ] } } }\n feature { key: \"varlen_feature_1\" value { float_list { } } }\n feature { key: \"scalar_feature_2\"\n value { int64_list { value: [ 214 ] } } }\n feature { key: \"scalar_feature_3\"\n value { float_list { value: [ 2.0 ] } } }\n feature { key: \"1d_vector_feature\"\n value { bytes_list { value: [ 'this is another ,text' ] } } }\n feature { key: \"2d_vector_feature\"\n value { float_list { value: [ 9.0, 8.0, 7.0, 6.0 ] } } }\n feature { key: \"varlen_feature_2\"\n value { bytes_list { value: [ 'male' ] } } }\n feature { key: \"value\" value { float_list { value: [ 13.0, 21.0 ] } } }\n feature { key: \"idx\" value { int64_list { value: [ 2, 5 ] } } }\n }\n \"\"\"\n expected_decoded = {\n 'scalar_feature_1': np.array(13),\n 'scalar_feature_2': np.int32(214),\n 'scalar_feature_3': np.array(2.0),\n 'varlen_feature_1': np.array([]),\n '1d_vector_feature': np.array(['this is another ,text']),\n '2d_vector_feature': np.array([[9.0, 8.0], [7.0, 6.0]]),\n 'varlen_feature_2': np.array(['male']),\n 'sparse_feature': (np.array([2, 5]), np.array([13.0, 21.0]))\n }\n self._assert_encode_decode(coder, example_proto_text, expected_decoded)\n self._assert_decode_encode(coder, example_proto_text, expected_decoded)\n\n def test_example_proto_coder_default_value(self):\n input_schema = dataset_schema.from_feature_spec({\n 'scalar_feature_3':\n tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=1.0),\n 'scalar_feature_4':\n tf.FixedLenFeature(shape=[], dtype=tf.float32, default_value=0.0),\n 
'1d_vector_feature':\n tf.FixedLenFeature(\n shape=[1], dtype=tf.float32, default_value=[2.0]),\n '2d_vector_feature':\n tf.FixedLenFeature(\n shape=[2, 2],\n dtype=tf.float32,\n default_value=[[1.0, 2.0], [3.0, 4.0]]),\n })\n coder = example_proto_coder.ExampleProtoCoder(input_schema)\n\n # Python types.\n example_proto_text = \"\"\"\n features {\n }\n \"\"\"\n example = tf.train.Example()\n text_format.Merge(example_proto_text, example)\n data = example.SerializeToString()\n\n # Assert the data is decoded into the expected format.\n expected_decoded = {\n 'scalar_feature_3': 1.0,\n 'scalar_feature_4': 0.0,\n '1d_vector_feature': [2.0],\n '2d_vector_feature': [[1.0, 2.0], [3.0, 4.0]],\n }\n decoded = coder.decode(data)\n np.testing.assert_equal(expected_decoded, decoded)\n\n def test_example_proto_coder_bad_default_value(self):\n input_schema = dataset_schema.from_feature_spec({\n 'scalar_feature_2': tf.FixedLenFeature(shape=[2], dtype=tf.float32,\n default_value=[1.0]),\n })\n with self.assertRaisesRegexp(ValueError,\n 'got default value with incorrect shape'):\n example_proto_coder.ExampleProtoCoder(input_schema)\n\n input_schema = dataset_schema.from_feature_spec({\n 'scalar_feature_2': tf.FixedLenFeature(shape=[], dtype=tf.float32,\n default_value=[0.0]),\n })\n with self.assertRaisesRegexp(ValueError,\n 'got default value with incorrect shape'):\n example_proto_coder.ExampleProtoCoder(input_schema)\n\n input_schema = dataset_schema.from_feature_spec({\n '2d_vector_feature':\n tf.FixedLenFeature(\n shape=[2, 3],\n dtype=tf.float32,\n default_value=[[1.0, 1.0], [1.0]]),\n })\n with self.assertRaisesRegexp(ValueError,\n 'got default value with incorrect shape'):\n example_proto_coder.ExampleProtoCoder(input_schema)\n\n def test_example_proto_coder_picklable(self):\n coder = example_proto_coder.ExampleProtoCoder(self._INPUT_SCHEMA)\n\n example_proto_text = \"\"\"\n features {\n feature { key: \"scalar_feature_1\" value { int64_list { value: [ 12 ] } } }\n feature { key: \"varlen_feature_1\"\n value { float_list { value: [ 89.0 ] } } }\n feature { key: \"scalar_feature_2\" value { int64_list { value: [ 12 ] } } }\n feature { key: \"scalar_feature_3\"\n value { float_list { value: [ 2.0 ] } } }\n feature { key: \"1d_vector_feature\"\n value { bytes_list { value: [ 'this is a ,text' ] } } }\n feature { key: \"2d_vector_feature\"\n value { float_list { value: [ 1.0, 2.0, 3.0, 4.0 ] } } }\n feature { key: \"varlen_feature_2\"\n value { bytes_list { value: [ 'female' ] } } }\n feature { key: \"value\" value { float_list { value: [ 12.0, 20.0 ] } } }\n feature { key: \"idx\" value { int64_list { value: [ 1, 4 ] } } }\n }\n \"\"\"\n expected_decoded = {\n 'scalar_feature_1': 12,\n 'scalar_feature_2': 12,\n 'scalar_feature_3': 2.0,\n 'varlen_feature_1': [89.0],\n '1d_vector_feature': ['this is a ,text'],\n '2d_vector_feature': [[1.0, 2.0], [3.0, 4.0]],\n 'varlen_feature_2': ['female'],\n 'sparse_feature': ([1, 4], [12.0, 20.0])\n }\n\n # Ensure we can pickle right away.\n coder = pickle.loads(pickle.dumps(coder))\n self._assert_encode_decode(coder, example_proto_text, expected_decoded)\n self._assert_decode_encode(coder, example_proto_text, expected_decoded)\n\n # And after use.\n coder = pickle.loads(pickle.dumps(coder))\n self._assert_encode_decode(coder, example_proto_text, expected_decoded)\n self._assert_decode_encode(coder, example_proto_text, expected_decoded)\n\n def test_example_proto_coder_unicode(self):\n coder = example_proto_coder.ExampleProtoCoder(\n 
dataset_schema.from_feature_spec({\n 'unicode_feature': tf.FixedLenFeature(shape=[], dtype=tf.string)\n }))\n\n encoded_example = coder.encode({'unicode_feature': u'Hello κόσμε'})\n example = tf.train.Example()\n example.ParseFromString(encoded_example)\n self.assertEqual(\n example.features.feature['unicode_feature'].bytes_list.value[0],\n u'Hello κόσμε'.encode('utf-8'))\n\n def test_example_proto_coder_error(self):\n input_schema = dataset_schema.from_feature_spec({\n '2d_vector_feature': tf.FixedLenFeature(shape=[2, 2], dtype=tf.int64),\n })\n coder = example_proto_coder.ExampleProtoCoder(input_schema)\n\n example_decoded_value = {\n '2d_vector_feature': [1, 2, 3]\n }\n example_proto_text = \"\"\"\n features {\n feature { key: \"1d_vector_feature\"\n value { int64_list { value: [ 1, 2, 3 ] } } }\n }\n \"\"\"\n example = tf.train.Example()\n text_format.Merge(example_proto_text, example)\n\n # Ensure that we raise an exception for trying to encode invalid data.\n with self.assertRaisesRegexp(ValueError, 'got wrong number of values'):\n _ = coder.encode(example_decoded_value)\n\n # Ensure that we raise an exception for trying to parse invalid data.\n with self.assertRaisesRegexp(ValueError, 'got wrong number of values'):\n _ = coder.decode(example.SerializeToString())\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "3559919",
"language": "Python",
"matching_score": 2.244279384613037,
"max_stars_count": 0,
"path": "tensorflow_transform/coders/example_proto_coder_test.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Example using census data from UCI repository.\"\"\"\n\n# pylint: disable=g-bad-import-order\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport pprint\nimport tempfile\n\n\nimport tensorflow as tf\nimport tensorflow_transform as tft\nfrom apache_beam.io import textio\nfrom apache_beam.io import tfrecordio\nfrom tensorflow.contrib import learn\nfrom tensorflow.contrib import lookup\nfrom tensorflow.contrib.learn.python.learn.utils import input_fn_utils\n\nfrom tensorflow_transform.beam import impl as beam_impl\nfrom tensorflow_transform.beam.tft_beam_io import transform_fn_io\nfrom tensorflow_transform.coders import csv_coder\nfrom tensorflow_transform.coders import example_proto_coder\nfrom tensorflow_transform.saved import saved_transform_io\nfrom tensorflow_transform.tf_metadata import dataset_metadata\nfrom tensorflow_transform.tf_metadata import dataset_schema\nfrom tensorflow_transform.tf_metadata import metadata_io\n\nimport apache_beam as beam\n\nCATEGORICAL_FEATURE_KEYS = [\n 'workclass', 'education', 'marital-status', 'occupation', 'relationship',\n 'race', 'sex', 'native-country'\n]\nNUMERIC_FEATURE_KEYS = [\n 'age', 'education-num', 'capital-gain', 'capital-loss',\n 'hours-per-week'\n]\nLABEL_KEY = 'label'\n\n\ndef _create_raw_metadata():\n \"\"\"Create a DatasetMetadata for the raw data.\"\"\"\n column_schemas = {\n key: dataset_schema.ColumnSchema(\n tf.string, [], dataset_schema.FixedColumnRepresentation())\n for key in CATEGORICAL_FEATURE_KEYS\n }\n column_schemas.update({\n key: dataset_schema.ColumnSchema(\n tf.float32, [], dataset_schema.FixedColumnRepresentation())\n for key in NUMERIC_FEATURE_KEYS\n })\n column_schemas[LABEL_KEY] = dataset_schema.ColumnSchema(\n tf.string, [], dataset_schema.FixedColumnRepresentation())\n raw_data_metadata = dataset_metadata.DatasetMetadata(dataset_schema.Schema(\n column_schemas))\n return raw_data_metadata\n\nRAW_DATA_METADATA = _create_raw_metadata()\n\n# Constants used for training. Note that the number of instances will be\n# computed by tf.Transform in future versions, in which case it can be read from\n# the metadata. Similarly BUCKET_SIZES will not be needed as this information\n# will be stored in the metadata for each of the columns. 
The bucket size\n# includes all listed categories in the dataset description as well as one extra\n# for \"?\" which represents unknown.\nTRAIN_BATCH_SIZE = 128\nTRAIN_NUM_EPOCHS = 200\nNUM_TRAIN_INSTANCES = 32561\nNUM_TEST_INSTANCES = 16281\nBUCKET_SIZES = [9, 17, 8, 15, 17, 6, 3, 43]\n\n# Names of temp files\nTRANSFORMED_TRAIN_DATA_FILEBASE = 'train_transformed'\nTRANSFORMED_TEST_DATA_FILEBASE = 'test_transformed'\nEXPORTED_MODEL_DIR = 'exported_model_dir'\n\n# Functions for preprocessing\n\n\ndef transform_data(train_data_file, test_data_file, working_dir):\n \"\"\"Transform the data and write out as a TFRecord of Example protos.\n\n Read in the data using the CSV reader, and transform it using a\n preprocessing pipeline that scales numeric data and converts categorical data\n from strings to int64 values indices, by creating a vocabulary for each\n category.\n\n Args:\n train_data_file: File containing training data\n test_data_file: File containing test data\n working_dir: Directory to write transformed data and metadata to\n \"\"\"\n\n def preprocessing_fn(inputs):\n \"\"\"Preprocess input columns into transformed columns.\"\"\"\n outputs = {}\n\n # Scale numeric columns to have range [0, 1].\n for key in NUMERIC_FEATURE_KEYS:\n outputs[key] = tft.scale_to_0_1(inputs[key])\n\n # For all categorical columns except the label column, we use\n # tft.string_to_int which computes the set of unique values and uses this\n # to convert the strings to indices.\n for key in CATEGORICAL_FEATURE_KEYS:\n outputs[key] = tft.string_to_int(inputs[key])\n\n # For the label column we provide the mapping from string to index.\n def convert_label(label):\n table = lookup.index_table_from_tensor(['>50K', '<=50K'])\n return table.lookup(label)\n outputs[LABEL_KEY] = tft.apply_function(convert_label, inputs[LABEL_KEY])\n\n return outputs\n\n # The \"with\" block will create a pipeline, and run that pipeline at the exit\n # of the block.\n with beam.Pipeline() as pipeline:\n with beam_impl.Context(temp_dir=tempfile.mkdtemp()):\n # Create a coder to read the census data with the schema. To do this we\n # need to list all columns in order since the schema doesn't specify the\n # order of columns in the csv.\n ordered_columns = [\n 'age', 'workclass', 'fnlwgt', 'education', 'education-num',\n 'marital-status', 'occupation', 'relationship', 'race', 'sex',\n 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country',\n 'label'\n ]\n converter = csv_coder.CsvCoder(ordered_columns, RAW_DATA_METADATA.schema)\n\n # Read in raw data and convert using CSV converter. Note that we apply\n # some Beam transformations here, which will not be encoded in the TF\n # graph since we don't do the from within tf.Transform's methods\n # (AnalyzeDataset, TransformDataset etc.). These transformations are just\n # to get data into a format that the CSV converter can read, in particular\n # removing empty lines and removing spaces after commas.\n raw_data = (\n pipeline\n | 'ReadTrainData' >> textio.ReadFromText(train_data_file)\n | 'FilterTrainData' >> beam.Filter(lambda line: line)\n | 'FixCommasTrainData' >> beam.Map(\n lambda line: line.replace(', ', ','))\n | 'DecodeTrainData' >> beam.Map(converter.decode))\n\n # Combine data and schema into a dataset tuple. 
Note that we already used\n # the schema to read the CSV data, but we also need it to interpret\n # raw_data.\n raw_dataset = (raw_data, RAW_DATA_METADATA)\n transformed_dataset, transform_fn = (\n raw_dataset | beam_impl.AnalyzeAndTransformDataset(preprocessing_fn))\n transformed_data, transformed_metadata = transformed_dataset\n\n _ = transformed_data | 'WriteTrainData' >> tfrecordio.WriteToTFRecord(\n os.path.join(working_dir, TRANSFORMED_TRAIN_DATA_FILEBASE),\n coder=example_proto_coder.ExampleProtoCoder(\n transformed_metadata.schema))\n\n # Now apply transform function to test data. In this case we also remove\n # the header line from the CSV file and the trailing period at the end of\n # each line.\n raw_test_data = (\n pipeline\n | 'ReadTestData' >> textio.ReadFromText(test_data_file)\n | 'FilterTestData' >> beam.Filter(\n lambda line: line and line != '|1x3 Cross validator')\n | 'FixCommasTestData' >> beam.Map(\n lambda line: line.replace(', ', ','))\n | 'RemoveTrailingPeriodsTestData' >> beam.Map(lambda line: line[:-1])\n | 'DecodeTestData' >> beam.Map(converter.decode))\n\n raw_test_dataset = (raw_test_data, RAW_DATA_METADATA)\n\n transformed_test_dataset = (\n (raw_test_dataset, transform_fn) | beam_impl.TransformDataset())\n # Don't need transformed data schema, it's the same as before.\n transformed_test_data, _ = transformed_test_dataset\n\n _ = transformed_test_data | 'WriteTestData' >> tfrecordio.WriteToTFRecord(\n os.path.join(working_dir, TRANSFORMED_TEST_DATA_FILEBASE),\n coder=example_proto_coder.ExampleProtoCoder(\n transformed_metadata.schema))\n\n # Will write a SavedModel and metadata to two subdirectories of\n # working_dir, given by transform_fn_io.TRANSFORM_FN_DIR and\n # transform_fn_io.TRANSFORMED_METADATA_DIR respectively.\n _ = (\n transform_fn\n | 'WriteTransformFn' >>\n transform_fn_io.WriteTransformFn(working_dir))\n\n# Functions for training\n\n\ndef _make_training_input_fn(working_dir, filebase, batch_size):\n \"\"\"Creates an input function reading from transformed data.\n\n Args:\n working_dir: Directory to read transformed data and metadata from and to\n write exported model to.\n filebase: Base filename (relative to `working_dir`) of examples.\n batch_size: Batch size.\n\n Returns:\n The input function for training or eval.\n \"\"\"\n transformed_metadata = metadata_io.read_metadata(\n os.path.join(\n working_dir, transform_fn_io.TRANSFORMED_METADATA_DIR))\n transformed_feature_spec = transformed_metadata.schema.as_feature_spec()\n\n def input_fn():\n \"\"\"Input function for training and eval.\"\"\"\n transformed_features = tf.contrib.learn.io.read_batch_features(\n os.path.join(working_dir, filebase + '*'),\n batch_size, transformed_feature_spec, tf.TFRecordReader)\n\n # Extract features and label from the transformed tensors.\n transformed_labels = transformed_features.pop(LABEL_KEY)\n\n return transformed_features, transformed_labels\n\n return input_fn\n\n\ndef _make_serving_input_fn(working_dir):\n \"\"\"Creates an input function reading from raw data.\n\n Args:\n working_dir: Directory to read transformed metadata from.\n\n Returns:\n The serving input function.\n \"\"\"\n raw_feature_spec = RAW_DATA_METADATA.schema.as_feature_spec()\n # Remove label since it is not available during serving.\n raw_feature_spec.pop(LABEL_KEY)\n\n def serving_input_fn():\n \"\"\"Input function for serving.\"\"\"\n # Get raw features by generating the basic serving input_fn and calling it.\n # Here we generate an input_fn that expects a parsed Example proto to 
be fed\n # to the model at serving time. See also\n # input_fn_utils.build_default_serving_input_fn.\n raw_input_fn = input_fn_utils.build_parsing_serving_input_fn(\n raw_feature_spec)\n raw_features, _, default_inputs = raw_input_fn()\n\n # Apply the transform function that was used to generate the materialized\n # data.\n _, transformed_features = (\n saved_transform_io.partially_apply_saved_transform(\n os.path.join(working_dir, transform_fn_io.TRANSFORM_FN_DIR),\n raw_features))\n\n return input_fn_utils.InputFnOps(transformed_features, None, default_inputs)\n\n return serving_input_fn\n\n\ndef train_and_evaluate(working_dir, num_train_instances=NUM_TRAIN_INSTANCES,\n num_test_instances=NUM_TEST_INSTANCES):\n \"\"\"Train the model on training data and evaluate on test data.\n\n Args:\n working_dir: Directory to read transformed data and metadata from and to\n write exported model to.\n num_train_instances: Number of instances in train set\n num_test_instances: Number of instances in test set\n\n Returns:\n The results from the estimator's 'evaluate' method\n \"\"\"\n\n # Wrap scalars as real valued columns.\n real_valued_columns = [tf.feature_column.numeric_column(key, shape=())\n for key in NUMERIC_FEATURE_KEYS]\n\n # Wrap categorical columns. Note the combiner is irrelevant since the input\n # only has one value set per feature per instance.\n one_hot_columns = [\n tf.feature_column.categorical_column_with_identity(\n key, num_buckets=num_buckets)\n for key, num_buckets in zip(CATEGORICAL_FEATURE_KEYS, BUCKET_SIZES)]\n\n estimator = learn.LinearClassifier(real_valued_columns + one_hot_columns)\n\n # Fit the model using the default optimizer.\n train_input_fn = _make_training_input_fn(\n working_dir, TRANSFORMED_TRAIN_DATA_FILEBASE,\n batch_size=TRAIN_BATCH_SIZE)\n estimator.fit(\n input_fn=train_input_fn,\n max_steps=TRAIN_NUM_EPOCHS * num_train_instances / TRAIN_BATCH_SIZE)\n\n # Evaluate model on test dataset.\n eval_input_fn = _make_training_input_fn(\n working_dir, TRANSFORMED_TEST_DATA_FILEBASE,\n batch_size=1)\n\n # Export the model.\n serving_input_fn = _make_serving_input_fn(working_dir)\n exported_model_dir = os.path.join(working_dir, EXPORTED_MODEL_DIR)\n estimator.export_savedmodel(exported_model_dir, serving_input_fn)\n\n return estimator.evaluate(input_fn=eval_input_fn, steps=num_test_instances)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'input_data_dir',\n help='path to directory containing input data')\n parser.add_argument(\n '--working_dir',\n help='optional, path to directory to hold transformed data')\n args = parser.parse_args()\n\n if args.working_dir:\n working_dir = args.working_dir\n else:\n working_dir = tempfile.mkdtemp(dir=args.input_data_dir)\n\n train_data_file = os.path.join(args.input_data_dir, 'adult.data')\n test_data_file = os.path.join(args.input_data_dir, 'adult.test')\n\n transform_data(train_data_file, test_data_file, working_dir)\n\n results = train_and_evaluate(working_dir)\n\n pprint.pprint(results)\n\nif __name__ == '__main__':\n main()\n",
"id": "7267598",
"language": "Python",
"matching_score": 3.5736677646636963,
"max_stars_count": 0,
"path": "examples/census_example.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Library for testing Tensorflow Transform.\"\"\"\n\nimport os\n\n\nimport six\nimport tensorflow as tf\nfrom tensorflow_transform.beam import impl as beam_impl\nfrom tensorflow_transform.beam.tft_beam_io import beam_metadata_io\nfrom tensorflow_transform.beam.tft_beam_io import transform_fn_io\nfrom tensorflow.python.framework import test_util\n\n\nclass TransformTestCase(test_util.TensorFlowTestCase):\n \"\"\"Base test class for testing tf-transform preprocessing functions.\"\"\"\n\n # Display context for failing rows in data assertions.\n longMessage = True # pylint: disable=invalid-name\n\n def assertDataCloseOrEqual(self, a_data, b_data):\n \"\"\"Assert two datasets contain nearly equal values.\n\n Args:\n a_data: a sequence of dicts whose values are\n either strings, lists of strings, numeric types or a pair of\n those.\n b_data: same types as a_data\n\n Raises:\n AssertionError: if the two datasets are not the same.\n \"\"\"\n self.assertEqual(len(a_data), len(b_data),\n 'len(%r) != len(%r)' % (a_data, b_data))\n for i, (a_row, b_row) in enumerate(zip(a_data, b_data)):\n self.assertItemsEqual(a_row.keys(), b_row.keys(), msg='Row %d' % i)\n for key in a_row.keys():\n a_value = a_row[key]\n b_value = b_row[key]\n msg = 'Row %d, key %s' % (i, key)\n if isinstance(a_value, tuple):\n self._assertValuesCloseOrEqual(a_value[0], b_value[0], msg=msg)\n self._assertValuesCloseOrEqual(a_value[1], b_value[1], msg=msg)\n else:\n self._assertValuesCloseOrEqual(a_value, b_value, msg=msg)\n\n def _assertValuesCloseOrEqual(self, a_value, b_value, msg=None):\n try:\n if (isinstance(a_value, str) or\n isinstance(a_value, list) and a_value and\n isinstance(a_value[0], str)):\n self.assertAllEqual(a_value, b_value)\n else:\n self.assertAllClose(a_value, b_value)\n except (AssertionError, TypeError) as e:\n if msg:\n e.args = ((e.args[0] + ' : ' + msg,) + e.args[1:])\n raise\n\n def _resolveDeferredMetadata(self, transformed_metadata):\n \"\"\"Asserts that there is no unresolved metadata.\"\"\"\n # We should be able to call ResolveBeamFutures in all cases, but because\n # we are using Beam's automaterialization, we don't have access to an\n # explicit pipeline. 
Therefore we only call ResolveBeamFutures when we\n # are sure that transformed_metadata contains at least one element.\n if transformed_metadata.pcollections:\n transformed_metadata = (\n (transformed_metadata | beam_metadata_io.ResolveBeamFutures(None))[0])\n else:\n transformed_metadata = transformed_metadata.dataset_metadata\n\n # No more unresolved metadata should remain.\n unresolved_futures = transformed_metadata.substitute_futures({})\n self.assertEqual(unresolved_futures, [])\n return transformed_metadata\n\n def assertAnalyzeAndTransformResults(self,\n input_data,\n input_metadata,\n preprocessing_fn,\n expected_data=None,\n expected_metadata=None,\n only_check_core_metadata=False,\n expected_asset_file_contents=None,\n test_data=None,\n desired_batch_size=None):\n \"\"\"Assert that input data and metadata is transformed as expected.\n\n This methods asserts transformed data and transformed metadata match\n with expected_data and expected_metadata.\n\n Args:\n input_data: A sequence of dicts whose values are\n either strings, lists of strings, numeric types or a pair of those.\n input_metadata: DatasetMetadata describing input_data.\n preprocessing_fn: A function taking a dict of tensors and returning\n a dict of tensors.\n expected_data: (optional) A dataset with the same type constraints as\n input_data, but representing the output after transformation.\n If supplied, transformed data is asserted to be equal.\n expected_metadata: (optional) DatasetMetadata describing the transformed\n data. If supplied, transformed metadata is asserted to be equal.\n only_check_core_metadata: A boolean to indicate if all elements in\n the transformed metadata is asserted to be equal to expected metadata.\n If True, only transformed feature names, dtypes and representations\n are asserted.\n expected_asset_file_contents: (optional) A dictionary from asset filenames\n to their expected content as a list of text lines. Values should be\n the expected result of calling f.readlines() on the given asset files.\n Asset filenames are relative to the saved model's asset directory.\n test_data: (optional) If this is provided then instead of calling\n AnalyzeAndTransformDataset with input_data, this function will call\n AnalyzeDataset with input_data and TransformDataset with test_data.\n Note that this is the case even if input_data and test_data are equal.\n test_data should also conform to input_metadata.\n desired_batch_size: (optional) A batch size to batch elements by. If not\n provided, a batch size will be computed automatically.\n Raises:\n AssertionError: if the expected data does not match the results of\n transforming input_data according to preprocessing_fn, or\n (if provided) if the expected metadata does not match.\n \"\"\"\n if expected_asset_file_contents is None:\n expected_asset_file_contents = {}\n # Note: we don't separately test AnalyzeDataset and TransformDataset as\n # AnalyzeAndTransformDataset currently simply composes these two\n # transforms. 
If in future versions of the code, the implementation\n # differs, we should also run AnalyzeDataset and TransformDatset composed.\n temp_dir = self.get_temp_dir()\n with beam_impl.Context(\n temp_dir=temp_dir, desired_batch_size=desired_batch_size):\n if test_data is None:\n (transformed_data, transformed_metadata), transform_fn = (\n (input_data, input_metadata)\n | beam_impl.AnalyzeAndTransformDataset(preprocessing_fn))\n else:\n transform_fn = ((input_data, input_metadata)\n | beam_impl.AnalyzeDataset(preprocessing_fn))\n transformed_data, transformed_metadata = (\n ((test_data, input_metadata), transform_fn)\n | beam_impl.TransformDataset())\n\n # Write transform_fn so we can test its assets\n if expected_asset_file_contents:\n _ = transform_fn | transform_fn_io.WriteTransformFn(temp_dir)\n\n if expected_data is not None:\n self.assertDataCloseOrEqual(expected_data, transformed_data)\n\n if expected_metadata:\n transformed_metadata = self._resolveDeferredMetadata(transformed_metadata)\n\n if only_check_core_metadata:\n # preprocessing_fn may add metadata to column schema only relevant to\n # internal implementation such as vocabulary_file. As such, only check\n # feature names, dtypes and representations are as expected.\n self.assertSameElements(\n transformed_metadata.schema.column_schemas.keys(),\n expected_metadata.schema.column_schemas.keys())\n for k, v in transformed_metadata.schema.column_schemas.iteritems():\n expected_schema = expected_metadata.schema.column_schemas[k]\n self.assertEqual(expected_schema.representation, v.representation,\n \"representation doesn't match for feature '%s'\" % k)\n self.assertEqual(expected_schema.domain.dtype, v.domain.dtype,\n \"dtype doesn't match for feature '%s'\" % k)\n else:\n # Check the entire DatasetMetadata is as expected.\n # Use extra assertEqual for schemas, since full metadata assertEqual\n # error message is not conducive to debugging.\n self.assertEqual(expected_metadata.schema.column_schemas,\n transformed_metadata.schema.column_schemas)\n self.assertEqual(expected_metadata, transformed_metadata)\n\n for filename, file_contents in six.iteritems(expected_asset_file_contents):\n full_filename = os.path.join(\n temp_dir, transform_fn_io.TRANSFORM_FN_DIR, 'assets', filename)\n with tf.gfile.Open(full_filename) as f:\n self.assertEqual(f.readlines(), file_contents)\n",
"id": "1014014",
"language": "Python",
"matching_score": 3.9629671573638916,
"max_stars_count": 0,
"path": "tensorflow_transform/beam/tft_unit.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Transforms to read/write metadata from disk.\n\nA write/read cycle will render all metadata deferred, but in general users\nshould avoid doing this anyway and pass around live metadata objects.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport apache_beam as beam\nimport six\nfrom tensorflow_transform.tf_metadata import metadata_io\n\n\nclass BeamDatasetMetadata(\n collections.namedtuple(\n 'BeamDatasetMetadata', ['dataset_metadata', 'pcollections'])):\n \"\"\"A class like DatasetMetadata that also holds a dict of `PCollection`s.\n\n DatasetMetadata allows values to be instances of the `Future` class which\n allows us to represent deferred objects. This class allows us to also\n embed Beam values. We do this by adding a dictionary, `pcollections` which\n maps the names of futures to Beam `PCollection`s.\n \"\"\"\n\n @property\n def schema(self):\n return self.dataset_metadata.schema\n\n @schema.setter\n def schema(self, value):\n self.dataset_metadata.schema = value\n\n @property\n def provenance(self):\n return self.dataset_metadata.provenance\n\n @property\n def statistics(self):\n return self.dataset_metadata.statistics\n\n @property\n def anomalies(self):\n return self.dataset_metadata.anomalies\n\n @property\n def problem_statements(self):\n return self.dataset_metadata.problem_statements\n\n def merge(self, other):\n raise NotImplementedError\n\n\nclass ResolveBeamFutures(beam.PTransform):\n \"\"\"A PTransform to resolve futures of a DatasetMetadata.\"\"\"\n\n # NOTE: The pipeline metadata is required by PTransform given that all the\n # inputs may be non-deferred.\n def __init__(self, pipeline):\n super(ResolveBeamFutures, self).__init__()\n self.pipeline = pipeline\n\n def _extract_input_pvalues(self, metadata):\n return metadata, getattr(metadata, 'pcollections', {}).values()\n\n def expand(self, metadata):\n if isinstance(metadata, BeamDatasetMetadata):\n pcollections = metadata.pcollections\n metadata = metadata.dataset_metadata\n else:\n pcollections = {}\n\n # Extract `PCollection`s from futures.\n tensor_value_pairs = []\n for name, pcoll in six.iteritems(pcollections):\n tensor_value_pairs.append(\n pcoll\n | 'AddName[%s]' % name >> beam.Map(lambda x, name=name: (name, x)))\n tensor_value_mapping = beam.pvalue.AsDict(\n tensor_value_pairs | 'MergeTensorValuePairs' >> beam.Flatten(\n pipeline=self.pipeline))\n\n def resolve_futures(dummy_input, updated_metadata, future_values):\n updated_metadata.substitute_futures(future_values)\n return updated_metadata\n\n return (self.pipeline\n | 'CreateSingleton' >> beam.Create([None])\n | 'ResolveFutures' >> beam.Map(resolve_futures, metadata,\n tensor_value_mapping))\n\n\nclass WriteMetadata(beam.PTransform):\n \"\"\"A PTransform to write Metadata to disk.\n\n Input can either be a DatasetMetadata or a tuple of 
properties.\n \"\"\"\n\n # NOTE: The pipeline metadata is required by PTransform given that all the\n # inputs may be non-deferred.\n def __init__(self, path, pipeline):\n super(WriteMetadata, self).__init__()\n self._path = path\n self.pipeline = pipeline\n\n def _extract_input_pvalues(self, metadata):\n return metadata, getattr(metadata, 'pcollections', {}).values()\n\n def expand(self, metadata):\n return (metadata\n | 'ResolveBeamFutures' >> ResolveBeamFutures(self.pipeline)\n | 'WriteMetadata' >> beam.Map(metadata_io.write_metadata,\n self._path))\n",
"id": "8381242",
"language": "Python",
"matching_score": 2.171854257583618,
"max_stars_count": 0,
"path": "tensorflow_transform/beam/tft_beam_io/beam_metadata_io.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Allow setting metadata values as Futures, and filling them in later.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport abc\nimport six\n\n\nclass Future(object):\n\n def __init__(self, name):\n self._name = name\n\n @property\n def name(self):\n return self._name\n\n def __repr__(self):\n return \"{}({})\".format(self.__class__.__name__, repr(self.__dict__))\n\n\ndef _substitute_futures(obj, name_to_value, started=None):\n \"\"\"Substitute `Future`s hierarchically within the given object or collection.\n\n Args:\n obj: an object, dict, list, or set potentially containing `Future`s.\n name_to_value: a dict of Future name to value.\n started: a set of objects that have already been visited, to avoid cycles.\n\n Returns:\n A list of remaining Futures that were not substituted.\n \"\"\"\n if started is None:\n started = set()\n\n if isinstance(obj, dict):\n iterable = six.iteritems(obj)\n def subst_fn(key, name):\n obj[key] = name_to_value[name]\n elif isinstance(obj, list):\n iterable = enumerate(obj)\n def subst_fn(key, name):\n obj[key] = name_to_value[name]\n elif isinstance(obj, set):\n iterable = {entry: entry for entry in obj}\n def subst_fn(key, name):\n obj.remove(key)\n obj.add(name_to_value[name])\n else:\n if obj in started:\n return\n started.add(obj)\n iterable = six.iteritems(obj.__dict__)\n def subst_fn(key, name):\n obj.__setattr__(key, name_to_value[name])\n\n return [future\n for k, v in iterable\n for future in _maybe_subst(k, v, name_to_value, started, subst_fn)]\n\n\ndef _maybe_subst(k, v, name_to_value, started, subst_fn):\n if isinstance(v, Future):\n if v.name in name_to_value:\n subst_fn(k, v.name)\n else:\n return [v]\n if isinstance(v, (FutureContent, dict, list, set)):\n return _substitute_futures(v, name_to_value, started)\n return []\n\n\nclass FutureContent(object):\n \"\"\"An object that may contain some fields that are Futures.\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def substitute_futures(self, name_to_value, started=None):\n return _substitute_futures(self, name_to_value, started)\n\n def all_futures_resolved(self, started=None):\n \"\"\"Determine whether any Futures remain to be substituted.\"\"\"\n return not _substitute_futures(self, {}, started)\n",
"id": "11992537",
"language": "Python",
"matching_score": 0.5908820629119873,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/futures.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"In-memory representation of the problem statements associated with a dataset.\n\"\"\"\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\n\nclass ProblemStatements(collections.namedtuple('ProblemStatements', [])):\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self._asdict() == other._asdict()\n return NotImplemented\n\n def __ne__(self, other):\n return not self == other\n\n def merge(self, other):\n pass\n\n\nclass ProblemStatement(collections.namedtuple(\n 'ProblemStatement',\n ['raw_feature_keys',\n 'raw_label_keys',\n 'raw_weights_keys',\n 'transformed_feature_keys',\n 'transformed_label_keys',\n 'transformed_weights_keys'])):\n # the main constraint that we could enforce is that a transformed feature or\n # weight cannot depend on a raw label.\n pass\n",
"id": "3462290",
"language": "Python",
"matching_score": 0.6701017618179321,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/dataset_problem_statements.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Representation of versioned metadata serialization strategies.\n\nSpecific serialization strategies should subclass the abstract *IO classes.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\nimport collections\n\n\nfrom tensorflow_transform.tf_metadata import dataset_metadata\n\n\nclass MetadataVersion(collections.namedtuple(\"MetadataVersion\",\n [\"version_key\",\n \"version_flavor\",\n \"schema_io\",\n \"statistics_io\",\n \"anomalies_io\",\n \"provenance_io\",\n \"problem_statements_io\"])):\n \"\"\"A specific metadata serialization format.\"\"\"\n\n def read(self, vdir):\n \"\"\"Read metadata from the given path.\n\n Args:\n vdir: A `DatasetMetadataVersionDirectory` from which the metadata should\n be read.\n\n Returns:\n A `DatasetMetadata` object.\n \"\"\"\n\n schema = None\n provenance = None\n statistics = None\n anomalies = None\n problem_statements = None\n\n if self.schema_io is not None:\n schema = self.schema_io.read(\n vdir.schema_filename)\n if self.provenance_io is not None:\n provenance = self.provenance_io.read(\n vdir.provenance_filename)\n if self.statistics_io is not None:\n statistics = self.statistics_io.read(\n vdir.statistics_filename)\n if self.anomalies_io is not None:\n anomalies = self.anomalies_io.read(\n vdir.anomalies_filename)\n if self.problem_statements_io is not None:\n problem_statements = self.problem_statements_io.read(\n vdir.problem_statements_filename)\n\n return dataset_metadata.DatasetMetadata(\n schema=schema,\n statistics=statistics,\n anomalies=anomalies,\n provenance=provenance,\n problem_statements=problem_statements)\n\n def write(self, metadata, vdir):\n \"\"\"Write metadata to a given path.\n\n Args:\n metadata: A `DatasetMetadata` to write.\n vdir: A `DatasetMetadataVersionDirectory` where the metadata should\n be written.\n \"\"\"\n vdir.create()\n\n if self.schema_io is not None:\n self.schema_io.write(metadata.schema, vdir.schema_filename)\n if self.provenance_io is not None:\n self.provenance_io.write(metadata.provenance, vdir.provenance_filename)\n if self.statistics_io is not None:\n self.statistics_io.write(metadata.statistics, vdir.statistics_path)\n if self.anomalies_io is not None:\n self.anomalies_io.write(metadata.anomalies, vdir.anomalies_path)\n if self.problem_statements_io is not None:\n self.problem_statements_io.write(metadata.problem_statements,\n vdir.problem_statements_path)\n\n\nclass SchemaIO(object):\n \"\"\"A SchemaIO represents a serialization strategy.\n\n It maps the in-memory `Schema` representation to and from a specific\n serialization format, such as certain protos, a JSON representation, etc.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def write(self, schema, path):\n \"\"\"Write the schema to the given path.\n\n Args:\n schema: A `Schema` object to write.\n path: A path where the 
schema will be written as a single file (not a\n directory). The implementation may append an appropriate filename\n extension (e.g. \".pbtxt\", \".json\") to the name.\n \"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n @abc.abstractmethod\n def read(self, path):\n \"\"\"Read the schema from the given path.\n\n Args:\n path: A path from which the schema should be read. This path may exclude\n the implementation-specific filename extension.\n\n Returns:\n A `Schema` object.\n \"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n\nclass ProvenanceIO(object):\n \"\"\"A ProvenanceIO represents a serialization strategy.\n\n It maps the in-memory `Provenance` representation to and from a specific\n serialization format, such as certain protos, a JSON representation, etc.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def write(self, provenance, path):\n \"\"\"Write the provenance to the given path.\n\n Args:\n provenance: A `Provenance` object to write.\n path: A path where the provenance will be written as a single file (not a\n directory). The implementation may append an appropriate filename\n extension (e.g. \".pbtxt\", \".json\") to the name.\n \"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n @abc.abstractmethod\n def read(self, path):\n \"\"\"Read the provenance from the given path.\n\n Args:\n path: A path from which the provenance should be read.\n\n Returns:\n A `Provenance` object.\n \"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n\nclass StatisticsIO(object):\n \"\"\"A StatisticsIO represents a serialization strategy.\n\n It maps the in-memory `Statistics` representation to and from a specific\n serialization format, such as certain protos, a JSON representation, etc.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def write(self, statistics, path):\n \"\"\"Write the statistics to the given path.\n\n Args:\n statistics: A `Statistics` object to write.\n path: A path where the statistics should be written. The implementation\n will write files within a directory at this location. The directory\n is expected to exist already. Multiple files may be written within\n this directory.\n \"\"\"\n # The implementation may choose the filenames it writes, but should take\n # care not to overwrite existing files.\n raise NotImplementedError(\"Calling an abstract method.\")\n\n @abc.abstractmethod\n def read(self, path):\n \"\"\"Read the statistics from the given path.\n\n Args:\n path: A path from which the statistics should be read, representing a\n directory that may contain multiple files. All of these files will be\n read and their contents merged.\n\n Returns:\n A `Statistics` object.\n \"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n\nclass AnomaliesIO(object):\n \"\"\"An AnomaliesIO represents a serialization strategy.\n\n It maps the in-memory `Anomalies` representation to and from a specific\n serialization format, such as certain protos, a JSON representation, etc.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def write(self, anomalies, path):\n \"\"\"Write the anomalies to the given path.\n\n Args:\n anomalies: An `Anomalies` object to write.\n path: A path where the anomalies should be written. The implementation\n will write files within a directory at this location. The directory\n is expected to exist already. 
Multiple files may be written within\n this directory.\n \"\"\"\n # The implementation may choose the filenames it writes, but should take\n # care not to overwrite existing files.\n raise NotImplementedError(\"Calling an abstract method.\")\n\n @abc.abstractmethod\n def read(self, path):\n \"\"\"Read the anomalies from the given path.\n\n Args:\n path: A path from which the anomalies should be read, representing a\n directory that may contain multiple files. All of these files will be\n read and their contents merged.\n\n Returns:\n An `Anomalies` object.\n \"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n\nclass ProblemStatementsIO(object):\n \"\"\"A ProblemStatementsIO represents a serialization strategy.\n\n It maps the in-memory `ProblemStatements` representation to and from a\n specific serialization format, such as certain protos, a JSON representation,\n etc.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def write(self, problem_statements, path):\n \"\"\"Write the problem_statements to the given path.\n\n Args:\n problem_statements: A `ProblemStatements` object to write.\n path: A path where the problem_statements should be written. The\n implementation will write files within a directory at this location.\n The directory is expected to exist already. Multiple files may be\n written within this directory.\n \"\"\"\n # The implementation may choose the filenames it writes, but should take\n # care not to overwrite existing files.\n raise NotImplementedError(\"Calling an abstract method.\")\n\n @abc.abstractmethod\n def read(self, path):\n \"\"\"Read the problem_statements from the given path.\n\n Args:\n path: A path from which the problem_statements should be read,\n representing a directory that may contain multiple files. All of these\n files will be read and their contents merged.\n\n Returns:\n A `ProblemStatements` object.\n \"\"\"\n raise NotImplementedError(\"Calling an abstract method.\")\n\n",
"id": "8649014",
"language": "Python",
"matching_score": 3.354100465774536,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/version_api.py"
},
{
"content": "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A prescribed directory structure for storing metadata in versioned formats.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport tensorflow as tf\n\n\nclass DatasetMetadataDirectory(object):\n \"\"\"A prescribed directory structure for storing metadata in versioned formats.\n \"\"\"\n\n def __init__(self, basepath):\n self._basepath = basepath\n\n @property\n def assets_path(self):\n return os.path.join(self._basepath, 'assets')\n\n def version_dir(self, version):\n if version.version_flavor is not None:\n version_flavor_dir = version.version_key + '-' + version.version_flavor\n else:\n version_flavor_dir = version.version_key\n return DatasetMetadataVersionDirectory(\n os.path.join(self._basepath, version_flavor_dir))\n\n @property\n def basepath(self):\n return self._basepath\n\n\nclass DatasetMetadataVersionDirectory(object):\n \"\"\"A prescribed directory structure for storing metadata in a known format.\"\"\"\n\n def __init__(self, basepath):\n self._basepath = basepath\n\n def create(self):\n tf.gfile.MakeDirs(self._basepath)\n\n @property\n def schema_filename(self):\n return os.path.join(self._basepath, 'schema')\n\n @property\n def provenance_filename(self):\n return os.path.join(self._basepath, 'provenance')\n\n @property\n def statistics_path(self):\n return os.path.join(self._basepath, 'statistics')\n\n @property\n def anomalies_path(self):\n return os.path.join(self._basepath, 'anomalies')\n\n @property\n def problem_statements_path(self):\n return os.path.join(self._basepath, 'problem_statements')\n\n",
"id": "4404940",
"language": "Python",
"matching_score": 2.6088836193084717,
"max_stars_count": 0,
"path": "tensorflow_transform/tf_metadata/metadata_directory.py"
}
] | 2.609231 |
Ronll | [
{
"content": "# List of all entry-points of the Angular cdk-experimental package.\nCDK_EXPERIMENTAL_ENTRYPOINTS = [\n \"column-resize\",\n \"combobox\",\n \"dialog\",\n \"menu\",\n \"listbox\",\n \"popover-edit\",\n \"scrolling\",\n \"selection\",\n]\n\n# List of all entry-point targets of the Angular cdk-experimental package.\nCDK_EXPERIMENTAL_TARGETS = [\"//src/cdk-experimental\"] + \\\n [\"//src/cdk-experimental/%s\" % ep for ep in CDK_EXPERIMENTAL_ENTRYPOINTS]\n",
"id": "6758166",
"language": "Python",
"matching_score": 0,
"max_stars_count": 2,
"path": "src/cdk-experimental/config.bzl"
}
] | 0 |
henrhie | [
{
"content": "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import OneHotEncoder\nimport math\n\ndef main():\n\tdata = pd.read_csv(\"c:/users/henry/desktop/datasets/b_cancer.csv\") #loads dataset\n\tlb = LabelEncoder() #instantiate the LabelEncoder class\n\tx = data.iloc[:,2:32].values\n\ty = data[\"diagnosis\"].values\n\ty = lb.fit_transform(y) #converts the labels into ones and zeros\n\tx = (x-np.mean(x))/np.std(x) #normalize the input features\n\tx_train,x_test,y_train,y_test = train_test_split(x,y , test_size=0.3, random_state=0)\n\tx_train = x_train.T #transposes the dataset to make matrix multiplication feasible\n\tx_test = x_test.T\n\ty_train = y_train.reshape(-1,1)\n\ty_train = y_train.T\n\ty_test = y_test.reshape(-1,1)\n\ty_test = y_test.T\n\tlayer_dims =[x_train.shape[0],42,62,12,20,11,1]\n\tlearning_rate=1e-3\n\tprint(\"learning_rate for training is \" + str(learning_rate))\n\tprint(y_test)\n\tparameters = network_model(x_train,y_train,x_test,y_test,learning_rate=learning_rate,epochs=10000,layer_dims=layer_dims,lambd=0.0,learning_decay=0.00000001,p_keep=1.0,beta=0.9,optimizer=\"gradient descent\")\n\ttrain_predictions = predict(x_train,parameters)\n\tpredictions = predict(x_test,parameters)\n\tprint(train_predictions)\n\tprint(predictions)\n\ttrain_score = accuracy_score(train_predictions,y_train)\n\tprint(train_score)\n\tscore = accuracy_score(predictions,y_test)\n\tprint(score)\n\ndef initialize_parameters(layer_dims):\n\tparameters = {}\n\tL = len(layer_dims)\n\tfor l in range(1,L):\n\t\tparameters[\"W\" + str(l)] = np.random.randn(layer_dims[l],layer_dims[l-1]) * np.sqrt(2/(layer_dims[l-1])) # He weight initialization technique..By He et Al\n\t\tparameters[\"b\" + str(l)] = np.zeros((layer_dims[l],1))\n\treturn parameters\n\ndef linear_forward(A,W,b):\n\tZ = np.dot(W,A) + b\n\tcache = (A,W,b)\n\treturn Z,cache\n\ndef sigmoid(Z):\n\tcache = Z\n\ts = 1/(1 + np.exp(-Z))\n\treturn s, cache\n\ndef relu(Z):\n\ts =np.maximum(0,Z)\n\tcache = Z\n\treturn s,cache\n\ndef leaky_relu(Z, alpha):\n\ts = np.maximum(Z*alpha,Z)\n\tcache = Z\n\treturn s,cache\n\ndef linear_activation_forward(A_prev,W,b,activation):\n\tif activation == \"relu\":\n\t\tZ,linear_cache = linear_forward(A_prev,W,b)\n\t\tA, activation_cache = relu(Z)\n\n\telif activation ==\"sigmoid\":\n\t\tZ,linear_cache = linear_forward(A_prev,W,b)\n\t\tA, activation_cache = sigmoid(Z)\n\tcache = (linear_cache,activation_cache)\n\treturn A, cache\n\ndef L_model_forward(X, parameters,p_keep=1):\n\tcaches = []\n\tdropout_dict={}\n\tL = len(parameters) //2\n\tA = X\n\tfor i in range(1,L):\n\t\tA_prev = A\n\t\tA,cache = linear_activation_forward(A_prev,parameters[\"W\" + str(i)],parameters[\"b\" + str(i)], activation=\"relu\")\n\t\tdropout_dict[\"D\"+ str(i)]= np.random.rand(A.shape[0],A.shape[1])\n\t\tdropout_dict[\"D\" + str(i)] = dropout_dict[\"D\" + str(i)] < p_keep\n\t\tA = A*dropout_dict[\"D\" + str(i)]\n\t\tA/=p_keep\n\t\tcaches.append(cache)\n\tAL, cache = linear_activation_forward(A,parameters[\"W\" + str(L)], parameters[\"b\" + str(L)], activation = \"sigmoid\")\n\tcaches.append(cache)\n\treturn AL, caches,dropout_dict\ndef relu_backward(dA,Z):\n\tA,_ = relu(Z)\n\ts = (A>0)\n\tdZ = dA * s\n\treturn dZ\n\ndef sigmoid_backward(dA,Z):\n\ts,cache = sigmoid(Z)\n\tderivative = s * (1-s)\n\tdZ = dA * derivative\n\treturn dZ\n\ndef linear_backward(dZ, cache,lambd):\n\tm = 
len(cache)\n\tlinear_cache, activation_cache = cache\n\tA_prev,W,b = linear_cache\n\tZ = activation_cache\n\tdW = 1/m *(np.dot(dZ,A_prev.T) + (lambd*W))\n\tdb = 1/m * np.sum(dZ, axis=1, keepdims = True)\n\tdA_prev = np.dot(W.T, dZ)\n\treturn dW, db,dA_prev\n\ndef linear_backward_activation(dA, cache, activation,lambd):\n\tif activation == \"relu\":\n\t\tlinear_cache,activation_cache = cache\n\t\tZ = activation_cache\n\t\tdZ = relu_backward(dA,Z)\n\t\tdW,db,dA_prev = linear_backward(dZ, cache,lambd)\n\telif activation == \"sigmoid\":\n\t\tlinear_cache,activation_cache = cache\n\t\tZ = activation_cache\n\t\tdZ = sigmoid_backward(dA,Z)\n\t\tdW,db,dA_prev = linear_backward(dZ, cache,lambd)\n\treturn dW, db, dA_prev\n\ndef l_model_backward(AL,Y,cache,lambd,dropout_dict,p_keep):\n\tgrads = {}\n\tY.shape = (AL.shape)\n\tdAL = -np.divide(Y,AL) + np.divide(1-Y,1-AL+(1e-18))\n\tcurrent_cache = cache[-1]\n\tL = len(cache)\n\tgrads[\"dW\" + str(L)],grads[\"db\" + str(L)], grads[\"dA\" + str(L-1)] = linear_backward_activation(dAL, current_cache, activation =\"sigmoid\",lambd=0.0)\n\tgrads[\"dA\"+ str(L-1)] = grads[\"dA\" + str(L-1)] *dropout_dict[\"D\" + str(L-1)]\n\tgrads[\"dA\" + str(L-1)]/=p_keep\n\tfor i in reversed(range(L-1)):\n\t\tcurrent_cache = cache[i]\n\t\tgrads[\"dW\"+ str(i+1)], grads[\"db\" + str(i+1)], grads[\"dA\"+ str(i)] = linear_backward_activation(grads[\"dA\" + str(i+1)],current_cache,activation=\"relu\",lambd=0.0)\n\t\tif i == 0:\n\t\t\tbreak\n\t\telse:\n\t\t\tgrads[\"dA\"+ str(i)] = grads[\"dA\" + str(i)] * dropout_dict[\"D\" + str(i)]\n\t\t\tgrads[\"dA\" + str(i)] /=p_keep\n\treturn grads\n\ndef update_parameters(parameters,grads, learning_rate):\n\tL = len(parameters) //2\n\tfor l in range(1,L):\n\t\tparameters[\"W\"+ str(l)] =parameters[\"W\" + str(l)] - learning_rate * grads[\"dW\" + str(l)]\n\t\tparameters[\"b\" + str(l)] = parameters[\"b\"+ str(l)] - learning_rate * grads[\"db\" + str(l)]\n\treturn parameters\n\ndef dict_to_vector(dictionary):\n\tvalues = []\n\tkeys = []\n\tfor key,value in dictionary.items():\n\t\tvalues.append(value)\n\t\tkeys.append(key)\n\tnew_vector = np.array(values)\n\tnew_vector = new_vector.reshape(-1,1)\n\tnew_keys = np.array(keys)\n\treturn new_vector, new_keys\n\ndef vector_to_dict(vector,keys):\n dict={}\n for i in range(len(keys)):\n dict[keys[i]] = vector[i]\n return dictionary\n\ndef extract_weight(dict):\n L = len(dict)//2\n values=[]\n for i in range(1,L+1):\n values.append(dict[\"W\" + str(i)])\n return values\n\ndef calc_norm(weight):\n norm =0\n L = len(weight)\n for i in range(L):\n norm+=np.sum(np.square(weight[i]))\n return norm\n\ndef random_mini_batches(X,Y,mini_batch_size,seed=0):\n\tmini_batch =[]\n\tm = Y.shape[1]\n\tpermutation = list(np.random.permutation(m))\n\tX_shuffled = X[:,permutation]\n\tY_shuffled = Y[:,permutation].reshape(Y.shape[0],m)\n\tnum_complete_minibatches = math.floor(m/mini_batch_size)\n\tfor i in range(num_complete_minibatches):\n\t\tX_minibatch = X_shuffled[:,mini_batch_size*i:mini_batch_size*(i+1)]\n\t\tY_minibatch = Y_shuffled[:,mini_batch_size*i:mini_batch_size*(i+1)]\n\t\tminibatches = (X_minibatch,Y_minibatch)\n\t\tmini_batch.append(minibatches)\n\tif m % mini_batch_size !=0:\n\t\tend = m - mini_batch_size * math.floor(m / mini_batch_size)\n\t\tX_minibatch = X_shuffled[:,num_complete_minibatches*mini_batch_size:]\n\t\tY_minibatch = Y_shuffled[:,num_complete_minibatches*mini_batch_size:]\n\t\tminibatches = (X_minibatch,Y_minibatch)\n\t\tmini_batch.append(minibatches)\n\treturn mini_batch\n\ndef 
initialize_velocities(params):\n\tv ={}\n\tL = len(params)//2\n\tfor i in range(L):\n\t\tv[\"dW\" + str(i+1)] = np.zeros_like(params[\"W\" + str(i+1)])\n\t\tv[\"db\" + str(i+1)] = np.zeros_like(params[\"b\" + str(i+1)])\n\treturn v\n\n\ndef update_parameters_with_momentum(params,learning_rate,grads,v,beta):\n\tL = len(params)//2\n\tfor i in range(L):\n\t\tv[\"dW\" + str(i+1)] = beta*v[\"dW\" + str(i+1)] + (1-beta)*grads[\"dW\" + str(i+1)]\n\t\tv[\"db\" + str(i+1)] = beta*v[\"db\" + str(i+1)] + (1-beta)*grads[\"db\" + str(i+1)]\n\n\t\tparams[\"W\"+ str(i+1)] = params[\"W\" + str(i+1)] - learning_rate*v[\"dW\"+ str(i+1)]\n\t\tparams[\"b\" + str(i+1)] = params[\"b\" + str(i+1)] - learning_rate*v[\"db\" + str(i+1)]\n\treturn params,v\n\ndef initialize_rmsprop(params):\n\tL = len(params)//2\n\ts={}\n\tfor l in range(L):\n\t\ts[\"dW\" + str(l+1)] = np.zeros_like(params[\"W\" + str(l+1)])\n\t\ts[\"db\" + str(l+1)] = np.zeros_like(params[\"W\"+ str((l+1))])\n\treturn s\n\ndef update_rmsprop(s,t,params,grads,learning_rate,beta_2=0.999,epsilon=1e-8):\n\tL = len(grads)//2\n\ts_corrected ={}\n\tfor l in range(L):\n\t\ts[\"dW\" + str(l+1)] = (s[\"dW\"+ str(l+1)]*beta2) + (1-beta2) * np.square(grads[\"dW\" + str(l+1)])\n\t\ts[\"db\" + str(l+1)] = (s[\"db\"+ str(l+1)]*beta2) + (1-beta2) * np.square(grads[\"db\" + str(l+1)])\n\t\ts_corrected[\"dW\" + str(l+1)] = np.divide(s[\"dW\" + str(l+1)],1 - np.power(beta2,t))\n\t\ts_corrected[\"db\" + str(l+1)] = np.divide(s[\"db\" + str(l+1)],1 - np.power(beta2,t))\n\t\tparams[\"W\" + str(l+1)] = params[\"W\" + str(l+1)] - np.divide(learning_rate,np.sqrt(s_corrected[\"dW\" + str(l+1)] + epsilon))\n\t\tparams[\"b\" + str(l+1)] = params[\"b\" + str(l+1)] - np.divide(learning_rate,np.sqrt(s_corrected[\"db\" + str(l+1)] + epsilon))\n\treturn params,s_corrected\n\ndef initialize_adam(params):\n\tL = len(params)//2\n\ts={}\n\tv={}\n\tfor l in range(L):\n\t\tv[\"dW\" + str(l+1)] = np.zeros_like(params[\"W\" + str(l+1)])\n\t\tv[\"db\" + str(l+1)] = np.zeros_like(params[\"b\" + str(l+1)])\n\t\ts[\"dW\" + str(l+1)] = np.zeros_like(params[\"W\" + str(l+1)])\n\t\ts[\"db\"+ str(l+1)] = np.zeros_like(params[\"b\" + str(l+1)])\n\treturn v,s\n\ndef update_adam(params,grads,v,s,t,learning_rate,epsilon=1e-8,beta1=0.9,beta2=0.999):\n\tv_corrected= {}\n\ts_corrected ={}\n\tL =len(params)//2\n\tfor l in range(L):\n\t\tv[\"dW\" + str(l+1)] = v[\"dW\" + str(l+1)]*beta1 + (1-beta1)*grads[\"dW\" + str(l+1)]\n\t\tv[\"db\" + str(l+1)] = v[\"db\" + str(l+1)]*beta1 + (1-beta1)*grads[\"db\" + str(l+1)]\n\t\tv_corrected[\"dW\" + str(l+1)] = v[\"dW\" + str(l+1)]/(1-np.power(beta1,t))\n\t\tv_corrected[\"db\" + str(l+1)] = v[\"db\" + str(l+1)]/(1-np.power(beta1,t))\n\t\ts[\"dW\" + str(l+1)] = s[\"dW\" + str(l+1)]*beta2 + (1-beta2)*(grads[\"dW\" + str(l+1)])**2\n\t\ts[\"db\" + str(l+1)] = s[\"db\" + str(l+1)]*beta2 + (1-beta2)*(grads[\"db\" + str(l+1)])**2\n\t\ts_corrected[\"dW\" + str(l+1)] = (s[\"dW\" + str(l+1)])/(1 - np.power(beta2,t))\n\t\ts_corrected[\"db\" + str(l+1)] = (s[\"db\" + str(l+1)])/(1 - np.power(beta2,t))\n\t\tparams[\"W\" + str(l+1)] = params[\"W\" + str(l+1)] - learning_rate*np.divide(v_corrected[\"dW\" + str(l+1)],np.sqrt(s_corrected[\"dW\" + str(l+1)]+epsilon))\n\t\tparams[\"b\" + str(l+1)] = params[\"b\" + str(l+1)] - learning_rate*np.divide(v_corrected[\"db\" + str(l+1)],np.sqrt(s_corrected[\"db\" + str(l+1)]+epsilon))\n\treturn params,s_corrected,v_corrected\n\ndef compute_cost(AL,Y,lambd,parameters):\n\tL = len(parameters)//2\n\tm = AL.shape[1]\n\tweight_array = 
extract_weight(parameters)\n\tnorm = calc_norm(weight_array)\n\tregu_term =1/m*(lambd/2) * norm # l2 regularization term\n\tcost_intial = -1/m * np.sum((Y*np.log(AL)) + (1-Y)*(np.log(1-AL)))\n\tcost = cost_intial + regu_term\n\treturn cost\n\n\ndef network_model(x_train,y_train,x_test,y_test,learning_rate,epochs,layer_dims,lambd,learning_decay,p_keep,beta,optimizer = None):\n\tfig = plt.figure()\n\tax1 = fig.add_subplot(211)\n\tax2 = fig.add_subplot(212)\n\tparameters = initialize_parameters(layer_dims)\n\tt=0\n\tcosts = []\n\tcost1=[]\n\tscores1 = []\n\tscores2 =[]\n\tv_adam,s_adam = initialize_adam(parameters)\n\tv_momentum = initialize_velocities(parameters)\n\ts_prop = initialize_rmsprop(parameters)\n\tfor i in range(epochs):\n\t\tlearning_rate = learning_rate - (learning_rate*learning_decay)\n\t\tminibatches = random_mini_batches(x_train,y_train,mini_batch_size=16)\n\t\tfor mini_batch in minibatches:\n\t\t\tX_minibatch,Y_minibatch = mini_batch\n\t\t\tAL,cache,dropout_dict = L_model_forward(X_minibatch,parameters,p_keep)\n\t\t\tcost_train = compute_cost(AL,Y_minibatch,lambd,parameters)\n\t\t\tcosts.append(cost_train)\n\t\t\tgrads = l_model_backward(AL,Y_minibatch, cache,lambd,dropout_dict,p_keep)\n\t\t\tif optimizer is not None:\n\n\t\t\t\tif optimizer == \"gradient descent\": #Gradient Descent\n\t\t\t\t\tparameters = update_parameters(parameters,grads,learning_rate)\n\n\t\t\t\telif optimizer == \"adam\": # Adaptive Moment Estimation Adam\n\t\t\t\t\tt+=1\n\t\t\t\t\tparameters,s_adam,v_adam = update_adam(parameters,grads,v_adam,s_adam,t,learning_rate)\n\n\t\t\t\telif optimizer == \"gradient descent with momentum\": #Gradient Descent with momentum\n\t\t\t\t\tparameters,v_momentum = update_parameters_with_momentum(parameters,learning_rate,grads,v_momentum,beta)\n\n\t\t\t\telif optimzer == \"rmsprop\":\n\t\t\t\t\tt+=1\n\t\t\t\t\tparameters,s_prop = update_rmsprop(s_prop,t,parameters,grads,learning_rate)\n\n\n\t\t\tpredictions = predict(x_train,parameters)\n\t\t\tscore = accuracy_score(predictions,y_train)\n\t\t\tscores1.append(score)\n\t\tif i%50 ==0:\n\t\t\tprint(\"cross entropy loss after \"+ str(i) + \"th epoch = \" + str(cost_train))\n\t\t\tprint(\"accuracy after \" + str(i) + \"th epoch = \" + str(score))\n\t\t\tprint(\"current learning_rate = \" + str(learning_rate))\n\tax1.plot(costs)\n\tax2.plot(scores1, label = \" training set\")\n\tplt.legend()\n\tplt.show()\n\treturn parameters\n\ndef predict(x_test,parameters):\n\tpredictions,_,_ = L_model_forward(x_test,parameters)\n\tfor i in range(predictions.shape[1]):\n\t\tif predictions[0,i] >= 0.5:\n\t\t\tpredictions[0,i] = 1\n\t\telif predictions[0,i] < 0.5:\n\t\t\tpredictions[0,i] = 0\n\treturn predictions\n\ndef accuracy_score(predictions,actual):\n\tcounter = 0\n\tfor i in range(predictions.shape[1]):\n\t\tif predictions[0,i] == actual[0,i]:\n\t\t\tcounter+=1\n\t\telse:\n\t\t\tpass\n\treturn counter/predictions.shape[1]\n\nif __name__ == '__main__':\n\tmain()\n",
"id": "12417138",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "neural_network.py"
}
] | 0 |
sida-wang | [
{
"content": "\"\"\"Bin data for ocean water mass analysis.\"\"\"\n\nimport sys\nscript_dir = sys.path[0]\nimport os\nimport pdb\nimport argparse\nimport logging\n\nimport numpy as np\nimport iris\nimport iris.coord_categorisation\nfrom iris.experimental.equalise_cubes import equalise_attributes\nimport cmdline_provenance as cmdprov\nfrom statsmodels.stats.weightstats import DescrStatsW\n\nrepo_dir = '/'.join(script_dir.split('/')[:-1])\nmodule_dir = repo_dir + '/modules'\nsys.path.append(module_dir)\ntry:\n import water_mass\n import general_io as gio\n import convenient_universal as uconv\n import spatial_weights\nexcept ImportError:\n raise ImportError('Script and modules in wrong directories')\n\n\nmom_vars = {\"temp_nonlocal_KPP\": \"cp*rho*dzt*nonlocal tendency from KPP\",\n \"temp_vdiffuse_diff_cbt\": \"vert diffusion of heat due to diff_cbt\",\n \"mixdownslope_temp\": \"cp*mixdownslope*rho*dzt*temp\",\n \"temp_sigma_diff\" : \"thk wghtd sigma-diffusion heating\",\n \"temp_vdiffuse_k33\": \"vert diffusion of heat due to K33 from neutral diffusion\",\n \"neutral_diffusion_temp\": \"rho*dzt*cp*explicit neutral diffusion tendency (heating)\",\n \"temp_tendency\": \"time tendency for tracer Conservative temperature\"}\n\n\ndef construct_cube(outdata_dict, w_cube, t_cube, s_cube, b_cube, years,\n t_values, t_edges, t_units, s_values, s_edges, s_units,\n log, mul_ts=False, pct_edges_ts=[]):\n \"\"\"Create the iris cube for output\"\"\"\n \n for key, data in outdata_dict.items():\n outdata_dict[key], b_values, flag_values, flag_meanings = uconv.add_globe_basin(data, b_cube)\n\n year_coord = iris.coords.DimCoord(years,\n standard_name=t_cube.coord('year').standard_name,\n long_name=t_cube.coord('year').long_name,\n var_name=t_cube.coord('year').var_name,\n units=t_cube.coord('year').units)\n\n t_bounds = uconv.get_bounds_list(t_edges)\n t_coord_std_name = t_cube.standard_name\n t_coord_long_name = t_cube.long_name\n t_coord_var_name = t_cube.var_name\n if pct_edges_ts:\n t_coord_std_name = t_coord_std_name + '_percentile_bins'\n t_coord_long_name = t_coord_long_name + ' percentile bins'\n t_coord_var_name = t_coord_var_name + '_pct_bins'\n t_coord_units = '%'\n iris.std_names.STD_NAMES[t_coord_std_name] = {'canonical_units': '%'}\n else:\n t_coord_units = t_units\n t_coord = iris.coords.DimCoord(t_values,\n standard_name=t_coord_std_name,\n long_name=t_coord_long_name,\n var_name=t_coord_var_name,\n units=t_coord_units,\n bounds=t_bounds)\n\n s_bounds = uconv.get_bounds_list(s_edges)\n s_coord_std_name = s_cube.standard_name\n s_coord_long_name = s_cube.long_name\n s_coord_var_name = s_cube.var_name\n if pct_edges_ts:\n s_coord_std_name = s_coord_std_name + '_percentile_bins'\n s_coord_long_name = s_coord_long_name + ' percentile bins'\n s_coord_var_name = s_coord_var_name + '_pct_bins'\n s_coord_units = '%'\n iris.std_names.STD_NAMES[s_coord_std_name] = {'canonical_units': '%'}\n else:\n s_coord_units = s_units\n s_coord = iris.coords.DimCoord(s_values,\n standard_name=s_coord_std_name,\n long_name=s_coord_long_name,\n var_name=s_coord_var_name,\n units=s_coord_units,\n bounds=s_bounds)\n\n b_coord = iris.coords.DimCoord(b_values,\n standard_name=b_cube.standard_name,\n long_name=b_cube.long_name,\n var_name=b_cube.var_name,\n units=b_cube.units,\n attrbutes={'flag_values': flag_values,\n 'flag_meanings': flag_meanings})\n \n tbin_dim_coords_list = [(year_coord, 0), (t_coord, 1), (b_coord, 2)]\n sbin_dim_coords_list = [(year_coord, 0), (s_coord, 1), (b_coord, 2)]\n tsbin_dim_coords_list = 
[(year_coord, 0), (s_coord, 1), (t_coord, 2), (b_coord, 3)]\n\n outcube_list = iris.cube.CubeList([])\n wvar_list = ['w', 'wt', 'ws'] if mul_ts else ['w']\n for wvar in wvar_list:\n std_base_name = w_cube.standard_name\n long_base_name = w_cube.long_name\n var_base_name = w_cube.var_name\n if wvar == 'wt':\n if std_base_name:\n std_base_name = t_cube.standard_name + '_times_' + std_base_name\n long_base_name = t_cube.long_name.strip() + ' times ' + long_base_name\n var_base_name = t_cube.var_name + '_' + var_base_name\n if wvar == 'ws':\n if std_base_name:\n std_base_name = s_cube.standard_name + '_times_' + std_base_name \n long_base_name = s_cube.long_name.strip() + ' times ' + long_base_name\n var_base_name = s_cube.var_name + '_' + var_base_name\n\n if std_base_name:\n tbin_std_name = std_base_name + '_binned_by_temperature'\n iris.std_names.STD_NAMES[tbin_std_name] = {'canonical_units': str(w_cube.units)}\n else:\n tbin_std_name = None\n tbin_cube = iris.cube.Cube(outdata_dict[wvar + '_tbin'],\n standard_name=tbin_std_name,\n long_name=long_base_name + ' binned by temperature',\n var_name=var_base_name + '_tbin',\n units=w_cube.units,\n attributes=t_cube.attributes,\n dim_coords_and_dims=tbin_dim_coords_list)\n tbin_cube.attributes['history'] = log\n outcube_list.append(tbin_cube)\n\n if std_base_name:\n sbin_std_name = std_base_name + '_binned_by_salinity'\n iris.std_names.STD_NAMES[sbin_std_name] = {'canonical_units': str(w_cube.units)}\n else:\n sbin_std_name = None\n sbin_cube = iris.cube.Cube(outdata_dict[wvar + '_sbin'],\n standard_name=sbin_std_name,\n long_name=long_base_name + ' binned by salinity',\n var_name=var_base_name + '_sbin',\n units=w_cube.units,\n attributes=t_cube.attributes,\n dim_coords_and_dims=sbin_dim_coords_list)\n sbin_cube.attributes['history'] = log\n outcube_list.append(sbin_cube)\n\n if std_base_name:\n tsbin_std_name = std_base_name + '_binned_by_temperature_and_salinity'\n iris.std_names.STD_NAMES[tsbin_std_name] = {'canonical_units': str(w_cube.units)}\n else:\n tsbin_std_name = None\n tsbin_cube = iris.cube.Cube(outdata_dict[wvar + '_tsbin'],\n standard_name=tsbin_std_name,\n long_name=long_base_name + ' binned by temperature and salinity',\n var_name=var_base_name + '_tsbin',\n units=w_cube.units,\n attributes=t_cube.attributes,\n dim_coords_and_dims=tsbin_dim_coords_list)\n tsbin_cube.attributes['history'] = log\n outcube_list.append(tsbin_cube)\n\n if pct_edges_ts:\n pct_edges_t, pct_edges_s = pct_edges_ts\n\n pct_t_coord_std_name = t_cube.standard_name + '_percentile'\n iris.std_names.STD_NAMES[pct_t_coord_std_name] = {'canonical_units': '%'}\n pct_t_coord = iris.coords.DimCoord(t_edges,\n standard_name=pct_t_coord_std_name,\n long_name=t_cube.long_name + ' percentile',\n var_name=t_cube.var_name + '_pct',\n units='%')\n pct_edges_t_cube = iris.cube.Cube(pct_edges_t,\n standard_name=t_cube.standard_name,\n long_name=t_cube.long_name,\n var_name=t_cube.var_name,\n units=t_units,\n attributes=t_cube.attributes,\n dim_coords_and_dims=[(year_coord, 0), (pct_t_coord, 1)])\n pct_edges_t_cube.attributes['history'] = log\n outcube_list.append(pct_edges_t_cube)\n\n pct_s_coord_std_name = s_cube.standard_name + '_percentile'\n iris.std_names.STD_NAMES[pct_s_coord_std_name] = {'canonical_units': '%'}\n pct_s_coord = iris.coords.DimCoord(s_edges,\n standard_name=pct_s_coord_std_name,\n long_name=s_cube.long_name + ' percentile',\n var_name=s_cube.var_name + '_pct',\n units='%')\n\n pct_edges_s_cube = iris.cube.Cube(pct_edges_s,\n 
standard_name=s_cube.standard_name,\n long_name=s_cube.long_name,\n var_name=s_cube.var_name,\n units=s_units,\n attributes=s_cube.attributes,\n dim_coords_and_dims=[(year_coord, 0), (pct_s_coord, 1)])\n pct_edges_s_cube.attributes['history'] = log\n outcube_list.append(pct_edges_s_cube)\n\n return outcube_list\n\n\ndef clipping_details(orig_data, clipped_data, bin_edges, var_name):\n \"\"\"Details of the clipping\"\"\"\n\n bin_min = bin_edges[0]\n bin_second_min = bin_edges[1]\n bin_max = bin_edges[-1]\n bin_second_max = bin_edges[-2]\n\n npoints_under = np.sum(orig_data < bin_min)\n npoints_min = np.sum(orig_data <= bin_second_min) - npoints_under\n npoints_clip_min = np.sum(clipped_data <= bin_second_min) - np.sum(clipped_data < bin_min)\n assert npoints_clip_min == npoints_under + npoints_min\n\n npoints_over = np.sum(orig_data > bin_max)\n npoints_max = np.sum(orig_data <= bin_max) - np.sum(orig_data <= bin_second_max)\n npoints_clip_max = np.sum(clipped_data <= bin_max) - np.sum(clipped_data <= bin_second_max)\n assert npoints_clip_max == npoints_over + npoints_max\n\n logging.info(f\"First {var_name} bin had {npoints_min} values, clipping added {npoints_under}\")\n logging.info(f\"Last {var_name} bin had {npoints_max} values, clipping added {npoints_over}\")\n \n\ndef bin_data(df, var_list, edge_list, mul_ts=False):\n \"\"\"Bin the data.\n\n Args:\n df (pandas.DataFrame) -- Data\n var_list (list) -- Variables for binning axes\n edge_list (list) -- Bin edges for each bin axis variable\n mul_ts (bool) -- Bin weights times T and S too\n\n \"\"\"\n\n data_list = []\n for var, edges in zip(var_list, edge_list):\n assert var in ['temperature', 'salinity', 'basin']\n values = np.clip(df[var].values, edges[0], edges[-1])\n clipping_details(df[var].values, values, edges, var)\n data_list.append(values)\n data = np.array(data_list).T\n\n w_data = df['weight'].astype(np.float64).values\n w_dist, edges = np.histogramdd(data, weights=w_data, bins=edge_list)\n binned_total_weight = w_dist.sum()\n orig_total_weight = w_data.sum()\n np.testing.assert_allclose(orig_total_weight, binned_total_weight, rtol=1e-03)\n if mul_ts:\n ws_dist, edges = np.histogramdd(data, weights=w_data * df['salinity'].values, bins=edge_list)\n wt_dist, edges = np.histogramdd(data, weights=w_data * df['temperature'].values, bins=edge_list)\n return w_dist, ws_dist, wt_dist\n else:\n return w_dist\n \n\ndef get_weights_data(file_list, var, area_file):\n \"\"\"Read the weights data file/s\"\"\"\n \n w_var = mom_vars[var] if var in mom_vars else var\n if ('vol' in w_var) or ('area' in w_var):\n assert len(file_list) == 1\n w_cube = gio.get_ocean_weights(file_list[0])\n history = w_cube.attributes['history'] \n else:\n assert area_file, \"Area file needed for flux weights\"\n w_cube, history = gio.combine_files(file_list, var, checks=True)\n\n return w_cube, history\n\n\ndef get_log(inargs, w_history, t_history, s_history, b_cube, a_cube):\n \"\"\"Get the log entry for the output file history attribute.\"\"\"\n\n metadata_dict = {}\n if w_history: \n metadata_dict[inargs.weights_files[0]] = w_history[0]\n if t_history: \n metadata_dict[inargs.temperature_files[0]] = t_history[0]\n if s_history: \n metadata_dict[inargs.salinity_files[0]] = s_history[0]\n if 'history' in b_cube.attributes:\n metadata_dict[inargs.basin_file] = b_cube.attributes['history']\n if a_cube:\n if 'history' in a_cube.attributes:\n metadata_dict[inargs.area_file] = a_cube.attributes['history']\n\n log = cmdprov.new_log(infile_history=metadata_dict, 
git_repo=repo_dir)\n\n return log\n\n\ndef get_bin_data(files, var, w_cube):\n \"\"\"Get binning variable data.\"\"\"\n\n cube, history = gio.combine_files(files, var, checks=True)\n w_coord_names = [coord.name() for coord in w_cube.dim_coords]\n coord_names = [coord.name() for coord in cube.dim_coords]\n assert w_cube.shape[-2:] == cube.shape[-2:]\n if not w_cube.shape == cube.shape:\n if (w_cube.ndim == 3) and (cube.ndim == 4) and (w_coord_names[0] == coord_names[0]):\n #e.g. w_cube is surface flux (time, i, j),\n #cube is full depth temperature (time, depth, i, j)\n cube = cube[:, 0, ::]\n cube.remove_coord(coord_names[1])\n assert w_cube.shape == cube.shape \n elif (w_cube.ndim == 2) and (cube.ndim == 4):\n #e.g. w_cube is area (i, j),\n #cube is full depth temperature (time, depth, i, j)\n cube = cube[:, 0, ::]\n cube.remove_coord(coord_names[1])\n assert w_cube.shape == cube.shape[1:]\n else:\n #e.g. w_cube is area (i, j),\n #cube is surface temperature (time, i, j)\n #e.g. w_cube is volume (depth, i, j),\n #cube is temperature (time, depth, i, j)\n assert w_cube.shape == cube.shape[1:]\n\n return cube, history\n\n\ndef weighted_percentiles(data, weights, percentiles):\n \"\"\"Return the weighted percentiles.\n\n Args:\n data (np.ndarray) : Bin variable (e.g. temperature, salinity)\n weights (np.ndarray): Weights (e.g. cell volume, area)\n percentiles (np.ndarray): Array of requested percentiles (e.g. 0-1 by 0.01)\n\n \"\"\"\n \n assert percentiles.max() <= 1.0\n assert percentiles.min() >= 0.0\n\n wq = DescrStatsW(data=data, weights=weights)\n bin_edges = wq.quantile(probs=percentiles, return_pandas=False)\n\n # manual method does not give a clean results...\n #ix = np.argsort(data)\n #data = data[ix] # sort data\n #weights = weights[ix] # sort weights\n #cdf = (np.cumsum(weights) - 0.5 * weights) / np.sum(weights) # 'like' a CDF function\n #perc = np.arange(0, 1.01, 0.01)\n #test2 = np.interp(perc, cdf, data)\n\n return bin_edges\n\n\ndef main(inargs):\n \"\"\"Run the program.\"\"\"\n\n logging.basicConfig(level=logging.DEBUG)\n\n spatial_data = ('vol' in inargs.weights_var) or ('area' in inargs.weights_var)\n flux_data = not spatial_data\n\n w_cube, w_history = get_weights_data(inargs.weights_files, inargs.weights_var, inargs.area_file)\n t_cube, t_history = get_bin_data(inargs.temperature_files, inargs.temperature_var, w_cube)\n s_cube, s_history = get_bin_data(inargs.salinity_files, inargs.salinity_var, w_cube)\n b_cube = iris.load_cube(inargs.basin_file, 'region')\n if inargs.area_file:\n assert flux_data\n a_cube = gio.get_ocean_weights(inargs.area_file)\n else:\n assert spatial_data\n a_cube = None\n\n log = get_log(inargs, w_history, t_history, s_history, b_cube, a_cube)\n\n b_values, b_edges = uconv.get_basin_details(b_cube)\n if inargs.bin_percentile:\n pct_edges = np.arange(0, 1.01, 0.01)\n pct_values = (pct_edges[1:] + pct_edges[:-1]) / 2 \n nt_values = ns_values = len(pct_values)\n s_bounds = (-0.2, 80)\n pct_cube = a_cube\n else:\n t_min, t_max = inargs.temperature_bounds\n t_step = inargs.tbin_size\n t_edges = np.arange(t_min, t_max + t_step, t_step)\n t_values = (t_edges[1:] + t_edges[:-1]) / 2 \n s_values, s_edges = uconv.salinity_bins()\n s_bounds=(s_edges[0], s_edges[-1])\n nt_values = len(t_values)\n ns_values = len(s_values)\n pct_cube = None\n\n iris.coord_categorisation.add_year(t_cube, 'time')\n iris.coord_categorisation.add_year(s_cube, 'time')\n t_years = set(t_cube.coord('year').points)\n s_years = set(s_cube.coord('year').points)\n assert t_years == 
s_years\n if flux_data:\n iris.coord_categorisation.add_year(w_cube, 'time')\n w_years = set(w_cube.coord('year').points)\n assert w_years == t_years\n years = np.array(list(t_years))\n years.sort()\n \n w_tbin_outdata = np.ma.zeros([len(years), nt_values, len(b_values)])\n w_sbin_outdata = np.ma.zeros([len(years), ns_values, len(b_values)])\n w_tsbin_outdata = np.ma.zeros([len(years), ns_values, nt_values, len(b_values)])\n if spatial_data:\n ws_tbin_outdata = np.ma.zeros([len(years), nt_values, len(b_values)])\n wt_tbin_outdata = np.ma.zeros([len(years), nt_values, len(b_values)])\n ws_sbin_outdata = np.ma.zeros([len(years), ns_values, len(b_values)])\n wt_sbin_outdata = np.ma.zeros([len(years), ns_values, len(b_values)])\n ws_tsbin_outdata = np.ma.zeros([len(years), ns_values, nt_values, len(b_values)])\n wt_tsbin_outdata = np.ma.zeros([len(years), ns_values, nt_values, len(b_values)])\n if inargs.bin_percentile:\n pct_edges_t = np.ma.zeros([len(years), nt_values + 1])\n pct_edges_s = np.ma.zeros([len(years), ns_values + 1])\n if inargs.bin_clim:\n iris.coord_categorisation.add_month(s_cube, 'time')\n s_year_cube = s_cube.aggregated_by(['month'], iris.analysis.MEAN)\n s_year_cube.remove_coord('month')\n s_year_cube.replace_coord(s_cube[0:12, ::].coord('time'))\n iris.coord_categorisation.add_month(t_cube, 'time')\n t_year_cube = t_cube.aggregated_by(['month'], iris.analysis.MEAN)\n t_year_cube.remove_coord('month')\n t_year_cube.replace_coord(t_cube[0:12, ::].coord('time'))\n for year_index, year in enumerate(years):\n print(year) \n year_constraint = iris.Constraint(year=year)\n if not inargs.bin_clim:\n s_year_cube = s_cube.extract(year_constraint)\n t_year_cube = t_cube.extract(year_constraint)\n if flux_data:\n w_year_cube = w_cube.extract(year_constraint)\n w_year_cube = spatial_weights.multiply_by_area(w_year_cube, area_cube=a_cube)\n else:\n w_year_cube = w_cube\n df, s_units, t_units = water_mass.create_df(w_year_cube, t_year_cube, s_year_cube, b_cube,\n pct_cube=pct_cube,\n multiply_weights_by_days_in_year_frac=True)\n if inargs.bin_percentile:\n weight_var = 'percentile_weights' if pct_cube else 'weight'\n t_edges = weighted_percentiles(df['temperature'].values, df[weight_var].values, pct_edges)\n s_edges = weighted_percentiles(df['salinity'].values, df[weight_var].values, pct_edges)\n pct_edges_t[year_index, :] = t_edges\n pct_edges_s[year_index, :] = s_edges\n if flux_data:\n w_tbin_outdata[year_index, ::] = bin_data(df, ['temperature', 'basin'], [t_edges, b_edges])\n w_sbin_outdata[year_index, ::] = bin_data(df, ['salinity', 'basin'], [s_edges, b_edges])\n w_tsbin_outdata[year_index, ::] = bin_data(df, ['salinity', 'temperature', 'basin'], [s_edges, t_edges, b_edges])\n else:\n tbin_list = bin_data(df, ['temperature', 'basin'], [t_edges, b_edges], mul_ts=True)\n sbin_list = bin_data(df, ['salinity', 'basin'], [s_edges, b_edges], mul_ts=True)\n tsbin_list = bin_data(df, ['salinity', 'temperature', 'basin'], [s_edges, t_edges, b_edges], mul_ts=True)\n w_tbin_outdata[year_index, ::], ws_tbin_outdata[year_index, ::], wt_tbin_outdata[year_index, ::] = tbin_list\n w_sbin_outdata[year_index, ::], ws_sbin_outdata[year_index, ::], wt_sbin_outdata[year_index, ::] = sbin_list\n w_tsbin_outdata[year_index, ::], ws_tsbin_outdata[year_index, ::], wt_tsbin_outdata[year_index, ::] = tsbin_list\n\n outdata_dict = {}\n outdata_dict['w_tbin'] = np.ma.masked_invalid(w_tbin_outdata)\n outdata_dict['w_sbin'] = np.ma.masked_invalid(w_sbin_outdata)\n outdata_dict['w_tsbin'] = 
np.ma.masked_invalid(w_tsbin_outdata)\n if spatial_data:\n outdata_dict['ws_tbin'] = np.ma.masked_invalid(ws_tbin_outdata)\n outdata_dict['wt_tbin'] = np.ma.masked_invalid(wt_tbin_outdata)\n outdata_dict['ws_sbin'] = np.ma.masked_invalid(ws_sbin_outdata)\n outdata_dict['wt_sbin'] = np.ma.masked_invalid(wt_sbin_outdata)\n outdata_dict['ws_tsbin'] = np.ma.masked_invalid(ws_tsbin_outdata)\n outdata_dict['wt_tsbin'] = np.ma.masked_invalid(wt_tsbin_outdata)\n if inargs.bin_percentile:\n t_values = s_values = pct_values * 100\n t_edges = s_edges = pct_edges * 100\n pct_edges_ts = [pct_edges_t, pct_edges_s]\n else:\n pct_edges_ts = []\n outcube_list = construct_cube(outdata_dict, w_year_cube, t_cube, s_cube, b_cube, years,\n t_values, t_edges, t_units, s_values, s_edges, s_units,\n log, mul_ts=spatial_data, pct_edges_ts=pct_edges_ts)\n\n equalise_attributes(outcube_list)\n iris.save(outcube_list, inargs.outfile)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=__doc__,\n argument_default=argparse.SUPPRESS,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n\n parser.add_argument(\"weights_files\", type=str, nargs='*', help=\"volume, area or a flux\")\n parser.add_argument(\"weights_var\", type=str, help=\"weights variable\")\n parser.add_argument(\"basin_file\", type=str, help=\"basin file (from calc_basin.py)\")\n parser.add_argument(\"outfile\", type=str, help=\"output file\")\n \n parser.add_argument(\"--temperature_files\", type=str, nargs='*', help=\"temperature files for the binning\") \n parser.add_argument(\"--temperature_var\", type=str, help=\"temperature variable\")\n parser.add_argument(\"--salinity_files\", type=str, nargs='*', help=\"salinity files for the binning\") \n parser.add_argument(\"--salinity_var\", type=str, help=\"salinity variable\")\n\n parser.add_argument(\"--area_file\", type=str, default=None, help=\"For converting m-2 flux to total\")\n\n parser.add_argument(\"--temperature_bounds\", type=float, nargs=2, default=(-6, 50),\n help='bounds for the temperature (Y) axis')\n bin_default = 1/3.\n parser.add_argument(\"--tbin_size\", type=float, default=bin_default, help='temperature bin size')\n\n parser.add_argument(\"--bin_clim\", action=\"store_true\", default=False,\n help=\"Use the bin file climatology for binning\")\n parser.add_argument(\"--bin_percentile\", action=\"store_true\", default=False,\n help=\"Use percentiles for binning\")\n\n args = parser.parse_args() \n main(args)\n\n",
"id": "3915134",
"language": "Python",
"matching_score": 2.0323853492736816,
"max_stars_count": 0,
"path": "bin/water_mass_binning.py"
},
{
"content": "import pandas as pd\nimport geopandas as gpd\nimport numpy as np\nfrom shapely.geometry import Point\n\nfrom bokeh.io import curdoc, show, output_notebook\nfrom bokeh.layouts import row, column\nfrom bokeh.models import (CDSView, ColorBar, ColumnDataSource,\n CustomJS, CustomJSFilter, \n GeoJSONDataSource, HoverTool,\n LinearColorMapper, Slider, ContinuousColorMapper,\n BooleanFilter, WheelZoomTool,\n TapTool, OpenURL, Circle, RangeSlider, CheckboxButtonGroup,\n Toggle)\nfrom bokeh.plotting import figure\nfrom bokeh.tile_providers import CARTODBPOSITRON, get_provider\n\ndef toggle_callback(toggle):\n js=CustomJS(args=dict(toggle=toggle), code=\"\"\"\n if (toggle.button_type==\"danger\") {\n toggle.button_type=\"success\"\n toggle.label='Active'\n }\n else {\n toggle.button_type=\"danger\"\n toggle.label='Inactive' \n }\n\"\"\")\n return js\n \n \nclass Filter:\n\n def __init__(self, name, slider, toggle):\n self.name = name\n self.slider_ = slider\n self.toggle_ = toggle\n\nSTATISTICS = ['record_min_temp', 'actual_min_temp', 'average_min_temp', 'average_max_temp', 'actual_max_temp', 'record_max_temp']\nX_RANGE = [16000000, 16600000]\nY_RANGE = [-4850000, -4150000]\n\nnpoints = 100\n\nxpoints = np.random.randint(X_RANGE[0],X_RANGE[1],npoints)\nypoints = np.random.randint(Y_RANGE[0],Y_RANGE[1],npoints)\n\ntest_points = [Point(i) for i in zip(xpoints, ypoints)]\n\ngdf = gpd.GeoDataFrame({'var1':np.random.randint(0,100,npoints),\n 'var2':np.random.randint(0,100,npoints),\n 'var3':np.random.randint(0,100,npoints)}, geometry=test_points)\ngeosource = GeoJSONDataSource(geojson=gdf.to_json())\n\ntest_view = CDSView(source=geosource, filters=[BooleanFilter(booleans=[True]*len(gdf))])\n\ntile_provider = get_provider('CARTODBPOSITRON')\n\ntools = [\"pan, wheel_zoom, box_zoom, reset, tap\"]\n\np = figure(plot_width=1000,\n x_axis_type=\"mercator\", y_axis_type=\"mercator\",\n x_axis_label=\"Longitude\", y_axis_label=\"Latitude\",\n x_range=X_RANGE, y_range=Y_RANGE, tools=tools,\n title='Bores', output_backend='webgl')\np.add_tile(tile_provider)\npoints_render = p.circle(x='x',y='y', source=geosource, view=test_view, size=10)\n\np.toolbar.logo = None\np.toolbar.active_scroll = p.select_one(WheelZoomTool)\np.add_tools(HoverTool(renderers=[points_render],\n tooltips=[('Var1','@var1'),\n ('Var2','@var2'),\n ('Var3','@var3')]))\n\nfilter_list = {}\n\nfor var in ['var1', 'var2', 'var3']:\n min_ = 0\n max_ = 100\n slider = RangeSlider(start=min_, end=max_, step=0.1, \n value=(min_,max_), title=f'{var} range')\n toggle = Toggle(label=\"Inactive\", button_type=\"danger\", aspect_ratio=3)\n toggle.js_on_click(toggle_callback(toggle))\n filter_list[var] = Filter(var, slider, toggle)\n \n\ndef update_plot(attrname, old, new):\n mask = [True]*len(gdf)\n for key, filter in filter_list.items():\n if filter.toggle_.active:\n mask = mask & (gdf[key] >= filter.slider_.value[0]) & (gdf[key] <= filter.slider_.value[1])\n test_view.filters[0] = BooleanFilter(booleans=mask)\n\nfor _,filter in filter_list.items():\n filter.slider_.on_change('value',update_plot)\n filter.toggle_.on_change('active',update_plot)\n \ncontrols = column([row(filter.slider_, filter.toggle_) for key, filter in filter_list.items()])\n\nlayout = row(controls, p, name='layout')\n\n#show(layout)\ncurdoc().add_root(layout)\n#curdoc().title = \"Weather\"\n",
"id": "1620426",
"language": "Python",
"matching_score": 2.677251100540161,
"max_stars_count": 0,
"path": "bokeh-app/main.py"
},
{
"content": "from datetime import datetime as dt\n\n#import cv2\n\nfrom bokeh.io import curdoc\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.plotting import figure\nfrom bokeh.sampledata.haar_cascade import frontalface_default_path\n\nCAMERA_WIDTH, CAMERA_HEIGHT = (1280, 780)\n\n\nimg_plot = figure(plot_width=CAMERA_WIDTH//2, plot_height=CAMERA_HEIGHT//2,\n x_range=(0, CAMERA_WIDTH), y_range=(0, CAMERA_HEIGHT),\n x_axis_type=None, y_axis_type=None,\n tools=\"\", toolbar_location=None, name=\"image\")\n\n\nts_plot = figure(plot_width=CAMERA_WIDTH//2, plot_height=150,\n tools=\"\", toolbar_location=None, name=\"ts\")\n\ncurdoc().add_root(img_plot)\ncurdoc().add_root(ts_plot)\ncurdoc().title = \"Face Detection\"\n",
"id": "1514926",
"language": "Python",
"matching_score": 2.331946611404419,
"max_stars_count": 0,
"path": "examples/app/faces/main.py"
}
] | 2.331947 |
OtObOx | [
{
"content": "import pretty_midi\nimport os\n\n#inputフォルダから読み込む\ninpath = 'input/'\n#outputフォルダから読み込む\noutpath = 'output'\nflist = os.listdir(inpath)\nfor title in flist:\n\tprint(title)\n\t# MIDIファイルのロード\n\tmidi_data = pretty_midi.PrettyMIDI(inpath + title)\n\t# 新しいMIDIファイルの構築\n\tnewmidi = pretty_midi.PrettyMIDI()\n\tpiano_program = pretty_midi.instrument_name_to_program('Acoustic Grand Piano')\n\tpiano = pretty_midi.Instrument(program=piano_program)\n\n\tfor instrument in midi_data.instruments:\n\t # ドラムでないトラックの音をpianoに入れる(ノートを入れる)\n\t if not instrument.is_drum:\n\t for note in instrument.notes:\n\t piano.notes.append(note)\n\n\t#ノートを新しく生成したMIDIオブジェクトに入れる\n\tnewmidi.instruments.append(piano)\n\t# outputディレクトリがなければ作成\n\tif not os.path.isdir(outpath):\n\t os.mkdir(outpath)\n\t# 該当曲のディレクトリを作成\n\tnewmidi.write(outpath + '/' + str(title))\n",
"id": "2290106",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "midi_converter.py"
}
] | 0 |
Dasajev | [
{
"content": " # -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, print_function, division\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport json\nimport utils\n\ndef scrape(url):\n\tr = utils.getUrl(url)\n\tbs = BeautifulSoup(r.content)\n\tif not bs:\n\t\treturn\n\n\trecipe={}\n\n\ttitle_dev = bs.find(\"div\", {'class': \"title-wrapper\"})\n\tname = title_dev.find(\"span\").get_text()\n\trecipe[\"name\"]=name\n\t\n\tingredients = []\n\ttable = bs.find(\"table\", {'class': \"list-ingredients\"})\n\tfor row in table.findAll(\"tr\"):\n\t\tingredient = {}\n\t\tcell = row.find(\"td\", {'class': \"name\"})\n\t\tif cell is None:\n\t\t\tcontinue\n\n\t\tname = cell.find(\"span\").renderContents().decode(\"utf8\")\n\n\t\tcell = row.find(\"td\", {'class':\"amount-unit\"})\n\t\tif cell is not None:\n\t\t\tamount = cell.find(\"span\", {'data-view-element': \"amount\"}).renderContents().decode(\"utf8\")\n\t\t\tunit = cell.find(\"span\", {'data-view-element': \"unit\"}).renderContents().decode(\"utf8\")\n\n\t\tingredient[\"name\"]=name\n\t\tingredient[\"amount\"]=amount\n\t\tingredient[\"unit\"]=unit\n\t\tingredients.append(ingredient)\n\n\tdiv = bs.find(\"div\", {'class':\"instructions\"})\n\tinstructions = div.get_text().strip()\n\n\trecipe[\"instructions\"]=instructions\n\trecipe[\"ingredients\"]=ingredients\n\n\treturn json.dumps(recipe)",
"id": "7059071",
"language": "Python",
"matching_score": 3.6586663722991943,
"max_stars_count": 0,
"path": "kotikokki.py"
},
{
"content": " # -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, print_function, division\nfrom bs4 import BeautifulSoup\nimport re\nimport json\nimport utils\n\ndef scrape(url):\n\tr = utils.getUrl(url)\n\tbs = BeautifulSoup(r.content)\n\tif not bs:\n\t\treturn\n\n\trecipe={}\n\n\ttitle_header = bs.find(\"h2\", {'class': \"recipe-title\"})\n\tname = title_header.find(\"span\").get_text()\n\trecipe[\"name\"]=name\n\t\n\tingredients = []\n\tnames = bs.findAll(\"span\", {'itemprop': \"name\"})\n\tamounts = bs.findAll(\"span\", {'itemprop': \"amount\"})\n\t#skip recipe name \n\tnames = names[1:]\n\tfor idx, name in enumerate(names):\n\t\tingredient = {}\n\t\tingredient[\"amount\"] = amounts[idx].get_text().strip()\n\t\tingredient[\"name\"] = name.get_text().strip()\n\t\tingredients.append(ingredient)\n\n\tdiv = bs.find(\"div\", {'itemprop':\"instructions\"})\n\tinstructions = div.get_text().strip()\n\n\t\n\trecipe[\"instructions\"]=instructions\n\trecipe[\"ingredients\"]=ingredients\n\treturn json.dumps(recipe)",
"id": "1001252",
"language": "Python",
"matching_score": 4.194054126739502,
"max_stars_count": 0,
"path": "pioneerwoman.py"
},
{
"content": " # -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, print_function, division\nfrom bs4 import BeautifulSoup\nimport re\nimport json\nimport utils\n\ndef scrape(url):\n\tr = utils.getUrl(url)\n\tbs = BeautifulSoup(r.content)\n\tif not bs:\n\t\treturn\n\n\trecipe={}\n\n\ttitle_header = bs.find(\"h1\", {'class': \"entry-title\"})\n\tname = title_header.get_text()\n\trecipe[\"name\"]=name\n\t\n\tingredients = []\n\tingredient_list = bs.findAll(\"li\", {'itemprop': \"ingredients\"})\n\tfor item in ingredient_list:\n\t\tingredient = {}\n\t\tname = item.get_text().strip()\n\t\tdollar_pos = name.find(\"$\")\n\t\tif dollar_pos != -1:\n\t\t\tname = name[:dollar_pos-1]\n\t\tingredient[\"name\"] = name\n\t\tingredients.append(ingredient)\n\n\tinstructions = []\n\tinstructions_list = bs.findAll(\"li\", {'itemprop':\"recipeInstructions\"})\n\tfor item in instructions_list:\n\t\tinstructions.append(item.get_text())\n\t\n\trecipe[\"instructions\"]=\"\".join(instructions)\n\trecipe[\"ingredients\"]=ingredients\n\treturn json.dumps(recipe)\n",
"id": "9800570",
"language": "Python",
"matching_score": 0.911159098148346,
"max_stars_count": 0,
"path": "budgetbytes.py"
},
{
"content": "import requests\n\n\ndef getUrl(url, nocache=False, params=None, headers=None, cookies=None):\n \"\"\"Gets data, bs and headers for the given url,\n using the internal cache if necessary\"\"\"\n\n browser = \"\"\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11\n (KHTML, like Gecko) Chrome/23.0.1271.95 Safari/537.11\"\"\"\n # Common session for all requests\n s = requests.session()\n s.verify = False\n s.stream = True # Don't fetch content unless asked\n s.headers.update({'User-Agent': browser})\n # Custom headers from requester\n if headers:\n s.headers.update(headers)\n # Custom cookies from requester\n if cookies:\n s.cookies.update(cookies)\n\n try:\n r = s.get(url, params=params)\n except requests.exceptions.InvalidSchema:\n print(\"Invalid schema in URI: %s\" % url)\n return None\n except requests.exceptions.ConnectionError:\n print(\"Connection error when connecting to %s\" % url)\n return None\n\n size = int(r.headers.get('Content-Length', 0)) // 1024\n if size > 2048:\n print(\"Content too large, will not fetch: %skB %s\" % (size, url))\n return None\n\n return r\n\n\ndef split_amounts_and_ingredients(raw_data):\n parsed = []\n for item in raw_data:\n if ')' in item:\n amount, item = item.split(')')\n parsed.append({'ingredient':item.strip(), 'amount':amount+(')').strip()})\n else:\n amount, item = item.split(' ')\n parsed.append({'ingredient':item.strip(), 'amount':amount.strip()})\n\n return parsed\n",
"id": "4761394",
"language": "Python",
"matching_score": 1.5488543510437012,
"max_stars_count": 0,
"path": "utils.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals, division\nfrom bs4 import BeautifulSoup\nimport json\nimport utils\n\n\ndef scrape(url):\n r = utils.getUrl(url)\n bs = BeautifulSoup(r.content)\n if not bs:\n return\n\n recipe = {}\n\n j = bs.find('script', {'type': 'application/ld+json'})\n data = json.loads(j.string)\n print data\n\n recipe[\"name\"] = data['description']\n recipe[\"instructions\"] = data['recipeInstructions']\n recipe[\"ingredients\"] = utils.split_amounts_and_ingredients(\n data['recipeIngredient']\n )\n return json.dumps(recipe)\n",
"id": "319062",
"language": "Python",
"matching_score": 1.2467502355575562,
"max_stars_count": 0,
"path": "k_ruoka.py"
},
{
"content": "import k_ruoka\nimport pioneerwoman\nimport kotikokki\nimport budgetbytes\nimport argparse\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='Downloads recipes as JSON.')\n\tparser.add_argument(\"url\")\n\targs = parser.parse_args()\n\t\n\turl = args.url\n\n\tif \"pioneerwoman.com\" in url:\n\t\tprint (pioneerwoman.scrape(url))\n\telif \"k-ruoka.fi\" in url:\n\t\tprint (k_ruoka.scrape(url))\n\telif \"kotikokki.net\" in url:\n\t\tprint (kotikokki.scrape(url))\n\telif \"budgetbytes.com\" in url:\n\t\tprint (budgetbytes.scrape(url))",
"id": "5491674",
"language": "Python",
"matching_score": 0.23279769718647003,
"max_stars_count": 0,
"path": "main.py"
}
] | 1.397802 |
MaximRouiller | [
{
"content": "class _Properties:\n\tprovisioningState = \"\"\n\nclass ResourceGroups_CreateOrUpdate:\n\tid = \"\"\n\tname = \"\"\n\ttype = \"\"\n\tproperties = _Properties()\n\tProperties = _Properties\n\tlocation = \"\"\n\tmanagedBy = \"\"\n\ttags = {}\n\n",
"id": "4538310",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "example/pythonModel.py"
}
] | 0 |
Eugeneiregi | [
{
"content": "import unittest\nfrom user import User\nfrom credentials import Credentials\n\nclass TestUser(unittest.TestCase):\n '''\n Test class for user class\n '''\n\nif __name__== '__main__':\n unittest.main()\n\n def setUp(self):\n '''\n test to run before each test cases\n '''\n\n self.new_credentials = Credentials(\"Facebook\", \"1234567\")\n self.new_user = User(\"Eugene\", \"peter\", \"Iregi\")\n\n def test_init(self):\n '''\n test_init for testing object initialization\n '''\n\n self.assertEqual(self.new_credentials.account_name, \"Facebook\")\n\n self.assertEqual(self.new_credentials.password, \"<PASSWORD>\")\n\n self.assertEqual(self.new_user.first_name, \"Eugene\")\n\n self.assertEqual(self.new_user.last_name, \"peter\")\n\n self.assertEqual(self.new_user.username, \"Iregi\")\n\n def test_view_user(self):\n '''\n method that shows a list of users saved\n '''\n\n self.assertEqual(User.view_users(), User.user_list)\n\n def test_view_credentials(self):\n '''\n method to view user credentials\n '''\n\n self.assertEqual(Credentials.view_credentials(), Credentials.credential_list)\n def test_save_credentials(self):\n '''\n test to see if credential object is saved into the credential list\n '''\n\n self.new_credentials.save_credentials()\n\n self.assertEqual(len(Credentials.credential_list), 1)\n\n def test_save_user(self):\n '''\n test to see if contact object is saved into the credential list\n '''\n\n self.new_user.save_user()\n\n self.assertEqual(len(User.user_list), 1)\n\n\n def tearDown(self):\n '''\n Clean up for tests\n '''\n\n Credentials.credential_list = []\n\n User.user_list = []\n\n def test_save_mulltiple_credentials(self):\n '''\n test_save_multiple credentials to check if we can save multiple credentials object to our credentials_list\n '''\n\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\"Ig\", \"32667843\")\n test_credentials.save_credentials()\n\n self.assertEqual(len(Credentials.credential_list), 2)\n\n\n\n def test_save_multiple_user(self):\n '''\n test to check if we can save multiple users object to our user_list\n '''\n\n self.new_user.save_user()\n test_user = User(\"Ado\", \"<PASSWORD>\", \"Philly\")\n test_user.save_user()\n\n self.assertEqual(len(User.user_list), 2)\n\n def test_delete_credentials(self):\n '''\n To test if we can remove credentials from our list\n '''\n\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\"Ig\", \"32667843\")\n test_credentials.save_credentials()\n\n self.new_credentials.delete_credentials()\n self.assertEqual(len(Credentials.credential_list), 1)\n\n def test_delete_user(self):\n '''\n To test if we can remove user from our list\n '''\n\n self.new_user.save_user()\n test_user = User(\"Ado\", \"Pass\", \"Philly\")\n test_user.save_user()\n\n self.new_user.delete_user()\n self.assertEqual(len(User.user_list), 1)\n\n\nif __name__== '__main__':\n unittest.main()\n",
"id": "3859707",
"language": "Python",
"matching_score": 2.7664642333984375,
"max_stars_count": 0,
"path": "lock_test.py"
},
{
"content": "import os\nfrom credentials import Credentials\nimport pyperclip\nimport time\n\n\ndef generate_password():\n '''\n\n generate password\n '''\n\n return os.urandom(8)\n\n\nprint(\"Hello! Welcome to Password app where passwords are found\")\nprint(\"****** Use Yes or No as shortcodes to navigate******\")\nprint(\"Kindly enter your name for continuation\")\nuser_name = input()\nprint(\"Thank you please wait...\")\ntime.sleep(2)\n\nprint(\"You have successfully created an account kindlyy proceed\")\nprint(f\"Welcome {user_name}\")\n\nprint(\"Do you have an existing account? *****Reply Yes *****\")\nhave_passed = input()\n\nif have_passed == \"Yes\":\n proceed =True\n print(\"***** Enter Your Account name and password *****\")\n print(\"Enter Account Name\")\n account_name = input()\n print(\"Enter Password\")\n password = input()\n user_credentials = Credentials(account_name, password)\n Credentials.credential_list.append(user_credentials)\n print(user_credentials)\n while proceed:\n print(\"Do you have other accounts?***** Yes or No *****\")\n no_account = input()\n if no_account == \"No\":\n proceed= False\n else:\n print(\"Enter Account Name\")\n account_name = input()\n print(\"Enter Password\")\n password = input()\n\n print(\"Create new Account and choose or Generate Password with okay button\")\n accept = input()\n\n if accept == \"Yes\":\n print(\"Enter Account Name\")\n acc = input()\n print(\"Would You like us to generate a Password for you or input?***** Yes or No? *****\")\n gen = input()\n if gen == \"Yes\":\n gen_pass = str(generate_password())\n print(\"Password generated is \" + gen_pass)\n # cred = Credentials(acc, gen_pass)\n # Credentials.credential_list.append(cred)\n\n print(\"********* Here are your accounts*****************\")\n for list_accounts in Credentials.credential_list:\n print(list_accounts)\n\n print(\"Do You wish to delete account? Yes Or No\")\n doaway = input()\n\n if doaway == \"Yes\":\n print(\"Enter account Name\")\n goo = input()\n print(Credentials.credential_list.clear())\n\n print(\"You account has been deleted successfully\")\n print(\"Bye have a nice day\")\n print(\"Copyright@2019. Thanks For Using Password-Locker APP.\")\n",
"id": "11206409",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "run.py"
},
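run.py derives passwords from os.urandom(8), which yields raw bytes rather than printable text. A hedged alternative sketch using the standard-library secrets module (a suggestion for illustration only, not what the script above does; the length and alphabet are arbitrary choices):

import secrets
import string


def generate_password(length=12):
    # Draw printable characters from a cryptographically secure source;
    # unlike os.urandom(), this returns a str the user can actually type.
    alphabet = string.ascii_letters + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(length))


if __name__ == '__main__':
    print(generate_password())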
{
"content": "from django.contrib import admin\n\nfrom .models import Profile, Rate, Project\n\nadmin.site.register(Project)\nadmin.site.register(Profile)\n\nadmin.site.register(Rate)\n",
"id": "8916326",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "Award/admin.py"
}
] | 0 |
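Award/admin.py above registers Project, Profile and Rate with the default admin options. For comparison, a hedged sketch of registering one model with a customised ModelAdmin inside the same app; the list_display/search_fields field names are assumptions, since the models module is not part of this dump:

from django.contrib import admin

from .models import Project


@admin.register(Project)  # same effect as admin.site.register(Project, ProjectAdmin)
class ProjectAdmin(admin.ModelAdmin):
    # 'title' and 'description' are hypothetical field names used only for illustration.
    list_display = ('title', 'description')
    search_fields = ('title',)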
lauragarciamayo | [
{
"content": "import random\nimport struct\nimport time\nimport csv\nimport os\nimport sys\nimport binascii\nimport numpy as np\n\n################\n#adc\nimport Adafruit_BBIO.ADC as ADC\nADC.setup()\nfrom time import sleep\n\npin1=\"P9_35\" #ain0\npin2=\"P9_37\" #ain2\npin3=\"P9_39\" #ain6\n################\n#pwm and GPIO\nimport Adafruit_BBIO.PWM as PWM\nimport Adafruit_BBIO.GPIO as GPIO\nfrom time import sleep\n\n#set GPIO\n\n#control pins left motor\ninput_1A=\"P8_12\"\ninput_2A=\"P8_14\"\n#control pins right motor\ninput_3A=\"P8_8\"\ninput_4A=\"P8_10\"\n\n#set pins as output\nGPIO.setup(input_1A,GPIO.OUT)\nGPIO.setup(input_2A,GPIO.OUT)\nGPIO.setup(input_3A,GPIO.OUT)\nGPIO.setup(input_4A,GPIO.OUT)\n\n#set PWM\nmotor1=\"P9_14\"\nmotor2=\"P9_16\"\n\n################\nclass trajectory(object):\n\tdef __init__(self,x_coord_origin,y_coord_origin,x_coord_s1,y_coord_s1,x_coord_s2,y_coord_s2,x_coord_s3,y_coord_s3):\n\t\tself.x_coord_origin=x_coord_origin\n\t\tself.y_coord_origin=y_coord_origin\n\t\tself.x_coord_s1=x_coord_s1\n\t\tself.y_coord_s1=y_coord_s1\n\t\tself.x_coord_s2=x_coord_s2\n\t\tself.y_coord_s2=y_coord_s2\n\t\tself.x_coord_s3=x_coord_s3\n\t\tself.y_coord_s3=y_coord_s3\n\nclass state_transition(object):\n\tdef __init__(self, reward, next_s_type,next_state,terminal,emergency):\t\n\t#def __init__(self, agent_next_location, reward, next_s_type,next_state,terminal,emergency):\n\t #self.agent_next_location = agent_next_location \n\t self.reward = reward\n\t self.next_s_type = next_s_type\n\t self.next_state = next_state\n\t self.terminal= terminal\n\t self.emergency=emergency\n\n\ndef readSensors(pin1,pin2,pin3):\n\t\n\t# Read ADC voltages#################################\n\t#read sensor 1 (right)\n\tsensor1Val=ADC.read(pin1)\n\tsensor1Volts=sensor1Val*1.8 #range (0.96v : >=10cms to 0.13v: >70cms)\n\t\n\t#read sensor 2 (front)\n\tsensor2Val=ADC.read(pin2)\n\tsensor2Volts=sensor2Val*1.8 #range (0.96v : >=10cms to 0.13v: >70cms)\n\n\t#read sensor 3 (left)\n\tsensor3Val=ADC.read(pin3)\n\tsensor3Volts=sensor3Val*1.8 #range (0.96v : >=10cms to 0.13v: >70cms)\n\n\t#print \"the ADC voltage is= : \",sensor1Volts,sensor2Volts,sensor3Volts\n\n\tsensors=list()\n\tsensors.append(sensor1Volts)\n\tsensors.append(sensor2Volts)\n\tsensors.append(sensor3Volts)\t\n\n\tprint sensors\n\treturn sensors\n\t\ndef euclideanDistance2integers(sensors):\n\t\n\t#array of integers\t\n\tstate_vector=np.zeros(3) #array of 3 elements\n\t\n\tstate_vector[sensors.index(max(sensors))]=1\n\tstate_vector[sensors.index(min(sensors))]=3\n\n\tfor i in range(0,3):\n\t\tif state_vector[i]==0:\n\t \t\tstate_vector[i]=2\n \n\treturn\tstate_vector\n\ndef state_type(sensors,critical_voltage):\n\n\t# Find out the type of state: health or subhealth\n\tif sensors[0]>critical_voltage or sensors[1]>critical_voltage or sensors[2]>critical_voltage: \n\t\ts_type=1 #subhealth state\t \n\telse:\n\t s_type=2 #health state\n \n\treturn s_type\n\ndef trans_reward(action,s_type,current_state):\n\n\t# assign rewards to state vector\n\n\tif action==4: #special action, emergency\n\t reward=0\n\telse:\n\t #action=\n\t\t# 1=forward,2=left,3=right\n\t\t\n\t if current_state.argmax()==action: #best reward\n\t\tif s_type==1: #subhealth state\n\t\t\treward=0\n\t\telse: #health state\n\t\t reward=5\n\t\t\n\t elif current_state.argmin()==action: #worst reward\n\t\tif s_type==1: #subhealth state\n\t\t\treward=-3\n\t\telse: #health state\n\t\t reward=0\n\t else:\n\n\t\tif s_type==1: #subhealth state\n\t\t\treward=-1\n\t\telse: #health state\n\t\t 
reward=1\n\n\treturn reward\n\ndef gotoRight(): #turn on 1st motor (left motor)\n\t\n\t#set pins high/low\n\t# 1a high and 2a low =right\n\t# 1a low and 2a high=left\n\t# both high/both low =stop\n\tGPIO.output(input_1A,GPIO.HIGH)\n\tGPIO.output(input_2A,GPIO.LOW)\n\tGPIO.output(input_3A,GPIO.LOW)\n\tGPIO.output(input_4A,GPIO.LOW)\n\n\tduty=30\n\t\n\tPWM.start(motor1, duty)\n\tsleep(0.2)\n\tPWM.stop(motor1) #disable pwm immediately to rotate few angles\n\tPWM.cleanup()\n\ndef gotoLeft(): #turn on 2nd motor (right motor)\n\t\n\t#set pins high/low\n\t# 1a high and 2a low =right\n\t# 1a low and 2a high=left\n\t# both high/both low =stop\n\tGPIO.output(input_1A,GPIO.LOW)\n\tGPIO.output(input_2A,GPIO.LOW)\n\tGPIO.output(input_3A,GPIO.HIGH)\n\tGPIO.output(input_4A,GPIO.LOW)\n\n\tduty=30\n\tPWM.start(motor2, duty)\n\tsleep(0.2)\n\tPWM.stop(motor2) #disable pwm immediately to rotate few angles\n\tPWM.cleanup()\n\ndef goForward():\n\t\n\t#set pins high/low\n\t# 1a higsh and 2a low =right\n\t# 1a low and 2a high=left\n\t# both high/both low =stop\n\tGPIO.output(input_1A,GPIO.HIGH)\n\tGPIO.output(input_2A,GPIO.LOW)\n\tGPIO.output(input_3A,GPIO.HIGH)\n\tGPIO.output(input_4A,GPIO.LOW)\n\n\tduty=100\n\t\n\tPWM.start(motor1, duty)\n\tPWM.start(motor2, duty)\n\t#sleep(0.2)\n\tPWM.stop(motor1) #disable pwm immediately to rotate few angles\n\tPWM.stop(motor2)\n\tPWM.cleanup()\n\ndef goBackward():\n\t\n\t#set pins high/low\n\t# 1a high and 2a low =right\n\t# 1a low and 2a high=left\n\t# both high/both low =stoP\n\tGPIO.output(input_1A,GPIO.LOW)\n\tGPIO.output(input_2A,GPIO.HIGH)\n\tGPIO.output(input_3A,GPIO.LOW)\n\tGPIO.output(input_4A,GPIO.HIGH)\n\n\tduty=30\n\t\n\tPWM.start(motor1, duty)\n\tPWM.start(motor2, duty)\n\tsleep(0.4)\n\tPWM.stop(motor1) #disable pwm immediately to rotate few angles\n\tPWM.stop(motor2)\n\tPWM.cleanup()\n\ndef execute_action(action):\n\n#set of actions: 1:forward, 2:left 3:right 4:backwards/emergency\n\tduty=40\n\tif action==1: #forward\n\t\tgoForward()\n\telif action==2: #left\n\t\tgotoLeft()\n\telif action==3: #right\n\t\tgotoRight()\n\telif action==4: #backward\t\n\t\tgoBackward()\n\telif action==5: #right and forward\n\t\tgotoRight()\n\t\tgoForward()\n\telse: #left and forward\n\t\tgotoLeft()\n\t\tgoForward()\n\t\t\n\n\ndef environment(action,current_state,s_type,critical_voltage,emergency_voltage,termination_voltage):\t\n\t\n\t#execute action\n\texecute_action(action)\t\n\n\t#obtain sensors measurement (voltages)\n\tsensors=readSensors(pin1,pin2,pin3)\n\n\t#check emergency case\n\tif (sensors[1]>=emergency_voltage):\n\t\temergency=1\n\n\telse:\n\t\temergency=0\n\t\t\t\n\t\n\t#find out the type of state (health or subhealth)\n\tnext_s_type=state_type(sensors,critical_voltage) \t\n\t#print \"state_type=\",next_s_type\n\n\t#convert voltage/distance to interger verctor (state vector)\n\tnext_state=euclideanDistance2integers(sensors) #vector of 3 integers from 1-3\n\t#print \"state= \",next_state\n\t\n\t#detect terminal state (3 sensors must give a voltage equal or greater than 0.9v\n\tif sensors[0]<=termination_voltage and sensors[1]<=termination_voltage and sensors[2]<=termination_voltage:\n\t\tterminal=1\n\telse:\n\t\tterminal=0\n\n\t#rewards\n\treward=trans_reward(action,s_type,current_state)\n\n\treturn state_transition(reward, next_s_type,next_state,terminal,emergency)\n\t#return state_transition(agent_next_location, reward, next_s_type,next_state,terminal,emergency)\n\n\ndef 
state_index(s,s_type):\n\n\tref_state1=np.array([1,2,3])\n\tref_state2=np.array([1,3,2])\n\tref_state3=np.array([3,2,1])\n\tref_state4=np.array([3,1,2])\n\tref_state5=np.array([2,3,1])\n\tref_state6=np.array([2,1,3])\n\n\tif np.array_equal(s,ref_state1)== True:\n\t\ts_index=1\n\telif np.array_equal(s,ref_state2)== True:\n\t\ts_index=2\n\telif np.array_equal(s,ref_state3)== True:\n\t\ts_index=3\n\telif np.array_equal(s,ref_state4)== True:\n\t\ts_index=4\n\telif np.array_equal(s,ref_state5)== True:\n\t\ts_index=5\n\telse:\n\t\ts_index=6\n\t\n\tif s_type==1: #subhealth state\n\t s_index=s_index+6\n\t \n\ts_index=s_index-1 #array indexing goes from 0 to 11\n\t\n\treturn s_index\n\n\n#MAIN\nif __name__ == \"__main__\":\n\t\n\t#define start state\n\t#define emergengy flag\n\t#agent's trajectory coordinates array (coordinates of origin, s1,s2,s3)\n\trobotTrajectory=trajectory([],[],[],[],[],[],[],[]) #define original coordinates\n\t#inicializar state_transition\n\n\tNepisodes=1\n\n\tcritical_voltage=0.4 # ~15cms from the maze\n\temergency_voltage=0.85 # 10cms or less from the maze\n\ttermination_voltage=0.18 #[v]\n\t\n\t#actions\n\ta=[1,2,3]\t#set of actions: 1:forward, 2:left 3:right\n\tNactions=3 #number of actions\n\n\t#states\n\tNstates=12\t#12 states: 6 health and 6 subhealth\n\n\t#action value function Q\n\tQ=np.zeros((Nstates,Nactions)) \n\tQ[0,2]=100\n\tQ[1,1]=100\n\tQ[2,0]=100\n\tQ[3,0]=100\n\tQ[4,1]=100\n\tQ[5,2]=100\n\tQ[6,2]=100\n\tQ[7,1]=100\n\tQ[8,0]=100\n\tQ[9,0]=100\n\tQ[10,1]=100\n\tQ[11,2]=100\n\n\t#Qlearning parameters\n\tgamma=0.9 #discount factor\n\talpha=0.1 #step size constant\n\tepsilon=0.1 #epsilon for e-greedy policy\n\n\t#INITIAL STATE\n\t#obtain sensors measurement (voltages)\n\tsensors=readSensors(pin1,pin2,pin3)\n\t\n\t#check emergency case\n\tif (sensors[1]>=emergency_voltage):\n\t\temergency=1\n\telse:\n\t\temergency=0\n\n\t\n\t#find out the type of state (health or subhealth)\n\ts_type_start=state_type(sensors,critical_voltage) \t\n\tprint \"s_type_start\",s_type_start\n\t\n\t#convert voltage/distance to interger verctor (state vector)\n\tstart_state=euclideanDistance2integers(sensors) #vector of 3 integers from 1-3\n\tprint \"start_state=\",start_state\n\n\t#index of next state\n\ts_index=state_index(start_state,s_type_start)\n\tprint \"s_index_start=\",s_index\n\t\n\tfor i in range(0,Nepisodes):\n\t\n\t\tterminal=0\n\t\t#initialize state\n\t\ts=start_state\n\t\ts_type=s_type_start\n#\t\tlocation=initial_location\n\n\t\t#run until we find a terminal state\n\t\twhile terminal==0:\n\t\t\t#choose next action (e-greedy)\n\t\t\tif (emergency == 1):\n\t\t\t\ta=4 #go backwards \n\t\t\telse:\n\n\t\t\t\tQ_row=Q[s_index-1]\n\t\t\t\tif random.random()<(1-epsilon):\n\t\t\t\t\t\n\t\t\t\t\tif np.all(Q_row==0):\n\t\t\t\t\t\ta=random.randint(1,3)\n\t\t\t\t\telse:\n\t\t\t\t\t\ta=Q_row.argmax() +1\n\t\t\t\telse:\n\t\t\t\t\ta=random.randint(1,3)\n\t\t\t\n\t\t\tprint \"action=\",a\n\t\n\t\t\t#execute the selected action and reach next state\n\t\t\t#s_transition=environment(a,s,s_type,location,critical_voltage,emergency_voltage,termination_voltage)\t\n\t\t\ts_transition=environment(a,s,s_type,critical_voltage,emergency_voltage,termination_voltage)\n\t\t\t\n\t\t\t#index of next state\n\t\t\ts_index_next=state_index(s_transition.next_state,s_transition.next_s_type)\n\t\n\t\t\tif a !=4: #emergency\n\t\t\t\t#update Q\n\t\t\t\tQ[s_index,a-1]=Q[s_index,a-1]+alpha*(s_transition.reward+gamma*max(Q[s_index_next,:])-Q[s_index,a-1])\n\t\n\t\t\t#update state, type of state and 
location\n\t\t\ts=s_transition.next_state\n\t\t\ts_type=s_transition.next_s_type\n\t\t\ts_index=s_index_next\n\t\t\t\n\t\t\tprint \"next_state=\",s\n\t\t\tprint \"next_s_type=\",s_type\n\t\t\tprint \"next_s_index=\",s_index\n\t\t\tprint \"next_Q=\",Q\n\t\t\tprint \"\\n\\n\"\n\t\t\t#location.x_coord_origin=s_transition.x_coord_origin\n\t\t\t#location.y_coord_origin=s_transition.x_coord_origin\n\t\t\t#location.x_coord_s1=s_transition.x_coord_s1\n\t\t\t#location.y_coord_s1=s_transition.x_coord_s1\n\t\t\t#location.y_coord_s2=s_transition.x_coord_s2\n\t\t\t#location.x_coord_s2=s_transition.x_coord_s2\n\t\t\t#location.y_coord_s3=s_transition.x_coord_s3\n\t\t\t#location.x_coord_s3=s_transition.x_coord_s3\n\n\t\t\t#update trajectory array (array of set of coordinates) \n\t\t\t#robotTrajectory.x_coord_origin.append(s_transition.agent_next_location.x_coord_origin)\n\t\t\t#robotTrajectory.y_coord_origin.append(s_transition.agent_next_location.y_coord_origin)\n\t\t\t#robotTrajectory.x_coord_s1.append(s_transition.agent_next_location.x_coord_s1)\n\t\t\t#robotTrajectory.y_coord_s1.append(s_transition.agent_next_location.y_coord_s1)\n\t\t\t#robotTrajectory.x_coord_s2.append(s_transition.agent_next_location.x_coord_s2)\n\t\t\t#robotTrajectory.y_coord_s2.append(s_transition.agent_next_location.y_coord_s2)\n\t\t\t#robotTrajectory.x_coord_s3.append(s_transition.agent_next_location.x_coord_s3)\n\t\t\t#robotTrajectory.y_coord_s3.append(s_transition.agent_next_location.y_coord_s3)\n\n\t\t\temergency=s_transition.emergency\n\t\t\tterminal=s_transition.terminal\n\t\n\t\t\tprint \"emergency=\",emergency\n\t\t\tprint \"terminal=\",terminal\n\t\t\t#print \"type=\",s_type\n\t\t\t\n\t\t\t#sleep(0.25)\n \n \n \n \n\n\n",
"id": "7079934",
"language": "Python",
"matching_score": 0,
"max_stars_count": 19,
"path": "Policy-I/python_code_robot_hardware/main_robot_project.py"
}
] | 0 |
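main_robot_project.py above is an epsilon-greedy tabular Q-learning controller wired to the BeagleBone ADC and PWM pins. The core update it applies is Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a)). A hardware-free sketch of that loop, with a toy random environment standing in for the sensors and motors:

import random

import numpy as np

N_STATES, N_ACTIONS = 12, 3          # mirrors the 12 states / 3 actions above
alpha, gamma, epsilon = 0.1, 0.9, 0.1

Q = np.zeros((N_STATES, N_ACTIONS))


def toy_environment(state, action):
    """Stand-in for readSensors()/execute_action(): random transition and reward."""
    next_state = random.randrange(N_STATES)
    reward = random.choice([-3, -1, 0, 1, 5])   # reward levels used by trans_reward()
    return next_state, reward


state = 0
for _ in range(1000):
    # Epsilon-greedy action selection.
    if random.random() < (1 - epsilon) and not np.all(Q[state] == 0):
        action = int(Q[state].argmax())
    else:
        action = random.randrange(N_ACTIONS)

    next_state, reward = toy_environment(state, action)

    # One-step Q-learning update: Q(s,a) += alpha * (r + gamma * max Q(s',.) - Q(s,a)).
    Q[state, action] += alpha * (reward + gamma * Q[next_state].max() - Q[state, action])
    state = next_state

print(Q)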
ZNHU | [
{
"content": "import pytest\nimport tensorflow as tf\nfrom packaging.version import parse as version\n\nfrom tf_keras_vis.utils import num_of_gpus, find_layer\n\n\nclass TestUtils():\n def test_num_of_gpus_if_no_gpus(self, monkeypatch):\n def list_physical_devices(name):\n return None\n\n def list_logical_devices(name):\n return None\n\n if version(tf.version.VERSION) < version(\"2.1.0\"):\n monkeypatch.setattr(tf.config.experimental, \"list_physical_devices\",\n list_physical_devices)\n monkeypatch.setattr(tf.config.experimental, \"list_logical_devices\",\n list_logical_devices)\n\n else:\n monkeypatch.setattr(tf.config, \"list_physical_devices\", list_physical_devices)\n monkeypatch.setattr(tf.config, \"list_logical_devices\", list_logical_devices)\n a, b, = num_of_gpus()\n assert a == 0\n assert b == 0\n\n def test_num_of_gpus(self, monkeypatch):\n def list_physical_devices(name):\n return ['dummy-a', 'dummy-b']\n\n def list_logical_devices(name):\n return ['a1', 'a2', 'b1', 'b2']\n\n if version(tf.version.VERSION) < version(\"2.1.0\"):\n monkeypatch.setattr(tf.config.experimental, \"list_physical_devices\",\n list_physical_devices)\n monkeypatch.setattr(tf.config.experimental, \"list_logical_devices\",\n list_logical_devices)\n\n else:\n monkeypatch.setattr(tf.config, \"list_physical_devices\", list_physical_devices)\n monkeypatch.setattr(tf.config, \"list_logical_devices\", list_logical_devices)\n a, b, = num_of_gpus()\n assert a == 2\n assert b == 4\n\n @pytest.mark.parametrize(\"offset_of_child_layer\", [\n False,\n True,\n ])\n def test_find_layer(self, offset_of_child_layer, conv_model):\n model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(3, 3, padding='same', input_shape=(8, 8, 3)),\n conv_model,\n tf.keras.layers.Dense(1),\n ])\n offset = conv_model.layers[-1] if offset_of_child_layer else None\n actual = find_layer(model, lambda l: l.name == 'conv-1', offset=offset)\n assert conv_model.get_layer(name='conv-1') == actual\n",
"id": "9322224",
"language": "Python",
"matching_score": 1.5747796297073364,
"max_stars_count": 1,
"path": "tests/tf-keras-vis/utils/utils_test.py"
},
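utils_test.py above stubs out tf.config.list_physical_devices/list_logical_devices with pytest's monkeypatch fixture so num_of_gpus can be tested on a machine without GPUs. A stripped-down sketch of the same monkeypatching pattern against a plain standard-library function (no TensorFlow needed):

# A minimal sketch; save as test_example.py and run with `pytest`.
import platform


def describe_host():
    return platform.system()


def test_describe_host_is_patchable(monkeypatch):
    # Replace the real platform.system with a stub for the duration of the test,
    # just as the tf-keras-vis test replaces tf.config.list_physical_devices.
    monkeypatch.setattr(platform, "system", lambda: "FakeOS")
    assert describe_host() == "FakeOS"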
{
"content": "import numpy as np\nimport pytest\nimport tensorflow as tf\n\nfrom tf_keras_vis import ModelVisualization\nfrom tf_keras_vis.utils.test import dummy_sample\n\n\nclass MockVisualizer(ModelVisualization):\n def __call__(self):\n pass\n\n\nclass TestModelVisualization():\n def _replace_activation(self, returns=False):\n def func(model):\n model.layers[-1].activation = tf.keras.activations.linear\n if returns:\n return model\n\n return func\n\n @pytest.mark.parametrize(\"modifier,clone,expected_same,expected_activation\", [\n (None, False, True, tf.keras.activations.softmax),\n (None, True, True, tf.keras.activations.softmax),\n ('not-return', False, True, tf.keras.activations.linear),\n ('not-return', True, False, tf.keras.activations.linear),\n ('return', False, True, tf.keras.activations.linear),\n ('return', True, False, tf.keras.activations.linear),\n ])\n def test__init__(self, modifier, clone, expected_same, expected_activation, conv_model):\n if modifier == 'return':\n mock = MockVisualizer(conv_model,\n model_modifier=self._replace_activation(returns=True),\n clone=clone)\n elif modifier == 'not-return':\n mock = MockVisualizer(conv_model,\n model_modifier=self._replace_activation(returns=False),\n clone=clone)\n else:\n mock = MockVisualizer(conv_model, clone=clone)\n assert (mock.model is conv_model) == expected_same\n assert mock.model.layers[-1].activation == expected_activation\n assert np.array_equal(mock.model.get_weights()[0], conv_model.get_weights()[0])\n\n @pytest.mark.parametrize(\"score,expected_shape\", [\n (dummy_sample((2, 32, 32, 3)), (2, )),\n ((dummy_sample((32, 32, 3)), dummy_sample((32, 32, 3))), (2, )),\n ([dummy_sample((32, 32, 3)), dummy_sample((32, 32, 3))], (2, )),\n (tf.constant(dummy_sample((2, 32, 32, 3))), (2, )),\n ((tf.constant(dummy_sample((32, 32, 3))), tf.constant(dummy_sample((32, 32, 3)))), (2, )),\n ([tf.constant(dummy_sample((32, 32, 3))),\n tf.constant(dummy_sample((32, 32, 3)))], (2, )),\n ])\n def test_mean_score_value(self, score, expected_shape, conv_model):\n actual = MockVisualizer(conv_model)._mean_score_value(score)\n assert actual.shape == expected_shape\n",
"id": "12301022",
"language": "Python",
"matching_score": 1.8239936828613281,
"max_stars_count": 1,
"path": "tests/tf-keras-vis/tf_keras_vis_test.py"
},
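The TestModelVisualization cases above revolve around the model_modifier/clone arguments: the typical modifier swaps the final softmax for a linear activation on a cloned copy of the model so the original is left untouched. A hedged usage sketch with a throwaway untrained Keras model (the architecture is illustrative only):

import tensorflow as tf

from tf_keras_vis.saliency import Saliency  # any ModelVisualization subclass works the same way


def replace_softmax_with_linear(model):
    # Mutate the (cloned) model in place, as the tests above do.
    model.layers[-1].activation = tf.keras.activations.linear


model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, activation='relu', input_shape=(32, 32, 3)),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(10, activation='softmax'),
])

# clone=True (the default) means the original model keeps its softmax output.
saliency = Saliency(model, model_modifier=replace_softmax_with_linear, clone=True)
print(model.layers[-1].activation)   # still softmax: only the internal clone was modified
print(saliency.model is model)       # False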
{
"content": "import pytest\nimport tensorflow as tf\nfrom packaging.version import parse as version\nfrom tensorflow.keras.models import load_model\n\nfrom tf_keras_vis.gradcam import Gradcam\nfrom tf_keras_vis.utils.test import (MockListOfScore, MockScore,\n MockTupleOfScore, does_not_raise,\n dummy_sample, mock_conv_model,\n mock_conv_model_with_flot32_output,\n mock_multiple_io_model)\n\nif version(tf.version.VERSION) >= version(\"2.4.0\"):\n from tensorflow.keras.mixed_precision import set_global_policy\n\n\nclass TestGradcamWithDenseModel():\n def test__call__(self, dense_model):\n gradcam = Gradcam(dense_model)\n with pytest.raises(ValueError):\n result = gradcam(MockScore(), dummy_sample((1, 8, 8, 3)))\n assert result.shape == (1, 8, 8)\n\n\nclass TestGradcam():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n (MockTupleOfScore(), does_not_raise()),\n (MockListOfScore(), does_not_raise()),\n ([MockScore()], does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, conv_model):\n gradcam = Gradcam(conv_model)\n with expectation:\n result = gradcam(scores, dummy_sample((1, 8, 8, 3)))\n assert result.shape == (1, 8, 8)\n\n @pytest.mark.parametrize(\"seed_input,expected,expectation\", [\n (None, None, pytest.raises(ValueError)),\n (dummy_sample((8, )), None, pytest.raises(ValueError)),\n (dummy_sample((8, 8, 3)), (1, 8, 8), does_not_raise()),\n ([dummy_sample((8, 8, 3))], [(1, 8, 8)], does_not_raise()),\n (dummy_sample((1, 8, 8, 3)), (1, 8, 8), does_not_raise()),\n ([dummy_sample((1, 8, 8, 3))], [(1, 8, 8)], does_not_raise()),\n ])\n def test__call__if_seed_input_is_(self, seed_input, expected, expectation, conv_model):\n gradcam = Gradcam(conv_model)\n with expectation:\n result = gradcam(MockScore(), seed_input)\n if type(expected) is list:\n assert type(result) is list\n expected = expected[0]\n result = result[0]\n assert result.shape == expected\n\n @pytest.mark.parametrize(\"penultimate_layer,seek_penultimate_conv_layer,expectation\", [\n (None, True, does_not_raise()),\n (-1, True, does_not_raise()),\n ('dense-1', True, does_not_raise()),\n ('dense-1', False, pytest.raises(ValueError)),\n (1, False, does_not_raise()),\n (1, True, does_not_raise()),\n ('conv-1', True, does_not_raise()),\n (0, True, pytest.raises(ValueError)),\n ('inupt-1', True, pytest.raises(ValueError)),\n (MockScore(), True, pytest.raises(ValueError)),\n ])\n def test__call__if_penultimate_layer_is(self, penultimate_layer, seek_penultimate_conv_layer,\n expectation, conv_model):\n gradcam = Gradcam(conv_model)\n with expectation:\n result = gradcam(MockScore(),\n dummy_sample((1, 8, 8, 3)),\n penultimate_layer=penultimate_layer,\n seek_penultimate_conv_layer=seek_penultimate_conv_layer)\n assert result.shape == (1, 8, 8)\n\n def test__call__if_normalize_gradient_is_True(self, conv_model):\n gradcam = Gradcam(conv_model)\n result = gradcam(MockScore(), dummy_sample((1, 8, 8, 3)), normalize_gradient=True)\n assert result.shape == (1, 8, 8)\n\n def test__call__if_expand_cam_is_False(self, conv_model):\n gradcam = Gradcam(conv_model)\n result = gradcam(MockScore(), dummy_sample((1, 8, 8, 3)), expand_cam=False)\n assert result.shape == (1, 6, 6)\n\n\nclass TestGradcamWithMultipleInputsModel():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n (MockTupleOfScore(), does_not_raise()),\n (MockListOfScore(), does_not_raise()),\n ([MockScore()], 
does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, multiple_inputs_model):\n gradcam = Gradcam(multiple_inputs_model)\n with expectation:\n result = gradcam(scores, [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))])\n assert len(result) == 2\n assert result[0].shape == (1, 8, 8)\n assert result[1].shape == (1, 10, 10)\n\n @pytest.mark.parametrize(\"seed_input,expectation\", [\n (None, pytest.raises(ValueError)),\n (dummy_sample((1, 8, 8, 3)), pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3))], pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))], does_not_raise()),\n ])\n def test__call__if_seed_input_is_(self, seed_input, expectation, multiple_inputs_model):\n gradcam = Gradcam(multiple_inputs_model)\n with expectation:\n result = gradcam(MockScore(), seed_input)\n assert result[0].shape == (1, 8, 8)\n assert result[1].shape == (1, 10, 10)\n\n\nclass TestGradcamWithMultipleOutputsModel():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n ([None], pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n ([MockScore()], does_not_raise()),\n ([None, None], pytest.raises(ValueError)),\n ([MockScore(), None], pytest.raises(ValueError)),\n ([MockScore(), MockScore()], does_not_raise()),\n ([MockTupleOfScore(), MockTupleOfScore()], does_not_raise()),\n ([MockListOfScore(), MockListOfScore()], does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, multiple_outputs_model):\n gradcam = Gradcam(multiple_outputs_model)\n with expectation:\n result = gradcam(scores, dummy_sample((1, 8, 8, 3)))\n assert result.shape == (1, 8, 8)\n\n @pytest.mark.parametrize(\"seed_input,expected,expectation\", [\n (None, None, pytest.raises(ValueError)),\n (dummy_sample((8, )), None, pytest.raises(ValueError)),\n (dummy_sample((8, 8, 3)), (1, 8, 8), does_not_raise()),\n ([dummy_sample((8, 8, 3))], [(1, 8, 8)], does_not_raise()),\n (dummy_sample((1, 8, 8, 3)), (1, 8, 8), does_not_raise()),\n ([dummy_sample((1, 8, 8, 3))], [(1, 8, 8)], does_not_raise()),\n ])\n def test__call__if_seed_input_is_(self, seed_input, expected, expectation,\n multiple_outputs_model):\n gradcam = Gradcam(multiple_outputs_model)\n with expectation:\n result = gradcam(MockScore(), seed_input)\n if type(expected) is list:\n assert type(result) is list\n expected = expected[0]\n result = result[0]\n assert result.shape == expected\n\n\nclass TestGradcamWithMultipleIOModel():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n ([None], pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n ([MockScore()], does_not_raise()),\n ([None, None], pytest.raises(ValueError)),\n ([MockScore(), None], pytest.raises(ValueError)),\n ([MockScore(), MockScore()], does_not_raise()),\n ([MockTupleOfScore(), MockTupleOfScore()], does_not_raise()),\n ([MockListOfScore(), MockListOfScore()], does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, multiple_io_model):\n gradcam = Gradcam(multiple_io_model)\n with expectation:\n result = gradcam(scores, [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))])\n assert result[0].shape == (1, 8, 8)\n assert result[1].shape == (1, 10, 10)\n\n @pytest.mark.parametrize(\"seed_input,expectation\", [\n (None, pytest.raises(ValueError)),\n (dummy_sample((1, 8, 8, 3)), pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3))], pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3)), 
dummy_sample((1, 10, 10, 3))], does_not_raise()),\n ])\n def test__call__if_seed_input_is_(self, seed_input, expectation, multiple_io_model):\n gradcam = Gradcam(multiple_io_model)\n with expectation:\n result = gradcam(MockScore(), seed_input)\n assert result[0].shape == (1, 8, 8)\n assert result[1].shape == (1, 10, 10)\n\n\n@pytest.mark.skipif(version(tf.version.VERSION) < version(\"2.4.0\"),\n reason=\"This test is enabled when tensorflow version is 2.4.0+.\")\nclass TestGradcamWithMixedPrecision():\n def test__call__with_single_io(self, tmpdir):\n set_global_policy('mixed_float16')\n model = mock_conv_model()\n self._test_for_single_io(model)\n path = tmpdir.mkdir(\"tf-keras-vis\").join(\"single_io.h5\")\n model.save(path)\n set_global_policy('float32')\n model = load_model(path)\n self._test_for_single_io(model)\n\n def test__call__with_float32_output_model(self, tmpdir):\n set_global_policy('mixed_float16')\n model = mock_conv_model_with_flot32_output()\n self._test_for_single_io(model)\n path = tmpdir.mkdir(\"tf-keras-vis\").join(\"float32_output.h5\")\n model.save(path)\n set_global_policy('float32')\n model = load_model(path)\n self._test_for_single_io(model)\n\n def _test_for_single_io(self, model):\n gradcam = Gradcam(model)\n result = gradcam(MockScore(), dummy_sample((1, 8, 8, 3)))\n assert result.shape == (1, 8, 8)\n\n def test__call__with_multiple_io(self, tmpdir):\n set_global_policy('mixed_float16')\n model = mock_multiple_io_model()\n self._test_for_multiple_io(model)\n path = tmpdir.mkdir(\"tf-keras-vis\").join(\"multiple_io.h5\")\n model.save(path)\n set_global_policy('float32')\n model = load_model(path)\n self._test_for_multiple_io(model)\n\n def _test_for_multiple_io(self, model):\n gradcam = Gradcam(model)\n result = gradcam(MockScore(), [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))])\n assert result[0].shape == (1, 8, 8)\n assert result[1].shape == (1, 10, 10)\n",
"id": "3366012",
"language": "Python",
"matching_score": 5.4119791984558105,
"max_stars_count": 1,
"path": "tests/tf-keras-vis/gradcam_test.py"
},
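Outside the fixtures, a plain Gradcam call looks like the sketch below; the argument names (penultimate_layer, model_modifier, clone) follow the tests above, the model and random images are placeholders, and other tf-keras-vis versions may differ:

import numpy as np
import tensorflow as tf

from tf_keras_vis.gradcam import Gradcam
from tf_keras_vis.utils.scores import CategoricalScore

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, activation='relu', name='conv-1', input_shape=(32, 32, 3)),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(10, activation='softmax', name='dense-1'),
])


def linearize(m):
    m.layers[-1].activation = tf.keras.activations.linear


gradcam = Gradcam(model, model_modifier=linearize, clone=True)

images = np.random.rand(2, 32, 32, 3).astype(np.float32)
score = CategoricalScore([1, 3])                 # one target class index per image
cam = gradcam(score, images, penultimate_layer='conv-1')
print(cam.shape)                                 # (2, 32, 32): one heatmap per image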
{
"content": "import pytest\nimport tensorflow as tf\nfrom packaging.version import parse as version\nfrom tensorflow.keras.models import load_model\n\nfrom tf_keras_vis.activation_maximization import ActivationMaximization\nfrom tf_keras_vis.utils.input_modifiers import Jitter, Rotate\nfrom tf_keras_vis.utils.test import (MockCallback, MockListOfScore, MockScore,\n MockTupleOfScore, does_not_raise,\n dummy_sample, mock_conv_model,\n mock_conv_model_with_flot32_output,\n mock_multiple_io_model)\n\nif version(tf.version.VERSION) >= version(\"2.4.0\"):\n from tensorflow.keras.mixed_precision import set_global_policy\n\n\nclass TestActivationMaximization():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n (MockTupleOfScore(), does_not_raise()),\n (MockListOfScore(), does_not_raise()),\n ([MockScore()], does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, conv_model):\n activation_maximization = ActivationMaximization(conv_model)\n with expectation:\n result = activation_maximization(scores, steps=3)\n assert result.shape == (1, 8, 8, 3)\n\n @pytest.mark.parametrize(\"seed_input,expected\", [\n ([dummy_sample((8, 8, 3))], [(1, 8, 8, 3)]),\n (dummy_sample((1, 8, 8, 3)), (1, 8, 8, 3)),\n ([dummy_sample((1, 8, 8, 3))], [(1, 8, 8, 3)]),\n ])\n def test__call__if_seed_input_is_(self, seed_input, expected, conv_model):\n activation_maximization = ActivationMaximization(conv_model)\n result = activation_maximization(MockScore(), seed_input=seed_input, steps=3)\n if type(expected) is list:\n assert type(result) == list\n result = result[0]\n expected = expected[0]\n assert result.shape == expected\n\n def test__call__with_callback(self, conv_model):\n activation_maximization = ActivationMaximization(conv_model)\n mock = MockCallback()\n result = activation_maximization(MockScore(), steps=3, callbacks=mock)\n assert result.shape == (1, 8, 8, 3)\n assert mock.on_begin_was_called\n assert mock.on_call_was_called\n assert mock.on_end_was_called\n\n def test__call__with_gradient_modifier(self, conv_model):\n activation_maximization = ActivationMaximization(conv_model)\n result = activation_maximization(MockScore(), steps=3, gradient_modifier=lambda x: x * 0.0)\n assert result.shape == (1, 8, 8, 3)\n\n def test__call__if_normalize_gradient_is_True(self, conv_model):\n activation_maximization = ActivationMaximization(conv_model)\n result = activation_maximization(MockScore(), steps=3, normalize_gradient=True)\n assert result.shape == (1, 8, 8, 3)\n\n\nclass TestActivationMaximizationWithMultipleInputsModel():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n (MockTupleOfScore(), does_not_raise()),\n (MockListOfScore(), does_not_raise()),\n ([MockScore()], does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, multiple_inputs_model):\n activation_maximization = ActivationMaximization(multiple_inputs_model)\n with expectation:\n result = activation_maximization(scores, steps=3)\n assert result[0].shape == (1, 8, 8, 3)\n assert result[1].shape == (1, 10, 10, 3)\n\n @pytest.mark.parametrize(\"seed_inputs,expectation\", [\n (None, does_not_raise()),\n (dummy_sample((1, 8, 8, 3)), pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3))], pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3)), None], pytest.raises(ValueError)),\n ([None, dummy_sample((1, 10, 10, 3))], pytest.raises(ValueError)),\n 
([dummy_sample((8, 8, 3)), dummy_sample((10, 10, 3))], does_not_raise()),\n ([dummy_sample((1, 8, 8, 3)), dummy_sample((10, 10, 3))], does_not_raise()),\n ([dummy_sample((8, 8, 3)), dummy_sample((1, 10, 10, 3))], does_not_raise()),\n ([dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))], does_not_raise()),\n ])\n def test__call__if_seed_input_is_(self, seed_inputs, expectation, multiple_inputs_model):\n activation_maximization = ActivationMaximization(multiple_inputs_model)\n with expectation:\n result = activation_maximization(MockScore(), steps=3, seed_input=seed_inputs)\n assert result[0].shape == (1, 8, 8, 3)\n assert result[1].shape == (1, 10, 10, 3)\n\n\nclass TestActivationMaximizationWithMultipleOutputsModel():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n ([MockScore()], does_not_raise()),\n ([MockScore(), None], pytest.raises(ValueError)),\n ([MockScore(), MockScore()], does_not_raise()),\n ([MockTupleOfScore(), MockTupleOfScore()], does_not_raise()),\n ([MockListOfScore(), MockListOfScore()], does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, multiple_outputs_model):\n activation_maximization = ActivationMaximization(multiple_outputs_model)\n with expectation:\n result = activation_maximization(scores, steps=3)\n assert result.shape == (1, 8, 8, 3)\n\n @pytest.mark.parametrize(\"seed_input,expected\", [\n ([dummy_sample((8, 8, 3))], [(1, 8, 8, 3)]),\n (dummy_sample((1, 8, 8, 3)), (1, 8, 8, 3)),\n ([dummy_sample((1, 8, 8, 3))], [(1, 8, 8, 3)]),\n ])\n def test__call__if_seed_input_is_(self, seed_input, expected, multiple_outputs_model):\n activation_maximization = ActivationMaximization(multiple_outputs_model)\n result = activation_maximization(MockScore(), seed_input=seed_input, steps=3)\n if type(expected) is list:\n assert type(result) == list\n result = result[0]\n expected = expected[0]\n assert result.shape == expected\n\n\nclass TestActivationMaximizationWithMultipleIOModel():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n ([MockScore()], does_not_raise()),\n ([MockScore(), None], pytest.raises(ValueError)),\n ([MockScore(), MockScore()], does_not_raise()),\n ([MockTupleOfScore(), MockTupleOfScore()], does_not_raise()),\n ([MockListOfScore(), MockListOfScore()], does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, multiple_io_model):\n activation_maximization = ActivationMaximization(multiple_io_model)\n with expectation:\n result = activation_maximization(scores, steps=3)\n assert result[0].shape == (1, 8, 8, 3)\n assert result[1].shape == (1, 10, 10, 3)\n\n @pytest.mark.parametrize(\"seed_inputs,expectation\", [\n (None, does_not_raise()),\n (dummy_sample((1, 8, 8, 3)), pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3))], pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3)), None], pytest.raises(ValueError)),\n ([None, dummy_sample((1, 10, 10, 3))], pytest.raises(ValueError)),\n ([dummy_sample((8, 8, 3)), dummy_sample((10, 10, 3))], does_not_raise()),\n ([dummy_sample((1, 8, 8, 3)), dummy_sample((10, 10, 3))], does_not_raise()),\n ([dummy_sample((8, 8, 3)), dummy_sample((1, 10, 10, 3))], does_not_raise()),\n ([dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))], does_not_raise()),\n ])\n def test__call__if_seed_input_is_(self, seed_inputs, expectation, multiple_io_model):\n activation_maximization = 
ActivationMaximization(multiple_io_model)\n with expectation:\n result = activation_maximization(MockScore(), steps=3, seed_input=seed_inputs)\n assert result[0].shape == (1, 8, 8, 3)\n assert result[1].shape == (1, 10, 10, 3)\n\n def test__call__with_inputs_modifiers(self, multiple_io_model):\n activation_maximization = ActivationMaximization(multiple_io_model)\n result = activation_maximization(\n MockScore(), steps=3, input_modifiers={'input-1': [Jitter(jitter=8),\n Rotate(degree=3)]})\n assert result[0].shape == (1, 8, 8, 3)\n assert result[1].shape == (1, 10, 10, 3)\n\n\n@pytest.mark.skipif(version(tf.version.VERSION) < version(\"2.4.0\"),\n reason=\"This test is enabled when tensorflow version is 2.4.0+.\")\nclass TestActivationMaximizationWithMixedPrecision():\n def test__call__with_single_io(self, tmpdir):\n set_global_policy('mixed_float16')\n model = mock_conv_model()\n self._test_for_single_io(model)\n path = tmpdir.mkdir(\"tf-keras-vis\").join(\"single_io.h5\")\n model.save(path)\n set_global_policy('float32')\n model = load_model(path)\n self._test_for_single_io(model)\n\n @pytest.mark.skip(reson=\"Because can't avoid error. It may be any bug in Tensorflow.\")\n def test__call__with_float32_output_model(self, tmpdir):\n set_global_policy('mixed_float16')\n model = mock_conv_model_with_flot32_output()\n self._test_for_single_io(model)\n path = tmpdir.mkdir(\"tf-keras-vis\").join(\"float32_output.h5\")\n model.save(path)\n set_global_policy('float32')\n model = load_model(path)\n self._test_for_single_io(model)\n\n def _test_for_single_io(self, model):\n activation_maximization = ActivationMaximization(model)\n result = activation_maximization(MockScore(), steps=3)\n assert result.shape == (1, 8, 8, 3)\n\n def test__call__with_multiple_io(self, tmpdir):\n set_global_policy('mixed_float16')\n model = mock_multiple_io_model()\n self._test_for_multiple_io(model)\n path = tmpdir.mkdir(\"tf-keras-vis\").join(\"multiple_io.h5\")\n model.save(path)\n set_global_policy('float32')\n model = load_model(path)\n self._test_for_multiple_io(model)\n\n def _test_for_multiple_io(self, model):\n activation_maximization = ActivationMaximization(model)\n result = activation_maximization(MockScore(), steps=3)\n assert result[0].shape == (1, 8, 8, 3)\n assert result[1].shape == (1, 10, 10, 3)\n\n def test__call__when_reuse_optimizer(self):\n set_global_policy('mixed_float16')\n optimizer = tf.keras.optimizers.RMSprop()\n model = mock_conv_model()\n activation_maximization = ActivationMaximization(model)\n result = activation_maximization(MockScore(), steps=3, optimizer=optimizer)\n assert result.shape == (1, 8, 8, 3)\n with pytest.raises(ValueError):\n result = activation_maximization(MockScore(), steps=3, optimizer=optimizer)\n assert result.shape == (1, 8, 8, 3)\n",
"id": "1747161",
"language": "Python",
"matching_score": 5.713696479797363,
"max_stars_count": 1,
"path": "tests/tf-keras-vis/activation_maximization/activation_maximization_test.py"
},
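ActivationMaximization follows the same calling convention: pass a score and a step count, optionally with the Jitter/Rotate input modifiers exercised above, and it synthesises an input that maximises the score. A hedged sketch with an untrained toy model (the step count and architecture are arbitrary):

import tensorflow as tf

from tf_keras_vis.activation_maximization import ActivationMaximization
from tf_keras_vis.utils.scores import CategoricalScore

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, activation='relu', input_shape=(32, 32, 3)),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(10, activation='softmax'),
])


def linearize(m):
    m.layers[-1].activation = tf.keras.activations.linear


activation_maximization = ActivationMaximization(model, model_modifier=linearize, clone=True)
result = activation_maximization(CategoricalScore(0), steps=10)   # the tests above use steps=3
print(result.shape)                                               # (1, 32, 32, 3)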
{
"content": "import numpy as np\nimport pytest\nimport tensorflow as tf\nfrom packaging.version import parse as version\nfrom tensorflow.keras.models import load_model\n\nfrom tf_keras_vis.saliency import Saliency\nfrom tf_keras_vis.utils.scores import BinaryScore, CategoricalScore\nfrom tf_keras_vis.utils.test import (MockListOfScore, MockScore,\n MockTupleOfScore, does_not_raise,\n dummy_sample, mock_conv_model,\n mock_conv_model_with_flot32_output,\n mock_multiple_io_model)\n\nif version(tf.version.VERSION) >= version(\"2.4.0\"):\n from tensorflow.keras.mixed_precision import set_global_policy\n\n\nclass TestSaliency():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n (MockTupleOfScore(), does_not_raise()),\n (MockListOfScore(), does_not_raise()),\n ([MockScore()], does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, conv_model):\n saliency = Saliency(conv_model)\n with expectation:\n result = saliency(scores, dummy_sample((1, 8, 8, 3)))\n assert result.shape == (1, 8, 8)\n\n @pytest.mark.parametrize(\"seed_input,expected,expectation\", [\n (None, None, pytest.raises(ValueError)),\n (dummy_sample((8, )), None, pytest.raises(ValueError)),\n (dummy_sample((8, 8, 3)), (1, 8, 8), does_not_raise()),\n ([dummy_sample((8, 8, 3))], [(1, 8, 8)], does_not_raise()),\n (dummy_sample((1, 8, 8, 3)), (1, 8, 8), does_not_raise()),\n ([dummy_sample((1, 8, 8, 3))], [(1, 8, 8)], does_not_raise()),\n ])\n def test__call__if_seed_input_is_(self, seed_input, expected, expectation, conv_model):\n saliency = Saliency(conv_model)\n with expectation:\n result = saliency(MockScore(), seed_input)\n if type(expected) is list:\n assert type(result) is list\n expected = expected[0]\n result = result[0]\n assert result.shape == expected\n\n @pytest.mark.parametrize(\"keepdims,expected\", [\n (False, (1, 8, 8)),\n (True, (1, 8, 8, 3)),\n ])\n def test__call__if_keepdims_is_(self, keepdims, expected, conv_model):\n saliency = Saliency(conv_model)\n result = saliency(MockScore(), dummy_sample((1, 8, 8, 3)), keepdims=keepdims)\n assert result.shape == expected\n\n @pytest.mark.parametrize(\"smooth_samples\", [1, 3])\n def test__call__if_smoothing_is_active(self, smooth_samples, conv_model):\n saliency = Saliency(conv_model)\n result = saliency(MockScore(), dummy_sample((1, 8, 8, 3)), smooth_samples=smooth_samples)\n assert result.shape == (1, 8, 8)\n\n def test__call__if_model_has_only_dense_layers(self, dense_model):\n saliency = Saliency(dense_model)\n result = saliency(MockScore(), dummy_sample((3, )), keepdims=True)\n assert result.shape == (1, 3)\n\n @pytest.mark.parametrize(\"score_class,modefier_enabled,clone_enabled,\"\n \"batch_size,expectation\", [\n (BinaryScore, False, False, 0, does_not_raise()),\n (BinaryScore, False, False, 1, does_not_raise()),\n (BinaryScore, False, False, 5, does_not_raise()),\n (BinaryScore, False, True, 5, does_not_raise()),\n (BinaryScore, True, False, 5, does_not_raise()),\n (BinaryScore, True, True, 5, does_not_raise()),\n (CategoricalScore, False, False, 0, does_not_raise()),\n (CategoricalScore, False, False, 1, does_not_raise()),\n (CategoricalScore, False, False, 5, does_not_raise()),\n (CategoricalScore, False, True, 5, does_not_raise()),\n (CategoricalScore, True, False, 5, does_not_raise()),\n (CategoricalScore, True, True, 5, does_not_raise()),\n ])\n def test__call__with_categorical_score(self, score_class, modefier_enabled, clone_enabled,\n batch_size, expectation, 
conv_model, conv_sigmoid_model):\n # Release v.0.6.0@dev(May 22 2021):\n # Add this case to test Saliency with ScoreClasses.\n def model_modifier(model):\n model.layers[-1].activation = tf.keras.activations.linear\n\n if score_class is BinaryScore:\n model = conv_sigmoid_model\n else:\n model = conv_model\n\n score_targets = np.random.randint(0, 1, max(batch_size, 1))\n score = score_class(list(score_targets))\n\n seed_input_shape = (8, 8, 3)\n if batch_size > 0:\n seed_input_shape = (batch_size, ) + seed_input_shape\n seed_input = dummy_sample(seed_input_shape)\n\n with expectation:\n saliency = Saliency(model,\n model_modifier=model_modifier if modefier_enabled else None,\n clone=clone_enabled)\n result = saliency(score, seed_input=seed_input)\n if modefier_enabled and clone_enabled:\n assert model is not saliency.model\n else:\n assert model is saliency.model\n assert result.shape == (max(batch_size, 1), 8, 8)\n\n\nclass TestSaliencyWithMultipleInputsModel():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n (MockTupleOfScore(), does_not_raise()),\n (MockListOfScore(), does_not_raise()),\n ([MockScore()], does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, multiple_inputs_model):\n saliency = Saliency(multiple_inputs_model)\n with expectation:\n result = saliency(scores, [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))])\n assert len(result) == 2\n assert result[0].shape == (1, 8, 8)\n assert result[1].shape == (1, 10, 10)\n\n @pytest.mark.parametrize(\"seed_input,expectation\", [\n (None, pytest.raises(ValueError)),\n (dummy_sample((1, 8, 8, 3)), pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3))], pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))], does_not_raise()),\n ])\n def test__call__if_seed_input_is_(self, seed_input, expectation, multiple_inputs_model):\n saliency = Saliency(multiple_inputs_model)\n with expectation:\n result = saliency(MockScore(), seed_input)\n assert len(result) == 2\n assert result[0].shape == (1, 8, 8)\n assert result[1].shape == (1, 10, 10)\n\n @pytest.mark.parametrize(\"keepdims,expected\", [\n (False, [(1, 8, 8), (1, 10, 10)]),\n (True, [(1, 8, 8, 3), (1, 10, 10, 3)]),\n ])\n def test__call__if_keepdims_is_(self, keepdims, expected, multiple_inputs_model):\n saliency = Saliency(multiple_inputs_model)\n result = saliency(MockScore(), [dummy_sample(\n (1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))],\n keepdims=keepdims)\n assert len(result) == 2\n assert result[0].shape == expected[0]\n assert result[1].shape == expected[1]\n\n\nclass TestSaliencyWithMultipleOutputsModel():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n ([None], pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n ([MockScore()], does_not_raise()),\n ([MockScore(), None], pytest.raises(ValueError)),\n ([MockScore(), MockScore()], does_not_raise()),\n ([MockTupleOfScore(), MockTupleOfScore()], does_not_raise()),\n ([MockListOfScore(), MockListOfScore()], does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, multiple_outputs_model):\n saliency = Saliency(multiple_outputs_model)\n with expectation:\n result = saliency(scores, dummy_sample((1, 8, 8, 3)))\n assert result.shape == (1, 8, 8)\n\n @pytest.mark.parametrize(\"seed_input,expected,expectation\", [\n (None, None, pytest.raises(ValueError)),\n (dummy_sample((8, )), None, 
pytest.raises(ValueError)),\n (dummy_sample((8, 8, 3)), (1, 8, 8), does_not_raise()),\n ([dummy_sample((8, 8, 3))], [(1, 8, 8)], does_not_raise()),\n (dummy_sample((1, 8, 8, 3)), (1, 8, 8), does_not_raise()),\n ([dummy_sample((1, 8, 8, 3))], [(1, 8, 8)], does_not_raise()),\n ])\n def test__call__if_seed_input_is_(self, seed_input, expected, expectation,\n multiple_outputs_model):\n saliency = Saliency(multiple_outputs_model)\n with expectation:\n result = saliency(MockScore(), seed_input)\n if type(expected) is list:\n assert type(result) is list\n expected = expected[0]\n result = result[0]\n assert result.shape == expected\n\n\nclass TestSaliencyWithMultipleIOModel():\n @pytest.mark.parametrize(\"scores,expectation\", [\n (None, pytest.raises(ValueError)),\n ([None], pytest.raises(ValueError)),\n (MockScore(), does_not_raise()),\n ([MockScore()], does_not_raise()),\n ([MockScore(), None], pytest.raises(ValueError)),\n ([MockScore(), MockScore()], does_not_raise()),\n ([MockTupleOfScore(), MockTupleOfScore()], does_not_raise()),\n ([MockListOfScore(), MockListOfScore()], does_not_raise()),\n ])\n def test__call__if_score_is_(self, scores, expectation, multiple_io_model):\n saliency = Saliency(multiple_io_model)\n with expectation:\n result = saliency(scores, [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))])\n assert len(result) == 2\n assert result[0].shape == (1, 8, 8)\n assert result[1].shape == (1, 10, 10)\n\n @pytest.mark.parametrize(\"seed_input,expectation\", [\n (None, pytest.raises(ValueError)),\n (dummy_sample((1, 8, 8, 3)), pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3))], pytest.raises(ValueError)),\n ([dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))], does_not_raise()),\n ])\n def test__call__if_seed_input_is_(self, seed_input, expectation, multiple_io_model):\n saliency = Saliency(multiple_io_model)\n with expectation:\n result = saliency(MockScore(), seed_input)\n assert len(result) == 2\n assert result[0].shape == (1, 8, 8)\n assert result[1].shape == (1, 10, 10)\n\n\n@pytest.mark.skipif(version(tf.version.VERSION) < version(\"2.4.0\"),\n reason=\"This test is enabled when tensorflow version is 2.4.0+.\")\nclass TestSaliencyWithMixedPrecision():\n def test__call__with_single_io(self, tmpdir):\n set_global_policy('mixed_float16')\n model = mock_conv_model()\n self._test_for_single_io(model)\n path = tmpdir.mkdir(\"tf-keras-vis\").join(\"single_io.h5\")\n model.save(path)\n set_global_policy('float32')\n model = load_model(path)\n self._test_for_single_io(model)\n\n def test__call__with_float32_output_model(self, tmpdir):\n set_global_policy('mixed_float16')\n model = mock_conv_model_with_flot32_output()\n self._test_for_single_io(model)\n path = tmpdir.mkdir(\"tf-keras-vis\").join(\"float32_output.h5\")\n model.save(path)\n set_global_policy('float32')\n model = load_model(path)\n self._test_for_single_io(model)\n\n def _test_for_single_io(self, model):\n saliency = Saliency(model)\n result = saliency(MockScore(), dummy_sample((1, 8, 8, 3)))\n assert result.shape == (1, 8, 8)\n\n def test__call__with_multiple_io(self, tmpdir):\n set_global_policy('mixed_float16')\n model = mock_multiple_io_model()\n self._test_for_multiple_io(model)\n path = tmpdir.mkdir(\"tf-keras-vis\").join(\"multiple_io.h5\")\n model.save(path)\n set_global_policy('float32')\n model = load_model(path)\n self._test_for_multiple_io(model)\n\n def _test_for_multiple_io(self, model):\n saliency = Saliency(model)\n result = saliency(MockScore(), [dummy_sample((1, 8, 8, 3)), 
dummy_sample((1, 10, 10, 3))])\n assert len(result) == 2\n assert result[0].shape == (1, 8, 8)\n assert result[1].shape == (1, 10, 10)\n",
"id": "9161633",
"language": "Python",
"matching_score": 3.2153687477111816,
"max_stars_count": 1,
"path": "tests/tf-keras-vis/saliency_test.py"
},
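For Saliency, the smooth_samples argument exercised above switches on SmoothGrad-style averaging over noisy copies of the input. A hedged usage sketch (untrained toy model, random images; argument names as in these tests):

import numpy as np
import tensorflow as tf

from tf_keras_vis.saliency import Saliency
from tf_keras_vis.utils.scores import CategoricalScore

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, activation='relu', input_shape=(32, 32, 3)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation='softmax'),
])


def linearize(m):
    m.layers[-1].activation = tf.keras.activations.linear


saliency = Saliency(model, model_modifier=linearize)

images = np.random.rand(4, 32, 32, 3).astype(np.float32)
score = CategoricalScore([0, 1, 2, 3])                      # one class index per image
saliency_map = saliency(score, images, smooth_samples=20)   # 20 noisy copies are averaged
print(saliency_map.shape)                # (4, 32, 32); keepdims=True would keep the channel axis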
{
"content": "import numpy as np\nimport pytest\nimport tensorflow as tf\n\nfrom tf_keras_vis.utils.scores import (BinaryScore, CategoricalScore,\n InactiveScore)\nfrom tf_keras_vis.utils.test import does_not_raise, dummy_sample\n\n\nclass TestInactiveScore():\n @pytest.mark.parametrize(\"output,expected_shape,expectation\", [\n (dummy_sample((1, 1)), (1, 1), does_not_raise()),\n (dummy_sample((10, 5)), (10, 5), does_not_raise()),\n (dummy_sample((1, 224, 224, 3)), (1, 224, 224, 3), does_not_raise()),\n ])\n def test__call__(self, output, expected_shape, expectation):\n with expectation:\n actual = InactiveScore()(output)\n assert np.all(actual == 0.0)\n assert actual.shape == expected_shape\n\n\nclass TestBinaryScore():\n @pytest.mark.parametrize(\"target_values,expected,expectation\", [\n (None, None, pytest.raises(ValueError)),\n (0, [False], does_not_raise()),\n (1, [True], does_not_raise()),\n (100, None, pytest.raises(ValueError)),\n (-1, None, pytest.raises(ValueError)),\n (1.0, [True], does_not_raise()),\n ([None], None, pytest.raises(ValueError)),\n ([0, 0], [False, False], does_not_raise()),\n ([0, 1, 0], [False, True, False], does_not_raise()),\n ([-1, 0], None, pytest.raises(ValueError)),\n ])\n def test__init__(self, target_values, expected, expectation):\n with expectation:\n score = BinaryScore(target_values)\n assert score.target_values == expected\n\n @pytest.mark.parametrize(\"target_values,output,expected,expectation\", [\n (False, [[1, 1, 0], [1, 0, 1]], [0], pytest.raises(ValueError)),\n (False, [[1]], [0], does_not_raise()),\n (False, [[0]], [1], does_not_raise()),\n (True, [[1]], [1], does_not_raise()),\n (True, [[0]], [0], does_not_raise()),\n (True, [[0], [1], [0]], [0, 1, 0], does_not_raise()),\n (False, [[0], [1], [0]], [1, 0, 1], does_not_raise()),\n ([True, False, True], [[0], [1], [0]], [0, 0, 0], does_not_raise()),\n ([False, True, False], [[0], [1], [0]], [1, 1, 1], does_not_raise()),\n ])\n def test__call__(self, target_values, output, expected, expectation):\n output = tf.constant(output, tf.float32)\n score = BinaryScore(target_values)\n with expectation:\n score_value = score(output)\n assert score_value == expected\n\n\nclass TestCategoricalScore():\n @pytest.mark.parametrize(\"indices,expected,expectation\", [\n (None, None, pytest.raises(ValueError)),\n (5, [5], does_not_raise()),\n ((1, ), [1], does_not_raise()),\n ([3], [3], does_not_raise()),\n ([None], None, pytest.raises(ValueError)),\n ([2, None], None, pytest.raises(ValueError)),\n ((0, 8, 3), [0, 8, 3], does_not_raise()),\n ([0, 8, 3], [0, 8, 3], does_not_raise()),\n ])\n def test__init__(self, indices, expected, expectation):\n with expectation:\n score = CategoricalScore(indices)\n assert score.indices == expected\n\n @pytest.mark.parametrize(\"indices,output_shape,expectation\", [\n (2, (1, ), pytest.raises(ValueError)),\n (2, (1, 2), pytest.raises(ValueError)),\n (2, (1, 4, 1), pytest.raises(ValueError)),\n (2, (1, 4, 3), does_not_raise()),\n (2, (2, 4, 3), does_not_raise()),\n (2, (8, 32, 32, 3), does_not_raise()),\n ])\n def test__call__(self, indices, output_shape, expectation):\n output = tf.constant(dummy_sample(output_shape), tf.float32)\n score = CategoricalScore(indices)\n with expectation:\n score_value = score(output)\n assert score_value.shape == output_shape[0:1]\n",
"id": "5553329",
"language": "Python",
"matching_score": 2.2485129833221436,
"max_stars_count": 1,
"path": "tests/tf-keras-vis/utils/scores_test.py"
},
{
"content": "from abc import ABC, abstractmethod\n\nimport tensorflow as tf\n\nfrom tf_keras_vis.utils import listify\n\n\nclass Score(ABC):\n def __init__(self, name):\n self.name = name\n\n @abstractmethod\n def __call__(self, output):\n raise NotImplementedError()\n\n\nclass InactiveScore(Score):\n def __init__(self):\n super().__init__('InactiveScore')\n\n def __call__(self, output):\n return output * 0.0\n\n\nclass BinaryScore(Score):\n def __init__(self, target_values):\n '''\n target_values: bool values. When the type of values is not bool,\n they will be casted to bool.\n '''\n super().__init__('BinaryScore')\n self.target_values = listify(target_values)\n if len(list(filter(lambda v: not self._is_valid(v), self.target_values))) > 0:\n raise ValueError(\"Only allow bool or [0, 1]. target_values: [{}]\".format(target_values))\n self.target_values = [bool(v) for v in self.target_values]\n if len(self.target_values) == 0:\n raise ValueError('target_values is required. [{}]'.format(target_values))\n\n def __call__(self, output):\n if output.ndim != 1 and not (output.ndim == 2 and output.shape[1] == 1):\n raise ValueError(\"output shape must be (batch_size, 1), but was {}\".format(\n output.shape))\n output = tf.reshape(output, (-1, ))\n target_values = self.target_values\n if len(target_values) == 1 and len(target_values) < output.shape[0]:\n target_values = target_values * output.shape[0]\n score = [val if positive else 1.0 - val for val, positive in zip(output, target_values)]\n return score\n\n def _is_valid(self, value):\n return value in [False, True, 0, 1]\n\n\nclass CategoricalScore(Score):\n def __init__(self, indices):\n super().__init__('CategoricalScore')\n self.indices = listify(indices)\n if None in self.indices:\n raise ValueError(\"Can't accept None. indices: [{}]\".format(indices))\n if len(self.indices) == 0:\n raise ValueError('indices is required. [{}]'.format(indices))\n\n def __call__(self, output):\n if output.ndim < 2:\n raise ValueError(\"output ndims must be 2 or more (batch_size, ..., channels), \"\n \"but was {}\".format(output.ndim))\n if output.shape[-1] <= max(self.indices):\n raise ValueError(\"Invalid index value. indices: {}, output.shape: {}\".format(\n self.indices, output.shape))\n indices = self.indices\n if len(indices) == 1 and len(indices) < output.shape[0]:\n indices = indices * output.shape[0]\n score = [output[i, ..., index] for i, index in enumerate(indices)]\n score = tf.stack(score, axis=0)\n score = tf.math.reduce_mean(score, axis=tuple(range(score.ndim))[1:])\n return score\n",
"id": "3603947",
"language": "Python",
"matching_score": 1.0720773935317993,
"max_stars_count": 1,
"path": "tf_keras_vis/utils/scores.py"
},
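CategoricalScore above gathers output[i, ..., index] per sample (broadcasting a single index across the batch), and BinaryScore maps a 1-unit sigmoid output to val or 1 - val depending on the target. A small self-contained check of that behaviour:

import tensorflow as tf

from tf_keras_vis.utils.scores import BinaryScore, CategoricalScore

softmax_out = tf.constant([[0.1, 0.7, 0.2],
                           [0.6, 0.3, 0.1]])
print(CategoricalScore([1, 0])(softmax_out))   # per-sample class scores: 0.7 and 0.6
print(CategoricalScore(2)(softmax_out))        # one index broadcast over the batch: 0.2 and 0.1

sigmoid_out = tf.constant([[0.9], [0.2]])
print(BinaryScore([True, False])(sigmoid_out)) # 0.9 and 1 - 0.2 (returned as a list of scalars)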
{
"content": "import numpy as np\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom scipy.ndimage.interpolation import zoom\n\nfrom tf_keras_vis.gradcam import Gradcam\nfrom tf_keras_vis.utils import (is_mixed_precision, listify, standardize, zoom_factor)\n\n\nclass Scorecam(Gradcam):\n def __call__(self,\n score,\n seed_input,\n penultimate_layer=-1,\n seek_penultimate_conv_layer=True,\n activation_modifier=lambda cam: K.relu(cam),\n expand_cam=True,\n batch_size=32,\n max_N=None,\n training=False,\n standardize_cam=True):\n \"\"\"Generate score-weighted class activation maps (CAM)\n by using gradient-free visualization method.\n\n For details on Score-CAM, see the paper:\n [Score-CAM: Score-Weighted Visual Explanations for Convolutional Neural Networks ]\n (https://arxiv.org/pdf/1910.01279.pdf).\n\n # Arguments\n score: A score function. If the model has multiple outputs, you can use a different\n score on each output by passing a list of scores.\n seed_input: An N-dim Numpy array. If the model has multiple inputs,\n you have to pass a list of N-dim Numpy arrays.\n penultimate_layer: A number of integer or a tf.keras.layers.Layer object.\n seek_penultimate_conv_layer: True to seek the penultimate layter that is a subtype of\n `keras.layers.convolutional.Conv` class.\n If False, the penultimate layer is that was elected by penultimate_layer index.\n activation_modifier: A function to modify activations.\n expand_cam: True to expand cam to same as input image size.\n ![Note] Even if the model has multiple inputs, this function return only one cam\n value (That's, when `expand_cam` is True, multiple cam images are generated from\n a model that has multiple inputs).\n batch_size: Integer or None. Number of samples per batch.\n If unspecified, batch_size will default to 32.\n max_N: Integer or None. Setting None or under Zero is that we do NOT recommend,\n because it takes huge time. If not None and over Zero of Integer,\n run as Faster-ScoreCAM.\n Set larger number, need more time to visualize CAM but to be able to get\n clearer attention images.\n (see for details: https://github.com/tabayashi0117/Score-CAM#faster-score-cam)\n training: A bool whether the model's trainig-mode turn on or off.\n standardize_cam: A bool. 
If True(default), cam will be standardized.\n # Returns\n The heatmap image or a list of their images that indicate the `seed_input` regions\n whose change would most contribute the score value,\n # Raises\n ValueError: In case of invalid arguments for `score`, or `penultimate_layer`.\n \"\"\"\n\n # Preparing\n scores = self._get_scores_for_multiple_outputs(score)\n seed_inputs = self._get_seed_inputs_for_multiple_inputs(seed_input)\n penultimate_output_tensor = self._find_penultimate_output(penultimate_layer,\n seek_penultimate_conv_layer)\n # Processing score-cam\n penultimate_output = tf.keras.Model(inputs=self.model.inputs,\n outputs=penultimate_output_tensor)(seed_inputs,\n training=training)\n if is_mixed_precision(self.model):\n penultimate_output = tf.cast(penultimate_output, self.model.variable_dtype)\n\n # For efficiently visualizing, extract maps that has a large variance.\n # This excellent idea is devised by tabayashi0117.\n # (see for details: https://github.com/tabayashi0117/Score-CAM#faster-score-cam)\n if max_N is not None and max_N > -1:\n activation_map_std = tf.math.reduce_std(penultimate_output,\n axis=tuple(\n range(penultimate_output.ndim)[1:-1]),\n keepdims=True)\n _, top_k_indices = tf.math.top_k(activation_map_std, max_N)\n top_k_indices, _ = tf.unique(tf.reshape(top_k_indices, (-1, )))\n penultimate_output = tf.gather(penultimate_output, top_k_indices, axis=-1)\n channels = penultimate_output.shape[-1]\n\n # Upsampling activation-maps\n input_shapes = [seed_input.shape for seed_input in seed_inputs]\n factors = (zoom_factor(penultimate_output.shape[:-1], input_shape[:-1])\n for input_shape in input_shapes)\n upsampled_activation_maps = [zoom(penultimate_output, factor + (1, )) for factor in factors]\n map_shapes = [activation_map.shape for activation_map in upsampled_activation_maps]\n\n # Normalizing activation-maps\n min_activation_maps = (np.min(activation_map,\n axis=tuple(range(activation_map.ndim)[1:-1]),\n keepdims=True)\n for activation_map in upsampled_activation_maps)\n max_activation_maps = (np.max(activation_map,\n axis=tuple(range(activation_map.ndim)[1:-1]),\n keepdims=True)\n for activation_map in upsampled_activation_maps)\n normalized_activation_maps = (\n (activation_map - min_activation_map) /\n (max_activation_map - min_activation_map + K.epsilon())\n for activation_map, min_activation_map, max_activation_map in zip(\n upsampled_activation_maps, min_activation_maps, max_activation_maps))\n\n # Masking inputs\n input_tile_axes = ((map_shape[-1], ) + tuple(np.ones(len(input_shape), np.int))\n for input_shape, map_shape in zip(input_shapes, map_shapes))\n mask_templates = (np.tile(seed_input, axes)\n for seed_input, axes in zip(seed_inputs, input_tile_axes))\n map_transpose_axes = ((len(map_shape) - 1, ) + tuple(range(len(map_shape))[:-1])\n for map_shape in map_shapes)\n masks = (np.transpose(activation_map,\n transpose_axis) for activation_map, transpose_axis in zip(\n normalized_activation_maps, map_transpose_axes))\n map_tile_axes = (tuple(np.ones(len(map_shape), np.int)) + (input_shape[-1], )\n for input_shape, map_shape in zip(input_shapes, map_shapes))\n masks = (np.tile(np.expand_dims(activation_map, axis=-1), tile_axis)\n for activation_map, tile_axis in zip(masks, map_tile_axes))\n masked_seed_inputs = (mask_template * mask\n for mask_template, mask in zip(mask_templates, masks))\n masked_seed_inputs = [\n np.reshape(masked_seed_input, (-1, ) + masked_seed_input.shape[2:])\n for masked_seed_input in masked_seed_inputs\n ]\n\n # 
Predicting masked seed-inputs\n preds = self.model.predict(masked_seed_inputs, batch_size=batch_size)\n preds = (np.reshape(prediction, (channels, -1, prediction.shape[-1]))\n for prediction in listify(preds))\n\n # Calculating weights\n weights = ([score(p) for p in prediction] for score, prediction in zip(scores, preds))\n weights = (np.array(w, dtype=np.float32) for w in weights)\n weights = (np.reshape(w, (penultimate_output.shape[0], -1, channels)) for w in weights)\n weights = (np.mean(w, axis=1) for w in weights)\n weights = np.array(list(weights), dtype=np.float32)\n weights = np.sum(weights, axis=0)\n\n # Generate cam\n cam = K.batch_dot(penultimate_output, weights)\n if activation_modifier is not None:\n cam = activation_modifier(cam)\n\n if not expand_cam:\n if standardize_cam:\n cam = standardize(cam)\n return cam\n\n factors = (zoom_factor(cam.shape, X.shape) for X in seed_inputs)\n cam = [zoom(cam, factor) for factor in factors]\n if standardize_cam:\n cam = [standardize(x) for x in cam]\n if len(self.model.inputs) == 1 and not isinstance(seed_input, list):\n cam = cam[0]\n return cam\n\n\nScoreCAM = Scorecam\n",
"id": "12292288",
"language": "Python",
"matching_score": 3.256819248199463,
"max_stars_count": 1,
"path": "tf_keras_vis/scorecam.py"
},
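The Scorecam entry above exposes the full `__call__` signature, so a minimal usage sketch can be written against it. The toy model, the `replace_softmax_with_linear` modifier, and the assumption that `CategoricalScore` (defined in `tf_keras_vis/utils/scores.py`, referenced later in this listing) takes a target class index are illustrative assumptions, not code from the repository.

# Illustrative sketch only: a tiny stand-in classifier plus a Score-CAM call that
# follows the Scorecam.__call__ signature shown above.
import numpy as np
import tensorflow as tf
from tf_keras_vis.scorecam import Scorecam
from tf_keras_vis.utils.scores import CategoricalScore  # assumed: takes a class index

# Toy model; any tf.keras model containing a Conv layer works the same way.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, activation='relu', input_shape=(32, 32, 3)),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(4, activation='softmax'),
])

def replace_softmax_with_linear(cloned):
    # Common model_modifier pattern: visualize pre-softmax logits.
    cloned.layers[-1].activation = tf.keras.activations.linear
    return cloned

images = np.random.uniform(0, 255, (1, 32, 32, 3)).astype(np.float32)
scorecam = Scorecam(model, model_modifier=replace_softmax_with_linear, clone=True)
cam = scorecam(CategoricalScore(1),   # score of class index 1
               images,
               penultimate_layer=-1,  # the last Conv layer is sought automatically
               max_N=4)               # Faster-ScoreCAM: keep the 4 highest-variance maps
print(cam.shape)                      # one heatmap per input image, e.g. (1, 32, 32)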
{
"content": "from abc import ABC, abstractmethod\n\nimport tensorflow as tf\n\nfrom tf_keras_vis.utils import listify\n\n\nclass ModelVisualization(ABC):\n \"\"\"Visualization class for Keras models.\n \"\"\"\n def __init__(self, model, model_modifier=None, clone=True):\n \"\"\"Create Visualization class instance that analize the model for debugging.\n\n # Arguments\n model: The `tf.keras.Model` instance. When `model_modifier` is NOT None,\n This model will be cloned by `tf.keras.models.clone_model` function\n and then will be modified by `model_modifier` according to needs.\n model_modifier: A function that modify `model` instance. For example, in\n ActivationMaximization usually, this function is used to replace the softmax\n function that was applied to the model outputs.\n clone: A bool. When False, the model won't be cloned. Note that, although when True,\n the model won't be clone if `model_modifier` is None.\n \"\"\"\n self.model = model\n if model_modifier is not None:\n if clone:\n self.model = tf.keras.models.clone_model(self.model)\n self.model.set_weights(model.get_weights())\n new_model = model_modifier(self.model)\n if new_model is not None:\n self.model = new_model\n\n @abstractmethod\n def __call__(self):\n \"\"\"Analize the model.\n\n # Returns\n Results of analizing the model.\n \"\"\"\n raise NotImplementedError()\n\n def _get_scores_for_multiple_outputs(self, score):\n scores = listify(score)\n if len(scores) == 1 and len(scores) < len(self.model.outputs):\n scores = scores * len(self.model.outputs)\n for score in scores:\n if not callable(score):\n raise ValueError('Score object must be callable! [{}]'.format(score))\n if len(scores) != len(self.model.outputs):\n raise ValueError(('The model has {} outputs, '\n 'but the number of score-functions you passed is {}.').format(\n len(self.model.outputs), len(scores)))\n return scores\n\n def _get_seed_inputs_for_multiple_inputs(self, seed_input):\n seed_inputs = listify(seed_input)\n if len(seed_inputs) != len(self.model.inputs):\n raise ValueError(('The model has {} inputs, '\n 'but the number of seed-inputs tensors you passed is {}.').format(\n len(self.model.inputs), len(seed_inputs)))\n seed_inputs = (x if tf.is_tensor(x) else tf.constant(x) for x in seed_inputs)\n seed_inputs = (tf.expand_dims(x, axis=0) if len(x.shape) == len(tensor.shape[1:]) else x\n for x, tensor in zip(seed_inputs, self.model.inputs))\n seed_inputs = list(seed_inputs)\n for i, (x, tensor) in enumerate(zip(seed_inputs, self.model.inputs)):\n if len(x.shape) != len(tensor.shape):\n raise ValueError((\"seed_input's shape is invalid. model-input index: {},\"\n \" model-input shape: {},\"\n \" seed_input shape: {}.\".format(i, tensor.shape, x.shape)))\n return seed_inputs\n\n def _calculate_scores(self, outputs, score_functions):\n score_values = (func(output) for output, func in zip(outputs, score_functions))\n score_values = (self._mean_score_value(score) for score in score_values)\n score_values = list(score_values)\n return score_values\n\n def _mean_score_value(self, score):\n if not tf.is_tensor(score):\n if type(score) in [list, tuple]:\n if len(score) > 0 and tf.is_tensor(score[0]):\n score = tf.stack(score, axis=0)\n else:\n score = tf.constant(score)\n else:\n score = tf.constant(score)\n score = tf.math.reduce_mean(score, axis=tuple(range(score.ndim))[1:])\n return score\n",
"id": "5979078",
"language": "Python",
"matching_score": 2.8243353366851807,
"max_stars_count": 1,
"path": "tf_keras_vis/__init__.py"
},
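ModelVisualization is the abstract base that the visualizers in this listing build on. The toy concrete subclass below is a sketch (not repository code) illustrating the contract: implement `__call__` and reuse the validation helpers defined above.

# Toy subclass sketch: evaluates the score functions on the raw model outputs.
from tf_keras_vis import ModelVisualization
from tf_keras_vis.utils import listify


class RawScores(ModelVisualization):
    def __call__(self, score, seed_input, training=False):
        scores = self._get_scores_for_multiple_outputs(score)                # one callable per output
        seed_inputs = self._get_seed_inputs_for_multiple_inputs(seed_input)  # adds the batch dim if missing
        outputs = listify(self.model(seed_inputs, training=training))
        return self._calculate_scores(outputs, scores)                       # mean score per output

# Usage would mirror the other visualizers, e.g. RawScores(model)(score_fn, images).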
{
"content": "import warnings\nfrom collections import defaultdict\n\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nfrom packaging.version import parse as version\n\nfrom tf_keras_vis import ModelVisualization\nfrom tf_keras_vis.utils import (check_steps, is_mixed_precision, listify, lower_precision_dtype)\nfrom tf_keras_vis.utils.input_modifiers import Jitter, Rotate\nfrom tf_keras_vis.utils.regularizers import Norm, TotalVariation2D\n\nif version(tf.version.VERSION) >= version(\"2.4.0\"):\n from tensorflow.keras.mixed_precision import LossScaleOptimizer\n\n\nclass ActivationMaximization(ModelVisualization):\n def __call__(\n self,\n score,\n seed_input=None,\n input_range=(0, 255),\n input_modifiers=[Jitter(jitter=8), Rotate(degree=3)],\n regularizers=[TotalVariation2D(weight=1.),\n Norm(weight=1., p=2)],\n steps=200,\n optimizer=None, # When None, the default is tf.optimizers.RMSprop(1.0, 0.95)\n normalize_gradient=None, # Disabled option.\n gradient_modifier=None,\n callbacks=None,\n training=False,\n unconnected_gradients=tf.UnconnectedGradients.NONE):\n \"\"\"Generate the model inputs that maximize the output of the given `score` functions.\n\n # Arguments\n score: A score function or a list of score functions.\n If the model has multiple outputs, you can use a different function\n on each output by passing a list of functions. The score value that will be\n maximized will be the mean of all individual score functions\n (and sum of all regularization values).\n seed_input: `None`(default), an N-dim Numpy array or a list of N-dim Numpy arrays.\n When `None`, the seed_input value will be generated with randome uniform noise.\n If the model has multiple inputs, you have to pass a list of N-dim Numpy arrays.\n input_range: A tuple that specifies the input range as a `(min, max)` tuple\n or a list of the tuple. If the model has multiple inputs, you can use\n a different input range on each input by passing as list of input ranges.\n When `None` or a `(None, None)` tuple, the range of a input value\n (i.e., the result of this function) will be no applied any limitation.\n input_modifiers: A input modifier function, a list of input modifier functions,\n or a dictionary that has a list of input_modifiers functions.\n You can also use a instance of `tf_keras-vis.utils.input_modifiers.InputModifier`'s\n subclass, instead of a function. If the model has multiple inputs, you have to pass\n a dictionary of input modifier functions or instances on each model inputs:\n such as `input_modifiers={'input_a': [ input_modifier_a_1, input_modifier_a_2 ],\n 'input_b': input_modifier_b, ... }`.\n regularizers: A regularization function or a list of regularization functions. You can\n also use a instance of `tf_keras-vis.utils.regularizers.Regularizer`'s subclass,\n instead of a function. A regularization value will be calculated with\n a corresponding model input will add to the score value.\n steps: The number of gradient descent iterations.\n optimizer: A `tf.optimizers.Optimizer` instance.\n normalize_gradient: Note! This option is now disabled.\n gradient_modifier: A function to modify gradients. This function is executed before\n normalizing gradients.\n callbacks: A `tf_keras_vis.activation_maimization.callbacks.Callback` instance\n or a list of them.\n training: A bool whether the model's trainig-mode turn on or off.\n unconnected_gradients: Specifies the gradient value returned when the given input\n tensors are unconnected. 
Accepted values are constants defined in the class\n `tf.UnconnectedGradients` and the default value is NONE.\n # Returns\n An Numpy arrays when the model has a single input and `seed_input` is None or An N-dim\n Numpy Array, Or a list of Numpy arrays when otherwise.\n # Raises\n ValueError: In case of invalid arguments for `score`, `input_range`, `input_modifiers`\n or `regularizers`.\n \"\"\"\n if normalize_gradient is not None:\n warnings.warn(\n ('`normalize_gradient` option of ActivationMaximization#__call__() is disabled.,'\n ' And this will be removed in future.'), DeprecationWarning)\n\n # Check model\n mixed_precision_model = is_mixed_precision(self.model)\n\n # optimizer\n optimizer = self._get_optimizer(optimizer, mixed_precision_model)\n\n # scores\n scores = self._get_scores_for_multiple_outputs(score)\n\n # Get initial seed-inputs\n input_ranges = self._get_input_ranges(input_range)\n seed_inputs = self._get_seed_inputs(seed_input, input_ranges)\n\n # input_modifiers\n input_modifiers = self._get_input_modifiers(input_modifiers)\n\n # regularizers\n regularizers = self._get_regularizers(regularizers)\n\n callbacks = listify(callbacks)\n for callback in callbacks:\n callback.on_begin()\n\n for i in range(check_steps(steps)):\n # Apply input modifiers\n for j, name in enumerate(self.model.input_names):\n for modifier in input_modifiers[name]:\n seed_inputs[j] = modifier(seed_inputs[j])\n\n if mixed_precision_model:\n seed_inputs = (tf.cast(X, dtype=lower_precision_dtype(self.model))\n for X in seed_inputs)\n seed_inputs = [tf.Variable(X) for X in seed_inputs]\n\n # Calculate gradients\n with tf.GradientTape(watch_accessed_variables=False) as tape:\n tape.watch(seed_inputs)\n outputs = self.model(seed_inputs, training=training)\n outputs = listify(outputs)\n score_values = self._calculate_scores(outputs, scores)\n # Calculate regularization values\n regularizations = [(regularizer.name, regularizer(seed_inputs))\n for regularizer in regularizers]\n regularized_score_values = [\n (-1. 
* score_value) + sum([v for _, v in regularizations])\n for score_value in score_values\n ]\n if mixed_precision_model:\n regularized_score_values = [\n optimizer.get_scaled_loss(score_value)\n for score_value in regularized_score_values\n ]\n grads = tape.gradient(regularized_score_values,\n seed_inputs,\n unconnected_gradients=unconnected_gradients)\n grads = listify(grads)\n if mixed_precision_model:\n grads = optimizer.get_unscaled_gradients(grads)\n if gradient_modifier is not None:\n grads = (gradient_modifier(g) for g in grads)\n optimizer.apply_gradients(zip(grads, seed_inputs))\n\n for callback in callbacks:\n callback(i,\n self._apply_clip(seed_inputs, input_ranges),\n grads,\n score_values,\n outputs,\n regularizations=regularizations,\n overall_score=regularized_score_values)\n\n for callback in callbacks:\n callback.on_end()\n\n cliped_value = self._apply_clip(seed_inputs, input_ranges)\n if len(self.model.inputs) == 1 and (seed_input is None or not isinstance(seed_input, list)):\n cliped_value = cliped_value[0]\n\n return cliped_value\n\n def _get_optimizer(self, optimizer, mixed_precision_model):\n if optimizer is None:\n optimizer = tf.optimizers.RMSprop(1.0, 0.95)\n if mixed_precision_model:\n try:\n # Wrap optimizer\n optimizer = LossScaleOptimizer(optimizer)\n except ValueError as e:\n raise ValueError(\n (\"The same `optimizer` instance should be NOT used twice or more.\"\n \" You can be able to avoid this error by creating new optimizer instance\"\n \" each calling __call__().\")) from e\n return optimizer\n\n def _get_input_ranges(self, input_range):\n input_ranges = listify(input_range,\n return_empty_list_if_none=False,\n convert_tuple_to_list=False)\n if len(input_ranges) == 1 and len(self.model.inputs) > 1:\n input_ranges = input_ranges * len(self.model.inputs)\n input_ranges = [(None, None) if r is None else r for r in input_ranges]\n for i, r in enumerate(input_ranges):\n if len(r) != 2:\n raise ValueError(\n 'The length of input range tuple must be 2 (Or it is just `None`, not tuple), '\n 'but you passed {} as `input_ranges[{}]`.'.format(r, i))\n return input_ranges\n\n def _get_seed_inputs(self, seed_inputs, input_ranges):\n # Prepare seed_inputs\n if seed_inputs is None or len(seed_inputs) == 0:\n # Replace None to 0.0-1.0 or any properly value\n input_ranges = ((0., 1.) 
if low is None and high is None else (low, high)\n for low, high in input_ranges)\n input_ranges = ((high - np.abs(high / 2.0), high) if low is None else (low, high)\n for low, high in input_ranges)\n input_ranges = ((low, low + np.abs(low * 2.0)) if high is None else (low, high)\n for low, high in input_ranges)\n input_ranges = list(input_ranges)\n # Prepare input_shape\n input_shapes = (input_tensor.shape[1:] for input_tensor in self.model.inputs)\n # Generae seed-inputs\n seed_inputs = (tf.random.uniform(shape, low, high)\n for (low, high), shape in zip(input_ranges, input_shapes))\n else:\n seed_inputs = listify(seed_inputs)\n # Convert numpy to tf-tensor\n seed_inputs = (tf.constant(X, dtype=input_tensor.dtype)\n for X, input_tensor in zip(seed_inputs, self.model.inputs))\n # Do expand_dims when tensor doesn't have the dim for samples\n seed_inputs = (tf.expand_dims(X, axis=0) if len(X.shape) < len(input_tensor.shape) else X\n for X, input_tensor in zip(seed_inputs, self.model.inputs))\n seed_inputs = list(seed_inputs)\n if len(seed_inputs) != len(self.model.inputs):\n raise ValueError(\n (\"The lengths of seed_inputs and model's inputs don't match.\"\n \" seed_inputs: {}, model's inputs: {}\").format(len(seed_inputs),\n len(self.model.inputs)))\n return seed_inputs\n\n def _get_input_modifiers(self, input_modifier):\n input_modifiers = self._get_dict(input_modifier, keys=self.model.input_names)\n if len(input_modifiers) != len(self.model.inputs):\n raise ValueError('The model has {} inputs, but you passed {} as input_modifiers. '\n 'When the model has multiple inputs, '\n 'you must pass a dictionary as input_modifiers.'.format(\n len(self.model.inputs), input_modifier))\n return input_modifiers\n\n def _get_regularizers(self, regularizer):\n regularizers = listify(regularizer)\n return regularizers\n\n def _get_dict(self, values, keys):\n if isinstance(values, dict):\n _values = defaultdict(list, values)\n for key in keys:\n _values[key] = listify(_values[key])\n else:\n _values = defaultdict(list)\n values = listify(values)\n for k in keys:\n _values[k] = values\n return _values\n\n def _apply_clip(self, seed_inputs, input_ranges):\n input_ranges = [(input_tensor.dtype.min if low is None else low,\n input_tensor.dtype.max if high is None else high)\n for (low, high), input_tensor in zip(input_ranges, self.model.inputs)]\n clipped_values = (np.array(K.clip(X, low, high))\n for X, (low, high) in zip(seed_inputs, input_ranges))\n clipped_values = [\n X.astype(np.int) if isinstance(t, int) else X.astype(np.float)\n for X, (t, _) in zip(clipped_values, input_ranges)\n ]\n return clipped_values\n",
"id": "6979697",
"language": "Python",
"matching_score": 3.1958727836608887,
"max_stars_count": 1,
"path": "tf_keras_vis/activation_maximization/__init__.py"
},
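A usage sketch for ActivationMaximization that follows the `__call__` signature above. The toy model, the class index passed to `CategoricalScore`, and the step count are assumptions for illustration; note that a `model_modifier` returning None keeps the modified clone, as `ModelVisualization.__init__` documents.

# Illustrative sketch: maximize the pre-softmax score of class 0 for a toy model.
import tensorflow as tf
from tf_keras_vis.activation_maximization import ActivationMaximization
from tf_keras_vis.utils.scores import CategoricalScore        # assumed: takes a class index
from tf_keras_vis.utils.regularizers import TotalVariation2D, Norm

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, activation='relu', input_shape=(32, 32, 3)),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(4, activation='softmax'),
])

activation_maximization = ActivationMaximization(
    model,
    # Returning None keeps the modified clone (see ModelVisualization.__init__ above).
    model_modifier=lambda m: setattr(m.layers[-1], 'activation', tf.keras.activations.linear),
    clone=True)

# seed_input=None starts from random uniform noise inside input_range.
images = activation_maximization(CategoricalScore(0),
                                 steps=64,
                                 input_range=(0, 255),
                                 regularizers=[TotalVariation2D(weight=1.0),
                                               Norm(weight=0.5, p=2)])
print(images.shape)  # (1, 32, 32, 3)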
{
"content": "from abc import ABC, abstractmethod\n\nimport numpy as np\nimport tensorflow as tf\nfrom scipy.ndimage import rotate\n\n\nclass InputModifier(ABC):\n \"\"\"Abstract class for defining an input modifier.\n \"\"\"\n @abstractmethod\n def __call__(self, seed_input):\n \"\"\"Implement modification to the input before processing gradient descent.\n\n # Arguments:\n seed_input: A tf.Tensor.\n # Returns:\n The modified `seed_input`.\n \"\"\"\n raise NotImplementedError()\n\n\nclass Jitter(InputModifier):\n def __init__(self, jitter=8):\n \"\"\"Implements an input modifier that introduces random jitter.\n Jitter has been shown to produce crisper activation maximization images.\n\n # Arguments:\n jitter: Integer. The amount of jitter to apply.\n \"\"\"\n self.jitter = int(jitter)\n\n def __call__(self, seed_input):\n ndim = len(seed_input.shape)\n seed_input = tf.roll(seed_input,\n shift=tuple(np.random.randint(-self.jitter, self.jitter, ndim - 2)),\n axis=tuple(range(ndim)[1:-1]))\n return seed_input\n\n\nclass Rotate(InputModifier):\n def __init__(self, degree=3.):\n \"\"\"Implements an input modifier that introduces random rotation.\n Rotate has been shown to produce crisper activation maximization images.\n\n # Arguments:\n degree: Integer or float. The amount of rotation to apply.\n \"\"\"\n self.rg = float(degree)\n\n def __call__(self, seed_input):\n if tf.is_tensor(seed_input):\n seed_input = seed_input.numpy()\n if seed_input.dtype == np.float16:\n seed_input = seed_input.astype(np.float32)\n seed_input = rotate(seed_input,\n np.random.uniform(-self.rg, self.rg),\n axes=tuple(range(len(seed_input.shape))[1:-1]),\n reshape=False,\n mode='nearest',\n order=1,\n prefilter=True)\n return tf.constant(seed_input)\n",
"id": "12205423",
"language": "Python",
"matching_score": 1.043087124824524,
"max_stars_count": 1,
"path": "tf_keras_vis/utils/input_modifiers.py"
},
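Since Jitter and Rotate are ordinary callables, they can be exercised directly on a dummy batch; the short sketch below (illustrative values) shows the axes they act on.

# Illustrative check of the input modifiers on a dummy image batch.
import numpy as np
import tensorflow as tf
from tf_keras_vis.utils.input_modifiers import Jitter, Rotate

batch = tf.constant(np.random.uniform(0, 255, (2, 16, 16, 3)).astype(np.float32))
batch = Jitter(jitter=4)(batch)   # random roll along the spatial axes (1, 2)
batch = Rotate(degree=5.)(batch)  # random rotation in the spatial plane, 'nearest' fill
print(batch.shape)                # TensorShape([2, 16, 16, 3])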
{
"content": "from contextlib import contextmanager\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.layers import (Conv2D, Dense, GlobalAveragePooling2D,\n Input)\nfrom tensorflow.keras.models import Model\n\nfrom tf_keras_vis.activation_maximization.callbacks import Callback\nfrom tf_keras_vis.utils.input_modifiers import InputModifier\nfrom tf_keras_vis.utils.regularizers import Regularizer\nfrom tf_keras_vis.utils.scores import Score\n\n\ndef mock_dense_model():\n inputs = Input((3, ), name='input-1')\n x = Dense(5, activation='relu', name='dense-1')(inputs)\n x = Dense(2, activation='softmax', name='dense-2')(x)\n return Model(inputs=inputs, outputs=x)\n\n\ndef mock_conv_model_with_sigmoid_output():\n inputs = Input((8, 8, 3), name='input-1')\n x = Conv2D(6, 3, activation='relu', name='conv-1')(inputs)\n x = GlobalAveragePooling2D()(x)\n x = Dense(1, activation='sigmoid', name='dense-1')(x)\n return Model(inputs=inputs, outputs=x)\n\n\ndef mock_conv_model():\n inputs = Input((8, 8, 3), name='input-1')\n x = Conv2D(6, 3, activation='relu', name='conv-1')(inputs)\n x = GlobalAveragePooling2D()(x)\n x = Dense(2, activation='softmax', name='dense-1')(x)\n return Model(inputs=inputs, outputs=x)\n\n\ndef mock_multiple_inputs_model():\n input_1 = Input((8, 8, 3), name='input-1')\n input_2 = Input((10, 10, 3), name='input-2')\n x1 = Conv2D(6, 3, padding='same', activation='relu', name='conv-1')(input_1)\n x2 = Conv2D(6, 3, activation='relu', name='conv-2')(input_2)\n x = K.concatenate([x1, x2], axis=-1)\n x = GlobalAveragePooling2D()(x)\n x = Dense(2, activation='softmax', name='dense-1')(x)\n return Model(inputs=[input_1, input_2], outputs=x)\n\n\ndef mock_multiple_outputs_model():\n inputs = Input((8, 8, 3), name='input-1')\n x = Conv2D(6, 3, activation='relu', name='conv-1')(inputs)\n x = GlobalAveragePooling2D()(x)\n x1 = Dense(2, activation='softmax', name='dense-1')(x)\n x2 = Dense(1, name='dense-2')(x)\n return Model(inputs=inputs, outputs=[x1, x2])\n\n\ndef mock_multiple_io_model():\n input_1 = Input((8, 8, 3), name='input-1')\n input_2 = Input((10, 10, 3), name='input-2')\n x1 = Conv2D(6, 3, padding='same', activation='relu', name='conv-1')(input_1)\n x2 = Conv2D(6, 3, activation='relu', name='conv-2')(input_2)\n x = K.concatenate([x1, x2], axis=-1)\n x = GlobalAveragePooling2D()(x)\n x1 = Dense(2, activation='softmax', name='dense-1')(x)\n x2 = Dense(1, name='dense-2')(x)\n return Model(inputs=[input_1, input_2], outputs=[x1, x2])\n\n\ndef mock_conv_model_with_flot32_output():\n inputs = Input((8, 8, 3), name='input-1')\n x = Conv2D(6, 3, activation='relu', name='conv-1')(inputs)\n x = GlobalAveragePooling2D()(x)\n x = Dense(2, dtype=tf.float32, activation='softmax', name='dense-1')(x)\n return Model(inputs=inputs, outputs=x)\n\n\ndef dummy_sample(shape, dtype=np.float32):\n length = np.prod(shape)\n values = np.array(list(range(length)))\n values = np.reshape(values, shape)\n values = values.astype(dtype)\n return values\n\n\n@contextmanager\ndef does_not_raise():\n yield\n\n\nclass MockCallback(Callback):\n def __init__(self):\n self.on_begin_was_called = False\n self.on_call_was_called = False\n self.on_end_was_called = False\n\n def on_begin(self):\n self.on_begin_was_called = True\n\n def __call__(self, i, values, grads, losses, model_outpus, **kwargs):\n self.on_call_was_called = True\n\n def on_end(self):\n self.on_end_was_called = True\n\n\nclass MockInputModifier(InputModifier):\n def __init__(self):\n 
self.seed_input = None\n\n def __call__(self, seed_input):\n self.seed_input = seed_input\n return seed_input\n\n\nclass MockScore(Score):\n def __init__(self, name='noname'):\n self.name = name\n self.output = None\n\n def __call__(self, output):\n self.output = output\n return output\n\n\nclass MockTupleOfScore(Score):\n def __init__(self, name='noname'):\n self.name = name\n self.output = None\n\n def __call__(self, output):\n self.output = output\n return tuple(o for o in output)\n\n\nclass MockListOfScore(Score):\n def __init__(self, name='noname'):\n self.name = name\n self.output = None\n\n def __call__(self, output):\n self.output = output\n return list(o for o in output)\n\n\nclass MockRegularizer(Regularizer):\n def __init__(self, name='noname'):\n self.name = name\n self.inputs = None\n\n def __call__(self, inputs):\n self.inputs = inputs\n return inputs\n\n\nclass MockGradientModifier():\n def __init__(self):\n self.gradients = None\n\n def __call__(self, gradients):\n self.gradients = gradients\n return gradients\n",
"id": "2750561",
"language": "Python",
"matching_score": 3.6895275115966797,
"max_stars_count": 1,
"path": "tf_keras_vis/utils/test.py"
},
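The helpers above are meant to be combined in tests; the following is an assumed sketch of a typical smoke test, mirroring the callback tests further below in this listing.

# Sketch of a pytest-style smoke test built from the helpers above.
from tf_keras_vis.activation_maximization import ActivationMaximization
from tf_keras_vis.utils.test import MockScore, dummy_sample, mock_conv_model


def test_activation_maximization_smoke():
    model = mock_conv_model()                    # Input (8, 8, 3) -> softmax over 2 classes
    am = ActivationMaximization(model)
    result = am(MockScore(), seed_input=dummy_sample((1, 8, 8, 3)), steps=1)
    assert result.shape == (1, 8, 8, 3)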
{
"content": "import pytest\n\nfrom tf_keras_vis.utils.test import (mock_conv_model,\n mock_conv_model_with_sigmoid_output,\n mock_dense_model,\n mock_multiple_inputs_model,\n mock_multiple_io_model,\n mock_multiple_outputs_model)\n\n\n@pytest.fixture\ndef dense_model():\n return mock_dense_model()\n\n\n@pytest.fixture\ndef conv_model():\n return mock_conv_model()\n\n\n@pytest.fixture\ndef conv_sigmoid_model():\n return mock_conv_model_with_sigmoid_output()\n\n\n@pytest.fixture\ndef multiple_inputs_model():\n return mock_multiple_inputs_model()\n\n\n@pytest.fixture\ndef multiple_outputs_model():\n return mock_multiple_outputs_model()\n\n\n@pytest.fixture\ndef multiple_io_model():\n return mock_multiple_io_model()\n",
"id": "2466013",
"language": "Python",
"matching_score": 0.40894120931625366,
"max_stars_count": 1,
"path": "tests/tf-keras-vis/conftest.py"
},
{
"content": "from setuptools import find_packages, setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"tf-keras-vis\",\n version=\"0.6.1\",\n author=\"keisen\",\n author_email=\"<EMAIL>\",\n description=\"Neural network visualization toolkit for tf.keras\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/keisen/tf-keras-vis\",\n packages=find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6, <3.10',\n install_requires=['scipy', 'pillow', 'deprecated', 'imageio', 'packaging'],\n extras_require={\n 'develop': ['flake8', 'isort', 'yapf', 'pytest', 'pytest-pycodestyle', 'pytest-cov'],\n 'examples': ['jupyterlab==2.*', 'jedi==0.17.*', 'matplotlib'],\n },\n include_package_data=True,\n)\n",
"id": "8779725",
"language": "Python",
"matching_score": 0.8880765438079834,
"max_stars_count": 1,
"path": "setup.py"
},
{
"content": "from abc import ABC\n\nimport imageio\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image, ImageDraw, ImageFont\n\n\nclass Callback(ABC):\n \"\"\"Abstract class for defining callbacks.\n \"\"\"\n def on_begin(self):\n \"\"\"Called at the begin of optimization process.\n \"\"\"\n pass\n\n def __call__(self, i, values, grads, scores, model_outputs, **kwargs):\n \"\"\"This function will be called within\n `tf_keras_vis.activation_maximization.ActivationMaximization` instance.\n\n # Arguments:\n i: The optimizer iteration.\n values: The current `values`.\n grads: The gradient of input images with respect to `values`.\n scores: A list of score values with respect to each the model outputs.\n model_outputs: A list of the model outputs.\n kwargs: Optional named arguments that will be used different ways by each\n `tf_keras_vis.activation_maximization.ActivationMaximization`.\n \"\"\"\n pass\n\n def on_end(self):\n \"\"\"Called at the end of optimization process.\n \"\"\"\n pass\n\n\nclass PrintLogger(Callback):\n \"\"\"Callback to print values during optimization.\n \"\"\"\n def __init__(self, interval=10):\n \"\"\"\n # Arguments:\n interval: An integer that appears the interval of printing.\n \"\"\"\n self.interval = interval\n\n def __call__(self, i, values, grads, scores, model_outputs, **kwargs):\n i += 1\n if (i % self.interval == 0):\n tf.print('Steps: {:03d}\\tScores: {},\\tRegularizations: {}'.format(\n i, self._tolist(scores), self._tolist(kwargs['regularizations'])))\n\n def _tolist(self, ary):\n if isinstance(ary, list) or isinstance(ary, (np.ndarray, np.generic)):\n return [self._tolist(e) for e in ary]\n elif isinstance(ary, tuple):\n return tuple(self._tolist(e) for e in ary)\n elif tf.is_tensor(ary):\n return ary.numpy().tolist()\n else:\n return ary\n\n\nclass GifGenerator2D(Callback):\n \"\"\"Callback to construct gif of optimized image.\n \"\"\"\n def __init__(self, path):\n \"\"\"\n # Arguments:\n path: The file path to save gif.\n \"\"\"\n self.path = path\n\n def on_begin(self):\n self.data = None\n\n def __call__(self, i, values, grads, scores, model_outputs, **kwargs):\n if self.data is None:\n self.data = [[] for i in range(len(values))]\n for n, value in enumerate(values):\n img = Image.fromarray(value[0].astype(np.uint8)) # 1st image in a batch\n ImageDraw.Draw(img).text((10, 10),\n \"Step {}\".format(i + 1),\n font=ImageFont.load_default())\n self.data[n].append(np.asarray(img))\n\n def on_end(self):\n path = self.path if self.path.endswith('.gif') else '{}.gif'.format(self.path)\n for i in range(len(self.data)):\n writer = None\n try:\n writer = imageio.get_writer(path, mode='I', loop=0)\n for data in self.data[i]:\n writer.append_data(data)\n finally:\n if writer is not None:\n writer.close()\n",
"id": "7830574",
"language": "Python",
"matching_score": 3.5795235633850098,
"max_stars_count": 1,
"path": "tf_keras_vis/activation_maximization/callbacks.py"
},
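A sketch of how the callbacks above are attached to an ActivationMaximization run; the toy model, class index, and gif file name are assumptions, and the pattern mirrors the callback tests later in this listing.

# Illustrative sketch: log progress and record the optimization as a gif.
import tensorflow as tf
from tf_keras_vis.activation_maximization import ActivationMaximization
from tf_keras_vis.activation_maximization.callbacks import GifGenerator2D, PrintLogger
from tf_keras_vis.utils.scores import CategoricalScore  # assumed: takes a class index

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, activation='relu', input_shape=(32, 32, 3)),
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(4, activation='softmax'),
])

activation_maximization = ActivationMaximization(model)
result = activation_maximization(CategoricalScore(0),
                                 steps=50,
                                 callbacks=[PrintLogger(interval=10),            # print scores every 10 steps
                                            GifGenerator2D('am_progress.gif')])  # one frame per step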
{
"content": "import os\n\nfrom tf_keras_vis.activation_maximization import ActivationMaximization\nfrom tf_keras_vis.activation_maximization.callbacks import (GifGenerator2D,\n PrintLogger)\nfrom tf_keras_vis.utils.test import MockScore\n\n\nclass TestPrintLogger():\n def test__init__(self):\n interval = 999\n logger = PrintLogger(interval)\n assert logger.interval == interval\n\n def test__call__(self, conv_model):\n activation_maximization = ActivationMaximization(conv_model)\n result = activation_maximization(MockScore(), steps=1, callbacks=PrintLogger(1))\n assert result.shape == (1, 8, 8, 3)\n\n def test__call__without_regularizations(self, conv_model):\n activation_maximization = ActivationMaximization(conv_model)\n result = activation_maximization(MockScore(),\n steps=1,\n regularizers=None,\n callbacks=PrintLogger(1))\n assert result.shape == (1, 8, 8, 3)\n\n\nclass TestGifGenerator2D():\n def test__init__(self, tmpdir):\n path = tmpdir.mkdir(\"tf-keras-vis\").join(\"test.gif\")\n generator = GifGenerator2D(path)\n assert generator.path == path\n\n def test__call__(self, tmpdir, conv_model):\n path = tmpdir.mkdir(\"tf-keras-vis\").join(\"test.gif\")\n activation_maximization = ActivationMaximization(conv_model)\n assert not os.path.isfile(path)\n result = activation_maximization(MockScore(), steps=1, callbacks=GifGenerator2D(str(path)))\n assert os.path.isfile(path)\n assert result.shape == (1, 8, 8, 3)\n",
"id": "7698091",
"language": "Python",
"matching_score": 2.4354171752929688,
"max_stars_count": 1,
"path": "tests/tf-keras-vis/activation_maximization/callbacks_test.py"
},
{
"content": "import warnings\n\nwarnings.warn(('`tf_keras_vis.utils.callbacks` module is deprecated. '\n 'Please use `tf_keras_vis.activation_maximization.callbacks` instead.'),\n DeprecationWarning)\n\nfrom tf_keras_vis.activation_maximization.callbacks import \\\n Callback as OptimizerCallback # noqa: F401 E402 E501\nfrom tf_keras_vis.activation_maximization.callbacks import \\\n GifGenerator2D # noqa: F401 E402\nfrom tf_keras_vis.activation_maximization.callbacks import \\\n GifGenerator2D as GifGenerator # noqa: F401 E402\nfrom tf_keras_vis.activation_maximization.callbacks import \\\n PrintLogger as Print # noqa: F401 E402\n",
"id": "11003250",
"language": "Python",
"matching_score": 1.8511691093444824,
"max_stars_count": 1,
"path": "tf_keras_vis/utils/callbacks.py"
},
{
"content": "import warnings\n\nwarnings.warn(('`tf_keras_vis.utils.losses` module is deprecated. '\n 'Please use `tf_keras_vis.utils.scores` instead.'), DeprecationWarning)\n\nfrom tf_keras_vis.utils.scores import \\\n BinaryScore as BinaryLoss # noqa: F401 E402\nfrom tf_keras_vis.utils.scores import \\\n CategoricalScore as CategoricalLoss # noqa: F401 E402\nfrom tf_keras_vis.utils.scores import \\\n InactiveScore as InactiveLoss # noqa: F401 E402\nfrom tf_keras_vis.utils.scores import Score as Loss # noqa: F401 E402\n",
"id": "2210503",
"language": "Python",
"matching_score": 0.6068689227104187,
"max_stars_count": 1,
"path": "tf_keras_vis/utils/losses.py"
},
{
"content": "from abc import ABC, abstractmethod\n\nimport numpy as np\nimport tensorflow as tf\nfrom deprecated import deprecated\n\n\nclass Regularizer(ABC):\n def __init__(self, name):\n self.name = name\n\n @abstractmethod\n def __call__(self, inputs):\n raise NotImplementedError()\n\n\nclass TotalVariation2D(Regularizer):\n def __init__(self, weight=10., name='TotalVariation2D'):\n super().__init__(name)\n self.weight = weight\n\n def __call__(self, overall_inputs):\n tv = 0.\n for X in overall_inputs:\n tv += tf.image.total_variation(X) / np.prod(X.shape)\n return self.weight * tv\n\n\n@deprecated(version='0.6.0',\n reason=\"Please use TotalVariation2D class instead of TotalVariation class.\")\nclass TotalVariation(TotalVariation2D):\n def __init__(self, weight=10.):\n super().__init__(weight=weight, name='TotalVariation') # pragma: no cover\n\n\nclass Norm(Regularizer):\n def __init__(self, weight=10., p=2, name='Norm'):\n super().__init__(name)\n self.weight = weight\n self.p = p\n\n def __call__(self, overall_inputs):\n norm = 0.\n for X in overall_inputs:\n X = tf.reshape(X, (X.shape[0], -1))\n norm += tf.norm(X, ord=self.p, axis=-1) / X.shape[-1]\n return self.weight * norm\n\n\n@deprecated(version='0.6.0', reason=\"Please use Norm class instead of L2Norm class.\")\nclass L2Norm(Norm):\n def __init__(self, weight=10.):\n super().__init__(weight=weight, p=2, name='L2Norm') # pragma: no cover\n",
"id": "6397636",
"language": "Python",
"matching_score": 0.11767181754112244,
"max_stars_count": 1,
"path": "tf_keras_vis/utils/regularizers.py"
}
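The regularizers above take the list of model inputs and return one value per sample; a quick illustrative check follows, along with how they are usually handed to ActivationMaximization.

# Illustrative check of the regularizers on a dummy batch.
import numpy as np
import tensorflow as tf
from tf_keras_vis.utils.regularizers import Norm, TotalVariation2D

batch = tf.constant(np.random.uniform(0, 1, (2, 16, 16, 3)).astype(np.float32))
tv = TotalVariation2D(weight=1.0)([batch])  # regularizers receive the list of model inputs
l2 = Norm(weight=1.0, p=2)([batch])
print(tv.shape, l2.shape)                   # one regularization value per sample: (2,), (2,)

# Typically passed as: regularizers=[TotalVariation2D(weight=0.3), Norm(weight=0.003, p=2)]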
] | 2.248513 |
AutomoxCommunity | [
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass PrePatchPrepatch(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'total': 'int',\n 'needs_attention': 'int',\n '_none': 'int',\n 'low': 'int',\n 'medium': 'int',\n 'high': 'int',\n 'critical': 'int',\n 'other': 'int',\n 'devices': 'list[PrePatchPrepatchDevices]'\n }\n\n attribute_map = {\n 'total': 'total',\n 'needs_attention': 'needsAttention',\n '_none': 'none',\n 'low': 'low',\n 'medium': 'medium',\n 'high': 'high',\n 'critical': 'critical',\n 'other': 'other',\n 'devices': 'devices'\n }\n\n def __init__(self, total=None, needs_attention=None, _none=None, low=None, medium=None, high=None, critical=None, other=None, devices=None): # noqa: E501\n \"\"\"PrePatchPrepatch - a model defined in Swagger\"\"\" # noqa: E501\n self._total = None\n self._needs_attention = None\n self.__none = None\n self._low = None\n self._medium = None\n self._high = None\n self._critical = None\n self._other = None\n self._devices = None\n self.discriminator = None\n if total is not None:\n self.total = total\n if needs_attention is not None:\n self.needs_attention = needs_attention\n if _none is not None:\n self._none = _none\n if low is not None:\n self.low = low\n if medium is not None:\n self.medium = medium\n if high is not None:\n self.high = high\n if critical is not None:\n self.critical = critical\n if other is not None:\n self.other = other\n if devices is not None:\n self.devices = devices\n\n @property\n def total(self):\n \"\"\"Gets the total of this PrePatchPrepatch. # noqa: E501\n\n\n :return: The total of this PrePatchPrepatch. # noqa: E501\n :rtype: int\n \"\"\"\n return self._total\n\n @total.setter\n def total(self, total):\n \"\"\"Sets the total of this PrePatchPrepatch.\n\n\n :param total: The total of this PrePatchPrepatch. # noqa: E501\n :type: int\n \"\"\"\n\n self._total = total\n\n @property\n def needs_attention(self):\n \"\"\"Gets the needs_attention of this PrePatchPrepatch. # noqa: E501\n\n\n :return: The needs_attention of this PrePatchPrepatch. # noqa: E501\n :rtype: int\n \"\"\"\n return self._needs_attention\n\n @needs_attention.setter\n def needs_attention(self, needs_attention):\n \"\"\"Sets the needs_attention of this PrePatchPrepatch.\n\n\n :param needs_attention: The needs_attention of this PrePatchPrepatch. # noqa: E501\n :type: int\n \"\"\"\n\n self._needs_attention = needs_attention\n\n @property\n def _none(self):\n \"\"\"Gets the _none of this PrePatchPrepatch. # noqa: E501\n\n\n :return: The _none of this PrePatchPrepatch. # noqa: E501\n :rtype: int\n \"\"\"\n return self.__none\n\n @_none.setter\n def _none(self, _none):\n \"\"\"Sets the _none of this PrePatchPrepatch.\n\n\n :param _none: The _none of this PrePatchPrepatch. # noqa: E501\n :type: int\n \"\"\"\n\n self.__none = _none\n\n @property\n def low(self):\n \"\"\"Gets the low of this PrePatchPrepatch. # noqa: E501\n\n\n :return: The low of this PrePatchPrepatch. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._low\n\n @low.setter\n def low(self, low):\n \"\"\"Sets the low of this PrePatchPrepatch.\n\n\n :param low: The low of this PrePatchPrepatch. # noqa: E501\n :type: int\n \"\"\"\n\n self._low = low\n\n @property\n def medium(self):\n \"\"\"Gets the medium of this PrePatchPrepatch. # noqa: E501\n\n\n :return: The medium of this PrePatchPrepatch. # noqa: E501\n :rtype: int\n \"\"\"\n return self._medium\n\n @medium.setter\n def medium(self, medium):\n \"\"\"Sets the medium of this PrePatchPrepatch.\n\n\n :param medium: The medium of this PrePatchPrepatch. # noqa: E501\n :type: int\n \"\"\"\n\n self._medium = medium\n\n @property\n def high(self):\n \"\"\"Gets the high of this PrePatchPrepatch. # noqa: E501\n\n\n :return: The high of this PrePatchPrepatch. # noqa: E501\n :rtype: int\n \"\"\"\n return self._high\n\n @high.setter\n def high(self, high):\n \"\"\"Sets the high of this PrePatchPrepatch.\n\n\n :param high: The high of this PrePatchPrepatch. # noqa: E501\n :type: int\n \"\"\"\n\n self._high = high\n\n @property\n def critical(self):\n \"\"\"Gets the critical of this PrePatchPrepatch. # noqa: E501\n\n\n :return: The critical of this PrePatchPrepatch. # noqa: E501\n :rtype: int\n \"\"\"\n return self._critical\n\n @critical.setter\n def critical(self, critical):\n \"\"\"Sets the critical of this PrePatchPrepatch.\n\n\n :param critical: The critical of this PrePatchPrepatch. # noqa: E501\n :type: int\n \"\"\"\n\n self._critical = critical\n\n @property\n def other(self):\n \"\"\"Gets the other of this PrePatchPrepatch. # noqa: E501\n\n\n :return: The other of this PrePatchPrepatch. # noqa: E501\n :rtype: int\n \"\"\"\n return self._other\n\n @other.setter\n def other(self, other):\n \"\"\"Sets the other of this PrePatchPrepatch.\n\n\n :param other: The other of this PrePatchPrepatch. # noqa: E501\n :type: int\n \"\"\"\n\n self._other = other\n\n @property\n def devices(self):\n \"\"\"Gets the devices of this PrePatchPrepatch. # noqa: E501\n\n\n :return: The devices of this PrePatchPrepatch. # noqa: E501\n :rtype: list[PrePatchPrepatchDevices]\n \"\"\"\n return self._devices\n\n @devices.setter\n def devices(self, devices):\n \"\"\"Sets the devices of this PrePatchPrepatch.\n\n\n :param devices: The devices of this PrePatchPrepatch. 
# noqa: E501\n :type: list[PrePatchPrepatchDevices]\n \"\"\"\n\n self._devices = devices\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(PrePatchPrepatch, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, PrePatchPrepatch):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "10242525",
"language": "Python",
"matching_score": 2.4214653968811035,
"max_stars_count": 1,
"path": "automox_console_sdk/models/pre_patch_prepatch.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass PrePatchPrepatchDevices(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'int',\n 'name': 'str',\n 'create_time': 'datetime',\n 'group': 'str',\n 'connected': 'bool',\n 'needs_reboot': 'bool',\n 'os_family': 'str',\n 'compliant': 'bool',\n 'patches': 'Patches'\n }\n\n attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'create_time': 'createTime',\n 'group': 'group',\n 'connected': 'connected',\n 'needs_reboot': 'needsReboot',\n 'os_family': 'os_family',\n 'compliant': 'compliant',\n 'patches': 'patches'\n }\n\n def __init__(self, id=None, name=None, create_time=None, group=None, connected=None, needs_reboot=None, os_family=None, compliant=None, patches=None): # noqa: E501\n \"\"\"PrePatchPrepatchDevices - a model defined in Swagger\"\"\" # noqa: E501\n self._id = None\n self._name = None\n self._create_time = None\n self._group = None\n self._connected = None\n self._needs_reboot = None\n self._os_family = None\n self._compliant = None\n self._patches = None\n self.discriminator = None\n if id is not None:\n self.id = id\n if name is not None:\n self.name = name\n if create_time is not None:\n self.create_time = create_time\n if group is not None:\n self.group = group\n if connected is not None:\n self.connected = connected\n if needs_reboot is not None:\n self.needs_reboot = needs_reboot\n if os_family is not None:\n self.os_family = os_family\n if compliant is not None:\n self.compliant = compliant\n if patches is not None:\n self.patches = patches\n\n @property\n def id(self):\n \"\"\"Gets the id of this PrePatchPrepatchDevices. # noqa: E501\n\n\n :return: The id of this PrePatchPrepatchDevices. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this PrePatchPrepatchDevices.\n\n\n :param id: The id of this PrePatchPrepatchDevices. # noqa: E501\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def name(self):\n \"\"\"Gets the name of this PrePatchPrepatchDevices. # noqa: E501\n\n\n :return: The name of this PrePatchPrepatchDevices. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this PrePatchPrepatchDevices.\n\n\n :param name: The name of this PrePatchPrepatchDevices. # noqa: E501\n :type: str\n \"\"\"\n\n self._name = name\n\n @property\n def create_time(self):\n \"\"\"Gets the create_time of this PrePatchPrepatchDevices. # noqa: E501\n\n\n :return: The create_time of this PrePatchPrepatchDevices. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._create_time\n\n @create_time.setter\n def create_time(self, create_time):\n \"\"\"Sets the create_time of this PrePatchPrepatchDevices.\n\n\n :param create_time: The create_time of this PrePatchPrepatchDevices. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._create_time = create_time\n\n @property\n def group(self):\n \"\"\"Gets the group of this PrePatchPrepatchDevices. 
# noqa: E501\n\n\n :return: The group of this PrePatchPrepatchDevices. # noqa: E501\n :rtype: str\n \"\"\"\n return self._group\n\n @group.setter\n def group(self, group):\n \"\"\"Sets the group of this PrePatchPrepatchDevices.\n\n\n :param group: The group of this PrePatchPrepatchDevices. # noqa: E501\n :type: str\n \"\"\"\n\n self._group = group\n\n @property\n def connected(self):\n \"\"\"Gets the connected of this PrePatchPrepatchDevices. # noqa: E501\n\n\n :return: The connected of this PrePatchPrepatchDevices. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._connected\n\n @connected.setter\n def connected(self, connected):\n \"\"\"Sets the connected of this PrePatchPrepatchDevices.\n\n\n :param connected: The connected of this PrePatchPrepatchDevices. # noqa: E501\n :type: bool\n \"\"\"\n\n self._connected = connected\n\n @property\n def needs_reboot(self):\n \"\"\"Gets the needs_reboot of this PrePatchPrepatchDevices. # noqa: E501\n\n\n :return: The needs_reboot of this PrePatchPrepatchDevices. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._needs_reboot\n\n @needs_reboot.setter\n def needs_reboot(self, needs_reboot):\n \"\"\"Sets the needs_reboot of this PrePatchPrepatchDevices.\n\n\n :param needs_reboot: The needs_reboot of this PrePatchPrepatchDevices. # noqa: E501\n :type: bool\n \"\"\"\n\n self._needs_reboot = needs_reboot\n\n @property\n def os_family(self):\n \"\"\"Gets the os_family of this PrePatchPrepatchDevices. # noqa: E501\n\n\n :return: The os_family of this PrePatchPrepatchDevices. # noqa: E501\n :rtype: str\n \"\"\"\n return self._os_family\n\n @os_family.setter\n def os_family(self, os_family):\n \"\"\"Sets the os_family of this PrePatchPrepatchDevices.\n\n\n :param os_family: The os_family of this PrePatchPrepatchDevices. # noqa: E501\n :type: str\n \"\"\"\n\n self._os_family = os_family\n\n @property\n def compliant(self):\n \"\"\"Gets the compliant of this PrePatchPrepatchDevices. # noqa: E501\n\n\n :return: The compliant of this PrePatchPrepatchDevices. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._compliant\n\n @compliant.setter\n def compliant(self, compliant):\n \"\"\"Sets the compliant of this PrePatchPrepatchDevices.\n\n\n :param compliant: The compliant of this PrePatchPrepatchDevices. # noqa: E501\n :type: bool\n \"\"\"\n\n self._compliant = compliant\n\n @property\n def patches(self):\n \"\"\"Gets the patches of this PrePatchPrepatchDevices. # noqa: E501\n\n\n :return: The patches of this PrePatchPrepatchDevices. # noqa: E501\n :rtype: Patches\n \"\"\"\n return self._patches\n\n @patches.setter\n def patches(self, patches):\n \"\"\"Sets the patches of this PrePatchPrepatchDevices.\n\n\n :param patches: The patches of this PrePatchPrepatchDevices. 
# noqa: E501\n :type: Patches\n \"\"\"\n\n self._patches = patches\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(PrePatchPrepatchDevices, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, PrePatchPrepatchDevices):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "1631951",
"language": "Python",
"matching_score": 1.4278115034103394,
"max_stars_count": 1,
"path": "automox_console_sdk/models/pre_patch_prepatch_devices.py"
},
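The two generated models above can be built and serialized directly; the sketch below uses the module paths given in this listing, with made-up field values.

# Illustrative sketch: build the generated models and serialize them back to a dict.
from automox_console_sdk.models.pre_patch_prepatch import PrePatchPrepatch
from automox_console_sdk.models.pre_patch_prepatch_devices import PrePatchPrepatchDevices

device = PrePatchPrepatchDevices(id=42, name='web-01', group='Default',
                                 connected=True, needs_reboot=False,
                                 os_family='Linux', compliant=True)
report = PrePatchPrepatch(total=1, needs_attention=0, critical=0, devices=[device])

# to_dict() is keyed by the python attribute names (e.g. 'needs_attention'),
# while attribute_map holds the JSON names used on the wire (e.g. 'needsAttention').
print(report.to_dict())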
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-09-20\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass PoliciesBody(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'name': 'str',\n 'policy_type_name': 'str',\n 'organization_id': 'int',\n 'schedule_days': 'int',\n 'schedule_weeks_of_month': 'int',\n 'schedule_months': 'int',\n 'schedule_time': 'str',\n 'configuration': 'PolicyConfiguration',\n 'notify_user': 'bool',\n 'notes': 'str',\n 'server_groups': 'list[int]',\n 'auto_patch': 'bool',\n 'auto_reboot': 'bool',\n 'notify_reboot_user': 'bool'\n }\n\n attribute_map = {\n 'name': 'name',\n 'policy_type_name': 'policy_type_name',\n 'organization_id': 'organization_id',\n 'schedule_days': 'schedule_days',\n 'schedule_weeks_of_month': 'schedule_weeks_of_month',\n 'schedule_months': 'schedule_months',\n 'schedule_time': 'schedule_time',\n 'configuration': 'configuration',\n 'notify_user': 'notify_user',\n 'notes': 'notes',\n 'server_groups': 'server_groups',\n 'auto_patch': 'auto_patch',\n 'auto_reboot': 'auto_reboot',\n 'notify_reboot_user': 'notify_reboot_user'\n }\n\n def __init__(self, name=None, policy_type_name=None, organization_id=None, schedule_days=None, schedule_weeks_of_month=None, schedule_months=None, schedule_time=None, configuration=None, notify_user=None, notes=None, server_groups=None, auto_patch=None, auto_reboot=None, notify_reboot_user=None): # noqa: E501\n \"\"\"PoliciesBody - a model defined in Swagger\"\"\" # noqa: E501\n self._name = None\n self._policy_type_name = None\n self._organization_id = None\n self._schedule_days = None\n self._schedule_weeks_of_month = None\n self._schedule_months = None\n self._schedule_time = None\n self._configuration = None\n self._notify_user = None\n self._notes = None\n self._server_groups = None\n self._auto_patch = None\n self._auto_reboot = None\n self._notify_reboot_user = None\n self.discriminator = None\n self.name = name\n self.policy_type_name = policy_type_name\n self.organization_id = organization_id\n self.schedule_days = schedule_days\n self.schedule_weeks_of_month = schedule_weeks_of_month\n self.schedule_months = schedule_months\n self.schedule_time = schedule_time\n self.configuration = configuration\n self.notify_user = notify_user\n if notes is not None:\n self.notes = notes\n if server_groups is not None:\n self.server_groups = server_groups\n if auto_patch is not None:\n self.auto_patch = auto_patch\n if auto_reboot is not None:\n self.auto_reboot = auto_reboot\n if notify_reboot_user is not None:\n self.notify_reboot_user = notify_reboot_user\n\n @property\n def name(self):\n \"\"\"Gets the name of this PoliciesBody. # noqa: E501\n\n The name of the policy. # noqa: E501\n\n :return: The name of this PoliciesBody. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this PoliciesBody.\n\n The name of the policy. # noqa: E501\n\n :param name: The name of this PoliciesBody. 
# noqa: E501\n :type: str\n \"\"\"\n if name is None:\n raise ValueError(\"Invalid value for `name`, must not be `None`\") # noqa: E501\n\n self._name = name\n\n @property\n def policy_type_name(self):\n \"\"\"Gets the policy_type_name of this PoliciesBody. # noqa: E501\n\n\n :return: The policy_type_name of this PoliciesBody. # noqa: E501\n :rtype: str\n \"\"\"\n return self._policy_type_name\n\n @policy_type_name.setter\n def policy_type_name(self, policy_type_name):\n \"\"\"Sets the policy_type_name of this PoliciesBody.\n\n\n :param policy_type_name: The policy_type_name of this PoliciesBody. # noqa: E501\n :type: str\n \"\"\"\n if policy_type_name is None:\n raise ValueError(\"Invalid value for `policy_type_name`, must not be `None`\") # noqa: E501\n allowed_values = [\"patch\", \"custom\", \"required_software\"] # noqa: E501\n if policy_type_name not in allowed_values:\n raise ValueError(\n \"Invalid value for `policy_type_name` ({0}), must be one of {1}\" # noqa: E501\n .format(policy_type_name, allowed_values)\n )\n\n self._policy_type_name = policy_type_name\n\n @property\n def organization_id(self):\n \"\"\"Gets the organization_id of this PoliciesBody. # noqa: E501\n\n Organization ID for the specified policy # noqa: E501\n\n :return: The organization_id of this PoliciesBody. # noqa: E501\n :rtype: int\n \"\"\"\n return self._organization_id\n\n @organization_id.setter\n def organization_id(self, organization_id):\n \"\"\"Sets the organization_id of this PoliciesBody.\n\n Organization ID for the specified policy # noqa: E501\n\n :param organization_id: The organization_id of this PoliciesBody. # noqa: E501\n :type: int\n \"\"\"\n if organization_id is None:\n raise ValueError(\"Invalid value for `organization_id`, must not be `None`\") # noqa: E501\n\n self._organization_id = organization_id\n\n @property\n def schedule_days(self):\n \"\"\"Gets the schedule_days of this PoliciesBody. # noqa: E501\n\n Decimal value of binary day schedule. See [Policy and Device Filters, and Scheduling - Example Days per Week](/developer-portal/policy_filters_schedule/#example-days-per-week). # noqa: E501\n\n :return: The schedule_days of this PoliciesBody. # noqa: E501\n :rtype: int\n \"\"\"\n return self._schedule_days\n\n @schedule_days.setter\n def schedule_days(self, schedule_days):\n \"\"\"Sets the schedule_days of this PoliciesBody.\n\n Decimal value of binary day schedule. See [Policy and Device Filters, and Scheduling - Example Days per Week](/developer-portal/policy_filters_schedule/#example-days-per-week). # noqa: E501\n\n :param schedule_days: The schedule_days of this PoliciesBody. # noqa: E501\n :type: int\n \"\"\"\n if schedule_days is None:\n raise ValueError(\"Invalid value for `schedule_days`, must not be `None`\") # noqa: E501\n\n self._schedule_days = schedule_days\n\n @property\n def schedule_weeks_of_month(self):\n \"\"\"Gets the schedule_weeks_of_month of this PoliciesBody. # noqa: E501\n\n Decimal value of binary week schedule. See [Policy and Device Filters, and Scheduling - Example Weeks per Month](/developer-portal/policy_filters_schedule/#example-weeks-per-month). # noqa: E501\n\n :return: The schedule_weeks_of_month of this PoliciesBody. # noqa: E501\n :rtype: int\n \"\"\"\n return self._schedule_weeks_of_month\n\n @schedule_weeks_of_month.setter\n def schedule_weeks_of_month(self, schedule_weeks_of_month):\n \"\"\"Sets the schedule_weeks_of_month of this PoliciesBody.\n\n Decimal value of binary week schedule. 
See [Policy and Device Filters, and Scheduling - Example Weeks per Month](/developer-portal/policy_filters_schedule/#example-weeks-per-month). # noqa: E501\n\n :param schedule_weeks_of_month: The schedule_weeks_of_month of this PoliciesBody. # noqa: E501\n :type: int\n \"\"\"\n if schedule_weeks_of_month is None:\n raise ValueError(\"Invalid value for `schedule_weeks_of_month`, must not be `None`\") # noqa: E501\n\n self._schedule_weeks_of_month = schedule_weeks_of_month\n\n @property\n def schedule_months(self):\n \"\"\"Gets the schedule_months of this PoliciesBody. # noqa: E501\n\n Decimal value of binary month schedule. See [Policy and Device Filters, and Scheduling - Example Months per Year](/developer-portal/policy_filters_schedule/#example-months-per-year). # noqa: E501\n\n :return: The schedule_months of this PoliciesBody. # noqa: E501\n :rtype: int\n \"\"\"\n return self._schedule_months\n\n @schedule_months.setter\n def schedule_months(self, schedule_months):\n \"\"\"Sets the schedule_months of this PoliciesBody.\n\n Decimal value of binary month schedule. See [Policy and Device Filters, and Scheduling - Example Months per Year](/developer-portal/policy_filters_schedule/#example-months-per-year). # noqa: E501\n\n :param schedule_months: The schedule_months of this PoliciesBody. # noqa: E501\n :type: int\n \"\"\"\n if schedule_months is None:\n raise ValueError(\"Invalid value for `schedule_months`, must not be `None`\") # noqa: E501\n\n self._schedule_months = schedule_months\n\n @property\n def schedule_time(self):\n \"\"\"Gets the schedule_time of this PoliciesBody. # noqa: E501\n\n Scheduled time for automatic policy execution. Format: `\\\"hh:mm\\\"` # noqa: E501\n\n :return: The schedule_time of this PoliciesBody. # noqa: E501\n :rtype: str\n \"\"\"\n return self._schedule_time\n\n @schedule_time.setter\n def schedule_time(self, schedule_time):\n \"\"\"Sets the schedule_time of this PoliciesBody.\n\n Scheduled time for automatic policy execution. Format: `\\\"hh:mm\\\"` # noqa: E501\n\n :param schedule_time: The schedule_time of this PoliciesBody. # noqa: E501\n :type: str\n \"\"\"\n if schedule_time is None:\n raise ValueError(\"Invalid value for `schedule_time`, must not be `None`\") # noqa: E501\n\n self._schedule_time = schedule_time\n\n @property\n def configuration(self):\n \"\"\"Gets the configuration of this PoliciesBody. # noqa: E501\n\n\n :return: The configuration of this PoliciesBody. # noqa: E501\n :rtype: PolicyConfiguration\n \"\"\"\n return self._configuration\n\n @configuration.setter\n def configuration(self, configuration):\n \"\"\"Sets the configuration of this PoliciesBody.\n\n\n :param configuration: The configuration of this PoliciesBody. # noqa: E501\n :type: PolicyConfiguration\n \"\"\"\n if configuration is None:\n raise ValueError(\"Invalid value for `configuration`, must not be `None`\") # noqa: E501\n\n self._configuration = configuration\n\n @property\n def notify_user(self):\n \"\"\"Gets the notify_user of this PoliciesBody. # noqa: E501\n\n Display notification 15 minutes before patching. # noqa: E501\n\n :return: The notify_user of this PoliciesBody. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._notify_user\n\n @notify_user.setter\n def notify_user(self, notify_user):\n \"\"\"Sets the notify_user of this PoliciesBody.\n\n Display notification 15 minutes before patching. # noqa: E501\n\n :param notify_user: The notify_user of this PoliciesBody. 
# noqa: E501\n :type: bool\n \"\"\"\n if notify_user is None:\n raise ValueError(\"Invalid value for `notify_user`, must not be `None`\") # noqa: E501\n\n self._notify_user = notify_user\n\n @property\n def notes(self):\n \"\"\"Gets the notes of this PoliciesBody. # noqa: E501\n\n Policy notes # noqa: E501\n\n :return: The notes of this PoliciesBody. # noqa: E501\n :rtype: str\n \"\"\"\n return self._notes\n\n @notes.setter\n def notes(self, notes):\n \"\"\"Sets the notes of this PoliciesBody.\n\n Policy notes # noqa: E501\n\n :param notes: The notes of this PoliciesBody. # noqa: E501\n :type: str\n \"\"\"\n\n self._notes = notes\n\n @property\n def server_groups(self):\n \"\"\"Gets the server_groups of this PoliciesBody. # noqa: E501\n\n Integer array. Server groups to link with the policy # noqa: E501\n\n :return: The server_groups of this PoliciesBody. # noqa: E501\n :rtype: list[int]\n \"\"\"\n return self._server_groups\n\n @server_groups.setter\n def server_groups(self, server_groups):\n \"\"\"Sets the server_groups of this PoliciesBody.\n\n Integer array. Server groups to link with the policy # noqa: E501\n\n :param server_groups: The server_groups of this PoliciesBody. # noqa: E501\n :type: list[int]\n \"\"\"\n\n self._server_groups = server_groups\n\n @property\n def auto_patch(self):\n \"\"\"Gets the auto_patch of this PoliciesBody. # noqa: E501\n\n Enable or Disable automatic execution of the policy. # noqa: E501\n\n :return: The auto_patch of this PoliciesBody. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._auto_patch\n\n @auto_patch.setter\n def auto_patch(self, auto_patch):\n \"\"\"Sets the auto_patch of this PoliciesBody.\n\n Enable or Disable automatic execution of the policy. # noqa: E501\n\n :param auto_patch: The auto_patch of this PoliciesBody. # noqa: E501\n :type: bool\n \"\"\"\n\n self._auto_patch = auto_patch\n\n @property\n def auto_reboot(self):\n \"\"\"Gets the auto_reboot of this PoliciesBody. # noqa: E501\n\n Enable or Disable automatic reboots following policy execution. # noqa: E501\n\n :return: The auto_reboot of this PoliciesBody. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._auto_reboot\n\n @auto_reboot.setter\n def auto_reboot(self, auto_reboot):\n \"\"\"Sets the auto_reboot of this PoliciesBody.\n\n Enable or Disable automatic reboots following policy execution. # noqa: E501\n\n :param auto_reboot: The auto_reboot of this PoliciesBody. # noqa: E501\n :type: bool\n \"\"\"\n\n self._auto_reboot = auto_reboot\n\n @property\n def notify_reboot_user(self):\n \"\"\"Gets the notify_reboot_user of this PoliciesBody. # noqa: E501\n\n Display modified notification 15 minutes before patching. This message should inform the user that a reboot will follow patching actions. # noqa: E501\n\n :return: The notify_reboot_user of this PoliciesBody. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._notify_reboot_user\n\n @notify_reboot_user.setter\n def notify_reboot_user(self, notify_reboot_user):\n \"\"\"Sets the notify_reboot_user of this PoliciesBody.\n\n Display modified notification 15 minutes before patching. This message should inform the user that a reboot will follow patching actions. # noqa: E501\n\n :param notify_reboot_user: The notify_reboot_user of this PoliciesBody. 
# noqa: E501\n :type: bool\n \"\"\"\n\n self._notify_reboot_user = notify_reboot_user\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(PoliciesBody, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, PoliciesBody):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "7606687",
"language": "Python",
"matching_score": 4.403937816619873,
"max_stars_count": 1,
"path": "automox_console_sdk/models/policies_body.py"
},
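The schedule_days, schedule_weeks_of_month and schedule_months fields in the PoliciesBody model above all take the "decimal value of a binary schedule". A minimal, self-contained sketch of that encoding follows; the exact bit-to-day mapping is defined in the linked scheduling documentation, so the layout used here (bit 0 unused, bits 1 through 7 mapping Sunday through Saturday) is an illustrative assumption rather than a confirmed detail.

WEEKDAYS = ["sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"]

def schedule_days_value(*days):
    """Return the decimal value of the binary day schedule for the given weekday names."""
    value = 0
    for day in days:
        # Assumed bit layout: bit 0 unused, bits 1-7 map Sunday..Saturday.
        value |= 1 << (WEEKDAYS.index(day.lower()) + 1)
    return value

print(schedule_days_value("monday", "wednesday", "friday"))  # 84 under the assumed bit layout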
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-09-20\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass Policy(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'configuration': 'PolicyConfiguration',\n 'id': 'int',\n 'name': 'str',\n 'notes': 'str',\n 'organization_id': 'int',\n 'policy_type_name': 'str',\n 'schedule_days': 'int',\n 'schedule_weeks_of_month': 'int',\n 'schedule_months': 'int',\n 'schedule_time': 'str',\n 'next_remediation': 'datetime',\n 'server_groups': 'list[int]'\n }\n\n attribute_map = {\n 'configuration': 'configuration',\n 'id': 'id',\n 'name': 'name',\n 'notes': 'notes',\n 'organization_id': 'organization_id',\n 'policy_type_name': 'policy_type_name',\n 'schedule_days': 'schedule_days',\n 'schedule_weeks_of_month': 'schedule_weeks_of_month',\n 'schedule_months': 'schedule_months',\n 'schedule_time': 'schedule_time',\n 'next_remediation': 'next_remediation',\n 'server_groups': 'server_groups'\n }\n\n def __init__(self, configuration=None, id=None, name=None, notes=None, organization_id=None, policy_type_name=None, schedule_days=None, schedule_weeks_of_month=None, schedule_months=None, schedule_time=None, next_remediation=None, server_groups=None): # noqa: E501\n \"\"\"Policy - a model defined in Swagger\"\"\" # noqa: E501\n self._configuration = None\n self._id = None\n self._name = None\n self._notes = None\n self._organization_id = None\n self._policy_type_name = None\n self._schedule_days = None\n self._schedule_weeks_of_month = None\n self._schedule_months = None\n self._schedule_time = None\n self._next_remediation = None\n self._server_groups = None\n self.discriminator = None\n self.configuration = configuration\n if id is not None:\n self.id = id\n self.name = name\n self.notes = notes\n self.organization_id = organization_id\n self.policy_type_name = policy_type_name\n self.schedule_days = schedule_days\n if schedule_weeks_of_month is not None:\n self.schedule_weeks_of_month = schedule_weeks_of_month\n if schedule_months is not None:\n self.schedule_months = schedule_months\n self.schedule_time = schedule_time\n if next_remediation is not None:\n self.next_remediation = next_remediation\n self.server_groups = server_groups\n\n @property\n def configuration(self):\n \"\"\"Gets the configuration of this Policy. # noqa: E501\n\n\n :return: The configuration of this Policy. # noqa: E501\n :rtype: PolicyConfiguration\n \"\"\"\n return self._configuration\n\n @configuration.setter\n def configuration(self, configuration):\n \"\"\"Sets the configuration of this Policy.\n\n\n :param configuration: The configuration of this Policy. # noqa: E501\n :type: PolicyConfiguration\n \"\"\"\n if configuration is None:\n raise ValueError(\"Invalid value for `configuration`, must not be `None`\") # noqa: E501\n\n self._configuration = configuration\n\n @property\n def id(self):\n \"\"\"Gets the id of this Policy. # noqa: E501\n\n The ID of the relevant policy. # noqa: E501\n\n :return: The id of this Policy. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this Policy.\n\n The ID of the relevant policy. # noqa: E501\n\n :param id: The id of this Policy. # noqa: E501\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def name(self):\n \"\"\"Gets the name of this Policy. # noqa: E501\n\n Name of the policy # noqa: E501\n\n :return: The name of this Policy. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this Policy.\n\n Name of the policy # noqa: E501\n\n :param name: The name of this Policy. # noqa: E501\n :type: str\n \"\"\"\n if name is None:\n raise ValueError(\"Invalid value for `name`, must not be `None`\") # noqa: E501\n\n self._name = name\n\n @property\n def notes(self):\n \"\"\"Gets the notes of this Policy. # noqa: E501\n\n Policy notes # noqa: E501\n\n :return: The notes of this Policy. # noqa: E501\n :rtype: str\n \"\"\"\n return self._notes\n\n @notes.setter\n def notes(self, notes):\n \"\"\"Sets the notes of this Policy.\n\n Policy notes # noqa: E501\n\n :param notes: The notes of this Policy. # noqa: E501\n :type: str\n \"\"\"\n if notes is None:\n raise ValueError(\"Invalid value for `notes`, must not be `None`\") # noqa: E501\n\n self._notes = notes\n\n @property\n def organization_id(self):\n \"\"\"Gets the organization_id of this Policy. # noqa: E501\n\n Organization ID for the specified policy # noqa: E501\n\n :return: The organization_id of this Policy. # noqa: E501\n :rtype: int\n \"\"\"\n return self._organization_id\n\n @organization_id.setter\n def organization_id(self, organization_id):\n \"\"\"Sets the organization_id of this Policy.\n\n Organization ID for the specified policy # noqa: E501\n\n :param organization_id: The organization_id of this Policy. # noqa: E501\n :type: int\n \"\"\"\n if organization_id is None:\n raise ValueError(\"Invalid value for `organization_id`, must not be `None`\") # noqa: E501\n\n self._organization_id = organization_id\n\n @property\n def policy_type_name(self):\n \"\"\"Gets the policy_type_name of this Policy. # noqa: E501\n\n Name for the policy type # noqa: E501\n\n :return: The policy_type_name of this Policy. # noqa: E501\n :rtype: str\n \"\"\"\n return self._policy_type_name\n\n @policy_type_name.setter\n def policy_type_name(self, policy_type_name):\n \"\"\"Sets the policy_type_name of this Policy.\n\n Name for the policy type # noqa: E501\n\n :param policy_type_name: The policy_type_name of this Policy. # noqa: E501\n :type: str\n \"\"\"\n if policy_type_name is None:\n raise ValueError(\"Invalid value for `policy_type_name`, must not be `None`\") # noqa: E501\n allowed_values = [\"patch\", \"custom\", \"required_software\"] # noqa: E501\n if policy_type_name not in allowed_values:\n raise ValueError(\n \"Invalid value for `policy_type_name` ({0}), must be one of {1}\" # noqa: E501\n .format(policy_type_name, allowed_values)\n )\n\n self._policy_type_name = policy_type_name\n\n @property\n def schedule_days(self):\n \"\"\"Gets the schedule_days of this Policy. # noqa: E501\n\n Decimal value of binary day schedule. See [Policy and Device Filters, and Scheduling - Example Days per Week](/developer-portal/policy_filters_schedule/#example-days-per-week). # noqa: E501\n\n :return: The schedule_days of this Policy. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._schedule_days\n\n @schedule_days.setter\n def schedule_days(self, schedule_days):\n \"\"\"Sets the schedule_days of this Policy.\n\n Decimal value of binary day schedule. See [Policy and Device Filters, and Scheduling - Example Days per Week](/developer-portal/policy_filters_schedule/#example-days-per-week). # noqa: E501\n\n :param schedule_days: The schedule_days of this Policy. # noqa: E501\n :type: int\n \"\"\"\n if schedule_days is None:\n raise ValueError(\"Invalid value for `schedule_days`, must not be `None`\") # noqa: E501\n\n self._schedule_days = schedule_days\n\n @property\n def schedule_weeks_of_month(self):\n \"\"\"Gets the schedule_weeks_of_month of this Policy. # noqa: E501\n\n Decimal value of binary week schedule. See [Policy and Device Filters, and Scheduling - Example Weeks per Month](/developer-portal/policy_filters_schedule/#example-weeks-per-month). # noqa: E501\n\n :return: The schedule_weeks_of_month of this Policy. # noqa: E501\n :rtype: int\n \"\"\"\n return self._schedule_weeks_of_month\n\n @schedule_weeks_of_month.setter\n def schedule_weeks_of_month(self, schedule_weeks_of_month):\n \"\"\"Sets the schedule_weeks_of_month of this Policy.\n\n Decimal value of binary week schedule. See [Policy and Device Filters, and Scheduling - Example Weeks per Month](/developer-portal/policy_filters_schedule/#example-weeks-per-month). # noqa: E501\n\n :param schedule_weeks_of_month: The schedule_weeks_of_month of this Policy. # noqa: E501\n :type: int\n \"\"\"\n\n self._schedule_weeks_of_month = schedule_weeks_of_month\n\n @property\n def schedule_months(self):\n \"\"\"Gets the schedule_months of this Policy. # noqa: E501\n\n Decimal value of binary month schedule. See [Policy and Device Filters, and Scheduling - Example Months per Year](/developer-portal/policy_filters_schedule/#example-months-per-year). # noqa: E501\n\n :return: The schedule_months of this Policy. # noqa: E501\n :rtype: int\n \"\"\"\n return self._schedule_months\n\n @schedule_months.setter\n def schedule_months(self, schedule_months):\n \"\"\"Sets the schedule_months of this Policy.\n\n Decimal value of binary month schedule. See [Policy and Device Filters, and Scheduling - Example Months per Year](/developer-portal/policy_filters_schedule/#example-months-per-year). # noqa: E501\n\n :param schedule_months: The schedule_months of this Policy. # noqa: E501\n :type: int\n \"\"\"\n\n self._schedule_months = schedule_months\n\n @property\n def schedule_time(self):\n \"\"\"Gets the schedule_time of this Policy. # noqa: E501\n\n Scheduled time for automatic policy execution. Format: `\\\"hh:mm\\\"` # noqa: E501\n\n :return: The schedule_time of this Policy. # noqa: E501\n :rtype: str\n \"\"\"\n return self._schedule_time\n\n @schedule_time.setter\n def schedule_time(self, schedule_time):\n \"\"\"Sets the schedule_time of this Policy.\n\n Scheduled time for automatic policy execution. Format: `\\\"hh:mm\\\"` # noqa: E501\n\n :param schedule_time: The schedule_time of this Policy. # noqa: E501\n :type: str\n \"\"\"\n if schedule_time is None:\n raise ValueError(\"Invalid value for `schedule_time`, must not be `None`\") # noqa: E501\n\n self._schedule_time = schedule_time\n\n @property\n def next_remediation(self):\n \"\"\"Gets the next_remediation of this Policy. # noqa: E501\n\n\n :return: The next_remediation of this Policy. 
# noqa: E501\n :rtype: datetime\n \"\"\"\n return self._next_remediation\n\n @next_remediation.setter\n def next_remediation(self, next_remediation):\n \"\"\"Sets the next_remediation of this Policy.\n\n\n :param next_remediation: The next_remediation of this Policy. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._next_remediation = next_remediation\n\n @property\n def server_groups(self):\n \"\"\"Gets the server_groups of this Policy. # noqa: E501\n\n Integer array. Server groups to link with the policy # noqa: E501\n\n :return: The server_groups of this Policy. # noqa: E501\n :rtype: list[int]\n \"\"\"\n return self._server_groups\n\n @server_groups.setter\n def server_groups(self, server_groups):\n \"\"\"Sets the server_groups of this Policy.\n\n Integer array. Server groups to link with the policy # noqa: E501\n\n :param server_groups: The server_groups of this Policy. # noqa: E501\n :type: list[int]\n \"\"\"\n if server_groups is None:\n raise ValueError(\"Invalid value for `server_groups`, must not be `None`\") # noqa: E501\n\n self._server_groups = server_groups\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(Policy, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Policy):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "5666535",
"language": "Python",
"matching_score": 1.1870365142822266,
"max_stars_count": 1,
"path": "automox_console_sdk/models/policy.py"
},
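A hedged sketch of constructing the Policy model defined above. Every field whose setter rejects None is supplied; because the generated configuration setter only checks for None and does not type-check, a plain dict is used here as a stand-in for a PolicyConfiguration instance, and all values are illustrative.

from automox_console_sdk.models.policy import Policy

policy = Policy(
    configuration={"auto_patch": True},      # stand-in for a PolicyConfiguration object
    name="Example patch policy",
    notes="Created from the SDK",
    organization_id=1234,                    # hypothetical organization ID
    policy_type_name="patch",                # must be one of: patch, custom, required_software
    schedule_days=84,                        # decimal value of a binary day schedule
    schedule_time="03:00",                   # format "hh:mm"
    server_groups=[1, 2],                    # server group IDs to link with the policy
)

print(policy.to_dict())                      # nested to_dict()-capable values are expanded recursively
print(policy == Policy(**policy.to_dict()))  # __eq__ compares the underlying __dict__, so this is True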
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass RequiredSoftwarePolicyConfiguration(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'device_filters_enabled': 'bool',\n 'device_filters': 'DeviceFilters',\n 'missed_patch_window': 'bool',\n 'os_family': 'str',\n 'package_name': 'str',\n 'package_version': 'str',\n 'installation_code': 'str'\n }\n\n attribute_map = {\n 'device_filters_enabled': 'device_filters_enabled',\n 'device_filters': 'device_filters',\n 'missed_patch_window': 'missed_patch_window',\n 'os_family': 'os_family',\n 'package_name': 'package_name',\n 'package_version': 'package_version',\n 'installation_code': 'installation_code'\n }\n\n def __init__(self, device_filters_enabled=False, device_filters=None, missed_patch_window=None, os_family=None, package_name=None, package_version=None, installation_code=None): # noqa: E501\n \"\"\"RequiredSoftwarePolicyConfiguration - a model defined in Swagger\"\"\" # noqa: E501\n self._device_filters_enabled = None\n self._device_filters = None\n self._missed_patch_window = None\n self._os_family = None\n self._package_name = None\n self._package_version = None\n self._installation_code = None\n self.discriminator = None\n if device_filters_enabled is not None:\n self.device_filters_enabled = device_filters_enabled\n if device_filters is not None:\n self.device_filters = device_filters\n if missed_patch_window is not None:\n self.missed_patch_window = missed_patch_window\n if os_family is not None:\n self.os_family = os_family\n self.package_name = package_name\n self.package_version = package_version\n self.installation_code = installation_code\n\n @property\n def device_filters_enabled(self):\n \"\"\"Gets the device_filters_enabled of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n\n Enable or disable Device Filters. # noqa: E501\n\n :return: The device_filters_enabled of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._device_filters_enabled\n\n @device_filters_enabled.setter\n def device_filters_enabled(self, device_filters_enabled):\n \"\"\"Sets the device_filters_enabled of this RequiredSoftwarePolicyConfiguration.\n\n Enable or disable Device Filters. # noqa: E501\n\n :param device_filters_enabled: The device_filters_enabled of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :type: bool\n \"\"\"\n\n self._device_filters_enabled = device_filters_enabled\n\n @property\n def device_filters(self):\n \"\"\"Gets the device_filters of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n\n\n :return: The device_filters of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :rtype: DeviceFilters\n \"\"\"\n return self._device_filters\n\n @device_filters.setter\n def device_filters(self, device_filters):\n \"\"\"Sets the device_filters of this RequiredSoftwarePolicyConfiguration.\n\n\n :param device_filters: The device_filters of this RequiredSoftwarePolicyConfiguration. 
# noqa: E501\n :type: DeviceFilters\n \"\"\"\n\n self._device_filters = device_filters\n\n @property\n def missed_patch_window(self):\n \"\"\"Gets the missed_patch_window of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n\n Enable or Disable Missed Patch Window setting # noqa: E501\n\n :return: The missed_patch_window of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._missed_patch_window\n\n @missed_patch_window.setter\n def missed_patch_window(self, missed_patch_window):\n \"\"\"Sets the missed_patch_window of this RequiredSoftwarePolicyConfiguration.\n\n Enable or Disable Missed Patch Window setting # noqa: E501\n\n :param missed_patch_window: The missed_patch_window of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :type: bool\n \"\"\"\n\n self._missed_patch_window = missed_patch_window\n\n @property\n def os_family(self):\n \"\"\"Gets the os_family of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n\n\n :return: The os_family of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :rtype: str\n \"\"\"\n return self._os_family\n\n @os_family.setter\n def os_family(self, os_family):\n \"\"\"Sets the os_family of this RequiredSoftwarePolicyConfiguration.\n\n\n :param os_family: The os_family of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :type: str\n \"\"\"\n\n self._os_family = os_family\n\n @property\n def package_name(self):\n \"\"\"Gets the package_name of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n\n\n :return: The package_name of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :rtype: str\n \"\"\"\n return self._package_name\n\n @package_name.setter\n def package_name(self, package_name):\n \"\"\"Sets the package_name of this RequiredSoftwarePolicyConfiguration.\n\n\n :param package_name: The package_name of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :type: str\n \"\"\"\n if package_name is None:\n raise ValueError(\"Invalid value for `package_name`, must not be `None`\") # noqa: E501\n\n self._package_name = package_name\n\n @property\n def package_version(self):\n \"\"\"Gets the package_version of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n\n\n :return: The package_version of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :rtype: str\n \"\"\"\n return self._package_version\n\n @package_version.setter\n def package_version(self, package_version):\n \"\"\"Sets the package_version of this RequiredSoftwarePolicyConfiguration.\n\n\n :param package_version: The package_version of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :type: str\n \"\"\"\n if package_version is None:\n raise ValueError(\"Invalid value for `package_version`, must not be `None`\") # noqa: E501\n\n self._package_version = package_version\n\n @property\n def installation_code(self):\n \"\"\"Gets the installation_code of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n\n\n :return: The installation_code of this RequiredSoftwarePolicyConfiguration. # noqa: E501\n :rtype: str\n \"\"\"\n return self._installation_code\n\n @installation_code.setter\n def installation_code(self, installation_code):\n \"\"\"Sets the installation_code of this RequiredSoftwarePolicyConfiguration.\n\n\n :param installation_code: The installation_code of this RequiredSoftwarePolicyConfiguration. 
# noqa: E501\n :type: str\n \"\"\"\n if installation_code is None:\n raise ValueError(\"Invalid value for `installation_code`, must not be `None`\") # noqa: E501\n\n self._installation_code = installation_code\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(RequiredSoftwarePolicyConfiguration, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, RequiredSoftwarePolicyConfiguration):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "10178469",
"language": "Python",
"matching_score": 1.1983188390731812,
"max_stars_count": 1,
"path": "automox_console_sdk/models/required_software_policy_configuration.py"
},
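A hedged sketch of the RequiredSoftwarePolicyConfiguration model above. Only package_name, package_version and installation_code are mandatory (their setters reject None); the remaining fields keep the generated defaults. The package details are placeholders.

from automox_console_sdk.models.required_software_policy_configuration import (
    RequiredSoftwarePolicyConfiguration,
)

config = RequiredSoftwarePolicyConfiguration(
    package_name="example-agent",                           # hypothetical package name
    package_version="1.2.3",
    installation_code="msiexec /i example-agent.msi /qn",   # hypothetical install command
    os_family="Windows",
    missed_patch_window=True,
)

print(config.to_dict())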
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-09-01\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom setuptools import setup, find_packages # noqa: H301\n\nNAME = \"automox-console-sdk\"\nVERSION = \"0.3.0\"\nDESCRIPTION = \"Automox Console SDK for Python\"\n\nwith open('README.md', 'r') as fh:\n long_description = fh.read()\n\n# To install the library, run the following\n#\n# python setup.py install\n#\n# prerequisite: setuptools\n# http://pypi.python.org/pypi/setuptools\n\nREQUIRES = [\"urllib3 >= 1.15\", \"six >= 1.10\", \"certifi\", \"python-dateutil\"]\n\nsetup(\n name=NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author_email=\"<EMAIL>\",\n url=\"https://github.com/AutomoxCommunity/automox-console-sdk-python\",\n keywords=[\"Swagger\", \"Automox Console API\"],\n install_requires=REQUIRES,\n packages=find_packages(),\n include_package_data=True,\n license='MIT',\n classifiers=[\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.9'\n ]\n)\n",
"id": "11029388",
"language": "Python",
"matching_score": 0.30554747581481934,
"max_stars_count": 1,
"path": "setup.py"
},
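After installing the package built by this setup.py (for example with `python setup.py install`, as the comment in the file suggests, or via pip), a quick sanity check is to import the SDK and compare the installed distribution version against the VERSION pinned above. This is only a sketch and assumes the install has already succeeded.

import importlib.metadata

import automox_console_sdk  # noqa: F401  # the import only works once the package is installed

print(importlib.metadata.version("automox-console-sdk"))  # expected to print 0.3.0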
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nimport automox_console_sdk\nfrom automox_console_sdk.models.inline_response410 import InlineResponse410 # noqa: E501\nfrom automox_console_sdk.rest import ApiException\n\n\nclass TestInlineResponse410(unittest.TestCase):\n \"\"\"InlineResponse410 unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testInlineResponse410(self):\n \"\"\"Test InlineResponse410\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = automox_console_sdk.models.inline_response410.InlineResponse410() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "5573391",
"language": "Python",
"matching_score": 2.651829481124878,
"max_stars_count": 1,
"path": "test/test_inline_response410.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-10-04\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nimport automox_console_sdk\nfrom automox_console_sdk.models.patch_advanced_policy_configuration import PatchAdvancedPolicyConfiguration # noqa: E501\nfrom automox_console_sdk.rest import ApiException\n\n\nclass TestPatchAdvancedPolicyConfiguration(unittest.TestCase):\n \"\"\"PatchAdvancedPolicyConfiguration unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testPatchAdvancedPolicyConfiguration(self):\n \"\"\"Test PatchAdvancedPolicyConfiguration\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = automox_console_sdk.models.patch_advanced_policy_configuration.PatchAdvancedPolicyConfiguration() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "3918525",
"language": "Python",
"matching_score": 2.4179153442382812,
"max_stars_count": 1,
"path": "test/test_patch_advanced_policy_configuration.py"
},
{
"content": "# coding: utf-8\n\n# flake8: noqa\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\n# import apis into sdk package\nfrom automox_console_sdk.api.api_keys_api import APIKeysApi\nfrom automox_console_sdk.api.approvals_api import ApprovalsApi\nfrom automox_console_sdk.api.commands_api import CommandsApi\nfrom automox_console_sdk.api.devices_api import DevicesApi\nfrom automox_console_sdk.api.events_api import EventsApi\nfrom automox_console_sdk.api.extracts_api import ExtractsApi\nfrom automox_console_sdk.api.groups_api import GroupsApi\nfrom automox_console_sdk.api.organizations_api import OrganizationsApi\nfrom automox_console_sdk.api.packages_api import PackagesApi\nfrom automox_console_sdk.api.policies_api import PoliciesApi\nfrom automox_console_sdk.api.reports_api import ReportsApi\nfrom automox_console_sdk.api.users_api import UsersApi\nfrom automox_console_sdk.api.worklets_api import WorkletsApi\n# import ApiClient\nfrom automox_console_sdk.api_client import ApiClient\nfrom automox_console_sdk.configuration import Configuration\n# import models into sdk package\nfrom automox_console_sdk.models.any_ofinline_response2001 import AnyOfinlineResponse2001\nfrom automox_console_sdk.models.api_key import ApiKey\nfrom automox_console_sdk.models.api_key_user import ApiKeyUser\nfrom automox_console_sdk.models.api_keys_id_body import ApiKeysIdBody\nfrom automox_console_sdk.models.approvals_id_body import ApprovalsIdBody\nfrom automox_console_sdk.models.batch import Batch\nfrom automox_console_sdk.models.command import Command\nfrom automox_console_sdk.models.compatibility_checks import CompatibilityChecks\nfrom automox_console_sdk.models.custom_policy import CustomPolicy\nfrom automox_console_sdk.models.custom_policy_configuration import CustomPolicyConfiguration\nfrom automox_console_sdk.models.data_extract import DataExtract\nfrom automox_console_sdk.models.data_extract_parameters import DataExtractParameters\nfrom automox_console_sdk.models.dataextracts_body import DataextractsBody\nfrom automox_console_sdk.models.dataextracts_parameters import DataextractsParameters\nfrom automox_console_sdk.models.device_filters import DeviceFilters\nfrom automox_console_sdk.models.device_filters_inner import DeviceFiltersInner\nfrom automox_console_sdk.models.device_status import DeviceStatus\nfrom automox_console_sdk.models.device_status_policy_statuses import DeviceStatusPolicyStatuses\nfrom automox_console_sdk.models.event import Event\nfrom automox_console_sdk.models.event_data import EventData\nfrom automox_console_sdk.models.filters import Filters\nfrom automox_console_sdk.models.id_files_body import IdFilesBody\nfrom automox_console_sdk.models.id_queues_body import IdQueuesBody\nfrom automox_console_sdk.models.inline_response200 import InlineResponse200\nfrom automox_console_sdk.models.inline_response2001 import InlineResponse2001\nfrom automox_console_sdk.models.inline_response2002 import InlineResponse2002\nfrom automox_console_sdk.models.inline_response2003 import InlineResponse2003\nfrom automox_console_sdk.models.inline_response400 import InlineResponse400\nfrom automox_console_sdk.models.inline_response403 import InlineResponse403\nfrom automox_console_sdk.models.inline_response410 import InlineResponse410\nfrom automox_console_sdk.models.needs_attention import 
NeedsAttention\nfrom automox_console_sdk.models.non_compliant import NonCompliant\nfrom automox_console_sdk.models.non_compliant_non_compliant import NonCompliantNonCompliant\nfrom automox_console_sdk.models.non_compliant_non_compliant_devices import NonCompliantNonCompliantDevices\nfrom automox_console_sdk.models.non_compliant_non_compliant_packages import NonCompliantNonCompliantPackages\nfrom automox_console_sdk.models.non_compliant_non_compliant_policies import NonCompliantNonCompliantPolicies\nfrom automox_console_sdk.models.notification_response_data import NotificationResponseData\nfrom automox_console_sdk.models.notification_sent_data import NotificationSentData\nfrom automox_console_sdk.models.one_of_device_filters_inner_value_items import OneOfDeviceFiltersInnerValueItems\nfrom automox_console_sdk.models.one_of_event_data import OneOfEventData\nfrom automox_console_sdk.models.one_of_patch_advanced_policy_configuration_advanced_filter_right import OneOfPatchAdvancedPolicyConfigurationAdvancedFilterRight\nfrom automox_console_sdk.models.one_of_patch_policy_configuration import OneOfPatchPolicyConfiguration\nfrom automox_console_sdk.models.one_of_server_with_policies_server_policies_items import OneOfServerWithPoliciesServerPoliciesItems\nfrom automox_console_sdk.models.org_endpoint_limit_data import OrgEndpointLimitData\nfrom automox_console_sdk.models.organization import Organization\nfrom automox_console_sdk.models.packages import Packages\nfrom automox_console_sdk.models.patch_advanced_policy_configuration import PatchAdvancedPolicyConfiguration\nfrom automox_console_sdk.models.patch_advanced_policy_configuration_advanced_filter import PatchAdvancedPolicyConfigurationAdvancedFilter\nfrom automox_console_sdk.models.patch_applied_data import PatchAppliedData\nfrom automox_console_sdk.models.patch_failed_data import PatchFailedData\nfrom automox_console_sdk.models.patch_filter_policy_configuration import PatchFilterPolicyConfiguration\nfrom automox_console_sdk.models.patch_policy import PatchPolicy\nfrom automox_console_sdk.models.patch_policy_configuration import PatchPolicyConfiguration\nfrom automox_console_sdk.models.patches import Patches\nfrom automox_console_sdk.models.policy_action_data import PolicyActionData\nfrom automox_console_sdk.models.policy_device_filters_output import PolicyDeviceFiltersOutput\nfrom automox_console_sdk.models.policy_device_filters_output_results import PolicyDeviceFiltersOutputResults\nfrom automox_console_sdk.models.policy_device_filters_output_server_group import PolicyDeviceFiltersOutputServerGroup\nfrom automox_console_sdk.models.policy_device_filters_preview import PolicyDeviceFiltersPreview\nfrom automox_console_sdk.models.policy_stats import PolicyStats\nfrom automox_console_sdk.models.pre_patch import PrePatch\nfrom automox_console_sdk.models.pre_patch_prepatch import PrePatchPrepatch\nfrom automox_console_sdk.models.pre_patch_prepatch_devices import PrePatchPrepatchDevices\nfrom automox_console_sdk.models.required_software_policy import RequiredSoftwarePolicy\nfrom automox_console_sdk.models.required_software_policy_configuration import RequiredSoftwarePolicyConfiguration\nfrom automox_console_sdk.models.saml_data import SamlData\nfrom automox_console_sdk.models.server import Server\nfrom automox_console_sdk.models.server_detail import ServerDetail\nfrom automox_console_sdk.models.server_detail_disks import ServerDetailDISKS\nfrom automox_console_sdk.models.server_detail_nics import ServerDetailNICS\nfrom 
automox_console_sdk.models.server_group import ServerGroup\nfrom automox_console_sdk.models.server_group_create_or_update_request import ServerGroupCreateOrUpdateRequest\nfrom automox_console_sdk.models.server_policy_status import ServerPolicyStatus\nfrom automox_console_sdk.models.server_with_policies import ServerWithPolicies\nfrom automox_console_sdk.models.server_with_policies_policy_status import ServerWithPoliciesPolicyStatus\nfrom automox_console_sdk.models.servers_batch_body import ServersBatchBody\nfrom automox_console_sdk.models.servers_id_body import ServersIdBody\nfrom automox_console_sdk.models.serversbatch_actions import ServersbatchActions\nfrom automox_console_sdk.models.slack_data import SlackData\nfrom automox_console_sdk.models.software_approvals import SoftwareApprovals\nfrom automox_console_sdk.models.system_event_data import SystemEventData\nfrom automox_console_sdk.models.user import User\nfrom automox_console_sdk.models.user_data import UserData\nfrom automox_console_sdk.models.user_features import UserFeatures\nfrom automox_console_sdk.models.user_id_api_keys_body import UserIdApiKeysBody\nfrom automox_console_sdk.models.user_orgs import UserOrgs\nfrom automox_console_sdk.models.user_prefs import UserPrefs\nfrom automox_console_sdk.models.user_rbac_roles import UserRbacRoles\nfrom automox_console_sdk.models.worklet import Worklet\nfrom automox_console_sdk.models.worklet_details import WorkletDetails\nfrom automox_console_sdk.models.wsus_config import WsusConfig\n",
"id": "11008218",
"language": "Python",
"matching_score": 3.3489573001861572,
"max_stars_count": 1,
"path": "automox_console_sdk/__init__.py"
},
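The __init__ module above exposes the generated API classes alongside ApiClient and Configuration. A hedged wiring sketch follows: attaching the bearer token through ApiClient.default_headers is an assumption about the generated client (the API classes only require an ApiClient instance, as their constructors show), and the environment variable name and organization ID are placeholders.

import os

from automox_console_sdk import ApiClient, Configuration, PoliciesApi

client = ApiClient(configuration=Configuration())
client.default_headers["Authorization"] = f"Bearer {os.environ['AUTOMOX_API_KEY']}"  # assumed auth wiring

policies_api = PoliciesApi(client)      # any of the *Api classes imported above is built the same way
policies = policies_api.get_policies(o=1234)   # hypothetical organization ID
print(policies)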
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-10-04\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nimport automox_console_sdk\nfrom automox_console_sdk.models.one_of_server_with_policies_server_policies_items import OneOfServerWithPoliciesServerPoliciesItems # noqa: E501\nfrom automox_console_sdk.rest import ApiException\n\n\nclass TestOneOfServerWithPoliciesServerPoliciesItems(unittest.TestCase):\n \"\"\"OneOfServerWithPoliciesServerPoliciesItems unit test stubs\"\"\"\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def testOneOfServerWithPoliciesServerPoliciesItems(self):\n \"\"\"Test OneOfServerWithPoliciesServerPoliciesItems\"\"\"\n # FIXME: construct object with mandatory attributes with example values\n # model = automox_console_sdk.models.one_of_server_with_policies_server_policies_items.OneOfServerWithPoliciesServerPoliciesItems() # noqa: E501\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "3921049",
"language": "Python",
"matching_score": 0.13557150959968567,
"max_stars_count": 1,
"path": "test/test_one_of_server_with_policies_server_policies_items.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom automox_console_sdk.api_client import ApiClient\n\n\nclass EventsApi(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n Ref: https://github.com/swagger-api/swagger-codegen\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def get_events(self, o, **kwargs): # noqa: E501\n \"\"\"Retrieve All Event Objects for the Authenticated User # noqa: E501\n\n Events Include: Policy Actions, Device Addition/Removal, User Addition/Removal # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_events(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID. Response will include devices for the specified Automox organization. The organization will be assumed based on the API key, if not specified. (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int count_only: Use instead of `page` or `limit` to retrieve only the total count of events for the organization, or when used with an `eventName`, retrieve a count of that specific type of event.\n :param int policy_id: Retrieve events for a specific policy.\n :param int server_id: Retrieve events for a specific device.\n :param int user_id: Retrieve events for a specific user.\n :param str event_name: Name for the event type.\n :param date start_date: Limit responses to include only events after this date. Format: (YYYY-MM-DD).\n :param date end_date: Limit responses to include only events before this date. Format: (YYYY-MM-DD).\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with page parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[Event]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_events_with_http_info(o, **kwargs) # noqa: E501\n else:\n (data) = self.get_events_with_http_info(o, **kwargs) # noqa: E501\n return data\n\n def get_events_with_http_info(self, o, **kwargs): # noqa: E501\n \"\"\"Retrieve All Event Objects for the Authenticated User # noqa: E501\n\n Events Include: Policy Actions, Device Addition/Removal, User Addition/Removal # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_events_with_http_info(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID. Response will include devices for the specified Automox organization. The organization will be assumed based on the API key, if not specified. (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. 
See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int count_only: Use instead of `page` or `limit` to retrieve only the total count of events for the organization, or when used with an `eventName`, retrieve a count of that specific type of event.\n :param int policy_id: Retrieve events for a specific policy.\n :param int server_id: Retrieve events for a specific device.\n :param int user_id: Retrieve events for a specific user.\n :param str event_name: Name for the event type.\n :param date start_date: Limit responses to include only events after this date. Format: (YYYY-MM-DD).\n :param date end_date: Limit responses to include only events before this date. Format: (YYYY-MM-DD).\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with page parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[Event]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o', 'page', 'count_only', 'policy_id', 'server_id', 'user_id', 'event_name', 'start_date', 'end_date', 'limit'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_events\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_events`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n if 'count_only' in params:\n query_params.append(('countOnly', params['count_only'])) # noqa: E501\n if 'policy_id' in params:\n query_params.append(('policyId', params['policy_id'])) # noqa: E501\n if 'server_id' in params:\n query_params.append(('serverId', params['server_id'])) # noqa: E501\n if 'user_id' in params:\n query_params.append(('userId', params['user_id'])) # noqa: E501\n if 'event_name' in params:\n query_params.append(('eventName', params['event_name'])) # noqa: E501\n if 'start_date' in params:\n query_params.append(('startDate', params['start_date'])) # noqa: E501\n if 'end_date' in params:\n query_params.append(('endDate', params['end_date'])) # noqa: E501\n if 'limit' in params:\n query_params.append(('limit', params['limit'])) # noqa: E501\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/events', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[Event]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n 
collection_formats=collection_formats)\n",
"id": "672205",
"language": "Python",
"matching_score": 5.656434535980225,
"max_stars_count": 1,
"path": "automox_console_sdk/api/events_api.py"
},
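A hedged usage sketch for EventsApi.get_events as documented above. The client construction repeats the assumptions from the previous sketch; the organization ID and event name are placeholders. Passing async_req=True returns the request thread, as the docstring notes.

import datetime
import os

from automox_console_sdk import ApiClient, Configuration, EventsApi

client = ApiClient(configuration=Configuration())
client.default_headers["Authorization"] = f"Bearer {os.environ['AUTOMOX_API_KEY']}"  # assumed auth wiring

events_api = EventsApi(client)

events = events_api.get_events(
    o=1234,                                   # required organization ID (placeholder)
    event_name="patch-applied",               # hypothetical event type name
    start_date=datetime.date(2021, 11, 1),
    end_date=datetime.date(2021, 11, 30),
    page=0,
    limit=100,
)
print(len(events))                            # get_events returns list[Event]

thread = events_api.get_events(o=1234, async_req=True)
events_async = thread.get()                   # same list[Event], fetched asynchronously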
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom automox_console_sdk.api_client import ApiClient\n\n\nclass PoliciesApi(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n Ref: https://github.com/swagger-api/swagger-codegen\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def create_policy(self, o, **kwargs): # noqa: E501\n \"\"\"Create a New Policy # noqa: E501\n\n Creates a new policy for a specified organization. For more info on filter types and scheduling, see [Policy and Device Filters, and Scheduling](/developer-portal/policy_filters_schedule). # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_policy(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for the specified policy. (required)\n :param object body:\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.create_policy_with_http_info(o, **kwargs) # noqa: E501\n else:\n (data) = self.create_policy_with_http_info(o, **kwargs) # noqa: E501\n return data\n\n def create_policy_with_http_info(self, o, **kwargs): # noqa: E501\n \"\"\"Create a New Policy # noqa: E501\n\n Creates a new policy for a specified organization. For more info on filter types and scheduling, see [Policy and Device Filters, and Scheduling](/developer-portal/policy_filters_schedule). # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_policy_with_http_info(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for the specified policy. 
(required)\n :param object body:\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o', 'body'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `create_policy`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/policies', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None, # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def delete_policy(self, id, o, **kwargs): # noqa: E501\n \"\"\"Delete Specific Policy Object # noqa: E501\n\n Deletes a specific policy object for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_policy(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Policy ID for the specified policy (required)\n :param int o: Organization ID for the specified policy (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.delete_policy_with_http_info(id, o, **kwargs) # noqa: E501\n else:\n (data) = self.delete_policy_with_http_info(id, o, **kwargs) # noqa: E501\n return data\n\n def delete_policy_with_http_info(self, id, o, **kwargs): # noqa: E501\n \"\"\"Delete Specific Policy Object # noqa: E501\n\n Deletes a specific policy object for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_policy_with_http_info(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Policy ID for the specified policy (required)\n :param int o: Organization ID for the specified policy (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `delete_policy`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `delete_policy`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/policies/{id}', 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None, # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def execute_policy(self, id, o, action, **kwargs): # noqa: E501\n \"\"\"Schedule a Policy for Immediate Remediation # noqa: E501\n\n Schedule a policy for immediate remediation. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.execute_policy(id, o, action, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Policy ID for the specified policy (required)\n :param int o: Organization ID for the specified policy (required)\n :param str action: Specify the action to be taken. Possible values: `remediateAll`, `remediateServer` Format: `action=remediateServer` (required)\n :param int server_id: Specify the specific Server to run the policy for. 
Only applicable when action is set to \\\"remediateServer\\\" Format: serverId=123456\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.execute_policy_with_http_info(id, o, action, **kwargs) # noqa: E501\n else:\n (data) = self.execute_policy_with_http_info(id, o, action, **kwargs) # noqa: E501\n return data\n\n def execute_policy_with_http_info(self, id, o, action, **kwargs): # noqa: E501\n \"\"\"Schedule a Policy for Immediate Remediation # noqa: E501\n\n Schedule a policy for immediate remediation. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.execute_policy_with_http_info(id, o, action, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Policy ID for the specified policy (required)\n :param int o: Organization ID for the specified policy (required)\n :param str action: Specify the action to be taken. Possible values: `remediateAll`, `remediateServer` Format: `action=remediateServer` (required)\n :param int server_id: Specify the specific Server to run the policy for. Only applicable when action is set to \\\"remediateServer\\\" Format: serverId=123456\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o', 'action', 'server_id'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method execute_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `execute_policy`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `execute_policy`\") # noqa: E501\n # verify the required parameter 'action' is set\n if ('action' not in params or\n params['action'] is None):\n raise ValueError(\"Missing the required parameter `action` when calling `execute_policy`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n if 'action' in params:\n query_params.append(('action', params['action'])) # noqa: E501\n if 'server_id' in params:\n query_params.append(('serverId', params['server_id'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/policies/{id}/action', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None, # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n 
_return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def generate_policy_device_filter_preview(self, body, o, **kwargs): # noqa: E501\n \"\"\"Policy Device Filters Preview # noqa: E501\n\n Generate a preview of the list of devices that matches the provided device filter set. For more information, see [Policy and Device Filters, and Scheduling](/developer-portal/policy_filters_schedule). # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.generate_policy_device_filter_preview(body, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param PolicyDeviceFiltersPreview body: (required)\n :param int o: Organization ID. If omitted, results will include all organizations for the authenticated user. (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 200 with a default of 25. Use with `page` parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[PolicyDeviceFiltersOutput]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.generate_policy_device_filter_preview_with_http_info(body, o, **kwargs) # noqa: E501\n else:\n (data) = self.generate_policy_device_filter_preview_with_http_info(body, o, **kwargs) # noqa: E501\n return data\n\n def generate_policy_device_filter_preview_with_http_info(self, body, o, **kwargs): # noqa: E501\n \"\"\"Policy Device Filters Preview # noqa: E501\n\n Generate a preview of the list of devices that matches the provided device filter set. For more information, see [Policy and Device Filters, and Scheduling](/developer-portal/policy_filters_schedule). # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.generate_policy_device_filter_preview_with_http_info(body, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param PolicyDeviceFiltersPreview body: (required)\n :param int o: Organization ID. If omitted, results will include all organizations for the authenticated user. (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 200 with a default of 25. Use with `page` parameter. 
See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[PolicyDeviceFiltersOutput]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['body', 'o', 'page', 'limit'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method generate_policy_device_filter_preview\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params or\n params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `generate_policy_device_filter_preview`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `generate_policy_device_filter_preview`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n if 'limit' in params:\n query_params.append(('limit', params['limit'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/policies/device-filters-preview', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[PolicyDeviceFiltersOutput]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_policies(self, o, **kwargs): # noqa: E501\n \"\"\"List All Policy Objects # noqa: E501\n\n Retrieves a list of all policy objects for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_policies(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for retrieving policies (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. 
See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[object]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_policies_with_http_info(o, **kwargs) # noqa: E501\n else:\n (data) = self.get_policies_with_http_info(o, **kwargs) # noqa: E501\n return data\n\n def get_policies_with_http_info(self, o, **kwargs): # noqa: E501\n \"\"\"List All Policy Objects # noqa: E501\n\n Retrieves a list of all policy objects for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_policies_with_http_info(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for retrieving policies (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[object]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o', 'page', 'limit'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_policies\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_policies`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n if 'limit' in params:\n query_params.append(('limit', params['limit'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/policies', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[object]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_policy(self, id, o, **kwargs): # noqa: E501\n \"\"\"List Specific Policy Object # noqa: E501\n\n Returns a specific policy object for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_policy(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Policy ID for the specified policy (required)\n :param int o: Organization ID for the specified policy (required)\n :return: InlineResponse2001\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_policy_with_http_info(id, o, **kwargs) # noqa: E501\n else:\n (data) = self.get_policy_with_http_info(id, o, **kwargs) # noqa: E501\n return data\n\n def get_policy_with_http_info(self, id, o, **kwargs): # noqa: E501\n \"\"\"List Specific Policy Object # noqa: E501\n\n Returns a specific policy object for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_policy_with_http_info(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Policy ID for the specified policy (required)\n :param int o: Organization ID for the specified policy (required)\n :return: InlineResponse2001\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `get_policy`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_policy`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/policies/{id}', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2001', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_policy_stats(self, o, **kwargs): # noqa: E501\n \"\"\"List Policy Compliance Stats # noqa: E501\n\n Retrieve policy compliance statistics for all policies. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_policy_stats(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for retrieving policy stats. Omit this to retrieve stats for policies in all organizations that the authenticated user can access (required)\n :return: list[PolicyStats]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_policy_stats_with_http_info(o, **kwargs) # noqa: E501\n else:\n (data) = self.get_policy_stats_with_http_info(o, **kwargs) # noqa: E501\n return data\n\n def get_policy_stats_with_http_info(self, o, **kwargs): # noqa: E501\n \"\"\"List Policy Compliance Stats # noqa: E501\n\n Retrieve policy compliance statistics for all policies. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_policy_stats_with_http_info(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for retrieving policy stats. Omit this to retrieve stats for policies in all organizations that the authenticated user can access (required)\n :return: list[PolicyStats]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_policy_stats\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_policy_stats`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/policystats', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[PolicyStats]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def update_policy(self, body, o, id, **kwargs): # noqa: E501\n \"\"\"Updates a specific policy object for the authenticated user. # noqa: E501\n\n Updates a specific policy object for the authenticated user. For more info on filter types and scheduling, see [Policy and Device Filters, and Scheduling](/developer-portal/policy_filters_schedule). # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_policy(body, o, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param object body: (required)\n :param int o: Organization ID for the specified policy (required)\n :param int id: Policy ID for the specified policy (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.update_policy_with_http_info(body, o, id, **kwargs) # noqa: E501\n else:\n (data) = self.update_policy_with_http_info(body, o, id, **kwargs) # noqa: E501\n return data\n\n def update_policy_with_http_info(self, body, o, id, **kwargs): # noqa: E501\n \"\"\"Updates a specific policy object for the authenticated user. # noqa: E501\n\n Updates a specific policy object for the authenticated user. For more info on filter types and scheduling, see [Policy and Device Filters, and Scheduling](/developer-portal/policy_filters_schedule). # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_policy_with_http_info(body, o, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param object body: (required)\n :param int o: Organization ID for the specified policy (required)\n :param int id: Policy ID for the specified policy (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['body', 'o', 'id'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update_policy\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params or\n params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `update_policy`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `update_policy`\") # noqa: E501\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `update_policy`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/policies/{id}', 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n 
files=local_var_files,\n response_type=None, # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def upload_policy_file(self, id, **kwargs): # noqa: E501\n \"\"\"Upload installation file to a Required Software policy. # noqa: E501\n\n Upload file to a Required Software policy. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.upload_policy_file(id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: The policy ID for the target policy. (required)\n :param str file:\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.upload_policy_file_with_http_info(id, **kwargs) # noqa: E501\n else:\n (data) = self.upload_policy_file_with_http_info(id, **kwargs) # noqa: E501\n return data\n\n def upload_policy_file_with_http_info(self, id, **kwargs): # noqa: E501\n \"\"\"Upload installation file to a Required Software policy. # noqa: E501\n\n Upload file to a Required Software policy. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.upload_policy_file_with_http_info(id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: The policy ID for the target policy. (required)\n :param str file:\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'file'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method upload_policy_file\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `upload_policy_file`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n if 'file' in params:\n local_var_files['file'] = params['file'] # noqa: E501\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['multipart/form-data']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/policies/{id}/files', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None, # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n 
_preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"id": "8490343",
"language": "Python",
"matching_score": 5.698761463165283,
"max_stars_count": 1,
"path": "automox_console_sdk/api/policies_api.py"
},
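The policies_api.py entry above is auto-generated swagger-codegen code, so its docstrings double as the call contract. Below is a minimal usage sketch; the organization ID, policy ID, and the AUTOMOX_API_KEY environment variable are placeholders, and the set_default_header call is assumed from the standard swagger-codegen ApiClient rather than shown in this dump.

import os

from automox_console_sdk.api.policies_api import PoliciesApi
from automox_console_sdk.api_client import ApiClient

# Bearer auth ('bearerAuth' in the generated code); the header name/format here is an assumption.
client = ApiClient()
client.set_default_header('Authorization', 'Bearer %s' % os.environ.get('AUTOMOX_API_KEY', ''))

policies = PoliciesApi(client)
# List policy objects for a placeholder organization, 25 per page.
print(policies.get_policies(o=12345, page=0, limit=25))
# Schedule a placeholder policy for immediate remediation on all devices.
policies.execute_policy(id=67890, o=12345, action='remediateAll')

Per the generated docstrings, passing async_req=True to any of these calls returns a request thread whose get() yields the same result asynchronously.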
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom automox_console_sdk.api_client import ApiClient\n\n\nclass GroupsApi(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n Ref: https://github.com/swagger-api/swagger-codegen\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def create_server_group(self, o, **kwargs): # noqa: E501\n \"\"\"Creates a new server group. # noqa: E501\n\n Creates a new server group. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_server_group(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for the created group. (required)\n :param ServerGroupCreateOrUpdateRequest body:\n :return: ServerGroup\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.create_server_group_with_http_info(o, **kwargs) # noqa: E501\n else:\n (data) = self.create_server_group_with_http_info(o, **kwargs) # noqa: E501\n return data\n\n def create_server_group_with_http_info(self, o, **kwargs): # noqa: E501\n \"\"\"Creates a new server group. # noqa: E501\n\n Creates a new server group. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_server_group_with_http_info(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for the created group. 
(required)\n :param ServerGroupCreateOrUpdateRequest body:\n :return: ServerGroup\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o', 'body'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_server_group\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `create_server_group`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servergroups', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ServerGroup', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def delete_server_group(self, id, o, **kwargs): # noqa: E501\n \"\"\"Deletes a server group. # noqa: E501\n\n **NOTE:** Any devices that belong to the deleted group will be moved to the organization's Default Group # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_server_group(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server Group ID for the specified group. (required)\n :param int o: Organization ID for the created group. (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.delete_server_group_with_http_info(id, o, **kwargs) # noqa: E501\n else:\n (data) = self.delete_server_group_with_http_info(id, o, **kwargs) # noqa: E501\n return data\n\n def delete_server_group_with_http_info(self, id, o, **kwargs): # noqa: E501\n \"\"\"Deletes a server group. # noqa: E501\n\n **NOTE:** Any devices that belong to the deleted group will be moved to the organization's Default Group # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_server_group_with_http_info(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server Group ID for the specified group. 
(required)\n :param int o: Organization ID for the created group. (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_server_group\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `delete_server_group`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `delete_server_group`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servergroups/{id}', 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None, # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_server_group(self, id, o, **kwargs): # noqa: E501\n \"\"\"List Specific Group Object # noqa: E501\n\n Returns a specific server group object for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_server_group(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server Group ID for the specified group. (required)\n :param int o: Organization ID for the specified group. (required)\n :return: ServerGroup\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_server_group_with_http_info(id, o, **kwargs) # noqa: E501\n else:\n (data) = self.get_server_group_with_http_info(id, o, **kwargs) # noqa: E501\n return data\n\n def get_server_group_with_http_info(self, id, o, **kwargs): # noqa: E501\n \"\"\"List Specific Group Object # noqa: E501\n\n Returns a specific server group object for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_server_group_with_http_info(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server Group ID for the specified group. 
(required)\n :param int o: Organization ID for the specified group. (required)\n :return: ServerGroup\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_server_group\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `get_server_group`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_server_group`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servergroups/{id}', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ServerGroup', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_server_groups(self, o, **kwargs): # noqa: E501\n \"\"\"List All Group Objects # noqa: E501\n\n Retrieves all server group objects for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_server_groups(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for retrieving groups. (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[ServerGroup]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_server_groups_with_http_info(o, **kwargs) # noqa: E501\n else:\n (data) = self.get_server_groups_with_http_info(o, **kwargs) # noqa: E501\n return data\n\n def get_server_groups_with_http_info(self, o, **kwargs): # noqa: E501\n \"\"\"List All Group Objects # noqa: E501\n\n Retrieves all server group objects for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_server_groups_with_http_info(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for retrieving groups. (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[ServerGroup]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o', 'page', 'limit'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_server_groups\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_server_groups`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n if 'limit' in params:\n query_params.append(('limit', params['limit'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servergroups', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[ServerGroup]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def update_server_group(self, body, o, id, **kwargs): # noqa: E501\n \"\"\"Updates an existing server group. # noqa: E501\n\n Updates server group settings. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_server_group(body, o, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param ServerGroupCreateOrUpdateRequest body: (required)\n :param int o: Organization ID for the created group. (required)\n :param int id: Server Group ID for the specified group. 
(required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.update_server_group_with_http_info(body, o, id, **kwargs) # noqa: E501\n else:\n (data) = self.update_server_group_with_http_info(body, o, id, **kwargs) # noqa: E501\n return data\n\n def update_server_group_with_http_info(self, body, o, id, **kwargs): # noqa: E501\n \"\"\"Updates an existing server group. # noqa: E501\n\n Updates server group settings. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_server_group_with_http_info(body, o, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param ServerGroupCreateOrUpdateRequest body: (required)\n :param int o: Organization ID for the created group. (required)\n :param int id: Server Group ID for the specified group. (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['body', 'o', 'id'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update_server_group\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params or\n params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `update_server_group`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `update_server_group`\") # noqa: E501\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `update_server_group`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servergroups/{id}', 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None, # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"id": "3563658",
"language": "Python",
"matching_score": 4.704336643218994,
"max_stars_count": 1,
"path": "automox_console_sdk/api/groups_api.py"
},
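For the groups_api.py entry above, a similar hedged sketch; the IDs are placeholders, and the ServerGroupCreateOrUpdateRequest body for create/update calls is omitted because its fields are not visible in this dump.

import os

from automox_console_sdk.api.groups_api import GroupsApi
from automox_console_sdk.api_client import ApiClient

client = ApiClient()
client.set_default_header('Authorization', 'Bearer %s' % os.environ.get('AUTOMOX_API_KEY', ''))  # assumed helper

groups = GroupsApi(client)
# Page through all server group objects for a placeholder organization.
for group in groups.get_server_groups(o=12345, page=0, limit=500):
    print(group)
# Fetch one group; deleting it would move its devices to the organization's Default Group (per the docstring).
one_group = groups.get_server_group(id=222, o=12345)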
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom automox_console_sdk.api_client import ApiClient\n\n\nclass PackagesApi(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n Ref: https://github.com/swagger-api/swagger-codegen\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def get_device_packages(self, id, o, **kwargs): # noqa: E501\n \"\"\"List Software Packages for Specific Device # noqa: E501\n\n Returns the software packages for the specified device. Packages Include: Pending updates and currently installed updates/applications # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_device_packages(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device. (required)\n :param int o: Organization ID for the specified device (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[Packages]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_device_packages_with_http_info(id, o, **kwargs) # noqa: E501\n else:\n (data) = self.get_device_packages_with_http_info(id, o, **kwargs) # noqa: E501\n return data\n\n def get_device_packages_with_http_info(self, id, o, **kwargs): # noqa: E501\n \"\"\"List Software Packages for Specific Device # noqa: E501\n\n Returns the software packages for the specified device. Packages Include: Pending updates and currently installed updates/applications # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_device_packages_with_http_info(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device. (required)\n :param int o: Organization ID for the specified device (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. 
See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[Packages]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o', 'page', 'limit'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_device_packages\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `get_device_packages`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_device_packages`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n if 'limit' in params:\n query_params.append(('limit', params['limit'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servers/{id}/packages', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[Packages]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_organization_packages(self, id, o, **kwargs): # noqa: E501\n \"\"\"List All Software Packages for All Devices # noqa: E501\n\n This will list all pending/installed updates, and all installed applications, for all devices in a given organization. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_organization_packages(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Organization ID for retrieving package list. (required)\n :param int o: Organization ID of the target organization. (required)\n :param int include_unmanaged: Include applications Automox does not currently support for patching.\n :param int awaiting: Filter based installation status of package. `awaiting=1`: Packages that are currently available but not installed. `awaiting=0`: Packages that are already installed.\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. 
See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[Packages]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_organization_packages_with_http_info(id, o, **kwargs) # noqa: E501\n else:\n (data) = self.get_organization_packages_with_http_info(id, o, **kwargs) # noqa: E501\n return data\n\n def get_organization_packages_with_http_info(self, id, o, **kwargs): # noqa: E501\n \"\"\"List All Software Packages for All Devices # noqa: E501\n\n This will list all pending/installed updates, and all installed applications, for all devices in a given organization. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_organization_packages_with_http_info(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Organization ID for retrieving package list. (required)\n :param int o: Organization ID of the target organization. (required)\n :param int include_unmanaged: Include applications Automox does not currently support for patching.\n :param int awaiting: Filter based installation status of package. `awaiting=1`: Packages that are currently available but not installed. `awaiting=0`: Packages that are already installed.\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[Packages]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o', 'include_unmanaged', 'awaiting', 'page', 'limit'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_organization_packages\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `get_organization_packages`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_organization_packages`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'include_unmanaged' in params:\n query_params.append(('includeUnmanaged', params['include_unmanaged'])) # noqa: E501\n if 'awaiting' in params:\n query_params.append(('awaiting', params['awaiting'])) # noqa: E501\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n if 'limit' in params:\n query_params.append(('limit', params['limit'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n 
body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/orgs/{id}/packages', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[Packages]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"id": "7782641",
"language": "Python",
"matching_score": 6.655290603637695,
"max_stars_count": 1,
"path": "automox_console_sdk/api/packages_api.py"
},
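And for the packages_api.py entry above, a sketch of its two listing calls; again the IDs are placeholders and authentication is assumed to be wired up as in the earlier sketches.

import os

from automox_console_sdk.api.packages_api import PackagesApi
from automox_console_sdk.api_client import ApiClient

client = ApiClient()
client.set_default_header('Authorization', 'Bearer %s' % os.environ.get('AUTOMOX_API_KEY', ''))  # assumed helper

packages = PackagesApi(client)
# Pending-only packages across an organization (awaiting=1 per the generated docstring).
pending = packages.get_organization_packages(id=12345, o=12345, awaiting=1, page=0, limit=500)
# All packages reported for a single placeholder device.
device_packages = packages.get_device_packages(id=98765, o=12345)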
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom automox_console_sdk.api_client import ApiClient\n\n\nclass DevicesApi(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n Ref: https://github.com/swagger-api/swagger-codegen\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def batch_update_devices(self, body, o, **kwargs): # noqa: E501\n \"\"\"Updates multiple devices (server objects). # noqa: E501\n\n Updates multiple devices (server objects) in a batch. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.batch_update_devices(body, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param ServersBatchBody body: Update devices (required)\n :param int o: Organization ID for the specified devices (required)\n :return: Batch\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.batch_update_devices_with_http_info(body, o, **kwargs) # noqa: E501\n else:\n (data) = self.batch_update_devices_with_http_info(body, o, **kwargs) # noqa: E501\n return data\n\n def batch_update_devices_with_http_info(self, body, o, **kwargs): # noqa: E501\n \"\"\"Updates multiple devices (server objects). # noqa: E501\n\n Updates multiple devices (server objects) in a batch. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.batch_update_devices_with_http_info(body, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param ServersBatchBody body: Update devices (required)\n :param int o: Organization ID for the specified devices (required)\n :return: Batch\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['body', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method batch_update_devices\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params or\n params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `batch_update_devices`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `batch_update_devices`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servers/batch', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Batch', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def delete_device(self, id, o, **kwargs): # noqa: E501\n \"\"\"Deletes a device (server object) from the organization. # noqa: E501\n\n **NOTE:** The associated command queue will be purged. Any pending custom commands for the device are removed. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_device(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device (required)\n :param int o: Organization ID for the specified device (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.delete_device_with_http_info(id, o, **kwargs) # noqa: E501\n else:\n (data) = self.delete_device_with_http_info(id, o, **kwargs) # noqa: E501\n return data\n\n def delete_device_with_http_info(self, id, o, **kwargs): # noqa: E501\n \"\"\"Deletes a device (server object) from the organization. # noqa: E501\n\n **NOTE:** The associated command queue will be purged. Any pending custom commands for the device are removed. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_device_with_http_info(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device (required)\n :param int o: Organization ID for the specified device (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_device\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `delete_device`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `delete_device`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servers/{id}', 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None, # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_device_packages(self, id, o, **kwargs): # noqa: E501\n \"\"\"List Software Packages for Specific Device # noqa: E501\n\n Returns the software packages for the specified device. 
Packages Include: Pending updates and currently installed updates/applications # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_device_packages(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device. (required)\n :param int o: Organization ID for the specified device (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[Packages]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_device_packages_with_http_info(id, o, **kwargs) # noqa: E501\n else:\n (data) = self.get_device_packages_with_http_info(id, o, **kwargs) # noqa: E501\n return data\n\n def get_device_packages_with_http_info(self, id, o, **kwargs): # noqa: E501\n \"\"\"List Software Packages for Specific Device # noqa: E501\n\n Returns the software packages for the specified device. Packages Include: Pending updates and currently installed updates/applications # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_device_packages_with_http_info(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device. (required)\n :param int o: Organization ID for the specified device (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. 
See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[Packages]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o', 'page', 'limit'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_device_packages\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `get_device_packages`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_device_packages`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n if 'limit' in params:\n query_params.append(('limit', params['limit'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servers/{id}/packages', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[Packages]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_device_queues(self, id, o, **kwargs): # noqa: E501\n \"\"\"Upcoming Commands Queue for Specific Device # noqa: E501\n\n Returns the queue of upcoming commands for the specified device. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_device_queues(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device. (required)\n :param int o: Organization ID for the specified device. (required)\n :return: list[Command]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_device_queues_with_http_info(id, o, **kwargs) # noqa: E501\n else:\n (data) = self.get_device_queues_with_http_info(id, o, **kwargs) # noqa: E501\n return data\n\n def get_device_queues_with_http_info(self, id, o, **kwargs): # noqa: E501\n \"\"\"Upcoming Commands Queue for Specific Device # noqa: E501\n\n Returns the queue of upcoming commands for the specified device. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_device_queues_with_http_info(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device. (required)\n :param int o: Organization ID for the specified device. (required)\n :return: list[Command]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_device_queues\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `get_device_queues`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_device_queues`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servers/{id}/queues', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[Command]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_devices(self, o, limit, page, **kwargs): # noqa: E501\n \"\"\"List All Devices # noqa: E501\n\n Retrieves a detailed list of all devices (server objects) for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_devices(o, limit, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID. Response will include devices for the specified Automox Organization. The organization will be assumed based on the API key, if not specified. (required)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with page parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination) (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. 
See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination) (required)\n :param int group_id: Filter based on membership to a specific Server Group ID\n :param int ps_version: Shows version of PowerShell running on the device, if applicable.\n :param int pending: Filter based on status of pending patches. Format: `pending=1`\n :param str patch_status: Filter based on presence of ANY available patches that aren't already installed. Value must be 'missing' Format: `patchStatus=missing`\n :param int policy_id: Filter based on association to a given Policy ID. Format: `policyId=12345`\n :param int exception: Filter based on the exception property to exclude the device from reports. Device is still monitored when excluded from reports and statistics. Format: `exception=1`\n :param int managed: Filter based on device's Managed status. Unmanaged indicates no linked policies. Format: `managed=0`\n :param bool filters_is_compatible:\n :param str sort_columns: The column you want to sort by.\n :param str sort_dir: The sort direction, ascending or descending.\n :return: list[Server]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_devices_with_http_info(o, limit, page, **kwargs) # noqa: E501\n else:\n (data) = self.get_devices_with_http_info(o, limit, page, **kwargs) # noqa: E501\n return data\n\n def get_devices_with_http_info(self, o, limit, page, **kwargs): # noqa: E501\n \"\"\"List All Devices # noqa: E501\n\n Retrieves a detailed list of all devices (server objects) for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_devices_with_http_info(o, limit, page, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID. Response will include devices for the specified Automox Organization. The organization will be assumed based on the API key, if not specified. (required)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with page parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination) (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination) (required)\n :param int group_id: Filter based on membership to a specific Server Group ID\n :param int ps_version: Shows version of PowerShell running on the device, if applicable.\n :param int pending: Filter based on status of pending patches. Format: `pending=1`\n :param str patch_status: Filter based on presence of ANY available patches that aren't already installed. Value must be 'missing' Format: `patchStatus=missing`\n :param int policy_id: Filter based on association to a given Policy ID. Format: `policyId=12345`\n :param int exception: Filter based on the exception property to exclude the device from reports. Device is still monitored when excluded from reports and statistics. Format: `exception=1`\n :param int managed: Filter based on device's Managed status. Unmanaged indicates no linked policies. 
Format: `managed=0`\n :param bool filters_is_compatible:\n :param str sort_columns: The column you want to sort by.\n :param str sort_dir: The sort direction, ascending or descending.\n :return: list[Server]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o', 'limit', 'page', 'group_id', 'ps_version', 'pending', 'patch_status', 'policy_id', 'exception', 'managed', 'filters_is_compatible', 'sort_columns', 'sort_dir'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_devices\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_devices`\") # noqa: E501\n # verify the required parameter 'limit' is set\n if ('limit' not in params or\n params['limit'] is None):\n raise ValueError(\"Missing the required parameter `limit` when calling `get_devices`\") # noqa: E501\n # verify the required parameter 'page' is set\n if ('page' not in params or\n params['page'] is None):\n raise ValueError(\"Missing the required parameter `page` when calling `get_devices`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'group_id' in params:\n query_params.append(('groupId', params['group_id'])) # noqa: E501\n if 'ps_version' in params:\n query_params.append(('PS_VERSION', params['ps_version'])) # noqa: E501\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n if 'pending' in params:\n query_params.append(('pending', params['pending'])) # noqa: E501\n if 'patch_status' in params:\n query_params.append(('patchStatus', params['patch_status'])) # noqa: E501\n if 'policy_id' in params:\n query_params.append(('policyId', params['policy_id'])) # noqa: E501\n if 'exception' in params:\n query_params.append(('exception', params['exception'])) # noqa: E501\n if 'managed' in params:\n query_params.append(('managed', params['managed'])) # noqa: E501\n if 'limit' in params:\n query_params.append(('limit', params['limit'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n if 'filters_is_compatible' in params:\n query_params.append(('filters[is_compatible]', params['filters_is_compatible'])) # noqa: E501\n if 'sort_columns' in params:\n query_params.append(('sortColumns[]', params['sort_columns'])) # noqa: E501\n if 'sort_dir' in params:\n query_params.append(('sortDir', params['sort_dir'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servers', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[Server]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n 
_request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_server(self, id, o, **kwargs): # noqa: E501\n \"\"\"List a Specific Device # noqa: E501\n\n Returns a specific device (server object) for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_server(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device. (required)\n :param int o: Organization ID for the specified device. (required)\n :param int ps_version: The version of PowerShell running on the device.\n :return: ServerWithPolicies\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_server_with_http_info(id, o, **kwargs) # noqa: E501\n else:\n (data) = self.get_server_with_http_info(id, o, **kwargs) # noqa: E501\n return data\n\n def get_server_with_http_info(self, id, o, **kwargs): # noqa: E501\n \"\"\"List a Specific Device # noqa: E501\n\n Returns a specific device (server object) for the authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_server_with_http_info(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device. (required)\n :param int o: Organization ID for the specified device. (required)\n :param int ps_version: The version of PowerShell running on the device.\n :return: ServerWithPolicies\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o', 'ps_version'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_server\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `get_server`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_server`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n if 'ps_version' in params:\n query_params.append(('PS_VERSION', params['ps_version'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servers/{id}', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ServerWithPolicies', # noqa: E501\n 
auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def issue_device_command(self, o, id, **kwargs): # noqa: E501\n \"\"\"Issue a command to a device # noqa: E501\n\n Force a device to Scan, Patch, or Reboot for immediate execution. **Note: The `installAllUpdates` option ignores any Policy Filters** # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.issue_device_command(o, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for the specified device (required)\n :param int id: Server ID for the specified device (required)\n :param IdQueuesBody body:\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.issue_device_command_with_http_info(o, id, **kwargs) # noqa: E501\n else:\n (data) = self.issue_device_command_with_http_info(o, id, **kwargs) # noqa: E501\n return data\n\n def issue_device_command_with_http_info(self, o, id, **kwargs): # noqa: E501\n \"\"\"Issue a command to a device # noqa: E501\n\n Force a device to Scan, Patch, or Reboot for immediate execution. **Note: The `installAllUpdates` option ignores any Policy Filters** # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.issue_device_command_with_http_info(o, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for the specified device (required)\n :param int id: Server ID for the specified device (required)\n :param IdQueuesBody body:\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o', 'id', 'body'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method issue_device_command\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `issue_device_command`\") # noqa: E501\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `issue_device_command`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = 
self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servers/{id}/queues', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None, # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def update_device(self, body, o, id, **kwargs): # noqa: E501\n \"\"\"Updates a device (server object). # noqa: E501\n\n Send a JSON object in the request body to update device details). # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_device(body, o, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param ServersIdBody body: Device update (required)\n :param int o: Organization ID for the specified device. (required)\n :param int id: Server ID for the specified device. (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.update_device_with_http_info(body, o, id, **kwargs) # noqa: E501\n else:\n (data) = self.update_device_with_http_info(body, o, id, **kwargs) # noqa: E501\n return data\n\n def update_device_with_http_info(self, body, o, id, **kwargs): # noqa: E501\n \"\"\"Updates a device (server object). # noqa: E501\n\n Send a JSON object in the request body to update device details). # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_device_with_http_info(body, o, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param ServersIdBody body: Device update (required)\n :param int o: Organization ID for the specified device. (required)\n :param int id: Server ID for the specified device. 
(required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['body', 'o', 'id'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update_device\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'body' is set\n if ('body' not in params or\n params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `update_device`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `update_device`\") # noqa: E501\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `update_device`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servers/{id}', 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None, # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"id": "5441341",
"language": "Python",
"matching_score": 6.587990760803223,
"max_stars_count": 1,
"path": "automox_console_sdk/api/devices_api.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom automox_console_sdk.api_client import ApiClient\n\n\nclass CommandsApi(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n Ref: https://github.com/swagger-api/swagger-codegen\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def get_device_queues(self, id, o, **kwargs): # noqa: E501\n \"\"\"Upcoming Commands Queue for Specific Device # noqa: E501\n\n Returns the queue of upcoming commands for the specified device. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_device_queues(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device. (required)\n :param int o: Organization ID for the specified device. (required)\n :return: list[Command]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_device_queues_with_http_info(id, o, **kwargs) # noqa: E501\n else:\n (data) = self.get_device_queues_with_http_info(id, o, **kwargs) # noqa: E501\n return data\n\n def get_device_queues_with_http_info(self, id, o, **kwargs): # noqa: E501\n \"\"\"Upcoming Commands Queue for Specific Device # noqa: E501\n\n Returns the queue of upcoming commands for the specified device. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_device_queues_with_http_info(id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: Server ID for the specified device. (required)\n :param int o: Organization ID for the specified device. 
(required)\n :return: list[Command]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_device_queues\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `get_device_queues`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_device_queues`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servers/{id}/queues', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[Command]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def issue_device_command(self, o, id, **kwargs): # noqa: E501\n \"\"\"Issue a command to a device # noqa: E501\n\n Force a device to Scan, Patch, or Reboot for immediate execution. **Note: The `installAllUpdates` option ignores any Policy Filters** # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.issue_device_command(o, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for the specified device (required)\n :param int id: Server ID for the specified device (required)\n :param IdQueuesBody body:\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.issue_device_command_with_http_info(o, id, **kwargs) # noqa: E501\n else:\n (data) = self.issue_device_command_with_http_info(o, id, **kwargs) # noqa: E501\n return data\n\n def issue_device_command_with_http_info(self, o, id, **kwargs): # noqa: E501\n \"\"\"Issue a command to a device # noqa: E501\n\n Force a device to Scan, Patch, or Reboot for immediate execution. **Note: The `installAllUpdates` option ignores any Policy Filters** # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.issue_device_command_with_http_info(o, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: Organization ID for the specified device (required)\n :param int id: Server ID for the specified device (required)\n :param IdQueuesBody body:\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o', 'id', 'body'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method issue_device_command\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `issue_device_command`\") # noqa: E501\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `issue_device_command`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/servers/{id}/queues', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None, # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"id": "2934157",
"language": "Python",
"matching_score": 3.030552387237549,
"max_stars_count": 1,
"path": "automox_console_sdk/api/commands_api.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom automox_console_sdk.api_client import ApiClient\n\n\nclass UsersApi(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n Ref: https://github.com/swagger-api/swagger-codegen\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def decrypt_user_api_key(self, user_id, id, o, **kwargs): # noqa: E501\n \"\"\"Decrypt User API Key # noqa: E501\n\n This endpoint allows you to decrypt the API key for an authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.decrypt_user_api_key(user_id, id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: The ID of the user to decrypt keys for. (required)\n :param int id: The ID of the API key to decrypt (required)\n :param int o: The Organization of the user. (required)\n :return: InlineResponse2003\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.decrypt_user_api_key_with_http_info(user_id, id, o, **kwargs) # noqa: E501\n else:\n (data) = self.decrypt_user_api_key_with_http_info(user_id, id, o, **kwargs) # noqa: E501\n return data\n\n def decrypt_user_api_key_with_http_info(self, user_id, id, o, **kwargs): # noqa: E501\n \"\"\"Decrypt User API Key # noqa: E501\n\n This endpoint allows you to decrypt the API key for an authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.decrypt_user_api_key_with_http_info(user_id, id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: The ID of the user to decrypt keys for. (required)\n :param int id: The ID of the API key to decrypt (required)\n :param int o: The Organization of the user. 
(required)\n :return: InlineResponse2003\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['user_id', 'id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method decrypt_user_api_key\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'user_id' is set\n if ('user_id' not in params or\n params['user_id'] is None):\n raise ValueError(\"Missing the required parameter `user_id` when calling `decrypt_user_api_key`\") # noqa: E501\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `decrypt_user_api_key`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `decrypt_user_api_key`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'user_id' in params:\n path_params['userId'] = params['user_id'] # noqa: E501\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/users/{userId}/api_keys/{id}/decrypt', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2003', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_user_by_id(self, user_id, o, **kwargs): # noqa: E501\n \"\"\"Retrieves a user by user ID # noqa: E501\n\n Retrieves a user by user ID # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_user_by_id(user_id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: The User ID of the user to retrieve (required)\n :param int o: The Organization whose users you wish to list. If you omit this value, the application will detect and use your default Organization. 
(required)\n :return: list[User]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_user_by_id_with_http_info(user_id, o, **kwargs) # noqa: E501\n else:\n (data) = self.get_user_by_id_with_http_info(user_id, o, **kwargs) # noqa: E501\n return data\n\n def get_user_by_id_with_http_info(self, user_id, o, **kwargs): # noqa: E501\n \"\"\"Retrieves a user by user ID # noqa: E501\n\n Retrieves a user by user ID # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_user_by_id_with_http_info(user_id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: The User ID of the user to retrieve (required)\n :param int o: The Organization whose users you wish to list. If you omit this value, the application will detect and use your default Organization. (required)\n :return: list[User]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['user_id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_user_by_id\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'user_id' is set\n if ('user_id' not in params or\n params['user_id'] is None):\n raise ValueError(\"Missing the required parameter `user_id` when calling `get_user_by_id`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_user_by_id`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'user_id' in params:\n path_params['userId'] = params['user_id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/users/{userId}', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[User]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_users(self, o, **kwargs): # noqa: E501\n \"\"\"List All Users With Access to a Given Organization # noqa: E501\n\n Retrieves a list of all users with access to an organization # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_users(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: The Organization whose users you wish to list. 
If you omit this value, the application will detect and use your default Organization. (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500 with a default of 500. Use with `page` parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[User]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_users_with_http_info(o, **kwargs) # noqa: E501\n else:\n (data) = self.get_users_with_http_info(o, **kwargs) # noqa: E501\n return data\n\n def get_users_with_http_info(self, o, **kwargs): # noqa: E501\n \"\"\"List All Users With Access to a Given Organization # noqa: E501\n\n Retrieves a list of all users with access to an organization # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_users_with_http_info(o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: The Organization whose users you wish to list. If you omit this value, the application will detect and use your default Organization. (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500 with a default of 500. Use with `page` parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: list[User]\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o', 'page', 'limit'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_users\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_users`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n if 'limit' in params:\n query_params.append(('limit', params['limit'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/users', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='list[User]', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n 
_preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"id": "10316071",
"language": "Python",
"matching_score": 6.103545188903809,
"max_stars_count": 1,
"path": "automox_console_sdk/api/users_api.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom automox_console_sdk.api_client import ApiClient\n\n\nclass APIKeysApi(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n Ref: https://github.com/swagger-api/swagger-codegen\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def create_user_api_key(self, o, user_id, **kwargs): # noqa: E501\n \"\"\"Creates an API key for a user # noqa: E501\n\n **Note:** A user is only allowed to have a maximum of 10 active keys per organization at any given time. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_user_api_key(o, user_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: The Organization of the user. (required)\n :param int user_id: User ID of the user to create an API key (required)\n :param UserIdApiKeysBody body:\n :return: ApiKey\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.create_user_api_key_with_http_info(o, user_id, **kwargs) # noqa: E501\n else:\n (data) = self.create_user_api_key_with_http_info(o, user_id, **kwargs) # noqa: E501\n return data\n\n def create_user_api_key_with_http_info(self, o, user_id, **kwargs): # noqa: E501\n \"\"\"Creates an API key for a user # noqa: E501\n\n **Note:** A user is only allowed to have a maximum of 10 active keys per organization at any given time. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.create_user_api_key_with_http_info(o, user_id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: The Organization of the user. 
(required)\n :param int user_id: User ID of the user to create an API key (required)\n :param UserIdApiKeysBody body:\n :return: ApiKey\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o', 'user_id', 'body'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method create_user_api_key\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `create_user_api_key`\") # noqa: E501\n # verify the required parameter 'user_id' is set\n if ('user_id' not in params or\n params['user_id'] is None):\n raise ValueError(\"Missing the required parameter `user_id` when calling `create_user_api_key`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'user_id' in params:\n path_params['userId'] = params['user_id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/users/{userId}/api_keys', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiKey', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def decrypt_user_api_key(self, user_id, id, o, **kwargs): # noqa: E501\n \"\"\"Decrypt User API Key # noqa: E501\n\n This endpoint allows you to decrypt the API key for an authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.decrypt_user_api_key(user_id, id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: The ID of the user to decrypt keys for. (required)\n :param int id: The ID of the API key to decrypt (required)\n :param int o: The Organization of the user. 
(required)\n :return: InlineResponse2003\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.decrypt_user_api_key_with_http_info(user_id, id, o, **kwargs) # noqa: E501\n else:\n (data) = self.decrypt_user_api_key_with_http_info(user_id, id, o, **kwargs) # noqa: E501\n return data\n\n def decrypt_user_api_key_with_http_info(self, user_id, id, o, **kwargs): # noqa: E501\n \"\"\"Decrypt User API Key # noqa: E501\n\n This endpoint allows you to decrypt the API key for an authenticated user. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.decrypt_user_api_key_with_http_info(user_id, id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: The ID of the user to decrypt keys for. (required)\n :param int id: The ID of the API key to decrypt (required)\n :param int o: The Organization of the user. (required)\n :return: InlineResponse2003\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['user_id', 'id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method decrypt_user_api_key\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'user_id' is set\n if ('user_id' not in params or\n params['user_id'] is None):\n raise ValueError(\"Missing the required parameter `user_id` when calling `decrypt_user_api_key`\") # noqa: E501\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `decrypt_user_api_key`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `decrypt_user_api_key`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'user_id' in params:\n path_params['userId'] = params['user_id'] # noqa: E501\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/users/{userId}/api_keys/{id}/decrypt', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2003', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def delete_user_api_key(self, user_id, id, o, **kwargs): # noqa: E501\n \"\"\"Deletes an API Key by ID # noqa: E501\n\n 
Deletes an API Key by ID # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_user_api_key(user_id, id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: User ID of the user to delete API Key for (required)\n :param int id: The ID of the API key to delete (required)\n :param int o: The Organization whose keys you want to delete. (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.delete_user_api_key_with_http_info(user_id, id, o, **kwargs) # noqa: E501\n else:\n (data) = self.delete_user_api_key_with_http_info(user_id, id, o, **kwargs) # noqa: E501\n return data\n\n def delete_user_api_key_with_http_info(self, user_id, id, o, **kwargs): # noqa: E501\n \"\"\"Deletes an API Key by ID # noqa: E501\n\n Deletes an API Key by ID # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.delete_user_api_key_with_http_info(user_id, id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: User ID of the user to delete API Key for (required)\n :param int id: The ID of the API key to delete (required)\n :param int o: The Organization whose keys you want to delete. (required)\n :return: None\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['user_id', 'id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method delete_user_api_key\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'user_id' is set\n if ('user_id' not in params or\n params['user_id'] is None):\n raise ValueError(\"Missing the required parameter `user_id` when calling `delete_user_api_key`\") # noqa: E501\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `delete_user_api_key`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `delete_user_api_key`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'user_id' in params:\n path_params['userId'] = params['user_id'] # noqa: E501\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/users/{userId}/api_keys/{id}', 'DELETE',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type=None, # noqa: E501\n 
auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_organization_api_keys(self, id, **kwargs): # noqa: E501\n \"\"\"List All API Keys for Organization # noqa: E501\n\n **PREREQUISITES:** You must have **Full Administrator** privileges! This endpoint allows you to list all keys for an organization. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_organization_api_keys(id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: The ID of the organization to list keys for. (required)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: InlineResponse200\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_organization_api_keys_with_http_info(id, **kwargs) # noqa: E501\n else:\n (data) = self.get_organization_api_keys_with_http_info(id, **kwargs) # noqa: E501\n return data\n\n def get_organization_api_keys_with_http_info(self, id, **kwargs): # noqa: E501\n \"\"\"List All API Keys for Organization # noqa: E501\n\n **PREREQUISITES:** You must have **Full Administrator** privileges! This endpoint allows you to list all keys for an organization. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_organization_api_keys_with_http_info(id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int id: The ID of the organization to list keys for. (required)\n :param int limit: A limit on the number of results to be returned, between 1 and 500, with a default of 500. Use with `page` parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. 
See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: InlineResponse200\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['id', 'limit', 'page'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_organization_api_keys\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `get_organization_api_keys`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'limit' in params:\n query_params.append(('limit', params['limit'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/orgs/{id}/api_keys', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse200', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_user_api_key(self, user_id, id, o, **kwargs): # noqa: E501\n \"\"\"Retrieves an API key object by ID # noqa: E501\n\n Note: The response does not contain the encrypted portion of the key. See [Decrypt User API Key](/openapi/axconsole/operation/decryptUserApiKey/) # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_user_api_key(user_id, id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: The ID of the user to view keys for. (required)\n :param int id: The ID of the API key object to retrieve (required)\n :param int o: The Organization whose keys you want to view. (required)\n :return: ApiKey\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_user_api_key_with_http_info(user_id, id, o, **kwargs) # noqa: E501\n else:\n (data) = self.get_user_api_key_with_http_info(user_id, id, o, **kwargs) # noqa: E501\n return data\n\n def get_user_api_key_with_http_info(self, user_id, id, o, **kwargs): # noqa: E501\n \"\"\"Retrieves an API key object by ID # noqa: E501\n\n Note: The response does not contain the encrypted portion of the key. See [Decrypt User API Key](/openapi/axconsole/operation/decryptUserApiKey/) # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_user_api_key_with_http_info(user_id, id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: The ID of the user to view keys for. (required)\n :param int id: The ID of the API key object to retrieve (required)\n :param int o: The Organization whose keys you want to view. (required)\n :return: ApiKey\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['user_id', 'id', 'o'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_user_api_key\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'user_id' is set\n if ('user_id' not in params or\n params['user_id'] is None):\n raise ValueError(\"Missing the required parameter `user_id` when calling `get_user_api_key`\") # noqa: E501\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `get_user_api_key`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_user_api_key`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'user_id' in params:\n path_params['userId'] = params['user_id'] # noqa: E501\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/users/{userId}/api_keys/{id}', 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiKey', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def get_user_api_keys(self, user_id, o, **kwargs): # noqa: E501\n \"\"\"Retrieves a list of API key objects for a user # noqa: E501\n\n Returns a list of API keys for the given user under the requested organization. This response does not include the encrypted portion of the key. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_user_api_keys(user_id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: User ID of the user to retrieve API key objects (required)\n :param int o: The Organization of the user. (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. 
See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500 with a default of 500. Use with page parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: InlineResponse2002\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.get_user_api_keys_with_http_info(user_id, o, **kwargs) # noqa: E501\n else:\n (data) = self.get_user_api_keys_with_http_info(user_id, o, **kwargs) # noqa: E501\n return data\n\n def get_user_api_keys_with_http_info(self, user_id, o, **kwargs): # noqa: E501\n \"\"\"Retrieves a list of API key objects for a user # noqa: E501\n\n Returns a list of API keys for the given user under the requested organization. This response does not include the encrypted portion of the key. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.get_user_api_keys_with_http_info(user_id, o, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int user_id: User ID of the user to retrieve API key objects (required)\n :param int o: The Organization of the user. (required)\n :param int page: The page of results you wish to be returned with page numbers starting at 0. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :param int limit: A limit on the number of results to be returned, between 1 and 500 with a default of 500. Use with page parameter. See [About Automox API - Pagination](/developer-portal/about-ax-api/#pagination)\n :return: InlineResponse2002\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['user_id', 'o', 'page', 'limit'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_user_api_keys\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'user_id' is set\n if ('user_id' not in params or\n params['user_id'] is None):\n raise ValueError(\"Missing the required parameter `user_id` when calling `get_user_api_keys`\") # noqa: E501\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `get_user_api_keys`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'user_id' in params:\n path_params['userId'] = params['user_id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n if 'page' in params:\n query_params.append(('page', params['page'])) # noqa: E501\n if 'limit' in params:\n query_params.append(('limit', params['limit'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/users/{userId}/api_keys', 'GET',\n 
path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='InlineResponse2002', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n\n def update_user_api_key(self, o, user_id, id, **kwargs): # noqa: E501\n \"\"\"Update an API Key by ID # noqa: E501\n\n Update an API Key by ID # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_user_api_key(o, user_id, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: The Organization of the key. (required)\n :param int user_id: User ID of the user to update keys for (required)\n :param int id: The ID of the API key to update (required)\n :param ApiKeysIdBody body: Enable/Disable API key\n :return: ApiKey\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n if kwargs.get('async_req'):\n return self.update_user_api_key_with_http_info(o, user_id, id, **kwargs) # noqa: E501\n else:\n (data) = self.update_user_api_key_with_http_info(o, user_id, id, **kwargs) # noqa: E501\n return data\n\n def update_user_api_key_with_http_info(self, o, user_id, id, **kwargs): # noqa: E501\n \"\"\"Update an API Key by ID # noqa: E501\n\n Update an API Key by ID # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.update_user_api_key_with_http_info(o, user_id, id, async_req=True)\n >>> result = thread.get()\n\n :param async_req bool\n :param int o: The Organization of the key. 
(required)\n :param int user_id: User ID of the user to update keys for (required)\n :param int id: The ID of the API key to update (required)\n :param ApiKeysIdBody body: Enable/Disable API key\n :return: ApiKey\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n all_params = ['o', 'user_id', 'id', 'body'] # noqa: E501\n all_params.append('async_req')\n all_params.append('_return_http_data_only')\n all_params.append('_preload_content')\n all_params.append('_request_timeout')\n\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method update_user_api_key\" % key\n )\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'o' is set\n if ('o' not in params or\n params['o'] is None):\n raise ValueError(\"Missing the required parameter `o` when calling `update_user_api_key`\") # noqa: E501\n # verify the required parameter 'user_id' is set\n if ('user_id' not in params or\n params['user_id'] is None):\n raise ValueError(\"Missing the required parameter `user_id` when calling `update_user_api_key`\") # noqa: E501\n # verify the required parameter 'id' is set\n if ('id' not in params or\n params['id'] is None):\n raise ValueError(\"Missing the required parameter `id` when calling `update_user_api_key`\") # noqa: E501\n\n collection_formats = {}\n\n path_params = {}\n if 'user_id' in params:\n path_params['userId'] = params['user_id'] # noqa: E501\n if 'id' in params:\n path_params['id'] = params['id'] # noqa: E501\n\n query_params = []\n if 'o' in params:\n query_params.append(('o', params['o'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['application/json']) # noqa: E501\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501\n ['application/json']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['bearerAuth'] # noqa: E501\n\n return self.api_client.call_api(\n '/users/{userId}/api_keys/{id}', 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ApiKey', # noqa: E501\n auth_settings=auth_settings,\n async_req=params.get('async_req'),\n _return_http_data_only=params.get('_return_http_data_only'),\n _preload_content=params.get('_preload_content', True),\n _request_timeout=params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"id": "9087594",
"language": "Python",
"matching_score": 0.7897502779960632,
"max_stars_count": 1,
"path": "automox_console_sdk/api/api_keys_api.py"
},
{
"content": "\"\"\"Use case for automating the ingestion of CVE reports\"\"\"\nimport glob\nimport os\nimport sys\nimport time\nfrom getpass import getpass\nfrom io import FileIO\n\nimport requests\n\n\ndef upload_cve(file: FileIO) -> dict:\n \"\"\" Uploads vulnerability list to Automox Vulnerability Sync endpoint.\n\n Args:\n file (FileIO): A CSV file containing vulnerability data.\n\n Returns:\n response_data (dict): API response from Automox Vulnerability Sync\n https://developer.automox.com/openapi/vulnsync/operation/UploadCSVBatch/\n \"\"\"\n\n response_data = {}\n\n task = \"patch\"\n url = f\"https://console.automox.com/api/orgs/{organization}/tasks/{task}/batches/upload\"\n\n filename = os.path.basename(file.name)\n\n try:\n headers = {\n \"Authorization\": f\"Bearer {api_secret}\",\n }\n\n files = [\n ('file', (filename, file, 'text/csv'))\n ]\n\n response = requests.request(\"POST\", url, headers=headers, files=files)\n\n response_data = response.json()\n\n if \"errors\" in response_data and len(response_data['errors']) > 0:\n msg = \"\"\n msg = msg.join(response_data['errors'])\n\n raise Exception(msg)\n except (requests.RequestException, Exception) as error:\n print(f\"Error: Unable to complete CSV upload request. ({error})\")\n\n return response_data\n\ndef get_unprocessed_cves(directory: str) -> list:\n \"\"\"Returns a list of CSV files to upload and process.\n\n Args:\n directory (str): Directory to look in for CSVs.\n\n Returns:\n cve_files (list): List of files to be processed and uploaded.\n \"\"\"\n\n cve_files = []\n\n paths = glob.glob(f\"{directory}/*.csv\")\n\n for path in paths:\n try:\n cve_file = open(path, \"rb\")\n\n cve_files.append(cve_file)\n except (OSError, IOError) as error:\n print(f\"Error: Could not open a CSV. {error}\")\n\n print(f\"Found {len(cve_files)} file(s) to upload.\")\n\n return cve_files\n\ndef process_cves(unprocessed_cve_list: list) -> dict:\n \"\"\"Handles uploading and moving the CSV file to the processed directory.\n\n Args:\n unprocessed_cve_list (list): List of files to process.\n\n Returns:\n uploaded_batches (dict): Dictionary of batch ids correlated to API batch upload responses.\n \"\"\"\n\n uploaded_batches = {}\n\n for file in unprocessed_cve_list:\n try:\n # Make the request to upload the batch file\n print(f\"Sending {os.path.basename(file.name)} to Automox Vulnerability Sync...\")\n\n response = upload_cve(file)\n\n if response['id']:\n uploaded_batches[response['id']] = response\n\n upload_output = (\n \"==============================\\n\"\n f\"BATCH ID: {response['id']}\\n\"\n f\"{response['source']} has been uploaded.\\n\"\n \"==============================\"\n )\n\n print(upload_output)\n\n path = os.path.realpath(file.name)\n directory = os.path.dirname(path)\n filename = os.path.basename(file.name)\n new_path = f\"{directory}/processed/{filename}\"\n\n print(f\"Moving {filename} to {new_path}\\n\")\n\n os.rename(path, new_path)\n except OSError as error:\n print(f\"Error processing CVE: {error}\")\n\n return uploaded_batches\n\ndef update_batches(uploaded_batches: dict) -> dict:\n \"\"\"Polls the Automox API for the status of batches contained in this dictionary.\n\n When CSV files containing CVE information is uploaded to the Automox Vulnerability Sync API, a task list is built\n\n Args:\n uploaded_batches (dict): A dictionary of the latest responses from the Automox API about the status of a batch.\n\n Returns:\n uploaded_batches (dict): An updated dictionary of the latest responses from the Automox API about the 
status of a batch.\n    \"\"\"\n\n    for batch_id, batch in uploaded_batches.items():\n        try:\n            if batch['status'] != \"awaiting_approval\":\n                headers = {\n                    \"Authorization\": f\"Bearer {api_secret}\",\n                }\n\n                response = requests.get(f\"https://console.automox.com/api/orgs/{organization}/tasks/batches/{batch['id']}\", headers=headers)\n\n                response_data = response.json()\n\n                if \"errors\" in response_data and len(response_data['errors']) > 0:\n                    msg = \"\"\n                    msg = msg.join(response_data['errors'])\n\n                    raise Exception(msg)\n\n                uploaded_batches[batch_id] = response_data\n        except (requests.RequestException, Exception) as error:\n            print(f\"Error: Unable to update batch {batch_id} status. ({error})\")\n\n    return uploaded_batches\n\ntry:\n    # Directory to watch for new CVE CSVs\n    WATCH_DIR = os.getenv(\"WATCH_DIR\") or \"./cve_queue\"\n\n    # Prompt for inputs\n    api_secret = os.getenv('AUTOMOX_API_KEY') or getpass('Enter your API Key: ')\n    organization = os.getenv('AUTOMOX_ORGANIZATION_ID') or input(\"Enter your Organization ID: \")\n\n    cve_list = get_unprocessed_cves(WATCH_DIR)\n\n    if len(cve_list) == 0:\n        sys.exit()\n\n    batches = process_cves(cve_list)\n\n    # Assumes the batches have not been built upon receipt.\n    batches_complete = len(batches) == 0\n\n    while not batches_complete:\n        print(\"Batches are still building... Checking for updates...\")\n        batches = update_batches(batches)\n\n        # The run is only complete once every batch has finished building.\n        batches_complete = True\n\n        for batch_id, batch in batches.items():\n            if batch['status'] != \"awaiting_approval\":\n                batches_complete = False\n\n        time.sleep(10)\n\n    print(\"Batches are done processing!\")\n\n    for batch_id, batch in batches.items():\n        output = (\n            \"==============================\\n\"\n            f\"BATCH ID: {batch['id']}\\n\"\n            f\"{batch['source']} has been processed.\\n\"\n            f\"Total Vulnerabilities: {batch['cve_count']}\\n\"\n            f\"Devices Impacted: {batch['impacted_device_count']}\\n\"\n            f\"Tasks Pending Creation: {batch['task_count']}\\n\"\n            f\"Batch Issues: {batch['issue_count']}\\n\"\n            f\"Unknown Hosts: {batch['unknown_host_count']}\\n\"\n            \"==============================\"\n        )\n\n        print(output)\nexcept Exception as e:\n    print(f\"Error: {e}\\n\")\n    raise\nexcept KeyboardInterrupt:\n    print(\"Ctrl+C Pressed. Shutting down.\")\n",
"id": "12799152",
"language": "Python",
"matching_score": 0.9975802302360535,
"max_stars_count": 1,
"path": "examples/use-cases/vuln_sync_csv_upload/vuln_upload.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass SoftwareApprovals(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'int',\n 'software_version_id': 'int',\n 'policy_id': 'int',\n 'manual_approval': 'bool',\n 'manual_approval_time': 'datetime',\n 'auto_approval': 'bool'\n }\n\n attribute_map = {\n 'id': 'id',\n 'software_version_id': 'software_version_id',\n 'policy_id': 'policy_id',\n 'manual_approval': 'manual_approval',\n 'manual_approval_time': 'manual_approval_time',\n 'auto_approval': 'auto_approval'\n }\n\n def __init__(self, id=None, software_version_id=None, policy_id=None, manual_approval=None, manual_approval_time=None, auto_approval=None): # noqa: E501\n \"\"\"SoftwareApprovals - a model defined in Swagger\"\"\" # noqa: E501\n self._id = None\n self._software_version_id = None\n self._policy_id = None\n self._manual_approval = None\n self._manual_approval_time = None\n self._auto_approval = None\n self.discriminator = None\n if id is not None:\n self.id = id\n if software_version_id is not None:\n self.software_version_id = software_version_id\n if policy_id is not None:\n self.policy_id = policy_id\n self.manual_approval = manual_approval\n if manual_approval_time is not None:\n self.manual_approval_time = manual_approval_time\n if auto_approval is not None:\n self.auto_approval = auto_approval\n\n @property\n def id(self):\n \"\"\"Gets the id of this SoftwareApprovals. # noqa: E501\n\n\n :return: The id of this SoftwareApprovals. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this SoftwareApprovals.\n\n\n :param id: The id of this SoftwareApprovals. # noqa: E501\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def software_version_id(self):\n \"\"\"Gets the software_version_id of this SoftwareApprovals. # noqa: E501\n\n\n :return: The software_version_id of this SoftwareApprovals. # noqa: E501\n :rtype: int\n \"\"\"\n return self._software_version_id\n\n @software_version_id.setter\n def software_version_id(self, software_version_id):\n \"\"\"Sets the software_version_id of this SoftwareApprovals.\n\n\n :param software_version_id: The software_version_id of this SoftwareApprovals. # noqa: E501\n :type: int\n \"\"\"\n\n self._software_version_id = software_version_id\n\n @property\n def policy_id(self):\n \"\"\"Gets the policy_id of this SoftwareApprovals. # noqa: E501\n\n\n :return: The policy_id of this SoftwareApprovals. # noqa: E501\n :rtype: int\n \"\"\"\n return self._policy_id\n\n @policy_id.setter\n def policy_id(self, policy_id):\n \"\"\"Sets the policy_id of this SoftwareApprovals.\n\n\n :param policy_id: The policy_id of this SoftwareApprovals. # noqa: E501\n :type: int\n \"\"\"\n\n self._policy_id = policy_id\n\n @property\n def manual_approval(self):\n \"\"\"Gets the manual_approval of this SoftwareApprovals. 
# noqa: E501\n\n true = Approved, false = Rejected # noqa: E501\n\n :return: The manual_approval of this SoftwareApprovals. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._manual_approval\n\n @manual_approval.setter\n def manual_approval(self, manual_approval):\n \"\"\"Sets the manual_approval of this SoftwareApprovals.\n\n true = Approved, false = Rejected # noqa: E501\n\n :param manual_approval: The manual_approval of this SoftwareApprovals. # noqa: E501\n :type: bool\n \"\"\"\n if manual_approval is None:\n raise ValueError(\"Invalid value for `manual_approval`, must not be `None`\") # noqa: E501\n\n self._manual_approval = manual_approval\n\n @property\n def manual_approval_time(self):\n \"\"\"Gets the manual_approval_time of this SoftwareApprovals. # noqa: E501\n\n\n :return: The manual_approval_time of this SoftwareApprovals. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._manual_approval_time\n\n @manual_approval_time.setter\n def manual_approval_time(self, manual_approval_time):\n \"\"\"Sets the manual_approval_time of this SoftwareApprovals.\n\n\n :param manual_approval_time: The manual_approval_time of this SoftwareApprovals. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._manual_approval_time = manual_approval_time\n\n @property\n def auto_approval(self):\n \"\"\"Gets the auto_approval of this SoftwareApprovals. # noqa: E501\n\n\n :return: The auto_approval of this SoftwareApprovals. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._auto_approval\n\n @auto_approval.setter\n def auto_approval(self, auto_approval):\n \"\"\"Sets the auto_approval of this SoftwareApprovals.\n\n\n :param auto_approval: The auto_approval of this SoftwareApprovals. # noqa: E501\n :type: bool\n \"\"\"\n\n self._auto_approval = auto_approval\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(SoftwareApprovals, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, SoftwareApprovals):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "12198166",
"language": "Python",
"matching_score": 1.6677145957946777,
"max_stars_count": 1,
"path": "automox_console_sdk/models/software_approvals.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass ServerPolicyStatus(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'int',\n 'organization_id': 'int',\n 'policy_id': 'int',\n 'server_id': 'int',\n 'policy_name': 'str',\n 'policy_type_name': 'str',\n 'status': 'int',\n 'result': 'str',\n 'create_time': 'str'\n }\n\n attribute_map = {\n 'id': 'id',\n 'organization_id': 'organization_id',\n 'policy_id': 'policy_id',\n 'server_id': 'server_id',\n 'policy_name': 'policy_name',\n 'policy_type_name': 'policy_type_name',\n 'status': 'status',\n 'result': 'result',\n 'create_time': 'create_time'\n }\n\n def __init__(self, id=None, organization_id=None, policy_id=None, server_id=None, policy_name=None, policy_type_name=None, status=None, result=None, create_time=None): # noqa: E501\n \"\"\"ServerPolicyStatus - a model defined in Swagger\"\"\" # noqa: E501\n self._id = None\n self._organization_id = None\n self._policy_id = None\n self._server_id = None\n self._policy_name = None\n self._policy_type_name = None\n self._status = None\n self._result = None\n self._create_time = None\n self.discriminator = None\n if id is not None:\n self.id = id\n if organization_id is not None:\n self.organization_id = organization_id\n if policy_id is not None:\n self.policy_id = policy_id\n if server_id is not None:\n self.server_id = server_id\n if policy_name is not None:\n self.policy_name = policy_name\n if policy_type_name is not None:\n self.policy_type_name = policy_type_name\n if status is not None:\n self.status = status\n if result is not None:\n self.result = result\n if create_time is not None:\n self.create_time = create_time\n\n @property\n def id(self):\n \"\"\"Gets the id of this ServerPolicyStatus. # noqa: E501\n\n\n :return: The id of this ServerPolicyStatus. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this ServerPolicyStatus.\n\n\n :param id: The id of this ServerPolicyStatus. # noqa: E501\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def organization_id(self):\n \"\"\"Gets the organization_id of this ServerPolicyStatus. # noqa: E501\n\n\n :return: The organization_id of this ServerPolicyStatus. # noqa: E501\n :rtype: int\n \"\"\"\n return self._organization_id\n\n @organization_id.setter\n def organization_id(self, organization_id):\n \"\"\"Sets the organization_id of this ServerPolicyStatus.\n\n\n :param organization_id: The organization_id of this ServerPolicyStatus. # noqa: E501\n :type: int\n \"\"\"\n\n self._organization_id = organization_id\n\n @property\n def policy_id(self):\n \"\"\"Gets the policy_id of this ServerPolicyStatus. # noqa: E501\n\n\n :return: The policy_id of this ServerPolicyStatus. # noqa: E501\n :rtype: int\n \"\"\"\n return self._policy_id\n\n @policy_id.setter\n def policy_id(self, policy_id):\n \"\"\"Sets the policy_id of this ServerPolicyStatus.\n\n\n :param policy_id: The policy_id of this ServerPolicyStatus. 
# noqa: E501\n :type: int\n \"\"\"\n\n self._policy_id = policy_id\n\n @property\n def server_id(self):\n \"\"\"Gets the server_id of this ServerPolicyStatus. # noqa: E501\n\n\n :return: The server_id of this ServerPolicyStatus. # noqa: E501\n :rtype: int\n \"\"\"\n return self._server_id\n\n @server_id.setter\n def server_id(self, server_id):\n \"\"\"Sets the server_id of this ServerPolicyStatus.\n\n\n :param server_id: The server_id of this ServerPolicyStatus. # noqa: E501\n :type: int\n \"\"\"\n\n self._server_id = server_id\n\n @property\n def policy_name(self):\n \"\"\"Gets the policy_name of this ServerPolicyStatus. # noqa: E501\n\n\n :return: The policy_name of this ServerPolicyStatus. # noqa: E501\n :rtype: str\n \"\"\"\n return self._policy_name\n\n @policy_name.setter\n def policy_name(self, policy_name):\n \"\"\"Sets the policy_name of this ServerPolicyStatus.\n\n\n :param policy_name: The policy_name of this ServerPolicyStatus. # noqa: E501\n :type: str\n \"\"\"\n\n self._policy_name = policy_name\n\n @property\n def policy_type_name(self):\n \"\"\"Gets the policy_type_name of this ServerPolicyStatus. # noqa: E501\n\n\n :return: The policy_type_name of this ServerPolicyStatus. # noqa: E501\n :rtype: str\n \"\"\"\n return self._policy_type_name\n\n @policy_type_name.setter\n def policy_type_name(self, policy_type_name):\n \"\"\"Sets the policy_type_name of this ServerPolicyStatus.\n\n\n :param policy_type_name: The policy_type_name of this ServerPolicyStatus. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"patch\", \"custom\", \"required_software\"] # noqa: E501\n if policy_type_name not in allowed_values:\n raise ValueError(\n \"Invalid value for `policy_type_name` ({0}), must be one of {1}\" # noqa: E501\n .format(policy_type_name, allowed_values)\n )\n\n self._policy_type_name = policy_type_name\n\n @property\n def status(self):\n \"\"\"Gets the status of this ServerPolicyStatus. # noqa: E501\n\n\n :return: The status of this ServerPolicyStatus. # noqa: E501\n :rtype: int\n \"\"\"\n return self._status\n\n @status.setter\n def status(self, status):\n \"\"\"Sets the status of this ServerPolicyStatus.\n\n\n :param status: The status of this ServerPolicyStatus. # noqa: E501\n :type: int\n \"\"\"\n\n self._status = status\n\n @property\n def result(self):\n \"\"\"Gets the result of this ServerPolicyStatus. # noqa: E501\n\n\n :return: The result of this ServerPolicyStatus. # noqa: E501\n :rtype: str\n \"\"\"\n return self._result\n\n @result.setter\n def result(self, result):\n \"\"\"Sets the result of this ServerPolicyStatus.\n\n\n :param result: The result of this ServerPolicyStatus. # noqa: E501\n :type: str\n \"\"\"\n\n self._result = result\n\n @property\n def create_time(self):\n \"\"\"Gets the create_time of this ServerPolicyStatus. # noqa: E501\n\n\n :return: The create_time of this ServerPolicyStatus. # noqa: E501\n :rtype: str\n \"\"\"\n return self._create_time\n\n @create_time.setter\n def create_time(self, create_time):\n \"\"\"Sets the create_time of this ServerPolicyStatus.\n\n\n :param create_time: The create_time of this ServerPolicyStatus. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._create_time = create_time\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ServerPolicyStatus, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ServerPolicyStatus):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "2252055",
"language": "Python",
"matching_score": 1.3242968320846558,
"max_stars_count": 1,
"path": "automox_console_sdk/models/server_policy_status.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass ServersbatchActions(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'attribute': 'str',\n 'action': 'str',\n 'value': 'list[str]'\n }\n\n attribute_map = {\n 'attribute': 'attribute',\n 'action': 'action',\n 'value': 'value'\n }\n\n def __init__(self, attribute=None, action=None, value=None): # noqa: E501\n \"\"\"ServersbatchActions - a model defined in Swagger\"\"\" # noqa: E501\n self._attribute = None\n self._action = None\n self._value = None\n self.discriminator = None\n if attribute is not None:\n self.attribute = attribute\n if action is not None:\n self.action = action\n if value is not None:\n self.value = value\n\n @property\n def attribute(self):\n \"\"\"Gets the attribute of this ServersbatchActions. # noqa: E501\n\n Name of the attribute # noqa: E501\n\n :return: The attribute of this ServersbatchActions. # noqa: E501\n :rtype: str\n \"\"\"\n return self._attribute\n\n @attribute.setter\n def attribute(self, attribute):\n \"\"\"Sets the attribute of this ServersbatchActions.\n\n Name of the attribute # noqa: E501\n\n :param attribute: The attribute of this ServersbatchActions. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"tags\"] # noqa: E501\n if attribute not in allowed_values:\n raise ValueError(\n \"Invalid value for `attribute` ({0}), must be one of {1}\" # noqa: E501\n .format(attribute, allowed_values)\n )\n\n self._attribute = attribute\n\n @property\n def action(self):\n \"\"\"Gets the action of this ServersbatchActions. # noqa: E501\n\n What action should be taken # noqa: E501\n\n :return: The action of this ServersbatchActions. # noqa: E501\n :rtype: str\n \"\"\"\n return self._action\n\n @action.setter\n def action(self, action):\n \"\"\"Sets the action of this ServersbatchActions.\n\n What action should be taken # noqa: E501\n\n :param action: The action of this ServersbatchActions. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"apply\", \"remove\"] # noqa: E501\n if action not in allowed_values:\n raise ValueError(\n \"Invalid value for `action` ({0}), must be one of {1}\" # noqa: E501\n .format(action, allowed_values)\n )\n\n self._action = action\n\n @property\n def value(self):\n \"\"\"Gets the value of this ServersbatchActions. # noqa: E501\n\n The value to use for the action # noqa: E501\n\n :return: The value of this ServersbatchActions. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._value\n\n @value.setter\n def value(self, value):\n \"\"\"Sets the value of this ServersbatchActions.\n\n The value to use for the action # noqa: E501\n\n :param value: The value of this ServersbatchActions. 
# noqa: E501\n :type: list[str]\n \"\"\"\n\n self._value = value\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ServersbatchActions, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ServersbatchActions):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "11373500",
"language": "Python",
"matching_score": 0.5106936097145081,
"max_stars_count": 1,
"path": "automox_console_sdk/models/serversbatch_actions.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass CompatibilityChecks(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'low_diskspace': 'bool',\n 'missing_secure_token': 'bool',\n 'app_store_disconnected': 'bool',\n 'missing_powershell': 'bool',\n 'missing_wmi_integrity_check': 'bool',\n 'wsus_disconnected': 'bool',\n 'windows_update_server_disconnected': 'bool'\n }\n\n attribute_map = {\n 'low_diskspace': 'low_diskspace',\n 'missing_secure_token': 'missing_secure_token',\n 'app_store_disconnected': 'app_store_disconnected',\n 'missing_powershell': 'missing_powershell',\n 'missing_wmi_integrity_check': 'missing_wmi_integrity_check',\n 'wsus_disconnected': 'wsus_disconnected',\n 'windows_update_server_disconnected': 'windows_update_server_disconnected'\n }\n\n def __init__(self, low_diskspace=None, missing_secure_token=None, app_store_disconnected=None, missing_powershell=None, missing_wmi_integrity_check=None, wsus_disconnected=None, windows_update_server_disconnected=None): # noqa: E501\n \"\"\"CompatibilityChecks - a model defined in Swagger\"\"\" # noqa: E501\n self._low_diskspace = None\n self._missing_secure_token = None\n self._app_store_disconnected = None\n self._missing_powershell = None\n self._missing_wmi_integrity_check = None\n self._wsus_disconnected = None\n self._windows_update_server_disconnected = None\n self.discriminator = None\n if low_diskspace is not None:\n self.low_diskspace = low_diskspace\n if missing_secure_token is not None:\n self.missing_secure_token = missing_secure_token\n if app_store_disconnected is not None:\n self.app_store_disconnected = app_store_disconnected\n if missing_powershell is not None:\n self.missing_powershell = missing_powershell\n if missing_wmi_integrity_check is not None:\n self.missing_wmi_integrity_check = missing_wmi_integrity_check\n if wsus_disconnected is not None:\n self.wsus_disconnected = wsus_disconnected\n if windows_update_server_disconnected is not None:\n self.windows_update_server_disconnected = windows_update_server_disconnected\n\n @property\n def low_diskspace(self):\n \"\"\"Gets the low_diskspace of this CompatibilityChecks. # noqa: E501\n\n\n :return: The low_diskspace of this CompatibilityChecks. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._low_diskspace\n\n @low_diskspace.setter\n def low_diskspace(self, low_diskspace):\n \"\"\"Sets the low_diskspace of this CompatibilityChecks.\n\n\n :param low_diskspace: The low_diskspace of this CompatibilityChecks. # noqa: E501\n :type: bool\n \"\"\"\n\n self._low_diskspace = low_diskspace\n\n @property\n def missing_secure_token(self):\n \"\"\"Gets the missing_secure_token of this CompatibilityChecks. # noqa: E501\n\n\n :return: The missing_secure_token of this CompatibilityChecks. 
# noqa: E501\n :rtype: bool\n \"\"\"\n return self._missing_secure_token\n\n @missing_secure_token.setter\n def missing_secure_token(self, missing_secure_token):\n \"\"\"Sets the missing_secure_token of this CompatibilityChecks.\n\n\n :param missing_secure_token: The missing_secure_token of this CompatibilityChecks. # noqa: E501\n :type: bool\n \"\"\"\n\n self._missing_secure_token = missing_secure_token\n\n @property\n def app_store_disconnected(self):\n \"\"\"Gets the app_store_disconnected of this CompatibilityChecks. # noqa: E501\n\n\n :return: The app_store_disconnected of this CompatibilityChecks. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._app_store_disconnected\n\n @app_store_disconnected.setter\n def app_store_disconnected(self, app_store_disconnected):\n \"\"\"Sets the app_store_disconnected of this CompatibilityChecks.\n\n\n :param app_store_disconnected: The app_store_disconnected of this CompatibilityChecks. # noqa: E501\n :type: bool\n \"\"\"\n\n self._app_store_disconnected = app_store_disconnected\n\n @property\n def missing_powershell(self):\n \"\"\"Gets the missing_powershell of this CompatibilityChecks. # noqa: E501\n\n\n :return: The missing_powershell of this CompatibilityChecks. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._missing_powershell\n\n @missing_powershell.setter\n def missing_powershell(self, missing_powershell):\n \"\"\"Sets the missing_powershell of this CompatibilityChecks.\n\n\n :param missing_powershell: The missing_powershell of this CompatibilityChecks. # noqa: E501\n :type: bool\n \"\"\"\n\n self._missing_powershell = missing_powershell\n\n @property\n def missing_wmi_integrity_check(self):\n \"\"\"Gets the missing_wmi_integrity_check of this CompatibilityChecks. # noqa: E501\n\n\n :return: The missing_wmi_integrity_check of this CompatibilityChecks. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._missing_wmi_integrity_check\n\n @missing_wmi_integrity_check.setter\n def missing_wmi_integrity_check(self, missing_wmi_integrity_check):\n \"\"\"Sets the missing_wmi_integrity_check of this CompatibilityChecks.\n\n\n :param missing_wmi_integrity_check: The missing_wmi_integrity_check of this CompatibilityChecks. # noqa: E501\n :type: bool\n \"\"\"\n\n self._missing_wmi_integrity_check = missing_wmi_integrity_check\n\n @property\n def wsus_disconnected(self):\n \"\"\"Gets the wsus_disconnected of this CompatibilityChecks. # noqa: E501\n\n\n :return: The wsus_disconnected of this CompatibilityChecks. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._wsus_disconnected\n\n @wsus_disconnected.setter\n def wsus_disconnected(self, wsus_disconnected):\n \"\"\"Sets the wsus_disconnected of this CompatibilityChecks.\n\n\n :param wsus_disconnected: The wsus_disconnected of this CompatibilityChecks. # noqa: E501\n :type: bool\n \"\"\"\n\n self._wsus_disconnected = wsus_disconnected\n\n @property\n def windows_update_server_disconnected(self):\n \"\"\"Gets the windows_update_server_disconnected of this CompatibilityChecks. # noqa: E501\n\n\n :return: The windows_update_server_disconnected of this CompatibilityChecks. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._windows_update_server_disconnected\n\n @windows_update_server_disconnected.setter\n def windows_update_server_disconnected(self, windows_update_server_disconnected):\n \"\"\"Sets the windows_update_server_disconnected of this CompatibilityChecks.\n\n\n :param windows_update_server_disconnected: The windows_update_server_disconnected of this CompatibilityChecks. 
# noqa: E501\n :type: bool\n \"\"\"\n\n self._windows_update_server_disconnected = windows_update_server_disconnected\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(CompatibilityChecks, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, CompatibilityChecks):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "6132947",
"language": "Python",
"matching_score": 0.9290083646774292,
"max_stars_count": 1,
"path": "automox_console_sdk/models/compatibility_checks.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass ServerDetail(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'cpu': 'str',\n 'disks': 'list[ServerDetailDISKS]',\n 'model': 'str',\n 'nics': 'list[ServerDetailNICS]',\n 'ram': 'str',\n 'serial': 'str',\n 'servicetag': 'str',\n 'vendor': 'str',\n 'version': 'str'\n }\n\n attribute_map = {\n 'cpu': 'CPU',\n 'disks': 'DISKS',\n 'model': 'MODEL',\n 'nics': 'NICS',\n 'ram': 'RAM',\n 'serial': 'SERIAL',\n 'servicetag': 'SERVICETAG',\n 'vendor': 'VENDOR',\n 'version': 'VERSION'\n }\n\n def __init__(self, cpu=None, disks=None, model=None, nics=None, ram=None, serial=None, servicetag=None, vendor=None, version=None): # noqa: E501\n \"\"\"ServerDetail - a model defined in Swagger\"\"\" # noqa: E501\n self._cpu = None\n self._disks = None\n self._model = None\n self._nics = None\n self._ram = None\n self._serial = None\n self._servicetag = None\n self._vendor = None\n self._version = None\n self.discriminator = None\n if cpu is not None:\n self.cpu = cpu\n if disks is not None:\n self.disks = disks\n if model is not None:\n self.model = model\n if nics is not None:\n self.nics = nics\n if ram is not None:\n self.ram = ram\n if serial is not None:\n self.serial = serial\n if servicetag is not None:\n self.servicetag = servicetag\n if vendor is not None:\n self.vendor = vendor\n if version is not None:\n self.version = version\n\n @property\n def cpu(self):\n \"\"\"Gets the cpu of this ServerDetail. # noqa: E501\n\n\n :return: The cpu of this ServerDetail. # noqa: E501\n :rtype: str\n \"\"\"\n return self._cpu\n\n @cpu.setter\n def cpu(self, cpu):\n \"\"\"Sets the cpu of this ServerDetail.\n\n\n :param cpu: The cpu of this ServerDetail. # noqa: E501\n :type: str\n \"\"\"\n\n self._cpu = cpu\n\n @property\n def disks(self):\n \"\"\"Gets the disks of this ServerDetail. # noqa: E501\n\n\n :return: The disks of this ServerDetail. # noqa: E501\n :rtype: list[ServerDetailDISKS]\n \"\"\"\n return self._disks\n\n @disks.setter\n def disks(self, disks):\n \"\"\"Sets the disks of this ServerDetail.\n\n\n :param disks: The disks of this ServerDetail. # noqa: E501\n :type: list[ServerDetailDISKS]\n \"\"\"\n\n self._disks = disks\n\n @property\n def model(self):\n \"\"\"Gets the model of this ServerDetail. # noqa: E501\n\n\n :return: The model of this ServerDetail. # noqa: E501\n :rtype: str\n \"\"\"\n return self._model\n\n @model.setter\n def model(self, model):\n \"\"\"Sets the model of this ServerDetail.\n\n\n :param model: The model of this ServerDetail. # noqa: E501\n :type: str\n \"\"\"\n\n self._model = model\n\n @property\n def nics(self):\n \"\"\"Gets the nics of this ServerDetail. # noqa: E501\n\n\n :return: The nics of this ServerDetail. # noqa: E501\n :rtype: list[ServerDetailNICS]\n \"\"\"\n return self._nics\n\n @nics.setter\n def nics(self, nics):\n \"\"\"Sets the nics of this ServerDetail.\n\n\n :param nics: The nics of this ServerDetail. 
# noqa: E501\n :type: list[ServerDetailNICS]\n \"\"\"\n\n self._nics = nics\n\n @property\n def ram(self):\n \"\"\"Gets the ram of this ServerDetail. # noqa: E501\n\n\n :return: The ram of this ServerDetail. # noqa: E501\n :rtype: str\n \"\"\"\n return self._ram\n\n @ram.setter\n def ram(self, ram):\n \"\"\"Sets the ram of this ServerDetail.\n\n\n :param ram: The ram of this ServerDetail. # noqa: E501\n :type: str\n \"\"\"\n\n self._ram = ram\n\n @property\n def serial(self):\n \"\"\"Gets the serial of this ServerDetail. # noqa: E501\n\n\n :return: The serial of this ServerDetail. # noqa: E501\n :rtype: str\n \"\"\"\n return self._serial\n\n @serial.setter\n def serial(self, serial):\n \"\"\"Sets the serial of this ServerDetail.\n\n\n :param serial: The serial of this ServerDetail. # noqa: E501\n :type: str\n \"\"\"\n\n self._serial = serial\n\n @property\n def servicetag(self):\n \"\"\"Gets the servicetag of this ServerDetail. # noqa: E501\n\n\n :return: The servicetag of this ServerDetail. # noqa: E501\n :rtype: str\n \"\"\"\n return self._servicetag\n\n @servicetag.setter\n def servicetag(self, servicetag):\n \"\"\"Sets the servicetag of this ServerDetail.\n\n\n :param servicetag: The servicetag of this ServerDetail. # noqa: E501\n :type: str\n \"\"\"\n\n self._servicetag = servicetag\n\n @property\n def vendor(self):\n \"\"\"Gets the vendor of this ServerDetail. # noqa: E501\n\n\n :return: The vendor of this ServerDetail. # noqa: E501\n :rtype: str\n \"\"\"\n return self._vendor\n\n @vendor.setter\n def vendor(self, vendor):\n \"\"\"Sets the vendor of this ServerDetail.\n\n\n :param vendor: The vendor of this ServerDetail. # noqa: E501\n :type: str\n \"\"\"\n\n self._vendor = vendor\n\n @property\n def version(self):\n \"\"\"Gets the version of this ServerDetail. # noqa: E501\n\n\n :return: The version of this ServerDetail. # noqa: E501\n :rtype: str\n \"\"\"\n return self._version\n\n @version.setter\n def version(self, version):\n \"\"\"Sets the version of this ServerDetail.\n\n\n :param version: The version of this ServerDetail. # noqa: E501\n :type: str\n \"\"\"\n\n self._version = version\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ServerDetail, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ServerDetail):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "7916614",
"language": "Python",
"matching_score": 3.240145683288574,
"max_stars_count": 1,
"path": "automox_console_sdk/models/server_detail.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass ServerDetailNICS(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'connected': 'bool',\n 'device': 'str',\n 'ips': 'list[str]',\n 'mac': 'str',\n 'type': 'str',\n 'vendor': 'str'\n }\n\n attribute_map = {\n 'connected': 'CONNECTED',\n 'device': 'DEVICE',\n 'ips': 'IPS',\n 'mac': 'MAC',\n 'type': 'TYPE',\n 'vendor': 'VENDOR'\n }\n\n def __init__(self, connected=None, device=None, ips=None, mac=None, type=None, vendor=None): # noqa: E501\n \"\"\"ServerDetailNICS - a model defined in Swagger\"\"\" # noqa: E501\n self._connected = None\n self._device = None\n self._ips = None\n self._mac = None\n self._type = None\n self._vendor = None\n self.discriminator = None\n if connected is not None:\n self.connected = connected\n if device is not None:\n self.device = device\n if ips is not None:\n self.ips = ips\n if mac is not None:\n self.mac = mac\n if type is not None:\n self.type = type\n if vendor is not None:\n self.vendor = vendor\n\n @property\n def connected(self):\n \"\"\"Gets the connected of this ServerDetailNICS. # noqa: E501\n\n\n :return: The connected of this ServerDetailNICS. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._connected\n\n @connected.setter\n def connected(self, connected):\n \"\"\"Sets the connected of this ServerDetailNICS.\n\n\n :param connected: The connected of this ServerDetailNICS. # noqa: E501\n :type: bool\n \"\"\"\n\n self._connected = connected\n\n @property\n def device(self):\n \"\"\"Gets the device of this ServerDetailNICS. # noqa: E501\n\n\n :return: The device of this ServerDetailNICS. # noqa: E501\n :rtype: str\n \"\"\"\n return self._device\n\n @device.setter\n def device(self, device):\n \"\"\"Sets the device of this ServerDetailNICS.\n\n\n :param device: The device of this ServerDetailNICS. # noqa: E501\n :type: str\n \"\"\"\n\n self._device = device\n\n @property\n def ips(self):\n \"\"\"Gets the ips of this ServerDetailNICS. # noqa: E501\n\n\n :return: The ips of this ServerDetailNICS. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._ips\n\n @ips.setter\n def ips(self, ips):\n \"\"\"Sets the ips of this ServerDetailNICS.\n\n\n :param ips: The ips of this ServerDetailNICS. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._ips = ips\n\n @property\n def mac(self):\n \"\"\"Gets the mac of this ServerDetailNICS. # noqa: E501\n\n\n :return: The mac of this ServerDetailNICS. # noqa: E501\n :rtype: str\n \"\"\"\n return self._mac\n\n @mac.setter\n def mac(self, mac):\n \"\"\"Sets the mac of this ServerDetailNICS.\n\n\n :param mac: The mac of this ServerDetailNICS. # noqa: E501\n :type: str\n \"\"\"\n\n self._mac = mac\n\n @property\n def type(self):\n \"\"\"Gets the type of this ServerDetailNICS. # noqa: E501\n\n\n :return: The type of this ServerDetailNICS. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._type\n\n @type.setter\n def type(self, type):\n \"\"\"Sets the type of this ServerDetailNICS.\n\n\n :param type: The type of this ServerDetailNICS. # noqa: E501\n :type: str\n \"\"\"\n\n self._type = type\n\n @property\n def vendor(self):\n \"\"\"Gets the vendor of this ServerDetailNICS. # noqa: E501\n\n\n :return: The vendor of this ServerDetailNICS. # noqa: E501\n :rtype: str\n \"\"\"\n return self._vendor\n\n @vendor.setter\n def vendor(self, vendor):\n \"\"\"Sets the vendor of this ServerDetailNICS.\n\n\n :param vendor: The vendor of this ServerDetailNICS. # noqa: E501\n :type: str\n \"\"\"\n\n self._vendor = vendor\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ServerDetailNICS, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ServerDetailNICS):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "8596358",
"language": "Python",
"matching_score": 0.6828303337097168,
"max_stars_count": 1,
"path": "automox_console_sdk/models/server_detail_nics.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass PolicyDeviceFiltersOutputResults(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'int',\n 'uuid': 'str',\n 'name': 'str',\n 'custom_name': 'str',\n 'os_version': 'str',\n 'os_family': 'str',\n 'server_group': 'PolicyDeviceFiltersOutputServerGroup',\n 'tags': 'list[str]',\n 'ip_addr': 'str',\n 'ip_addrs_private': 'list[str]',\n 'organizational_unit': 'list[str]'\n }\n\n attribute_map = {\n 'id': 'id',\n 'uuid': 'uuid',\n 'name': 'name',\n 'custom_name': 'custom_name',\n 'os_version': 'os_version',\n 'os_family': 'os_family',\n 'server_group': 'server_group',\n 'tags': 'tags',\n 'ip_addr': 'ip_addr',\n 'ip_addrs_private': 'ip_addrs_private',\n 'organizational_unit': 'organizational_unit'\n }\n\n def __init__(self, id=None, uuid=None, name=None, custom_name=None, os_version=None, os_family=None, server_group=None, tags=None, ip_addr=None, ip_addrs_private=None, organizational_unit=None): # noqa: E501\n \"\"\"PolicyDeviceFiltersOutputResults - a model defined in Swagger\"\"\" # noqa: E501\n self._id = None\n self._uuid = None\n self._name = None\n self._custom_name = None\n self._os_version = None\n self._os_family = None\n self._server_group = None\n self._tags = None\n self._ip_addr = None\n self._ip_addrs_private = None\n self._organizational_unit = None\n self.discriminator = None\n if id is not None:\n self.id = id\n if uuid is not None:\n self.uuid = uuid\n if name is not None:\n self.name = name\n if custom_name is not None:\n self.custom_name = custom_name\n if os_version is not None:\n self.os_version = os_version\n if os_family is not None:\n self.os_family = os_family\n if server_group is not None:\n self.server_group = server_group\n if tags is not None:\n self.tags = tags\n if ip_addr is not None:\n self.ip_addr = ip_addr\n if ip_addrs_private is not None:\n self.ip_addrs_private = ip_addrs_private\n if organizational_unit is not None:\n self.organizational_unit = organizational_unit\n\n @property\n def id(self):\n \"\"\"Gets the id of this PolicyDeviceFiltersOutputResults. # noqa: E501\n\n\n :return: The id of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this PolicyDeviceFiltersOutputResults.\n\n\n :param id: The id of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :type: int\n \"\"\"\n\n self._id = id\n\n @property\n def uuid(self):\n \"\"\"Gets the uuid of this PolicyDeviceFiltersOutputResults. # noqa: E501\n\n\n :return: The uuid of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :rtype: str\n \"\"\"\n return self._uuid\n\n @uuid.setter\n def uuid(self, uuid):\n \"\"\"Sets the uuid of this PolicyDeviceFiltersOutputResults.\n\n\n :param uuid: The uuid of this PolicyDeviceFiltersOutputResults. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._uuid = uuid\n\n @property\n def name(self):\n \"\"\"Gets the name of this PolicyDeviceFiltersOutputResults. # noqa: E501\n\n\n :return: The name of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this PolicyDeviceFiltersOutputResults.\n\n\n :param name: The name of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :type: str\n \"\"\"\n\n self._name = name\n\n @property\n def custom_name(self):\n \"\"\"Gets the custom_name of this PolicyDeviceFiltersOutputResults. # noqa: E501\n\n\n :return: The custom_name of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :rtype: str\n \"\"\"\n return self._custom_name\n\n @custom_name.setter\n def custom_name(self, custom_name):\n \"\"\"Sets the custom_name of this PolicyDeviceFiltersOutputResults.\n\n\n :param custom_name: The custom_name of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :type: str\n \"\"\"\n\n self._custom_name = custom_name\n\n @property\n def os_version(self):\n \"\"\"Gets the os_version of this PolicyDeviceFiltersOutputResults. # noqa: E501\n\n\n :return: The os_version of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :rtype: str\n \"\"\"\n return self._os_version\n\n @os_version.setter\n def os_version(self, os_version):\n \"\"\"Sets the os_version of this PolicyDeviceFiltersOutputResults.\n\n\n :param os_version: The os_version of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :type: str\n \"\"\"\n\n self._os_version = os_version\n\n @property\n def os_family(self):\n \"\"\"Gets the os_family of this PolicyDeviceFiltersOutputResults. # noqa: E501\n\n\n :return: The os_family of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :rtype: str\n \"\"\"\n return self._os_family\n\n @os_family.setter\n def os_family(self, os_family):\n \"\"\"Sets the os_family of this PolicyDeviceFiltersOutputResults.\n\n\n :param os_family: The os_family of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :type: str\n \"\"\"\n\n self._os_family = os_family\n\n @property\n def server_group(self):\n \"\"\"Gets the server_group of this PolicyDeviceFiltersOutputResults. # noqa: E501\n\n\n :return: The server_group of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :rtype: PolicyDeviceFiltersOutputServerGroup\n \"\"\"\n return self._server_group\n\n @server_group.setter\n def server_group(self, server_group):\n \"\"\"Sets the server_group of this PolicyDeviceFiltersOutputResults.\n\n\n :param server_group: The server_group of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :type: PolicyDeviceFiltersOutputServerGroup\n \"\"\"\n\n self._server_group = server_group\n\n @property\n def tags(self):\n \"\"\"Gets the tags of this PolicyDeviceFiltersOutputResults. # noqa: E501\n\n\n :return: The tags of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._tags\n\n @tags.setter\n def tags(self, tags):\n \"\"\"Sets the tags of this PolicyDeviceFiltersOutputResults.\n\n\n :param tags: The tags of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._tags = tags\n\n @property\n def ip_addr(self):\n \"\"\"Gets the ip_addr of this PolicyDeviceFiltersOutputResults. # noqa: E501\n\n\n :return: The ip_addr of this PolicyDeviceFiltersOutputResults. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._ip_addr\n\n @ip_addr.setter\n def ip_addr(self, ip_addr):\n \"\"\"Sets the ip_addr of this PolicyDeviceFiltersOutputResults.\n\n\n :param ip_addr: The ip_addr of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :type: str\n \"\"\"\n\n self._ip_addr = ip_addr\n\n @property\n def ip_addrs_private(self):\n \"\"\"Gets the ip_addrs_private of this PolicyDeviceFiltersOutputResults. # noqa: E501\n\n\n :return: The ip_addrs_private of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._ip_addrs_private\n\n @ip_addrs_private.setter\n def ip_addrs_private(self, ip_addrs_private):\n \"\"\"Sets the ip_addrs_private of this PolicyDeviceFiltersOutputResults.\n\n\n :param ip_addrs_private: The ip_addrs_private of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._ip_addrs_private = ip_addrs_private\n\n @property\n def organizational_unit(self):\n \"\"\"Gets the organizational_unit of this PolicyDeviceFiltersOutputResults. # noqa: E501\n\n\n :return: The organizational_unit of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._organizational_unit\n\n @organizational_unit.setter\n def organizational_unit(self, organizational_unit):\n \"\"\"Sets the organizational_unit of this PolicyDeviceFiltersOutputResults.\n\n\n :param organizational_unit: The organizational_unit of this PolicyDeviceFiltersOutputResults. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._organizational_unit = organizational_unit\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(PolicyDeviceFiltersOutputResults, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, PolicyDeviceFiltersOutputResults):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "10827163",
"language": "Python",
"matching_score": 2.5445945262908936,
"max_stars_count": 1,
"path": "automox_console_sdk/models/policy_device_filters_output_results.py"
},
{
"content": "# coding: utf-8\n\n\"\"\"\n Automox Console API\n\n API for use with the Automox Console # noqa: E501\n\n OpenAPI spec version: 2021-11-16\n Contact: <EMAIL>\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nclass ServersIdBody(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'server_group_id': 'int',\n 'ip_addrs': 'str',\n 'exception': 'bool',\n 'tags': 'list[str]',\n 'custom_name': 'str'\n }\n\n attribute_map = {\n 'server_group_id': 'server_group_id',\n 'ip_addrs': 'ip_addrs',\n 'exception': 'exception',\n 'tags': 'tags',\n 'custom_name': 'custom_name'\n }\n\n def __init__(self, server_group_id=None, ip_addrs=None, exception=None, tags=None, custom_name=None): # noqa: E501\n \"\"\"ServersIdBody - a model defined in Swagger\"\"\" # noqa: E501\n self._server_group_id = None\n self._ip_addrs = None\n self._exception = None\n self._tags = None\n self._custom_name = None\n self.discriminator = None\n self.server_group_id = server_group_id\n if ip_addrs is not None:\n self.ip_addrs = ip_addrs\n self.exception = exception\n if tags is not None:\n self.tags = tags\n if custom_name is not None:\n self.custom_name = custom_name\n\n @property\n def server_group_id(self):\n \"\"\"Gets the server_group_id of this ServersIdBody. # noqa: E501\n\n Server Group ID for the specified group. # noqa: E501\n\n :return: The server_group_id of this ServersIdBody. # noqa: E501\n :rtype: int\n \"\"\"\n return self._server_group_id\n\n @server_group_id.setter\n def server_group_id(self, server_group_id):\n \"\"\"Sets the server_group_id of this ServersIdBody.\n\n Server Group ID for the specified group. # noqa: E501\n\n :param server_group_id: The server_group_id of this ServersIdBody. # noqa: E501\n :type: int\n \"\"\"\n if server_group_id is None:\n raise ValueError(\"Invalid value for `server_group_id`, must not be `None`\") # noqa: E501\n\n self._server_group_id = server_group_id\n\n @property\n def ip_addrs(self):\n \"\"\"Gets the ip_addrs of this ServersIdBody. # noqa: E501\n\n Server IP address. # noqa: E501\n\n :return: The ip_addrs of this ServersIdBody. # noqa: E501\n :rtype: str\n \"\"\"\n return self._ip_addrs\n\n @ip_addrs.setter\n def ip_addrs(self, ip_addrs):\n \"\"\"Sets the ip_addrs of this ServersIdBody.\n\n Server IP address. # noqa: E501\n\n :param ip_addrs: The ip_addrs of this ServersIdBody. # noqa: E501\n :type: str\n \"\"\"\n\n self._ip_addrs = ip_addrs\n\n @property\n def exception(self):\n \"\"\"Gets the exception of this ServersIdBody. # noqa: E501\n\n Use the exception property to exclude the device from reports and statistics. # noqa: E501\n\n :return: The exception of this ServersIdBody. # noqa: E501\n :rtype: bool\n \"\"\"\n return self._exception\n\n @exception.setter\n def exception(self, exception):\n \"\"\"Sets the exception of this ServersIdBody.\n\n Use the exception property to exclude the device from reports and statistics. # noqa: E501\n\n :param exception: The exception of this ServersIdBody. 
# noqa: E501\n :type: bool\n \"\"\"\n if exception is None:\n raise ValueError(\"Invalid value for `exception`, must not be `None`\") # noqa: E501\n\n self._exception = exception\n\n @property\n def tags(self):\n \"\"\"Gets the tags of this ServersIdBody. # noqa: E501\n\n List of tags. # noqa: E501\n\n :return: The tags of this ServersIdBody. # noqa: E501\n :rtype: list[str]\n \"\"\"\n return self._tags\n\n @tags.setter\n def tags(self, tags):\n \"\"\"Sets the tags of this ServersIdBody.\n\n List of tags. # noqa: E501\n\n :param tags: The tags of this ServersIdBody. # noqa: E501\n :type: list[str]\n \"\"\"\n\n self._tags = tags\n\n @property\n def custom_name(self):\n \"\"\"Gets the custom_name of this ServersIdBody. # noqa: E501\n\n Custom name that will display in the console instead of the hostname. # noqa: E501\n\n :return: The custom_name of this ServersIdBody. # noqa: E501\n :rtype: str\n \"\"\"\n return self._custom_name\n\n @custom_name.setter\n def custom_name(self, custom_name):\n \"\"\"Sets the custom_name of this ServersIdBody.\n\n Custom name that will display in the console instead of the hostname. # noqa: E501\n\n :param custom_name: The custom_name of this ServersIdBody. # noqa: E501\n :type: str\n \"\"\"\n\n self._custom_name = custom_name\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(ServersIdBody, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ServersIdBody):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"id": "2595312",
"language": "Python",
"matching_score": 1.3795171976089478,
"max_stars_count": 1,
"path": "automox_console_sdk/models/servers_id_body.py"
},
{
"content": "import automox_console_sdk as automox\nfrom automox_console_sdk.api import DevicesApi\nfrom automox_console_sdk.api import GroupsApi\nfrom automox_console_sdk.models import ServersIdBody, ServerGroupCreateOrUpdateRequest\nimport csv\nfrom getpass import getpass\nimport os\n\n\ndef map_automox_devices(d_api):\n hostname_map, ip_map = {}, {}\n page = 0\n\n while True:\n devices_page = d_api.get_devices(o=org_id, limit=500, page=page)\n\n # All devices retrieved once no more are returned\n if len(devices_page) == 0:\n break\n\n for d in devices_page:\n hostname_map[d.name] = d\n\n # Iterate IP address\n for ip in d.ip_addrs:\n ip_map[ip] = d\n page += 1\n\n return hostname_map, ip_map\n\n\ndef get_automox_groups(g_api):\n group_list = []\n default_group_id = 0\n page = 0\n while True:\n groups_page = g_api.get_server_groups(o=org_id, limit=500, page=page)\n\n if len(groups_page) == 0:\n break\n\n for g in groups_page:\n if not g.name:\n default_group_id = g.id\n group_list.append(g)\n\n page += 1\n\n return group_list, default_group_id\n\n\ndef check_csv_headers(csv_file, field_list):\n with open(csv_file, 'r') as fh:\n reader = csv.DictReader(fh, skipinitialspace=True)\n\n dict_from_csv = dict(list(reader)[0])\n\n for field in field_list:\n if field not in dict_from_csv:\n exit(f\"ERROR: {field} is not a header value in {csv_file}\")\n\n\nif __name__ == '__main__':\n # Prompt for inputs\n # API Key\n api_key = os.getenv('AUTOMOX_API_KEY') or getpass(\"Enter your API Key: \")\n # Org ID\n org_id = int(os.getenv('AUTOMOX_ORGANIZATION_ID') or input(\"Enter your Organization ID: \"))\n # Location of CSV\n csv_location = input(\"Enter the location of the CSV: \") or exit(\"ERROR: Must provide location to csv file\")\n # Field for Hostname\n hn_field = input(\"Enter the CSV field for Hostname (default: hostname): \") or \"hostname\"\n # Field for IP Address\n ip_field = input(\"Enter the CSV field for IP Address (default: ip_address): \") or \"ip_address\"\n # Field for group\n group_field = input(\"Enter the CSV field to map to the Automox Server Group: \")\n # Field(s) for tags\n tag_fields = input(\"Enter the comma separated CSV field names to map to Automox device tags: \").split(',')\n\n # Check if provided inputs are valid headers in CSV\n check_csv_headers(csv_location, [hn_field, ip_field, group_field] + tag_fields)\n\n config = automox.Configuration()\n client = automox.ApiClient(configuration=config)\n client.default_headers['Authorization'] = f\"Bearer {api_key}\"\n\n devices_api = DevicesApi(client)\n groups_api = GroupsApi(client)\n\n # Add devices to dict for quick lookup by ip/hostname\n device_hostname_map, device_ip_map = map_automox_devices(devices_api)\n\n # Get all groups\n groups, default_server_group_id = get_automox_groups(groups_api)\n\n # Iterate CSV\n with open(csv_location, 'r') as fh:\n reader = csv.DictReader(fh, skipinitialspace=True)\n for row in reader:\n device = None\n # Check by hostname first\n if hn_field and row[hn_field] in device_hostname_map:\n device = device_hostname_map.get(row[hn_field], None)\n # Check for device by ip as fallback\n elif ip_field and row[ip_field] in device_ip_map:\n device = device_ip_map.get(row[ip_field], None)\n\n # Device found, update based on CSV values\n if device:\n tags = [row[tf] for tf in tag_fields if row.get(tf, None)]\n\n group_name = row[group_field] if group_field in row else \"Default\"\n group = next((g for g in groups if g.name == row[group_field]), None)\n\n # Create group if it doesn't exist yet\n if 
group is None:\n g = ServerGroupCreateOrUpdateRequest(name=row[group_field], refresh_interval=1440,\n parent_server_group_id=default_server_group_id,\n ui_color=\"#FFFF00\")\n try:\n group = groups_api.create_server_group(o=org_id, body=g)\n groups.append(group)\n print(f\"Successfully created Group ID {group.id} ({row[group_field]})\")\n except Exception as e:\n print(f\"Failed to created group [{row[group_field]}]: {e}\")\n\n server_update = ServersIdBody(server_group_id=group.id, exception=device.exception, tags=tags)\n try:\n update_result = devices_api.update_device(o=org_id, id=device.id, body=server_update)\n print(f\"Successfully updated Device ID {device.id} ({row[hn_field]}, {row[ip_field]})\")\n except Exception as e:\n print(f\"Failed to update Device ID {device.id} ({row[hn_field]}, {row[ip_field]}): {e}\")\n else:\n print(f\"No device matching hostname [{row[hn_field]}] or ip address [{row[ip_field]}]\")\n",
"id": "7627724",
"language": "Python",
"matching_score": 5.834218978881836,
"max_stars_count": 1,
"path": "examples/use-cases/update_devices_by_csv/update_devices_by_csv.py"
},
{
"content": "import automox_console_sdk as automox\nfrom automox_console_sdk.api import DevicesApi\nfrom automox_console_sdk.api import GroupsApi\nfrom automox_console_sdk.models import ServersIdBody, ServerGroupCreateOrUpdateRequest\nfrom getpass import getpass\nimport ldap3\nimport re\nimport os\nimport ssl\n\n\ndef map_automox_devices(d_api):\n hostname_map, ip_map = {}, {}\n page = 0\n\n while True:\n devices_page = d_api.get_devices(o=org_id, limit=500, page=page)\n\n # All devices retrieved once no more are returned\n if len(devices_page) == 0:\n break\n\n for d in devices_page:\n hostname_map[d.name.lower()] = d\n\n # Iterate IP address\n for ip in d.ip_addrs:\n ip_map[ip] = d\n page += 1\n\n return hostname_map, ip_map\n\n\ndef get_automox_groups(g_api):\n group_list = []\n default_group_id = 0\n page = 0\n while True:\n groups_page = g_api.get_server_groups(o=org_id, limit=500, page=page)\n\n if len(groups_page) == 0:\n break\n\n for g in groups_page:\n if not g.name:\n default_group_id = g.id\n group_list.append(g)\n\n page += 1\n\n return group_list, default_group_id\n\n\ndef get_ou_from_dn(dn):\n return(','.join(ldap3.utils.dn.to_dn(dn)[1:]))\n\n\nif __name__ == '__main__':\n # Prompt for inputs\n # API Key\n api_key = os.getenv('AUTOMOX_API_KEY') or getpass(\"Enter your API Key: \")\n # Org ID\n org_id = int(os.getenv('AUTOMOX_ORGANIZATION_ID') or input(\"Enter your Organization ID: \"))\n # LDAP/AD Details\n ldap_url = os.getenv('LDAP_URL') or input(\"Enter your LDAP Bind URL (eg ldap://domain-controller:389): \")\n ldap_user = os.getenv('LDAP_USER') or input(\"Enter your LDAP Bind User: \")\n ldap_password = os.getenv('LDAP_PASSWORD') or getpass(\"Enter your LDAP Bind Password: \")\n ldap_base = os.getenv('LDAP_BASE') or input(\"Enter your LDAP base for computers (eg dc=example, dc=com): \")\n # Computer query/filter\n ldap_computer_filter = os.getenv('LDAP_COMPUTER_FILTER') or \\\n input(\"Enter your LDAP Computer Filter (default: (&(objectClass=computer))\") or \\\n \"(&(objectClass=computer))\"\n # Attribute for hostname comparison\n hn_attribute = input(\"Enter the LDAP/AD computer field for hostname comparison (default: name): \") or \"name\"\n # Attribute for FQDN Address comparison\n fqdn_attribute = input(\"Enter the LDAP/AD computer attribute for FQDN comparison (default: dnsHostName): \") or \"dnsHostName\"\n # Attributes for tagging\n tag_attributes = input(\"Enter a comma separated list of LDAP/AD computer attributes used for tagging devices: \")\n tag_attributes = tag_attributes.split(',')\n tag_prefix = 'AD-'\n\n ldap_attributes = list(filter(None, [hn_attribute, fqdn_attribute] + tag_attributes))\n counter_created_groups, counter_matched_devices, counter_unmatched_devices = 0, 0, 0\n\n config = automox.Configuration()\n client = automox.ApiClient(configuration=config)\n client.default_headers['Authorization'] = f\"Bearer {api_key}\"\n\n devices_api = DevicesApi(client)\n groups_api = GroupsApi(client)\n\n # Add devices to dict for quick lookup by ip/hostname\n device_hostname_map, device_ip_map = map_automox_devices(devices_api)\n\n # Get all groups\n groups, default_server_group_id = get_automox_groups(groups_api)\n\n # Pull computers from LDAP/Active Directory\n try:\n if ldap_url.startswith('ldaps://'):\n tls = ldap3.Tls(ca_certs_file=os.getenv('CA_CERT_FILE'),\n validate=ssl.CERT_REQUIRED,\n version=ssl.PROTOCOL_TLSv1)\n server = ldap3.Server(ldap_url, use_ssl = True, tls = tls)\n else:\n server = ldap3.Server(ldap_url)\n conn = ldap3.Connection(server, 
ldap_user, ldap_password, client_strategy=ldap3.SAFE_SYNC, auto_bind=True)\n except Exception as e:\n exit(f\"Failed to connect to {ldap_url}: {e}\")\n\n search_params = {'search_base': ldap_base,\n 'search_filter': ldap_computer_filter,\n 'attributes': ldap_attributes,\n 'paged_size': 1000}\n\n while True:\n try:\n status, result, response, _ = conn.search(**search_params)\n except ldap3.core.exceptions.LDAPAttributeError as lae:\n exit(f\"Failed to query directory due to an invalid attribute being requested, please confirm spelling and \"\n f\"try again: {lae}\")\n\n # Process devices returned by LDAP/AD\n for d in response:\n device_dn = d.get('dn')\n if device_dn is None:\n continue\n\n device = None\n try:\n device_hostname = d.get('attributes').get(hn_attribute).lower()\n except Exception:\n device_hostname = None\n try:\n device_fqdn = d.get('attributes').get(fqdn_attribute)[0].lower()\n except Exception:\n device_fqdn = None\n # Check by hostname first\n if device_hostname and device_hostname in device_hostname_map:\n device = device_hostname_map.get(device_hostname, None)\n # Check for device by ip as fallback\n elif device_fqdn and device_fqdn in device_ip_map:\n device = device_ip_map.get(device_fqdn, None)\n\n # Device found, update group based on ServiceNow field value\n if device:\n # Pull group value based on DN of computer\n group_value = get_ou_from_dn(device_dn)\n # Trim to max group name limit\n group_value = group_value[:44]\n\n group = next((g for g in groups if g.name == group_value), None)\n\n # Gather current device tags not prefixed with AD-\n tags = set()\n for t in device.tags:\n if not t.startswith(tag_prefix):\n tags.add(t)\n managed_tags = set()\n for ta in tag_attributes:\n tag_value = d.get('attributes').get(ta)\n if tag_value is not None:\n managed_tags.add(f\"{tag_prefix}-{ta}-{tag_value}\")\n tags.update(managed_tags)\n\n # Create group if it doesn't exist yet\n if group is None:\n g = ServerGroupCreateOrUpdateRequest(name=group_value, refresh_interval=1440,\n parent_server_group_id=default_server_group_id,\n ui_color=\"#FFFF00\")\n try:\n group = groups_api.create_server_group(o=org_id, body=g)\n groups.append(group)\n print(f\"Successfully created Group ID {group.id} ({group_value})\")\n counter_created_groups += 1\n except Exception as e:\n print(f\"Failed to created group [{group_value}]: {e}\")\n\n server_update = ServersIdBody(server_group_id=group.id, exception=device.exception, tags=list(tags))\n try:\n update_result = devices_api.update_device(o=org_id, id=device.id, body=server_update)\n print(f\"Successfully updated Device ID {device.id} ({device_hostname}, {device_fqdn})\")\n counter_matched_devices += 1\n except Exception as e:\n print(f\"Failed to update Device ID {device.id} ({device_hostname}, {device_fqdn}): {e}\")\n else:\n #print(f\"No device matching hostname [{device_hostname}] or fqdn [{device_fqdn}]\")\n counter_unmatched_devices += 1\n\n # Should we page again\n cookie = conn.result['controls']['1.2.840.113556.1.4.319']['value']['cookie']\n if cookie:\n search_params['paged_cookie'] = cookie\n else:\n break\n\n print(f\"Script complete; matched devices: {counter_matched_devices}, unmatched devices: \"\n f\"{counter_unmatched_devices}, groups created: {counter_created_groups}\")\n",
"id": "417387",
"language": "Python",
"matching_score": 3.9377822875976562,
"max_stars_count": 1,
"path": "examples/use-cases/group_devices_by_activedirectory_ou/group_devices_by_activedirectory_ou.py"
},
{
"content": "import automox_console_sdk as automox\nfrom automox_console_sdk.api import DevicesApi\nimport logging\nimport os\nimport sys\n\nconfig = automox.Configuration()\n\nclient = automox.ApiClient(configuration=config)\nclient.default_headers['Authorization'] = f\"Bearer {os.getenv('AUTOMOX_API_KEY')}\"\n\n# Logging\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.INFO)\nlogger.addHandler(ch)\nconfig.debug = False\n\ndevices_api = DevicesApi(client)\n\norganization = os.getenv('AUTOMOX_ORGANIZATION_ID')\n\npage = 0\nwhile True:\n devices_page = devices_api.get_devices(o=organization, limit=500, page=page)\n for d in devices_page:\n print(f\"Device ID: {d.id}, Hostname: {d.name}, Server Group ID: {d.server_group_id}, IP Addresses: {d.ip_addrs}\")\n\n if len(devices_page) == 0:\n break\n\n page += 1\n",
"id": "8233437",
"language": "Python",
"matching_score": 3.87213397026062,
"max_stars_count": 1,
"path": "examples/scripts/list_devices.py"
},
{
"content": "import automox_console_sdk as automox\nfrom automox_console_sdk.api import PoliciesApi\nimport logging\nimport os\nimport sys\n\nconfig = automox.Configuration()\n\nclient = automox.ApiClient(configuration=config)\nclient.default_headers['Authorization'] = f\"Bearer {os.getenv('AUTOMOX_API_KEY')}\"\n\n# Logging\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.INFO)\nlogger.addHandler(ch)\nconfig.debug = False\n\npolicies_api = PoliciesApi(client)\n\norganization = os.getenv('AUTOMOX_ORGANIZATION_ID')\n\npage = 0\nwhile True:\n policies_page = policies_api.get_policies(o=organization, limit=500, page=page)\n for policy in policies_page:\n print(f\"Policy ID: {policy['id']}, Name: {policy['name']}, Type: {policy['policy_type_name']}\")\n\n if len(policies_page) == 0:\n break\n\n page += 1\n",
"id": "10428622",
"language": "Python",
"matching_score": 0.44448190927505493,
"max_stars_count": 1,
"path": "examples/scripts/list_policies.py"
},
{
"content": "import automox_console_sdk as automox\nimport os\nimport time\nimport socket\nimport json\n\nfrom automox_console_sdk import EventsApi\nfrom getpass import getpass\n\nHOST = os.getenv(\"HOST\") or \"127.0.0.1\"\nPORT = int(os.getenv(\"PORT\")) or 514\n\ndef get_recent_events(event_id=None):\n try:\n page = 0\n\n while True:\n recent_events = []\n events_page = events_api.get_events(o=organization, limit=500, page=page)\n\n if len(events_page) == 0:\n return recent_events\n\n for event in events_page:\n # Stop looking for new events when we see one we know\n if event.id == event_id:\n return recent_events\n\n recent_events.append(event)\n\n page += 1\n except Exception as e:\n print(f\"Error - Could not retrieve events: {e}\")\n\ndef create_syslog_payload(recent_events):\n payload_string = \"\"\n\n for event in recent_events:\n payload = {\n \"date\" : event.create_time.strftime(\"%b %d %Y %H:%M:%S\"),\n \"message\" : event.name,\n \"data\" : json.dumps(event.data)\n }\n\n payload_string += f\"{payload['date']}: {payload['message']} ({payload['data']})\\n\"\n\n payload = str.encode(payload_string)\n\n return payload\n\ntry:\n # Prompt for inputs\n api_secret = os.getenv('AUTOMOX_API_KEY') or getpass('Enter your API Key: ')\n organization = os.getenv('AUTOMOX_ORGANIZATION_ID') or input(\"Enter your Organization ID: \")\n\n config = automox.Configuration()\n client = automox.ApiClient(configuration=config)\n\n client.default_headers['Authorization'] = f\"Bearer {api_secret}\"\n\n events_api = EventsApi(client)\n\n # Initial placeholder value. Gets overwritten in the main loop.\n top_id = None\n\n # Our main loop will run every 10 seconds to check for new events\n while True:\n try:\n if not top_id:\n events = events_api.get_events(o=organization, page=0, limit=1)\n top_id = events[0].id\n except Exception as e:\n print(f\"Error - Could not retrieve events: {e}\")\n\n print(\"Getting recent events...\")\n recent_events = get_recent_events(top_id)\n\n print(f\"Found {len(recent_events)} new event(s)\")\n\n # Move the marker event_id for the most recent event.\n if len(recent_events) > 0:\n top_id = recent_events[0].id\n\n # Prepare the data for the syslog service\n payload = create_syslog_payload(recent_events)\n\n try:\n # Send data to the listening syslog server.\n udp_client_socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)\n udp_client_socket.sendto(payload, (HOST, PORT))\n except socket.error as e:\n print(f\"Could not establish connection to socket: {e}\")\n\n time.sleep(10)\nexcept Exception as e:\n print(f\"Error: {e}\")\nexcept KeyboardInterrupt:\n print (\"Crtl+C Pressed. Shutting down.\")\n",
"id": "1034908",
"language": "Python",
"matching_score": 2.821526527404785,
"max_stars_count": 1,
"path": "examples/use-cases/push_events_to_syslog/pull_events.py"
},
{
"content": "import os\nimport socketserver\n\nHOST = os.getenv(\"HOST\") or \"0.0.0.0\"\nPORT = int(os.getenv(\"PORT\")) or 514\nclass UDPHandler(socketserver.BaseRequestHandler):\n def handle(self):\n data = bytes.decode(self.request[0].strip())\n\n print(f\"{data}\")\n\ntry:\n print(f\"Setting up a syslog to listen on: {HOST}:{PORT}\")\n\n server = socketserver.UDPServer((HOST,PORT), UDPHandler)\n server.serve_forever(poll_interval=0.5)\nexcept (IOError, SystemExit):\n raise\nexcept KeyboardInterrupt:\n print (\"Crtl+C Pressed. Shutting down.\")\n",
"id": "2695027",
"language": "Python",
"matching_score": 0.3082534670829773,
"max_stars_count": 1,
"path": "examples/use-cases/push_events_to_syslog/syslog.py"
}
] | 2.421465 |
jpvirtan | [
{
"content": "#Conversion utility for producing a CityJSON -file from CSV data \n#By <NAME>, Aalto University, 2020\n#Tested on Windows platform, Python 3.7.6, no external libraries should be required.\n#Please see the README.md for further information\n#Please see the LICENCE.txt for licence information\n\nimport csv\nimport json\n\nprint(\"Welcome\")\nprint (\"Please ensure that you use comma as separator.\")\n\n#Define path\nprint(\"Please input path to file for conversion (.csv)\")\npath = input()\n\n#Define object title substring\nprint(\"Please provide name suffix for CityJSON objects\")\nsubname = input()\n\n#Lets open the file, see what we have...\ntry:\n\trowlist = []\n\twith open(path, newline='') as csvfile:\n\t\tCSVreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n\t\tfor row in CSVreader:\n\t\t\trowlist.append(row)\nexcept:\n\tprint(\"Reading file failed. Aborting\")\n\texit()\n\nprint(\"Succesfully read treelist from: \" +path)\nprint(\"Found a total of \" +str(len(rowlist))+ \" rows.\")\nprint(\"Here's a small sample:\")\nprint(rowlist[0])\nprint(rowlist[1])\nprint(rowlist[2])\n\n#Query user for column order\nprint(\"Please indicate coordinate positions from columns:\")\nm = 0\nwhile m < len(rowlist[0]):\n\tprint(str(m) + \": \" + rowlist[0][m])\n\tm = m + 1\nprint(\"Indicate X:\")\nxpos = input()\nprint(\"Indicate Y:\")\nypos = input()\nprint(\"Indicate Z:\")\nzpos = input()\nprint(\"Indicate position of object ID:\")\nidpos = input()\nprint(\"Indicate position of sub-object index:\")\nsubidpos = input()\n\ntry:\n\txpos = int(xpos)\n\typos = int(ypos)\n\tzpos = int(zpos)\n\tidpos = int(idpos)\n\tsubidpos = int(subidpos)\n\n\tif xpos >= len(rowlist[0]) and ypos >= len(rowlist[0]) and zpos >= len(rowlist[0]) and idpos >= len(rowlist[0]) and subidpos >= len(rowlist[0]):\n\t\tprint(\"Please input a valid selection.\")\n\t\texit()\n\nexcept:\n\tprint(\"Please input valid selection...\")\n\texit()\n\n#Define crop\nprint(\"Enable crop?\")\nprint(\"0 = No, process everything\")\nprint(\"1 = Yes, exclude objects outside processing boundary\")\ncrop = input()\n\ntry:\n\tcrop = int(crop)\nexcept:\n\tprint(\"Please input valid selection.\")\n\texit()\n\nif crop == 0:\n\tprint(\"Processing the whole thing.\")\n\txmin = 0\n\txmax = 0\n\tymin = 0\n\tymax = 0\nif crop == 1:\n\tprint(\"Please input crop range.\")\n\tprint(\"X min:\")\n\txmin = float(input())\n\tprint(\"X max:\")\n\txmax = float(input())\n\tprint(\"Y min:\")\n\tymin = float(input())\n\tprint(\"Y max:\")\n\tymax = float(input())\nif crop != 0 and crop != 1:\n\tprint(\"Please input valid selection.\")\n\texit()\n\n#Create empty dictionary\ncityJSONRoot={}\n\n#Set up CityJSON base structure\ncityJSONRoot[\"type\"] = \"CityJSON\"\ncityJSONRoot[\"version\"] = \"1.0\"\ncityJSONRoot[\"CityObjects\"] = \"\"\ncityJSONRoot[\"vertices\"] = \"\"\n\n#Create empty objects that are to hold objects and vertices\ncityJSONObjects = {}\ncityJSONVertices = []\n\n#Query user for processing style \nprint(\"Please specify what to process\")\nprint(\"1 = Points (plot as rectangle or with a 3D tree symbol)\")\nprint(\"2 = Polygons\")\ngetSelection = input()\n\ntry:\n\tgetSelection = int(getSelection)\nexcept:\n\tprint(\"Please input integer number.\")\n\texit()\n\nif (getSelection != 1) and (getSelection != 2):\n\tprint(\"Incorrect selection. 
Aborting.\")\n\texit()\nelse:\n\tif getSelection == 1:\n\t\tobjectType = \"Tree\"\n\tif getSelection == 2:\n\t\tobjectType = \"Area\"\n\t\t\t\n#This will track the amount of processed objects\nwrittenTrees = 0\n\n#Processing track for polygonal areas\nif objectType == \"Area\":\n\n\t#Query user for flipping normals\n\tprint(\"Enable flipping normals?\")\n\tprint(\"0 = No\")\n\tprint(\"1 = Yes\")\n\tflip = input()\n\n\ttry:\n\t\tflip = int(flip)\n\texcept:\n\t\tprint(\"Please input valid selection.\")\n\t\texit()\n\t\n\t#Query user for what object type to write\n\tpolygonObjectTypes = [\"Building\",\"BuildingPart\",\"BuildingInstallation\",\"Road\",\"Railway\",\"TransportSquare\",\"WaterBody\",\"LandUse\",\"PlantCover\",\"CityFurniture\",\"GenericCityObject\"]\n\tprint(\"Please select object type to use:\")\n\tm = 0\n\twhile m < len(polygonObjectTypes):\n\t\tprint(str(m) + \": \" + polygonObjectTypes[m])\n\t\tm = m + 1\n\n\tselectedObjectType = input()\n\ttry:\n\t\tselectedObjectType = int(selectedObjectType)\n\texcept:\n\t\tprint(\"Please input valid selection\")\n\t\texit()\n\t\n\tif selectedObjectType > len(polygonObjectTypes):\n\t\tprint(\"Incorrect.\")\n\t\texit()\n\n\tprint(\"Selected: \" + polygonObjectTypes[selectedObjectType])\n\n\t#Read file\n\tpointlist = []\n\twith open(path, newline='') as csvfile:\n\t\tCSVreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n\t\tfor row in CSVreader:\n\t\t\tpointlist.append(row)\n\n\ti = 1 #Index holding the row to process, starts with 1 to skip header!\n\n\tpolyID = 1 #What polygon to process\n\tpolyPoints = []\n\tpolys = []\n\n\twhile i < len(pointlist): #Run as long as there are rows (e.g. points)\n\n\t\t#Check if new polygon\n\t\tif polyID == int(pointlist[i][idpos]):\n\t\t\tpolyPoints.append(pointlist[i])\n\t\t\ti = i + 1\n\t\telse:\n\t\t\tif len(polyPoints) == 0: #Check for an empty polygon\n\t\t\t\tprint(\"Found an empty polygon at: \" +str(polyID) + \" skipping this.\")\n\t\t\t\tprint(pointlist[i])\n\t\t\t\tpolyPoints = []\n\t\t\t\tpolyID = polyID + 1\n\t\t\telse: #New polygon begings, re-initialize\n\t\t\t\tpolys.append(polyPoints)\n\t\t\t\tpolyPoints = []\n\t\t\t\tpolyID = polyID + 1\n\t\t\t\t\n\tprint(\"Processed polygons \" +str(polyID-1))\n\tprint(\"Kept \" +str(len(polys)))\n\t\n\t#Processing an individual polygon. 
All metadata is taken from the first point, as they are assumed to remain constant\n\ti = 0\n\tpointIndex = 0 #Index holding the vertice number, used in assembling boundary\n\twhile i < len(polys):\n\t\tpolyPoints = polys[i]\n\t\tsubID = \"\"\n\t\tsubIDcount = 0\n\n\t\t#Compute midpoint, used for cropping, if crop is enabled\n\t\txMed = 0\n\t\tyMed = 0\n\t\tn = 0\n\t\twhile n < len(polyPoints):\n\t\t\txMed = xMed + float(polyPoints[n][xpos])\n\t\t\tyMed = yMed + float(polyPoints[n][ypos])\n\t\t\tif polyPoints[n][subidpos] != subID:\n\t\t\t\tsubID = polyPoints[n][subidpos]\n\t\t\t\tsubIDcount = subIDcount + 1\n\t\t\tn = n + 1\n\t\txMed = xMed / len(polyPoints)\n\t\tyMed = yMed / len(polyPoints)\n\t\tprint(\"Polygon \" + str(i) + \" center at X=\" +str(xMed) + \" Y=\" +str(yMed))\n\t\tprint(\"Polygon had \" + str(subIDcount) + \" sub-polygons.\")\n\n\t\tif (xmin < xMed < xmax and ymin < yMed < ymax) or crop == 0:\n\t\t\tcityJSONAttributes = {}\n\t\t\t\n\t\t\t#This just dumps all attributes from original file to CityJSON, note that is super-non-schema-compliant\n\t\t\tm = 0\n\t\t\twhile m < len(polyPoints[0]):\n\t\t\t\tif m != xpos and m != ypos and m != zpos and m != idpos:\n\t\t\t\t\tcityJSONAttributes[pointlist[0][m]] = polyPoints[0][m]\n\t\t\t\tm = m + 1\n\n\t\t\t#Create dict for geometry attributes\n\t\t\tcityJSONGeometryAttributes = {}\n\t\t\tcityJSONGeometryAttributes[\"type\"] = \"MultiSurface\"\n\t\t\tcityJSONGeometryAttributes[\"lod\"] = 2 #Note fixed LOD here!\n\n\t\t\t#Asseble lists for point indexes to form boundary and sub-boundary list\n\t\t\t#Sub boundary is used for objects that consists of multiple polygons\n\t\t\tboundaryList = []\n\t\t\tsubBoundaryList = []\n\n\t\t\tn = 0\n\t\t\tsubID = polyPoints[n][subidpos]\n\t\t\twhile n < len(polyPoints):\n\n\t\t\t\tif polyPoints[n][subidpos] != subID:\n\t\t\t\t\tif flip == 1: #Flipping normals now!\n\t\t\t\t\t\tflippedList = []\n\t\t\t\t\t\tm = 0\n\t\t\t\t\t\twhile m < len(subBoundaryList):\n\t\t\t\t\t\t\tflippedList.append(subBoundaryList[len(subBoundaryList)-(m+1)])\n\t\t\t\t\t\t\tm = m + 1\n\t\t\t\t\t\tsubBoundaryList = flippedList #Replace original boundary list with a flipped one\n\t\t\t\t\tboundaryList.append([subBoundaryList])\n\t\t\t\t\tsubBoundaryList = [] #Begin new sub-boundary list\n\t\t\t\t\tsubBoundaryList.append(pointIndex) #Add points to new sb list\n\t\t\t\t\tsubID = polyPoints[n][subidpos] #Update index following which sb were are writing to\n\t\t\t\telif n == (len(polyPoints)-1): #Last point of polygon reached\n\t\t\t\t\tsubBoundaryList.append(pointIndex) #Still add to the same sb\n\t\t\t\t\tif flip == 1: #Flipping normals now!\n\t\t\t\t\t\tflippedList = []\n\t\t\t\t\t\tm = 0\n\t\t\t\t\t\twhile m < len(subBoundaryList):\n\t\t\t\t\t\t\tflippedList.append(subBoundaryList[len(subBoundaryList)-(m+1)])\n\t\t\t\t\t\t\tm = m + 1\n\t\t\t\t\t\tsubBoundaryList = flippedList #Replace original boundary list with a flipped one\n\t\t\t\t\tboundaryList.append([subBoundaryList]) #Close object\n\t\t\t\telse:\n\t\t\t\t\tsubBoundaryList.append(pointIndex) #Add to same sb as before, we remain in same object\n\n\t\t\t\ttry:\n\t\t\t\t\tcityJSONVertices.append([float(polyPoints[n][xpos]),float(polyPoints[n][ypos]),float(polyPoints[n][zpos])]) #This is to go to vertice list, note that it is shared for the entire file\n\t\t\t\t\twrittenTrees = writtenTrees + 1\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"Error encountered on this poly, writing Z = 
0\")\n\t\t\t\t\tprint(polyPoints[n])\n\t\t\t\t\tcityJSONVertices.append([float(polyPoints[n][xpos]),float(polyPoints[n][ypos]),0.0]) #This is to go to vertice list, note that it is shared for the entire file\n\t\t\t\t\twrittenTrees = writtenTrees + 1\n\n\t\t\t\tpointIndex = pointIndex + 1\n\t\t\t\tn = n + 1\n\n\t\t\tcityJSONGeometryAttributes[\"boundaries\"] = boundaryList\n\t\t\t#Basic structure for objects\n\t\t\tcityJSONObject = {}\n\n\t\t\tcityJSONObject[\"type\"] = polygonObjectTypes[selectedObjectType]\n\n\t\t\tcityJSONObject[\"attributes\"] = cityJSONAttributes\n\t\t\tcityJSONObject[\"geometry\"] = [cityJSONGeometryAttributes]\n\t\t\tcityJSONObjects[polygonObjectTypes[selectedObjectType]+\"_\"+subname+\"_\"+polyPoints[1][idpos]] = cityJSONObject\n\n\t\ti = i + 1\n\n#This is the processing pipeline for points, such as tree objects\nif objectType == \"Tree\":\n\ttreeList = []\n\twith open(path, newline='') as csvfile:\n\t\tCSVreader = csv.reader(csvfile, delimiter=',', quotechar='\"')\n\t\tfor row in CSVreader:\n\t\t\ttreeList.append(row)\n\n\t#Query user for lod-representation to use\n\tprint(\"Please specify model to utilize:\")\n\tprint(\"1 = 2D rectangle\")\n\tprint(\"2 = Simplified 3D tree icon\")\n\ttreeModel = input()\n\n\ttry:\n\t\ttreeModel = int(treeModel)\n\texcept:\n\t\tprint(\"Please input integer number.\")\n\t\texit()\n\n\tif (treeModel != 1) and (treeModel != 2):\n\t\tprint(\"Incorrect selection. Aborting.\")\n\t\texit()\n\n\t#Default for tree size, later overwritten by what is found from data\n\ttreeScale = 1\n\n\t#This is used to interpret Helsinki tree register size markings\n\tsizeGroups = [\"0_-_10_cm\",\"20_-_30_cm\",\"30_-_50_cm\",\"50_-_70_cm\",\"70_cm_-\"]\n\t# This is not smart, but remains here due to issues with original datasets.\n\t# Should be fixed in the future to be more ubiquitous...\n\n\t#Index to track how we progress in tree list\n\ti = 1\n\n\twhile i < len(treeList):\n\t\ty = float(treeList[i][xpos])\n\t\tx = float(treeList[i][ypos])\n\t\tz = float(treeList[i][zpos])\n\t\n\t\tif (xmin < x < xmax and ymin < y < ymax) or crop == 0:\n\t\t\tn = 0\n\t\t\twhile n < len(sizeGroups):\n\t\t\t\tif treeList[i][9] == sizeGroups[n]:\n\t\t\t\t\ttreeScale = n * 3\n\t\t\t\tn = n + 1\n\t\n\t\t\t#Dict for object attributes\n\t\t\tcityJSONAttributes = {}\n\n\t\t\t#This just dumps all attributes from original file to CityJSON, note that is super-non-schema-compliant\n\t\t\tm = 0\n\t\t\twhile m < len(treeList[i]):\n\t\t\t\tif m != xpos and m != ypos and m != zpos and m != idpos:\n\t\t\t\t\tcityJSONAttributes[treeList[i][m]] = treeList[i][m]\n\t\t\t\tm = m + 1\n\n\t\t\t#Dict for geometry attributes\n\t\t\tcityJSONGeometryAttributes = {}\n\t\t\tcityJSONGeometryAttributes[\"type\"] = \"MultiSurface\"\n\t\t\tcityJSONGeometryAttributes[\"lod\"] = treeModel\n\n\t\t\t#Create geometry for rectangular patch\n\t\t\tif treeModel == 1:\n\t\t\t\tcityJSONGeometryAttributes[\"boundaries\"] = [[[writtenTrees*4,writtenTrees*4+1,writtenTrees*4+2,writtenTrees*4+3]]]\n\n\t\t\t#Create geometry for a small 3D tree symbol.\n\t\t\tif treeModel == 2:\n\t\t\t\tcityJSONGeometryAttributes[\"boundaries\"] = 
[\n\t\t\t\t[[writtenTrees*13+0,writtenTrees*13+2,writtenTrees*13+5,writtenTrees*13+3]],\n\t\t\t\t[[writtenTrees*13+2,writtenTrees*13+1,writtenTrees*13+4,writtenTrees*13+5]],\n\t\t\t\t[[writtenTrees*13+1,writtenTrees*13+0,writtenTrees*13+3,writtenTrees*13+4]],\n\t\t\t\t[[writtenTrees*13+3,writtenTrees*13+5,writtenTrees*13+8,writtenTrees*13+6]],\n\t\t\t\t[[writtenTrees*13+5,writtenTrees*13+4,writtenTrees*13+7,writtenTrees*13+8]],\n\t\t\t\t[[writtenTrees*13+4,writtenTrees*13+3,writtenTrees*13+6,writtenTrees*13+7]],\n\t\t\t\t[[writtenTrees*13+6,writtenTrees*13+8,writtenTrees*13+11,writtenTrees*13+9]],\n\t\t\t\t[[writtenTrees*13+8,writtenTrees*13+7,writtenTrees*13+10,writtenTrees*13+11]],\n\t\t\t\t[[writtenTrees*13+7,writtenTrees*13+6,writtenTrees*13+9,writtenTrees*13+10]],\n\t\t\t\t[[writtenTrees*13+9,writtenTrees*13+11,writtenTrees*13+12]],\n\t\t\t\t[[writtenTrees*13+11,writtenTrees*13+10,writtenTrees*13+12]],\n\t\t\t\t[[writtenTrees*13+10,writtenTrees*13+9,writtenTrees*13+12]],\n\t\t\t\t]\n\t\t\t\t#In future, this should be expressed as templates for performance improvements in visualization.\n\t\t\n\t\t\t#Assebling the object\n\t\t\tcityJSONObject = {}\n\t\t\tcityJSONObject[\"type\"] = \"SolitaryVegetationObject\"\n\t\t\tcityJSONObject[\"attributes\"] = cityJSONAttributes\n\t\t\tcityJSONObject[\"geometry\"] = [cityJSONGeometryAttributes]\n\t\t\tcityJSONObjects[\"Tree_\" + subname + \"_\" + treeList[i][1]] = cityJSONObject\n\t\n\t\t\t#Add vertices for rectangular patch\n\t\t\tif treeModel == 1:\n\t\t\t\tcityJSONVertices.append([x+treeScale,y+treeScale,z])\n\t\t\t\tcityJSONVertices.append([x-treeScale,y+treeScale,z])\n\t\t\t\tcityJSONVertices.append([x-treeScale,y-treeScale,z])\n\t\t\t\tcityJSONVertices.append([x+treeScale,y-treeScale,z])\n\t\t\t\n\t\t\t#Add vertices for tree symbol\n\t\t\tif treeModel == 2:\n\t\t\t\tcityJSONVertices.append([x+0.000000*treeScale,y-0.093815*treeScale,z+0.000000*treeScale])\n\t\t\t\tcityJSONVertices.append([x-0.081246*treeScale,y+0.046907*treeScale,z+0.000000*treeScale])\n\t\t\t\tcityJSONVertices.append([x+0.081246*treeScale,y+0.046907*treeScale,z+0.000000*treeScale])\n\t\t\t\tcityJSONVertices.append([x+0.000000*treeScale,y-0.093815*treeScale,z+0.295725*treeScale])\n\t\t\t\tcityJSONVertices.append([x-0.081246*treeScale,y+0.046907*treeScale,z+0.295725*treeScale])\n\t\t\t\tcityJSONVertices.append([x+0.081246*treeScale,y+0.046907*treeScale,z+0.295725*treeScale])\n\t\t\t\tcityJSONVertices.append([x-0.000000*treeScale,y-0.351669*treeScale,z+0.696748*treeScale])\n\t\t\t\tcityJSONVertices.append([x-0.304555*treeScale,y+0.175835*treeScale,z+0.696748*treeScale])\n\t\t\t\tcityJSONVertices.append([x+0.304555*treeScale,y+0.175835*treeScale,z+0.696748*treeScale])\n\t\t\t\tcityJSONVertices.append([x-0.000000*treeScale,y-0.284935*treeScale,z+0.914895*treeScale])\n\t\t\t\tcityJSONVertices.append([x-0.246761*treeScale,y+0.142468*treeScale,z+0.914895*treeScale])\n\t\t\t\tcityJSONVertices.append([x+0.246761*treeScale,y+0.142468*treeScale,z+0.914895*treeScale])\n\t\t\t\tcityJSONVertices.append([x+0.000000*treeScale,y-0.000000*treeScale,z+1.000180*treeScale])\n\t\t\twrittenTrees = writtenTrees + 1\n\t\ti = i + 1\n\n#Assembling the entire file\ncityJSONRoot[\"CityObjects\"] = cityJSONObjects\ncityJSONRoot[\"vertices\"] = cityJSONVertices\n\n#Path to write to, overwritten if there already\noutPath = path + \".json\"\n\n#Open write, write, close write\nwrite = open(outPath,\"w\")\nwrite.write(json.dumps(cityJSONRoot,indent=1))\nwrite.close()\n\n#Print some stats.\nprint(\"Processed a 
total of \" + str(i-1) + \" objects.\")\nprint(\"Wrote out a total of \" + str(writtenTrees) + \" points.\")",
"id": "8642024",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "CSVtoJSON.py"
}
] | 0 |
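The CSVtoJSON.py record above flips surface normals by reversing each sub-boundary list and then nests every ring as [[ring]] inside a CityJSON MultiSurface. A minimal, self-contained sketch of that assembly step follows; the ring indexes, attribute values and object type are hypothetical illustration data, not taken from the script.

# Sketch of the boundary assembly used in CSVtoJSON.py: optionally flip a ring
# (reverse its vertex order) and wrap it as a MultiSurface boundary.
def flip_ring(ring):
    # Reversing the vertex order flips the direction of the surface normal.
    return list(reversed(ring))

def make_multisurface_object(rings, attributes, lod=2, flip=False):
    boundaries = [[flip_ring(r) if flip else list(r)] for r in rings]
    return {
        "type": "GenericCityObject",  # hypothetical object type for this sketch
        "attributes": dict(attributes),
        "geometry": [{"type": "MultiSurface", "lod": lod, "boundaries": boundaries}],
    }

if __name__ == "__main__":
    ring = [0, 1, 2, 3]  # indexes into a shared vertex list
    obj = make_multisurface_object([ring], {"source": "example"}, flip=True)
    print(obj["geometry"][0]["boundaries"])  # [[[3, 2, 1, 0]]]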
nugit | [
{
"content": "# Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and put.\n\n# get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.\n# put(key, value) - put or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate the least recently used item before inserting the new item.\n# The LRUCache will be initialized with an integer corresponding to its capacity. Capacity indicates the maximum number of unique keys it can hold at a time.\n\n# Definition of “least recently used” : An access to an item is defined as a get or a put operation of the item. “Least recently used” item is the one with the oldest access time.\n\n# NOTE: If you are using any global variables, make sure to clear them in the constructor.\n\n# Input :\n# capacity = 2\n# put(1, 10)\n# put(5, 12)\n# get(5) returns 12\n# get(1) returns 10\n# get(10) returns -1\n# put(6, 14) this pushes out key = 5 as LRU is full.\n# get(5) returns -1\n\n\nclass LRU:\n def __init__(self,capacity):\n self.capacity = capacity\n self.cache = {}\n self.lru = []\n\n def reorder(self,key):\n self.lru.remove(key)\n self.lru.append(key)\n\n def get(self,key):\n if key in self.lru:\n self.reorder(key)\n return self.cache[key]\n else:\n return -1\n\n def put(self, key, value):\n def add(key, value):\n self.lru.append(key)\n self.cache[key] = value\n\n if key in self.lru:\n self.reorder(key)\n self.cache[key] = value\n elif len(self.lru) == self.capacity:\n k = self.lru.pop(0)\n del self.cache[k]\n add(key,value)\n else:\n add(key,value)\n\nlru = LRU(2)\nprint (lru.put(1, 10))\nprint (lru.put(5, 12))\nprint (lru.get(5)) #returns 12\nprint (lru.get(1)) #returns 10\nprint (lru.get(10)) #returns -1\nprint (lru.put(6, 14)) #this pushes out key = 5 as LRU is full.\nprint (lru.get(5)) #returns -1\n",
"id": "12803359",
"language": "Python",
"matching_score": 0,
"max_stars_count": 8,
"path": "python/data_structures/least_recently_used_cache.py"
}
] | 0 |
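The list-based LRU cache above scans and rewrites self.lru on every access, so get and put are O(n) in the number of cached keys. Below is a sketch of the same get/put interface backed by collections.OrderedDict, which keeps the recency bookkeeping O(1). It is an alternative illustration, not the original implementation, and the class name LRUCacheOD is made up here.

# Sketch: same behaviour as the LRU above, using OrderedDict for O(1) updates.
from collections import OrderedDict

class LRUCacheOD:
    def __init__(self, capacity):
        self.capacity = capacity
        self.cache = OrderedDict()

    def get(self, key):
        if key not in self.cache:
            return -1
        self.cache.move_to_end(key)  # mark key as most recently used
        return self.cache[key]

    def put(self, key, value):
        if key in self.cache:
            self.cache.move_to_end(key)
        self.cache[key] = value
        if len(self.cache) > self.capacity:
            self.cache.popitem(last=False)  # evict the least recently used key

lru = LRUCacheOD(2)
lru.put(1, 10)
lru.put(5, 12)
print(lru.get(5))  # 12
print(lru.get(1))  # 10
lru.put(6, 14)     # evicts key 5
print(lru.get(5))  # -1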
TR-MATIC | [
{
"content": "#This is intended to be a subprocess. It's controlled from another place and receives data.\n\nimport time\nimport os\nimport signal\n\n\nos.kill(9636, signal.SIGABRT)\n\nfor cnt in range(600):\n data_file = open(\"synchro.txt\", mode=\"r\", encoding=\"UTF-8\")\n current_time = str(data_file.read())\n data_file.close()\n print(\"Read No {:>3} : {}\".format(str(cnt), current_time))\n time.sleep(1.0)\n",
"id": "7996864",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "source_code/receiver.py"
},
{
"content": "#This is intended to be the main process. It controls others and gives the data feed.\n\nimport time\nimport os\n\nfor cnt in range(6000):\n current_time = time.strftime(\"%H:%M:%S\")\n data_file = open(\"synchro.txt\", mode=\"w\", encoding=\"UTF-8\")\n data_file.write(current_time)\n data_file.close()\n if not (cnt % 10):\n print(\"{:>4}/\".format(str(cnt)), end=\"\")\n time.sleep(0.1)\n if not (cnt % 250):\n print(\"\\n >> {} <<\".format(os.getpid()))\n",
"id": "6993669",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "source_code/transmitter.py"
}
] | 0 |
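In the TR-MATIC pair above, transmitter.py rewrites synchro.txt ten times per second while receiver.py reads it once per second, so the reader can occasionally see a truncated file if it opens the file mid-write. A common mitigation, sketched below under the assumption that the temporary file and the target live on the same filesystem, is to write to a temporary file and atomically replace the target. The helper name write_atomic is invented for this example and is not part of the original scripts.

# Sketch: atomic update of the shared file so a concurrent reader never sees a
# half-written value. os.replace is an atomic rename on POSIX filesystems.
import os
import tempfile
import time

def write_atomic(path, text):
    fd, tmp_path = tempfile.mkstemp(dir=os.path.dirname(path) or ".")
    try:
        with os.fdopen(fd, "w", encoding="UTF-8") as tmp_file:
            tmp_file.write(text)
        os.replace(tmp_path, path)  # swap the new file in over the old one
    except BaseException:
        os.unlink(tmp_path)
        raise

if __name__ == "__main__":
    write_atomic("synchro.txt", time.strftime("%H:%M:%S"))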
ayeright | [
{
"content": "from msc.rot13 import rot13\nfrom msc.rot13 import rot13_char\n\n\ndef test_rot13_char_non_alpha():\n assert '#' == rot13_char('#')\n",
"id": "11862945",
"language": "Python",
"matching_score": 0.3186648488044739,
"max_stars_count": 0,
"path": "msc/tests/test_ayeright.py"
},
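test_rot13_char_non_alpha only pins down how rot13_char treats non-alphabetic input. The msc.rot13 module itself is not included in this dump, so the following is an assumed implementation that is consistent with that test, shown purely for illustration.

# Sketch of rot13_char/rot13 consistent with the test: letters are rotated by
# 13 positions, anything else (such as '#') is returned unchanged.
import string

def rot13_char(c):
    if c in string.ascii_lowercase:
        return chr((ord(c) - ord('a') + 13) % 26 + ord('a'))
    if c in string.ascii_uppercase:
        return chr((ord(c) - ord('A') + 13) % 26 + ord('A'))
    return c

def rot13(text):
    return ''.join(rot13_char(c) for c in text)

assert rot13_char('#') == '#'
assert rot13('Hello') == 'Uryyb'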
{
"content": "def test_nothing():\n # This is a stupid test that tests nothing.\n pass\n",
"id": "7054971",
"language": "Python",
"matching_score": 0.009870159439742565,
"max_stars_count": 0,
"path": "msc/tests/test_mikej888.py"
},
{
"content": "import os\nfrom pathlib import Path\n\nimport pandas as pd\nimport click\n\n\ndef download_all_datasets() -> (pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame):\n \"\"\"\n Download datasets from the UCI Machine Learning Repository.\n\n Target variable of each dataset is in the final column and all other columns are features.\n\n Returns:\n boston_housing_data: Boston Housing dataset.\n yacht_hydrodynamics_data: Yacht Hydrodynamics dataset.\n concrete_strength_data: Concrete Compressive Strength dataset.\n energy_efficiency_data: Energy Efficiency dataset.\n \"\"\"\n boston_housing_data = download_boston_housing_dataset()\n yacht_hydrodynamics_data = download_yacht_hydrodynamics_dataset()\n concrete_strength_data = download_concrete_strength_dataset()\n energy_efficiency_data = download_energy_efficiency_dataset()\n return boston_housing_data, yacht_hydrodynamics_data, concrete_strength_data, energy_efficiency_data\n\n\ndef download_boston_housing_dataset() -> pd.DataFrame:\n \"\"\"\n Download the Boston Housing dataset from the UCI Machine Learning Repository.\n\n Returns:\n Boston Housing dataset. Target variable is in final column.\n \"\"\"\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'\n columns = [\n 'CRIM',\n 'ZN',\n 'INDUS',\n 'CHAS',\n 'NOX',\n 'RM',\n 'AGE',\n 'DIS',\n 'RAD',\n 'TAX',\n 'PTRATIO',\n 'B',\n 'LSTAT',\n 'MEDV',\n ]\n return pd.read_csv(url, delim_whitespace=True, names=columns)\n\n\ndef download_yacht_hydrodynamics_dataset() -> pd.DataFrame:\n \"\"\"\n Download the Yacht Hydrodynamics dataset from the UCI Machine Learning Repository.\n\n Returns:\n Yacht Hydrodynamics dataset. Target variable is in final column.\n \"\"\"\n url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data'\n columns = [\n 'buoyancy_longitudinal_position',\n 'prismatic_coefficient',\n 'length_displacement_ratio',\n 'beam_draught_ratio',\n 'length_beam_ratio',\n 'froude_number',\n 'residuary_resistance',\n ]\n return pd.read_csv(url, delim_whitespace=True, names=columns)\n\n\ndef download_concrete_strength_dataset() -> pd.DataFrame:\n \"\"\"\n Download the Concrete Compressive Strength dataset from the UCI Machine Learning Repository.\n\n Returns:\n Concrete Compressive Strength dataset. Target variable is in final column.\n \"\"\"\n url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls'\n return pd.read_excel(url)\n\n\ndef download_energy_efficiency_dataset() -> pd.DataFrame:\n \"\"\"\n Download the Energy Efficiency dataset from the UCI Machine Learning Repository.\n\n Returns:\n Energy Efficiency dataset. Target variable is in final column. 
This dataset has two targets, heating and cooling\n load, but we return only heating load.\n \"\"\"\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00242/ENB2012_data.xlsx'\n return pd.read_excel(url).iloc[:, :-1]\n\n\n@click.command()\n@click.option('--boston-housing-output-path', type=str, help='The parquet file path to save the Boston Housing dataset')\n@click.option('--yacht-hydrodynamics-output-path', type=str, help='The parquet file path to save the Yacht '\n 'Hydrodynamics dataset')\n@click.option('--concrete-strength-output-path', type=str, help='The parquet file path to save the Concrete '\n 'Compressive Strength dataset')\n@click.option('--energy-efficiency-output-path', type=str, help='The parquet file path to save the Energy Efficiency '\n 'dataset')\ndef main(boston_housing_output_path: str, yacht_hydrodynamics_output_path: str, concrete_strength_output_path: str,\n energy_efficiency_output_path: str):\n \"\"\"\n Download datasets from the UCI Machine Learning Repository and save them to disk.\n\n Args:\n boston_housing_output_path: The parquet file path to save the Boston Housing dataset.\n yacht_hydrodynamics_output_path: The parquet file path to save the Yacht Hydrodynamics dataset.\n concrete_strength_output_path: The parquet file path to save the Concrete Compressive Strength dataset.\n energy_efficiency_output_path: The parquet file path to save the Energy Efficiency dataset.\n \"\"\"\n boston_housing_data, yacht_hydrodynamics_data, concrete_strength_data, energy_efficiency_data \\\n = download_all_datasets()\n\n for x in [boston_housing_output_path, yacht_hydrodynamics_output_path, concrete_strength_output_path,\n energy_efficiency_output_path]:\n Path(os.path.dirname(x)).mkdir(parents=True, exist_ok=True)\n\n boston_housing_data.to_parquet(boston_housing_output_path)\n yacht_hydrodynamics_data.to_parquet(yacht_hydrodynamics_output_path)\n concrete_strength_data.to_parquet(concrete_strength_output_path)\n energy_efficiency_data.to_parquet(energy_efficiency_output_path)\n\n\nif __name__ == '__main__':\n main()\n",
"id": "5501229",
"language": "Python",
"matching_score": 3.6570510864257812,
"max_stars_count": 0,
"path": "experiments/uci_datasets.py"
},
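Every downloader in uci_datasets.py returns a DataFrame whose final column is the target variable, which is why downstream code can split the data positionally. A small sketch of that convention follows, using a stand-in frame whose column names are invented.

# Sketch: split a "features first, target in the last column" DataFrame.
import pandas as pd

def split_features_target(df: pd.DataFrame):
    X = df.iloc[:, :-1].values  # all columns except the last
    y = df.iloc[:, -1].values   # final column is the target
    return X, y

df = pd.DataFrame({"x0": [1.0, 2.0], "x1": [3.0, 4.0], "target": [0.5, 1.5]})
X, y = split_features_target(df)
print(X.shape, y.shape)  # (2, 2) (2,)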
{
"content": "import os\nfrom pathlib import Path\nimport tempfile\nfrom typing import Optional\nimport urllib\n\nimport click\nimport pandas as pd\nfrom sklearn.datasets import load_svmlight_file\n\n\nAUSTRALIAN_URL = 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/australian_scale'\nBREAST_CANCER_URL = 'https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/breast-cancer_scale'\n\n\ndef download_all_datasets() -> (pd.DataFrame, pd.DataFrame):\n \"\"\"\n Download datasets from the LIBSVM repository.\n\n Binary target variable of each dataset is in the final column and all other columns are features.\n\n Returns:\n australian_data: Scaled Australian dataset.\n breast_cancer_data: Scaled Breast Cancer dataset.\n \"\"\"\n australian_data = download_libsvm_dataset(url=AUSTRALIAN_URL, y_map={-1: 0})\n breast_cancer_data = download_libsvm_dataset(url=BREAST_CANCER_URL, y_map={2: 0, 4: 1})\n return australian_data, breast_cancer_data\n\n\ndef download_libsvm_dataset(url: str, y_map: Optional[dict] = None) -> pd.DataFrame:\n \"\"\"\n Download the LIBSVM dataset at the given URL.\n\n Target variable is in the final column and all other columns are features.\n\n Args:\n url: The URL of the dataset.\n y_map: Map for substituting values of the target variable.\n\n Returns:\n A DataFrame with columns x1, x2, ..., xn, y.\n \"\"\"\n y_map = y_map or dict()\n\n with tempfile.TemporaryDirectory() as tmp:\n tmp_file_path = os.path.join(tmp, 'libsvm.txt')\n urllib.request.urlretrieve(url, tmp_file_path)\n X, y = load_svmlight_file(tmp_file_path)\n\n df = pd.DataFrame(X.toarray(), columns=[f'x{i}' for i in range(X.shape[1])])\n df['y'] = y\n df['y'] = df['y'].replace(y_map)\n\n return df\n\n\n@click.command()\n@click.option('--australian-output-path', type=str, help='The parquet file path to save the scaled Australian dataset')\n@click.option('--breast-cancer-output-path', type=str, help='The parquet file path to save the scaled Breast Cancer '\n 'dataset')\ndef main(australian_output_path: str, breast_cancer_output_path: str):\n \"\"\"\n Download datasets from the LIBSVM repository and save them to disk.\n\n Args:\n australian_output_path: The parquet file path to save the Australian dataset.\n breast_cancer_output_path: The parquet file path to save the Breast Cancer dataset.\n \"\"\"\n australian_data, breast_cancer_data = download_all_datasets()\n\n for x in [australian_output_path, breast_cancer_output_path]:\n Path(os.path.dirname(x)).mkdir(parents=True, exist_ok=True)\n\n australian_data.to_parquet(australian_output_path)\n breast_cancer_data.to_parquet(breast_cancer_output_path)\n\n\nif __name__ == '__main__':\n main()\n",
"id": "9342583",
"language": "Python",
"matching_score": 0.5640338659286499,
"max_stars_count": 0,
"path": "experiments/libsvm_datasets.py"
},
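download_libsvm_dataset maps the raw LIBSVM labels onto {0, 1} with pandas replace, using y_map={-1: 0} for the Australian data and y_map={2: 0, 4: 1} for the Breast Cancer data. A tiny sketch of what those substitutions do, on made-up label values:

# Sketch: the label remapping performed inside download_libsvm_dataset.
import pandas as pd

y_australian = pd.Series([-1, 1, -1, 1])
print(y_australian.replace({-1: 0}).tolist())           # [0, 1, 0, 1]

y_breast_cancer = pd.Series([2, 4, 2, 4])
print(y_breast_cancer.replace({2: 0, 4: 1}).tolist())   # [0, 1, 0, 1]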
{
"content": "import os\nfrom pathlib import Path\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport click\n\n\ndef run_analysis(results: pd.DataFrame, analysis_output_dir: str):\n \"\"\"\n Aggregate the experiment results and generate box plots to show the distribution of the test mean squared error for\n each dataset.\n\n Args:\n results: The results of each cross-validation fold. The number of rows in the DataFrame is equal to\n n_datasets * n_folds. The DataFrame has the following columns:\n - mse_pretrained: (float) The test MSE of the pre-trained weight vector.\n - mse_swa: (float) The test MSE of the average weight vector (SWA solution).\n - mse_gradient_fa: (float) The test MSE of the ensemble constructed from the online gradient FA posterior.\n - mse_em_fa: (float) The test MSE of the ensemble constructed from the online EM FA posterior.\n - pinball05_pretrained: (float) The pinball loss with alpha=0.05 of the pre-trained weight vector.\n - pinball05_swa: (float) The pinball loss with alpha=0.05 of the average weight vector (SWA solution).\n - pinball05_gradient_fa: (float) The pinball loss with alpha=0.05 of the ensemble constructed from the online\n gradient FA posterior.\n - pinball05_em_fa: (float) The pinball loss with alpha=0.05 of the ensemble constructed from the online EM FA\n posterior.\n - pinball95_pretrained: (float) The pinball loss with alpha=0.95 of the pre-trained weight vector.\n - pinball95_swa: (float) The pinball loss with alpha=0.95 of the average weight vector (SWA solution).\n - pinball95_gradient_fa: (float) The pinball loss with alpha=0.95 of the ensemble constructed from the online\n gradient FA posterior.\n - pinball95_em_fa: (float) The pinball loss with alpha=0.95 of the ensemble constructed from the online EM FA\n posterior.\n - dataset: (str) The name of the dataset.\n - fold: (int) The index of the cross-validation fold.\n analysis_output_dir: The directory path to save the output of the analysis.\n \"\"\"\n metric_columns = ['mse_pretrained', 'mse_swa', 'mse_gradient_fa', 'mse_em_fa'] + \\\n ['pinball05_pretrained', 'pinball05_swa', 'pinball05_gradient_fa', 'pinball05_em_fa'] + \\\n ['pinball95_pretrained', 'pinball95_swa', 'pinball95_gradient_fa', 'pinball95_em_fa']\n\n for dataset_label in results['dataset'].unique():\n dataset_results = results[results['dataset'] == dataset_label]\n\n means = dataset_results[metric_columns].mean().reset_index()\n standard_errors = dataset_results[metric_columns].sem().reset_index()\n\n means.to_csv(os.path.join(\n analysis_output_dir,\n f'linear_regression_predictions_metric_means__{dataset_label}.csv'),\n index=False,\n )\n\n standard_errors.to_csv(os.path.join(\n analysis_output_dir,\n f'linear_regression_predictions_metric_standard_errors__{dataset_label}.csv'),\n index=False,\n )\n\n generate_and_save_error_bar_plot(\n results=dataset_results,\n png_path=os.path.join(\n analysis_output_dir, f'linear_regression_predictions_mse__{dataset_label}.png',\n ),\n )\n\n\ndef generate_and_save_error_bar_plot(results: pd.DataFrame, png_path: str):\n \"\"\"\n For each algorithm, plot the mean of the MSE with standard error bars.\n\n Save the plot to the given png file.\n\n Args:\n results: Should contain columns 'mse_pretrained', 'mse_swa', 'mse_gradient_fa', 'mse_em_fa'. 
For each column,\n the mean and standard error will be plotted on a single figure.\n png_path: The file path to save the plot as a png file.\n \"\"\"\n plt.rcParams.update({'font.size': 15})\n\n fig, ax = plt.subplots(1, 1, figsize=(8, 6))\n metric_columns = ['mse_pretrained', 'mse_swa', 'mse_gradient_fa', 'mse_em_fa']\n labels = ['Pre-trained', 'SWA', 'SGA FA ensemble', 'EM FA Ensemble']\n markers = ['o', 's', 'v', 'X']\n for x, metric_name in enumerate(metric_columns):\n y = results[metric_name].mean()\n se = results[metric_name].sem()\n\n ax.errorbar(x, y, se, marker=markers[x], markersize=20, elinewidth=3, label=labels[x], capsize=10, capthick=3)\n\n ax.set_xticks([])\n plt.ylabel('Mean squared error')\n plt.legend()\n\n plt.savefig(png_path, format='png')\n\n\n@click.command()\n@click.option('--results-input-path', type=str, help='The parquet file path from which to load the experiment results')\n@click.option('--analysis-output-dir', type=str, help='The directory path to save the output of the analysis')\ndef main(results_input_path: str, analysis_output_dir: str):\n \"\"\"\n Analyse the results from the linear regression predictions experiments.\n\n Save the analysis to the given output directory.\n\n Args:\n results_input_path: The parquet file path from which to load the experiment results.\n analysis_output_dir: The directory path to save the output of the analysis.\n \"\"\"\n results = pd.read_parquet(results_input_path)\n\n Path(analysis_output_dir).mkdir(parents=True, exist_ok=True)\n\n run_analysis(\n results,\n analysis_output_dir,\n )\n\n\nif __name__ == '__main__':\n main()\n",
"id": "2560710",
"language": "Python",
"matching_score": 3.71097731590271,
"max_stars_count": 0,
"path": "experiments/linear_regression_predictions_analysis.py"
},
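run_analysis summarises each dataset's cross-validation folds with the mean and the standard error of the mean for every metric column before plotting the error bars. A compact sketch of that aggregation on an invented results frame with a single metric column:

# Sketch: per-dataset mean and standard error of the mean, as in run_analysis.
import pandas as pd

results = pd.DataFrame({
    "dataset": ["boston_housing"] * 3 + ["yacht_hydrodynamics"] * 3,
    "mse_swa": [21.0, 23.5, 22.0, 8.9, 9.4, 9.1],  # invented fold results
})

for dataset_label in results["dataset"].unique():
    fold_mse = results.loc[results["dataset"] == dataset_label, "mse_swa"]
    print(dataset_label, fold_mse.mean(), fold_mse.sem())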
{
"content": "import os\nfrom pathlib import Path\nfrom typing import Dict, List\n\nimport torch\nfrom torch import Tensor\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch.optim import SGD\nfrom torch.nn.functional import mse_loss\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import Callback\nimport numpy as np\nimport pandas as pd\nimport click\nimport yaml\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import KFold\n\nfrom swafa.models import FeedForwardNet\nfrom swafa.callbacks import WeightPosteriorCallback\nfrom swafa.fa import OnlineGradientFactorAnalysis, OnlineEMFactorAnalysis, OnlineFactorAnalysis\nfrom swafa.posterior import ModelPosterior\nfrom experiments.utils.factory import OPTIMISER_FACTORY\nfrom experiments.utils.metrics import pinball_loss\n\n\ndef run_all_experiments(\n datasets: List[pd.DataFrame],\n dataset_labels: List[str],\n latent_dim: int,\n n_folds: int,\n lr_pretrain: float,\n lr_swa: float,\n n_epochs_pretrain: int,\n n_epochs_swa: int,\n n_batches_per_epoch: int,\n weight_decay: float,\n gradient_optimiser: str,\n gradient_optimiser_kwargs: dict,\n gradient_warm_up_time_steps: int,\n em_warm_up_time_steps: int,\n n_posterior_samples: int,\n) -> pd.DataFrame:\n \"\"\"\n Run experiments on the given datasets.\n\n For each dataset, train a linear model to predict the target variable via SGD. Split training up into two parts, a\n pre-training phase followed by a second phase during which the weight vectors sampled after each batch update are\n used to estimate the posterior distribution of the weights via online gradient factor analysis (FA) and online\n expectation-maximisation (EM) FA.\n\n After training, sample weight vectors from the posteriors and use them to construct ensembles. Compute the test mean\n squared error (MSE) of the pre-trained weights, the average weights (SWA solution) and the two ensembles constructed\n from the online gradient FA posterior and the online EM FA posterior.\n\n Args:\n datasets: A list of datasets. Each dataset contains features and a target variable, where the target variable is\n in the final column.\n dataset_labels: A label for each of the datasets.\n latent_dim: The latent dimension of the FA models.\n n_folds: The number of folds of cross-validation to run for each dataset.\n lr_pretrain: The learning rate to use during the pre-training phase.\n lr_swa: The learning rate to use while sampling weight vectors after pre-training.\n n_epochs_pretrain: The number of pre-training epochs.\n n_epochs_swa: The number of epochs for which to sample weight vector after pre-training.\n n_batches_per_epoch: The number of batches per training epoch.\n weight_decay: The L2 regularisation strength to use while training.\n gradient_optimiser: The name of the PyTorch optimiser used in the online gradient FA learning algorithm. 
Options\n are 'sgd' and 'adam'.\n gradient_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used in the online gradient FA learning\n algorithm.\n gradient_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model in the\n online gradient algorithm before updating the other parameters.\n em_warm_up_time_steps: The number of time steps on which to update the running means of the FA model in the\n online EM algorithm before updating the other parameters.\n n_posterior_samples: The number of samples of the weight vector to draw from each posterior to form each\n ensemble.\n\n Returns:\n The results of each cross-validation fold. The number of rows in the DataFrame is equal to n_datasets * n_folds.\n The DataFrame has the following columns:\n - mse_pretrained: (float) The test MSE of the pre-trained weight vector.\n - mse_swa: (float) The test MSE of the average weight vector (SWA solution).\n - mse_gradient_fa: (float) The test MSE of the ensemble constructed from the online gradient FA posterior.\n - mse_em_fa: (float) The test MSE of the ensemble constructed from the online EM FA posterior.\n - pinball05_pretrained: (float) The pinball loss with alpha=0.05 of the pre-trained weight vector.\n - pinball05_swa: (float) The pinball loss with alpha=0.05 of the average weight vector (SWA solution).\n - pinball05_gradient_fa: (float) The pinball loss with alpha=0.05 of the ensemble constructed from the online\n gradient FA posterior.\n - pinball05_em_fa: (float) The pinball loss with alpha=0.05 of the ensemble constructed from the online EM FA\n posterior.\n - pinball95_pretrained: (float) The pinball loss with alpha=0.95 of the pre-trained weight vector.\n - pinball95_swa: (float) The pinball loss with alpha=0.95 of the average weight vector (SWA solution).\n - pinball95_gradient_fa: (float) The pinball loss with alpha=0.95 of the ensemble constructed from the online\n gradient FA posterior.\n - pinball95_em_fa: (float) The pinball loss with alpha=0.95 of the ensemble constructed from the online EM FA\n posterior.\n - dataset: (str) The name of the dataset.\n - fold: (int) The index of the cross-validation fold.\n \"\"\"\n results = []\n for label, dataset in zip(dataset_labels, datasets):\n print(f'Running experiments on {label} dataset...')\n print('-' * 100)\n\n dataset_results = run_dataset_experiments(\n dataset=dataset,\n dataset_label=label,\n latent_dim=latent_dim,\n n_folds=n_folds,\n lr_pretrain=lr_pretrain,\n lr_swa=lr_swa,\n n_epochs_pretrain=n_epochs_pretrain,\n n_epochs_swa=n_epochs_swa,\n n_batches_per_epoch=n_batches_per_epoch,\n weight_decay=weight_decay,\n gradient_optimiser=gradient_optimiser,\n gradient_optimiser_kwargs=gradient_optimiser_kwargs,\n gradient_warm_up_time_steps=gradient_warm_up_time_steps,\n em_warm_up_time_steps=em_warm_up_time_steps,\n n_posterior_samples=n_posterior_samples,\n )\n\n results.append(dataset_results)\n print('-' * 100)\n\n return pd.concat(results, ignore_index=True)\n\n\ndef run_dataset_experiments(\n dataset: pd.DataFrame,\n dataset_label: str,\n latent_dim: int,\n n_folds: int,\n lr_pretrain: float,\n lr_swa: float,\n n_epochs_pretrain: int,\n n_epochs_swa: int,\n n_batches_per_epoch: int,\n weight_decay: float,\n gradient_optimiser: str,\n gradient_optimiser_kwargs: dict,\n gradient_warm_up_time_steps: int,\n em_warm_up_time_steps: int,\n n_posterior_samples: int,\n) -> pd.DataFrame:\n \"\"\"\n Run experiments on the given dataset.\n\n Train a linear model to predict the target variable via 
SGD. Split training up into two parts, a pre-training phase\n followed by a second phase during which the weight vectors sampled after each batch update are used to estimate the\n posterior distribution of the weights via online gradient factor analysis (FA) and online expectation-maximisation\n (EM) FA.\n\n After training, sample weight vectors from the posteriors and use them to construct ensembles. Compute the test mean\n squared error (MSE) of the pre-trained weights, the average weights (SWA solution) and the two ensembles constructed\n from the online gradient FA posterior and the online EM FA posterior.\n\n Args:\n dataset: Contains features and a target variable, where the target variable is in the final column.\n dataset_label: A label for the dataset.\n latent_dim: The latent dimension of the FA models.\n n_folds: The number of folds of cross-validation to run for each dataset.\n lr_pretrain: The learning rate to use during the pre-training phase.\n lr_swa: The learning rate to use while sampling weight vectors after pre-training.\n n_epochs_pretrain: The number of pre-training epochs.\n n_epochs_swa: The number of epochs for which to sample weight vector after pre-training.\n n_batches_per_epoch: The number of batches per training epoch.\n weight_decay: The L2 regularisation strength to use while training.\n gradient_optimiser: The name of the PyTorch optimiser used in the online gradient FA learning algorithm. Options\n are 'sgd' and 'adam'.\n gradient_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used in the online gradient FA learning\n algorithm.\n gradient_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model in the\n online gradient algorithm before updating the other parameters.\n em_warm_up_time_steps: The number of time steps on which to update the running means of the FA model in the\n online EM algorithm before updating the other parameters.\n n_posterior_samples: The number of samples of the weight vector to draw from each posterior to form each\n ensemble.\n\n Returns:\n The results of each cross-validation fold. The number of rows in the DataFrame is equal to n_folds. 
The\n DataFrame has the following columns:\n - mse_pretrained: (float) The test MSE of the pre-trained weight vector.\n - mse_swa: (float) The test MSE of the average weight vector (SWA solution).\n - mse_gradient_fa: (float) The test MSE of the ensemble constructed from the online gradient FA posterior.\n - mse_em_fa: (float) The test MSE of the ensemble constructed from the online EM FA posterior.\n - pinball05_pretrained: (float) The pinball loss with alpha=0.05 of the pre-trained weight vector.\n - pinball05_swa: (float) The pinball loss with alpha=0.05 of the average weight vector (SWA solution).\n - pinball05_gradient_fa: (float) The pinball loss with alpha=0.05 of the ensemble constructed from the online\n gradient FA posterior.\n - pinball05_em_fa: (float) The pinball loss with alpha=0.05 of the ensemble constructed from the online EM FA\n posterior.\n - pinball95_pretrained: (float) The pinball loss with alpha=0.95 of the pre-trained weight vector.\n - pinball95_swa: (float) The pinball loss with alpha=0.95 of the average weight vector (SWA solution).\n - pinball95_gradient_fa: (float) The pinball loss with alpha=0.95 of the ensemble constructed from the online\n gradient FA posterior.\n - pinball95_em_fa: (float) The pinball loss with alpha=0.95 of the ensemble constructed from the online EM FA\n posterior.\n - dataset: (str) The name of the dataset.\n - fold: (int) The index of the cross-validation fold.\n \"\"\"\n X = dataset.iloc[:, :-1].values\n y = dataset.iloc[:, -1].values\n\n batch_size = int(np.floor(len(dataset) / n_batches_per_epoch))\n\n kfold = KFold(n_splits=n_folds, shuffle=True, random_state=42)\n\n results = []\n for k, (train_index, test_index) in enumerate(kfold.split(X)):\n print(f'Running cross-validation fold {k + 1} of {n_folds}...')\n\n X_train = X[train_index]\n y_train = y[train_index]\n X_test = X[test_index]\n y_test = y[test_index]\n\n fold_results = run_cv_fold(\n X_train=X_train,\n y_train=y_train,\n X_test=X_test,\n y_test=y_test,\n latent_dim=latent_dim,\n lr_pretrain=lr_pretrain,\n lr_swa=lr_swa,\n n_epochs_pretrain=n_epochs_pretrain,\n n_epochs_swa=n_epochs_swa,\n batch_size=batch_size,\n weight_decay=weight_decay,\n gradient_optimiser=gradient_optimiser,\n gradient_optimiser_kwargs=gradient_optimiser_kwargs,\n gradient_warm_up_time_steps=gradient_warm_up_time_steps,\n em_warm_up_time_steps=em_warm_up_time_steps,\n n_posterior_samples=n_posterior_samples,\n model_random_seed=k,\n posterior_random_seed=k + 1,\n )\n\n fold_results['dataset'] = dataset_label\n fold_results['fold'] = k + 1\n\n results.append(fold_results)\n\n print('-' * 100)\n\n return pd.DataFrame(results)\n\n\ndef run_cv_fold(\n X_train: np.ndarray,\n y_train: np.ndarray,\n X_test: np.ndarray,\n y_test: np.ndarray,\n latent_dim: int,\n lr_pretrain: float,\n lr_swa: float,\n n_epochs_pretrain: int,\n n_epochs_swa: int,\n batch_size: int,\n weight_decay: float,\n gradient_optimiser: str,\n gradient_optimiser_kwargs: dict,\n gradient_warm_up_time_steps: int,\n em_warm_up_time_steps: int,\n n_posterior_samples: int,\n model_random_seed: int,\n posterior_random_seed: int,\n) -> dict:\n \"\"\"\n Run a single cross-validation fold for the given data.\n\n Train a linear model to predict the target variable via SGD. 
Split training up into two parts, a pre-training phase\n followed by a second phase during which the weight vectors sampled after each batch update are used to estimate the\n posterior distribution of the weights via online gradient factor analysis (FA) and online expectation-maximisation\n (EM) FA.\n\n After training, sample weight vectors from the posteriors and use them to construct ensembles. Compute the test mean\n squared error (MSE) of the pre-trained weights, the average weights (SWA solution) and the two ensembles constructed\n from the online gradient FA posterior and the online EM FA posterior.\n\n Args:\n X_train: The training features, of shape (n_train, n_features).\n y_train: The training targets, of shape (n_train,).\n X_test: The test features, of shape (n_test, n_features).\n y_test: The test targets, of shape (n_test,).\n latent_dim: The latent dimension of the FA models.\n lr_pretrain: The learning rate to use during the pre-training phase.\n lr_swa: The learning rate to use while sampling weight vectors after pre-training.\n n_epochs_pretrain: The number of pre-training epochs.\n n_epochs_swa: The number of epochs for which to sample weight vector after pre-training.\n batch_size: The number of data points per training batch.\n weight_decay: The L2 regularisation strength to use while training.\n gradient_optimiser: The name of the PyTorch optimiser used in the online gradient FA learning algorithm. Options\n are 'sgd' and 'adam'.\n gradient_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used in the online gradient FA learning\n algorithm.\n gradient_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model in the\n online gradient algorithm before updating the other parameters.\n em_warm_up_time_steps: The number of time steps on which to update the running means of the FA model in the\n online EM algorithm before updating the other parameters.\n n_posterior_samples: The number of samples of the weight vector to draw from each posterior to form each\n ensemble.\n model_random_seed: The random seed to use when initialising the model.\n posterior_random_seed: The random seed to use when initialising the FA posteriors.\n\n Returns:\n The results of the cross-validation fold. 
Has the following keys:\n - mse_pretrained: (float) The test MSE of the pre-trained weight vector.\n - mse_swa: (float) The test MSE of the average weight vector (SWA solution).\n - mse_gradient_fa: (float) The test MSE of the ensemble constructed from the online gradient FA posterior.\n - mse_em_fa: (float) The test MSE of the ensemble constructed from the online EM FA posterior.\n - pinball05_pretrained: The pinball loss with alpha=0.05 of the pre-trained weight vector.\n - pinball05_swa: The pinball loss with alpha=0.05 of the average weight vector (SWA solution).\n - pinball05_gradient_fa: The pinball loss with alpha=0.05 of the ensemble constructed from the online\n gradient FA posterior.\n - pinball05_em_fa: The pinball loss with alpha=0.05 of the ensemble constructed from the online EM FA\n posterior.\n - pinball95_pretrained: The pinball loss with alpha=0.95 of the pre-trained weight vector.\n - pinball95_swa: The pinball loss with alpha=0.95 of the average weight vector (SWA solution).\n - pinball95_gradient_fa: The pinball loss with alpha=0.95 of the ensemble constructed from the online\n gradient FA posterior.\n - pinball95_em_fa: The pinball loss with alpha=0.95 of the ensemble constructed from the online EM FA\n posterior.\n \"\"\"\n scaler = StandardScaler()\n X_train = torch.from_numpy(scaler.fit_transform(X_train)).float()\n X_test = torch.from_numpy(scaler.transform(X_test)).float()\n\n y_train = torch.from_numpy(y_train).float()\n y_test = torch.from_numpy(y_test).float()\n\n model_optimiser_kwargs = dict(lr=lr_pretrain, weight_decay=weight_decay)\n\n gradient_weight_posterior_kwargs = dict(\n latent_dim=latent_dim,\n optimiser=OPTIMISER_FACTORY[gradient_optimiser],\n optimiser_kwargs=gradient_optimiser_kwargs,\n n_warm_up_time_steps=gradient_warm_up_time_steps,\n random_seed=posterior_random_seed,\n )\n\n em_weight_posterior_kwargs = dict(\n latent_dim=latent_dim,\n n_warm_up_time_steps=em_warm_up_time_steps,\n random_seed=posterior_random_seed,\n )\n\n model, gradient_posterior_update_callback, em_posterior_update_callback = build_model_and_callbacks(\n X=X_train,\n model_optimiser_kwargs=model_optimiser_kwargs,\n gradient_weight_posterior_kwargs=gradient_weight_posterior_kwargs,\n em_weight_posterior_kwargs=em_weight_posterior_kwargs,\n model_random_seed=model_random_seed,\n )\n\n callbacks = [gradient_posterior_update_callback, em_posterior_update_callback]\n\n w_pretrained, b_pretrained = fit_model(\n X=X_train,\n y=y_train,\n model=model,\n callbacks=callbacks,\n n_epochs_pretrain=n_epochs_pretrain,\n n_epochs_swa=n_epochs_swa,\n lr_swa=lr_swa,\n batch_size=batch_size,\n )\n\n return evaluate_model(\n X=X_test,\n y=y_test,\n w_pretrained=w_pretrained,\n b_pretrained=b_pretrained,\n gradient_posterior=gradient_posterior_update_callback.posterior,\n em_posterior=em_posterior_update_callback.posterior,\n n_posterior_samples=n_posterior_samples,\n random_seed=posterior_random_seed,\n )\n\n\ndef build_model_and_callbacks(\n X: Tensor,\n model_optimiser_kwargs: dict,\n gradient_weight_posterior_kwargs: dict,\n em_weight_posterior_kwargs: dict,\n model_random_seed: int,\n) -> (FeedForwardNet, WeightPosteriorCallback, WeightPosteriorCallback):\n \"\"\"\n Build a linear model and callbacks which should be called during training to update the weight posteriors.\n\n Args:\n X: The features. 
Of shape (n_samples, n_features).\n model_optimiser_kwargs: Keyword arguments for the SGD optimiser used to train the linear model during the\n pre-training phase.\n gradient_weight_posterior_kwargs: Keyword arguments for the instance of OnlineGradientFactorAnalysis used to\n estimate the posterior.\n em_weight_posterior_kwargs: Keyword arguments for the instance of OnlineEMFactorAnalysis used to estimate the\n posterior.\n model_random_seed: The random seed used to initialise the linear model.\n\n Returns:\n model: An linear model with the same dimension as the input data plus a bias term.\n gradient_posterior_update_callback: Callbacks used to update the OnlineGradientFactorAnalysis weight posterior.\n em_posterior_update_callback: Callbacks used to update the OnlineEMFactorAnalysis weight posterior.\n \"\"\"\n model = FeedForwardNet(\n input_dim=X.shape[1],\n bias=True,\n optimiser_class=SGD,\n optimiser_kwargs=model_optimiser_kwargs,\n random_seed=model_random_seed,\n )\n\n gradient_posterior = ModelPosterior(\n model=model,\n weight_posterior_class=OnlineGradientFactorAnalysis,\n weight_posterior_kwargs=gradient_weight_posterior_kwargs,\n )\n\n em_posterior = ModelPosterior(\n model=model,\n weight_posterior_class=OnlineEMFactorAnalysis,\n weight_posterior_kwargs=em_weight_posterior_kwargs,\n )\n\n gradient_posterior_update_callback = WeightPosteriorCallback(\n posterior=gradient_posterior.weight_posterior,\n update_epoch_start=1,\n )\n\n em_posterior_update_callback = WeightPosteriorCallback(\n posterior=em_posterior.weight_posterior,\n update_epoch_start=1,\n )\n\n return model, gradient_posterior_update_callback, em_posterior_update_callback\n\n\ndef fit_model(\n X: Tensor,\n y: Tensor,\n model: FeedForwardNet,\n callbacks: List[Callback],\n n_epochs_pretrain: int,\n n_epochs_swa: int,\n lr_swa: float,\n batch_size: int,\n) -> (Tensor, Tensor):\n \"\"\"\n Fit the given model to the given data.\n\n Training is split into two parts, a pre-training phase followed by a second phase during which the weight vectors\n sampled after each batch update are used to estimate the posterior distribution of the weights via online gradient\n factor analysis (FA) and online expectation-maximisation (EM) FA.\n\n Args:\n X: The features. Of shape (n_samples, n_features).\n y: The targets. Of shape (n_samples,).\n model: The model which is to be fit to the data.\n callbacks: Any callbacks which should be called during training.\n n_epochs_pretrain: The number of pre-training epochs.\n n_epochs_swa: The number of epochs for which to sample weight vector after pre-training.\n lr_swa: The learning rate to use while sampling weight vectors after pre-training.\n batch_size: The number of data points per training batch.\n\n Returns:\n A copy of the model's weights after the pre-training phase. Of shape (n_features, 1).\n A copy of the model's bias after the pre-training phase. 
Of shape (1,).\n \"\"\"\n dataset = TensorDataset(X, y)\n dataloader = DataLoader(dataset, batch_size=batch_size, drop_last=True, shuffle=True)\n\n pre_trainer = Trainer(max_epochs=n_epochs_pretrain, progress_bar_refresh_rate=0)\n pre_trainer.fit(model, train_dataloader=dataloader)\n w_pretrained = torch.clone(model.output_layer.weight.data).reshape(-1, 1)\n b_pretrained = torch.clone(model.output_layer.bias.data).squeeze()\n\n swa_trainer = Trainer(max_epochs=n_epochs_swa, callbacks=callbacks, progress_bar_refresh_rate=0)\n model.optimiser_kwargs['lr'] = lr_swa\n swa_trainer.fit(model, train_dataloader=dataloader)\n\n return w_pretrained, b_pretrained\n\n\ndef evaluate_model(\n X: Tensor,\n y: Tensor,\n w_pretrained: Tensor,\n b_pretrained: Tensor,\n gradient_posterior: OnlineGradientFactorAnalysis,\n em_posterior: OnlineEMFactorAnalysis,\n n_posterior_samples: int,\n random_seed: int,\n) -> Dict[str, float]:\n \"\"\"\n Compute metrics for the pre-trained weights, the average weights (SWA solution) and the two ensembles constructed\n from the online gradient FA posterior and the online expectation-maximisation (EM) FA posterior.\n\n Args:\n X: The features. Of shape (n_samples, n_features).\n y: The targets. Of shape (n_samples,).\n w_pretrained: A copy of the model's weights after the pre-training phase. Of shape (n_features, 1).\n b_pretrained: A copy of the model's bias after the pre-training phase. Of shape (1,).\n gradient_posterior: The weight posterior estimated via online gradient FA.\n em_posterior: The weight posterior estimated via online EM FA.\n n_posterior_samples: The number of samples of the weight vector to draw from each posterior to form each\n ensemble.\n random_seed: The random seed to use when drawing samples from the posteriors.\n\n Returns: The metrics. 
Has the following keys:\n - mse_pretrained: The MSE of the pre-trained weight vector.\n - mse_swa: The MSE of the average weight vector (SWA solution).\n - mse_gradient_fa: The MSE of the ensemble constructed from the online gradient FA posterior.\n - mse_em_fa: The MSE of the ensemble constructed from the online EM FA posterior.\n - pinball05_pretrained: The pinball loss with alpha=0.05 of the pre-trained weight vector.\n - pinball05_swa: The pinball loss with alpha=0.05 of the average weight vector (SWA solution).\n - pinball05_gradient_fa: The pinball loss with alpha=0.05 of the ensemble constructed from the online\n gradient FA posterior.\n - pinball05_em_fa: The pinball loss with alpha=0.05 of the ensemble constructed from the online EM FA\n posterior.\n - pinball95_pretrained: The pinball loss with alpha=0.95 of the pre-trained weight vector.\n - pinball95_swa: The pinball loss with alpha=0.95 of the average weight vector (SWA solution).\n - pinball95_gradient_fa: The pinball loss with alpha=0.95 of the ensemble constructed from the online\n gradient FA posterior.\n - pinball95_em_fa: The pinball loss with alpha=0.95 of the ensemble constructed from the online EM FA\n posterior.\n \"\"\"\n y_hat_pretrained = affine_transformation(X, w_pretrained, b_pretrained)\n\n y_hat_swa = swa_predict(X=X, posterior=gradient_posterior)\n\n y_hat_gradient_fa = posterior_ensemble_predict(\n X=X,\n posterior=gradient_posterior,\n n_posterior_samples=n_posterior_samples,\n random_seed=random_seed,\n )\n\n y_hat_em_fa = posterior_ensemble_predict(\n X=X,\n posterior=em_posterior,\n n_posterior_samples=n_posterior_samples,\n random_seed=random_seed,\n )\n\n return dict(\n mse_pretrained=mse_loss(y_hat_pretrained, y).item(),\n mse_swa=mse_loss(y_hat_swa, y).item(),\n mse_gradient_fa=mse_loss(y_hat_gradient_fa, y).item(),\n mse_em_fa=mse_loss(y_hat_em_fa, y).item(),\n pinball05_pretrained=pinball_loss(y, y_hat_pretrained, alpha=0.05),\n pinball05_swa=pinball_loss(y, y_hat_swa, alpha=0.05),\n pinball05_gradient_fa=pinball_loss(y, y_hat_gradient_fa, alpha=0.05),\n pinball05_em_fa=pinball_loss(y, y_hat_em_fa, alpha=0.05),\n pinball95_pretrained=pinball_loss(y, y_hat_pretrained, alpha=0.95),\n pinball95_swa=pinball_loss(y, y_hat_swa, alpha=0.95),\n pinball95_gradient_fa=pinball_loss(y, y_hat_gradient_fa, alpha=0.95),\n pinball95_em_fa=pinball_loss(y, y_hat_em_fa, alpha=0.95),\n )\n\n\ndef swa_predict(X: Tensor, posterior: OnlineFactorAnalysis) -> Tensor:\n \"\"\"\n Predict the targets of the given data using the SWA solution for the weight vector of a linear model.\n\n Args:\n X: The features. Of shape (n_samples, n_features).\n posterior: The weight posterior of the linear model. Note that posterior.get_mean() should return the average\n weights plus the bias term, with the bias term being the final element in the vector.\n\n Returns:\n The predicted targets. Of shape (n_samples,).\n \"\"\"\n theta_swa = posterior.get_mean()\n w_swa = theta_swa[:-1].reshape(-1, 1)\n b_swa = theta_swa[-1]\n\n return affine_transformation(X, w_swa, b_swa)\n\n\ndef posterior_ensemble_predict(\n X: Tensor,\n posterior: OnlineFactorAnalysis,\n n_posterior_samples: int,\n random_seed: int,\n) -> Tensor:\n \"\"\"\n Predict the targets of the given data by sampling weight vectors from the posterior and using them to build an\n ensemble.\n\n Args:\n X: The features. Of shape (n_examples, n_features).\n posterior: The weight posterior of the linear model. 
Note that posterior.sample() should return weight vectors\n including the bias term, with the bias term being the final element in each vector.\n n_posterior_samples: The number of samples of the weight vector to draw from the posterior to form the ensemble.\n random_seed: The random seed to use when drawing samples from the posterior.\n\n Returns:\n The predicted targets. Of shape (n_examples,).\n \"\"\"\n theta = posterior.sample(n_samples=n_posterior_samples, random_seed=random_seed).t()\n w = theta[:-1]\n b = theta[[-1]]\n\n return affine_transformation(X, w, b).mean(dim=1)\n\n\ndef affine_transformation(X: Tensor, w: Tensor, b: Tensor) -> Tensor:\n \"\"\"\n Compute an affine transformation.\n\n Args:\n X: Data of shape (n_samples, n_features).\n w: Weights of shape (n_features, n_models).\n b: Bias terms of shape (1, n_models).\n\n Returns:\n Outputs of shape (n_samples, n_models). If n_models = 1, squeeze to (n_samples,).\n \"\"\"\n return (X.mm(w) + b).squeeze(dim=1)\n\n\n@click.command()\n@click.option('--boston-housing-input-path', type=str, help='The parquet file path to load the Boston Housing dataset')\n@click.option('--yacht-hydrodynamics-input-path', type=str, help='The parquet file path to load the Yacht '\n 'Hydrodynamics dataset')\n@click.option('--concrete-strength-input-path', type=str, help='The parquet file path to load the Concrete '\n 'Compressive Strength dataset')\n@click.option('--energy-efficiency-input-path', type=str, help='The parquet file path to load the Energy Efficiency '\n 'dataset')\n@click.option('--results-output-path', type=str, help='The parquet file path to save the experiment results')\ndef main(boston_housing_input_path: str, yacht_hydrodynamics_input_path: str, concrete_strength_input_path: str,\n energy_efficiency_input_path: str, results_output_path: str):\n \"\"\"\n Run experiments to test whether linear regression predictions can be improved by stochastic weight averaging.\n\n Args:\n boston_housing_input_path: The parquet file path to load the Boston Housing dataset.\n yacht_hydrodynamics_input_path: The parquet file path to load the Yacht Hydrodynamics dataset.\n concrete_strength_input_path: The parquet file path to load the Concrete Compressive Strength dataset.\n energy_efficiency_input_path: The parquet file path to load the Energy Efficiency dataset.\n results_output_path: The parquet file path to save the experiment results.\n \"\"\"\n with open(\"params.yaml\", 'r') as fd:\n params = yaml.safe_load(fd)['linear_regression_predictions']\n\n datasets = [\n pd.read_parquet(boston_housing_input_path),\n pd.read_parquet(yacht_hydrodynamics_input_path),\n pd.read_parquet(concrete_strength_input_path),\n pd.read_parquet(energy_efficiency_input_path),\n ]\n\n dataset_labels = [\n 'boston_housing',\n 'yacht_hydrodynamics',\n 'concrete_strength',\n 'energy_efficiency',\n ]\n\n results = run_all_experiments(\n datasets=datasets,\n dataset_labels=dataset_labels,\n latent_dim=params['latent_dim'],\n n_folds=params['n_folds'],\n lr_pretrain=params['lr_pretrain'],\n lr_swa=params['lr_swa'],\n n_epochs_pretrain=params['n_epochs_pretrain'],\n n_epochs_swa=params['n_epochs_swa'],\n n_batches_per_epoch=params['n_batches_per_epoch'],\n weight_decay=params['weight_decay'],\n gradient_optimiser=params['gradient_optimiser'],\n gradient_optimiser_kwargs=params['gradient_optimiser_kwargs'],\n gradient_warm_up_time_steps=params['gradient_warm_up_time_steps'],\n em_warm_up_time_steps=params['em_warm_up_time_steps'],\n 
n_posterior_samples=params['n_posterior_samples'],\n )\n\n print('Results:\\n')\n print(results)\n\n Path(os.path.dirname(results_output_path)).mkdir(parents=True, exist_ok=True)\n results.to_parquet(results_output_path)\n\n\nif __name__ == '__main__':\n main()\n",
"id": "6797363",
"language": "Python",
"matching_score": 5.632513046264648,
"max_stars_count": 0,
"path": "experiments/linear_regression_predictions.py"
},
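posterior_ensemble_predict in linear_regression_predictions.py evaluates X.mm(w) + b with one weight column per posterior sample and then averages over the ensemble dimension. The shape logic is easy to lose track of, so here is a small torch sketch with random stand-in tensors rather than real posterior samples:

# Sketch: ensemble prediction by averaging X @ W + b over sampled weight columns.
import torch

n_samples, n_features, n_models = 5, 3, 4
X = torch.randn(n_samples, n_features)
W = torch.randn(n_features, n_models)  # one weight column per posterior sample
b = torch.randn(1, n_models)           # one bias per posterior sample

per_model = X.mm(W) + b                # shape (n_samples, n_models)
y_hat = per_model.mean(dim=1)          # average over the ensemble
print(per_model.shape, y_hat.shape)    # torch.Size([5, 4]) torch.Size([5])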
{
"content": "import pytest\nimport numpy as np\nimport pandas as pd\n\nfrom experiments.linear_regression_predictions import run_all_experiments\n\n\n@pytest.mark.parametrize(\n \"n_datasets, n_folds, n_samples, n_features\",\n [\n (1, 2, [100], [2]),\n (1, 3, [50], [3]),\n (2, 2, [100, 50], [2, 3]),\n (2, 3, [100, 50], [2, 3]),\n ]\n )\ndef test_all_experiments_results_rows_and_columns(n_datasets, n_folds, n_samples, n_features):\n datasets = [pd.DataFrame(np.random.randn(n_samples[i], n_features[i] + 1)) for i in range(n_datasets)]\n dataset_labels = [f\"dataset_{i}\" for i in range(n_datasets)]\n\n results = run_all_experiments(\n datasets=datasets,\n dataset_labels=dataset_labels,\n latent_dim=2,\n n_folds=n_folds,\n lr_pretrain=1e-3,\n lr_swa=1e-1,\n n_epochs_pretrain=10,\n n_epochs_swa=10,\n n_batches_per_epoch=10,\n weight_decay=1e-4,\n gradient_optimiser='sgd',\n gradient_optimiser_kwargs=dict(lr=0.01),\n gradient_warm_up_time_steps=10,\n em_warm_up_time_steps=10,\n n_posterior_samples=10,\n )\n\n expected_columns = [\n 'mse_pretrained',\n 'mse_swa',\n 'mse_gradient_fa',\n 'mse_em_fa',\n 'pinball05_pretrained',\n 'pinball05_swa',\n 'pinball05_gradient_fa',\n 'pinball05_em_fa',\n 'pinball95_pretrained',\n 'pinball95_swa',\n 'pinball95_gradient_fa',\n 'pinball95_em_fa',\n 'dataset',\n 'fold',\n ]\n\n actual_columns = list(results.columns)\n assert len(actual_columns) == len(expected_columns)\n assert len(np.intersect1d(actual_columns, expected_columns)) == len(actual_columns)\n\n expected_n_rows = n_datasets * n_folds\n assert len(results) == expected_n_rows\n\n for i in range(n_datasets):\n assert (results['dataset'] == dataset_labels[i]).sum() == n_folds\n",
"id": "2733381",
"language": "Python",
"matching_score": 0.6014018058776855,
"max_stars_count": 0,
"path": "tests/test_experiments/test_linear_regression_predictions.py"
},
{
"content": "from abc import ABC, abstractmethod\nfrom typing import Optional\n\nimport torch\nfrom torch import Tensor\nfrom torch.optim import Optimizer, Adam\nfrom torch.autograd import Variable\nfrom torch.distributions.multivariate_normal import MultivariateNormal\n\n\nclass OnlineFactorAnalysis(ABC):\n \"\"\"\n An abstract class used as a base for learning factor analysis (FA) models [1] online.\n\n Any concrete class which inherits from this class must implement the `update` method.\n\n The variable names used in this class generally match those used in [1].\n\n Factor loading matrix `F` is initialised to have orthogonal columns.\n\n Diagonal covariance matrix `Psi` is initialised to be the identity matrix.\n\n Args:\n observation_dim: The size of the observed variable space.\n latent_dim: The size of the latent variable space.\n device: The device (CPU or GPU) on which to perform the computation. If `None`, uses the device for the default\n tensor type.\n random_seed: The random seed for reproducibility.\n\n Attributes:\n observation_dim: The size of the observed variable space. An integer.\n latent_dim: The size of the latent variable space. An integer.\n t: The current time step, or equivalently, the number of observations seen. An integer which starts off as 0.\n c: The mean of the observed variables. A Tensor of shape (observation_dim, 1).\n F: The factor loading matrix. A Tensor of shape (observation_dim, latent_dim).\n diag_psi: The diagonal entries of the Gaussian noise covariance matrix, usually referred to as `Psi`. A Tensor\n of shape (observation_dim, 1).\n\n References:\n [1] <NAME>. Bayesian Reasoning and Machine Learning. Cambridge University Press, 2012.\n \"\"\"\n\n def __init__(self, observation_dim: int, latent_dim: int, device: Optional[torch.device] = None,\n random_seed: Optional[int] = None):\n if random_seed is not None:\n torch.manual_seed(random_seed)\n\n self.observation_dim = observation_dim\n self.latent_dim = latent_dim\n self.device = device\n self.t = 0\n self.c = torch.zeros(observation_dim, 1, device=device)\n self.F = self._init_F()\n self.diag_psi = self._init_psi()\n self._diag_inv_psi = None\n self._d = None\n self._m = None\n self._sigma = None\n self._I = torch.eye(latent_dim, device=device)\n\n def _init_F(self) -> Tensor:\n \"\"\"\n Initialise the factor loading matrix.\n\n Initialised to have orthogonal columns.\n\n Returns:\n The initial factor loading matrix. Of shape (observation_dim, latent_dim).\n \"\"\"\n A = torch.randn(self.observation_dim, self.latent_dim, device=self.device)\n F, _ = torch.linalg.qr(A, mode='reduced')\n return F\n\n def _init_psi(self) -> Tensor:\n \"\"\"\n Initialise the diagonal entries of the Gaussian noise covariance matrix.\n\n Set all entries to 1.\n\n Returns:\n The initial diagonal entries of the Gaussian noise covariance matrix. 
Of shape (observation_dim, 1).\n \"\"\"\n return torch.ones(self.observation_dim, 1, device=self.device)\n\n def _update_commons(self, theta: Tensor):\n \"\"\"\n Given an observation, perform updates which are common to all online FA algorithms.\n\n Specifically, update self.t, self.c, self._d, self._diag_inv_psi, self._m and self._sigma given the new\n observation and the current values of self.F and self.diag_psi.\n\n Args:\n theta: A single observation of shape (observation_dim,) or (observation_dim, 1).\n \"\"\"\n theta = theta.reshape(-1, 1)\n self.t += 1\n self._update_observation_mean(theta)\n self._update_centred_observation(theta)\n self._update_inverse_psi()\n self._update_latent_posterior_params()\n\n def _update_observation_mean(self, theta: Tensor):\n \"\"\"\n Update the running average of the observed variables.\n\n Args:\n theta: A single observation. Of shape (observation_dim, 1).\n \"\"\"\n self.c = self._update_running_average(self.c, theta)\n\n def _update_centred_observation(self, theta: Tensor):\n \"\"\"\n Centre the observation by subtracting the mean of all observations.\n\n Args:\n theta: A single observation. Of shape (observation_dim, 1).\n \"\"\"\n self._d = theta - self.c\n\n def _update_inverse_psi(self):\n \"\"\"\n Invert the diagonal Gaussian noise covariance matrix.\n \"\"\"\n self._diag_inv_psi = 1 / self.diag_psi\n\n def _update_latent_posterior_params(self):\n \"\"\"\n Update the mean and covariance of the posterior distribution of the latent variables.\n\n The distribution is `p(h | theta, F, Psi)p(h) = N(m, sigma)`, given the current observation and the current\n values of `F` and `Psi`.\n \"\"\"\n C = (self.F * self._diag_inv_psi).t()\n self._update_latent_posterior_covariance(C)\n self._update_latent_posterior_mean(C)\n\n def _update_latent_posterior_covariance(self, C: Tensor):\n \"\"\"\n Update the covariance of the posterior distribution of the latent variables.\n\n Args:\n C: The transpose of `F` right-multiplied by the inverse of `Psi`. Of shape (latent_dim, observation_dim).\n \"\"\"\n self._sigma = torch.linalg.inv(self._I + C.mm(self.F))\n\n def _update_latent_posterior_mean(self, C: Tensor):\n \"\"\"\n Update the mean of the posterior distribution of the latent variables.\n\n Args:\n C: The transpose of `F` right-multiplied by the inverse of `Psi`. Of shape (latent_dim, observation_dim).\n \"\"\"\n self._m = self._sigma.mm(C.mm(self._d))\n\n def _update_running_average(self, old_average: Tensor, new_observation: Tensor) -> Tensor:\n \"\"\"\n Update the running average given a new observation.\n\n Args:\n old_average: The average up until the current time step.\n new_observation: The observation to use to update the average.\n\n Returns:\n The updated running average.\n \"\"\"\n return old_average + (new_observation - old_average) / self.t\n\n @abstractmethod\n def update(self, theta: Tensor):\n \"\"\"\n Given a new observation, update the parameters of the FA model.\n\n Args:\n theta: A single observation of shape (observation_dim,) or (observation_dim, 1).\n \"\"\"\n ...\n\n def get_mean(self) -> Tensor:\n \"\"\"\n Get the mean of the FA model.\n\n Returns:\n The mean. Of shape (observation_dim,).\n \"\"\"\n return self.c.squeeze()\n\n def get_covariance(self) -> Tensor:\n \"\"\"\n Get the full covariance matrix of the FA model.\n\n Note: if the observation dimension is large, this may result in a memory error.\n\n Returns:\n The covariance matrix. 
Of shape (observation_dim, observation_dim).\n \"\"\"\n psi = torch.diag(self.diag_psi.squeeze())\n return self.F.mm(self.F.t()) + psi\n\n def sample(self, n_samples: int, random_seed: Optional[int] = None) -> Tensor:\n \"\"\"\n Draw samples from the FA model.\n\n Observations are of the form Fh + c + noise, where h is a latent variable vector sampled from N(0, I) and the\n noise vector is sampled from N(0, Psi).\n\n Args:\n n_samples: The number of independent samples to draw from the FA model.\n random_seed: The random seed to use for sampling.\n\n Returns:\n Samples of shape (n_samples, observation_dim).\n \"\"\"\n if random_seed is not None:\n torch.manual_seed(random_seed)\n\n p_h = MultivariateNormal(\n loc=torch.zeros(self.latent_dim, device=self.device),\n covariance_matrix=torch.eye(self.latent_dim, device=self.device),\n )\n\n p_noise = MultivariateNormal(\n loc=torch.zeros(self.observation_dim, device=self.device),\n covariance_matrix=torch.diag(self.diag_psi.squeeze()),\n )\n\n H = p_h.sample((n_samples,))\n noise = p_noise.sample((n_samples,))\n\n return H.mm(self.F.t()) + self.c.reshape(1, -1) + noise\n\n\nclass OnlineGradientFactorAnalysis(OnlineFactorAnalysis):\n \"\"\"\n Implementation of online stochastic gradient factor analysis (FA) from [1].\n\n The variable names used in this class generally match those used in [1].\n\n Args:\n observation_dim: The size of the observed variable space.\n latent_dim: The size of the latent variable space.\n optimiser: The class of the optimiser to use for gradient updates.\n optimiser_kwargs: Keyword arguments for the optimiser. If not given, will default to dict(lr=1e-3).\n n_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model before\n updating the other parameters.\n device: The device (CPU or GPU) on which to perform the computation. If `None`, uses the device for the default\n tensor type.\n random_seed: The random seed for reproducibility.\n\n References:\n [1] <NAME>. Extending the Bayesian Deep Learning Method MultiSWAG. 
MSc Thesis, University of Edinburgh,\n 2021.\n \"\"\"\n\n def __init__(self, observation_dim: int, latent_dim: int, optimiser: Optimizer = Adam,\n optimiser_kwargs: Optional[dict] = None, n_warm_up_time_steps: int = 0,\n device: Optional[torch.device] = None, random_seed: int = 0):\n super().__init__(observation_dim, latent_dim, device=device, random_seed=random_seed)\n optimiser_kwargs = optimiser_kwargs or dict(lr=1e-3)\n self.F = Variable(self.F, requires_grad=False) # we will compute our own gradients\n self._log_diag_psi = Variable(torch.log(self.diag_psi), requires_grad=False)\n self._F_times_sigma_plus_m_mt = None\n self._gradient_wrt_F = None\n self._gradient_wrt_diag_psi = None\n self._gradient_wrt_log_diag_psi = None\n self._optimiser = optimiser([self.F, self._log_diag_psi], **optimiser_kwargs)\n self._n_warm_up_time_steps = n_warm_up_time_steps\n\n def update(self, theta: Tensor):\n \"\"\"\n Given a new observation, update the parameters of the FA model.\n\n Args:\n theta: A single observation of shape (observation_dim,) or (observation_dim, 1).\n \"\"\"\n self._update_commons(theta)\n if self.t > self._n_warm_up_time_steps:\n self._update_F_times_sigma_plus_m_mt()\n self._update_gradient_wrt_F()\n self._update_gradient_wrt_log_psi()\n self._gradient_step()\n self.diag_psi = torch.exp(self._log_diag_psi)\n\n def _update_F_times_sigma_plus_m_mt(self):\n \"\"\"\n Update the value of `F(sigma + mm^T)`.\n\n This quantity is used multiple times in the gradient calculations, so it is more efficient to compute it only\n once.\n \"\"\"\n self._F_times_sigma_plus_m_mt = self.F.mm(self._sigma + self._m.mm(self._m.t()))\n\n def _update_gradient_wrt_F(self):\n \"\"\"\n Update the value of the gradient of the log-likelihood wrt the factor loading matrix.\n \"\"\"\n self._gradient_wrt_F = self._diag_inv_psi * (self._d.mm(self._m.t()) - self._F_times_sigma_plus_m_mt)\n\n def _update_gradient_wrt_log_psi(self):\n \"\"\"\n Update the value of the gradient of the log-likelihood wrt the logarithm of the diagonal entries of the Gaussian\n noise covariance matrix.\n \"\"\"\n self._update_gradient_wrt_psi()\n self._gradient_wrt_log_diag_psi = self._gradient_wrt_diag_psi * self.diag_psi\n\n def _update_gradient_wrt_psi(self):\n \"\"\"\n Update the value of the gradient of the log-likelihood wrt the diagonal entries of the Gaussian noise covariance\n matrix.\n \"\"\"\n E = self._d ** 2 \\\n - 2 * self._d * self.F.mm(self._m) \\\n + torch.sum(self._F_times_sigma_plus_m_mt * self.F, dim=1, keepdim=True)\n self._gradient_wrt_diag_psi = ((self._diag_inv_psi ** 2) * E - self._diag_inv_psi) / 2\n\n def _gradient_step(self):\n \"\"\"\n Perform a gradient step to update self.F and self._log_diag_psi.\n\n Goal is to maximise the log-likelihood, but Torch optimisers are designed to minimise. So multiply the gradients\n by -1 before performing the updates.\n \"\"\"\n self.F.grad = -self._gradient_wrt_F\n self._log_diag_psi.grad = -self._gradient_wrt_log_diag_psi\n self._optimiser.step()\n\n\nclass OnlineEMFactorAnalysis(OnlineFactorAnalysis):\n \"\"\"\n Implementation of online expectation maximisation for factor analysis (FA) from [1].\n\n The variable names used in this class generally match those used in [1].\n\n Args:\n observation_dim: The size of the observed variable space.\n latent_dim: The size of the latent variable space.\n n_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model before\n updating the other parameters. 
This will be set to at least 1 to avoid inverting a zero matrix on the first\n iteration.\n device: The device (CPU or GPU) on which to perform the computation. If `None`, uses the device for the default\n tensor type.\n random_seed: The random seed for reproducibility.\n\n References:\n [1] <NAME>. Extending the Bayesian Deep Learning Method MultiSWAG. MSc Thesis, University of Edinburgh,\n 2021.\n \"\"\"\n\n def __init__(self, observation_dim: int, latent_dim: int, n_warm_up_time_steps: int = 1,\n device: Optional[torch.device] = None, random_seed: int = 0):\n super().__init__(observation_dim, latent_dim, device=device, random_seed=random_seed)\n self._A_hat = torch.zeros(observation_dim, latent_dim, device=device)\n self._B_hat = torch.zeros(latent_dim, latent_dim, device=device)\n self._H_hat = None\n self._d_squared_hat = torch.zeros(observation_dim, 1, device=device)\n self._n_warm_up_time_steps = max(n_warm_up_time_steps, 1)\n\n def update(self, theta: Tensor):\n \"\"\"\n Given a new observation, update the running averages and the parameters of the FA model.\n\n Args:\n theta: A single observation of shape (observation_dim,) or (observation_dim, 1).\n \"\"\"\n self._update_commons(theta)\n self._update_H_hat()\n self._update_A_hat()\n self._update_d_squared_hat()\n if self.t > self._n_warm_up_time_steps:\n self._update_F()\n self._update_psi()\n\n def _update_H_hat(self):\n \"\"\"\n Update the sum of the latent posterior covariance matrix and the running average of `mm^t`.\n \"\"\"\n self._update_B_hat()\n self._H_hat = self._sigma + self._B_hat\n\n def _update_B_hat(self):\n \"\"\"\n Update the running average of `mm^t`.\n \"\"\"\n self._B_hat = self._update_running_average(self._B_hat, self._m.mm(self._m.t()))\n\n def _update_A_hat(self):\n \"\"\"\n Update the running average of `dm^t`.\n \"\"\"\n self._A_hat = self._update_running_average(self._A_hat, self._d.mm(self._m.t()))\n\n def _update_d_squared_hat(self):\n \"\"\"\n Update the running average of `d^2`.\n \"\"\"\n self._d_squared_hat = self._update_running_average(self._d_squared_hat, self._d ** 2)\n\n def _update_F(self):\n \"\"\"\n Update the factor loading matrix.\n \"\"\"\n self.F = self._A_hat.mm(torch.linalg.inv(self._H_hat))\n\n def _update_psi(self):\n \"\"\"\n Update the diagonal entries of the Gaussian noise covariance matrix.\n \"\"\"\n self.diag_psi = self._d_squared_hat \\\n + torch.sum(self.F.mm(self._H_hat) * self.F - 2 * self.F * self._A_hat, dim=1, keepdim=True)\n",
"id": "7213509",
"language": "Python",
"matching_score": 4.868819713592529,
"max_stars_count": 0,
"path": "swafa/fa.py"
},
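The `swafa/fa.py` module above defines a streaming interface shared by both learners: feed observations one at a time through `update`, then read off the fitted Gaussian. A minimal usage sketch, assuming the `swafa` package is importable and using a synthetic noise stream purely for illustration:

import torch
from swafa.fa import OnlineGradientFactorAnalysis, OnlineEMFactorAnalysis

observation_dim, latent_dim = 10, 3
fa = OnlineGradientFactorAnalysis(observation_dim, latent_dim, random_seed=0)
# fa = OnlineEMFactorAnalysis(observation_dim, latent_dim, random_seed=0)  # drop-in alternative

torch.manual_seed(1)
for _ in range(1000):
    theta = torch.randn(observation_dim)  # one streaming observation at a time
    fa.update(theta)

mean = fa.get_mean()           # shape (observation_dim,)
covar = fa.get_covariance()    # F F^T + Psi, shape (observation_dim, observation_dim)
new_obs = fa.sample(n_samples=5, random_seed=2)  # shape (5, observation_dim)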
{
"content": "import pytest\nimport torch\nfrom torch.optim import Optimizer, SGD\n\nfrom swafa.fa import OnlineGradientFactorAnalysis\n\n\nclass TestOnlineGradientFactorAnalysis:\n\n def test_update_F_times_sigma_plus_m_mt(self):\n fa = OnlineGradientFactorAnalysis(1, 1)\n fa.F = torch.Tensor([[1, 2], [3, 4], [5, 6]])\n fa._sigma = torch.Tensor([[1, 2], [3, 4]])\n fa._m = torch.Tensor([[1], [2]])\n expected_output = torch.Tensor([[12, 20], [26, 44], [40, 68]])\n fa._update_F_times_sigma_plus_m_mt()\n assert torch.isclose(fa._F_times_sigma_plus_m_mt, expected_output, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_gradient_wrt_F(self, observation_dim, latent_dim):\n fa, theta, inv_psi = self._update_commons(observation_dim, latent_dim)\n\n expected_gradient = inv_psi.mm(fa._d.mm(fa._m.t()) - fa.F.mm(fa._sigma + fa._m.mm(fa._m.t())))\n\n fa._update_gradient_wrt_F()\n assert torch.isclose(fa._gradient_wrt_F, expected_gradient, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_gradient_wrt_psi(self, observation_dim, latent_dim):\n fa, theta, inv_psi = self._update_commons(observation_dim, latent_dim)\n\n expected_gradient = torch.diag(\n 0.5 * (\n torch.diag(\n torch.diag(inv_psi ** 2) * (\n fa._d ** 2\n - 2 * fa._d * (fa.F.mm(fa._m))\n + torch.diag(fa.F.mm(fa._sigma + fa._m.mm(fa._m.t())).mm(fa.F.t()))\n )\n )\n - inv_psi\n )\n ).reshape(-1, 1)\n\n fa._update_gradient_wrt_psi()\n assert torch.isclose(fa._gradient_wrt_diag_psi, expected_gradient, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_gradient_wrt_log_psi(self, observation_dim, latent_dim):\n fa, theta, inv_psi = self._update_commons(observation_dim, latent_dim)\n fa._update_gradient_wrt_log_psi()\n expected_gradient = fa._gradient_wrt_diag_psi * fa.diag_psi\n assert torch.isclose(fa._gradient_wrt_log_diag_psi, expected_gradient, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n @pytest.mark.parametrize(\"learning_rate\", [1e-3, 1e-1])\n def test_sgd_update_F(self, observation_dim, latent_dim, learning_rate):\n fa = OnlineGradientFactorAnalysis(\n observation_dim, latent_dim, optimiser=SGD, optimiser_kwargs=dict(lr=learning_rate),\n )\n for _ in range(10):\n old_F = fa.F.clone()\n fa.update(torch.randn(observation_dim))\n expected_new_F = old_F + learning_rate * fa._gradient_wrt_F\n assert torch.isclose(fa.F, expected_new_F, atol=1e-05).all()\n assert not torch.isclose(fa.F, old_F, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n @pytest.mark.parametrize(\"learning_rate\", [1e-3, 1e-1])\n def test_sgd_update_log_diag_psi(self, observation_dim, latent_dim, learning_rate):\n fa = OnlineGradientFactorAnalysis(\n observation_dim, latent_dim, optimiser=SGD, optimiser_kwargs=dict(lr=learning_rate),\n )\n for _ in range(10):\n old_log_diag_psi = fa._log_diag_psi.clone()\n fa.update(torch.randn(observation_dim))\n expected_new_log_diag_psi = old_log_diag_psi + learning_rate * fa._gradient_wrt_log_diag_psi\n assert torch.isclose(fa._log_diag_psi, expected_new_log_diag_psi, atol=1e-05).all()\n assert not torch.isclose(fa._log_diag_psi, old_log_diag_psi, atol=1e-05).all()\n\n 
@pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n @pytest.mark.parametrize(\"learning_rate\", [1e-3, 1e-1])\n def test_updated_diag_psi(self, observation_dim, latent_dim, learning_rate):\n fa = OnlineGradientFactorAnalysis(\n observation_dim, latent_dim, optimiser_kwargs=dict(lr=learning_rate),\n )\n for _ in range(10):\n old_diag_psi = fa.diag_psi.clone()\n fa.update(torch.randn(observation_dim, 1))\n expected_diag_psi = torch.exp(fa._log_diag_psi)\n assert torch.isclose(fa.diag_psi, expected_diag_psi, atol=1e-05).all()\n assert not torch.isclose(fa.diag_psi, old_diag_psi, atol=1e-05).all()\n\n @staticmethod\n def _update_commons(observation_dim: int, latent_dim: int, optimiser: Optimizer = SGD,\n optimiser_kwargs: dict = None):\n optimiser_kwargs = optimiser_kwargs or dict(lr=1e-3)\n fa = OnlineGradientFactorAnalysis(\n observation_dim, latent_dim, optimiser=optimiser, optimiser_kwargs=optimiser_kwargs,\n )\n fa.c = torch.randn(observation_dim, 1)\n fa.diag_psi = torch.randn(observation_dim, 1)\n theta = torch.randn(observation_dim, 1)\n fa._update_commons(theta)\n fa._update_F_times_sigma_plus_m_mt()\n inv_psi = torch.diag(fa._diag_inv_psi.squeeze())\n return fa, theta, inv_psi\n\n @pytest.mark.parametrize(\"n_warm_up_time_steps\", [0, 1, 5])\n def test_warm_up_time_steps(self, n_warm_up_time_steps):\n observation_dim = 3\n latent_dim = 2\n fa = OnlineGradientFactorAnalysis(observation_dim, latent_dim, n_warm_up_time_steps=n_warm_up_time_steps)\n\n c = fa.c.clone()\n F = fa.F.clone()\n diag_psi = fa.diag_psi.clone()\n\n for t in range(n_warm_up_time_steps + 2):\n fa.update(torch.randn(observation_dim, 1))\n assert not torch.isclose(fa.c, c).all()\n\n should_not_change = t < n_warm_up_time_steps\n assert torch.isclose(fa.F, F).all() == should_not_change\n assert torch.isclose(fa.diag_psi, diag_psi).all() == should_not_change\n\n c = fa.c.clone()\n F = fa.F.clone()\n diag_psi = fa.diag_psi.clone()\n",
"id": "10816497",
"language": "Python",
"matching_score": 3.4912519454956055,
"max_stars_count": 0,
"path": "tests/test_online_gradient_fa.py"
},
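The SGD tests above rely on the sign convention from `_gradient_step`: the learner maximises the log-likelihood, so it stores the negated gradient and lets the minimising optimiser take `F <- F + lr * dL/dF`. A tiny standalone check of that arithmetic (values are illustrative only):

import torch

lr = 0.1
F = torch.tensor([[1.0], [2.0]])
grad_wrt_F = torch.tensor([[0.5], [-0.5]])  # pretend log-likelihood gradient

# The learner sets F.grad = -grad_wrt_F, so a plain SGD step...
sgd_step = F - lr * (-grad_wrt_F)
# ...is an ascent step on the log-likelihood, which is what the tests assert:
assert torch.allclose(sgd_step, F + lr * grad_wrt_F)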
{
"content": "import pytest\nimport torch\n\nfrom swafa.fa import OnlineEMFactorAnalysis\n\n\nclass TestOnlineEMFactorAnalysis:\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_B_hat(self, observation_dim, latent_dim):\n fa = OnlineEMFactorAnalysis(observation_dim, latent_dim)\n m_times_mt = []\n for _ in range(10):\n fa.update(torch.randn(observation_dim))\n m_times_mt.append(fa._m.mm(fa._m.t()))\n\n expected_B_hat = torch.dstack(m_times_mt).mean(dim=2)\n assert torch.isclose(fa._B_hat, expected_B_hat, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_H_hat(self, observation_dim, latent_dim):\n fa = OnlineEMFactorAnalysis(observation_dim, latent_dim)\n for _ in range(10):\n fa.update(torch.randn(observation_dim, 1))\n expected_H_hat = fa._sigma + fa._B_hat\n assert torch.isclose(fa._H_hat, expected_H_hat, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_A_hat(self, observation_dim, latent_dim):\n fa = OnlineEMFactorAnalysis(observation_dim, latent_dim)\n d_times_mt = []\n for _ in range(10):\n fa.update(torch.randn(observation_dim))\n d_times_mt.append(fa._d.mm(fa._m.t()))\n\n expected_A_hat = torch.dstack(d_times_mt).mean(dim=2)\n assert torch.isclose(fa._A_hat, expected_A_hat, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_d_squared_hat(self, observation_dim, latent_dim):\n fa = OnlineEMFactorAnalysis(observation_dim, latent_dim)\n d_squared = []\n for _ in range(10):\n fa.update(torch.randn(observation_dim))\n d_squared.append(fa._d ** 2)\n\n expected_d_squared_hat = torch.dstack(d_squared).mean(dim=2)\n assert torch.isclose(fa._d_squared_hat, expected_d_squared_hat, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_F(self, observation_dim, latent_dim):\n fa = OnlineEMFactorAnalysis(observation_dim, latent_dim)\n for i in range(10):\n old_F = fa.F.clone()\n fa.update(torch.randn(observation_dim))\n if i == 0:\n assert torch.isclose(fa.F, old_F, atol=1e-05).all()\n else:\n expected_F = fa._A_hat.mm(torch.linalg.inv(fa._H_hat))\n assert torch.isclose(fa.F, expected_F, atol=1e-05).all()\n assert not torch.isclose(fa.F, old_F, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_psi(self, observation_dim, latent_dim):\n fa = OnlineEMFactorAnalysis(observation_dim, latent_dim)\n d_times_dt = []\n for i in range(10):\n old_diag_psi = fa.diag_psi.clone()\n fa.update(torch.randn(observation_dim))\n d_times_dt.append(fa._d.mm(fa._d.t()))\n if i == 0:\n assert torch.isclose(fa.diag_psi, old_diag_psi, atol=1e-05).all()\n else:\n expected_diag_psi = torch.diag(\n torch.dstack(d_times_dt).mean(dim=2)\n - 2 * fa.F.mm(fa._A_hat.t())\n + fa.F.mm(fa._H_hat).mm(fa.F.t())\n ).reshape(-1, 1)\n\n assert torch.isclose(fa.diag_psi, expected_diag_psi, atol=1e-05).all()\n assert not torch.isclose(fa.diag_psi, old_diag_psi, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"n_warm_up_time_steps\", [0, 1, 5])\n def test_warm_up_time_steps(self, n_warm_up_time_steps):\n observation_dim = 3\n latent_dim = 2\n fa = OnlineEMFactorAnalysis(observation_dim, 
latent_dim, n_warm_up_time_steps=n_warm_up_time_steps)\n\n c = fa.c.clone()\n F = fa.F.clone()\n diag_psi = fa.diag_psi.clone()\n A_hat = fa._A_hat.clone()\n B_hat = fa._B_hat.clone()\n d_squared_hat = fa._d_squared_hat.clone()\n\n for t in range(0, n_warm_up_time_steps + 2):\n fa.update(torch.randn(observation_dim, 1))\n if t > 0:\n assert not torch.isclose(fa.c, c).all()\n assert not torch.isclose(fa._A_hat, A_hat).all()\n assert not torch.isclose(fa._B_hat, B_hat).all()\n assert not torch.isclose(fa._d_squared_hat, d_squared_hat).all()\n\n should_not_change = t < n_warm_up_time_steps\n assert torch.isclose(fa.F, F).all() == should_not_change\n assert torch.isclose(fa.diag_psi, diag_psi).all() == should_not_change\n\n c = fa.c.clone()\n F = fa.F.clone()\n diag_psi = fa.diag_psi.clone()\n A_hat = fa._A_hat.clone()\n B_hat = fa._B_hat.clone()\n d_squared_hat = fa._d_squared_hat.clone()\n",
"id": "7567338",
"language": "Python",
"matching_score": 2.7841999530792236,
"max_stars_count": 0,
"path": "tests/test_online_em_fa.py"
},
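These EM tests compare the learner's incremental running averages (`_A_hat`, `_B_hat`, `_d_squared_hat`) against batch means built with `torch.dstack(...).mean(dim=2)`. The equivalence they depend on is the update rule `new_avg = old_avg + (x - old_avg) / t`; a small standalone sketch of that identity:

import torch

torch.manual_seed(0)
xs = [torch.randn(2, 2) for _ in range(10)]

running = torch.zeros(2, 2)
for t, x in enumerate(xs, start=1):
    running = running + (x - running) / t  # incremental rule used by _update_running_average

batch_mean = torch.dstack(xs).mean(dim=2)  # batch mean used by the tests
assert torch.isclose(running, batch_mean, atol=1e-5).all()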
{
"content": "import numpy as np\nimport pytest\nimport torch\n\nfrom swafa.fa import OnlineGradientFactorAnalysis\n\nOnlineFactorAnalysis = OnlineGradientFactorAnalysis # can't test OnlineFactorAnalysis directly as it is abstract\n\n\nclass TestOnlineFactorAnalysis:\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_init_c(self, observation_dim, latent_dim):\n fa = OnlineFactorAnalysis(observation_dim, latent_dim)\n assert (fa.c == torch.zeros(observation_dim, 1)).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_init_F_shape(self, observation_dim, latent_dim):\n fa = OnlineFactorAnalysis(observation_dim, latent_dim)\n assert fa.F.shape == (observation_dim, latent_dim)\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_init_F_orthogonal_columns(self, observation_dim, latent_dim):\n fa = OnlineFactorAnalysis(observation_dim, latent_dim)\n Q = fa.F.t().mm(fa.F)\n assert torch.isclose(Q, torch.eye(latent_dim), atol=1e5, rtol=1).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_init_diag_psi(self, observation_dim, latent_dim):\n fa = OnlineFactorAnalysis(observation_dim, latent_dim)\n assert (fa.diag_psi == torch.ones(fa.observation_dim, 1)).all()\n\n def test_init_t(self):\n observation_dim = 4\n latent_dim = 3\n fa = OnlineFactorAnalysis(observation_dim, latent_dim)\n assert fa.t == 0\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_commons_t(self, observation_dim, latent_dim):\n fa = OnlineFactorAnalysis(observation_dim, latent_dim)\n fa._update_commons(torch.randn(observation_dim, 1))\n assert fa.t == 1\n fa._update_commons(torch.randn(observation_dim))\n assert fa.t == 2\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_commons_c(self, observation_dim, latent_dim):\n fa = OnlineFactorAnalysis(observation_dim, latent_dim)\n theta1 = torch.randn(observation_dim, 1)\n fa._update_commons(theta1)\n assert torch.isclose(fa.c, theta1, atol=1e-05).all()\n theta2 = torch.randn(observation_dim, 1)\n fa._update_commons(theta2)\n assert torch.isclose(fa.c, (theta1 + theta2) / 2, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_commons_centred_observation(self, observation_dim, latent_dim):\n fa = OnlineFactorAnalysis(observation_dim, latent_dim)\n theta1 = torch.randn(observation_dim, 1)\n fa._update_commons(theta1)\n assert torch.isclose(fa._d, torch.zeros(observation_dim), atol=1e-05).all()\n theta2 = torch.randn(observation_dim, 1)\n fa._update_commons(theta2)\n assert torch.isclose(fa._d, theta2 - (theta1 + theta2) / 2, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_commons_inv_psi(self, observation_dim, latent_dim):\n fa = OnlineFactorAnalysis(observation_dim, latent_dim)\n theta1 = torch.randn(observation_dim, 1)\n fa._update_commons(theta1)\n assert torch.isclose(fa._diag_inv_psi, 1 / fa.diag_psi, atol=1e-05).all()\n theta2 = torch.randn(observation_dim)\n fa._update_commons(theta2)\n assert torch.isclose(fa._diag_inv_psi, 1 / fa.diag_psi, 
atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_commons_m(self, observation_dim, latent_dim):\n fa = OnlineFactorAnalysis(observation_dim, latent_dim)\n theta1 = torch.randn(observation_dim, 1)\n fa._update_commons(theta1)\n assert torch.isclose(fa._m, torch.zeros(latent_dim), atol=1e-05).all()\n theta2 = torch.randn(observation_dim)\n fa._update_commons(theta2)\n F = fa.F\n inv_psi = torch.diag(fa._diag_inv_psi.squeeze())\n expected_m = torch.linalg.inv(torch.eye(latent_dim) + F.t().mm(inv_psi).mm(F)).mm(F.t()).mm(inv_psi).mm(fa._d)\n assert torch.isclose(fa._m, expected_m, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [5, 8])\n def test_update_commons_sigma(self, observation_dim, latent_dim):\n fa = OnlineFactorAnalysis(observation_dim, latent_dim)\n theta1 = torch.randn(observation_dim, 1)\n fa._update_commons(theta1)\n theta2 = torch.randn(observation_dim)\n fa._update_commons(theta2)\n F = fa.F\n inv_psi = torch.diag(fa._diag_inv_psi.squeeze())\n expected_sigma = torch.linalg.inv(torch.eye(latent_dim) + F.t().mm(inv_psi).mm(F))\n assert torch.isclose(fa._sigma, expected_sigma, atol=1e-05).all()\n\n def test_get_covariance(self):\n fa = OnlineFactorAnalysis(observation_dim=3, latent_dim=2)\n fa.F = torch.Tensor([\n [1, 2],\n [-3, 4],\n [-1, 2],\n ])\n fa.diag_psi = torch.Tensor([1, 2, 3])\n expected_covariance = torch.Tensor([\n [6, 5, 3],\n [5, 27, 11],\n [3, 11, 8],\n ])\n actual_covariance = fa.get_covariance()\n assert torch.isclose(actual_covariance, expected_covariance, atol=1e-05).all()\n\n @pytest.mark.parametrize(\"observation_dim\", [10, 20])\n @pytest.mark.parametrize(\"latent_dim\", [1, 5, 8])\n @pytest.mark.parametrize('n_samples', [1, 10, 100])\n @pytest.mark.parametrize('random_seed', [1, None])\n def test_sample_shape(self, observation_dim, latent_dim, n_samples, random_seed):\n fa = OnlineFactorAnalysis(observation_dim=observation_dim, latent_dim=latent_dim)\n observations = fa.sample(n_samples, random_seed)\n assert observations.shape == (n_samples, observation_dim)\n\n def test_sample_mean(self):\n fa = OnlineFactorAnalysis(observation_dim=10, latent_dim=5)\n observations = fa.sample(n_samples=1000, random_seed=1)\n actual_mean = observations.mean(dim=0)\n assert torch.isclose(actual_mean, fa.get_mean(), atol=1e-1).all()\n\n def test_sample_covariance(self):\n fa = OnlineFactorAnalysis(observation_dim=10, latent_dim=5)\n observations = fa.sample(n_samples=10000, random_seed=1)\n actual_covar = torch.from_numpy(np.cov(observations.numpy().T)).float()\n assert torch.isclose(actual_covar, fa.get_covariance(), atol=1e-1).all()\n",
"id": "9285268",
"language": "Python",
"matching_score": 2.655366897583008,
"max_stars_count": 0,
"path": "tests/test_online_fa.py"
},
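The `_m`/`_sigma` tests above reconstruct the latent posterior with a dense `inv(Psi)`, while the learner itself uses the cheaper elementwise form `C = (F * diag_inv_psi).t()`, which is valid because `Psi` is diagonal. A short check of that equivalence (shapes are illustrative):

import torch

torch.manual_seed(0)
obs_dim, lat_dim = 6, 3
F = torch.randn(obs_dim, lat_dim)
diag_psi = torch.rand(obs_dim, 1) + 0.5  # keep entries away from zero

C_fast = (F * (1 / diag_psi)).t()                                   # elementwise, as in the learner
C_dense = F.t().mm(torch.linalg.inv(torch.diag(diag_psi.squeeze())))  # explicit F^T Psi^{-1}
assert torch.isclose(C_fast, C_dense, atol=1e-5).all()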
{
"content": "import pytest\nimport numpy as np\nimport torch\n\nfrom experiments.online_fa import generate_and_sample_fa_model\nfrom experiments.utils.metrics import (\n compute_gaussian_log_likelihood,\n compute_fa_covariance,\n compute_distance_between_matrices,\n matrix_sqrt,\n)\n\n\ndef test_compute_fa_covariance():\n F = torch.Tensor([\n [1, 2],\n [-3, 4],\n [-1, 2],\n ])\n psi = torch.Tensor([\n [1, 0, 0],\n [0, 2, 0],\n [0, 0, 3],\n ])\n expected_covariance = torch.Tensor([\n [6, 5, 3],\n [5, 27, 11],\n [3, 11, 8],\n ])\n actual_covariance = compute_fa_covariance(F, psi)\n assert torch.isclose(actual_covariance, expected_covariance, atol=1e-05).all()\n\n\ndef test_compute_matrix_norm():\n X = torch.Tensor([\n [1, 2],\n [-3, 4],\n ])\n expected_norm = np.sqrt(30)\n actual_norm = compute_distance_between_matrices(X, torch.zeros_like(X))\n assert np.isclose(actual_norm, expected_norm, atol=1e-5)\n\n\ndef test_compute_distance_between_matrices():\n A = torch.Tensor([\n [1, 6],\n [6, 5],\n ])\n B = torch.Tensor([\n [0, 4],\n [9, 1],\n ])\n expected_distance = np.sqrt(30)\n actual_distance = compute_distance_between_matrices(A, B)\n assert np.isclose(actual_distance, expected_distance, atol=1e-5)\n\n\n@pytest.mark.parametrize(\"observation_dim\", [10, 20])\n@pytest.mark.parametrize(\"latent_dim\", [5, 8])\n@pytest.mark.parametrize(\"spectrum_range\", [[0, 1], [0, 10]])\n@pytest.mark.parametrize('n_samples', [10, 100])\ndef test_gaussian_log_likelihood(observation_dim, latent_dim, spectrum_range, n_samples):\n mean, F, psi, covar, observations = generate_and_sample_fa_model(\n observation_dim, latent_dim, spectrum_range, n_samples, random_seed=0,\n )\n inv_cov = torch.linalg.inv(covar)\n log_det_cov = torch.logdet(covar)\n expected_ll_observations = torch.zeros(n_samples)\n for i, x in enumerate(observations):\n centred_x = (x - mean).reshape(-1, 1)\n expected_ll_observations[i] = (\n -0.5 * centred_x.t().mm(inv_cov).mm(centred_x)\n -0.5 * log_det_cov\n -0.5 * observation_dim * np.log(2 * np.pi)\n )\n expected_ll = expected_ll_observations.mean().item()\n actual_ll = compute_gaussian_log_likelihood(mean, covar, observations)\n assert np.isclose(actual_ll, expected_ll, atol=1e-5)\n\n\n@pytest.mark.parametrize(\"dim\", [2, 5, 10, 20, 50, 100])\ndef test_matrix_sqrt(dim):\n torch.manual_seed(0)\n B = torch.randn(dim, dim)\n A = B.mm(B.t())\n sqrt_A = matrix_sqrt(A)\n A_rebuilt = sqrt_A.mm(sqrt_A)\n assert torch.isclose(A_rebuilt, A, atol=1e-4 * dim).all()\n",
"id": "8614339",
"language": "Python",
"matching_score": 3.5028984546661377,
"max_stars_count": 0,
"path": "tests/test_experiments/test_utils/test_metrics.py"
},
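`test_gaussian_log_likelihood` spells out the per-observation term `-0.5 [ (x - mu)^T Sigma^{-1} (x - mu) + log|Sigma| + d log(2 pi) ]` that `compute_gaussian_log_likelihood` averages. As a sanity sketch (not part of the test suite), the same formula can be cross-checked against `torch.distributions`:

import math
import torch
from torch.distributions.multivariate_normal import MultivariateNormal

torch.manual_seed(0)
d, n = 4, 8
A = torch.randn(d, d)
covar = A.mm(A.t()) + torch.eye(d)  # positive definite covariance
mean = torch.randn(d)
X = MultivariateNormal(mean, covar).sample((n,))

centred = X - mean
manual = (
    -0.5 * (centred.mm(torch.linalg.inv(covar)) * centred).sum(dim=1)
    - 0.5 * torch.logdet(covar)
    - 0.5 * d * math.log(2 * math.pi)
).mean()
reference = MultivariateNormal(mean, covar).log_prob(X).mean()
assert torch.isclose(manual, reference, atol=1e-4)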
{
"content": "import pytest\nimport numpy as np\nimport torch\n\nfrom experiments.online_fa import (\n run_all_fa_experiments,\n generate_and_sample_fa_model,\n generate_fa_model,\n sample_fa_observations,\n learn_fa_with_sklearn,\n learn_fa_with_online_gradients,\n learn_fa_with_online_em,\n)\n\n\n@pytest.mark.parametrize(\"n_experiments\", [1, 3])\n@pytest.mark.parametrize(\"n_trials\", [1, 4])\n@pytest.mark.parametrize(\"n_sample_sizes\", [1, 4])\ndef test_fa_results_rows_and_columns(n_experiments, n_trials, n_sample_sizes):\n config = dict(\n observation_dim=10,\n latent_dim=5,\n spectrum_range=[0, 1],\n n_samples=list((np.arange(n_sample_sizes) + 1) * 10),\n )\n experiments_config = [config] * n_experiments\n results = run_all_fa_experiments(\n experiments_config,\n n_trials,\n gradient_optimiser='sgd',\n gradient_optimiser_kwargs=dict(lr=0.01),\n gradient_warm_up_time_steps=1,\n em_warm_up_time_steps=1,\n )\n expected_columns = [\n 'observation_dim',\n 'latent_dim',\n 'spectrum_min',\n 'spectrum_max',\n 'n_samples',\n 'covar_norm',\n 'covar_distance_sklearn',\n 'covar_distance_online_gradient',\n 'covar_distance_online_em',\n 'wasserstein_sklearn',\n 'wasserstein_online_gradient',\n 'wasserstein_online_em',\n 'experiment',\n 'trial'\n ]\n actual_columns = list(results.columns)\n assert len(actual_columns) == len(expected_columns)\n assert len(np.intersect1d(actual_columns, expected_columns)) == len(actual_columns)\n\n assert len(results) == n_experiments * n_trials * n_sample_sizes\n for i in range(n_experiments):\n assert (results['experiment'] == i + 1).sum() == n_trials * n_sample_sizes\n\n\n@pytest.mark.parametrize(\"observation_dim\", [10, 20])\n@pytest.mark.parametrize(\"latent_dim\", [5, 8])\n@pytest.mark.parametrize(\"spectrum_range\", [[0, 1], [0, 10]])\n@pytest.mark.parametrize('n_samples', [10, 100])\ndef test_true_params_shape(observation_dim, latent_dim, spectrum_range, n_samples):\n mean, F, psi, covar, observations = generate_and_sample_fa_model(\n observation_dim, latent_dim, spectrum_range, n_samples, random_seed=0,\n )\n assert mean.shape == (observation_dim,)\n assert covar.shape == (observation_dim, observation_dim)\n\n\n@pytest.mark.parametrize(\"observation_dim\", [10, 20])\n@pytest.mark.parametrize(\"latent_dim\", [5, 8])\n@pytest.mark.parametrize(\"spectrum_range\", [[0, 1], [0, 10]])\ndef test_model_mean_matches_sample_mean(observation_dim, latent_dim, spectrum_range):\n n_samples = 100000\n c, F, psi, covar, observations = generate_and_sample_fa_model(\n observation_dim, latent_dim, spectrum_range, n_samples, random_seed=0,\n )\n sample_mean = observations.mean(dim=0)\n normalised_distance = torch.linalg.norm(c - sample_mean) / torch.linalg.norm(c)\n assert normalised_distance < 0.01\n\n\n@pytest.mark.parametrize(\"observation_dim\", [10, 20])\n@pytest.mark.parametrize(\"latent_dim\", [5, 8])\n@pytest.mark.parametrize(\"spectrum_range\", [[0, 1], [0, 10]])\ndef test_model_covariance_matches_sample_covariance(observation_dim, latent_dim, spectrum_range):\n n_samples = 100000\n c, F, psi, covar, observations = generate_and_sample_fa_model(\n observation_dim, latent_dim, spectrum_range, n_samples, random_seed=0,\n )\n sample_covar = torch.from_numpy(np.cov(observations.t().numpy())).float()\n normalised_distance = torch.linalg.norm(covar - sample_covar) / torch.linalg.norm(covar)\n assert normalised_distance < 0.1\n\n\n@pytest.mark.parametrize(\"observation_dim\", [10, 20])\n@pytest.mark.parametrize(\"latent_dim\", [5, 
8])\n@pytest.mark.parametrize(\"spectrum_range\", [[0, 1], [0, 10]])\n@pytest.mark.parametrize('n_samples', [10, 100])\ndef test_sampled_fa_observations_shape(observation_dim, latent_dim, spectrum_range, n_samples):\n c, F, psi = generate_fa_model(observation_dim, latent_dim, spectrum_range, random_seed=0)\n observations = sample_fa_observations(c, F, psi, n_samples, random_seed=0)\n assert observations.shape == (n_samples, observation_dim)\n\n\n@pytest.mark.parametrize(\"observation_dim\", [10, 20])\n@pytest.mark.parametrize(\"latent_dim\", [5, 8])\n@pytest.mark.parametrize(\"spectrum_range\", [[0, 1], [0, 10]])\n@pytest.mark.parametrize('n_samples', [10, 100])\ndef test_sklearn_learned_params_shape(observation_dim, latent_dim, spectrum_range, n_samples):\n c, F, psi, covar, observations = generate_and_sample_fa_model(\n observation_dim, latent_dim, spectrum_range, n_samples, random_seed=0,\n )\n mean, covar = learn_fa_with_sklearn(observations, latent_dim, random_seed=0)\n assert mean.shape == (observation_dim,)\n assert covar.shape == (observation_dim, observation_dim)\n\n\n@pytest.mark.parametrize(\"observation_dim\", [10, 20])\n@pytest.mark.parametrize(\"latent_dim\", [5, 8])\n@pytest.mark.parametrize(\"spectrum_range\", [[0, 1], [0, 10]])\n@pytest.mark.parametrize('n_samples', [10, 100])\ndef test_online_gradients_learned_params_shape(observation_dim, latent_dim, spectrum_range, n_samples):\n c, F, psi, covar, observations = generate_and_sample_fa_model(\n observation_dim, latent_dim, spectrum_range, n_samples, random_seed=0,\n )\n _, mean, covar = learn_fa_with_online_gradients(\n observations, latent_dim, optimiser_name='sgd', optimiser_kwargs=None, n_warm_up_time_steps=1, random_seed=0,\n )\n assert mean.shape == (observation_dim,)\n assert covar.shape == (observation_dim, observation_dim)\n\n\n@pytest.mark.parametrize(\"observation_dim\", [10, 20])\n@pytest.mark.parametrize(\"latent_dim\", [5, 8])\n@pytest.mark.parametrize(\"spectrum_range\", [[0, 1], [0, 10]])\n@pytest.mark.parametrize('n_samples', [10, 100])\ndef test_online_em_learned_params_shape(observation_dim, latent_dim, spectrum_range, n_samples):\n c, F, psi, covar, observations = generate_and_sample_fa_model(\n observation_dim, latent_dim, spectrum_range, n_samples, random_seed=0,\n )\n _, mean, covar = learn_fa_with_online_em(\n observations, latent_dim, n_warm_up_time_steps=1, random_seed=0,\n )\n assert mean.shape == (observation_dim,)\n assert covar.shape == (observation_dim, observation_dim)\n",
"id": "6096523",
"language": "Python",
"matching_score": 5.174328327178955,
"max_stars_count": 0,
"path": "tests/test_experiments/test_online_fa.py"
},
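These experiment tests drive `generate_fa_model` and `sample_fa_observations` indirectly through `generate_and_sample_fa_model`. The helpers can also be called directly; a brief sketch (assuming the `experiments` package is on the path, with illustrative sizes):

from experiments.online_fa import generate_fa_model, sample_fa_observations

c, F, psi = generate_fa_model(observation_dim=10, latent_dim=5, spectrum_range=[0, 1], random_seed=0)
observations = sample_fa_observations(c, F, psi, n_samples=100, random_seed=0)
print(observations.shape)  # expected: torch.Size([100, 10])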
{
"content": "import os\nfrom pathlib import Path\nfrom typing import List, Optional\n\nimport pandas as pd\nimport torch\nfrom torch import Tensor\nfrom torch.distributions.multivariate_normal import MultivariateNormal\nfrom sklearn.decomposition import FactorAnalysis\nimport yaml\nimport click\n\nfrom swafa.fa import OnlineGradientFactorAnalysis, OnlineEMFactorAnalysis, OnlineFactorAnalysis\nfrom experiments.utils.factory import OPTIMISER_FACTORY\nfrom experiments.utils.metrics import (\n compute_fa_covariance,\n compute_distance_between_matrices,\n compute_gaussian_wasserstein_distance,\n)\n\n\ndef run_all_fa_experiments(experiments_config: List[dict], n_trials: int, gradient_optimiser: str,\n gradient_optimiser_kwargs: dict, gradient_warm_up_time_steps: int,\n em_warm_up_time_steps: int) -> pd.DataFrame:\n \"\"\"\n Run all factor analysis (FA) experiments specified in the given configuration.\n\n For each trial of each experiment, generate a FA model, sample observations from the model and then with this data\n estimate the parameters of the model using three different learning algorithms: sklearn's batch SVD approach, online\n stochastic gradient ascent and online expectation maximisation (EM).\n\n Args:\n experiments_config: Each element of the list is a dictionary specifying the configuration of a single FA\n experiment. Must contain the following fields:\n - observation_dim: (int) The size of the observed variable space of the FA model. Note that in each\n trial the full covariance matrix of the FA model will be constructed, of shape\n (observation_dim, observation_dim), so this should not be too big.\n - latent_dim: (int) The size of the latent variable space of the FA model.\n - spectrum_range: ([float, float]) The end points of the \"spectrum\", which controls the conditioning of\n the covariance matrix of the true FA model.\n - n_samples: (List[int]) The number of observations sampled from the true FA model. All FA learning\n algorithms will be fit to this data. In the case of the batch algorithm, a separate model will be\n fit to each different sample size in the list. In the case of the online algorithms, training with\n n_samples[i] observations will begin from the model fit to n_samples[i - 1] observations. Sample\n sizes must come in increasing order.\n n_trials: The number of trials to run for each experiment, for different random seeds.\n gradient_optimiser: The name of the PyTorch optimiser used in the online gradient learning algorithm. Options\n are 'sgd' and 'adam'.\n gradient_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used in the online gradient FA learning\n algorithm.\n gradient_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model in the\n online gradient algorithm before updating the other parameters.\n em_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model in the\n online EM algorithm before updating the other parameters.\n\n Returns:\n The results of each experiment. 
The number of rows in the DataFrame is equal to\n sum[len(config[n_samples]) * n_trials for config in experiments_config].\n The DataFrame has the following columns:\n - observation_dim: (int) Same as above.\n - latent_dim: (int) Same as above.\n - spectrum_min: (float) The lower bound of the spectrum range.\n - spectrum_max: (float) The upper bound of the spectrum range.\n - n_samples: (int) The number of samples used to fit the model.\n - covar_norm: (float) The Frobenius norm of the the true covariance matrix of the FA model.\n - covar_distance_sklearn: (float) The Frobenius norm of the difference between the true covariance matrix\n and the covariance matrix estimated by sklearn's `FactorAnalysis`.\n - covar_distance_online_gradient: (float) The Frobenius norm of the difference between the true covariance\n matrix and the covariance matrix estimated by `OnlineGradientFactorAnalysis`.\n - covar_distance_online_em: (float) The Frobenius norm of the difference between the true covariance\n matrix and the covariance matrix estimated by `OnlineEMFactorAnalysis`.\n - wasserstein_sklearn: (float) The Wasserstein distance between the Gaussian distribution defined by the\n true FA model and the Gaussian distribution defined by the sklearn FA model.\n - wasserstein_online_gradient: (float) The Wasserstein distance between the Gaussian distribution defined by\n the true FA model and the Gaussian distribution defined by the online gradient FA model.\n - wasserstein_online_em: (float) The Wasserstein distance between the Gaussian distribution defined by the\n true FA model and the Gaussian distribution defined by the online EM FA model.\n - experiment: (int) The index of the experiment.\n - trial: (int) The index of the trial within the experiment.\n \"\"\"\n results = []\n for i_experiment, config in enumerate(experiments_config):\n print(f'Running experiment {i_experiment + 1} of {len(experiments_config)}...')\n print('-' * 100)\n for i_trial in range(n_trials):\n print(f'Running trial {i_trial + 1} of {n_trials}...')\n\n trial_results = run_fa_experiment_trial(\n observation_dim=config['observation_dim'],\n latent_dim=config['latent_dim'],\n spectrum_range=config['spectrum_range'],\n n_samples=config['n_samples'],\n gradient_optimiser=gradient_optimiser,\n gradient_optimiser_kwargs=gradient_optimiser_kwargs,\n gradient_warm_up_time_steps=gradient_warm_up_time_steps,\n em_warm_up_time_steps=em_warm_up_time_steps,\n samples_random_seed=i_trial,\n algorithms_random_seed=i_trial + 1,\n )\n\n trial_results['experiment'] = i_experiment + 1\n trial_results['trial'] = i_trial + 1\n results.append(trial_results)\n\n print('-' * 100)\n print('-' * 100)\n\n return pd.concat(results, ignore_index=True)\n\n\ndef run_fa_experiment_trial(observation_dim: int, latent_dim: int, spectrum_range: [float, float], n_samples: List[int],\n gradient_optimiser: str, gradient_optimiser_kwargs: dict,\n gradient_warm_up_time_steps: int, em_warm_up_time_steps: int, samples_random_seed: int,\n algorithms_random_seed: int) -> pd.DataFrame:\n \"\"\"\n Run a factor analysis (FA) experiment trial for the given parameters.\n\n Generate a FA model, sample observations from the model and then with this data estimate the parameters of the model\n using three different learning algorithms: sklearn's batch SVD approach, online stochastic gradient ascent and\n online expectation maximisation (EM).\n\n Args:\n observation_dim: The size of the observed variable space of the FA model. 
Note that the full covariance matrix\n of the FA model will be constructed, of shape (observation_dim, observation_dim), so this should not be too\n big.\n latent_dim: The size of the latent variable space of the FA model.\n spectrum_range: The end points of the \"spectrum\", which controls the conditioning of the covariance matrix of\n the true FA model.\n n_samples: The number of observations sampled from the true FA model. All FA learning algorithms will be fit to\n this data. In the case of the batch algorithm, a separate model will be fit to each different sample size in\n the list. In the case of the online algorithms, training with n_samples[i] observations will begin from the\n model fit to n_samples[i - 1] observations. Sample sizes must come in increasing order.\n gradient_optimiser: The name of the PyTorch optimiser used in the online gradient learning algorithm. Options\n are 'sgd' and 'adam'.\n gradient_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used in the online gradient FA learning\n algorithm.\n gradient_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model in the\n online gradient algorithm before updating the other parameters.\n em_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model in the\n online EM algorithm before updating the other parameters.\n samples_random_seed: The random seed used to construct the true FA model and generate samples from it.\n algorithms_random_seed: The random seed used in all three learning algorithms.\n\n Returns:\n The results of each trial. Has len(n_samples) rows and the following columns:\n - observation_dim: (int) Same as input.\n - latent_dim: (int) Same as input.\n - spectrum_min: (float) The lower bound of the spectrum range.\n - spectrum_max: (float) The upper bound of the spectrum range.\n - n_samples: (int) The number of samples used to fit the models.\n - covar_norm: (float) The Frobenius norm of the the true covariance matrix of the FA model.\n - covar_distance_sklearn: (float) The Frobenius norm of the difference between the true covariance matrix\n and the covariance matrix estimated by sklearn's `FactorAnalysis`.\n - covar_distance_online_gradient: (float) The Frobenius norm of the difference between the true covariance\n matrix and the covariance matrix estimated by `OnlineGradientFactorAnalysis`.\n - covar_distance_online_em: (float) The Frobenius norm of the difference between the true covariance\n matrix and the covariance matrix estimated by `OnlineEMFactorAnalysis`.\n - wasserstein_sklearn: (float) The Wasserstein distance between the Gaussian distribution defined by the\n true FA model and the Gaussian distribution defined by the sklearn FA model.\n - wasserstein_online_gradient: (float) The Wasserstein distance between the Gaussian distribution defined by\n the true FA model and the Gaussian distribution defined by the online gradient FA model.\n - wasserstein_online_em: (float) The Wasserstein distance between the Gaussian distribution defined by the\n true FA model and the Gaussian distribution defined by the online EM FA model.\n \"\"\"\n max_samples = n_samples[-1]\n mean_true, F_true, psi_true, covar_true, observations_train = generate_and_sample_fa_model(\n observation_dim=observation_dim,\n latent_dim=latent_dim,\n spectrum_range=spectrum_range,\n n_samples=max_samples,\n random_seed=samples_random_seed,\n )\n\n covar_norm = compute_distance_between_matrices(covar_true, torch.zeros_like(covar_true))\n\n 
fa_online_gradient = None\n fa_online_em = None\n samples = Tensor([])\n n_samples_iterator = [0] + n_samples\n\n all_results = []\n for i, n_samples_first in enumerate(n_samples_iterator[:-1]):\n n_samples_last = n_samples_iterator[i + 1]\n new_samples = observations_train[n_samples_first:n_samples_last, :]\n samples = torch.cat([samples, new_samples])\n\n print(f'Using {len(samples)} samples...')\n\n mean_sklearn, covar_sklearn = learn_fa_with_sklearn(\n observations=samples,\n latent_dim=latent_dim,\n random_seed=algorithms_random_seed,\n )\n\n fa_online_gradient, mean_online_gradient, covar_online_gradient = learn_fa_with_online_gradients(\n observations=new_samples,\n latent_dim=latent_dim,\n optimiser_name=gradient_optimiser,\n optimiser_kwargs=gradient_optimiser_kwargs,\n n_warm_up_time_steps=gradient_warm_up_time_steps,\n random_seed=algorithms_random_seed,\n fa=fa_online_gradient,\n )\n\n fa_online_em, mean_online_em, covar_online_em = learn_fa_with_online_em(\n observations=new_samples,\n latent_dim=latent_dim,\n n_warm_up_time_steps=em_warm_up_time_steps,\n random_seed=algorithms_random_seed,\n fa=fa_online_em,\n )\n\n print('Computing metrics...')\n\n results = dict(\n observation_dim=observation_dim,\n latent_dim=latent_dim,\n spectrum_min=spectrum_range[0],\n spectrum_max=spectrum_range[1],\n n_samples=n_samples_last,\n covar_norm=covar_norm,\n covar_distance_sklearn=compute_distance_between_matrices(covar_true, covar_sklearn),\n covar_distance_online_gradient=compute_distance_between_matrices(covar_true, covar_online_gradient),\n covar_distance_online_em=compute_distance_between_matrices(covar_true, covar_online_em),\n wasserstein_sklearn=compute_gaussian_wasserstein_distance(\n mean_true, covar_true, mean_sklearn, covar_sklearn,\n ),\n wasserstein_online_gradient=compute_gaussian_wasserstein_distance(\n mean_true, covar_true, mean_online_gradient, covar_online_gradient,\n ),\n wasserstein_online_em=compute_gaussian_wasserstein_distance(\n mean_true, covar_true, mean_online_em, covar_online_em,\n ),\n )\n\n all_results.append(results)\n\n return pd.DataFrame(all_results)\n\n\ndef generate_and_sample_fa_model(observation_dim: int, latent_dim: int, spectrum_range: [float, float], n_samples: int,\n random_seed: int) -> (Tensor, Tensor, Tensor, Tensor, Tensor):\n \"\"\"\n Generate a factor analysis (FA) model and sample observations from it.\n\n Args:\n observation_dim: The size of the observed variable space of the FA model.\n latent_dim: The size of the latent variable space of the FA model.\n spectrum_range: The end points of the \"spectrum\", which controls the conditioning of the covariance matrix of\n the FA model.\n n_samples: The number of observations sampled from the FA model.\n random_seed: The random seed to use for generating the mean and covariance of the FA model and then sampling\n from it.\n\n Returns:\n c: The mean of the FA model. Of shape (observation_dim,).\n F: The factor loading matrix. Of shape (observation_dim, latent_dim).\n psi: The Gaussian noise covariance matrix. Of shape (observation_dim, observation_dim).\n covar: The full covariance of the FA model. Of shape (observation_dim, observation_dim).\n observations: The observations sampled from the FA model. 
Of shape (n_samples, observation_dim).\n \"\"\"\n print('Generating and sampling FA model...')\n c, F, psi = generate_fa_model(observation_dim, latent_dim, spectrum_range, random_seed)\n covar = compute_fa_covariance(F, psi)\n observations = sample_fa_observations(c, F, psi, n_samples, random_seed)\n return c, F, psi, covar, observations\n\n\ndef generate_fa_model(observation_dim: int, latent_dim: int, spectrum_range: [float, float], random_seed: int,\n ) -> (Tensor, Tensor, Tensor):\n \"\"\"\n Generate a factor analysis (FA) model.\n\n The mean of the FA model is sampled from a standard normal distribution.\n\n The factor loading matrix is generated in such a way that it is possible to control the conditioning number of\n the resulting covariance matrix of the FA model. The steps are as follows:\n\n 1. Generate a matrix A, of shape (observation_dim, observation_dim), by sampling from a standard normal\n distribution.\n 2. Compute the positive semi-definite matrix, M = A*A^T, of shape (observation_dim, observation_dim).\n 3. Compute the eigenvalue decomposition of M and keep the first latent_dim eigenvectors.\n 4. Generate the spectrum, s^2, of shape (observation_dim, 1), by sampling from a uniform distribution with range\n equal to spectrum_range.\n 5. Multiply the eigenvectors by s to obtain the columns of the factor loading matrix, F.\n\n Finally, the diagonal entries of the Gaussian noise covariance matrix are sampled from a uniform distribution with\n range [0, max(s^2)].\n\n This approach ensures that the variance of the observation noise is maximally as large as the largest value of\n s^2. This corresponds to an assumption that Fh is the \"signal\" corrupted by additive noise, where h is a latent\n variable vector.\n\n Args:\n observation_dim: The size of the observed variable space of the FA model.\n latent_dim: The size of the latent variable space of the FA model.\n spectrum_range: The end points of the \"spectrum\", which controls the conditioning of the covariance matrix of\n the FA model.\n random_seed: The random seed to use for generating the mean and covariance of the FA model.\n\n Returns:\n c: The mean of the FA model. Of shape (observation_dim,).\n F: The factor loading matrix. Of shape (observation_dim, latent_dim).\n psi: The Gaussian noise covariance matrix. Of shape (observation_dim, observation_dim).\n \"\"\"\n torch.manual_seed(random_seed)\n\n c = torch.randn(observation_dim)\n\n A = torch.randn(observation_dim, observation_dim)\n M = A.mm(A.t())\n _, V = torch.linalg.eigh(M)\n Vk = V[:, -latent_dim:].fliplr() # torch.linalg.eigh returns eigenvalues in ascending order\n spectrum = torch.FloatTensor(observation_dim, 1).uniform_(*spectrum_range)\n F = Vk * torch.sqrt(spectrum)\n\n diag_psi = torch.FloatTensor(observation_dim).uniform_(0, spectrum.max())\n psi = torch.diag(diag_psi)\n\n return c, F, psi\n\n\ndef sample_fa_observations(c: Tensor, F: Tensor, psi: Tensor, n_samples: int, random_seed: int) -> Tensor:\n \"\"\"\n Sample observations from a factor analysis (FA) model.\n\n Observations are of the form Fh + c + noise, where h is a latent variable vector sampled from N(0, I) and the noise\n vector is sampled from N(0, psi).\n\n Args:\n c: The mean of the FA model. Of shape (observation_dim,).\n F: The factor loading matrix. Of shape (observation_dim, latent_dim).\n psi: The Gaussian noise covariance matrix. 
Of shape (observation_dim, observation_dim).\n n_samples: The number of observations sampled from the FA model.\n random_seed: The random seed to use for sampling the observations.\n\n Returns:\n Sampled observations. Of shape (n_samples, observation_dim).\n \"\"\"\n torch.manual_seed(random_seed)\n\n observation_dim, latent_dim = F.shape\n p_h = MultivariateNormal(loc=torch.zeros(latent_dim), covariance_matrix=torch.eye(latent_dim))\n p_noise = MultivariateNormal(loc=torch.zeros(observation_dim), covariance_matrix=psi)\n H = p_h.sample((n_samples,))\n noise = p_noise.sample((n_samples,))\n return H.mm(F.t()) + c.reshape(1, -1) + noise\n\n\ndef learn_fa_with_sklearn(observations: Tensor, latent_dim: int, random_seed: int) -> (Tensor, Tensor):\n \"\"\"\n Learn the parameters of a factor analysis (FA) model using the sklearn (randomised) SVD method.\n\n Args:\n observations: Sampled observations. Of shape (n_samples, observation_dim).\n latent_dim: The size of the latent variable space of the FA model.\n random_seed: The random seed used in the algorithm.\n\n Returns:\n mean: The learned mean of the FA model.\n covar: The learned covariance matrix of the FA model.\n \"\"\"\n print('Learning FA model via sklearn (randomised) SVD method...')\n fa = FactorAnalysis(n_components=latent_dim, svd_method='randomized', random_state=random_seed)\n fa.fit(observations.numpy())\n mean = torch.from_numpy(fa.mean_).float()\n covar = torch.from_numpy(fa.get_covariance()).float()\n return mean, covar\n\n\ndef learn_fa_with_online_gradients(observations: Tensor, latent_dim: int, optimiser_name: str, optimiser_kwargs: dict,\n n_warm_up_time_steps: int, random_seed: int,\n fa: Optional[OnlineGradientFactorAnalysis] = None) -> (Tensor, Tensor):\n \"\"\"\n Learn the parameters of a factor analysis (FA) model via online stochastic gradient ascent.\n\n Args:\n observations: Sampled observations. Of shape (n_samples, observation_dim).\n latent_dim: The size of the latent variable space of the FA model.\n optimiser_name: The name of the PyTorch optimiser used in the learning algorithm. Options are 'sgd' and 'adam'.\n optimiser_kwargs: Keyword arguments for the PyTorch optimiser used in the learning algorithm.\n n_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model before\n updating the other parameters.\n random_seed: The random seed used in the algorithm.\n fa: If a FA model is provided, the observations will be used to fit this model. Else a completely new model will\n be initialised.\n\n Returns:\n fa: The trained FA model.\n mean: The learned mean of the FA model.\n covar: The learned covariance matrix of the FA model.\n \"\"\"\n print('Learning FA model via online stochastic gradient ascent...')\n if fa is None:\n observation_dim = observations.shape[1]\n optimiser = OPTIMISER_FACTORY[optimiser_name]\n fa = OnlineGradientFactorAnalysis(\n observation_dim=observation_dim,\n latent_dim=latent_dim,\n optimiser=optimiser,\n optimiser_kwargs=optimiser_kwargs,\n n_warm_up_time_steps=n_warm_up_time_steps,\n random_seed=random_seed,\n )\n return learn_fa_online(fa, observations)\n\n\ndef learn_fa_with_online_em(observations: Tensor, latent_dim: int, n_warm_up_time_steps: int, random_seed: int,\n fa: Optional[OnlineEMFactorAnalysis] = None) -> (Tensor, Tensor):\n \"\"\"\n Learn the parameters of a factor analysis (FA) model via online expectation maximisation (EM).\n\n Args:\n observations: Sampled observations. 
Of shape (n_samples, observation_dim).\n latent_dim: The size of the latent variable space of the FA model.\n n_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model before\n updating the other parameters.\n random_seed: The random seed used in the algorithm.\n fa: If a FA model is provided, the observations will be used to fit this model. Else a completely new model will\n be initialised.\n\n Returns:\n fa: The trained FA model.\n mean: The learned mean of the FA model.\n covar: The learned covariance matrix of the FA model.\n \"\"\"\n print('Learning FA model via online expectation maximisation...')\n if fa is None:\n observation_dim = observations.shape[1]\n fa = OnlineEMFactorAnalysis(\n observation_dim=observation_dim,\n latent_dim=latent_dim,\n n_warm_up_time_steps=n_warm_up_time_steps,\n random_seed=random_seed,\n )\n return learn_fa_online(fa, observations)\n\n\ndef learn_fa_online(fa: OnlineFactorAnalysis, observations: Tensor) -> (OnlineFactorAnalysis, Tensor, Tensor):\n \"\"\"\n Learn the parameters of a factor analysis (FA) model online.\n\n The model is updated iteratively using the given observations, one by one. A single pass is made over the data.\n\n Args:\n fa: An initialised online FA learning algorithm.\n observations: Sampled observations. Of shape (n_samples, observation_dim).\n\n Returns:\n fa: The trained FA model.\n mean: The learned mean of the FA model.\n covar: The learned covariance matrix of the FA model.\n \"\"\"\n for theta in observations:\n fa.update(theta)\n mean = fa.get_mean()\n covar = fa.get_covariance()\n return fa, mean, covar\n\n\n@click.command()\n@click.option('--results-output-path', type=str, help='The parquet file path to save the experiment results')\ndef main(results_output_path: str):\n \"\"\"\n Run the factor analysis experiments specified in the configuration and save the results.\n\n Args:\n results_output_path: The parquet file path to save the experiment results:\n \"\"\"\n with open(\"params.yaml\", 'r') as fd:\n params = yaml.safe_load(fd)['online_fa']\n\n results = run_all_fa_experiments(\n experiments_config=params['experiments'],\n n_trials=params['n_trials'],\n gradient_optimiser=params['gradient_optimiser'],\n gradient_optimiser_kwargs=params['gradient_optimiser_kwargs'],\n gradient_warm_up_time_steps=params['gradient_warm_up_time_steps'],\n em_warm_up_time_steps=params['em_warm_up_time_steps'],\n )\n\n print('Results:\\n')\n print(results)\n\n Path(os.path.dirname(results_output_path)).mkdir(parents=True, exist_ok=True)\n results.to_parquet(results_output_path)\n\n\nif __name__ == '__main__':\n main()\n",
"id": "9771290",
"language": "Python",
"matching_score": 4.594679832458496,
"max_stars_count": 0,
"path": "experiments/online_fa.py"
},
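`experiments/online_fa.py` is normally launched through its `click` CLI, with `main()` reading the `online_fa` section of `params.yaml` and writing the results to parquet. The runner can also be invoked programmatically with the same fields; a hedged sketch with illustrative settings (a small config, so it runs quickly):

from experiments.online_fa import run_all_fa_experiments

results = run_all_fa_experiments(
    experiments_config=[dict(observation_dim=10, latent_dim=5, spectrum_range=[0, 1], n_samples=[100, 1000])],
    n_trials=2,
    gradient_optimiser='adam',
    gradient_optimiser_kwargs=dict(lr=1e-3),
    gradient_warm_up_time_steps=1,
    em_warm_up_time_steps=1,
)
print(results[['n_samples', 'wasserstein_sklearn', 'wasserstein_online_gradient', 'wasserstein_online_em']])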
{
"content": "import numpy as np\nimport torch\nfrom torch import Tensor\n\n\ndef compute_fa_covariance(F: Tensor, psi: Tensor) -> Tensor:\n \"\"\"\n Compute the covariance matrix of a factor analysis (FA) model, given the factor loading matrix and the noise\n covariance matrix.\n\n The covariance is F*F^T + psi.\n\n Args:\n F: The factor loading matrix. Of shape (observation_dim, latent_dim).\n psi: The Gaussian noise covariance matrix. Of shape (observation_dim, observation_dim).\n\n Returns:\n The covariance matrix of the FA model. Of shape (observation_dim, observation_dim).\n \"\"\"\n return F.mm(F.t()) + psi\n\n\ndef compute_distance_between_matrices(A: Tensor, B: Tensor) -> float:\n \"\"\"\n Compute the Frobenius norm of the difference between two matrices of the same size.\n\n Args:\n A: Matrix of shape (n, m).\n B: Matrix of shape (n, m).\n\n Returns:\n The Frobenius norm of the difference between the two matrices.\n \"\"\"\n return torch.linalg.norm(A - B).item()\n\n\ndef compute_gaussian_log_likelihood(mean: Tensor, covar: Tensor, X: Tensor) -> float:\n \"\"\"\n Compute the log-likelihood of a Gaussian distribution with the given mean and covariance matrix, given the\n observations.\n\n Args:\n mean: The mean of the Gaussian distribution. Of shape (observation_dim,).\n covar: The covariance of the Gaussian distribution. Of shape (observation_dim, observation_dim).\n X: The observations. Of shape (n_observations, observation_dim).\n\n Returns:\n The log-likelihood, averaged over the given observations.\n \"\"\"\n n, d = X.shape\n inv_covar = torch.linalg.inv(covar)\n centred_X = X - mean.reshape(1, -1)\n unnormalised_log_likelihood = -0.5 * torch.sum(centred_X.mm(inv_covar) * centred_X, dim=1).mean()\n log_normalising_factor = -0.5 * (torch.logdet(covar) + d * np.log(2 * np.pi))\n return (unnormalised_log_likelihood + log_normalising_factor).item()\n\n\ndef compute_gaussian_wasserstein_distance(mean1: Tensor, covar1: Tensor, mean2: Tensor, covar2: Tensor) -> float:\n \"\"\"\n Compute the 2-Wasserstein distance between two non-degenerate Gaussian distributions with respect to the Frobenius\n norm [1].\n\n Args:\n mean1: The mean of the first distribution. Of shape (observation_dim,).\n covar1: The covariance matrix of the first distribution. Of shape (observation_dim, observation_dim).\n mean2: The mean of the second distribution. Of shape (observation_dim,).\n covar2: The covariance matrix of the second distribution. Of shape (observation_dim, observation_dim).\n\n Returns:\n The 2-Wasserstein distance between the two distributions.\n\n References:\n [1] https://en.wikipedia.org/wiki/Wasserstein_metric#Normal_distributions\n \"\"\"\n contribution_from_mean = compute_distance_between_matrices(mean1, mean2) ** 2\n sqrt_covar2 = matrix_sqrt(covar2)\n contribution_from_covar = torch.trace(\n covar1 + covar2 - 2 * matrix_sqrt(sqrt_covar2.mm(covar1).mm(sqrt_covar2))\n )\n return (contribution_from_mean + contribution_from_covar).sqrt().item()\n\n\ndef matrix_sqrt(A: Tensor) -> Tensor:\n \"\"\"\n Compute the square root of a symmetric positive semi-definite matrix.\n\n Args:\n A: Symmetric positive semi-definite matrix. Of shape (n, n).\n\n Returns\n Matrix B such that B.mm(B) = A. 
Of shape (n, n).\n \"\"\"\n conditioning = 1e3 * 1.1920929e-07 # for float\n eigenvalues, eigenvectors = torch.linalg.eigh(A.float())\n above_cutoff = torch.abs(eigenvalues) > conditioning * torch.max(torch.abs(eigenvalues))\n psigma_diag = torch.sqrt(eigenvalues[above_cutoff])\n eigenvectors = eigenvectors[:, above_cutoff]\n return eigenvectors.mm(torch.diag(psigma_diag)).mm(eigenvectors.t()).type(A.dtype)\n\n\ndef pinball_loss(y: Tensor, y_hat: Tensor, alpha: float) -> float:\n \"\"\"\n Compute the average pinball loss of the given targets and predictions.\n\n For more details see http://josephsalmon.eu/enseignement/UW/STAT593/QuantileRegression.pdf.\n\n Args:\n y: The true targets. Of shape (n,).\n y_hat: The predicted targets. Of shape (n,).\n alpha: The quantile for which to compute the loss.\n\n Returns:\n The average pinball loss.\n \"\"\"\n return torch.maximum(alpha * (y - y_hat), (1 - alpha) * (y_hat - y)).mean().item()\n",
"id": "8852156",
"language": "Python",
"matching_score": 1.0211957693099976,
"max_stars_count": 0,
"path": "experiments/utils/metrics.py"
},
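A short sanity-check sketch for the metrics defined in the record above. The import path is an assumption based on the record's path field (experiments/utils/metrics.py), and the matrices are random, purely illustrative values.

import torch
from experiments.utils.metrics import (  # assumed import path
    compute_fa_covariance,
    compute_gaussian_log_likelihood,
    compute_gaussian_wasserstein_distance,
)

F = torch.randn(5, 2)                    # factor loading matrix
psi = torch.diag(torch.rand(5) + 0.1)    # diagonal Gaussian noise covariance
covar1 = compute_fa_covariance(F, psi)   # F*F^T + psi
covar2 = covar1 + 0.5 * torch.eye(5)     # a second, deliberately different covariance
mean = torch.zeros(5)

X = torch.randn(100, 5)
print(compute_gaussian_log_likelihood(mean, covar1, X))
print(compute_gaussian_wasserstein_distance(mean, covar1, mean, covar2))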
{
"content": "from typing import Dict, List, Optional, Tuple\nfrom copy import deepcopy\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\nfrom torch.optim import Optimizer, Adam\nfrom pytorch_lightning import LightningModule\n\n\nclass FeedForwardNet(LightningModule):\n \"\"\"\n A feed forward neural network with a single output.\n\n Implements functionality which allows it to be used with a PyTorch Lightning Trainer.\n\n Args:\n input_dim: The input dimension of the neural network.\n hidden_dims: The dimension of each hidden layer in the neural network. hidden_dims[i] is the dimension of the\n i-th hidden layer. If None, the input will be connected directly to the output.\n hidden_activation_fn: The activation function to apply to the output of each hidden layer. If None, will be set\n to the identity activation function.\n output_activation_fn: The activation function to apply to the final output when predicting. When training,\n validating or testing, no activation will be applied to the output. Hence, the loss function should take the\n un-activated outputs as input. If None, will be set to the identity activation function.\n bias: Whether or not to include a bias term in the linear layers.\n optimiser_class: The class of the PyTorch optimiser to use for training the neural network.\n optimiser_kwargs: Keyword arguments for the optimiser class.\n loss_fn: The PyTorch loss function to use for training the model. Will be applied to the un-activated outputs\n of the neural network.\n loss_multiplier: A constant with which to multiply the loss of each batch. Useful if an estimate of the total\n loss over the full dataset is needed.\n random_seed: The random seed for initialising the weights of the neural network. If None, won't be reproducible.\n\n Attributes:\n hidden_layers: (torch.nn.ModuleList) A list of torch.nn.Linear, corresponding to dimensions specified in\n hidden_dims.\n output_layer: (torch.nn.Linear) A linear layer with a single output.\n \"\"\"\n\n def __init__(self, input_dim: int, hidden_dims: Optional[List[int]] = None,\n hidden_activation_fn: Optional[nn.Module] = None, output_activation_fn: Optional[nn.Module] = None,\n bias: bool = True, optimiser_class: Optimizer = Adam, optimiser_kwargs: Optional[dict] = None,\n loss_fn: nn.Module = nn.MSELoss(), loss_multiplier: float = 1.0, random_seed: Optional[int] = None):\n super().__init__()\n if random_seed is not None:\n torch.manual_seed(random_seed)\n\n self.input_dim = input_dim\n self.hidden_dims = hidden_dims or []\n self.hidden_activation_fn = hidden_activation_fn or self._identity_fn\n self.output_activation_fn = output_activation_fn or self._identity_fn\n self.optimiser_class = optimiser_class\n self.optimiser_kwargs = optimiser_kwargs or dict(lr=1e-3)\n self.loss_fn = loss_fn\n self.loss_multiplier = loss_multiplier\n\n self.hidden_layers = nn.ModuleList()\n d_in = deepcopy(input_dim)\n for d_out in self.hidden_dims:\n self.hidden_layers.append(nn.Linear(d_in, d_out, bias=bias))\n d_in = d_out\n\n self.output_layer = nn.Linear(d_in, 1, bias=bias)\n\n @staticmethod\n def _identity_fn(X: Tensor) -> Tensor:\n \"\"\"\n An function which returns the input unchanged.\n\n Args:\n X: A Tensor of any shape.\n\n Returns:\n Exactly the same as the unput.\n \"\"\"\n return X\n\n def forward(self, X: Tensor, activate_output: bool = False) -> Tensor:\n \"\"\"\n Run the forward pass of the neural network.\n\n Args:\n X: Input features. 
Of shape (n_samples, n_features).\n activate_output: Whether or not to apply the activation function to the outputs.\n\n Returns:\n Neural network outputs. Of shape (n_samples,).\n \"\"\"\n for layer in self.hidden_layers:\n X = self.hidden_activation_fn(layer(X))\n\n y_hat = self.output_layer(X).squeeze(dim=1)\n if activate_output:\n return self.output_activation_fn(y_hat)\n return y_hat\n\n def configure_optimizers(self) -> Optimizer:\n \"\"\"\n Initialise the optimiser which will be used to train the neural network.\n\n Returns:\n The initialised optimiser\n \"\"\"\n return self.optimiser_class(self.parameters(), **self.optimiser_kwargs)\n\n def training_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Tensor:\n \"\"\"\n Compute the training loss for a single batch of data.\n\n Args:\n batch: (X, y), where X is the input features of shape (batch_size, n_features) and y is the outputs of shape\n (batch_size,).\n batch_idx: The index of the batch relative to the current epoch.\n\n Returns:\n The batch training loss. Of shape (1,).\n \"\"\"\n return self._step(batch, batch_idx)\n\n def validation_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Tensor:\n \"\"\"\n Compute the validation loss for a single batch of data.\n\n Args:\n batch: (X, y), where X is the input features of shape (batch_size, n_features) and y is the outputs of shape\n (batch_size,).\n batch_idx: The index of the batch relative to the current epoch.\n\n Returns:\n The batch validation loss. Of shape (1,).\n \"\"\"\n return self._step(batch, batch_idx)\n\n def test_step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Tensor:\n \"\"\"\n Compute the test loss for a single batch of data.\n\n Args:\n batch: (X, y), where X is the input features of shape (batch_size, n_features) and y is the outputs of shape\n (batch_size,).\n batch_idx: The index of the batch relative to the current epoch.\n\n Returns:\n The batch test loss. Of shape (1,).\n \"\"\"\n return self._step(batch, batch_idx)\n\n def _step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Tensor:\n \"\"\"\n Compute the loss for a single batch of data.\n\n Args:\n batch: (X, y), where X is the input features of shape (batch_size, n_features) and y is the outputs of shape\n (batch_size,).\n batch_idx: The index of the batch relative to the current epoch.\n\n Returns:\n The batch loss. Of shape (1,).\n \"\"\"\n X, y = batch\n y_hat = self(X)\n return self.loss_fn(y_hat, y) * self.loss_multiplier\n\n def predict_step(self, batch: Tensor, batch_idx: int, dataloader_idx: Optional[int] = None) -> Tensor:\n \"\"\"\n Predict the outputs for a single batch of data.\n\n Args:\n batch: (X, y), where X is the input features of shape (batch_size, n_features) and y is the outputs of shape\n (batch_size,).\n batch_idx: The index of the batch relative to the current epoch.\n dataloader_idx: The index of the dataloader (may be more than one) from which the batch was sampled.\n\n Returns:\n The activated outputs. Of shape (batch_size,).\n \"\"\"\n return self(batch[0], activate_output=True)\n\n def validation_epoch_end(self, step_losses: List[Tensor]) -> Dict[str, Tensor]:\n \"\"\"\n Compute the average validation loss over all batches.\n\n Log the loss under the name 'epoch_val_loss'.\n\n Args:\n step_losses: The validation loss for each individual batch. 
Each one of shape (1,).\n\n Returns:\n A dict of the form {'epoch_val_loss': loss}, where loss is the average validation loss, of shape (1,).\n \"\"\"\n loss = self._average_loss(step_losses)\n metrics = dict(epoch_val_loss=loss)\n self.log_dict(metrics)\n return metrics\n\n def test_epoch_end(self, step_losses: List[Tensor]) -> Dict[str, Tensor]:\n \"\"\"\n Compute the average test loss over all batches.\n\n Log the loss under the name 'epoch_test_loss'.\n\n Args:\n step_losses: The test loss for each individual batch. Each one of shape (1,).\n\n Returns:\n A dict of the form {'epoch_test_loss': loss}, where loss is the average test loss, of shape (1,).\n \"\"\"\n loss = self._average_loss(step_losses)\n metrics = dict(epoch_test_loss=loss)\n self.log_dict(metrics)\n return metrics\n\n @staticmethod\n def _average_loss(step_losses: List[Tensor]) -> Tensor:\n \"\"\"\n Compute the average of all losses.\n\n Args:\n step_losses: Individual losses. Each one of shape (1,).\n\n Returns:\n The average loss. Of shape (1,).\n \"\"\"\n return torch.stack(step_losses).mean()\n\n\nclass FeedForwardGaussianNet(FeedForwardNet):\n \"\"\"\n A feed forward neural network which predicts the parameters of a 1D Gaussian distribution for each input.\n\n Implements functionality which allows it to be used with a PyTorch Lightning Trainer.\n\n Args:\n input_dim: The input dimension of the neural network.\n hidden_dims: The dimension of each hidden layer in the neural network. hidden_dims[i] is the dimension of the\n i-th hidden layer. If None, the input will be connected directly to the output.\n hidden_activation_fn: The activation function to apply to the output of each hidden layer. If None, will be set\n to the identity activation function.\n bias: Whether or not to include a bias term in the linear layers.\n optimiser_class: The class of the PyTorch optimiser to use for training the neural network.\n optimiser_kwargs: Keyword arguments for the optimiser class.\n loss_multiplier: A constant with which to multiply the average loss of each batch. Useful if an estimate of the\n total loss over the full dataset is needed.\n target_variance: A constant variance to use for each Gaussian. If None, an extra output layer will be added to\n the network to predict the variance as well as the mean.\n variance_epsilon: Value used to clamp the variance for numerical stability.\n random_seed: The random seed for initialising the weights of the neural network. If None, won't be reproducible.\n\n Attributes:\n hidden_layers: (torch.nn.ModuleList) A list of torch.nn.Linear, corresponding to dimensions specified in\n hidden_dims.\n output_layer: (torch.nn.Linear) A linear layer with a single output which predicts the mean of the Gaussian.\n log_variance_layer: (Optional[torch.nn.Linear]) A linear layer with a single output which predicts the log of\n the variance of the Gaussian. 
If target_variance is not None, will also be None.\n \"\"\"\n\n def __init__(self, input_dim: int, hidden_dims: Optional[List[int]] = None,\n hidden_activation_fn: Optional[nn.Module] = None, bias: bool = True, optimiser_class: Optimizer = Adam,\n optimiser_kwargs: Optional[dict] = None, loss_multiplier: float = 1.0, target_variance: float = None,\n variance_epsilon: float = 1e-6, random_seed: Optional[int] = None):\n\n super().__init__(\n input_dim=input_dim,\n hidden_dims=hidden_dims,\n hidden_activation_fn=hidden_activation_fn,\n bias=bias,\n optimiser_class=optimiser_class,\n optimiser_kwargs=optimiser_kwargs,\n loss_fn=nn.GaussianNLLLoss(reduction='mean', eps=variance_epsilon),\n loss_multiplier=loss_multiplier,\n random_seed=random_seed,\n )\n\n self.log_variance_layer = None\n if target_variance is None:\n d_in = self.output_layer.in_features\n self.log_variance_layer = nn.Linear(d_in, 1, bias=bias)\n\n self.target_variance = target_variance\n\n def forward(self, X: Tensor) -> (Tensor, Tensor):\n \"\"\"\n Run the forward pass of the neural network.\n\n Args:\n X: Input features. Of shape (n_samples, n_features).\n\n Returns:\n mu: Predicted mean of each input. Of shape (n_samples,).\n var: Predicted variance of each input. Of shape (n_samples,).\n \"\"\"\n for layer in self.hidden_layers:\n X = self.hidden_activation_fn(layer(X))\n\n mu = self.output_layer(X).squeeze(dim=1)\n\n if self.log_variance_layer is None:\n var = torch.ones_like(mu) * self.target_variance\n else:\n var = torch.exp(self.log_variance_layer(X).squeeze(dim=1))\n\n return mu, var\n\n def _step(self, batch: Tuple[Tensor, Tensor], batch_idx: int) -> Tensor:\n \"\"\"\n Compute the Gaussian negative log likelihood loss for a single batch of data.\n\n The average batch loss is multiplied by self._loss_multiplier.\n\n Args:\n batch: (X, y), where X is the input features of shape (batch_size, n_features) and y is the outputs of shape\n (batch_size,).\n batch_idx: The index of the batch relative to the current epoch.\n\n Returns:\n The batch loss. Of shape (1,).\n \"\"\"\n X, y = batch\n mu, var = self(X)\n return self.loss_fn(mu, y, var) * self.loss_multiplier\n",
"id": "11349687",
"language": "Python",
"matching_score": 4.842737674713135,
"max_stars_count": 0,
"path": "swafa/models.py"
},
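A minimal training sketch for FeedForwardGaussianNet from the record above, mirroring the Trainer usage in the tests that follow. The synthetic data and hyperparameters are illustrative assumptions, not values taken from the repo.

import torch
from torch.utils.data import TensorDataset, DataLoader
from pytorch_lightning import Trainer
from swafa.models import FeedForwardGaussianNet

net = FeedForwardGaussianNet(input_dim=5, hidden_dims=[8], random_seed=42)

X = torch.randn(64, 5)
y = X.sum(dim=1) + 0.1 * torch.randn(64)  # noisy linear target, purely synthetic
dataloader = DataLoader(TensorDataset(X, y), batch_size=8, shuffle=True, drop_last=True)

# train with the Gaussian negative log-likelihood loss configured by the model itself
Trainer(max_epochs=3).fit(net, train_dataloader=dataloader)

mu, var = net(X)              # predicted mean and variance for each input
print(mu.shape, var.shape)    # torch.Size([64]) torch.Size([64])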
{
"content": "import pytest\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom pytorch_lightning import Trainer, seed_everything\n\nfrom swafa.models import FeedForwardNet, FeedForwardGaussianNet\n\n\nclass TestFeedForwardNet:\n\n @pytest.mark.parametrize(\"input_dim\", [3, 5])\n @pytest.mark.parametrize(\"hidden_dims\", [None, [4], [8, 4]])\n def test_init_layers(self, input_dim, hidden_dims):\n net = FeedForwardNet(input_dim, hidden_dims, random_seed=42)\n hidden_dims = hidden_dims or []\n\n assert len(net.hidden_layers) == len(hidden_dims)\n\n d_in = input_dim\n for i, layer in enumerate(net.hidden_layers):\n d_out = hidden_dims[i]\n assert layer.in_features == d_in\n assert layer.out_features == d_out\n d_in = d_out\n\n assert net.output_layer.in_features == d_in\n assert net.output_layer.out_features == 1\n\n @pytest.mark.parametrize(\"input_dim\", [3, 5])\n @pytest.mark.parametrize(\"hidden_dims\", [None, [4], [8, 4]])\n @pytest.mark.parametrize(\"hidden_activation_fn\", [None, nn.ReLU()])\n @pytest.mark.parametrize(\"n_samples\", [1, 4])\n @pytest.mark.parametrize(\"activate_output\", [True, False])\n def test_forward(self, input_dim, hidden_dims, hidden_activation_fn, n_samples, activate_output):\n\n def zero_activation_fn(x):\n return x * 0\n\n net = FeedForwardNet(input_dim, hidden_dims, hidden_activation_fn=hidden_activation_fn,\n output_activation_fn=zero_activation_fn)\n\n X = torch.rand(n_samples, input_dim)\n y = net(X, activate_output=activate_output)\n\n assert y.shape == (n_samples,)\n assert (y == 0).all() == activate_output\n\n @pytest.mark.parametrize(\"input_dim\", [5])\n @pytest.mark.parametrize(\"hidden_dims\", [None, [4]])\n @pytest.mark.parametrize(\"hidden_activation_fn\", [None, nn.ReLU()])\n @pytest.mark.parametrize(\"loss_fn\", [nn.MSELoss(), nn.BCEWithLogitsLoss()])\n @pytest.mark.parametrize(\"n_samples\", [32, 33])\n def test_fit_with_validation(self, input_dim, hidden_dims, hidden_activation_fn, loss_fn, n_samples):\n seed_everything(42, workers=True)\n net = FeedForwardNet(input_dim, hidden_dims, hidden_activation_fn=hidden_activation_fn, loss_fn=loss_fn)\n original_weights = [torch.clone(w) for w in net.parameters()]\n\n trainer = Trainer(deterministic=True, max_epochs=5, check_val_every_n_epoch=1)\n\n train_dataset = TensorDataset(torch.randn(n_samples, input_dim), torch.empty(n_samples).random_(2))\n train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=4, drop_last=True)\n\n val_dataset = TensorDataset(torch.randn(n_samples, input_dim), torch.empty(n_samples).random_(2))\n val_dataloader = DataLoader(val_dataset, shuffle=False, batch_size=4, drop_last=False)\n\n trainer.fit(net, train_dataloader=train_dataloader, val_dataloaders=val_dataloader)\n\n for w_old, w_new in zip(original_weights, net.parameters()):\n assert not torch.isnan(w_new).any()\n assert not torch.isclose(w_old, w_new).all()\n\n @pytest.mark.parametrize(\"input_dim\", [5])\n @pytest.mark.parametrize(\"hidden_dims\", [None, [4]])\n @pytest.mark.parametrize(\"n_samples\", [32, 33])\n def test_validate(self, input_dim, hidden_dims, n_samples):\n net = FeedForwardNet(input_dim, hidden_dims)\n trainer = Trainer()\n val_dataset = TensorDataset(torch.randn(n_samples, input_dim), torch.empty(n_samples).random_(2))\n val_dataloader = DataLoader(val_dataset, shuffle=False, batch_size=4, drop_last=False)\n\n result = trainer.validate(net, val_dataloaders=val_dataloader)\n\n assert len(result) == 1\n assert list(result[0].keys()) == 
['epoch_val_loss']\n\n @pytest.mark.parametrize(\"input_dim\", [5])\n @pytest.mark.parametrize(\"hidden_dims\", [None, [4]])\n @pytest.mark.parametrize(\"n_samples\", [32, 33])\n def test_test(self, input_dim, hidden_dims, n_samples):\n net = FeedForwardNet(input_dim, hidden_dims)\n trainer = Trainer()\n test_dataset = TensorDataset(torch.randn(n_samples, input_dim), torch.empty(n_samples).random_(2))\n test_dataloader = DataLoader(test_dataset, shuffle=False, batch_size=4, drop_last=False)\n\n result = trainer.test(net, test_dataloaders=test_dataloader)\n\n assert len(result) == 1\n assert list(result[0].keys()) == ['epoch_test_loss']\n\n @pytest.mark.parametrize(\"input_dim\", [5])\n @pytest.mark.parametrize(\"hidden_dims\", [None, [4]])\n @pytest.mark.parametrize(\"n_samples\", [32, 33])\n def test_predict(self, input_dim, hidden_dims, n_samples):\n net = FeedForwardNet(input_dim, hidden_dims)\n trainer = Trainer()\n dataset = TensorDataset(torch.randn(n_samples, input_dim))\n dataloader = DataLoader(dataset, shuffle=False, batch_size=4, drop_last=False)\n\n result = trainer.predict(net, dataloaders=dataloader)\n\n assert len(torch.cat(result)) == n_samples\n\n @pytest.mark.parametrize(\"input_dim\", [5])\n @pytest.mark.parametrize(\"hidden_dims\", [None, [4]])\n @pytest.mark.parametrize(\"n_samples\", [32, 33])\n def test_predict_with_sigmoid_activation(self, input_dim, hidden_dims, n_samples):\n net = FeedForwardNet(input_dim, hidden_dims, output_activation_fn=torch.sigmoid)\n trainer = Trainer()\n dataset = TensorDataset(torch.randn(n_samples, input_dim))\n dataloader = DataLoader(dataset, shuffle=False, batch_size=4, drop_last=False)\n\n result = trainer.predict(net, dataloaders=dataloader)\n\n for batch_predictions in result:\n assert ((batch_predictions >= 0) & (batch_predictions <= 1)).all()\n\n\nclass TestFeedForwardGaussianNet:\n\n @pytest.mark.parametrize(\"input_dim\", [3, 5])\n @pytest.mark.parametrize(\"hidden_dims\", [None, [4], [8, 4]])\n @pytest.mark.parametrize(\"hidden_activation_fn\", [None, nn.ReLU()])\n @pytest.mark.parametrize(\"n_samples\", [1, 4])\n @pytest.mark.parametrize(\"target_variance\", [None, 2])\n def test_forward(self, input_dim, hidden_dims, hidden_activation_fn, n_samples, target_variance):\n net = FeedForwardGaussianNet(\n input_dim, hidden_dims, hidden_activation_fn=hidden_activation_fn, target_variance=target_variance,\n )\n\n X = torch.rand(n_samples, input_dim)\n mu, var = net(X)\n\n assert mu.shape == (n_samples,)\n assert var.shape == (n_samples,)\n assert (var > 0).all()\n\n if target_variance is not None:\n assert (var == target_variance).all()\n\n @pytest.mark.parametrize(\"input_dim\", [3, 5])\n @pytest.mark.parametrize(\"hidden_dims\", [None, [4], [8, 4]])\n @pytest.mark.parametrize(\"n_samples\", [1, 4])\n @pytest.mark.parametrize(\"loss_multiplier\", [1, 5])\n def test_step(self, input_dim, hidden_dims, n_samples, loss_multiplier):\n net = FeedForwardGaussianNet(input_dim, hidden_dims, loss_multiplier=loss_multiplier)\n\n X = torch.rand(n_samples, input_dim)\n y = torch.randn(n_samples)\n batch = (X, y)\n actual_loss = net._step(batch, batch_idx=1)\n\n mu, var = net(X)\n expected_loss = net.loss_fn(mu, y, var) * loss_multiplier\n\n assert torch.isclose(actual_loss, expected_loss)\n\n @pytest.mark.parametrize(\"input_dim\", [5])\n @pytest.mark.parametrize(\"hidden_dims\", [None, [4]])\n @pytest.mark.parametrize(\"hidden_activation_fn\", [None, nn.ReLU()])\n @pytest.mark.parametrize(\"loss_multiplier\", [1.0, 2.0])\n 
@pytest.mark.parametrize(\"target_variance\", [None, 1.0])\n @pytest.mark.parametrize(\"variance_epsilon\", [1e-1, 1e-6])\n @pytest.mark.parametrize(\"n_samples\", [32, 33])\n def test_fit(self, input_dim, hidden_dims, hidden_activation_fn, loss_multiplier, target_variance, variance_epsilon,\n n_samples):\n seed_everything(42, workers=True)\n net = FeedForwardGaussianNet(\n input_dim, hidden_dims, hidden_activation_fn=hidden_activation_fn, loss_multiplier=loss_multiplier,\n target_variance=target_variance, variance_epsilon=variance_epsilon,\n )\n original_weights = [torch.clone(w) for w in net.parameters()]\n\n trainer = Trainer(deterministic=True, max_epochs=5)\n\n train_dataset = TensorDataset(torch.randn(n_samples, input_dim), torch.empty(n_samples).random_(2))\n train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=4, drop_last=True)\n\n trainer.fit(net, train_dataloader=train_dataloader)\n\n for w_old, w_new in zip(original_weights, net.parameters()):\n assert not torch.isnan(w_new).any()\n assert not torch.isclose(w_old, w_new).all()\n",
"id": "3721113",
"language": "Python",
"matching_score": 1.5783103704452515,
"max_stars_count": 0,
"path": "tests/test_models.py"
},
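One parametrised case of TestFeedForwardNet.test_forward from the record above, reproduced as a standalone sketch for stepping through the behaviour outside pytest; it is not part of the test suite.

import torch
import torch.nn as nn
from swafa.models import FeedForwardNet

def zero_activation_fn(x):
    return x * 0

net = FeedForwardNet(3, [8, 4], hidden_activation_fn=nn.ReLU(),
                     output_activation_fn=zero_activation_fn)
X = torch.rand(4, 3)
y = net(X, activate_output=True)
assert y.shape == (4,)
assert (y == 0).all()  # the zero output activation is applied because activate_output=True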
{
"content": "from typing import List\n\nimport numpy as np\nimport pytest\nimport torch\nfrom torch import Tensor\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom pytorch_lightning import Trainer, LightningModule\nfrom pytorch_lightning.callbacks import Callback\n\nfrom swafa.models import FeedForwardNet\nfrom swafa.fa import OnlineGradientFactorAnalysis\nfrom swafa.posterior import ModelPosterior\nfrom swafa.utils import get_weight_dimension\nfrom experiments.utils.callbacks import (\n OnlinePosteriorEvaluationCallback,\n BatchFactorAnalysisPosteriorEvaluationCallback,\n)\nfrom experiments.utils.metrics import (\n compute_distance_between_matrices,\n compute_gaussian_wasserstein_distance,\n)\n\n\nclass TestBasePosteriorEvaluationCallback:\n\n @pytest.mark.parametrize(\n \"n_epochs, collect_epoch_start, eval_epoch_start, eval_epoch_frequency, expected_eval_epochs\",\n [\n (1, 1, 1, 1, [0]),\n (2, 1, 1, 1, [0, 1]),\n (10, 1, 1, 2, [0, 2, 4, 6, 8]),\n (10, 1, 5, 2, [4, 6, 8]),\n (10, 1, 0.5, 2, [4, 6, 8]),\n ]\n )\n def test_eval_epochs(self, n_epochs, collect_epoch_start, eval_epoch_start, eval_epoch_frequency,\n expected_eval_epochs):\n n_samples = 20\n input_dim = 4\n hidden_dims = [8, 8]\n\n callback, true_mean, true_covar, model_posterior = _init_model_with_online_posterior_evaluation_callback(\n input_dim, hidden_dims, collect_epoch_start, eval_epoch_start, eval_epoch_frequency,\n )\n _fit_model_with_callback(model_posterior.model, callback, n_samples, input_dim, n_epochs)\n\n assert callback.eval_epochs == expected_eval_epochs\n\n @pytest.mark.parametrize(\n \"n_samples, batch_size, n_epochs, collect_epoch_start, eval_epoch_frequency, expected_n_weight_iterates\",\n [\n (32, 4, 5, 1, 1, int(32 / 4) * 5),\n (32, 4, 5, 3, 2, int(32 / 4) * (5 - 2)),\n (32, 4, 8, 0.5, 1, int(32 / 4) * (8 - 3)),\n (32, 4, 9, 0.5, 2, int(32 / 4) * (9 - 3)),\n ]\n )\n def test_n_weight_iterates(self, n_samples, batch_size, n_epochs, collect_epoch_start, eval_epoch_frequency,\n expected_n_weight_iterates):\n input_dim = 4\n hidden_dims = [8, 8]\n eval_epoch_start = collect_epoch_start\n\n callback, true_mean, true_covar, model_posterior = _init_model_with_online_posterior_evaluation_callback(\n input_dim, hidden_dims, collect_epoch_start, eval_epoch_start, eval_epoch_frequency,\n )\n _fit_model_with_callback(model_posterior.model, callback, n_samples, input_dim, n_epochs)\n\n assert len(callback.weight_iterates) == expected_n_weight_iterates\n\n @pytest.mark.parametrize(\n \"input_dim, hidden_dims, n_epochs, collect_epoch_start, eval_epoch_frequency\",\n [\n (2, [2], 2, 1, 1),\n (4, [4, 4], 2, 1, 1),\n (2, [2], 3, 1, 1),\n (4, [4, 4], 3, 2, 1),\n ]\n )\n def test_get_empirical_mean_and_covariance(self, input_dim, hidden_dims, n_epochs, collect_epoch_start,\n eval_epoch_frequency):\n n_samples = 20\n eval_epoch_start = collect_epoch_start\n\n callback, true_mean, true_covar, model_posterior = _init_model_with_online_posterior_evaluation_callback(\n input_dim, hidden_dims, collect_epoch_start, eval_epoch_start, eval_epoch_frequency,\n )\n _fit_model_with_callback(model_posterior.model, callback, n_samples, input_dim, n_epochs)\n\n empirical_mean, empirical_covar = callback.get_empirical_mean_and_covariance()\n\n assert isinstance(empirical_mean, Tensor)\n assert isinstance(empirical_covar, Tensor)\n assert empirical_mean.shape == true_mean.shape\n assert empirical_covar.shape == true_covar.shape\n assert (torch.diag(empirical_covar) > 0).all()\n\n @pytest.mark.parametrize(\n \"n_epochs, 
eval_epoch_frequency\",\n [\n (1, 1),\n (2, 1),\n (10, 2),\n (5, 2),\n ]\n )\n def test_posterior_distances_from_mean(self, n_epochs, eval_epoch_frequency):\n n_samples = 20\n input_dim = 4\n hidden_dims = [8, 8]\n\n callback, true_mean, true_covar, model_posterior = _init_model_with_online_posterior_evaluation_callback(\n input_dim, hidden_dims, eval_epoch_frequency=eval_epoch_frequency,\n )\n _fit_model_with_callback(model_posterior.model, callback, n_samples, input_dim, n_epochs)\n\n actual_mean, actual_covar = callback.get_mean_and_covariance()\n assert callback.posterior_distances_from_mean == [\n compute_distance_between_matrices(true_mean, actual_mean) for _ in callback.eval_epochs\n ]\n\n @pytest.mark.parametrize(\n \"n_epochs, eval_epoch_frequency\",\n [\n (1, 1),\n (2, 1),\n (10, 2),\n (5, 2),\n ]\n )\n def test_distances_from_covar(self, n_epochs, eval_epoch_frequency):\n n_samples = 20\n input_dim = 4\n hidden_dims = [8, 8]\n\n callback, true_mean, true_covar, model_posterior = _init_model_with_online_posterior_evaluation_callback(\n input_dim, hidden_dims, eval_epoch_frequency=eval_epoch_frequency,\n )\n _fit_model_with_callback(model_posterior.model, callback, n_samples, input_dim, n_epochs)\n\n actual_mean, actual_covar = callback.get_mean_and_covariance()\n assert callback.posterior_distances_from_covar == [\n compute_distance_between_matrices(true_covar, actual_covar) for _ in callback.eval_epochs\n ]\n\n @pytest.mark.parametrize(\n \"n_epochs, eval_epoch_frequency\",\n [\n (1, 1),\n (2, 1),\n (10, 2),\n (5, 2),\n ]\n )\n def test_posterior_wasserstein_distances(self, n_epochs, eval_epoch_frequency):\n n_samples = 20\n input_dim = 4\n hidden_dims = [8, 8]\n\n callback, true_mean, true_covar, model_posterior = _init_model_with_online_posterior_evaluation_callback(\n input_dim, hidden_dims, eval_epoch_frequency=eval_epoch_frequency,\n )\n _fit_model_with_callback(model_posterior.model, callback, n_samples, input_dim, n_epochs)\n\n actual_distances = [x if not np.isnan(x) else -1 for x in callback.posterior_wasserstein_distances]\n\n actual_mean, actual_covar = callback.get_mean_and_covariance()\n expected_distances = [\n compute_gaussian_wasserstein_distance(true_mean, true_covar, actual_mean, actual_covar)\n for _ in callback.eval_epochs\n ]\n expected_distances = [x if not np.isnan(x) else -1 for x in expected_distances]\n\n assert actual_distances == expected_distances\n\n @pytest.mark.parametrize(\n \"n_epochs, eval_epoch_frequency\",\n [\n (1, 1),\n (2, 1),\n (10, 1),\n ]\n )\n def test_empirical_distances_from_mean(self, n_epochs, eval_epoch_frequency):\n n_samples = 20\n input_dim = 4\n hidden_dims = [8, 8]\n\n callback, true_mean, true_covar, model_posterior = _init_model_with_online_posterior_evaluation_callback(\n input_dim, hidden_dims, eval_epoch_frequency=eval_epoch_frequency,\n )\n _fit_model_with_callback(model_posterior.model, callback, n_samples, input_dim, n_epochs)\n\n actual_mean, actual_covar = callback.get_mean_and_covariance()\n empirical_mean, empirical_covar = callback.get_empirical_mean_and_covariance()\n\n assert len(callback.empirical_distances_from_mean) == len(callback.eval_epochs)\n assert callback.empirical_distances_from_mean[-1] == compute_distance_between_matrices(\n empirical_mean, actual_mean,\n )\n\n @pytest.mark.parametrize(\n \"n_epochs, eval_epoch_frequency\",\n [\n (1, 1),\n (2, 1),\n (10, 1),\n ]\n )\n def test_empirical_distances_from_covar(self, n_epochs, eval_epoch_frequency):\n n_samples = 20\n input_dim = 4\n hidden_dims = [8, 
8]\n\n callback, true_mean, true_covar, model_posterior = _init_model_with_online_posterior_evaluation_callback(\n input_dim, hidden_dims, eval_epoch_frequency=eval_epoch_frequency,\n )\n _fit_model_with_callback(model_posterior.model, callback, n_samples, input_dim, n_epochs)\n\n actual_mean, actual_covar = callback.get_mean_and_covariance()\n empirical_mean, empirical_covar = callback.get_empirical_mean_and_covariance()\n\n assert len(callback.empirical_distances_from_covar) == len(callback.eval_epochs)\n assert callback.empirical_distances_from_covar[-1] == compute_distance_between_matrices(\n empirical_covar, actual_covar,\n )\n\n @pytest.mark.parametrize(\n \"n_epochs, eval_epoch_frequency\",\n [\n (1, 1),\n (2, 1),\n (10, 1),\n ]\n )\n def test_empirical_wasserstein_distances(self, n_epochs, eval_epoch_frequency):\n n_samples = 20\n input_dim = 4\n hidden_dims = [8, 8]\n\n callback, true_mean, true_covar, model_posterior = _init_model_with_online_posterior_evaluation_callback(\n input_dim, hidden_dims, eval_epoch_frequency=eval_epoch_frequency,\n )\n _fit_model_with_callback(model_posterior.model, callback, n_samples, input_dim, n_epochs)\n\n actual_distances = [x if not np.isnan(x) else -1 for x in callback.empirical_wasserstein_distances]\n\n actual_mean, actual_covar = callback.get_mean_and_covariance()\n empirical_mean, empirical_covar = callback.get_empirical_mean_and_covariance()\n\n expected_final_distance = compute_gaussian_wasserstein_distance(\n empirical_mean, empirical_covar, actual_mean, actual_covar,\n )\n expected_final_distance = expected_final_distance if not np.isnan(expected_final_distance) else -1\n\n assert len(actual_distances) == len(callback.eval_epochs)\n assert actual_distances[-1] == expected_final_distance\n\n\nclass TestOnlinePosteriorEvaluationCallback:\n\n @pytest.mark.parametrize(\"input_dim\", [2, 4])\n @pytest.mark.parametrize(\"hidden_dims\", [[2], [4, 4]])\n def test_get_mean_and_covariance(self, input_dim, hidden_dims):\n eval_epoch_frequency = 1\n\n callback, true_mean, true_covar, model_posterior = _init_model_with_online_posterior_evaluation_callback(\n input_dim, hidden_dims, eval_epoch_frequency=eval_epoch_frequency,\n )\n\n actual_mean, actual_covar = callback.get_mean_and_covariance()\n\n assert isinstance(actual_mean, Tensor)\n assert isinstance(actual_covar, Tensor)\n assert actual_mean.shape == true_mean.shape\n assert actual_covar.shape == actual_covar.shape\n assert (torch.diag(actual_covar) > 0).all()\n\n\nclass TestBatchFactorAnalysisPosteriorEvaluationCallback:\n\n @pytest.mark.parametrize(\n \"input_dim, hidden_dims, n_epochs, collect_epoch_start, eval_epoch_frequency\",\n [\n (2, [2], 2, 1, 1),\n (4, [4, 4], 2, 1, 1),\n (2, [2], 2, 2, 1),\n (4, [4, 4], 2, 2, 1),\n ]\n )\n def test_get_mean_and_covariance(self, input_dim, hidden_dims, n_epochs, collect_epoch_start, eval_epoch_frequency):\n n_samples = 20\n eval_epoch_start = collect_epoch_start\n\n callback, true_mean, true_covar, model = _init_model_with_batch_factor_analysis_evaluation_callback(\n input_dim, hidden_dims, collect_epoch_start, eval_epoch_start, eval_epoch_frequency,\n )\n _fit_model_with_callback(model, callback, n_samples, input_dim, n_epochs)\n\n actual_mean, actual_covar = callback.get_mean_and_covariance()\n\n assert isinstance(actual_mean, Tensor)\n assert isinstance(actual_covar, Tensor)\n assert actual_mean.shape == true_mean.shape\n assert actual_covar.shape == true_covar.shape\n assert (torch.diag(actual_covar) > 0).all()\n\n\ndef 
_init_model_with_online_posterior_evaluation_callback(\n input_dim: int, hidden_dims: List[int], collect_epoch_start: int = 1, eval_epoch_start: int = 1,\n eval_epoch_frequency: int = 1,\n) -> (OnlinePosteriorEvaluationCallback, Tensor, Tensor, ModelPosterior):\n net = FeedForwardNet(input_dim, hidden_dims)\n\n model_posterior = ModelPosterior(\n model=net,\n weight_posterior_class=OnlineGradientFactorAnalysis,\n weight_posterior_kwargs=dict(latent_dim=2),\n )\n\n weight_dim = model_posterior._get_weight_dimension()\n true_mean = torch.randn(weight_dim)\n true_covar = torch.rand(weight_dim, weight_dim)\n\n callback = OnlinePosteriorEvaluationCallback(\n posterior=model_posterior.weight_posterior,\n true_mean=true_mean,\n true_covar=true_covar,\n collect_epoch_start=collect_epoch_start,\n eval_epoch_start=eval_epoch_start,\n eval_epoch_frequency=eval_epoch_frequency\n )\n\n return callback, true_mean, true_covar, model_posterior\n\n\ndef _init_model_with_batch_factor_analysis_evaluation_callback(\n input_dim: int, hidden_dims: List[int], collect_epoch_start: int = 1, eval_epoch_start: int = 1,\n eval_epoch_frequency: int = 1,\n) -> (OnlinePosteriorEvaluationCallback, Tensor, Tensor, LightningModule):\n net = FeedForwardNet(input_dim, hidden_dims)\n\n weight_dim = get_weight_dimension(net)\n true_mean = torch.randn(weight_dim)\n true_covar = torch.rand(weight_dim, weight_dim)\n\n callback = BatchFactorAnalysisPosteriorEvaluationCallback(\n latent_dim=2,\n true_mean=true_mean,\n true_covar=true_covar,\n collect_epoch_start=collect_epoch_start,\n eval_epoch_start=eval_epoch_start,\n eval_epoch_frequency=eval_epoch_frequency\n )\n\n return callback, true_mean, true_covar, net\n\n\ndef _fit_model_with_callback(model: LightningModule, callback: Callback, n_samples: int, input_dim: int, n_epochs: int):\n trainer = Trainer(max_epochs=n_epochs, callbacks=[callback])\n\n dataset = TensorDataset(torch.randn(n_samples, input_dim), torch.randn(n_samples))\n dataloader = DataLoader(dataset, batch_size=4, drop_last=True)\n\n trainer.fit(model, train_dataloader=dataloader)\n",
"id": "8016728",
"language": "Python",
"matching_score": 4.864663600921631,
"max_stars_count": 0,
"path": "tests/test_experiments/test_utils/test_callbacks.py"
},
{
"content": "from abc import ABC, abstractmethod\nfrom typing import Any, Optional, Union\n\nimport numpy as np\nimport torch\nfrom torch import Tensor\nfrom pytorch_lightning import Trainer, LightningModule\nfrom pytorch_lightning.callbacks import Callback\nfrom pytorch_lightning.utilities.types import STEP_OUTPUT\nfrom sklearn.decomposition import FactorAnalysis\n\nfrom swafa.custom_types import POSTERIOR_TYPE\nfrom swafa.utils import vectorise_weights, get_callback_epoch_range\nfrom experiments.utils.metrics import compute_distance_between_matrices, compute_gaussian_wasserstein_distance\n\n\nclass BasePosteriorEvaluationCallback(Callback, ABC):\n \"\"\"\n This is an abstract callback which, when fully implemented, can be used to measure how close an estimated posterior\n of a model's weights is to the true posterior.\n\n Also measures how close the mean and covariance of the estimated posterior are to the empirical mean and covariance\n of the weight vectors collected after each training batch update.\n\n Requires implementation of a method which returns the mean and covariance of the estimated posterior.\n\n Note: This callback does not update the estimated posterior. This must be done separately.\n\n Args:\n posterior: The estimated posterior distribution of a model's weights.\n true_mean: The mean of the true posterior distribution of the same model's weights. Of shape (weight_dim,).\n true_covar: The covariance matrix of the true posterior distribution of the same model's weights. Of shape\n (weight_dim, weight_dim).\n collect_epoch_start: The training epoch on which to start collecting weight vectors. Integer indexing starts\n from 1. Can also specify a float between 0 and 1, which corresponds to the fraction of total epochs which\n should pass before starting to collect weight vectors.\n eval_epoch_start: The training epoch on which to start evaluating the posterior. Integer indexing starts from 1.\n Can also specify a float between 0 and 1, which corresponds to the fraction of total epochs which should\n pass before starting to evaluate the posterior. Should be at least as large collect_epoch_start.\n eval_epoch_frequency: The number of epochs between each evaluation of the posterior.\n\n Attributes:\n first_collect_epoch: (int) The first epoch on which weight vectors will be collected.\n last_collect_epoch: (int) The last epoch on which weight vectors will be collected.\n first_eval_epoch: (int) The first epoch on which to evaluate the posterior.\n last_eval_epoch: (int) The last epoch on which to evaluate the posterior.\n weight_iterates: (List[np.ndarray]) The weight vectors collected during training.\n eval_epochs: (List[int]) The epochs on which the posterior was evaluated, with zero indexing.\n posterior_distances_from_mean: (List[float]) The distance between the mean of the true posterior and the mean\n of the estimated posterior on each evaluation epoch. Measured by the Frobenius norm.\n posterior_distances_from_covar: (List[float]) The distance between the covariance matrix of the true posterior\n and the covariance matrix of the estimated posterior on each evaluation epoch. Measured by the Frobenius\n norm.\n posterior_wasserstein_distances: (List[float]) The 2-Wasserstein distance between the true posterior and the\n estimated posterior on each evaluation epoch.\n empirical_distances_from_mean: (List[float]) The distance between the empirical mean of the weight vectors and\n the mean of the estimated posterior on each evaluation epoch. 
Measured by the Frobenius norm.\n empirical_distances_from_covar: (List[float]) The distance between the empirical covariance matrix of the weight\n vectors and the covariance matrix of the estimated posterior on each evaluation epoch. Measured by the\n Frobenius norm.\n empirical_wasserstein_distances: (List[float]) The 2-Wasserstein distance between the empirical mean and\n covariance of the weight vectors and the mean and covariance of the estimated posterior on each evaluation\n epoch.\n \"\"\"\n\n def __init__(self, posterior: Any, true_mean: Tensor, true_covar: Tensor,\n collect_epoch_start: Union[int, float] = 1, eval_epoch_start: Union[int, float] = 1,\n eval_epoch_frequency: int = 1):\n error_msg = f\"collect_epoch_start should be a positive integer or a float between 0 and 1, \" \\\n f\"not {collect_epoch_start}\"\n if isinstance(collect_epoch_start, int) and collect_epoch_start < 1:\n raise ValueError(error_msg)\n if isinstance(collect_epoch_start, float) and not (0 <= collect_epoch_start <= 1):\n raise ValueError(error_msg)\n\n error_msg = f\"eval_epoch_start should be a positive integer or a float between 0 and 1, \" \\\n f\"not {eval_epoch_start}\"\n if isinstance(eval_epoch_start, int) and eval_epoch_start < 1:\n raise ValueError(error_msg)\n if isinstance(eval_epoch_start, float) and not (0 <= eval_epoch_start <= 1):\n raise ValueError(error_msg)\n\n self.posterior = posterior\n self.true_mean = true_mean\n self.true_covar = true_covar\n self.eval_epoch_frequency = eval_epoch_frequency\n self._collect_epoch_start = collect_epoch_start\n self._eval_epoch_start = eval_epoch_start\n\n self.first_collect_epoch = None\n self.last_collect_epoch = None\n self.first_eval_epoch = None\n self.last_eval_epoch = None\n self.weight_iterates = []\n self.eval_epochs = []\n self.posterior_distances_from_mean = []\n self.posterior_distances_from_covar = []\n self.posterior_wasserstein_distances = []\n self.empirical_distances_from_mean = []\n self.empirical_distances_from_covar = []\n self.empirical_wasserstein_distances = []\n\n def on_fit_start(self, trainer: Trainer, pl_module: LightningModule):\n \"\"\"\n Called when fit begins.\n\n Initialise the range of epochs on which weight vectors will be collected and the posterior will be evaluated.\n\n Args:\n trainer: A PyTorch Lightning Trainer which trains the model.\n pl_module: The model being trained.\n \"\"\"\n self.first_collect_epoch, self.last_collect_epoch = get_callback_epoch_range(\n trainer, epoch_start=self._collect_epoch_start,\n )\n\n self.first_eval_epoch, self.last_eval_epoch = get_callback_epoch_range(\n trainer, epoch_start=self._eval_epoch_start,\n )\n\n if self.first_eval_epoch < self.first_collect_epoch:\n raise RuntimeError(f\"first eval epoch must be greater than or equal to first collect epoch, not \"\n f\"{self.first_eval_epoch} and {self.first_collect_epoch}\")\n\n def on_train_batch_end(self, trainer: Trainer, pl_module: LightningModule, outputs: STEP_OUTPUT, batch: Any,\n batch_idx: int, dataloader_idx: int):\n \"\"\"\n Called when the train batch ends.\n\n Collect the model's current weight vector.\n\n Args:\n trainer: A PyTorch Lightning Trainer which trains the model.\n pl_module: The model being trained.\n outputs: Not used.\n batch: Not used.\n batch_idx: Not used.\n dataloader_idx: Not used.\n \"\"\"\n if self.first_collect_epoch <= trainer.current_epoch <= self.last_collect_epoch:\n self.weight_iterates.append(vectorise_weights(pl_module).detach().cpu().numpy())\n\n def on_train_epoch_end(self, trainer: 
Trainer, pl_module: LightningModule, unused: Optional = None):\n \"\"\"\n Called when the train epoch ends.\n\n If the current epoch is divisible by the evaluation frequency, compute and store the following:\n - The distance between the mean vector of the true posterior and the mean vector of the estimated posterior.\n - The distance between the covariance matrix of the true posterior and the covariance matrix of the\n estimated posterior.\n - The 2-Wasserstein distance between the true posterior and the estimated posterior.\n\n Args:\n trainer: A PyTorch Lightning Trainer which trains the model.\n pl_module: The model being trained. The total dimension of its weights should match the dimension of the\n true and estimated posterior.\n unused: Only present to match the signature of the original method.\n \"\"\"\n in_eval_range = self.first_eval_epoch <= trainer.current_epoch <= self.last_eval_epoch\n if in_eval_range & (self.eval_epoch_frequency > 0) & (trainer.current_epoch % self.eval_epoch_frequency == 0):\n\n mean, covar = self.get_mean_and_covariance()\n empirical_mean, empirical_covar = self.get_empirical_mean_and_covariance()\n\n self.eval_epochs.append(trainer.current_epoch)\n\n self.posterior_distances_from_mean.append(\n compute_distance_between_matrices(self.true_mean, mean)\n )\n self.posterior_distances_from_covar.append(\n compute_distance_between_matrices(self.true_covar, covar)\n )\n self.posterior_wasserstein_distances.append(\n compute_gaussian_wasserstein_distance(self.true_mean, self.true_covar, mean, covar)\n )\n\n self.empirical_distances_from_mean.append(\n compute_distance_between_matrices(empirical_mean, mean)\n )\n self.empirical_distances_from_covar.append(\n compute_distance_between_matrices(empirical_covar, covar)\n )\n self.empirical_wasserstein_distances.append(\n compute_gaussian_wasserstein_distance(empirical_mean, empirical_covar, mean, covar)\n )\n\n def get_empirical_mean_and_covariance(self) -> (Tensor, Tensor):\n \"\"\"\n Get the empirical mean and covariance of the weight vectors.\n\n Returns:\n mean: The empirical mean of the weight vectors. Of shape (weight_dim,)\n covar: The empirical covariance of the weight vectors. Of shape (weight_dim, weight_dim).\n \"\"\"\n W = np.vstack(self.weight_iterates)\n mean = torch.from_numpy(np.mean(W, axis=0)).float()\n covar = torch.from_numpy(np.cov(W, rowvar=False)).float()\n\n return mean, covar\n\n @abstractmethod\n def get_mean_and_covariance(self) -> (Tensor, Tensor):\n \"\"\"\n Get the mean and covariance of the estimated posterior of the model's weights.\n\n Returns:\n mean: The mean of the posterior. Of shape (weight_dim,)\n covar: The covariance of the posterior. Of shape (weight_dim, weight_dim).\n \"\"\"\n ...\n\n\nclass OnlinePosteriorEvaluationCallback(BasePosteriorEvaluationCallback):\n \"\"\"\n This callback measures how close the estimated posterior of a model's weights is to the true posterior.\n\n Also measures how close the mean and covariance of the estimated posterior are to the empirical mean and covariance\n of the weight vectors collected after each training batch update.\n\n The callback assumes that the estimated posterior is updated online during training and is called at the end of each\n epoch.\n\n Args:\n posterior: The estimated posterior distribution of a model's weights.\n true_mean: The mean of the true posterior distribution of the same model's weights. Of shape (weight_dim,).\n true_covar: The covariance matrix of the true posterior distribution of the same model's weights. 
Of shape\n (weight_dim, weight_dim).\n collect_epoch_start: The training epoch on which to start collecting weight vectors. Integer indexing starts\n from 1. Can also specify a float between 0 and 1, which corresponds to the fraction of total epochs which\n should pass before starting to collect weight vectors.\n eval_epoch_start: The training epoch on which to start evaluating the posterior. Integer indexing starts from 1.\n Can also specify a float between 0 and 1, which corresponds to the fraction of total epochs which should\n pass before starting to evaluate the posterior. Should be at least as large collect_epoch_start.\n eval_epoch_frequency: The number of epochs between each evaluation of the posterior.\n \"\"\"\n\n def __init__(self, posterior: POSTERIOR_TYPE, true_mean: Tensor, true_covar: Tensor,\n collect_epoch_start: Union[int, float] = 1, eval_epoch_start: Union[int, float] = 1,\n eval_epoch_frequency: int = 1):\n super().__init__(posterior, true_mean, true_covar, collect_epoch_start=collect_epoch_start,\n eval_epoch_start=eval_epoch_start, eval_epoch_frequency=eval_epoch_frequency)\n\n def get_mean_and_covariance(self) -> (Tensor, Tensor):\n \"\"\"\n Get the mean and covariance of the estimated posterior of the model's weights.\n\n Returns:\n mean: The mean of the posterior. Of shape (weight_dim,)\n covar: The covariance of the posterior. Of shape (weight_dim, weight_dim).\n \"\"\"\n return self.posterior.get_mean(), self.posterior.get_covariance()\n\n\nclass BatchFactorAnalysisPosteriorEvaluationCallback(BasePosteriorEvaluationCallback):\n \"\"\"\n This callback measures how close the estimated posterior of a model's weights is to the true posterior.\n\n Also measures how close the mean and covariance of the estimated posterior are to the empirical mean and covariance\n of the weight vectors collected after each training batch update.\n\n The posterior is learned via a batch factor analysis (FA) algorithm (randomised SVD). The model's weights are\n collected after each batch training step. Before each evaluation of the posterior, the batch FA algorithm is fit to\n the weight vectors which have been collected up to that point.\n\n Args:\n latent_dim: The latent dimension of the batch FA model.\n true_mean: The mean of the true posterior distribution of the same model's weights. Of shape (weight_dim,).\n true_covar: The covariance matrix of the true posterior distribution of the same model's weights. Of shape\n (weight_dim, weight_dim).\n collect_epoch_start: The training epoch on which to start collecting weight vectors. Integer indexing starts\n from 1. Can also specify a float between 0 and 1, which corresponds to the fraction of total epochs which\n should pass before starting to collect weight vectors.\n eval_epoch_start: The training epoch on which to start evaluating the posterior. Integer indexing starts from 1.\n Can also specify a float between 0 and 1, which corresponds to the fraction of total epochs which should\n pass before starting to evaluate the posterior. 
Should be at least as large collect_epoch_start.\n eval_epoch_frequency: The number of epochs between each evaluation of the posterior.\n random_seed: The random seed used when fitting the FA model.\n \"\"\"\n\n def __init__(self, latent_dim: int, true_mean: Tensor, true_covar: Tensor,\n collect_epoch_start: Union[int, float] = 1, eval_epoch_start: Union[int, float] = 1,\n eval_epoch_frequency: int = 1, random_seed: int = 0):\n posterior = FactorAnalysis(n_components=latent_dim, svd_method='randomized', random_state=random_seed)\n super().__init__(posterior, true_mean, true_covar, collect_epoch_start=collect_epoch_start,\n eval_epoch_start=eval_epoch_start, eval_epoch_frequency=eval_epoch_frequency)\n\n def get_mean_and_covariance(self) -> (Tensor, Tensor):\n \"\"\"\n Fit a batch FA algorithm to the weight vectors and return the mean and covariance of the FA model.\n\n If no weight vectors have been collected yet, return zero tensors.\n\n Returns:\n mean: The mean of the posterior. Of shape (weight_dim,)\n covar: The covariance of the posterior. Of shape (weight_dim, weight_dim).\n \"\"\"\n W = np.vstack(self.weight_iterates)\n self.posterior.fit(W)\n mean = torch.from_numpy(self.posterior.mean_).float()\n covar = torch.from_numpy(self.posterior.get_covariance()).float()\n\n return mean, covar\n",
"id": "2480854",
"language": "Python",
"matching_score": 3.89359712600708,
"max_stars_count": 0,
"path": "experiments/utils/callbacks.py"
},
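A sketch of wiring OnlinePosteriorEvaluationCallback into a PyTorch Lightning Trainer, following the helper functions in tests/test_experiments/test_utils/test_callbacks.py above. As the callback docstring notes, it does not update the posterior itself; in the experiment scripts it is paired with WeightPosteriorCallback (imported in the next record). The random true_mean and true_covar below are placeholders.

import torch
from torch.utils.data import TensorDataset, DataLoader
from pytorch_lightning import Trainer
from swafa.models import FeedForwardNet
from swafa.fa import OnlineGradientFactorAnalysis
from swafa.posterior import ModelPosterior
from experiments.utils.callbacks import OnlinePosteriorEvaluationCallback

net = FeedForwardNet(input_dim=4, hidden_dims=[8, 8])
model_posterior = ModelPosterior(
    model=net,
    weight_posterior_class=OnlineGradientFactorAnalysis,
    weight_posterior_kwargs=dict(latent_dim=2),
)

weight_dim = model_posterior._get_weight_dimension()
true_mean = torch.randn(weight_dim)                  # placeholder "true" posterior mean
true_covar = torch.rand(weight_dim, weight_dim)      # placeholder "true" posterior covariance

callback = OnlinePosteriorEvaluationCallback(
    posterior=model_posterior.weight_posterior,
    true_mean=true_mean,
    true_covar=true_covar,
    eval_epoch_frequency=1,
)

dataset = TensorDataset(torch.randn(32, 4), torch.randn(32))
dataloader = DataLoader(dataset, batch_size=4, drop_last=True)
Trainer(max_epochs=2, callbacks=[callback]).fit(model_posterior.model, train_dataloader=dataloader)

print(callback.eval_epochs)
print(callback.posterior_wasserstein_distances)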
{
"content": "import os\nfrom pathlib import Path\nfrom typing import List, Optional\n\nimport torch\nfrom torch import Tensor\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch.optim import Optimizer\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import Callback\nimport pandas as pd\nimport click\nimport yaml\nfrom sklearn.preprocessing import StandardScaler\n\nfrom swafa.models import FeedForwardNet\nfrom swafa.callbacks import WeightPosteriorCallback\nfrom swafa.fa import OnlineGradientFactorAnalysis, OnlineEMFactorAnalysis\nfrom swafa.posterior import ModelPosterior\nfrom experiments.utils.callbacks import (\n OnlinePosteriorEvaluationCallback,\n BatchFactorAnalysisPosteriorEvaluationCallback,\n)\nfrom experiments.utils.metrics import compute_distance_between_matrices\nfrom experiments.utils.factory import OPTIMISER_FACTORY\n\n\ndef run_all_experiments(\n datasets: List[pd.DataFrame],\n dataset_labels: List[str],\n min_latent_dim: int,\n max_latent_dim: int,\n n_trials: int,\n model_optimiser: str,\n model_optimiser_kwargs: dict,\n n_epochs: int,\n batch_size: int,\n gradient_optimiser: str,\n gradient_optimiser_kwargs: dict,\n gradient_warm_up_time_steps: int,\n em_warm_up_time_steps: int,\n posterior_update_epoch_start: int,\n posterior_eval_epoch_frequency: int,\n precision_scaling_factor: float,\n) -> pd.DataFrame:\n \"\"\"\n Run experiments on the given datasets.\n\n For each dataset, train a linear model to predict the target variable via SGD. Use the model weight vectors sampled\n during SGD to estimate the posterior distribution of the weights via the sklearn batch factor analysis (FA)\n algorithm, online gradient FA and online expectation-maximisation (EM) FA. For each method, compute the distance\n between the true and estimated posterior. For each dataset, run experiments with the latent dimension of the FA\n models equal to min_latent_dim to max_latent_dim.\n\n Note: the posterior distribution depends on the reciprocal of the variance of the target variable and the precision\n of the prior on the weights. These are referred to as beta and alpha respectively. See [1] for more details on how\n they are calculated.\n\n Args:\n datasets: A list of datasets. Each dataset contains features and a target variable, where the target variable is\n in the final column.\n dataset_labels: A label for each of the datasets.\n min_latent_dim: The minimum latent dimension of the FA models.\n max_latent_dim: The maximum latent dimension of the FA models.\n n_trials: The number of trials to run for each experiment.\n model_optimiser: The name of the PyTorch optimiser used to train the linear models. Options are 'sgd' and\n 'adam'.\n model_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used to train the linear models.\n n_epochs: The number of epochs for which to train the linear models.\n batch_size: The batch size to use when training the linear models.\n gradient_optimiser: The name of the PyTorch optimiser used in the online gradient FA learning algorithm. 
Options\n are 'sgd' and 'adam'.\n gradient_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used in the online gradient FA learning\n algorithm.\n gradient_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model in the\n online gradient algorithm before updating the other parameters.\n em_warm_up_time_steps: The number of time steps on which to update the running means of the FA model in the\n online EM algorithm before updating the other parameters.\n posterior_update_epoch_start: The epoch on which to begin updating the estimated posterior distributions of the\n weights of the linear models.\n posterior_eval_epoch_frequency: The number of epochs between each evaluation of the estimated posteriors.\n precision_scaling_factor: The scaling factor used to compute the precision of the prior of the weights of the\n linear model. Full details in [1].\n\n Returns:\n The results of each experiment. The number of rows in the DataFrame is equal to\n n_datasets * (max_latent_dim - min_latent_dim + 1) * n_trials * n_epochs / posterior_eval_epoch_frequency.\n The DataFrame has the following columns:\n - epoch: (int) The training epoch on which the metrics were computed.\n - posterior_mean_distance_sklearn: (float) The Frobenius norm between the mean of the true posterior and the\n posterior estimated via the batch sklearn FA algorithm.\n - posterior_covar_distance_sklearn: (float) The Frobenius norm between the covariance matrix of the true\n posterior and the posterior estimated via the batch sklearn FA algorithm.\n - posterior_wasserstein_sklearn: (float) The 2-Wasserstein distance between the true posterior and the\n posterior estimated via the batch sklearn FA algorithm.\n - empirical_mean_distance_sklearn: (float) The Frobenius norm between the mean of the empirical distribution\n and the posterior estimated via the batch sklearn FA algorithm.\n - empirical_covar_distance_sklearn: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via the batch sklearn FA algorithm.\n - empirical_wasserstein_sklearn: (float) The 2-Wasserstein distance between the empirical distribution and\n the posterior estimated via the batch sklearn FA algorithm.\n - posterior_mean_distance_online_gradient: (float) The Frobenius norm between the mean of the true posterior\n and the posterior estimated via online gradient FA.\n - posterior_covar_distance_online_gradient: (float) The Frobenius norm between the covariance matrix of the\n true posterior and the posterior estimated via online gradient FA.\n - posterior_wasserstein_online_gradient: (float) The 2-Wasserstein distance between the true posterior and\n the posterior estimated via online gradient FA.\n - empirical_mean_distance_online_gradient: (float) The Frobenius norm between the mean of the empirical\n distribution and the posterior estimated via online gradient FA.\n - empirical_covar_distance_online_gradient: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via online gradient FA.\n - empirical_wasserstein_online_gradient: (float) The 2-Wasserstein distance between the empirical\n distribution and the posterior estimated via online gradient FA.\n - posterior_mean_distance_online_em: (float) The Frobenius norm between the mean of the true posterior and\n the posterior estimated via online EM FA.\n - posterior_covar_distance_online_em: (float) The Frobenius norm between the 
covariance matrix of the true\n posterior and the posterior estimated via online EM FA.\n - posterior_wasserstein_online_em: (float) The 2-Wasserstein distance between the true posterior and the\n posterior estimated via online EM FA.\n - empirical_mean_distance_online_em: (float) The Frobenius norm between the mean of the empirical\n distribution and the posterior estimated via online EM FA.\n - empirical_covar_distance_online_em: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via online EM FA.\n - empirical_wasserstein_online_em: (float) The 2-Wasserstein distance between the true posterior and the\n empirical distribution estimated via online EM FA.\n - empirical_mean_norm: (float) The Frobenius norm of the mean vector of the empirical distribution.\n - empirical_covar_norm: (float) The Frobenius norm of the covariance matrix of the empirical distribution.\n - latent_dim: (int) The latent dimension of the FA models.\n - trial: (int) The index of the trial within the experiment.\n - posterior_mean_norm: (float) The Frobenius norm of the mean vector of the true posterior.\n - posterior_covar_norm: (float) The Frobenius norm of the covariance matrix of the true posterior.\n - alpha: (float) The precision of the prior of the weights of the linear model.\n - beta: (float) The reciprocal of the variance of the dataset's target variable.\n - dataset: (str) The name of the dataset.\n - n_samples: (int) The number of samples in the dataset.\n - observation_dim: (int) The number of features in the dataset.\n - learning_rate: (float) The learning rate of the PyTorch optimiser used to train the linear models.\n\n References:\n [1] <NAME>. Extending the Bayesian Deep Learning Method MultiSWAG. MSc Thesis, University of Edinburgh,\n 2021.\n \"\"\"\n model_optimiser_kwargs = model_optimiser_kwargs or dict(lr=0.001)\n\n results = []\n for label, dataset in zip(dataset_labels, datasets):\n print(f'Running experiments on {label} dataset...')\n print('-' * 100)\n\n dataset_results = run_dataset_experiments(\n dataset=dataset,\n dataset_label=label,\n min_latent_dim=min_latent_dim,\n max_latent_dim=max_latent_dim,\n n_trials=n_trials,\n model_optimiser=model_optimiser,\n model_optimiser_kwargs=model_optimiser_kwargs,\n n_epochs=n_epochs,\n batch_size=batch_size,\n gradient_optimiser=gradient_optimiser,\n gradient_optimiser_kwargs=gradient_optimiser_kwargs,\n gradient_warm_up_time_steps=gradient_warm_up_time_steps,\n em_warm_up_time_steps=em_warm_up_time_steps,\n posterior_update_epoch_start=posterior_update_epoch_start,\n posterior_eval_epoch_frequency=posterior_eval_epoch_frequency,\n precision_scaling_factor=precision_scaling_factor,\n )\n\n results.append(dataset_results)\n print('-' * 100)\n\n return pd.concat(results, ignore_index=True)\n\n\ndef run_dataset_experiments(\n dataset: pd.DataFrame,\n dataset_label: str,\n min_latent_dim: int,\n max_latent_dim: int,\n n_trials: int,\n model_optimiser: str,\n model_optimiser_kwargs: dict,\n n_epochs: int,\n batch_size: int,\n gradient_optimiser: str,\n gradient_optimiser_kwargs: dict,\n gradient_warm_up_time_steps: int,\n em_warm_up_time_steps: int,\n posterior_update_epoch_start: int,\n posterior_eval_epoch_frequency: int,\n precision_scaling_factor: float,\n) -> pd.DataFrame:\n \"\"\"\n Run experiments on the given dataset.\n\n Train a linear model to predict the target variable via SGD. 
Use the model weight vectors sampled\n during SGD to estimate the posterior distribution of the weights via the sklearn batch factor analysis (FA)\n algorithm, online gradient FA and online expectation-maximisation (EM) FA. For each method, compute the distance\n between the true and estimated posterior. Run experiments with the latent dimension of the FA models equal to\n min_latent_dim to max_latent_dim.\n\n Note: the posterior distribution depends on the reciprocal of the variance of the target variable and the precision\n of the prior on the weights. These are referred to as beta and alpha respectively. See [1] for more details on how\n they are calculated.\n\n Args:\n dataset: Contains features and a target variable, where the target variable is in the final column.\n dataset_label: A label for the dataset.\n min_latent_dim: The minimum latent dimension of the FA models.\n max_latent_dim: The maximum latent dimension of the FA models.\n n_trials: The number of trials to run for each experiment.\n model_optimiser: The name of the PyTorch optimiser used to train the linear models. Options are 'sgd' and\n 'adam'.\n model_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used to train the linear models.\n n_epochs: The number of epochs for which to train the linear models.\n batch_size: The batch size to use when training the linear models.\n gradient_optimiser: The name of the PyTorch optimiser used in the online gradient FA learning algorithm. Options\n are 'sgd' and 'adam'.\n gradient_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used in the online gradient FA learning\n algorithm.\n gradient_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model in the\n online gradient algorithm before updating the other parameters.\n em_warm_up_time_steps: The number of time steps on which to update the running means of the FA model in the\n online EM algorithm before updating the other parameters.\n posterior_update_epoch_start: The epoch on which to begin updating the estimated posterior distributions of the\n weights of the linear models.\n posterior_eval_epoch_frequency: The number of epochs between each evaluation of the estimated posteriors.\n precision_scaling_factor: The scaling factor used to compute the precision of the prior of the weights of the\n linear model. Full details in [1].\n\n Returns:\n The results of each experiment. 
The number of rows in the DataFrame is equal to\n (max_latent_dim - min_latent_dim + 1) * n_trials * n_epochs / posterior_eval_epoch_frequency.\n The DataFrame has the following columns:\n - epoch: (int) The training epoch on which the metrics were computed.\n - posterior_mean_distance_sklearn: (float) The Frobenius norm between the mean of the true posterior and the\n posterior estimated via the batch sklearn FA algorithm.\n - posterior_covar_distance_sklearn: (float) The Frobenius norm between the covariance matrix of the true\n posterior and the posterior estimated via the batch sklearn FA algorithm.\n - posterior_wasserstein_sklearn: (float) The 2-Wasserstein distance between the true posterior and the\n posterior estimated via the batch sklearn FA algorithm.\n - empirical_mean_distance_sklearn: (float) The Frobenius norm between the mean of the empirical distribution\n and the posterior estimated via the batch sklearn FA algorithm.\n - empirical_covar_distance_sklearn: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via the batch sklearn FA algorithm.\n - empirical_wasserstein_sklearn: (float) The 2-Wasserstein distance between the empirical distribution and\n the posterior estimated via the batch sklearn FA algorithm.\n - posterior_mean_distance_online_gradient: (float) The Frobenius norm between the mean of the true posterior\n and the posterior estimated via online gradient FA.\n - posterior_covar_distance_online_gradient: (float) The Frobenius norm between the covariance matrix of the\n true posterior and the posterior estimated via online gradient FA.\n - posterior_wasserstein_online_gradient: (float) The 2-Wasserstein distance between the true posterior and\n the posterior estimated via online gradient FA.\n - empirical_mean_distance_online_gradient: (float) The Frobenius norm between the mean of the empirical\n distribution and the posterior estimated via online gradient FA.\n - empirical_covar_distance_online_gradient: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via online gradient FA.\n - empirical_wasserstein_online_gradient: (float) The 2-Wasserstein distance between the empirical\n distribution and the posterior estimated via online gradient FA.\n - posterior_mean_distance_online_em: (float) The Frobenius norm between the mean of the true posterior and\n the posterior estimated via online EM FA.\n - posterior_covar_distance_online_em: (float) The Frobenius norm between the covariance matrix of the true\n posterior and the posterior estimated via online EM FA.\n - posterior_wasserstein_online_em: (float) The 2-Wasserstein distance between the true posterior and the\n posterior estimated via online EM FA.\n - empirical_mean_distance_online_em: (float) The Frobenius norm between the mean of the empirical\n distribution and the posterior estimated via online EM FA.\n - empirical_covar_distance_online_em: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via online EM FA.\n - empirical_wasserstein_online_em: (float) The 2-Wasserstein distance between the true posterior and the\n empirical distribution estimated via online EM FA.\n - empirical_mean_norm: (float) The Frobenius norm of the mean vector of the empirical distribution.\n - empirical_covar_norm: (float) The Frobenius norm of the covariance matrix of the empirical distribution.\n - latent_dim: (int) The latent 
dimension of the FA models.\n - trial: (int) The index of the trial within the experiment.\n - posterior_mean_norm: (float) The Frobenius norm of the mean vector of the true posterior.\n - posterior_covar_norm: (float) The Frobenius norm of the covariance matrix of the true posterior.\n - alpha: (float) The precision of the prior of the weights of the linear model.\n - beta: (float) The reciprocal of the variance of the dataset's target variable.\n - dataset: (str) The name of the dataset.\n - n_samples: (int) The number of samples in the dataset.\n - observation_dim: (int) The number of features in the dataset.\n - learning_rate: (float) The learning rate of the PyTorch optimiser used to train the linear models.\n\n References:\n [1] <NAME>. Extending the Bayesian Deep Learning Method MultiSWAG. MSc Thesis, University of Edinburgh,\n 2021.\n \"\"\"\n X, y = get_features_and_targets(dataset)\n true_posterior_mean, true_posterior_covar, alpha, beta = compute_true_posterior(\n X, y, alpha_scaling_factor=precision_scaling_factor,\n )\n observation_dim = X.shape[1]\n\n results = []\n for latent_dim in range(min_latent_dim, max_latent_dim + 1):\n if latent_dim > observation_dim:\n raise ValueError(f'latent dimension should be at most observation dimension ({observation_dim}), '\n f'not {latent_dim}')\n\n print(f'Using a posterior with latent dimension equal to {latent_dim} and observation dimension equal to '\n f'{observation_dim}...')\n print('-' * 100)\n\n for i_trial in range(n_trials):\n print(f'Running trial {i_trial + 1} of {n_trials}...')\n\n trial_results = run_experiment_trial(\n X=X,\n y=y,\n true_posterior_mean=true_posterior_mean,\n true_posterior_covar=true_posterior_covar,\n weight_decay=alpha / beta,\n model_optimiser=model_optimiser,\n model_optimiser_kwargs=model_optimiser_kwargs,\n n_epochs=n_epochs,\n batch_size=batch_size,\n posterior_latent_dim=latent_dim,\n gradient_optimiser=gradient_optimiser,\n gradient_optimiser_kwargs=gradient_optimiser_kwargs,\n gradient_warm_up_time_steps=gradient_warm_up_time_steps,\n em_warm_up_time_steps=em_warm_up_time_steps,\n posterior_update_epoch_start=posterior_update_epoch_start,\n posterior_eval_epoch_frequency=posterior_eval_epoch_frequency,\n model_random_seed=i_trial,\n posterior_random_seed=i_trial + 1,\n )\n\n trial_results['latent_dim'] = latent_dim\n trial_results['trial'] = i_trial + 1\n\n results.append(trial_results)\n\n print('-' * 100)\n print('-' * 100)\n\n results = pd.concat(results, ignore_index=True)\n\n results['posterior_mean_norm'] = compute_distance_between_matrices(\n true_posterior_mean, torch.zeros_like(true_posterior_mean)\n )\n results['posterior_covar_norm'] = compute_distance_between_matrices(\n true_posterior_covar, torch.zeros_like(true_posterior_covar)\n )\n results['alpha'] = alpha\n results['beta'] = beta\n results['dataset'] = dataset_label\n results['n_samples'] = len(X)\n results['observation_dim'] = observation_dim\n results['learning_rate'] = model_optimiser_kwargs['lr']\n\n return results\n\n\ndef run_experiment_trial(\n X: Tensor,\n y: Tensor,\n true_posterior_mean: Tensor,\n true_posterior_covar: Tensor,\n weight_decay: float,\n model_optimiser: str,\n model_optimiser_kwargs: dict,\n n_epochs: int,\n batch_size: int,\n posterior_latent_dim: int,\n gradient_optimiser: str,\n gradient_optimiser_kwargs: dict,\n gradient_warm_up_time_steps: int,\n em_warm_up_time_steps: int,\n posterior_update_epoch_start: int,\n posterior_eval_epoch_frequency: int,\n model_random_seed: int,\n posterior_random_seed: 
int,\n) -> pd.DataFrame:\n \"\"\"\n Run a single experiment trial on the given data with the given parameters.\n\n Train a linear model to predict the target variable via SGD. Use the model weight vectors sampled\n during SGD to estimate the posterior distribution of the weights via the sklearn batch factor analysis (FA)\n algorithm, online gradient FA and online expectation-maximisation (EM) FA. For each method, compute the distance\n between the true and estimated posterior.\n\n Args:\n X: The features. Of shape (n_samples, n_features).\n y: The targets. Of shape (n_samples,).\n true_posterior_mean: The mean of the true posterior. Of shape (n_features,).\n true_posterior_covar: The covariance matrix of the true posterior. Of shape (n_features, n_features).\n weight_decay: The L2 regularisation strength corresponding to the target variable noise and the precision of the\n prior of the weights of the linear model.\n model_optimiser: The name of the PyTorch optimiser used to train the linear model. Options are 'sgd' and 'adam'.\n model_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used to train the linear model.\n n_epochs: The number of epochs for which to train the linear model.\n batch_size: The batch size to use when training the linear model.\n posterior_latent_dim: The latent dimension of the estimated posterior distributions.\n gradient_optimiser: The name of the PyTorch optimiser used in the online gradient FA learning algorithm. Options\n are 'sgd' and 'adam'.\n gradient_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used in the online gradient FA learning\n algorithm.\n gradient_warm_up_time_steps: The number of time steps on which to update the running mean of the FA model in the\n online gradient algorithm before updating the other parameters.\n em_warm_up_time_steps: The number of time steps on which to update the running means of the FA model in the\n online EM algorithm before updating the other parameters.\n posterior_update_epoch_start: The epoch on which to begin updating the estimated posterior distributions of the\n weights of the linear model.\n posterior_eval_epoch_frequency: The number of epochs between each evaluation of the estimated posteriors.\n model_random_seed: The random seed used to initialise the linear model.\n posterior_random_seed: The random seed used to initialise the estimated posterior distributions.\n\n Returns:\n The results from each evaluation epoch of the experiment. 
The number of rows in the DataFrame is equal to\n n_epochs / posterior_eval_epoch_frequency.\n The DataFrame has the following columns:\n - epoch: (int) The training epoch on which the metrics were computed.\n - posterior_mean_distance_sklearn: (float) The Frobenius norm between the mean of the true posterior and the\n posterior estimated via the batch sklearn FA algorithm.\n - posterior_covar_distance_sklearn: (float) The Frobenius norm between the covariance matrix of the true\n posterior and the posterior estimated via the batch sklearn FA algorithm.\n - posterior_wasserstein_sklearn: (float) The 2-Wasserstein distance between the true posterior and the\n posterior estimated via the batch sklearn FA algorithm.\n - empirical_mean_distance_sklearn: (float) The Frobenius norm between the mean of the empirical distribution\n and the posterior estimated via the batch sklearn FA algorithm.\n - empirical_covar_distance_sklearn: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via the batch sklearn FA algorithm.\n - empirical_wasserstein_sklearn: (float) The 2-Wasserstein distance between the empirical distribution and\n the posterior estimated via the batch sklearn FA algorithm.\n - posterior_mean_distance_online_gradient: (float) The Frobenius norm between the mean of the true posterior\n and the posterior estimated via online gradient FA.\n - posterior_covar_distance_online_gradient: (float) The Frobenius norm between the covariance matrix of the\n true posterior and the posterior estimated via online gradient FA.\n - posterior_wasserstein_online_gradient: (float) The 2-Wasserstein distance between the true posterior and\n the posterior estimated via online gradient FA.\n - empirical_mean_distance_online_gradient: (float) The Frobenius norm between the mean of the empirical\n distribution and the posterior estimated via online gradient FA.\n - empirical_covar_distance_online_gradient: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via online gradient FA.\n - empirical_wasserstein_online_gradient: (float) The 2-Wasserstein distance between the empirical\n distribution and the posterior estimated via online gradient FA.\n - posterior_mean_distance_online_em: (float) The Frobenius norm between the mean of the true posterior and\n the posterior estimated via online EM FA.\n - posterior_covar_distance_online_em: (float) The Frobenius norm between the covariance matrix of the true\n posterior and the posterior estimated via online EM FA.\n - posterior_wasserstein_online_em: (float) The 2-Wasserstein distance between the true posterior and the\n posterior estimated via online EM FA.\n - empirical_mean_distance_online_em: (float) The Frobenius norm between the mean of the empirical\n distribution and the posterior estimated via online EM FA.\n - empirical_covar_distance_online_em: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via online EM FA.\n - empirical_wasserstein_online_em: (float) The 2-Wasserstein distance between the true posterior and the\n empirical distribution estimated via online EM FA.\n - empirical_mean_norm: (float) The Frobenius norm of the mean vector of the empirical distribution.\n - empirical_covar_norm: (float) The Frobenius norm of the covariance matrix of the empirical distribution.\n \"\"\"\n model_optimiser_kwargs['weight_decay'] = weight_decay\n\n 
gradient_weight_posterior_kwargs = dict(\n latent_dim=posterior_latent_dim,\n optimiser=OPTIMISER_FACTORY[gradient_optimiser],\n optimiser_kwargs=gradient_optimiser_kwargs,\n n_warm_up_time_steps=gradient_warm_up_time_steps,\n random_seed=posterior_random_seed,\n )\n\n em_weight_posterior_kwargs = dict(\n latent_dim=posterior_latent_dim,\n n_warm_up_time_steps=em_warm_up_time_steps,\n random_seed=posterior_random_seed,\n )\n\n (\n model,\n gradient_posterior_update_callback,\n em_posterior_update_callback,\n sklearn_posterior_eval_callback,\n gradient_posterior_eval_callback,\n em_posterior_eval_callback,\n ) = build_model_and_callbacks(\n X=X,\n true_posterior_mean=true_posterior_mean,\n true_posterior_covar=true_posterior_covar,\n model_optimiser_class=OPTIMISER_FACTORY[model_optimiser],\n model_optimiser_kwargs=model_optimiser_kwargs,\n posterior_latent_dim=posterior_latent_dim,\n gradient_weight_posterior_kwargs=gradient_weight_posterior_kwargs,\n em_weight_posterior_kwargs=em_weight_posterior_kwargs,\n posterior_update_epoch_start=posterior_update_epoch_start,\n posterior_eval_epoch_frequency=posterior_eval_epoch_frequency,\n model_random_seed=model_random_seed,\n )\n\n callbacks = [\n gradient_posterior_update_callback,\n em_posterior_update_callback,\n sklearn_posterior_eval_callback,\n gradient_posterior_eval_callback,\n em_posterior_eval_callback,\n ]\n\n fit_model(\n X=X,\n y=y,\n model=model,\n callbacks=callbacks,\n n_epochs=n_epochs,\n batch_size=batch_size,\n )\n\n results = collate_callback_results(\n sklearn_posterior_eval_callback,\n gradient_posterior_eval_callback,\n em_posterior_eval_callback,\n )\n\n empirical_mean, empirical_covar = sklearn_posterior_eval_callback.get_empirical_mean_and_covariance()\n results['empirical_mean_norm'] = compute_distance_between_matrices(\n empirical_mean, torch.zeros_like(empirical_mean)\n )\n results['empirical_covar_norm'] = compute_distance_between_matrices(\n empirical_covar, torch.zeros_like(empirical_covar)\n )\n\n return results\n\n\ndef get_features_and_targets(dataset: pd.DataFrame) -> (Tensor, Tensor):\n \"\"\"\n Separate the features and target variable from the given dataset.\n\n Scale each features by subtracting its mean and dividing by its standard deviation.\n\n Args:\n dataset: Contains features and a target variable, where the target variable is in the final column. Of shape\n (n_samples, n_features + 1).\n\n Returns:\n X: The scaled features. Of shape (n_samples, n_features).\n y: The targets. Of shape (n_samples,).\n \"\"\"\n X = dataset.iloc[:, :-1].values\n X = torch.from_numpy(StandardScaler().fit_transform(X)).float()\n y = torch.from_numpy(dataset.iloc[:, -1].values).float()\n return X, y\n\n\ndef compute_true_posterior(X: Tensor, y: Tensor, alpha: Optional[float] = None, beta: Optional[float] = None,\n alpha_scaling_factor: float = 0.1) -> (Tensor, Tensor, float, float):\n \"\"\"\n Compute mean and covariance of the true posterior distribution of the weights of a linear model, given the data.\n\n Full derivation given in [1].\n\n Args:\n X: The features. Of shape (n_samples, n_features).\n y: The targets. Of shape (n_samples,).\n alpha: The precision of the prior of the weights of the linear model. If None, will be set automatically\n according to [1].\n beta: The reciprocal of the variance of the dataset's target variable. If None, will be computed from the\n observed data.\n alpha_scaling_factor: The factor used to compute alpha, if alpha is None.\n\n Returns:\n mu: The mean of the true posterior. 
Of shape (n_features,).\n S: The covariance matrix of the true posterior. Of shape (n_features, n_features).\n alpha: The precision of the prior of the weights of the linear model. If not None, will be same as input.\n beta: The reciprocal of the variance of the dataset's target variable. If not None, will be same as input.\n\n References:\n [1] <NAME>. Extending the Bayesian Deep Learning Method MultiSWAG. MSc Thesis, University of Edinburgh,\n 2021.\n \"\"\"\n beta = beta if beta is not None else compute_beta(y)\n S, alpha = compute_true_posterior_covar(X, beta, alpha=alpha, alpha_scaling_factor=alpha_scaling_factor)\n m = compute_true_posterior_mean(X, y, beta, S)\n return m, S, alpha, beta\n\n\ndef compute_beta(y: Tensor) -> float:\n \"\"\"\n Compute the reciprocal of the variance of the target variable.\n\n Args:\n y: The target variable. Of shape (n_samples,).\n\n Returns:\n The reciprocal of the variance of the target variable. Often known as beta.\n \"\"\"\n return (1 / torch.var(y)).item()\n\n\ndef compute_true_posterior_covar(X: Tensor, beta: float, alpha: Optional[float] = None,\n alpha_scaling_factor: float = 0.1) -> (Tensor, float):\n \"\"\"\n Compute the covariance of the true posterior distribution of the weights of a linear model, given the data.\n\n This is the inverse of\n\n alpha * I + beta * sum_n(X[n] * X[n]^T).\n\n If alpha is None, it will be set to\n\n alpha_scaling_factor * mean(diag(beta * sum_n(X[n] * X[n]^T))).\n\n Full derivation given in [1].\n\n Args:\n X: The features. Of shape (n_samples, n_features).\n beta: The reciprocal of the variance of the dataset's target variable.\n alpha: The precision of the prior of the weights of the linear model. If None, will be computed according to the\n equation above.\n alpha_scaling_factor: The factor used to compute alpha in the equation above. Only used if alpha is None.\n\n Returns:\n S: The covariance matrix of the true posterior. Of shape (n_features, n_features).\n alpha: The precision of the prior of the weights of the linear model. If not None, will be same as input.\n\n References:\n [1] <NAME>. Extending the Bayesian Deep Learning Method MultiSWAG. MSc Thesis, University of Edinburgh,\n 2021.\n \"\"\"\n B = beta * torch.einsum('ij,ik->jk', X, X)\n alpha = alpha if alpha is not None else alpha_scaling_factor * torch.diag(B).mean().item()\n A = alpha * torch.eye(len(B)) + B\n S = torch.linalg.inv(A)\n return S, alpha\n\n\ndef compute_true_posterior_mean(X: Tensor, y: Tensor, beta: float, S: Tensor) -> Tensor:\n \"\"\"\n Compute the mean of the true posterior distribution of the weights of a linear model, given the data.\n\n Full derivation given in [1].\n\n Args:\n X: The features. Of shape (n_samples, n_features).\n y: The targets. Of shape (n_samples,).\n beta: The reciprocal of the variance of the dataset's target variable.\n S: The covariance matrix of the true posterior. Of shape (n_features, n_features).\n\n Returns:\n The mean of the true posterior. Of shape (n_features,).\n\n References:\n [1] <NAME>. Extending the Bayesian Deep Learning Method MultiSWAG. 
MSc Thesis, University of Edinburgh,\n 2021.\n \"\"\"\n b = beta * (y.reshape(-1, 1) * X).sum(dim=0, keepdims=True).t()\n return S.mm(b).squeeze()\n\n\ndef build_model_and_callbacks(\n X: Tensor,\n true_posterior_mean: Tensor,\n true_posterior_covar: Tensor,\n model_optimiser_class: Optimizer,\n model_optimiser_kwargs: dict,\n posterior_latent_dim: int,\n gradient_weight_posterior_kwargs: dict,\n em_weight_posterior_kwargs: dict,\n posterior_update_epoch_start: int,\n posterior_eval_epoch_frequency: int,\n model_random_seed: int,\n) -> (FeedForwardNet, WeightPosteriorCallback, WeightPosteriorCallback, BatchFactorAnalysisPosteriorEvaluationCallback,\n OnlinePosteriorEvaluationCallback, OnlinePosteriorEvaluationCallback):\n \"\"\"\n Build a linear model and callbacks which should be called during training to update and evaluate the weight\n posteriors.\n\n Args:\n X: The features. Of shape (n_samples, n_features).\n true_posterior_mean: The mean of the true posterior. Of shape (n_features,).\n true_posterior_covar: The covariance matrix of the true posterior. Of shape (n_features, n_features).\n model_optimiser_class: The class of the PyTorch optimiser used to train the linear model.\n model_optimiser_kwargs: Keyword arguments for the PyTorch optimiser used to train the linear model.\n posterior_latent_dim: The latent dimension of the estimated posterior distributions.\n gradient_weight_posterior_kwargs: Keyword arguments for the instance of OnlineGradientFactorAnalysis used to\n estimate the posterior.\n em_weight_posterior_kwargs: Keyword arguments for the instance of OnlineEMFactorAnalysis used to estimate the\n posterior.\n posterior_update_epoch_start: The epoch on which to begin updating the estimated posterior distributions of the\n weights of the linear models.\n posterior_eval_epoch_frequency: The number of epochs between each evaluation of the estimated posteriors.\n model_random_seed: The random seed used to initialise the linear model.\n\n Returns:\n model: An linear model with the same dimension as the input data. 
Note that a bias term will NOT be added to the\n model.\n gradient_posterior_update_callback: Callbacks used to update the OnlineGradientFactorAnalysis weight posterior.\n em_posterior_update_callback: Callbacks used to update the OnlineEMFactorAnalysis weight posterior.\n sklearn_posterior_eval_callback: Callback used to evaluate the sklearn FactorAnalysis weight posterior.\n gradient_posterior_eval_callback: Callback used to evaluate the OnlineGradientFactorAnalysis weight posterior.\n em_posterior_eval_callback: Callback used to evaluate the OnlineEMFactorAnalysis weight posterior.\n \"\"\"\n model = FeedForwardNet(\n input_dim=X.shape[1],\n bias=False,\n optimiser_class=model_optimiser_class,\n optimiser_kwargs=model_optimiser_kwargs,\n random_seed=model_random_seed,\n )\n\n gradient_posterior = ModelPosterior(\n model=model,\n weight_posterior_class=OnlineGradientFactorAnalysis,\n weight_posterior_kwargs=gradient_weight_posterior_kwargs,\n )\n\n em_posterior = ModelPosterior(\n model=model,\n weight_posterior_class=OnlineEMFactorAnalysis,\n weight_posterior_kwargs=em_weight_posterior_kwargs,\n )\n\n gradient_posterior_update_callback = WeightPosteriorCallback(\n posterior=gradient_posterior.weight_posterior,\n update_epoch_start=posterior_update_epoch_start,\n )\n\n em_posterior_update_callback = WeightPosteriorCallback(\n posterior=em_posterior.weight_posterior,\n update_epoch_start=posterior_update_epoch_start,\n )\n\n sklearn_posterior_eval_callback = BatchFactorAnalysisPosteriorEvaluationCallback(\n latent_dim=posterior_latent_dim,\n true_mean=true_posterior_mean,\n true_covar=true_posterior_covar,\n collect_epoch_start=posterior_update_epoch_start,\n eval_epoch_start=posterior_update_epoch_start,\n eval_epoch_frequency=posterior_eval_epoch_frequency,\n random_seed=model_random_seed,\n )\n\n gradient_posterior_eval_callback = OnlinePosteriorEvaluationCallback(\n posterior=gradient_posterior.weight_posterior,\n true_mean=true_posterior_mean,\n true_covar=true_posterior_covar,\n collect_epoch_start=posterior_update_epoch_start,\n eval_epoch_start=posterior_update_epoch_start,\n eval_epoch_frequency=posterior_eval_epoch_frequency,\n )\n\n em_posterior_eval_callback = OnlinePosteriorEvaluationCallback(\n posterior=em_posterior.weight_posterior,\n true_mean=true_posterior_mean,\n true_covar=true_posterior_covar,\n collect_epoch_start=posterior_update_epoch_start,\n eval_epoch_start=posterior_update_epoch_start,\n eval_epoch_frequency=posterior_eval_epoch_frequency,\n )\n\n return (\n model,\n gradient_posterior_update_callback,\n em_posterior_update_callback,\n sklearn_posterior_eval_callback,\n gradient_posterior_eval_callback,\n em_posterior_eval_callback,\n )\n\n\ndef fit_model(X: Tensor, y: Tensor, model: FeedForwardNet, callbacks: List[Callback], n_epochs: int, batch_size: int):\n \"\"\"\n Fit the given model on the given data.\n\n Args:\n X: The features. Of shape (n_samples, n_features).\n y: The targets. 
Of shape (n_samples,).\n model: The model which is to be fit to the data.\n callbacks: Any callbacks which should be called during training.\n n_epochs: The number of epochs for which to train the model.\n batch_size: The batch size to use when training the model.\n \"\"\"\n dataset = TensorDataset(X, y)\n dataloader = DataLoader(dataset, batch_size=batch_size, drop_last=False, shuffle=True)\n\n trainer = Trainer(max_epochs=n_epochs, callbacks=callbacks, progress_bar_refresh_rate=0)\n trainer.fit(model, train_dataloader=dataloader)\n\n\ndef collate_callback_results(sklearn_posterior_eval_callback: BatchFactorAnalysisPosteriorEvaluationCallback,\n gradient_posterior_eval_callback: OnlinePosteriorEvaluationCallback,\n em_posterior_eval_callback: OnlinePosteriorEvaluationCallback) -> pd.DataFrame:\n \"\"\"\n Collate the results from the posterior evaluations callbacks into a single DataFrame.\n\n Args:\n sklearn_posterior_eval_callback: Callback used to evaluate the sklearn FactorAnalysis weight posterior.\n gradient_posterior_eval_callback: Callback used to evaluate the OnlineGradientFactorAnalysis weight posterior.\n em_posterior_eval_callback: Callback used to evaluate the OnlineEMFactorAnalysis weight posterior.\n\n Returns:\n The callback results from each evaluation epoch. The number of rows in the DataFrame is equal to\n n_epochs / posterior_eval_epoch_frequency.\n The DataFrame has the following columns:\n - epoch: (int) The training epoch on which the metrics were computed.\n - posterior_mean_distance_sklearn: (float) The Frobenius norm between the mean of the true posterior and the\n posterior estimated via the batch sklearn FA algorithm.\n - posterior_covar_distance_sklearn: (float) The Frobenius norm between the covariance matrix of the true\n posterior and the posterior estimated via the batch sklearn FA algorithm.\n - posterior_wasserstein_sklearn: (float) The 2-Wasserstein distance between the true posterior and the\n posterior estimated via the batch sklearn FA algorithm.\n - empirical_mean_distance_sklearn: (float) The Frobenius norm between the mean of the empirical distribution\n and the posterior estimated via the batch sklearn FA algorithm.\n - empirical_covar_distance_sklearn: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via the batch sklearn FA algorithm.\n - empirical_wasserstein_sklearn: (float) The 2-Wasserstein distance between the empirical distribution and\n the posterior estimated via the batch sklearn FA algorithm.\n - posterior_mean_distance_online_gradient: (float) The Frobenius norm between the mean of the true posterior\n and the posterior estimated via online gradient FA.\n - posterior_covar_distance_online_gradient: (float) The Frobenius norm between the covariance matrix of the\n true posterior and the posterior estimated via online gradient FA.\n - posterior_wasserstein_online_gradient: (float) The 2-Wasserstein distance between the true posterior and\n the posterior estimated via online gradient FA.\n - empirical_mean_distance_online_gradient: (float) The Frobenius norm between the mean of the empirical\n distribution and the posterior estimated via online gradient FA.\n - empirical_covar_distance_online_gradient: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via online gradient FA.\n - empirical_wasserstein_online_gradient: (float) The 2-Wasserstein distance between the empirical\n distribution and the posterior 
estimated via online gradient FA.\n            - posterior_mean_distance_online_em: (float) The Frobenius norm between the mean of the true posterior and\n              the posterior estimated via online EM FA.\n            - posterior_covar_distance_online_em: (float) The Frobenius norm between the covariance matrix of the true\n              posterior and the posterior estimated via online EM FA.\n            - posterior_wasserstein_online_em: (float) The 2-Wasserstein distance between the true posterior and the\n              posterior estimated via online EM FA.\n            - empirical_mean_distance_online_em: (float) The Frobenius norm between the mean of the empirical\n              distribution and the posterior estimated via online EM FA.\n            - empirical_covar_distance_online_em: (float) The Frobenius norm between the covariance matrix of the\n              empirical distribution and the posterior estimated via online EM FA.\n            - empirical_wasserstein_online_em: (float) The 2-Wasserstein distance between the true posterior and the\n              empirical distribution estimated via online EM FA.\n    \"\"\"\n    results = []\n    for i, (epoch_sklearn, epoch_gradient, epoch_em) in enumerate(zip(sklearn_posterior_eval_callback.eval_epochs,\n                                                                      gradient_posterior_eval_callback.eval_epochs,\n                                                                      em_posterior_eval_callback.eval_epochs)):\n        if (epoch_sklearn != epoch_gradient) or (epoch_sklearn != epoch_em):\n            raise RuntimeError(f'The evaluation epochs of the three evaluation callbacks must be equal, not '\n                               f'{epoch_sklearn}, {epoch_gradient} and {epoch_em}')\n\n        results.append(dict(\n            epoch=epoch_sklearn,\n            posterior_mean_distance_sklearn=sklearn_posterior_eval_callback.posterior_distances_from_mean[i],\n            posterior_covar_distance_sklearn=sklearn_posterior_eval_callback.posterior_distances_from_covar[i],\n            posterior_wasserstein_sklearn=sklearn_posterior_eval_callback.posterior_wasserstein_distances[i],\n            empirical_mean_distance_sklearn=sklearn_posterior_eval_callback.empirical_distances_from_mean[i],\n            empirical_covar_distance_sklearn=sklearn_posterior_eval_callback.empirical_distances_from_covar[i],\n            empirical_wasserstein_sklearn=sklearn_posterior_eval_callback.empirical_wasserstein_distances[i],\n            posterior_mean_distance_online_gradient=gradient_posterior_eval_callback.posterior_distances_from_mean[i],\n            posterior_covar_distance_online_gradient=gradient_posterior_eval_callback.posterior_distances_from_covar[i],\n            posterior_wasserstein_online_gradient=gradient_posterior_eval_callback.posterior_wasserstein_distances[i],\n            empirical_mean_distance_online_gradient=gradient_posterior_eval_callback.empirical_distances_from_mean[i],\n            empirical_covar_distance_online_gradient=gradient_posterior_eval_callback.empirical_distances_from_covar[i],\n            empirical_wasserstein_online_gradient=gradient_posterior_eval_callback.empirical_wasserstein_distances[i],\n            posterior_mean_distance_online_em=em_posterior_eval_callback.posterior_distances_from_mean[i],\n            posterior_covar_distance_online_em=em_posterior_eval_callback.posterior_distances_from_covar[i],\n            posterior_wasserstein_online_em=em_posterior_eval_callback.posterior_wasserstein_distances[i],\n            empirical_mean_distance_online_em=em_posterior_eval_callback.empirical_distances_from_mean[i],\n            empirical_covar_distance_online_em=em_posterior_eval_callback.empirical_distances_from_covar[i],\n            empirical_wasserstein_online_em=em_posterior_eval_callback.empirical_wasserstein_distances[i],\n        ))\n\n    return pd.DataFrame(results)\n\n\n@click.command()\n@click.option('--boston-housing-input-path', type=str, help='The parquet file path to load the Boston Housing 
dataset')\n@click.option('--yacht-hydrodynamics-input-path', type=str, help='The parquet file path to load the Yacht '\n 'Hydrodynamics dataset')\n@click.option('--concrete-strength-input-path', type=str, help='The parquet file path to load the Concrete '\n 'Compressive Strength dataset')\n@click.option('--energy-efficiency-input-path', type=str, help='The parquet file path to load the Energy Efficiency '\n 'dataset')\n@click.option('--results-output-path', type=str, help='The parquet file path to save the experiment results')\ndef main(boston_housing_input_path: str, yacht_hydrodynamics_input_path: str, concrete_strength_input_path: str,\n energy_efficiency_input_path: str, results_output_path: str):\n \"\"\"\n Run experiments to estimate the posterior distribution of the weights of linear regression models.\n\n Args:\n boston_housing_input_path: The parquet file path to load the Boston Housing dataset.\n yacht_hydrodynamics_input_path: The parquet file path to load the Yacht Hydrodynamics dataset.\n concrete_strength_input_path: The parquet file path to load the Concrete Compressive Strength dataset.\n energy_efficiency_input_path: The parquet file path to load the Energy Efficiency dataset.\n results_output_path: The parquet file path to save the experiment results.\n \"\"\"\n with open(\"params.yaml\", 'r') as fd:\n params = yaml.safe_load(fd)['linear_regression_posterior']\n\n datasets = [\n pd.read_parquet(boston_housing_input_path),\n pd.read_parquet(yacht_hydrodynamics_input_path),\n pd.read_parquet(concrete_strength_input_path),\n pd.read_parquet(energy_efficiency_input_path),\n ]\n\n dataset_labels = [\n 'boston_housing',\n 'yacht_hydrodynamics',\n 'concrete_strength',\n 'energy_efficiency',\n ]\n\n results = run_all_experiments(\n datasets=datasets,\n dataset_labels=dataset_labels,\n min_latent_dim=params['min_latent_dim'],\n max_latent_dim=params['max_latent_dim'],\n n_trials=params['n_trials'],\n model_optimiser=params['model_optimiser'],\n model_optimiser_kwargs=params['model_optimiser_kwargs'],\n n_epochs=params['n_epochs'],\n batch_size=params['batch_size'],\n gradient_optimiser=params['gradient_optimiser'],\n gradient_optimiser_kwargs=params['gradient_optimiser_kwargs'],\n gradient_warm_up_time_steps=params['gradient_warm_up_time_steps'],\n em_warm_up_time_steps=params['em_warm_up_time_steps'],\n posterior_update_epoch_start=params['posterior_update_epoch_start'],\n posterior_eval_epoch_frequency=params['posterior_eval_epoch_frequency'],\n precision_scaling_factor=params['precision_scaling_factor'],\n )\n\n print('Results:\\n')\n print(results)\n\n Path(os.path.dirname(results_output_path)).mkdir(parents=True, exist_ok=True)\n results.to_parquet(results_output_path)\n\n\nif __name__ == '__main__':\n main()\n",
"id": "7192651",
"language": "Python",
"matching_score": 6.220668792724609,
"max_stars_count": 0,
"path": "experiments/linear_regression_posterior.py"
},
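The closed-form posterior used throughout experiments/linear_regression_posterior.py (covariance S = (alpha * I + beta * sum_n x_n x_n^T)^{-1} and mean m = beta * S X^T y, as described in the compute_true_posterior_covar and compute_true_posterior_mean docstrings) can be sanity-checked in isolation. Below is a minimal, self-contained sketch; the function name true_linear_regression_posterior and the toy data are illustrative only and do not appear in the repository.

import torch


def true_linear_regression_posterior(X: torch.Tensor, y: torch.Tensor, alpha: float, beta: float):
    # Posterior covariance: S = (alpha * I + beta * X^T X)^{-1}.
    n_features = X.shape[1]
    A = alpha * torch.eye(n_features) + beta * X.t().mm(X)
    S = torch.linalg.inv(A)
    # Posterior mean: m = beta * S X^T y.
    m = beta * S.mv(X.t().mv(y))
    return m, S


# Toy usage with arbitrary alpha and beta values.
X = torch.randn(100, 5)
y = torch.randn(100)
m, S = true_linear_regression_posterior(X, y, alpha=0.1, beta=1.0)
print(m.shape, S.shape)  # torch.Size([5]) torch.Size([5, 5])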
{
"content": "import pytest\nimport numpy as np\nimport pandas as pd\nimport torch\n\nfrom experiments.linear_regression_posterior import (\n compute_true_posterior,\n compute_true_posterior_covar,\n compute_true_posterior_mean,\n get_features_and_targets,\n run_all_experiments,\n)\n\n\n@pytest.mark.parametrize(\n \"n_datasets, min_latent_dim, max_latent_dim, n_trials, n_features, n_epochs, posterior_eval_epoch_frequency\",\n [\n (1, 1, 1, 1, [2], 1, 1),\n (1, 1, 2, 2, [3], 2, 1),\n (1, 2, 2, 2, [3], 4, 2),\n (2, 1, 1, 1, [2, 3], 1, 1),\n (2, 1, 2, 2, [3, 2], 2, 1),\n (2, 2, 2, 2, [3, 3], 4, 2),\n ]\n )\ndef test_all_experiments_results_rows_and_columns(n_datasets, min_latent_dim, max_latent_dim, n_trials, n_features,\n n_epochs, posterior_eval_epoch_frequency):\n n_samples = 100\n datasets = [pd.DataFrame(np.random.randn(n_samples, n_features[i] + 1)) for i in range(n_datasets)]\n dataset_labels = [f\"dataset_{i}\" for i in range(n_datasets)]\n\n results = run_all_experiments(\n datasets=datasets,\n dataset_labels=dataset_labels,\n min_latent_dim=min_latent_dim,\n max_latent_dim=max_latent_dim,\n n_trials=n_trials,\n model_optimiser='sgd',\n model_optimiser_kwargs=dict(lr=0.01),\n n_epochs=n_epochs,\n batch_size=32,\n gradient_optimiser='adam',\n gradient_optimiser_kwargs=dict(lr=0.01),\n gradient_warm_up_time_steps=1,\n em_warm_up_time_steps=1,\n posterior_update_epoch_start=1,\n posterior_eval_epoch_frequency=posterior_eval_epoch_frequency,\n precision_scaling_factor=0.1,\n )\n\n expected_columns = [\n 'epoch',\n 'posterior_mean_distance_sklearn',\n 'posterior_covar_distance_sklearn',\n 'posterior_wasserstein_sklearn',\n 'empirical_mean_distance_sklearn',\n 'empirical_covar_distance_sklearn',\n 'empirical_wasserstein_sklearn',\n 'posterior_mean_distance_online_gradient',\n 'posterior_covar_distance_online_gradient',\n 'posterior_wasserstein_online_gradient',\n 'empirical_mean_distance_online_gradient',\n 'empirical_covar_distance_online_gradient',\n 'empirical_wasserstein_online_gradient',\n 'posterior_mean_distance_online_em',\n 'posterior_covar_distance_online_em',\n 'posterior_wasserstein_online_em',\n 'empirical_mean_distance_online_em',\n 'empirical_covar_distance_online_em',\n 'empirical_wasserstein_online_em',\n 'empirical_mean_norm',\n 'empirical_covar_norm',\n 'latent_dim',\n 'trial',\n 'posterior_mean_norm',\n 'posterior_covar_norm',\n 'alpha',\n 'beta',\n 'dataset',\n 'n_samples',\n 'observation_dim',\n 'learning_rate',\n ]\n\n actual_columns = list(results.columns)\n assert len(actual_columns) == len(expected_columns)\n assert len(np.intersect1d(actual_columns, expected_columns)) == len(actual_columns)\n\n expected_n_rows = n_datasets * (max_latent_dim - min_latent_dim + 1) * n_trials * n_epochs \\\n / posterior_eval_epoch_frequency\n assert len(results) == expected_n_rows\n\n for i in range(n_datasets):\n assert (results['dataset'] == dataset_labels[i]).sum() == (max_latent_dim - min_latent_dim + 1) * n_trials \\\n * n_epochs / posterior_eval_epoch_frequency\n\n\n@pytest.mark.parametrize(\"n_samples\", [10, 20])\n@pytest.mark.parametrize(\"n_features\", [2, 3])\ndef test_get_features_and_targets(n_samples, n_features):\n dataset = pd.DataFrame(np.random.randn(n_samples, n_features + 1))\n features = dataset.iloc[:, :-1].values\n targets = dataset.iloc[:, -1].values\n\n means = features.mean(axis=0, keepdims=True)\n stds = features.std(axis=0, keepdims=True)\n\n X, y = get_features_and_targets(dataset)\n\n assert np.isclose((X.numpy() * stds) + means, features, 
atol=1e-4).all()\n assert np.isclose(y.numpy(), targets).all()\n\n\n@pytest.mark.parametrize(\"n_samples\", [10, 100])\n@pytest.mark.parametrize(\"n_features\", [3, 8])\n@pytest.mark.parametrize(\"alpha\", [None, 0.1])\n@pytest.mark.parametrize(\"beta\", [0.01, 0.1])\n@pytest.mark.parametrize(\"alpha_scaling_factor\", [0.01, 0.1])\ndef test_compute_true_posterior_covar(n_samples, n_features, alpha, beta, alpha_scaling_factor):\n X = torch.randn(n_samples, n_features)\n\n actual_S, actual_alpha = compute_true_posterior_covar(\n X, beta, alpha=alpha, alpha_scaling_factor=alpha_scaling_factor,\n )\n\n xxt = torch.zeros(n_features, n_features)\n for x in X:\n x = x.reshape(-1, 1)\n xxt += x.mm(x.t())\n B = beta * xxt\n\n if alpha is None:\n assert np.isclose(actual_alpha, alpha_scaling_factor * torch.diag(B).mean().item())\n else:\n assert actual_alpha == alpha\n\n A = actual_alpha * torch.eye(n_features) + B\n expected_S = torch.linalg.inv(A)\n\n assert torch.isclose(actual_S, expected_S).all()\n\n\n@pytest.mark.parametrize(\"n_samples\", [10, 100])\n@pytest.mark.parametrize(\"n_features\", [3, 8])\n@pytest.mark.parametrize(\"alpha\", [None, 0.1])\n@pytest.mark.parametrize(\"beta\", [0.01, 0.1])\n@pytest.mark.parametrize(\"alpha_scaling_factor\", [0.01, 0.1])\ndef test_compute_true_posterior_mean(n_samples, n_features, alpha, beta, alpha_scaling_factor):\n X = torch.randn(n_samples, n_features)\n y = torch.randn(n_samples)\n\n S, _ = compute_true_posterior_covar(X, beta, alpha=alpha, alpha_scaling_factor=alpha_scaling_factor)\n\n actual_m = compute_true_posterior_mean(X, y, beta, S)\n\n yx = torch.zeros(n_features)\n for i, x in enumerate(X):\n yx += y[i] * x\n b = beta * yx\n\n expected_m = S.mm(b.reshape(-1, 1)).squeeze()\n assert torch.isclose(actual_m, expected_m, atol=1e-4).all()\n\n\n@pytest.mark.parametrize(\"n_samples\", [10, 100])\n@pytest.mark.parametrize(\"n_features\", [3, 8])\n@pytest.mark.parametrize(\"alpha\", [None, 0.1])\n@pytest.mark.parametrize(\"beta\", [None, 0.1])\n@pytest.mark.parametrize(\"alpha_scaling_factor\", [0.01, 0.1])\ndef test_compute_true_posterior(n_samples, n_features, alpha, beta, alpha_scaling_factor):\n X = torch.randn(n_samples, n_features)\n y = torch.randn(n_samples)\n\n actual_m, actual_S, actual_alpha, actual_beta = compute_true_posterior(\n X, y, alpha, beta, alpha_scaling_factor=alpha_scaling_factor,\n )\n\n if beta is None:\n assert np.isclose(actual_beta, 1 / torch.var(y).item())\n else:\n assert actual_beta == beta\n\n expected_S, expected_alpha = compute_true_posterior_covar(\n X, actual_beta, alpha=alpha, alpha_scaling_factor=alpha_scaling_factor,\n )\n assert torch.isclose(actual_S, expected_S).all()\n\n expected_m = compute_true_posterior_mean(X, y, actual_beta, expected_S)\n assert torch.isclose(actual_m, expected_m).all()\n",
"id": "6661422",
"language": "Python",
"matching_score": 3.0032713413238525,
"max_stars_count": 0,
"path": "tests/test_experiments/test_linear_regression_posterior.py"
},
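The test above (test_compute_true_posterior_covar) verifies the scatter term by accumulating sum_n x_n x_n^T in an explicit loop, while compute_true_posterior_covar itself uses torch.einsum('ij,ik->jk', X, X). A short standalone check, with arbitrary toy shapes, showing that the loop, the einsum and a plain matrix product X^T X all agree:

import torch

X = torch.randn(10, 3)

# Loop form, as used in the test: sum of outer products x_n x_n^T.
xxt_loop = torch.zeros(3, 3)
for x in X:
    x = x.reshape(-1, 1)
    xxt_loop += x.mm(x.t())

# Einsum form, as used in compute_true_posterior_covar.
xxt_einsum = torch.einsum('ij,ik->jk', X, X)

# Equivalent plain matrix product.
xxt_matmul = X.t().mm(X)

assert torch.isclose(xxt_loop, xxt_einsum, atol=1e-5).all()
assert torch.isclose(xxt_loop, xxt_matmul, atol=1e-5).all()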
{
"content": "import os\nfrom pathlib import Path\nfrom typing import List, Optional\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport click\n\n\ndef run_analysis(results: pd.DataFrame, analysis_output_dir: str):\n \"\"\"\n Aggregate the experiment results and generate plots showing how similar the estimated posterior distributions are\n to the true posterior distributions.\n\n For each dataset and latent dimension, group by n_epochs and compute the mean and standard error of the metrics in\n the experiment results. Save these statistics to csv files.\n\n Also, for each dataset generate three plots, one showing the distance between the true and estimated posterior mean,\n one showing the distance between the true and estimated posterior covariance matrix and one showing the Wasserstein\n distance between the true and estimated posteriors. Save these plots to png files.\n\n Args:\n results: The results of each experiment. The number of rows in the DataFrame is equal to\n n_datasets * max_latent_dim * n_trials * n_epochs / posterior_eval_epoch_frequency.\n The DataFrame has the following columns:\n - epoch: (int) The training epoch on which the metrics were computed.\n - posterior_mean_distance_sklearn: (float) The Frobenius norm between the mean of the true posterior and the\n posterior estimated via the batch sklearn FA algorithm.\n - posterior_covar_distance_sklearn: (float) The Frobenius norm between the covariance matrix of the true\n posterior and the posterior estimated via the batch sklearn FA algorithm.\n - posterior_wasserstein_sklearn: (float) The 2-Wasserstein distance between the true posterior and the\n posterior estimated via the batch sklearn FA algorithm.\n - empirical_mean_distance_sklearn: (float) The Frobenius norm between the mean of the empirical distribution\n and the posterior estimated via the batch sklearn FA algorithm.\n - empirical_covar_distance_sklearn: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via the batch sklearn FA algorithm.\n - empirical_wasserstein_sklearn: (float) The 2-Wasserstein distance between the empirical distribution and\n the posterior estimated via the batch sklearn FA algorithm.\n - posterior_mean_distance_online_gradient: (float) The Frobenius norm between the mean of the true posterior\n and the posterior estimated via online gradient FA.\n - posterior_covar_distance_online_gradient: (float) The Frobenius norm between the covariance matrix of the\n true posterior and the posterior estimated via online gradient FA.\n - posterior_wasserstein_online_gradient: (float) The 2-Wasserstein distance between the true posterior and\n the posterior estimated via online gradient FA.\n - empirical_mean_distance_online_gradient: (float) The Frobenius norm between the mean of the empirical\n distribution and the posterior estimated via online gradient FA.\n - empirical_covar_distance_online_gradient: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via online gradient FA.\n - empirical_wasserstein_online_gradient: (float) The 2-Wasserstein distance between the empirical\n distribution and the posterior estimated via online gradient FA.\n - posterior_mean_distance_online_em: (float) The Frobenius norm between the mean of the true posterior and\n the posterior estimated via online EM FA.\n - posterior_covar_distance_online_em: (float) The Frobenius norm between the covariance matrix of the true\n posterior 
and the posterior estimated via online EM FA.\n - posterior_wasserstein_online_em: (float) The 2-Wasserstein distance between the true posterior and the\n posterior estimated via online EM FA.\n - empirical_mean_distance_online_em: (float) The Frobenius norm between the mean of the empirical\n distribution and the posterior estimated via online EM FA.\n - empirical_covar_distance_online_em: (float) The Frobenius norm between the covariance matrix of the\n empirical distribution and the posterior estimated via online EM FA.\n - empirical_wasserstein_online_em: (float) The 2-Wasserstein distance between the true posterior and the\n empirical distribution estimated via online EM FA.\n - empirical_mean_norm: (float) The Frobenius norm of the mean vector of the empirical distribution.\n - empirical_covar_norm: (float) The Frobenius norm of the covariance matrix of the empirical distribution.\n - latent_dim: (int) The latent dimension of the FA models.\n - trial: (int) The index of the trial within the experiment.\n - posterior_mean_norm: (float) The Frobenius norm of the mean vector of the true posterior.\n - posterior_covar_norm: (float) The Frobenius norm of the covariance matrix of the true posterior.\n - alpha: (float) The precision of the prior of the weights of the linear model.\n - beta: (float) The reciprocal of the variance of the dataset's target variable.\n - dataset: (str) The name of the dataset.\n - n_samples: (int) The number of samples in the dataset.\n - observation_dim: (int) The number of features in the dataset.\n - learning_rate: (float) The learning rate of the PyTorch optimiser used to train the linear models.\n analysis_output_dir: The directory path to save the output of the analysis.\n \"\"\"\n metric_suffixes = ['sklearn', 'online_gradient', 'online_em']\n posterior_mean_columns = [f'posterior_mean_distance_{x}' for x in metric_suffixes]\n posterior_covar_columns = [f'posterior_covar_distance_{x}' for x in metric_suffixes]\n posterior_wasserstein_columns = [f'posterior_wasserstein_{x}' for x in metric_suffixes]\n empirical_mean_columns = [f'empirical_mean_distance_{x}' for x in metric_suffixes]\n empirical_covar_columns = [f'empirical_covar_distance_{x}' for x in metric_suffixes]\n empirical_wasserstein_columns = [f'empirical_wasserstein_{x}' for x in metric_suffixes]\n\n results[posterior_mean_columns] = \\\n results[posterior_mean_columns].values / results[['posterior_mean_norm']].values\n results[posterior_covar_columns] = \\\n results[posterior_covar_columns].values / results[['posterior_covar_norm']].values\n\n results[empirical_mean_columns] = \\\n results[empirical_mean_columns].values / results[['empirical_mean_norm']].values\n results[empirical_covar_columns] = \\\n results[empirical_covar_columns].values / results[['empirical_covar_norm']].values\n\n axis_titles = ['Batch SVD', 'Online SGA', 'Online EM']\n\n for dataset_label in results['dataset'].unique():\n dataset_means, dataset_standard_errors = aggregate_experiment_results(\n results,\n dataset_label,\n metric_columns=posterior_mean_columns\n + posterior_covar_columns\n + posterior_wasserstein_columns\n + empirical_mean_columns\n + empirical_covar_columns\n + empirical_wasserstein_columns,\n )\n\n dataset_means.to_csv(os.path.join(\n analysis_output_dir,\n f'linear_regression_posterior_metric_means__{dataset_label}.csv'), index=False,\n )\n\n dataset_standard_errors.to_csv(os.path.join(\n analysis_output_dir,\n f'linear_regression_posterior_metric_standard_errors__{dataset_label}.csv'),\n index=False,\n 
)\n\n generate_and_save_error_bar_plot(\n dataset_means,\n dataset_standard_errors,\n png_path=os.path.join(\n analysis_output_dir, f'linear_regression_posterior_mean_distance__{dataset_label}.png',\n ),\n ylabel='Relative distance from true posterior mean',\n axes_columns=posterior_mean_columns,\n axes_titles=axis_titles,\n yscale='log',\n )\n\n generate_and_save_error_bar_plot(\n dataset_means,\n dataset_standard_errors,\n png_path=os.path.join(\n analysis_output_dir, f'linear_regression_posterior_covar_distance__{dataset_label}.png',\n ),\n ylabel='Relative distance from true posterior covariance',\n axes_columns=posterior_covar_columns,\n axes_titles=axis_titles,\n yscale='log'\n )\n\n generate_and_save_error_bar_plot(\n dataset_means,\n dataset_standard_errors,\n png_path=os.path.join(\n analysis_output_dir, f'linear_regression_posterior_wasserstein__{dataset_label}.png',\n ),\n ylabel='2-Wasserstein distance from true posterior',\n axes_columns=posterior_wasserstein_columns,\n axes_titles=axis_titles,\n yscale='log'\n )\n\n generate_and_save_error_bar_plot(\n dataset_means,\n dataset_standard_errors,\n png_path=os.path.join(\n analysis_output_dir, f'linear_regression_empirical_mean_distance__{dataset_label}.png',\n ),\n ylabel='Relative distance from empirical mean',\n axes_columns=empirical_mean_columns,\n axes_titles=axis_titles,\n yscale='log'\n )\n\n generate_and_save_error_bar_plot(\n dataset_means,\n dataset_standard_errors,\n png_path=os.path.join(\n analysis_output_dir, f'linear_regression_empirical_covar_distance__{dataset_label}.png',\n ),\n ylabel='Relative distance from empirical covariance',\n axes_columns=empirical_covar_columns,\n axes_titles=axis_titles,\n yscale='log'\n )\n\n generate_and_save_error_bar_plot(\n dataset_means,\n dataset_standard_errors,\n png_path=os.path.join(\n analysis_output_dir, f'linear_regression_empirical_wasserstein__{dataset_label}.png',\n ),\n ylabel='2-Wasserstein distance from empirical distribution',\n axes_columns=empirical_wasserstein_columns,\n axes_titles=axis_titles,\n yscale='log'\n )\n\n\ndef aggregate_experiment_results(results: pd.DataFrame, dataset_label: str, metric_columns: List[str],\n ) -> (pd.DataFrame, pd.DataFrame):\n \"\"\"\n For the given dataset, group the results by latent dimension and epoch and average the given metric columns over all\n trials.\n\n Also, compute the standard error of the mean of each group.\n\n Args:\n results: The results of each experiment. Columns must contain 'dataset', 'latent_dim', 'epoch' and\n metric_columns.\n dataset_label: The name of the dataset for which to aggregate the results.\n metric_columns: The columns which are to be aggregated.\n\n Returns:\n group_means: The mean of each column in metric_columns for each combination of latent dimension and epoch. Has\n columns 'latent_dim', 'epoch' and metric_columns.\n group_standard_errors: The standard error of each value in group_means. 
Has the same shape as group_means.\n \"\"\"\n dataset_results = results[results['dataset'] == dataset_label]\n grouped_results = dataset_results.groupby(['latent_dim', 'epoch'])\n group_means = grouped_results[metric_columns].mean().reset_index()\n group_standard_errors = grouped_results[metric_columns].sem().reset_index()\n return group_means, group_standard_errors\n\n\ndef generate_and_save_error_bar_plot(means: pd.DataFrame, standard_errors: pd.DataFrame, png_path: str,\n ylabel: str, axes_columns: List[str], axes_titles: Optional[List[str]] = None,\n xscale: str = 'linear', yscale: str = 'linear',):\n \"\"\"\n Plot the means with standard error bars.\n\n Save the plot to the given png file.\n\n Args:\n means: Should contain columns 'latent_dim', 'epoch' and axes_columns. A separate axes will be plotted for each\n column in axes_columns. On each axis, a separate line will be plotted for each latent dimension. Each line\n will show the values in the column plotted again the epoch for the corresponding latent dimension.\n standard_errors: The standard error for each value in means. Has the same shape and columns as means.\n png_path: The file path to save the plot as a png file.\n ylabel: The y-axis label. All plots will share the same y-axis label.\n axes_columns: The column in means to plot on each axis. A subplot will be generated with shape\n (1, len(axes_columns)).\n axes_titles: A title for each axis. Should have the same length as axes_columns. If None, will be set to\n axes_columns.\n xscale: The type of scale to use on the x-axis.\n yscale: The type of scale to use on the y-axis.\n \"\"\"\n axes_titles = axes_titles or axes_columns\n plt.rcParams.update({'font.size': 15})\n fig, axes = plt.subplots(1, len(axes_columns), sharey=True, figsize=(18, 6))\n\n for latent_dim in means['latent_dim'].unique():\n group_means = means[means['latent_dim'] == latent_dim]\n group_standard_errors = standard_errors[standard_errors['latent_dim'] == latent_dim]\n\n for ax, column in zip(axes, axes_columns):\n x = group_means['epoch']\n y = group_means[column]\n se = group_standard_errors[column]\n ax.errorbar(x, y, se, label=f'latent dim = {latent_dim}', marker=None)\n\n for ax, title in zip(axes, axes_titles):\n ax.set_xlabel('Epoch')\n ax.set_xscale(xscale)\n ax.set_title(title)\n\n axes[0].set_ylabel(ylabel)\n axes[0].set_yscale(yscale)\n plt.legend()\n\n plt.savefig(png_path, format='png')\n\n\n@click.command()\n@click.option('--results-input-path', type=str, help='The parquet file path from which to load the experiment results')\n@click.option('--analysis-output-dir', type=str, help='The directory path to save the output of the analysis')\ndef main(results_input_path: str, analysis_output_dir: str):\n \"\"\"\n Analyse the results from the linear regression posterior experiments.\n\n Save the analysis to the given output directory.\n\n Args:\n results_input_path: The parquet file path from which to load the experiment results.\n analysis_output_dir: The directory path to save the output of the analysis.\n \"\"\"\n results = pd.read_parquet(results_input_path)\n\n Path(analysis_output_dir).mkdir(parents=True, exist_ok=True)\n\n run_analysis(\n results,\n analysis_output_dir,\n )\n\n\nif __name__ == '__main__':\n main()\n",
"id": "3912221",
"language": "Python",
"matching_score": 7.269672393798828,
"max_stars_count": 0,
"path": "experiments/linear_regression_posterior_analysis.py"
},
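The per-dataset aggregation in the analysis script above is a pandas group-by over ('latent_dim', 'epoch') followed by .mean() and .sem(). Below is a minimal, self-contained sketch of that step; the column names mirror the docstring, but the toy values are invented for illustration only.

import pandas as pd

# Toy results table: two trials per latent dimension at a single epoch.
results = pd.DataFrame({
    'dataset': ['boston'] * 4,
    'latent_dim': [1, 1, 2, 2],
    'epoch': [10, 10, 10, 10],
    'posterior_mean_distance': [0.30, 0.40, 0.20, 0.10],
})

grouped = results[results['dataset'] == 'boston'].groupby(['latent_dim', 'epoch'])
means = grouped[['posterior_mean_distance']].mean().reset_index()   # mean over trials per group
sems = grouped[['posterior_mean_distance']].sem().reset_index()     # standard error, same shape as means

print(means)
print(sems)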
{
"content": "import os\nfrom pathlib import Path\nfrom typing import List, Optional, Union\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport click\nimport yaml\n\n\ndef run_analysis(results: pd.DataFrame, analysis_output_dir: str, min_samples: int):\n \"\"\"\n Aggregate the experiment results and generate plots for covariance distance, log-likelihood and Wasserstein\n distance.\n\n For each experiment, defined by the parameters observation_dim, latent_dim, spectrum_min and spectrum_max, group by\n n_samples and compute the mean and standard error of the metrics in the experiment results. Save these statistics\n to csv files.\n\n Also, for each experiment generate four plots, one showing the distance between the true covariance matrix and the\n estimated covariance matrix, one showing the training log-likelihood, one showing the hold-out log-likelihood, and\n one showing the Wasserstein distance between the Gaussian distribution define by the true factor analysis (FA) model\n and the Gaussian distribution defined by the learned FA model, for each FA learning algorithm. Save these plots to\n png files.\n\n Args:\n results: The results of each experiment. Has len(experiments_config) * n_trials rows and the following columns:\n - observation_dim: (int) The size of the observed variable space of the FA model..\n - latent_dim: (int) The size of the latent variable space of the FA model..\n - spectrum_min: (float) The lower bound of the spectrum range.\n - spectrum_max: (float) The upper bound of the spectrum range.\n - n_samples: (int) The number of observations sampled from the true FA model.\n - covar_norm: (float) The Frobenius norm of the the true covariance matrix of the FA model.\n - covar_distance_sklearn: (float) The Frobenius norm of the difference between the true covariance matrix\n and the covariance matrix estimated by sklearn's `FactorAnalysis`.\n - covar_distance_online_gradient: (float) The Frobenius norm of the difference between the true covariance\n matrix and the covariance matrix estimated by `OnlineGradientFactorAnalysis`.\n - covar_distance_online_em: (float) The Frobenius norm of the difference between the true covariance\n matrix and the covariance matrix estimated by `OnlineEMFactorAnalysis`.\n - wasserstein_sklearn: (float) The Wasserstein distance between the Gaussian distribution defined by the\n true FA model and the Gaussian distribution defined by the sklearn FA model.\n - wasserstein_online_gradient: (float) The Wasserstein distance between the Gaussian distribution defined by\n the true FA model and the Gaussian distribution defined by the online gradient FA model.\n - wasserstein_online_em: (float) The Wasserstein distance between the Gaussian distribution defined by the\n true FA model and the Gaussian distribution defined by the online EM FA model.\n - experiment: (int) The index of the experiment.\n - trial: (int) The index of the trial within the experiment.\n analysis_output_dir: The directory path to save the output of the analysis.\n min_samples: Only analyse experiments which used at least this many data samples to learn FA models.\n \"\"\"\n param_columns = ['observation_dim', 'latent_dim', 'spectrum_min', 'spectrum_max']\n group_by_column = 'n_samples'\n metric_suffixes = ['sklearn', 'online_em', 'online_gradient']\n\n covar_columns = [f'covar_distance_{x}' for x in metric_suffixes]\n wasserstein_columns = [f'wasserstein_{x}' for x in metric_suffixes]\n\n plot_line_labels = ['batch_svd', 'online_em', 'online_sga']\n\n 
results = results[results['n_samples'] >= min_samples]\n results[covar_columns] = results[covar_columns].values / results[['covar_norm']].values\n results[wasserstein_columns] = results[wasserstein_columns].values / results[['observation_dim']].values\n\n param_combinations = results[param_columns].drop_duplicates()\n for _, params in param_combinations.iterrows():\n group_means, group_standard_errors = aggregate_experiment_results(\n results,\n params,\n group_by_column,\n metric_columns=covar_columns + wasserstein_columns,\n )\n\n params_str = params_to_string(params)\n\n group_means.to_csv(os.path.join(\n analysis_output_dir, f'online_fa_metric_means__{params_str}.csv'),\n )\n\n group_standard_errors.to_csv(os.path.join(\n analysis_output_dir, f'online_fa_metric_standard_errors__{params_str}.csv'),\n )\n\n generate_and_save_error_bar_plot(\n group_means[covar_columns],\n group_standard_errors[covar_columns],\n png_path=os.path.join(analysis_output_dir, f'online_fa_covar_distance__{params_str}.png'),\n xlabel='Number of training samples',\n ylabel='Relative covariance distance',\n xscale='log',\n yscale='log',\n line_labels=plot_line_labels,\n )\n\n generate_and_save_error_bar_plot(\n group_means[wasserstein_columns],\n group_standard_errors[wasserstein_columns],\n png_path=os.path.join(analysis_output_dir, f'online_fa_wasserstein__{params_str}.png'),\n xlabel='Number of training samples',\n ylabel='Scaled 2-Wasserstein distance',\n xscale='log',\n yscale='log',\n line_labels=plot_line_labels,\n )\n\n\ndef aggregate_experiment_results(results: pd.DataFrame, experiment_params: Union[pd.Series, dict],\n group_by_column: str, metric_columns: List[str]) -> (pd.DataFrame, pd.DataFrame):\n \"\"\"\n For the experiment with the given parameters, group by the given column and average the given metric columns over\n all trials.\n\n Also, compute the standard error of the mean of each group.\n\n Args:\n results: The results of each experiment. Each row corresponds to a single trial. Columns must contain\n group_by_column, metric_columns and all keys in experiment_params.\n experiment_params: The parameters of the experiment which is to be aggregated. Keys are parameter names and\n values are parameter values.\n group_by_column: The column to group by before aggregating the results.\n metric_columns: The columns which are to be aggregated.\n\n Returns:\n group_means: The mean of each column in metric_columns for each experiment group. The number of rows is equal to\n the number of unique values in group_by_column and the number of columns is equal to len(metric_columns).\n group_standard_errors: The standard error of each value in group_means. 
Has the same shape as group_means.\n \"\"\"\n experiment_mask = np.all(\n np.hstack(\n [results[name].values.reshape(-1, 1) == value for name, value in experiment_params.items()]\n ),\n axis=1\n )\n experiment_results = results[experiment_mask]\n grouped_results = experiment_results.groupby(group_by_column)\n group_means = grouped_results[metric_columns].mean()\n group_standard_errors = grouped_results[metric_columns].sem()\n return group_means, group_standard_errors\n\n\ndef generate_and_save_error_bar_plot(means: pd.DataFrame, standard_errors: pd.DataFrame, png_path: str, xlabel: str,\n ylabel: str, xscale: str = 'linear', yscale: str = 'linear',\n line_labels: Optional[List[str]] = None):\n \"\"\"\n Plot the means with standard error bars.\n\n Use a log scale for both axes.\n\n Save the plot to the given png file.\n\n Args:\n means: Plot a separate line for the values in each column. Use the values in the index of the DataFrame as the\n x-axis values.\n standard_errors: The standard error for each value in means. Has the same shape as means.\n png_path: The file path to save the plot as a png file.\n xlabel: The x-axis label.\n ylabel: The y-axis label.\n xscale: The type of scale to use on the x-axis.\n yscale: The type of scale to use on the y-axis.\n line_labels: A label for each line that will be plotted. Should be the same length as the number of columns in\n means. If None, means.columns will be used.\n \"\"\"\n line_labels = line_labels or means.columns\n plt.rcParams.update({'font.size': 20})\n plt.figure(figsize=(12, 8))\n x = means.index\n for i, metric_name in enumerate(means.columns):\n plt.errorbar(x, means[metric_name], standard_errors[metric_name], label=line_labels[i], marker='o')\n\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.xscale(xscale)\n plt.yscale(yscale)\n plt.legend()\n\n plt.savefig(png_path, format='png')\n\n\ndef params_to_string(params: Union[pd.Series, dict]) -> str:\n \"\"\"\n Convert parameter key-value pairs to a string.\n\n E.g. dict(observation_dim=19, latent_dim=5) will be converted to 'observation_dim=19__latent_dim=5'.\n\n Args:\n params: Key-value parameter pairs.\n\n Returns:\n A string of the form 'key1=value1__key2==value2__.....'.\n \"\"\"\n return '__'.join([f'{name}={value}' for name, value in params.items()])\n\n\n@click.command()\n@click.option('--results-input-path', type=str, help='The parquet file path from which to load the experiment results')\n@click.option('--analysis-output-dir', type=str, help='The directory path to save the output of the analysis')\ndef main(results_input_path: str, analysis_output_dir: str):\n \"\"\"\n Analyse the results from the factor analysis experiments.\n\n Save the analysis to the given output directory.\n\n Args:\n results_input_path: The parquet file path from which to load the experiment results.\n analysis_output_dir: The directory path to save the output of the analysis.\n \"\"\"\n with open(\"params.yaml\", 'r') as fd:\n params = yaml.safe_load(fd)\n\n results = pd.read_parquet(results_input_path)\n\n Path(analysis_output_dir).mkdir(parents=True, exist_ok=True)\n\n run_analysis(\n results,\n analysis_output_dir,\n params['online_fa_analysis']['min_samples'],\n )\n\n\nif __name__ == '__main__':\n main()\n",
"id": "4753622",
"language": "Python",
"matching_score": 3.1127922534942627,
"max_stars_count": 0,
"path": "experiments/online_fa_analysis.py"
},
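The experiment-selection step in aggregate_experiment_results above builds one boolean column per parameter, stacks them, and keeps only the rows that match every parameter. A small sketch of the same masking trick, with invented data:

import numpy as np
import pandas as pd

results = pd.DataFrame({
    'observation_dim': [10, 10, 20],
    'latent_dim': [2, 3, 2],
    'wasserstein_online_em': [0.5, 0.4, 0.9],
})
params = {'observation_dim': 10, 'latent_dim': 2}

# One (n_rows, 1) boolean column per parameter, combined with a row-wise AND.
mask = np.all(
    np.hstack([results[name].values.reshape(-1, 1) == value for name, value in params.items()]),
    axis=1,
)
print(results[mask])  # only the first row matches both parameters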
{
"content": "import os\nfrom pathlib import Path\nfrom typing import Dict\n\nimport torch\nfrom torch import Tensor\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch.optim import Optimizer\nfrom pytorch_lightning import Trainer\nimport pandas as pd\nimport click\nimport yaml\nfrom sklearn.linear_model import BayesianRidge\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom swafa.models import FeedForwardGaussianNet\nfrom swafa.callbacks import FactorAnalysisVariationalInferenceCallback\nfrom experiments.linear_regression_posterior import get_features_and_targets\nfrom experiments.utils.metrics import compute_distance_between_matrices, compute_gaussian_wasserstein_distance\nfrom experiments.utils.factory import OPTIMISER_FACTORY\n\n\ndef run_experiment(\n dataset: pd.DataFrame,\n latent_dim: int,\n n_gradients_per_update: int,\n optimiser_class: Optimizer,\n bias_optimiser_kwargs: dict,\n factors_optimiser_kwargs: dict,\n noise_optimiser_kwargs: dict,\n max_grad_norm: float,\n batch_size: int,\n n_epochs: int,\n results_output_dir: str,\n) -> pd.DataFrame:\n \"\"\"\n Run posterior estimation experiments on the given dataset.\n\n Compute the true posterior of a linear regression model fit to the data and compare it to the posterior estimated\n via the VIFA algorithm.\n\n Save all results (including plots) to the given output directory.\n\n Args:\n dataset: Contains features and a target variable, where the target variable is in the final column.\n latent_dim: The latent dimension of the factor analysis model used as the variational distribution.\n n_gradients_per_update: The number of mini-batch gradients to use to form the expectation of the true gradient\n for each parameter update.\n optimiser_class: The class of the optimiser to use for gradient updates.\n bias_optimiser_kwargs: Keyword arguments for the optimiser which updates the bias term of the factor analysis\n variational distribution.\n factors_optimiser_kwargs: Keyword arguments for the optimiser which updates the factor loading matrix of the\n factor analysis variational distribution.\n noise_optimiser_kwargs: Keyword arguments for the optimiser which updates the logarithm of the diagonal entries\n of the Gaussian noise covariance matrix of the factor analysis variational distribution.\n max_grad_norm: Maximum norm for gradients which are used to update the parameters of the variational\n distribution.\n batch_size: The batch size to use for mini-batch gradient optimisation.\n n_epochs: The number of epochs for which to run the mini-batch gradient optimisation.\n results_output_dir: The path to directory where experiment results (including plots) will be saved.\n\n Returns:\n A dictionary with the following keys:\n - relative_distance_from_mean: The Frobenius norm between the mean of the true posterior and the mean of the\n variational posterior (including bias), divided by the Frobenius norm of the mean of the true\n posterior.\n - relative_distance_from_covar: The Frobenius norm between the covariance of the true posterior and the\n covariance of the variational posterior (not including bias), divided by the Frobenius norm of the\n covariance of the true posterior.\n - scaled_wasserstein_distance: The 2-Wasserstein distance between the true posterior and the variational\n posterior (not including bias), divided by the dimension of the distribution.\n - alpha: The precision of the prior.\n - beta: The precision of the label noise.\n \"\"\"\n X, y = get_features_and_targets(dataset)\n n_samples, 
n_features = X.shape\n\n true_mean, true_covar, true_bias, alpha, beta = get_true_posterior(X, y)\n\n model = FeedForwardGaussianNet(\n input_dim=n_features,\n bias=True,\n loss_multiplier=n_samples,\n target_variance=1 / beta,\n random_seed=1,\n )\n\n variational_callback = FactorAnalysisVariationalInferenceCallback(\n latent_dim=latent_dim,\n precision=alpha,\n n_gradients_per_update=n_gradients_per_update,\n optimiser_class=optimiser_class,\n bias_optimiser_kwargs=bias_optimiser_kwargs,\n factors_optimiser_kwargs=factors_optimiser_kwargs,\n noise_optimiser_kwargs=noise_optimiser_kwargs,\n max_grad_norm=max_grad_norm,\n random_seed=1,\n )\n\n dataset = TensorDataset(X, y)\n dataloader = DataLoader(dataset, batch_size=batch_size, drop_last=True, shuffle=True)\n\n trainer = Trainer(max_epochs=n_epochs, callbacks=variational_callback, progress_bar_refresh_rate=0)\n trainer.fit(model, train_dataloader=dataloader)\n\n variational_mean, variational_covar, variational_bias = get_variational_posterior(variational_callback)\n\n true_diag_covar, true_non_diag_covar = split_covariance(true_covar.numpy())\n variational_diag_covar, variational_non_diag_covar = split_covariance(variational_covar.numpy())\n\n generate_and_save_mean_plot(true_mean.numpy(), variational_mean.numpy(), results_output_dir)\n\n generate_and_save_variance_plot(true_diag_covar, variational_diag_covar, results_output_dir)\n\n generate_and_save_covariance_plot(true_non_diag_covar, variational_non_diag_covar, results_output_dir)\n\n results = compute_metrics(true_mean, true_covar, true_bias, variational_mean, variational_covar, variational_bias)\n results['alpha'] = alpha\n results['beta'] = beta\n\n return pd.DataFrame(results, index=[0])\n\n\ndef train_test_split(dataset: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):\n \"\"\"\n Split the data into equally sized train and test sets.\n\n Args:\n dataset: Data of shape (n, k).\n\n Returns:\n train_dataset: Training data of shape (n / 2, k).\n test_dataset: Test data of shape (n / 2, k).\n \"\"\"\n shuffled_dataset = dataset.sample(frac=1, random_state=1).reset_index(drop=True)\n middle_index = int(len(shuffled_dataset) / 2)\n train_dataset = shuffled_dataset.iloc[:middle_index, :]\n test_dataset = shuffled_dataset.iloc[middle_index:, :]\n\n return train_dataset, test_dataset\n\n\ndef get_true_posterior(X: Tensor, y: Tensor) -> (Tensor, Tensor, float, float, float):\n \"\"\"\n Get the parameters of the true posterior of a linear regression model fit to the given data.\n\n Args:\n X: The features, of shape (n_samples, n_features).\n y: The targets, of shape (n_samples,).\n\n Returns:\n mean: The posterior mean, of shape (n_features,).\n covar: The posterior covariance, of shape (n_features, n_features).\n bias: The posterior bias.\n alpha: The precision of the Gaussian prior.\n beta: The precision of Gaussian target noise.\n \"\"\"\n br = BayesianRidge()\n br.fit(X.numpy(), y.numpy())\n mean = torch.from_numpy(br.coef_).float()\n covar = torch.from_numpy(br.sigma_).float()\n bias = br.intercept_\n alpha = br.lambda_\n beta = br.alpha_\n\n return mean, covar, bias, alpha, beta\n\n\ndef get_variational_posterior(variational_callback: FactorAnalysisVariationalInferenceCallback,\n ) -> (Tensor, Tensor, float):\n \"\"\"\n Get the parameters of the linear regression posterior estimated by the given variational inference callback.\n\n Args:\n variational_callback: Variational inference callback. 
It is assumed that the bias term of the posterior\n corresponds to the final dimension of the mean and covariance.\n\n Returns:\n mean: The posterior mean, of shape (n_features,).\n covar: The posterior covariance, of shape (n_features, n_features).\n bias: The posterior bias.\n \"\"\"\n weights = variational_callback.get_variational_mean()\n mean = weights[:-1]\n bias = weights[-1].item()\n covar = variational_callback.get_variational_covariance()[:-1, :-1]\n\n return mean, covar, bias\n\n\ndef split_covariance(covar: np.ndarray) -> (np.ndarray, np.ndarray):\n \"\"\"\n Split the given covariance matrix into diagonal and non-diagonal entries.\n\n Args:\n covar: Covariance matrix of shape (n_features, n_features).\n\n Returns:\n diag_covar: Diagonal covariance entries of shape (n_features,).\n non_diag_covar: Non-diagonal covariance entries of shape (n_features, n_features). Diagonal is set to 0.\n \"\"\"\n diag_covar = np.diag(covar)\n\n non_diag_covar = covar.copy()\n np.fill_diagonal(non_diag_covar, 0)\n\n return diag_covar, non_diag_covar\n\n\ndef compute_metrics(true_mean: Tensor, true_covar: Tensor, true_bias: float, variational_mean: Tensor,\n variational_covar: Tensor, variational_bias: float) -> Dict[str, float]:\n \"\"\"\n\n Args:\n true_mean: The true posterior mean, of shape (n_features,).\n true_covar: The true posterior covariance, of shape (n_features, n_features).\n true_bias: The true posterior bias.\n variational_mean: The variational posterior mean, of shape (n_features,).\n variational_covar: The variational posterior covariance, of shape (n_features, n_features).\n variational_bias: The variational posterior bias.\n\n Returns:\n A dictionary with the following keys:\n - relative_distance_from_mean: The Frobenius norm between the mean of the true posterior and the mean of the\n variational posterior (including bias), divided by the Frobenius norm of the mean of the true\n posterior.\n - relative_distance_from_covar: The Frobenius norm between the covariance of the true posterior and the\n covariance of the variational posterior (not including bias), divided by the Frobenius norm of the\n covariance of the true posterior.\n - scaled_wasserstein_distance: The 2-Wasserstein distance between the true posterior and the variational\n posterior (not including bias), divided by the dimension of the distribution.\n \"\"\"\n true_weights = torch.cat([true_mean, torch.Tensor([true_bias])])\n variational_weights = torch.cat([variational_mean, torch.Tensor([variational_bias])])\n\n distance_between_weights = compute_distance_between_matrices(true_weights, variational_weights)\n distance_between_covar = compute_distance_between_matrices(true_covar, variational_covar)\n\n true_weights_norm = compute_distance_between_matrices(true_weights, torch.zeros_like(true_weights))\n true_covar_norm = compute_distance_between_matrices(true_covar, torch.zeros_like(true_covar))\n\n wasserstein_distance = compute_gaussian_wasserstein_distance(\n true_mean, true_covar, variational_mean, variational_covar,\n )\n\n return dict(\n relative_distance_from_mean=distance_between_weights / true_weights_norm,\n relative_distance_from_covar=distance_between_covar / true_covar_norm,\n scaled_wasserstein_distance=wasserstein_distance / true_covar.shape[0],\n )\n\n\ndef generate_and_save_mean_plot(true_mean: np.ndarray, variational_mean: np.ndarray, plot_dir: str):\n \"\"\"\n Generate and save a bar plot which compares the true and variational posterior means.\n\n Plot will be saved to 
'{plot_dir}/posterior_mean.png'\n\n Args:\n true_mean: The true posterior mean, of shape (n_features,).\n variational_mean: The variational posterior mean, of shape (n_features,).\n plot_dir: The directory for saving the plot.\n \"\"\"\n plt.rcParams.update({'font.size': 15})\n\n plot_data = pd.DataFrame({\n 'True': true_mean,\n 'VIFA': variational_mean,\n }, index=range(1, len(true_mean) + 1))\n\n plot_data.plot(kind='bar', figsize=(16, 6))\n\n plt.xlabel('Feature index')\n plt.ylabel('Weight mean')\n plt.xticks(rotation=0)\n\n png_path = os.path.join(plot_dir, 'posterior_mean.png')\n plt.savefig(png_path, format='png')\n plt.close()\n\n\ndef generate_and_save_variance_plot(true_var: np.ndarray, variational_var: np.ndarray, plot_dir: str):\n \"\"\"\n Generate and save a bar plot which compares the true and variational posterior variances.\n\n Plot will be saved to '{plot_dir}/posterior_variance.png'\n\n Args:\n true_var: The true posterior variances, of shape (n_features,).\n variational_var: The variational posterior variances, of shape (n_features,).\n plot_dir: The directory for saving the plot.\n \"\"\"\n plt.rcParams.update({'font.size': 15})\n\n plot_data = pd.DataFrame({\n 'True': true_var,\n 'VIFA': variational_var,\n }, index=range(1, len(true_var) + 1))\n\n plot_data.plot(kind='bar', figsize=(16, 6))\n\n plt.xlabel('Feature index')\n plt.ylabel('Weight variance')\n plt.xticks(rotation=0)\n\n png_path = os.path.join(plot_dir, 'posterior_variance.png')\n plt.savefig(png_path, format='png')\n plt.close()\n\n\ndef generate_and_save_covariance_plot(true_covar: np.ndarray, variational_covar: np.ndarray, plot_dir: str):\n \"\"\"\n Generate and save an image plot which compares the true and variational posterior covariances.\n\n Plot will be saved to '{plot_dir}/posterior_covariance.png'\n\n Args:\n true_covar: The true posterior covariance, of shape (n_features, n_features).\n variational_covar: The variational posterior covariance, of shape (n_features, n_features).\n plot_dir: The directory for saving the plot.\n \"\"\"\n plt.rcParams.update({'font.size': 15})\n\n fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 6))\n\n cmap = plt.cm.seismic\n true_img = ax[0].imshow(true_covar, cmap=cmap)\n variational_img = ax[1].imshow(variational_covar, cmap=cmap)\n\n ticks = np.arange(len(true_covar))\n tick_labels = ticks + 1\n for a in ax:\n a.set_xticks(ticks)\n a.set_xticklabels(tick_labels)\n\n a.set_yticks(ticks)\n a.set_yticklabels(tick_labels)\n\n fig.subplots_adjust(right=0.8)\n cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])\n fig.colorbar(variational_img, cax=cbar_ax)\n\n png_path = os.path.join(plot_dir, 'posterior_covariance.png')\n plt.savefig(png_path, format='png')\n plt.close()\n\n\n@click.command()\n@click.option('--dataset-label', type=str, help='Label for the dataset. 
Used to retrieve parameters')\n@click.option('--dataset-input-path', type=str, help='The parquet file path to load the dataset')\n@click.option('--results-output-dir', type=str, help='The directory path to save the results of the experiment')\ndef main(dataset_label: str, dataset_input_path: str, results_output_dir: str):\n \"\"\"\n Run experiment to estimate the posterior distribution of the weights of linear regression models via variational\n inference.\n \"\"\"\n with open(\"params.yaml\", 'r') as fd:\n params = yaml.safe_load(fd)['linear_regression_vi']\n\n dataset_params = params['datasets'][dataset_label]\n\n dataset = pd.read_parquet(dataset_input_path)\n\n train_dataset, test_dataset = train_test_split(dataset)\n experiment_dataset = test_dataset if params['testing'] else train_dataset\n\n Path(results_output_dir).mkdir(parents=True, exist_ok=True)\n\n print(f'Running experiment for {dataset_label} dataset...')\n results = run_experiment(\n dataset=experiment_dataset,\n latent_dim=dataset_params['latent_dim'],\n n_gradients_per_update=dataset_params['n_gradients_per_update'],\n optimiser_class=OPTIMISER_FACTORY[dataset_params['optimiser']],\n bias_optimiser_kwargs=dataset_params['bias_optimiser_kwargs'],\n factors_optimiser_kwargs=dataset_params['factors_optimiser_kwargs'],\n noise_optimiser_kwargs=dataset_params['noise_optimiser_kwargs'],\n max_grad_norm=dataset_params['max_grad_norm'],\n batch_size=dataset_params['batch_size'],\n n_epochs=dataset_params['n_epochs'],\n results_output_dir=results_output_dir,\n )\n\n results.to_csv(os.path.join(results_output_dir, 'results.csv'), index=False)\n\n\nif __name__ == '__main__':\n main()\n",
"id": "4575834",
"language": "Python",
"matching_score": 6.5675578117370605,
"max_stars_count": 0,
"path": "experiments/linear_regression_vi.py"
},
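The "true" posterior in the VI experiment above comes directly from sklearn's BayesianRidge, whose fitted attributes expose the posterior mean (coef_), posterior covariance (sigma_), prior precision (lambda_) and target-noise precision (alpha_). A minimal sketch on synthetic data (the data itself is invented for illustration):

import numpy as np
from sklearn.linear_model import BayesianRidge

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = X @ np.array([1.0, -2.0, 0.5]) + rng.normal(scale=0.1, size=100)

br = BayesianRidge()
br.fit(X, y)

posterior_mean = br.coef_     # shape (3,)
posterior_covar = br.sigma_   # shape (3, 3)
prior_precision = br.lambda_  # "alpha" in the experiment's notation
noise_precision = br.alpha_   # "beta" in the experiment's notation

print(posterior_mean, prior_precision, noise_precision)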
{
"content": "import numpy as np\nimport pandas as pd\nimport pytest\nimport torch\n\nfrom swafa.callbacks import FactorAnalysisVariationalInferenceCallback\nfrom experiments.linear_regression_vi import (\n compute_metrics,\n get_true_posterior,\n get_variational_posterior,\n run_experiment,\n split_covariance,\n train_test_split,\n)\nfrom experiments.utils.metrics import compute_gaussian_wasserstein_distance\n\n\ndef test_train_test_split():\n n_rows, n_columns = 20, 10\n dataset = pd.DataFrame(np.random.rand(n_rows, n_columns))\n\n train_dataset, test_dataset = train_test_split(dataset)\n\n expected_n_rows = int(n_rows / 2)\n\n assert train_dataset.shape == (expected_n_rows, n_columns)\n assert test_dataset.shape == (expected_n_rows, n_columns)\n assert len(np.intersect1d(train_dataset.index, test_dataset.index)) == 0\n\n\ndef test_get_true_posterior():\n n_rows, n_columns = 20, 3\n X = torch.randn(n_rows, n_columns)\n y = torch.randn(n_rows)\n\n mean, covar, bias, alpha, beta = get_true_posterior(X, y)\n\n assert mean.shape == (n_columns,)\n assert covar.shape == (n_columns, n_columns)\n assert (torch.diag(covar) >= 0).all()\n assert isinstance(bias, float)\n assert alpha > 0\n assert beta > 0\n\n\n@pytest.mark.parametrize(\"n_features, latent_dim\",\n [\n (5, 1),\n (5, 3),\n (10, 10),\n ]\n )\ndef test_get_variational_posterior(n_features, latent_dim):\n variational_callback = FactorAnalysisVariationalInferenceCallback(latent_dim, precision=1)\n variational_callback.weight_dim = n_features + 1 # including bias term\n variational_callback._init_variational_params()\n\n mean, covar, bias = get_variational_posterior(variational_callback)\n\n assert mean.shape == (n_features,)\n assert covar.shape == (n_features, n_features)\n assert (torch.diag(covar) >= 0).all()\n assert isinstance(bias, float)\n\n\ndef test_split_covariance():\n d = 10\n covar = np.random.rand(d, d)\n\n diag_covar, non_diag_covar = split_covariance(covar)\n\n assert diag_covar.shape == (d,)\n assert non_diag_covar.shape == (d, d)\n assert (diag_covar == np.diag(covar)).all()\n assert (np.diag(non_diag_covar) == 0).all()\n\n\ndef test_compute_metrics():\n true_mean = torch.Tensor([1, 2])\n true_covar = torch.Tensor([\n [1, -1],\n [2, 3],\n ])\n true_bias = 4\n\n variational_mean = torch.Tensor([0, 3])\n variational_covar = torch.Tensor([\n [2, -3],\n [3, 2],\n ])\n variational_bias = 6\n\n expected_relative_distance_from_mean = np.sqrt(6) / np.sqrt(21)\n expected_relative_distance_from_covar = np.sqrt(7) / np.sqrt(15)\n expected_scaled_wasserstein_distance = compute_gaussian_wasserstein_distance(\n true_mean, true_covar, variational_mean, variational_covar,\n ) / len(true_mean)\n\n actual_metrics = compute_metrics(\n true_mean, true_covar, true_bias, variational_mean, variational_covar, variational_bias,\n )\n\n assert np.isclose(actual_metrics['relative_distance_from_mean'], expected_relative_distance_from_mean)\n assert np.isclose(actual_metrics['relative_distance_from_covar'], expected_relative_distance_from_covar)\n assert np.isclose(actual_metrics['scaled_wasserstein_distance'], expected_scaled_wasserstein_distance)\n\n\ndef test_run_experiment(tmpdir):\n dataset = pd.DataFrame(np.random.randn(20, 3), columns=['a', 'b', 'c'])\n\n results = run_experiment(\n dataset,\n latent_dim=2,\n n_gradients_per_update=1,\n optimiser_class=torch.optim.SGD,\n bias_optimiser_kwargs=dict(lr=1e-3),\n factors_optimiser_kwargs=dict(lr=1e-3),\n noise_optimiser_kwargs=dict(lr=1e-3),\n max_grad_norm=1,\n batch_size=2,\n n_epochs=5,\n 
results_output_dir=tmpdir,\n )\n\n columns = {\n 'relative_distance_from_mean',\n 'relative_distance_from_covar',\n 'scaled_wasserstein_distance',\n 'alpha',\n 'beta',\n }\n\n assert set(results.columns) == columns\n assert len(results) == 1\n",
"id": "4136948",
"language": "Python",
"matching_score": 2.8445727825164795,
"max_stars_count": 0,
"path": "tests/test_experiments/test_linear_regression_vi.py"
},
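The hand-derived constants in test_compute_metrics above can be verified directly, assuming compute_distance_between_matrices is the Frobenius norm of the element-wise difference (that helper is not shown in this dump, so this is an assumption):

import numpy as np

true_weights = np.array([1.0, 2.0, 4.0])          # posterior mean plus bias
variational_weights = np.array([0.0, 3.0, 6.0])
true_covar = np.array([[1.0, -1.0], [2.0, 3.0]])
variational_covar = np.array([[2.0, -3.0], [3.0, 2.0]])

rel_mean = np.linalg.norm(true_weights - variational_weights) / np.linalg.norm(true_weights)
rel_covar = np.linalg.norm(true_covar - variational_covar) / np.linalg.norm(true_covar)

print(np.isclose(rel_mean, np.sqrt(6) / np.sqrt(21)))   # True
print(np.isclose(rel_covar, np.sqrt(7) / np.sqrt(15)))  # True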
{
"content": "import logging\nimport os\nfrom pathlib import Path\nfrom typing import Dict, List, Optional\nimport warnings\n\nimport click\nimport numpy as np\nimport optuna\nimport pandas as pd\nfrom pytorch_lightning import Trainer\nfrom sklearn.model_selection import KFold\nfrom sklearn.preprocessing import StandardScaler\nimport torch\nfrom torch import Tensor\nfrom torch.utils.data import TensorDataset, DataLoader\nimport yaml\n\nfrom experiments.utils.factory import ACTIVATION_FACTORY\nfrom swafa.callbacks import FactorAnalysisVariationalInferenceCallback\nfrom swafa.models import FeedForwardGaussianNet\nfrom swafa.utils import set_weights\n\n# turn off annoying pytorch lightning logging and warnings\nlogging.getLogger('pytorch_lightning').setLevel(logging.ERROR)\nwarnings.filterwarnings('ignore')\n\n\nclass Objective:\n \"\"\"\n An objective function which can be used in an Optuna study to optimise the hyperparameters of a Gaussian neural\n network.\n\n The performance of each hyperparameter configuration is estimated via cross-validation. In each fold, the training\n data is used to approximate the posterior distribution of the weights of the neural network via the VIFA algorithm.\n Then the approximate posterior is used to compute a Bayesian model average for each validation point and compute\n the log-likelihood relative to the actual validation targets.\n\n The hyperparameters which are tuned are the learning rate with which to update the parameters of the posterior, the\n precision of the prior of the posterior and the precision of the additive noise distribution of the targets.\n Hyperparameter values are sampled from log-uniform distributions. The user must define the hyperparameter ranges.\n\n Note: since the primary metric is the log-likelihood, this objective should be MAXIMISED.\n\n Args:\n dataset: The features and targets to use to perform cross-validation, of shape (n_rows, n_features + 1). Target\n should be in final column.\n n_cv_folds: The number of cross-validation folds.\n latent_dim: The latent dimension of the factor analysis model used to approximate the posterior.\n n_gradients_per_update: The number of mini-batch gradients to use to form the expectation of the true gradient\n for each parameter update of the posterior.\n max_grad_norm: Maximum norm for gradients which are used to update the parameters of the posterior.\n batch_size: The batch size to use while training.\n n_epochs: The number of training epochs.\n learning_rate_range: The minimum and maximum values of the hyperparameter range of the learning rate with which\n to update the parameters of the posterior.\n prior_precision_range: The minimum and maximum values of the hyperparameter range of the precision of the prior\n of the posterior.\n noise_precision_range: The minimum and maximum values of the hyperparameter range of the precision of the\n additive noise distribution of the targets.\n n_bma_samples: The number of samples in each Bayesian model averaging when testing.\n hidden_dims: The dimension of each hidden layer in the neural network. hidden_dims[i] is the dimension of the\n i-th hidden layer. If None, the input will be connected directly to the output.\n hidden_activation_fn: The activation function to apply to the output of each hidden layer. 
If None, will be set\n to the identity activation function.\n random_seed: The random seed to use when initialising the parameters of the posterior.\n\n Attributes:\n k_fold: (KFold) Defines the split of each cross-validation fold.\n \"\"\"\n def __init__(\n self,\n dataset: pd.DataFrame,\n n_cv_folds: int,\n latent_dim: int,\n n_gradients_per_update: int,\n max_grad_norm: float,\n batch_size: int,\n n_epochs: int,\n learning_rate_range: List[float],\n prior_precision_range: List[float],\n noise_precision_range: List[float],\n n_bma_samples: int,\n hidden_dims: Optional[List[int]] = None,\n hidden_activation_fn: Optional[torch.nn.Module] = None,\n random_seed: Optional[int] = None,\n ):\n self._dataset = dataset\n self.latent_dim = latent_dim\n self.n_gradients_per_update = n_gradients_per_update\n self.max_grad_norm = max_grad_norm\n self.batch_size = batch_size\n self.n_epochs = n_epochs\n self.learning_rate_range = learning_rate_range\n self.prior_precision_range = prior_precision_range\n self.noise_precision_range = noise_precision_range\n self.n_bma_samples = n_bma_samples\n self.hidden_dims = hidden_dims\n self.hidden_activation_fn = hidden_activation_fn\n self.random_seed = random_seed\n\n self.k_fold = KFold(n_splits=n_cv_folds)\n\n @property\n def dataset(self) -> pd.DataFrame:\n return self._dataset\n\n @dataset.setter\n def dataset(self, value: pd.DataFrame):\n self._dataset = value\n\n def __call__(self, trial: optuna.Trial):\n \"\"\"\n Sample hyperparameters and run cross-validation.\n\n Args:\n trial: An optuna trial from which to sample hyperparameters.\n\n Returns:\n The average log-likelihood over the cross-validation folds. Higher is better (maximisation).\n \"\"\"\n learning_rate = trial.suggest_loguniform('learning_rate', *self.learning_rate_range)\n prior_precision = trial.suggest_loguniform('prior_precision', *self.prior_precision_range)\n noise_precision = trial.suggest_loguniform('noise_precision', *self.noise_precision_range)\n\n cv_ll, _ = self.cross_validate(learning_rate, prior_precision, noise_precision)\n\n return cv_ll\n\n def cross_validate(self, learning_rate: float, prior_precision: float, noise_precision: float) -> (float, float):\n \"\"\"\n Cross-validate a neural network for the given hyperparameters.\n\n In each fold, use the training data to approximate the posterior distribution of the weights of a neural network\n via the VIFA algorithm. 
Then use the posterior to compute a Bayesian model average for each test point and\n compute metrics relative to the actual targets.\n\n Args:\n learning_rate: The learning rate with which to update the parameters of the VIFA callback.\n prior_precision: The precision of the prior of the posterior.\n noise_precision: The precision of the additive noise distribution of the targets.\n\n Returns:\n The average log-likelihood and root mean squared error across all validation folds.\n \"\"\"\n ll_list = []\n rmse_list = []\n for train_index, test_index in self.k_fold.split(self.dataset):\n ll, rmse = self.train_and_test(train_index, test_index, learning_rate, prior_precision, noise_precision)\n ll_list.append(ll)\n rmse_list.append(rmse)\n\n return np.mean(ll_list), np.mean(rmse_list)\n\n def train_and_test(self, train_index: np.ndarray, test_index: np.ndarray, learning_rate: float,\n prior_precision: float, noise_precision: float) -> (float, float):\n \"\"\"\n Train and test a neural network for the given train and test indices and hyperparameters.\n\n Using the training data, approximate the posterior distribution of the weights of a neural network via the VIFA\n algorithm. Then use the posterior to compute a Bayesian model average for each test point and compute metrics\n relative to the actual targets.\n\n Args:\n train_index: Train row indices of self.dataset, of shape (n_train,).\n test_index: Test row indices of self.dataset, of shape (n_test,).\n learning_rate: The learning rate with which to update the parameters of the VIFA callback.\n prior_precision: The precision of the prior of the posterior.\n noise_precision: The precision of the additive noise distribution of the targets.\n\n Returns:\n The mean log-likelihood and root mean squared error of the Bayesian model averages relative to the true\n targets of the test data.\n \"\"\"\n train_dataset = self.dataset.iloc[train_index, :]\n test_dataset = self.dataset.iloc[test_index, :]\n\n X_train, y_train, scaler = self.fit_transform_features_and_targets(train_dataset)\n\n X_test, y_test = self.transform_features_and_targets(test_dataset, scaler)\n\n y_mean = scaler.mean_[-1]\n y_scale = scaler.scale_[-1]\n\n standardised_noise_precision = self.standardise_noise_precision(noise_precision, y_scale)\n\n model, variational_callback = self.train_model(\n X_train, y_train, learning_rate, prior_precision, standardised_noise_precision,\n )\n\n ll, rmse = self.test_model(model, variational_callback, X_test, y_test, y_mean, y_scale)\n\n return ll, rmse\n\n def fit_transform_features_and_targets(self, dataset: pd.DataFrame) -> (Tensor, Tensor, StandardScaler):\n \"\"\"\n Fit a standard scaler to the given dataset and use it to scale the data.\n\n It is assumed that the target is in the final column of the dataset and all other columns are features.\n\n Args:\n dataset: Features and targets, of shape (n_rows, n_features + 1). 
Target should be in final column.\n\n Returns:\n The transformed features of shape (n_rows, n_features), and transformed targets of shape (n_rows,) and the\n scaler used to transform the data.\n \"\"\"\n scaler = StandardScaler()\n scaler.fit(dataset.values)\n\n X, y = self.transform_features_and_targets(dataset, scaler)\n\n return X, y, scaler\n\n @staticmethod\n def transform_features_and_targets(dataset: pd.DataFrame, scaler: StandardScaler) -> (Tensor, Tensor):\n \"\"\"\n Transform features and targets in the given dataset using the given scaler.\n\n It is assumed that the target is in the final column of the dataset and all other columns are features.\n\n Args:\n dataset: Features and targets, of shape (n_rows, n_features + 1). Target should be in final column.\n scaler: A scaler which has already been fit to training data.\n\n Returns:\n The transformed features of shape (n_rows, n_features), and transformed targets of shape (n_rows,).\n \"\"\"\n standardised_dataset = scaler.transform(dataset.values)\n\n X = torch.from_numpy(standardised_dataset[:, :-1]).float()\n y = torch.from_numpy(standardised_dataset[:, -1]).float()\n\n return X, y\n\n @staticmethod\n def standardise_noise_precision(noise_precision: float, y_scale: float) -> float:\n \"\"\"\n Suppose the original targets were standardised by dividing them by sigma. Then the variance of the standardised\n noise should be old_variance / sigma^2. Hence, the precision of the standardised noise is\n sigma^2 / old_variance = sigma^2 * precision.\n\n Args:\n noise_precision: The precision of the additive noise distribution of the non-standardised targets.\n y_scale: The standard deviation of the non-standardised training target.\n\n Returns:\n The precision of the additive noise distribution of the standardised targets.\n \"\"\"\n return noise_precision * (y_scale ** 2)\n\n def train_model(self, X: Tensor, y: Tensor, learning_rate: float, prior_precision: float, noise_precision: float,\n ) -> (FeedForwardGaussianNet, FactorAnalysisVariationalInferenceCallback):\n \"\"\"\n Given the input data, approximate the posterior distribution of the weights of a neural network via the VIFA\n algorithm.\n\n Args:\n X: The features, of shape (n_rows, n_features).\n y: The targets, of shape (n_rows,).\n learning_rate: The learning rate with which to update the parameters of the VIFA callback.\n prior_precision: The precision of the prior of the posterior.\n noise_precision: The precision of the additive noise distribution of the targets.\n\n Returns:\n The model and the callback. 
The callback can be used to sample weight vectors for the model from the\n approximate posterior.\n \"\"\"\n n_samples, n_features = X.shape\n\n optimiser_kwargs = dict(lr=learning_rate)\n\n model = FeedForwardGaussianNet(\n input_dim=n_features,\n hidden_dims=self.hidden_dims,\n hidden_activation_fn=self.hidden_activation_fn,\n loss_multiplier=n_samples,\n target_variance=1 / noise_precision,\n random_seed=self.random_seed,\n )\n\n variational_callback = FactorAnalysisVariationalInferenceCallback(\n latent_dim=self.latent_dim,\n precision=prior_precision,\n n_gradients_per_update=self.n_gradients_per_update,\n optimiser_class=torch.optim.Adam,\n bias_optimiser_kwargs=optimiser_kwargs,\n factors_optimiser_kwargs=optimiser_kwargs,\n noise_optimiser_kwargs=optimiser_kwargs,\n max_grad_norm=self.max_grad_norm,\n random_seed=self.random_seed,\n )\n\n dataset = TensorDataset(X, y)\n dataloader = DataLoader(dataset, batch_size=self.batch_size, drop_last=True, shuffle=True)\n\n trainer = Trainer(\n max_epochs=self.n_epochs,\n callbacks=variational_callback,\n weights_summary=None,\n progress_bar_refresh_rate=0,\n )\n\n trainer.fit(model, train_dataloader=dataloader)\n\n return model, variational_callback\n\n def test_model(self, model: FeedForwardGaussianNet,\n variational_callback: FactorAnalysisVariationalInferenceCallback,\n X: Tensor, y: Tensor, y_mean: float, y_scale: float) -> (float, float):\n \"\"\"\n Use the given model and variational callback to compute a Bayesian model average for each input and compute\n metrics relative to actual targets.\n\n Note: it is assumed that the model was fit to a standardised target variable (zero mean and unit standard\n deviation). However, metrics for non-standardised predictions will be computed.\n\n Args:\n model: The model to use to make predictions.\n variational_callback: A variational callback which can be used to sample weight vectors for the model.\n X: The features for which to compute the Bayesian model average, of shape (n_rows, n_features).\n y: The standardised training targets, of shape (n_rows,)\n y_mean: The mean of the non-standardised training target.\n y_scale: The standard deviation of the non-standardised training target.\n\n Returns:\n The mean log-likelihood and root mean squared error of the Bayesian model averages relative to the\n non-standardised targets.\n \"\"\"\n y_original = self.de_standardise_target(y, y_mean, y_scale)\n\n mu, var = self.compute_bayesian_model_average(model, variational_callback, X, y_mean, y_scale)\n\n return self.compute_metrics(y_original, mu, var)\n\n @staticmethod\n def compute_metrics(y: Tensor, mu: Tensor, var: Tensor) -> (float, float):\n \"\"\"\n Compute metrics given the true targets and the predicted mean and variance of each target.\n\n Args:\n y: The true targets, of shape (n_rows,).\n mu: The predicted mean of each target, of shape (n_rows,).\n var: The predicted variance of each target, of shape (n_rows,).\n\n Returns:\n The mean log-likelihood and root mean squared error of the predictions relative to the true targets.\n \"\"\"\n nll_fn = torch.nn.GaussianNLLLoss(reduction='mean', full=True)\n ll = -nll_fn(mu, y, var).item()\n\n mse_fn = torch.nn.MSELoss(reduction='mean')\n rmse = mse_fn(mu, y).sqrt().item()\n\n return ll, rmse\n\n def compute_bayesian_model_average(self, model: FeedForwardGaussianNet,\n variational_callback: FactorAnalysisVariationalInferenceCallback,\n X: Tensor, y_mean: float, y_scale: float) -> (Tensor, Tensor):\n \"\"\"\n Use the given model and variational callback 
to compute a Bayesian model average for each input.\n\n The Bayesian model average is constructed by making self.n_bma_samples predictions for each input - using\n different weight vectors sampled from the variational callback - and then computing the mean and variance of the\n predictions.\n\n Note: it is assumed that the model was fit to a standardised target variable (zero mean and unit standard\n deviation). However, non-standardised predictions will be returned.\n\n Args:\n model: The model to use to make predictions.\n variational_callback: A variational callback which can be used to sample weight vectors for the model.\n X: The features for which to compute the Bayesian model average, of shape (n_rows, n_features).\n y_mean: The mean of the non-standardised training target.\n y_scale: The standard deviation of the non-standardised training target.\n\n Returns:\n The mean and variance of the predictions for each input, both of shape (n_rows,). Note that these are\n non-standardised.\n \"\"\"\n ys = torch.hstack(\n [\n self.predict(model, variational_callback, X, y_mean, y_scale).unsqueeze(dim=1)\n for _ in range(self.n_bma_samples)\n ]\n )\n\n return ys.mean(dim=1), ys.var(dim=1)\n\n def predict(self, model: FeedForwardGaussianNet, variational_callback: FactorAnalysisVariationalInferenceCallback,\n X: Tensor, y_mean: float, y_scale: float) -> Tensor:\n \"\"\"\n Sample a model weight vector from the variational callback and use it to make predictions for the given data.\n\n Note: it is assumed that the model was fit to a standardised target variable (zero mean and unit standard\n deviation). However, non-standardised predictions will be returned.\n\n Args:\n model: The model to use to make predictions.\n variational_callback: A variational callback which can be used to sample weight vectors for the model.\n X: The features to make predictions for, of shape (n_rows, n_features).\n y_mean: The mean of the non-standardised training target.\n y_scale: The standard deviation of the non-standardised training target.\n\n Returns:\n The predictions, of shape (n_rows,). 
Note that these are non-standardised.\n \"\"\"\n weights = variational_callback.sample_weight_vector()\n set_weights(model, weights)\n y_pred, _ = model(X)\n\n return self.de_standardise_target(y_pred, y_mean, y_scale)\n\n @staticmethod\n def de_standardise_target(y: Tensor, y_mean: float, y_scale: float) -> Tensor:\n \"\"\"\n De-standardise the target variable by multiplying by the scaling factor and adding the mean.\n\n Args:\n y: The standardised target, of shape (n_rows,)\n y_mean: The mean of the non-standardised training target.\n y_scale: The standard deviation of the non-standardised training target.\n\n Returns:\n The non-standardised target, of shape (n_rows,).\n \"\"\"\n return y * y_scale + y_mean\n\n\ndef run_experiment(\n dataset: pd.DataFrame,\n n_train_test_splits: int,\n train_fraction: float,\n n_hyperparameter_trials: int,\n n_cv_folds: int,\n latent_dim: int,\n n_gradients_per_update: int,\n max_grad_norm: float,\n batch_size: int,\n n_epochs: int,\n learning_rate_range: List[float],\n prior_precision_range: List[float],\n noise_precision_range: List[float],\n n_bma_samples: int,\n hidden_dims: Optional[List[int]] = None,\n hidden_activation_fn: Optional[torch.nn.Module] = None,\n data_split_random_seed: Optional[int] = None,\n test: bool = False,\n) -> pd.DataFrame:\n \"\"\"\n Run several trials for different train/test splits of the dataset.\n\n In each trial, using the training data only, run a study to select the best hyperpararameters of an approximate\n posterior distribution of the weights of a neural network trained via the VIFA algorithm. Optionally, use the best\n hyperparameters to fit the posterior to all the training data and compute metrics on the test set.\n\n The hyperparameters which are tuned are the learning rate with which to update the parameters of the posterior, the\n precision of the prior of the posterior and the precision of the additive noise distribution of the targets.\n Hyperparameter values are sampled from log-uniform distributions. The user must define the hyperparameter ranges.\n\n Return the mean and standard error of each metric across all trials.\n\n Args:\n dataset: The features and targets to use to perform training, cross-validation and testing, of shape\n (n_rows, n_features + 1). Target should be in final column.\n n_train_test_splits: The number of random splits of the dataset. 
For each split, run a hyperparameter study and\n (optionally) test the best set of hyperparameters.\n train_fraction: The fraction of the dataset to include in the training set of each split.\n n_hyperparameter_trials: The number of rounds of hyperparameter optimisation in each study.\n n_cv_folds: The number of cross-validation folds in each hyperparameter trial.\n latent_dim: The latent dimension of the factor analysis model used to approximate the posterior.\n n_gradients_per_update: The number of mini-batch gradients to use to form the expectation of the true gradient\n for each parameter update of the posterior.\n max_grad_norm: Maximum norm for gradients which are used to update the parameters of the posterior.\n batch_size: The batch size to use while training.\n n_epochs: The number of training epochs.\n learning_rate_range: The minimum and maximum values of the hyperparameter range of the learning rate with which\n to update the parameters of the posterior.\n prior_precision_range: The minimum and maximum values of the hyperparameter range of the precision of the prior\n of the posterior.\n noise_precision_range: The minimum and maximum values of the hyperparameter range of the precision of the\n additive noise distribution of the targets.\n n_bma_samples: The number of samples in each Bayesian model averaging when testing.\n hidden_dims: The dimension of each hidden layer in the neural network. hidden_dims[i] is the dimension of the\n i-th hidden layer. If None, the input will be connected directly to the output.\n hidden_activation_fn: The activation function to apply to the output of each hidden layer. If None, will be set\n to the identity activation function.\n data_split_random_seed: The random seed to use to construct the train/test splits.\n test: Whether or not to compute test results.\n\n Returns:\n The mean and standard error of the cross-validated log-likelihood (val_ll) and root mean squared error\n (val_rmse) corresponding to the best hyperparameters. 
Also, if test=True, the mean and standard error of the\n test log-likelihood (test_ll) and test root mean squared error (test_rmse).\n \"\"\"\n np.random.seed(data_split_random_seed)\n train_test_indices = [train_test_split(dataset, train_fraction) for _ in range(n_train_test_splits)]\n\n results = []\n for i, train_test in enumerate(train_test_indices):\n print(f'Running train/test split {i + 1} of {n_train_test_splits}...')\n train_index, test_index = train_test\n\n trial_results = run_trial(\n dataset=dataset,\n train_index=train_index,\n test_index=test_index,\n n_hyperparameter_trials=n_hyperparameter_trials,\n n_cv_folds=n_cv_folds,\n latent_dim=latent_dim,\n n_gradients_per_update=n_gradients_per_update,\n max_grad_norm=max_grad_norm,\n batch_size=batch_size,\n n_epochs=n_epochs,\n learning_rate_range=learning_rate_range,\n prior_precision_range=prior_precision_range,\n noise_precision_range=noise_precision_range,\n n_bma_samples=n_bma_samples,\n hidden_dims=hidden_dims,\n hidden_activation_fn=hidden_activation_fn,\n model_random_seed=i,\n test=test,\n )\n\n results.append(trial_results)\n\n return aggregate_results(pd.DataFrame(results))\n\n\ndef run_trial(\n dataset: pd.DataFrame,\n train_index: np.ndarray,\n test_index: np.ndarray,\n n_hyperparameter_trials: int,\n n_cv_folds: int,\n latent_dim: int,\n n_gradients_per_update: int,\n max_grad_norm: float,\n batch_size: int,\n n_epochs: int,\n learning_rate_range: List[float],\n prior_precision_range: List[float],\n noise_precision_range: List[float],\n n_bma_samples: int,\n hidden_dims: Optional[List[int]] = None,\n hidden_activation_fn: Optional[torch.nn.Module] = None,\n model_random_seed: Optional[int] = None,\n test: bool = False,\n) -> Dict[str, float]:\n \"\"\"\n Run a hyperparameter study and testing (optional) for the given train/test split of the dataset.\n\n In each trial, using the training data only, run a study to select the best hyperpararameters of an approximate\n posterior distribution of the weights of a neural network trained via the VIFA algorithm. Optionally, use the best\n hyperparameters to fit the posterior to all the training data and compute metrics on the test set.\n\n The hyperparameters which are tuned are the learning rate with which to update the parameters of the posterior, the\n precision of the prior of the posterior and the precision of the additive noise distribution of the targets.\n Hyperparameter values are sampled from log-uniform distributions. The user must define the hyperparameter ranges.\n\n Args:\n dataset: The features and targets to use to perform training, cross-validation and testing, of shape\n (n_rows, n_features + 1). 
Target should be in final column.\n train_index: Train row indices of the dataset, of shape (n_train,).\n test_index: Test row indices of the dataset, of shape (n_test,).\n n_hyperparameter_trials: The number of rounds of hyperparameter optimisation.\n n_cv_folds: The number of cross-validation folds.\n latent_dim: The latent dimension of the factor analysis model used to approximate the posterior.\n n_gradients_per_update: The number of mini-batch gradients to use to form the expectation of the true gradient\n for each parameter update of the posterior.\n max_grad_norm: Maximum norm for gradients which are used to update the parameters of the posterior.\n batch_size: The batch size to use while training.\n n_epochs: The number of training epochs.\n learning_rate_range: The minimum and maximum values of the hyperparameter range of the learning rate with which\n to update the parameters of the posterior.\n prior_precision_range: The minimum and maximum values of the hyperparameter range of the precision of the prior\n of the posterior.\n noise_precision_range: The minimum and maximum values of the hyperparameter range of the precision of the\n additive noise distribution of the targets.\n n_bma_samples: The number of samples in each Bayesian model averaging when testing.\n hidden_dims: The dimension of each hidden layer in the neural network. hidden_dims[i] is the dimension of the\n i-th hidden layer. If None, the input will be connected directly to the output.\n hidden_activation_fn: The activation function to apply to the output of each hidden layer. If None, will be set\n to the identity activation function.\n model_random_seed: The random seed to use when initialising the parameters of the posterior.\n test: Whether or not to compute test results after running cross-validation.\n\n Returns:\n The average cross-validated log-likelihood (val_ll) and root mean squared error (val_rmse) corresponding to the\n best hyperparameters. 
Also, if test=True, the test log-likelihood (test_ll) and test root mean squared error\n (test_rmse).\n \"\"\"\n train_dataset = dataset.iloc[train_index, :]\n\n objective = Objective(\n dataset=train_dataset,\n n_cv_folds=n_cv_folds,\n latent_dim=latent_dim,\n n_gradients_per_update=n_gradients_per_update,\n max_grad_norm=max_grad_norm,\n batch_size=batch_size,\n n_epochs=n_epochs,\n learning_rate_range=learning_rate_range,\n prior_precision_range=prior_precision_range,\n noise_precision_range=noise_precision_range,\n n_bma_samples=n_bma_samples,\n hidden_dims=hidden_dims,\n hidden_activation_fn=hidden_activation_fn,\n random_seed=model_random_seed,\n )\n\n sampler = optuna.samplers.RandomSampler(seed=model_random_seed)\n study = optuna.create_study(sampler=sampler, direction='maximize')\n study.optimize(objective, n_trials=n_hyperparameter_trials)\n\n learning_rate = study.best_params['learning_rate']\n prior_precision = study.best_params['prior_precision']\n noise_precision = study.best_params['noise_precision']\n\n val_ll, val_rmse = objective.cross_validate(learning_rate, prior_precision, noise_precision)\n\n results = dict(val_ll=val_ll, val_rmse=val_rmse)\n\n if not test:\n return results\n\n objective.dataset = dataset\n\n test_ll, test_rmse = objective.train_and_test(\n train_index, test_index, learning_rate, prior_precision, noise_precision,\n )\n\n results['test_ll'] = test_ll\n results['test_rmse'] = test_rmse\n\n return results\n\n\ndef train_test_split(dataset: pd.DataFrame, train_fraction: float) -> (np.ndarray, np.ndarray):\n \"\"\"\n Sample train and test indices for the given dataset.\n\n Args:\n dataset: The dataset for which to get train and test indices. Of shape (n_rows, n_columns).\n train_fraction: The fraction of rows to include in the training set. The remaining fraction will go in the test\n set.\n\n Returns:\n Train and test indices corresponding to rows of the dataset.\n \"\"\"\n n_samples = dataset.shape[0]\n permutation = np.random.choice(range(n_samples), n_samples, replace=False)\n end_train = round(n_samples * train_fraction)\n train_index = permutation[:end_train]\n test_index = permutation[end_train:]\n\n return train_index, test_index\n\n\ndef aggregate_results(results: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Compute the mean and standard error of each column.\n\n Args:\n results: Un-aggregated results, of shape (n_rows, n_columns).\n\n Returns:\n Aggregated results, of shape (n_columns, 2). First column is the mean and second column is the standard error.\n \"\"\"\n means = results.mean()\n standard_errors = results.sem()\n\n agg_results = pd.concat([means, standard_errors], axis=1)\n agg_results.columns = ['mean', 'se']\n\n return agg_results\n\n\n@click.command()\n@click.option('--dataset-label', type=str, help='Label for the dataset. 
Used to retrieve parameters')\n@click.option('--dataset-input-path', type=str, help='The parquet file path to load the dataset')\n@click.option('--results-output-dir', type=str, help='The directory path to save the results of the experiment')\ndef main(dataset_label: str, dataset_input_path: str, results_output_dir: str):\n \"\"\"\n Run neural network prediction experiment for the given dataset.\n \"\"\"\n with open(\"params.yaml\", 'r') as fd:\n params = yaml.safe_load(fd)['neural_net_predictions']\n\n dataset_params = params['datasets'][dataset_label]\n\n dataset = pd.read_parquet(dataset_input_path)\n\n print(f'Running experiment for {dataset_label} dataset...')\n results = run_experiment(\n dataset=dataset,\n n_train_test_splits=params['n_train_test_splits'],\n train_fraction=params['train_fraction'],\n n_cv_folds=params['n_cv_folds'],\n n_hyperparameter_trials=params['n_hyperparameter_trials'],\n latent_dim=dataset_params['latent_dim'],\n n_gradients_per_update=dataset_params['n_gradients_per_update'],\n max_grad_norm=dataset_params['max_grad_norm'],\n batch_size=dataset_params['batch_size'],\n n_epochs=dataset_params['n_epochs'],\n learning_rate_range=dataset_params['learning_rate_range'],\n prior_precision_range=dataset_params['prior_precision_range'],\n noise_precision_range=dataset_params['noise_precision_range'],\n n_bma_samples=dataset_params['n_bma_samples'],\n hidden_dims=params['hidden_dims'],\n hidden_activation_fn=ACTIVATION_FACTORY[params['hidden_activation_fn']],\n data_split_random_seed=params['data_split_random_seed'],\n test=params['test'],\n )\n\n Path(results_output_dir).mkdir(parents=True, exist_ok=True)\n results.to_csv(os.path.join(results_output_dir, 'results.csv'))\n\n\nif __name__ == '__main__':\n main()\n",
"id": "10060706",
"language": "Python",
"matching_score": 7.4015960693359375,
"max_stars_count": 0,
"path": "experiments/neural_net_predictions.py"
},
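The experiment script above tunes the learning rate, prior precision and noise precision with Optuna's `RandomSampler` and then re-runs cross-validation with `study.best_params`. A minimal sketch of that search pattern, using a toy quadratic objective rather than the repository's `Objective` class:

```python
# Minimal sketch of the Optuna random-search pattern used in run_trial().
# The quadratic objective below is a stand-in, not the repository's Objective class.
import optuna


def objective(trial: optuna.Trial) -> float:
    # Log-uniform ranges, mirroring learning_rate_range / prior_precision_range style.
    learning_rate = trial.suggest_float("learning_rate", 1e-4, 1e-1, log=True)
    prior_precision = trial.suggest_float("prior_precision", 1e-3, 1e1, log=True)
    # Toy score that peaks at learning_rate=1e-2, prior_precision=1e-1.
    return -((learning_rate - 1e-2) ** 2 + (prior_precision - 1e-1) ** 2)


sampler = optuna.samplers.RandomSampler(seed=1)            # reproducible search
study = optuna.create_study(sampler=sampler, direction="maximize")
study.optimize(objective, n_trials=20)

print(study.best_params)  # best hyperparameters found by the random search
```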
{
"content": "from typing import List, Optional\n\nimport numpy as np\nimport optuna\nimport pandas as pd\nimport pytest\nfrom scipy import stats\nfrom sklearn.preprocessing import StandardScaler\nimport torch\n\nfrom experiments.neural_net_predictions import (\n aggregate_results,\n Objective,\n run_experiment,\n run_trial,\n train_test_split,\n)\nfrom swafa.callbacks import FactorAnalysisVariationalInferenceCallback\nfrom swafa.models import FeedForwardGaussianNet\nfrom swafa.utils import get_weight_dimension\n\n\nclass TestObjective:\n\n def test_unnormalise_target(self):\n objective = _init_objective()\n\n y = torch.Tensor([1, 2, 3])\n y_mean = 2\n y_scale = 5\n\n expected_y = torch.Tensor([7, 12, 17])\n\n actual_y = objective.de_standardise_target(y, y_mean, y_scale)\n\n assert torch.isclose(actual_y, expected_y).all()\n\n @pytest.mark.parametrize(\n \"n_rows, n_columns, y_mean, y_scale\",\n [\n (10, 3, 3, 5),\n (20, 5, -2, 2),\n ]\n )\n def test_predict(self, n_rows, n_columns, y_mean, y_scale):\n objective = _init_objective(n_rows, n_columns)\n model, variational_callback = _init_model_and_callback(n_rows, n_columns)\n X = torch.randn(n_rows, n_columns)\n\n y1 = objective.predict(model, variational_callback, X, y_mean, y_scale)\n y2 = objective.predict(model, variational_callback, X, y_mean, y_scale)\n\n assert y1.shape == (n_rows,)\n assert y2.shape == (n_rows,)\n assert not torch.isclose(y1, y2).all()\n\n @pytest.mark.parametrize(\n \"n_rows, n_columns, y_mean, y_scale\",\n [\n (10, 3, 3, 5),\n (20, 5, -2, 2),\n ]\n )\n def test_compute_bayesian_model_average(self, n_rows, n_columns, y_mean, y_scale):\n objective = _init_objective(n_rows, n_columns)\n model, variational_callback = _init_model_and_callback(n_rows, n_columns)\n X = torch.randn(n_rows, n_columns)\n\n mu, var = objective.compute_bayesian_model_average(model, variational_callback, X, y_mean, y_scale)\n\n assert mu.shape == (n_rows,)\n assert var.shape == (n_rows,)\n assert (var > 0).all()\n\n def test_compute_metrics(self):\n objective = _init_objective(n_rows=3)\n\n y = torch.Tensor([1, 2, 3])\n mu = torch.Tensor([2, 4, 0])\n var = torch.Tensor([1, 2, 1])\n\n expected_ll = np.mean(\n [\n -np.log(np.sqrt(var_i)) - np.log(np.sqrt(2 * np.pi)) - 0.5 * (y_i - mu_i) ** 2 / var_i\n for y_i, mu_i, var_i in zip(y.numpy(), mu.numpy(), var.numpy())\n ]\n )\n expected_rmse = np.sqrt(14 / 3)\n\n actual_ll, actual_rmse = objective.compute_metrics(y, mu, var)\n\n assert np.isclose(actual_ll, expected_ll)\n assert np.isclose(actual_rmse, expected_rmse)\n\n @pytest.mark.parametrize(\n \"n_rows, n_columns, y_mean, y_scale\",\n [\n (10, 3, 3, 5),\n (20, 5, -2, 2),\n ]\n )\n def test_test_model(self, n_rows, n_columns, y_mean, y_scale):\n objective = _init_objective(n_rows, n_columns)\n model, variational_callback = _init_model_and_callback(n_rows, n_columns)\n X = torch.randn(n_rows, n_columns)\n y = torch.randn(n_rows)\n\n ll, rmse = objective.test_model(model, variational_callback, X, y, y_mean, y_scale)\n\n assert ll < np.inf\n assert rmse < np.inf\n\n def test_train_model(self):\n n_rows = 10\n n_columns = 3\n\n objective = _init_objective(n_rows, n_columns)\n X = torch.randn(n_rows, n_columns)\n y = torch.randn(n_rows)\n\n model, variational_callback = objective.train_model(\n X, y, learning_rate=1e-3, prior_precision=1e-1, noise_precision=1e-1,\n )\n\n assert variational_callback.c is not None\n assert variational_callback.F is not None\n assert variational_callback.diag_psi is not None\n assert get_weight_dimension(model) == 
variational_callback.c.shape[0]\n\n def test_standardise_noise_precision(self):\n objective = _init_objective()\n\n noise_precision = 0.1\n y_scale = 2\n\n assert np.isclose(objective.standardise_noise_precision(noise_precision, y_scale), 0.4)\n\n @pytest.mark.parametrize(\n \"n_rows, n_features\",\n [\n (10, 3),\n (20, 5),\n ]\n )\n def test_transform_features_and_targets(self, n_rows, n_features):\n objective = _init_objective(n_rows, n_features)\n dataset = pd.DataFrame(np.random.randn(n_rows, n_features + 1))\n scaler = StandardScaler()\n scaler.fit(dataset.values)\n\n X, y = objective.transform_features_and_targets(dataset, scaler)\n\n assert torch.isclose(X.mean(dim=0), torch.zeros(n_features), atol=1e-5).all()\n assert torch.isclose(X.std(dim=0), torch.ones(n_features), atol=1e-1).all()\n\n assert torch.isclose(y.mean(), torch.zeros(1), atol=1e-5).all()\n assert torch.isclose(y.std(), torch.ones(1), atol=1e-1).all()\n\n @pytest.mark.parametrize(\n \"n_rows, n_features\",\n [\n (10, 3),\n (20, 5),\n ]\n )\n def test_fit_transform_features_and_targets(self, n_rows, n_features):\n objective = _init_objective(n_rows, n_features)\n dataset = pd.DataFrame(np.random.randn(n_rows, n_features + 1))\n\n X, y, scaler = objective.fit_transform_features_and_targets(dataset)\n\n assert torch.isclose(X.mean(dim=0), torch.zeros(n_features), atol=1e-5).all()\n assert torch.isclose(X.std(dim=0), torch.ones(n_features), atol=1e-1).all()\n\n assert torch.isclose(y.mean(), torch.zeros(1), atol=1e-5).all()\n assert torch.isclose(y.std(), torch.ones(1), atol=1e-1).all()\n\n assert scaler.mean_ is not None\n\n def test_train_and_test(self):\n objective = _init_objective(n_rows=40)\n\n train_index = np.arange(30)\n test_index = np.arange(30, 40)\n\n ll, rmse = objective.train_and_test(\n train_index, test_index, learning_rate=1e-3, prior_precision=1e-1, noise_precision=1e-1,\n )\n\n assert ll < np.inf\n assert rmse < np.inf\n\n @pytest.mark.parametrize(\"n_trials\", [1, 3])\n def test_objective_in_study(self, n_trials):\n objective = _init_objective(n_rows=40)\n study = optuna.create_study(sampler=optuna.samplers.TPESampler(seed=1), direction='maximize')\n\n study.optimize(objective, n_trials=n_trials)\n\n assert len(study.trials) == n_trials\n\n\ndef test_aggregate_results():\n a = np.array([1, 2, 3])\n b = np.array([4, 5, 6])\n\n results = pd.DataFrame({'a': a, 'b': b})\n\n expected_aggregated_results = pd.DataFrame(\n {'mean': [a.mean(), b.mean()], 'se': [stats.sem(a), stats.sem(b)]},\n index=['a', 'b'],\n )\n\n actual_aggregated_results = aggregate_results(results)\n\n assert np.isclose(actual_aggregated_results.values, expected_aggregated_results.values).all()\n assert (actual_aggregated_results.columns == expected_aggregated_results.columns).all()\n assert (actual_aggregated_results.index == expected_aggregated_results.index).all()\n\n\ndef test_train_test_split():\n n_rows = 50\n train_fraction = 0.8\n\n dataset = pd.DataFrame(np.random.randn(n_rows, 3))\n\n train_index, test_index = train_test_split(dataset, train_fraction)\n\n assert len(train_index) == 40\n assert len(test_index) == 10\n assert len(np.intersect1d(train_index, test_index)) == 0\n\n\n@pytest.mark.parametrize(\"test\", [True, False])\ndef test_run_trial(test):\n dataset = pd.DataFrame(np.random.randn(50, 3))\n\n two_trial_results = [\n run_trial(\n dataset=dataset,\n train_index=np.arange(40),\n test_index=np.arange(40, 50),\n n_hyperparameter_trials=2,\n n_cv_folds=2,\n latent_dim=2,\n n_gradients_per_update=2,\n max_grad_norm=10,\n 
batch_size=5,\n n_epochs=2,\n learning_rate_range=[1e-3, 1e-2],\n prior_precision_range=[1e-3, 1e-2],\n noise_precision_range=[1e-3, 1e-2],\n n_bma_samples=5,\n hidden_dims=[4],\n hidden_activation_fn=torch.nn.ReLU(),\n model_random_seed=1,\n test=test,\n )\n for _ in range(2)\n ]\n\n for results in two_trial_results:\n if test:\n assert set(results.keys()) == {'val_ll', 'val_rmse', 'test_ll', 'test_rmse'}\n else:\n assert set(results.keys()) == {'val_ll', 'val_rmse'}\n\n for key, val in two_trial_results[0].items():\n assert np.isclose(val, two_trial_results[1][key])\n\n\n@pytest.mark.parametrize(\"test\", [True, False])\ndef test_run_experiment(test):\n dataset = pd.DataFrame(np.random.randn(50, 3))\n\n two_experiment_results = [\n run_experiment(\n dataset=dataset,\n n_train_test_splits=2,\n train_fraction=0.8,\n n_hyperparameter_trials=2,\n n_cv_folds=2,\n latent_dim=2,\n n_gradients_per_update=2,\n max_grad_norm=10,\n batch_size=5,\n n_epochs=2,\n learning_rate_range=[1e-3, 1e-2],\n prior_precision_range=[1e-3, 1e-2],\n noise_precision_range=[1e-3, 1e-2],\n n_bma_samples=5,\n hidden_dims=[4],\n hidden_activation_fn=torch.nn.ReLU(),\n data_split_random_seed=1,\n test=test,\n )\n for _ in range(2)\n ]\n\n for results in two_experiment_results:\n assert set(results.columns) == {'mean', 'se'}\n\n if test:\n assert set(results.index) == {'val_ll', 'val_rmse', 'test_ll', 'test_rmse'}\n else:\n assert set(results.index) == {'val_ll', 'val_rmse'}\n\n assert np.isclose(two_experiment_results[0].values, two_experiment_results[1].values).all()\n\n\ndef _init_objective(\n n_rows: int = 10,\n n_columns: int = 3,\n n_cv_folds: int = 2,\n latent_dim: int = 2,\n n_gradients_per_update: int = 2,\n max_grad_norm: float = 10,\n batch_size: int = 5,\n n_epochs: int = 2,\n learning_rate_range: List[float] = None,\n prior_precision_range: List[float] = None,\n noise_precision_range: List[float] = None,\n n_bma_samples: int = 2,\n hidden_dims: Optional[List[int]] = None,\n hidden_activation_fn: Optional[torch.nn.Module] = None,\n random_seed: Optional[int] = 1,\n) -> Objective:\n learning_rate_range = learning_rate_range or [1e-3, 1e-2]\n prior_precision_range = prior_precision_range or [1e-3, 1e-2]\n noise_precision_range = noise_precision_range or [1e-3, 1e-2]\n hidden_dims = hidden_dims or [4]\n hidden_activation_fn = hidden_activation_fn or torch.nn.ReLU()\n\n dataset = pd.DataFrame(np.random.randn(n_rows, n_columns))\n\n return Objective(\n dataset=dataset,\n n_cv_folds=n_cv_folds,\n latent_dim=latent_dim,\n n_gradients_per_update=n_gradients_per_update,\n max_grad_norm=max_grad_norm,\n batch_size=batch_size,\n n_epochs=n_epochs,\n learning_rate_range=learning_rate_range,\n prior_precision_range=prior_precision_range,\n noise_precision_range=noise_precision_range,\n n_bma_samples=n_bma_samples,\n hidden_dims=hidden_dims,\n hidden_activation_fn=hidden_activation_fn,\n random_seed=random_seed,\n )\n\n\ndef _init_model_and_callback(\n n_samples: int = 10,\n n_features: int = 3,\n latent_dim: int = 2,\n prior_precision: float = 0.1,\n noise_precision: float = 0.1,\n n_gradients_per_update: int = 2,\n hidden_dims: Optional[List[int]] = None,\n hidden_activation_fn: Optional[torch.nn.Module] = None,\n random_seed: Optional[int] = 1,\n) -> (FeedForwardGaussianNet, FactorAnalysisVariationalInferenceCallback):\n hidden_dims = hidden_dims or [4]\n hidden_activation_fn = hidden_activation_fn or torch.nn.ReLU()\n\n model = FeedForwardGaussianNet(\n input_dim=n_features,\n hidden_dims=hidden_dims,\n 
hidden_activation_fn=hidden_activation_fn,\n loss_multiplier=n_samples,\n target_variance=1 / noise_precision,\n random_seed=random_seed,\n )\n\n variational_callback = FactorAnalysisVariationalInferenceCallback(\n latent_dim=latent_dim,\n precision=prior_precision,\n n_gradients_per_update=n_gradients_per_update,\n optimiser_class=torch.optim.SGD,\n random_seed=random_seed,\n )\n\n variational_callback.on_fit_start(trainer=None, pl_module=model)\n\n return model, variational_callback\n",
"id": "3047311",
"language": "Python",
"matching_score": 2.9756715297698975,
"max_stars_count": 0,
"path": "tests/test_experiments/test_neural_net_predictions.py"
},
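The `test_compute_metrics` case above spells out the per-point Gaussian log density and the RMSE it expects. The same two formulas as a standalone NumPy sketch (the helper name is illustrative, not part of the repository):

```python
# Standalone version of the metrics asserted in test_compute_metrics:
# mean Gaussian log-likelihood of the targets and RMSE of the predictive means.
import numpy as np


def gaussian_ll_and_rmse(y: np.ndarray, mu: np.ndarray, var: np.ndarray):
    ll = np.mean(-0.5 * np.log(2 * np.pi * var) - 0.5 * (y - mu) ** 2 / var)
    rmse = np.sqrt(np.mean((y - mu) ** 2))
    return ll, rmse


y = np.array([1.0, 2.0, 3.0])
mu = np.array([2.0, 4.0, 0.0])
var = np.array([1.0, 2.0, 1.0])
print(gaussian_ll_and_rmse(y, mu, var))  # RMSE equals sqrt(14 / 3), as in the test
```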
{
"content": "from typing import Any, Optional, Union\n\nfrom pytorch_lightning import Trainer, LightningModule\nfrom pytorch_lightning.callbacks import Callback\nfrom pytorch_lightning.utilities.types import STEP_OUTPUT\nimport torch\nfrom torch import Tensor\nfrom torch.optim import Optimizer, SGD\nfrom torch.autograd import Variable\n\nfrom swafa.custom_types import POSTERIOR_TYPE\nfrom swafa.utils import (\n get_callback_epoch_range,\n vectorise_weights,\n vectorise_gradients,\n get_weight_dimension,\n set_weights,\n normalise_gradient,\n)\nfrom swafa.fa import OnlineGradientFactorAnalysis\n\n\nclass WeightPosteriorCallback(Callback):\n \"\"\"\n A callback which can be used with a PyTorch Lightning Trainer to update the posterior distribution of a model's\n weights.\n\n The updates are performed using the weight iterates sampled after each mini-batch update. Each iterate can update\n the posterior separately, or alternatively, the update can be made using the average of the iterates within a fixed\n window.\n\n When this callback is used while training a model, the dimension of the posterior distribution must match the\n dimension of the model's weight space.\n\n Args:\n posterior: Posterior distribution over the weights of a PyTorch Lighting model.\n update_epoch_start: The training epoch on which to start updating the posterior. Integer indexing starts from 1.\n Can also specify a float between 0 and 1, which corresponds to the fraction of total epochs which should\n pass before starting to update the posterior.\n iterate_averaging_window_size: The size of the window for averaging weight iterates. An update will be made to\n the posterior using each window average. Setting this to 1 is equivalent to using each iterate to update\n the posterior separately.\n\n Attributes:\n first_update_epoch: The epoch on which the updates to the posterior will start.\n last_update_epoch: The epoch on which the updates to the posterior will end.\n \"\"\"\n\n def __init__(self, posterior: POSTERIOR_TYPE, update_epoch_start: Union[int, float] = 1,\n iterate_averaging_window_size: int = 1):\n error_msg = f\"update_epoch_start should be a positive integer or a float between 0 and 1, \" \\\n f\"not {update_epoch_start}\"\n if isinstance(update_epoch_start, int) and update_epoch_start < 1:\n raise ValueError(error_msg)\n if isinstance(update_epoch_start, float) and not (0 <= update_epoch_start <= 1):\n raise ValueError(error_msg)\n\n self.posterior = posterior\n self._update_epoch_start = update_epoch_start\n\n self.iterate_averaging_window_size = iterate_averaging_window_size\n self._weight_window_average = None\n self._window_index = 0\n\n self.first_update_epoch = None\n self.last_update_epoch = None\n\n def on_fit_start(self, trainer: Trainer, pl_module: LightningModule):\n \"\"\"\n Called when fit begins.\n\n Initialise the range of epochs on which the posterior will be updated and check that the dimension of the\n posterior distribution matches the dimension of the model's weight space.\n\n Also, initialise the average weight vector within the current window.\n\n Args:\n trainer: A PyTorch Lightning Trainer which trains the model.\n pl_module: The model being trained.\n \"\"\"\n self.first_update_epoch, self.last_update_epoch = get_callback_epoch_range(\n trainer, epoch_start=self._update_epoch_start,\n )\n weights = self._check_weight_dimension(pl_module)\n self._reset_weight_window_average(weights)\n\n def _check_weight_dimension(self, pl_module: LightningModule) -> Tensor:\n \"\"\"\n Check that the 
dimension of the posterior distribution matches the dimension of the model's weight space.\n\n If not, raise a RuntimeError.\n\n Args:\n pl_module: The model being trained.\n\n Returns:\n The vectorised model weights, of shape (n_weights,).\n \"\"\"\n weights = vectorise_weights(pl_module)\n weight_dim = len(weights)\n if weight_dim != self.posterior.observation_dim:\n raise RuntimeError(f\"The dimension of the model and the posterior weight distribution must match, but they \"\n f\"are {weight_dim} and {self.posterior.observation_dim}, respectively\")\n\n return weights\n\n def on_train_batch_end(self, trainer: Trainer, pl_module: LightningModule, outputs: STEP_OUTPUT, batch: Any,\n batch_idx: int, dataloader_idx: int):\n \"\"\"\n Called when the train batch ends.\n\n If within the update epoch range, update the weight iterates window average using the latest setting of the\n model's weights.\n\n If the weight iterate averaging window size has been reached, use the window average to update the posterior\n distribution.\n\n Args:\n trainer: A PyTorch Lightning Trainer which trains the model.\n pl_module: The model being trained.\n outputs: Not used.\n batch: Not used.\n batch_idx: Not used.\n dataloader_idx: Not used.\n \"\"\"\n if self.first_update_epoch <= trainer.current_epoch <= self.last_update_epoch:\n weights = vectorise_weights(pl_module)\n self._update_weight_window_average(weights)\n\n if self._window_index == self.iterate_averaging_window_size:\n self.posterior.update(self._weight_window_average)\n self._reset_weight_window_average(weights)\n\n def _update_weight_window_average(self, weights: Tensor):\n \"\"\"\n Increment window index by 1 and update the running average of the window weight iterates.\n\n Args:\n weights: The vectorised model weights, of shape (n_weights,).\n \"\"\"\n self._window_index += 1\n self._weight_window_average = \\\n self._weight_window_average + (weights - self._weight_window_average) / self._window_index\n\n def _reset_weight_window_average(self, weights: Tensor):\n \"\"\"\n Reset the window average of the weight iterates to a tensor of 0s and reset the window index to 0.\n\n Args:\n weights: The vectorised model weights, of shape (n_weights,).\n \"\"\"\n self._window_index = 0\n self._weight_window_average = torch.zeros_like(weights)\n\n\nclass FactorAnalysisVariationalInferenceCallback(Callback):\n \"\"\"\n A callback which can be used with a PyTorch Lightning Trainer to learn the parameters of a factor analysis\n variational distribution of a model's weights.\n\n The parameters are updated to minimise the Kullback-Leibler divergence between the variational distribution and the\n true posterior of the model's weights. This is done via stochastic gradient descent.\n\n See [1] for full details of the algorithm.\n\n Args:\n latent_dim: The latent dimension of the factor analysis model used as the variational distribution.\n precision: The precision of the prior of the true posterior.\n n_gradients_per_update: The number of mini-batch gradients to use to form the expectation of the true gradient\n for each parameter update.\n optimiser_class: The class of the optimiser to use for gradient updates.\n bias_optimiser_kwargs: Keyword arguments for the optimiser which updates the bias term of the factor analysis\n variational distribution. If not given, will default to dict(lr=1e-3).\n factors_optimiser_kwargs: Keyword arguments for the optimiser which updates the factor loading matrix of the\n factor analysis variational distribution. 
If not given, will default to dict(lr=1e-3).\n noise_optimiser_kwargs: Keyword arguments for the optimiser which updates the logarithm of the diagonal entries\n of the Gaussian noise covariance matrix of the factor analysis variational distribution. If not given, will\n default to dict(lr=1e-3).\n max_grad_norm: Optional maximum norm for gradients which are used to update the parameters of the variational\n distribution.\n device: The device (CPU or GPU) on which to perform the computation. If None, uses the device for the default\n tensor type.\n random_seed: The random seed for reproducibility.\n\n Attributes:\n weight_dim: An integer specifying the total number of weights in the model. Note that this is computed when the\n model is fit for the first time.\n c: The bias term of the factor analysis variational distribution. A Tensor of shape (weight_dim, 1).\n F: The factor loading matrix of the factor analysis variational distribution. A Tensor of shape\n (weight_dim, latent_dim).\n diag_psi: The diagonal entries of the Gaussian noise covariance matrix of the factor analysis variational\n distribution. A Tensor of shape (weight_dim, 1).\n\n References:\n [1] <NAME>. Extending the Bayesian Deep Learning Method MultiSWAG. MSc Thesis, University of Edinburgh,\n 2021.\n \"\"\"\n\n def __init__(self, latent_dim: int, precision: float, n_gradients_per_update: int = 1,\n optimiser_class: Optimizer = SGD, bias_optimiser_kwargs: Optional[dict] = None,\n factors_optimiser_kwargs: Optional[dict] = None, noise_optimiser_kwargs: Optional[dict] = None,\n max_grad_norm: Optional[float] = None, device: Optional[torch.device] = None,\n random_seed: Optional[int] = None):\n self.latent_dim = latent_dim\n self.precision = precision\n self.n_gradients_per_update = n_gradients_per_update\n self.optimiser_class = optimiser_class\n self.bias_optimiser_kwargs = bias_optimiser_kwargs or dict(lr=1e-3)\n self.factors_optimiser_kwargs = factors_optimiser_kwargs or dict(lr=1e-3)\n self.noise_optimiser_kwargs = noise_optimiser_kwargs or dict(lr=1e-3)\n self.max_grad_norm = max_grad_norm\n self.device = device\n self.random_seed = random_seed\n\n self.weight_dim = None\n self.c = None\n self.F = None\n self.diag_psi = None\n\n self._I = torch.eye(latent_dim, device=device)\n self._log_diag_psi = None\n self._h = None\n self._z = None\n self._sqrt_diag_psi_dot_z = None\n self._A = None\n self._B = None\n self._C = None\n self._var_grad_wrt_F = None\n self._var_grad_wrt_log_diag_psi = None\n self._prior_grad_wrt_c = None\n self._prior_grad_wrt_F = None\n self._prior_grad_wrt_log_diag_psi = None\n\n self._optimiser = None\n self._batch_counter = 0\n\n def on_fit_start(self, trainer: Trainer, pl_module: LightningModule):\n \"\"\"\n Called when fit begins.\n\n If parameters of variational distribution have not already been initialised, initialise them and the optimiser\n which will update them.\n\n Args:\n trainer: A PyTorch Lightning Trainer which trains the model.\n pl_module: The model being trained.\n \"\"\"\n if self.weight_dim is None:\n self.weight_dim = get_weight_dimension(pl_module)\n self._init_variational_params()\n self._update_expected_gradients()\n self._init_optimiser()\n\n def on_batch_start(self, trainer: Trainer, pl_module: LightningModule):\n \"\"\"\n Called when the training batch begins.\n\n Sample weight vector from the variational distribution and use it to set the weights of the neural network.\n\n Args:\n trainer: A PyTorch Lightning Trainer which trains the model.\n pl_module: The model being 
trained.\n \"\"\"\n weights = self.sample_weight_vector()\n set_weights(pl_module, weights)\n\n def on_after_backward(self, trainer: Trainer, pl_module: LightningModule):\n \"\"\"\n Called after loss.backward() and before optimisers are stepped.\n\n Use the back propagated gradient of the network's loss wrt the network's weights to compute the gradient wrt\n the parameters of the variational distribution. Accumulate these gradients.\n\n Periodically, use the accumulated gradients to approximate the expected gradients and update the parameters of\n the variational distribution.\n\n Args:\n trainer: A PyTorch Lightning Trainer which trains the model.\n pl_module: The model being trained.\n \"\"\"\n grad_weights = vectorise_gradients(pl_module)[:, None]\n self._accumulate_gradients(grad_weights)\n\n self._batch_counter += 1\n if self._batch_counter % self.n_gradients_per_update == 0:\n self._update_variational_params()\n self._update_expected_gradients()\n\n def _init_variational_params(self):\n \"\"\"\n Initialise the parameters of the factor analysis variational distribution.\n \"\"\"\n fa = OnlineGradientFactorAnalysis(\n observation_dim=self.weight_dim,\n latent_dim=self.latent_dim,\n device=self.device,\n random_seed=self.random_seed,\n )\n\n self.c = Variable(fa.c.data, requires_grad=False) # we will compute our own gradients\n self.F = Variable(fa.F.data, requires_grad=False)\n self.diag_psi = fa.diag_psi\n self._log_diag_psi = Variable(torch.log(self.diag_psi), requires_grad=False)\n\n self.c.grad = torch.zeros_like(self.c.data)\n self.F.grad = torch.zeros_like(self.F.data)\n self._log_diag_psi.grad = torch.zeros_like(self._log_diag_psi.data)\n\n def _init_optimiser(self):\n \"\"\"\n Initialise the optimiser which will be used to update the parameters of the variational distribution.\n \"\"\"\n self._optimiser = self.optimiser_class(\n [\n {'params': [self.c], **self.bias_optimiser_kwargs},\n {'params': [self.F], **self.factors_optimiser_kwargs},\n {'params': [self._log_diag_psi], **self.noise_optimiser_kwargs},\n ],\n )\n\n def sample_weight_vector(self) -> Tensor:\n \"\"\"\n Generate a single sample of the neural network's weight vector from the variational distribution.\n\n Returns:\n Sample of shape (self.weight_dim,).\n \"\"\"\n self._h = torch.normal(torch.zeros(self.latent_dim), torch.ones(self.latent_dim))[:, None]\n self._z = torch.normal(torch.zeros(self.weight_dim), torch.ones(self.weight_dim))[:, None]\n self._sqrt_diag_psi_dot_z = torch.sqrt(self.diag_psi) * self._z\n return (self.F.mm(self._h) + self.c + self._sqrt_diag_psi_dot_z).squeeze(dim=1)\n\n def _accumulate_gradients(self, grad_weights: Tensor):\n \"\"\"\n Accumulate gradients wrt the parameters of the variational distribution.\n\n Args:\n grad_weights: The back propagated gradient of the network's loss wrt the network's weights. Of shape\n (self.weight_dim, 1).\n \"\"\"\n self.c.grad += self._compute_gradient_wrt_c(grad_weights)\n self.F.grad += self._compute_gradient_wrt_F(grad_weights)\n self._log_diag_psi.grad += self._compute_gradient_wrt_log_diag_psi(grad_weights)\n\n def _compute_gradient_wrt_c(self, grad_weights: Tensor) -> Tensor:\n \"\"\"\n Compute the gradient of the variational objective wrt the bias term of the factor analysis variational\n distribution.\n\n Args:\n grad_weights: The back propagated gradient of the network's loss wrt the network's weights. 
Of shape\n (self.weight_dim, 1).\n\n Returns:\n The gradient of the variational objective wrt the bias term of the factor analysis variational\n distribution. Of shape (self.weight_dim, 1).\n \"\"\"\n return -self._prior_grad_wrt_c + grad_weights\n\n def _compute_gradient_wrt_F(self, grad_weights: Tensor) -> Tensor:\n \"\"\"\n Compute the gradient of the variational objective wrt the factors matrix of the factor analysis variational\n distribution.\n\n Args:\n grad_weights: The back propagated gradient of the network's loss wrt the network's weights. Of shape\n (self.weight_dim, 1).\n\n Returns:\n The gradient of the variational objective wrt the factors matrix of the factor analysis variational\n distribution. Of shape (self.weight_dim, self.latent_dim).\n \"\"\"\n loss_grad = self._compute_loss_gradient_wrt_F(grad_weights)\n\n return self._var_grad_wrt_F - self._prior_grad_wrt_F + loss_grad\n\n def _compute_loss_gradient_wrt_F(self, grad_weights: Tensor) -> Tensor:\n \"\"\"\n Compute the gradient of the network's loss wrt the factors matrix.\n\n Args:\n grad_weights: The back propagated gradient of the network's loss wrt the network's weights. Of shape\n (self.weight_dim, 1).\n\n Returns:\n The gradient of the network's loss wrt the factors matrix. Of shape (self.weight_dim, self.latent_dim).\n \"\"\"\n return grad_weights.mm(self._h.t())\n\n def _compute_gradient_wrt_log_diag_psi(self, grad_weights: Tensor) -> Tensor:\n \"\"\"\n Compute the gradient of the variational objective wrt the logarithm of the diagonal of the noise covariance\n matrix of the factor analysis variational distribution.\n\n Args:\n grad_weights: The back propagated gradient of the network's loss wrt the network's weights. Of shape\n (self.weight_dim, 1).\n\n Returns:\n The gradient of the variational objective wrt the logarithm of the diagonal of the noise covariance\n matrix of the factor analysis variational distribution. Of shape (self.weight_dim, 1).\n \"\"\"\n loss_grad = self._compute_loss_gradient_wrt_log_diag_psi(grad_weights)\n\n return self._var_grad_wrt_log_diag_psi - self._prior_grad_wrt_log_diag_psi + loss_grad\n\n def _compute_loss_gradient_wrt_log_diag_psi(self, grad_weights: Tensor) -> Tensor:\n \"\"\"\n Compute the gradient of the network's loss wrt the logarithm of the diagonal of the noise covariance matrix.\n\n Args:\n grad_weights: The back propagated gradient of the network's loss wrt the network's weights. Of shape\n (self.weight_dim, 1).\n\n Returns:\n The gradient of the network's loss wrt the logarithm of the diagonal of the noise covariance matrix. 
Of\n shape (self.weight_dim, 1).\n \"\"\"\n return 0.5 * grad_weights * self._sqrt_diag_psi_dot_z\n\n def _update_variational_params(self):\n \"\"\"\n Update the parameters of the factor analysis variational distribution.\n\n This is done by using the accumulated gradients to approximate the expected gradients and then performing a\n gradient step.\n\n After performing the updates, the gradients are reset to zero.\n \"\"\"\n self._average_and_normalise_gradient(self.c)\n self._average_and_normalise_gradient(self.F)\n self._average_and_normalise_gradient(self._log_diag_psi)\n\n self._optimiser.step()\n self._optimiser.zero_grad()\n\n self.diag_psi = torch.exp(self._log_diag_psi)\n\n def _average_and_normalise_gradient(self, var: Variable):\n \"\"\"\n Average the gradients accumulated in the variable by dividing by self.n_gradients_per_update and normalise if\n required.\n\n Args:\n var: The variable whose gradient to average and normalise.\n \"\"\"\n var.grad /= self.n_gradients_per_update\n\n if self.max_grad_norm is not None:\n var.grad = normalise_gradient(var.grad, self.max_grad_norm)\n\n def _update_expected_gradients(self):\n \"\"\"\n Update the expected gradients used in the algorithm which do not depend on the sampled network weights.\n \"\"\"\n self._update_A()\n self._update_B()\n self._update_C()\n self._update_variational_gradient_wrt_F()\n self._update_variational_gradient_wrt_log_diag_psi()\n self._update_prior_gradient_wrt_c()\n self._update_prior_gradient_wrt_F()\n self._update_prior_gradient_wrt_log_diag_psi()\n\n def _update_A(self):\n \"\"\"\n Update A = psi^(-1) * F.\n \"\"\"\n diag_inv_psi = 1 / self.diag_psi\n self._A = diag_inv_psi * self.F\n\n def _update_B(self):\n \"\"\"\n Update B = Ft * A.\n \"\"\"\n self._B = self.F.t().mm(self._A)\n\n def _update_C(self):\n \"\"\"\n Update C = A * (I + B)^(-1).\n \"\"\"\n inv_term = torch.linalg.inv(self._I + self._B)\n self._C = self._A.mm(inv_term)\n\n def _update_variational_gradient_wrt_F(self):\n \"\"\"\n Update d(variational distribution) / d(F) = C * Bt - A\n \"\"\"\n self._var_grad_wrt_F = self._C.mm(self._B.t()) - self._A\n\n def _update_variational_gradient_wrt_log_diag_psi(self):\n \"\"\"\n Update d(variational distribution) / d(log diag psi) = 0.5 * sum(C dot A, dim=1) dot diag_psi - 0.5\n \"\"\"\n sum_term = (self._C * self._A).sum(dim=1, keepdims=True)\n self._var_grad_wrt_log_diag_psi = 0.5 * sum_term * self.diag_psi - 0.5\n\n def _update_prior_gradient_wrt_c(self):\n \"\"\"\n Update d(prior distribution) / d(c) = -precision * c\n \"\"\"\n self._prior_grad_wrt_c = -self.precision * self.c\n\n def _update_prior_gradient_wrt_F(self):\n \"\"\"\n Update d(prior distribution) / d(F) = -precision * F\n \"\"\"\n self._prior_grad_wrt_F = -self.precision * self.F\n\n def _update_prior_gradient_wrt_log_diag_psi(self):\n \"\"\"\n Update d(prior distribution) / d(log diag psi) = -0.5 * precision * diag_psi\n \"\"\"\n self._prior_grad_wrt_log_diag_psi = -0.5 * self.precision * self.diag_psi\n\n def get_variational_mean(self) -> Tensor:\n \"\"\"\n Get the mean of the factor analysis variational distribution.\n\n Returns:\n The mean vector. Of shape (self.weight_dim,).\n \"\"\"\n return self.c.squeeze()\n\n def get_variational_covariance(self) -> Tensor:\n \"\"\"\n Get the full covariance matrix of the factor analysis variational distribution.\n\n Note: if the network dimension is large, this may result in a memory error.\n\n Returns:\n The covariance matrix. 
Of shape (self.weight_dim, self.weight_dim).\n \"\"\"\n psi = torch.diag(self.diag_psi.squeeze())\n return self.F.mm(self.F.t()) + psi\n",
"id": "4535976",
"language": "Python",
"matching_score": 6.207792282104492,
"max_stars_count": 0,
"path": "swafa/callbacks.py"
},
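`FactorAnalysisVariationalInferenceCallback.sample_weight_vector` draws weights via the reparameterisation w = F h + c + sqrt(diag_psi) * z. A tiny sketch of that sample with random stand-ins for the learned variational parameters and arbitrary dimensions:

```python
# Sketch of the reparameterised sample drawn in sample_weight_vector():
# w = F h + c + sqrt(diag_psi) * z, with h ~ N(0, I_k) and z ~ N(0, I_d).
# F, c and diag_psi below are random stand-ins for the learned parameters.
import torch

weight_dim, latent_dim = 20, 3
c = torch.randn(weight_dim, 1)
F = torch.randn(weight_dim, latent_dim)
diag_psi = torch.rand(weight_dim, 1) + 0.1   # strictly positive noise variances

h = torch.randn(latent_dim, 1)
z = torch.randn(weight_dim, 1)
w = (F.mm(h) + c + torch.sqrt(diag_psi) * z).squeeze(dim=1)

print(w.shape)  # torch.Size([20]); implied covariance is F F^T + diag(diag_psi)
```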
{
"content": "import numpy as np\nimport pytest\nimport torch\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch.optim import SGD\nfrom pytorch_lightning import Trainer\n\nfrom swafa.models import FeedForwardNet\nfrom swafa.callbacks import WeightPosteriorCallback, FactorAnalysisVariationalInferenceCallback\nfrom swafa.fa import OnlineGradientFactorAnalysis\nfrom swafa.posterior import ModelPosterior\nfrom swafa.utils import get_weight_dimension\n\n\nclass TestWeightPosteriorUpdate:\n\n @pytest.mark.parametrize(\n \"n_samples, batch_size, n_epochs, update_epoch_start, iterate_averaging_window_size, expected_n_updates\",\n [\n (32, 4, 5, 1, 1, int(32 / 4) * 5),\n (32, 4, 5, 3, 1, int(32 / 4) * (5 - 2)),\n (32, 4, 8, 0.5, 1, int(32 / 4) * (8 - 3)),\n (32, 4, 9, 0.5, 1, int(32 / 4) * (9 - 3)),\n (32, 4, 5, 1, 2, (int(32 / 4) * 5 / 2)),\n (32, 4, 5, 3, 2, (int(32 / 4) * (5 - 2) / 2)),\n (32, 4, 8, 0.5, 2, (int(32 / 4) * (8 - 3) / 2)),\n (32, 4, 9, 0.5, 2, (int(32 / 4) * (9 - 3) / 2)),\n ]\n )\n def test_posterior_updates(self, n_samples, batch_size, n_epochs, update_epoch_start, iterate_averaging_window_size,\n expected_n_updates):\n input_dim = 4\n hidden_dims = [8, 8]\n net = FeedForwardNet(input_dim, hidden_dims)\n\n model_posterior = ModelPosterior(\n model=net,\n weight_posterior_class=OnlineGradientFactorAnalysis,\n weight_posterior_kwargs=dict(latent_dim=3),\n )\n\n callback = WeightPosteriorCallback(\n posterior=model_posterior.weight_posterior,\n update_epoch_start=update_epoch_start,\n iterate_averaging_window_size=iterate_averaging_window_size,\n )\n\n trainer = Trainer(max_epochs=n_epochs, callbacks=[callback])\n\n dataset = TensorDataset(torch.randn(n_samples, input_dim), torch.empty(n_samples).random_(2))\n dataloader = DataLoader(dataset, batch_size=4, drop_last=True)\n\n trainer.fit(model_posterior.model, train_dataloader=dataloader)\n\n assert model_posterior.weight_posterior.t == expected_n_updates\n\n @pytest.mark.parametrize(\"bad_update_epoch_start\", [0, -0.1, 1.1])\n def test_init_raises_error_for_bad_update_epoch_start(self, bad_update_epoch_start):\n net = FeedForwardNet(input_dim=10, hidden_dims=[10])\n\n model_posterior = ModelPosterior(\n model=net,\n weight_posterior_class=OnlineGradientFactorAnalysis,\n weight_posterior_kwargs=dict(latent_dim=3),\n )\n\n with pytest.raises(ValueError):\n WeightPosteriorCallback(\n posterior=model_posterior.weight_posterior,\n update_epoch_start=bad_update_epoch_start,\n )\n\n def test_raises_error_if_model_and_posterior_do_not_match(self):\n n_samples = 32\n input_dim = 10\n net = FeedForwardNet(input_dim=input_dim, hidden_dims=[10])\n\n model_posterior = ModelPosterior(\n model=FeedForwardNet(input_dim=input_dim, hidden_dims=[5]),\n weight_posterior_class=OnlineGradientFactorAnalysis,\n weight_posterior_kwargs=dict(latent_dim=3),\n )\n\n callback = WeightPosteriorCallback(\n posterior=model_posterior.weight_posterior,\n update_epoch_start=1,\n )\n\n trainer = Trainer(max_epochs=10, callbacks=[callback])\n\n dataset = TensorDataset(torch.randn(n_samples, input_dim), torch.empty(n_samples).random_(2))\n dataloader = DataLoader(dataset, batch_size=4, drop_last=True)\n\n with pytest.raises(RuntimeError):\n trainer.fit(net, train_dataloader=dataloader)\n\n def test_update_weight_window_average(self):\n input_dim = 10\n net = FeedForwardNet(input_dim=input_dim, bias=False)\n\n model_posterior = ModelPosterior(\n model=net,\n weight_posterior_class=OnlineGradientFactorAnalysis,\n 
weight_posterior_kwargs=dict(latent_dim=3),\n )\n\n callback = WeightPosteriorCallback(\n posterior=model_posterior.weight_posterior,\n update_epoch_start=1,\n )\n\n weights1 = torch.randn(input_dim)\n callback._weight_window_average = torch.clone(weights1)\n callback._window_index = 1\n weights2 = torch.randn(input_dim)\n callback._update_weight_window_average(weights2)\n\n assert torch.isclose(callback._weight_window_average, (weights1 + weights2) / 2).all()\n assert callback._window_index == 2\n\n\nclass TestFactorAnalysisVariationalInferenceCallback:\n\n @pytest.mark.parametrize(\n \"input_dim, hidden_dims, latent_dim\",\n [\n (15, None, 1),\n (24, [8], 2),\n (32, [16, 8], 4),\n ]\n )\n def test_variational_distribution_params_shape(self, input_dim, hidden_dims, latent_dim):\n net = FeedForwardNet(input_dim, hidden_dims)\n weight_dim = get_weight_dimension(net)\n\n callback = FactorAnalysisVariationalInferenceCallback(latent_dim, precision=0.1)\n trainer = Trainer(max_epochs=1, callbacks=[callback])\n\n callback.on_fit_start(trainer, net)\n\n assert callback.c.shape == (weight_dim, 1)\n assert callback.F.shape == (weight_dim, latent_dim)\n assert callback.diag_psi.shape == (weight_dim, 1)\n\n @pytest.mark.parametrize(\n \"input_dim, hidden_dims, latent_dim\",\n [\n (15, None, 1),\n (24, [8], 2),\n (32, [16, 8], 4),\n ]\n )\n def test_sample_weight_vector(self, input_dim, hidden_dims, latent_dim):\n net = FeedForwardNet(input_dim, hidden_dims)\n\n callback = FactorAnalysisVariationalInferenceCallback(\n latent_dim, precision=0.1, random_seed=1,\n )\n trainer = Trainer(max_epochs=1, callbacks=[callback])\n callback.on_fit_start(trainer, net)\n\n samples = torch.hstack([callback.sample_weight_vector()[:, None] for _ in range(10000)]).numpy()\n actual_mean = samples.mean(axis=1, keepdims=True)\n actual_cov = np.cov(samples)\n\n expected_mean = callback.c.numpy()\n expected_cov = callback.F.mm(callback.F.t()) + torch.diag(callback.diag_psi.squeeze()).numpy()\n\n assert np.isclose(actual_mean, expected_mean, atol=0.1).all()\n assert np.isclose(actual_cov, expected_cov, atol=0.1).all()\n\n @pytest.mark.parametrize(\n \"weight_dim, latent_dim, precision\",\n [\n (10, 1, 1.0),\n (10, 2, 0.1),\n (20, 5, 0.01),\n ]\n )\n def test_compute_gradient_wrt_c(self, weight_dim, latent_dim, precision):\n callback = FactorAnalysisVariationalInferenceCallback(latent_dim, precision)\n\n callback.weight_dim = weight_dim\n callback._init_variational_params()\n callback._update_expected_gradients()\n grad_weights = torch.randn(weight_dim, 1)\n\n actual_grad = callback._compute_gradient_wrt_c(grad_weights)\n\n expected_var_grad = 0\n expected_prior_grad = -precision * callback.c\n expected_loss_grad = grad_weights\n\n expected_grad = expected_var_grad - expected_prior_grad + expected_loss_grad\n\n assert torch.isclose(actual_grad, expected_grad).all()\n\n @pytest.mark.parametrize(\n \"weight_dim, latent_dim, precision\",\n [\n (10, 1, 1.0),\n (10, 2, 0.1),\n (20, 5, 0.01),\n ]\n )\n def test_compute_gradient_wrt_F(self, weight_dim, latent_dim, precision):\n callback = FactorAnalysisVariationalInferenceCallback(latent_dim, precision)\n\n callback.weight_dim = weight_dim\n callback._init_variational_params()\n callback.diag_psi = torch.rand(weight_dim, 1)\n callback._update_expected_gradients()\n callback.sample_weight_vector()\n grad_weights = torch.randn(weight_dim, 1)\n\n actual_grad = callback._compute_gradient_wrt_F(grad_weights)\n\n F = callback.F\n h = callback._h\n S = 
callback.get_variational_covariance()\n inv_S = torch.linalg.inv(S)\n\n expected_var_grad = -inv_S.mm(F)\n expected_prior_grad = -precision * F\n expected_loss_grad = grad_weights.mm(h.t())\n\n expected_grad = expected_var_grad - expected_prior_grad + expected_loss_grad\n\n assert torch.isclose(actual_grad, expected_grad, atol=1e-4).all()\n\n @pytest.mark.parametrize(\n \"weight_dim, latent_dim, precision\",\n [\n (10, 1, 1.0),\n (10, 2, 0.1),\n (20, 5, 0.01),\n ]\n )\n def test_compute_gradient_wrt_log_diag_psi(self, weight_dim, latent_dim, precision):\n callback = FactorAnalysisVariationalInferenceCallback(latent_dim, precision)\n\n callback.weight_dim = weight_dim\n callback._init_variational_params()\n callback.diag_psi = torch.rand(weight_dim, 1)\n callback._update_expected_gradients()\n callback.sample_weight_vector()\n grad_weights = torch.randn(weight_dim, 1)\n\n actual_grad = callback._compute_gradient_wrt_log_diag_psi(grad_weights)\n\n diag_psi = callback.diag_psi\n psi = torch.diag(callback.diag_psi.squeeze())\n z = callback._z\n S = callback.get_variational_covariance()\n inv_S = torch.linalg.inv(S)\n\n expected_var_grad = -(1 / 2) * torch.diag(inv_S)[:, None] * diag_psi\n expected_prior_grad = (-precision / 2) * diag_psi\n expected_loss_grad = (1 / 2) * torch.diag(grad_weights.mm(z.t()).mm(torch.sqrt(psi)))[:, None]\n\n expected_grad = expected_var_grad - expected_prior_grad + expected_loss_grad\n\n assert torch.isclose(actual_grad, expected_grad, atol=1e-4).all()\n\n @pytest.mark.parametrize(\n \"weight_dim, latent_dim, precision, n_gradients_per_update\",\n [\n (10, 1, 1.0, 1),\n (10, 2, 0.1, 2),\n (20, 5, 0.01, 3),\n ]\n )\n def test_accumulate_gradients(self, weight_dim, latent_dim, precision, n_gradients_per_update):\n callback = FactorAnalysisVariationalInferenceCallback(\n latent_dim, precision, n_gradients_per_update=n_gradients_per_update,\n )\n\n callback.weight_dim = weight_dim\n callback._init_variational_params()\n callback._update_expected_gradients()\n callback.sample_weight_vector()\n grad_weights = torch.randn(weight_dim, 1)\n\n grad_wrt_c = callback._compute_gradient_wrt_c(grad_weights)\n grad_wrt_F = callback._compute_gradient_wrt_F(grad_weights)\n grad_wrt_log_diag_psi = callback._compute_gradient_wrt_log_diag_psi(grad_weights)\n\n for _ in range(n_gradients_per_update):\n callback._accumulate_gradients(grad_weights)\n\n assert torch.isclose(callback.c.grad, grad_wrt_c * n_gradients_per_update).all()\n assert torch.isclose(callback.F.grad, grad_wrt_F * n_gradients_per_update).all()\n assert torch.isclose(callback._log_diag_psi.grad, grad_wrt_log_diag_psi * n_gradients_per_update).all()\n\n @pytest.mark.parametrize(\n \"weight_dim, latent_dim, precision, n_gradients_per_update\",\n [\n (10, 1, 1.0, 1),\n (10, 2, 0.1, 2),\n (20, 5, 0.01, 3),\n ]\n )\n def test_average_and_normalise_gradient(self, weight_dim, latent_dim, precision, n_gradients_per_update):\n callback = FactorAnalysisVariationalInferenceCallback(\n latent_dim, precision, n_gradients_per_update=n_gradients_per_update,\n )\n\n callback.weight_dim = weight_dim\n callback._init_variational_params()\n callback._update_expected_gradients()\n callback.sample_weight_vector()\n grad_weights = torch.randn(weight_dim, 1)\n\n grad_wrt_c = callback._compute_gradient_wrt_c(grad_weights)\n\n for _ in range(n_gradients_per_update):\n callback._accumulate_gradients(grad_weights)\n\n callback._average_and_normalise_gradient(callback.c)\n\n assert torch.isclose(callback.c.grad, grad_wrt_c).all()\n\n 
@pytest.mark.parametrize(\n \"weight_dim, latent_dim, precision, n_gradients_per_update, learning_rate\",\n [\n (10, 1, 1.0, 1, 0.1),\n (10, 2, 0.1, 2, 0.01),\n (20, 5, 0.01, 3, 0.001),\n ]\n )\n def test_update_variational_params(self, weight_dim, latent_dim, precision, n_gradients_per_update, learning_rate):\n callback = FactorAnalysisVariationalInferenceCallback(\n latent_dim, precision, optimiser_class=SGD,\n bias_optimiser_kwargs=dict(lr=learning_rate),\n factors_optimiser_kwargs=dict(lr=learning_rate),\n noise_optimiser_kwargs=dict(lr=learning_rate),\n n_gradients_per_update=n_gradients_per_update,\n )\n\n callback.weight_dim = weight_dim\n callback._init_variational_params()\n callback._update_expected_gradients()\n callback._init_optimiser()\n callback.sample_weight_vector()\n grad_weights = torch.randn(weight_dim, 1)\n\n c_before = torch.clone(callback.c)\n F_before = torch.clone(callback.F)\n log_diag_psi_before = torch.clone(callback._log_diag_psi)\n\n grad_wrt_c = callback._compute_gradient_wrt_c(grad_weights)\n grad_wrt_F = callback._compute_gradient_wrt_F(grad_weights)\n grad_wrt_log_diag_psi = callback._compute_gradient_wrt_log_diag_psi(grad_weights)\n\n for _ in range(n_gradients_per_update):\n callback._accumulate_gradients(grad_weights)\n\n callback._update_variational_params()\n\n assert torch.isclose(callback.c, c_before - learning_rate * grad_wrt_c).all()\n assert torch.isclose(callback.F, F_before - learning_rate * grad_wrt_F).all()\n assert torch.isclose(callback._log_diag_psi, log_diag_psi_before - learning_rate * grad_wrt_log_diag_psi).all()\n\n assert torch.isclose(callback.diag_psi, torch.exp(callback._log_diag_psi)).all()\n\n assert (callback.c.grad == 0).all()\n assert (callback.F.grad == 0).all()\n assert (callback._log_diag_psi.grad == 0).all()\n\n @pytest.mark.parametrize(\n \"input_dim, hidden_dims, latent_dim, precision, learning_rate\",\n [\n (15, None, 1, 0.1, 0.1),\n (24, [8], 2, 0.1, 0.01),\n (32, [16, 8], 4, 1, 0.001),\n ]\n )\n def test_variational_distribution_params_change(self, input_dim, hidden_dims, latent_dim, precision, learning_rate):\n net = FeedForwardNet(input_dim, hidden_dims)\n callback = FactorAnalysisVariationalInferenceCallback(\n latent_dim, precision,\n bias_optimiser_kwargs=dict(lr=learning_rate),\n factors_optimiser_kwargs=dict(lr=learning_rate),\n noise_optimiser_kwargs=dict(lr=learning_rate),\n )\n\n n_samples = 8\n dataset = TensorDataset(torch.randn(n_samples, input_dim), torch.randn(n_samples))\n dataloader = DataLoader(dataset, batch_size=4)\n\n trainer = Trainer(max_epochs=1, callbacks=[callback])\n trainer.fit(net, train_dataloader=dataloader)\n\n c_before = torch.clone(callback.c)\n F_before = torch.clone(callback.F)\n diag_psi_before = torch.clone(callback.diag_psi)\n\n trainer = Trainer(max_epochs=1, callbacks=[callback])\n trainer.fit(net, train_dataloader=dataloader)\n\n c_after = torch.clone(callback.c)\n F_after = torch.clone(callback.F)\n diag_psi_after = torch.clone(callback.diag_psi)\n\n assert torch.isclose(callback.diag_psi, torch.exp(callback._log_diag_psi)).all()\n assert not torch.isclose(c_before, c_after).all()\n assert not torch.isclose(F_before, F_after).all()\n assert not torch.isclose(diag_psi_before, diag_psi_after).all()\n",
"id": "9850133",
"language": "Python",
"matching_score": 3.6772477626800537,
"max_stars_count": 0,
"path": "tests/test_callbacks.py"
},
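`test_update_weight_window_average` checks the incremental running-mean update used by the window averaging: avg <- avg + (w - avg) / k. A short sketch confirming that after k iterates this equals the arithmetic mean of the window:

```python
# Running-mean update checked in test_update_weight_window_average: after k
# iterates the window average equals the arithmetic mean of those k iterates.
import torch

iterates = [torch.randn(5) for _ in range(4)]
window_average = torch.zeros(5)
for k, w in enumerate(iterates, start=1):
    window_average = window_average + (w - window_average) / k

assert torch.isclose(window_average, torch.stack(iterates).mean(dim=0)).all()
```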
{
"content": "from typing import Dict, List, Optional\n\nfrom torch import Tensor\nfrom torch.utils.data import DataLoader\nfrom pytorch_lightning import Trainer, LightningModule\n\nfrom swafa.custom_types import POSTERIOR_TYPE\nfrom swafa.utils import get_weight_dimension\n\n\nclass ModelPosterior:\n \"\"\"\n This class represents a model together with a posterior distribution over the model's weights.\n\n Args:\n model: A neural network implemented as a PyTorch Lightning model.\n weight_posterior_class: The uninitialised class which will be used to construct the posterior distribution over\n the model's weights.\n weight_posterior_kwargs: Keyword arguments which will be used when initialising the posterior class. This is\n optional, but should contain any positional arguments of the class, except the dimension of the\n distribution, which is inferred from the number of weights in the model.\n\n Attributes:\n weight_posterior: The initialised posterior distribution of the model's weights. Of type weight_posterior_class.\n \"\"\"\n\n def __init__(self, model: LightningModule, weight_posterior_class: POSTERIOR_TYPE,\n weight_posterior_kwargs: Optional[dict] = None):\n self.model = model\n self.weight_posterior = self._init_weight_posterior(weight_posterior_class, weight_posterior_kwargs)\n\n def _init_weight_posterior(self, posterior_class: POSTERIOR_TYPE, posterior_kwargs: Optional[dict] = None):\n \"\"\"\n Initialise the posterior distribution over the parameters of the model.\n\n Args:\n posterior_class: The uninitialised class which will be used to construct the posterior distribution.\n posterior_kwargs: Keyword arguments which will be used when initialising the posterior class. This is\n optional, but should contain any positional arguments of the class, except the dimension of the\n distribution, which is inferred from the number of weights in the model.\n\n Returns:\n The initialised posterior distribution.\n \"\"\"\n posterior_kwargs = posterior_kwargs or dict()\n return posterior_class(get_weight_dimension(self.model), **posterior_kwargs)\n\n def _get_weight_dimension(self) -> int:\n \"\"\"\n Get the total combined dimension of all the weights in the model.\n\n Returns:\n The total dimension of the model's weights.\n \"\"\"\n return sum([w.numel() for w in self.model.parameters()])\n\n def bayesian_model_average(self, dataloader: DataLoader, trainer: Trainer, n_samples: int) -> Tensor:\n raise NotImplementedError\n\n def _sample_model(self) -> LightningModule:\n raise NotImplementedError\n\n def test(self, dataloader: DataLoader, trainer: Trainer) -> List[Dict[str, float]]:\n raise NotImplementedError\n",
"id": "9952634",
"language": "Python",
"matching_score": 1.7823585271835327,
"max_stars_count": 0,
"path": "swafa/posterior.py"
},
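`ModelPosterior` infers the observation dimension of the weight posterior from the total number of learnable parameters in the wrapped model. A sketch of that inference, using `torch.nn.Linear` as a stand-in for the repository's `FeedForwardNet`:

```python
# How ModelPosterior sizes the posterior: the observation dimension is the total
# number of learnable parameters. nn.Linear stands in for FeedForwardNet here.
import torch.nn as nn

model = nn.Linear(6, 4)                                  # weights: 6*4, biases: 4
observation_dim = sum(w.numel() for w in model.parameters())
print(observation_dim)                                   # 28 == (6 + 1) * 4
```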
{
"content": "import pytest\n\nfrom swafa.models import FeedForwardNet\nfrom swafa.fa import OnlineGradientFactorAnalysis\nfrom swafa.posterior import ModelPosterior\n\n\nclass TestModelPosterior:\n\n @pytest.mark.parametrize(\n \"input_dim, hidden_dims, posterior_latent_dim, expected_posterior_dim\",\n [\n (5, None, 3, 5 + 1),\n (6, [4], 2, (6 + 1) * 4 + (4 + 1)),\n (7, [6, 9], 5, (7 + 1) * 6 + (6 + 1) * 9 + (9 + 1)),\n ]\n )\n def test_posterior_dimension(self, input_dim, hidden_dims, posterior_latent_dim, expected_posterior_dim):\n net = FeedForwardNet(input_dim, hidden_dims)\n\n model_posterior = ModelPosterior(\n model=net,\n weight_posterior_class=OnlineGradientFactorAnalysis,\n weight_posterior_kwargs=dict(latent_dim=posterior_latent_dim),\n )\n\n assert model_posterior.weight_posterior.latent_dim == posterior_latent_dim\n assert model_posterior.weight_posterior.observation_dim == expected_posterior_dim\n",
"id": "4121659",
"language": "Python",
"matching_score": 2.1531131267547607,
"max_stars_count": 0,
"path": "tests/test_posterior.py"
},
{
"content": "from typing import Union\n\nfrom swafa.fa import OnlineGradientFactorAnalysis, OnlineEMFactorAnalysis\n\n\nPOSTERIOR_TYPE = Union[OnlineGradientFactorAnalysis, OnlineEMFactorAnalysis]\n",
"id": "6484059",
"language": "Python",
"matching_score": 0.058116063475608826,
"max_stars_count": 0,
"path": "swafa/custom_types.py"
},
{
"content": "from typing import Optional, Union\n\nimport torch\nfrom torch import Tensor\nimport torch.nn as nn\nfrom pytorch_lightning import Trainer\n\n\ndef get_callback_epoch_range(trainer: Trainer, epoch_start: Optional[Union[int, float]] = None,\n epoch_stop: Optional[Union[int, float]] = None) -> (int, int):\n \"\"\"\n Initialise the range of epochs on which a callback will be triggered.\n\n Convert the epoch limits from float to int if necessary and converts to zero-indexing.\n\n Args:\n trainer: The PyTorch Lightning Trainer which will trigger the callback.\n epoch_start: The first training epoch on which to trigger the callback. Integer indexing starts from 1. Can\n also specify a float between 0 and 1, which corresponds to the fraction of total epochs which should pass\n before triggering the callback for the first time.\n epoch_stop: The last training epoch on which to trigger the callback. Integer indexing starts from 1. Can\n also specify a float between 0 and 1, which corresponds to the fraction of total epochs which should pass\n before triggering the callback for the last time.\n \"\"\"\n epoch_start = epoch_start or 1\n epoch_stop = epoch_stop or trainer.max_epochs\n\n if isinstance(epoch_start, float):\n epoch_start = int(trainer.max_epochs * epoch_start)\n\n if isinstance(epoch_stop, float):\n epoch_stop = int(trainer.max_epochs * epoch_stop)\n\n first_epoch = max(epoch_start - 1, 0)\n last_epoch = min(epoch_stop - 1, trainer.max_epochs - 1)\n\n return first_epoch, last_epoch\n\n\ndef vectorise_weights(model: nn.Module) -> Tensor:\n \"\"\"\n Concatenate all weights of the given model into a single vector.\n\n Each individual set of weights is reshaped into a single vector and then these vectors are stacked together.\n\n The weights are stacked in the order that they are returned by model.parameters().\n\n Args:\n model: A PyTorch model.\n\n Returns:\n All the model's weights stacked together. Of shape (n_weights,).\n \"\"\"\n return torch.cat([w.data.reshape(-1) for w in model.parameters()])\n\n\ndef vectorise_gradients(model: nn.Module) -> Tensor:\n \"\"\"\n Concatenate all gradients of the given model's weights into a single vector.\n\n Each individual set of gradients is reshaped into a single vector and then these vectors are stacked together.\n\n The gradients are stacked in the order that the weights are returned by model.parameters().\n\n Args:\n model: A PyTorch model.\n\n Returns:\n All the model's gradients stacked together. Of shape (n_weights,).\n \"\"\"\n return torch.cat([w.grad.reshape(-1) for w in model.parameters()])\n\n\ndef get_weight_dimension(model: nn.Module) -> int:\n \"\"\"\n Get the total combined dimension of all the weights in the model.\n\n Returns:\n The total dimension of the model's weights.\n \"\"\"\n return sum([w.numel() for w in model.parameters()])\n\n\ndef set_weights(model: nn.Module, weights: torch.Tensor):\n \"\"\"\n Set the learnable parameters of the given model to the given weights.\n\n The order of the given weights should be the same as that returned by vectorise_weights().\n\n Args:\n model: A PyTorch model with n_weights learnable parameters.\n weights: A version of the model's weights stacked together. 
Of shape (n_weights,).\n \"\"\"\n weight_count = 0\n for w in model.parameters():\n n_elements = w.numel()\n elements = weights[weight_count:weight_count + n_elements]\n w.data = elements.reshape(w.shape)\n weight_count += n_elements\n\n\ndef normalise_gradient(grad: Tensor, max_grad_norm: float) -> Tensor:\n \"\"\"\n If the gradient norm is greater than the maximum value, normalise the gradient such that its norm is equal to the\n maximum value.\n\n Args:\n grad: The gradient.\n max_grad_norm: The maximum allowed gradient norm.\n\n Returns:\n The normalised gradient.\n \"\"\"\n grad_norm = torch.linalg.norm(grad)\n if grad_norm > max_grad_norm:\n return max_grad_norm * grad / grad_norm\n\n return grad\n",
"id": "12010678",
"language": "Python",
"matching_score": 3.315216541290283,
"max_stars_count": 0,
"path": "swafa/utils.py"
},
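`normalise_gradient` rescales a gradient onto `max_grad_norm` whenever its norm exceeds that bound. A standalone check of the same rescaling rule (the function name here is illustrative):

```python
# Standalone check of the rescaling performed by normalise_gradient(): gradients
# with norm above max_grad_norm are scaled down so their norm equals the bound.
import torch


def clip_by_norm(grad: torch.Tensor, max_grad_norm: float) -> torch.Tensor:
    grad_norm = torch.linalg.norm(grad)
    if grad_norm > max_grad_norm:
        return max_grad_norm * grad / grad_norm
    return grad


g = torch.tensor([10.0, 10.0])                     # norm is sqrt(200) > 5
print(torch.linalg.norm(clip_by_norm(g, 5.0)))     # norm is clipped to 5.0
```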
{
"content": "import pytest\nfrom pytorch_lightning import Trainer\nimport torch\n\nfrom swafa.models import FeedForwardNet\nfrom swafa.utils import (\n get_callback_epoch_range,\n vectorise_weights,\n vectorise_gradients,\n set_weights,\n get_weight_dimension,\n normalise_gradient,\n)\n\n\n@pytest.mark.parametrize(\n \"max_epochs, epoch_start, epoch_stop, expected_first_epoch, expected_last_epoch\",\n [\n (10, 1, 10, 0, 9),\n (10, 1, None, 0, 9),\n (10, None, 10, 0, 9),\n (10, None, None, 0, 9),\n (20, 0.0, 1.0, 0, 19),\n (20, 0.2, 0.8, 3, 15),\n ]\n)\ndef test_get_callback_epoch_range(max_epochs, epoch_start, epoch_stop, expected_first_epoch, expected_last_epoch):\n trainer = Trainer(max_epochs=max_epochs)\n first_epoch, last_epoch = get_callback_epoch_range(trainer, epoch_start=epoch_start, epoch_stop=epoch_stop)\n\n assert first_epoch == expected_first_epoch\n assert last_epoch == expected_last_epoch\n\n\n@pytest.mark.parametrize(\n \"input_dim, hidden_dims, expected_n_weights\",\n [\n (5, None, 5 + 1),\n (6, [4], (6 + 1) * 4 + (4 + 1)),\n (7, [6, 9], (7 + 1) * 6 + (6 + 1) * 9 + (9 + 1)),\n ]\n)\ndef test_vectorise_weights(input_dim, hidden_dims, expected_n_weights):\n net = FeedForwardNet(input_dim, hidden_dims)\n weights = vectorise_weights(net)\n\n assert len(weights) == expected_n_weights\n\n\n@pytest.mark.parametrize(\n \"input_dim, hidden_dims, expected_n_gradients\",\n [\n (5, None, 5 + 1),\n (6, [4], (6 + 1) * 4 + (4 + 1)),\n (7, [6, 9], (7 + 1) * 6 + (6 + 1) * 9 + (9 + 1)),\n ]\n)\ndef test_vectorise_gradients(input_dim, hidden_dims, expected_n_gradients):\n net = FeedForwardNet(input_dim, hidden_dims)\n x = torch.randn(3, input_dim)\n loss = net(x).sum()\n loss.backward()\n gradients = vectorise_gradients(net)\n\n assert len(gradients) == expected_n_gradients\n\n\n@pytest.mark.parametrize(\n \"input_dim, hidden_dims, expected_weight_dim\",\n [\n (5, None, 5 + 1),\n (6, [4], (6 + 1) * 4 + (4 + 1)),\n (7, [6, 9], (7 + 1) * 6 + (6 + 1) * 9 + (9 + 1)),\n ]\n)\ndef test_get_weight_dimension(input_dim, hidden_dims, expected_weight_dim):\n net = FeedForwardNet(input_dim, hidden_dims)\n\n assert get_weight_dimension(net) == expected_weight_dim\n\n\n@pytest.mark.parametrize(\n \"input_dim, hidden_dims\",\n [\n (5, None),\n (6, [4]),\n (7, [6, 9]),\n ]\n)\ndef test_set_weights(input_dim, hidden_dims):\n net = FeedForwardNet(input_dim, hidden_dims)\n original_weights = vectorise_weights(net)\n n_weights = get_weight_dimension(net)\n expected_weights = torch.randn(n_weights)\n set_weights(net, expected_weights)\n actual_weights = vectorise_weights(net)\n\n assert not torch.isclose(actual_weights, original_weights).all()\n assert torch.isclose(actual_weights, expected_weights).all()\n\n\n@pytest.mark.parametrize(\n \"grad, max_grad_norm, expected_grad_norm\",\n [\n (torch.tensor([1, 1]).float(), 100, torch.sqrt(torch.tensor(2))),\n (torch.tensor([10, 10]).float(), 5, torch.tensor(5).float()),\n ]\n)\ndef test_set_weights(grad, max_grad_norm, expected_grad_norm):\n actual_grad_norm = torch.linalg.norm(normalise_gradient(grad, max_grad_norm))\n\n assert torch.isclose(actual_grad_norm, expected_grad_norm)\n",
"id": "5807456",
"language": "Python",
"matching_score": 0.4314954876899719,
"max_stars_count": 0,
"path": "tests/test_utils.py"
},
{
"content": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 9 16:18:13 2018\r\n\r\n@author: Pichau\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport torch\r\nfrom torch_utils import pairwise_distances_squared, gaussian_kernel\r\nfrom sklearn.model_selection import GridSearchCV, RandomizedSearchCV, StratifiedKFold\r\nimport collections\r\nfrom copy import deepcopy\r\nimport sys\r\n\r\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n \r\n \r\nclass PCIF(object):\r\n \"\"\"\r\n Probabilistic Classifier Importance Fitting.\r\n \r\n Trains a probabilistic classifier to distinguish between samples from \r\n training and test distributions. Then given a feature vector x, we can use \r\n the trained classifier along with Bayes' rule to estimate the probability \r\n density ratio w(x) as follows:\r\n \r\n w(x) = n_tr * p(test|x) / n_te * p(train|x),\r\n \r\n where n_tr and n_te are the number of training and test samples used to\r\n fit the model respectively, and p(train|x) and p(test|x) are the \r\n probabilities that x was sampled from the training and test distributions\r\n respectively, as predicted by the trained classifier.\r\n \r\n Attributes\r\n ----------\r\n \r\n n_tr_: integer\r\n Number of samples from training distribution used to fit the model.\r\n \r\n n_te_: integer\r\n Number of samples from test distribution used to fit the model.\r\n \r\n estimator_: estimator with scikit-learn interface\r\n Fitted probabilistic classifier.\r\n \r\n cv_results_:\r\n \r\n best_score_:\r\n \r\n best_params_:\r\n \r\n \"\"\"\r\n\r\n def __init__(self):\r\n \r\n # attributes\r\n self.n_tr_ = None\r\n self.n_te_ = None\r\n self.estimator_ = None\r\n self.cv_results_ = None\r\n self.best_score_ = None\r\n self.best_params_ = None\r\n \r\n \r\n def fit(self,\r\n estimator,\r\n X_tr,\r\n X_te):\r\n \"\"\"\r\n Fits a probabilistic classifier to the input training and test data\r\n to predict p(test|x).\r\n \r\n - If an estimator with the scikit-learn interface is provided,\r\n this estimator is fit to the data.\r\n \r\n - If scikit-learn GridSearchCV or RandomizedSearchCV object provided,\r\n model selection is run and the best estimator is subsequently fit to \r\n all the data.\r\n \r\n Parameters\r\n ----------\r\n \r\n estimator: estimator or sklearn.model_selection.GridSearchCV/RandomizedSearchCV\r\n If estimator, assumed to implement the scikit-learn estimator interface.\r\n \r\n X_tr: numpy array\r\n Input data from training distribution, where each row is a feature vector.\r\n \r\n X_te: numpy array\r\n Input data from test distribution, where each row is a feature vector.\r\n \"\"\"\r\n \r\n # construct the target (1 if test, 0 if train)\r\n self.n_tr_ = X_tr.shape[0]\r\n self.n_te_ = X_te.shape[0]\r\n n = self.n_tr_ + self.n_te_\r\n y = np.concatenate((np.zeros(self.n_tr_), np.ones(self.n_te_)))\r\n \r\n # stack and shuffle features and target\r\n X = np.vstack((X_tr, X_te))\r\n i_shuffle = np.random.choice(n, n, replace=False)\r\n X = X[i_shuffle]\r\n y = y[i_shuffle]\r\n \r\n # fit estimator\r\n if isinstance(estimator, GridSearchCV) or isinstance(estimator, RandomizedSearchCV):\r\n print(\"Running model selection...\")\r\n if estimator.refit == False:\r\n estimator.refit = True \r\n estimator.fit(X, y) \r\n print(\"Best score = {}\".format(estimator.best_score_))\r\n self.cv_results_ = estimator.cv_results_\r\n self.estimator_ = estimator.best_estimator_ \r\n self.best_score_ = estimator.best_score_\r\n self.best_params_ = estimator.best_params_\r\n 
print(\"Done!\")\r\n else:\r\n print(\"Fitting estimator...\")\r\n self.estimator_ = estimator.fit(X, y)\r\n print(\"Done!\")\r\n \r\n \r\n def predict(self,\r\n X):\r\n \"\"\"\r\n Estimates importance weights for input data.\r\n \r\n For each feature vector x, the trained probabilistic classifier \r\n is used to estimate the probability density ratio\r\n \r\n w(x) = n_tr * p(test|x) / n_te * p(train|x),\r\n \r\n where n_tr and n_te are the number of training and test samples used to\r\n train the model respectively, and p(train|x) and p(test|x) are the\r\n probabilities that x was sampled from the training and test distributions\r\n respectively, as predicted by the trained classifier.\r\n \r\n Parameters\r\n ----------\r\n \r\n X: numpy array\r\n Input data, where each row is a feature vector.\r\n \r\n Returns\r\n -------\r\n \r\n w: numpy vector of shape (len(X),)\r\n Estimated importance weight for each input. \r\n w[i] corresponds to importance weight of X[i]\r\n \"\"\"\r\n \r\n assert self.estimator_ is not None, \"Need to run fit method before calling predict!\"\r\n \r\n p = self.estimator.predict_proba(X)\r\n w = importance_weights(p, self.n_tr_, self.n_te_)\r\n \r\n return w\r\n \r\n \r\n def fit_predict(self,\r\n estimator,\r\n X_tr,\r\n X_te,\r\n X):\r\n \r\n self.fit(estimator, X_tr, X_te)\r\n w = self.predict(X)\r\n \r\n return w\r\n \r\n \r\n def predict_oos(self,\r\n X_tr,\r\n X_te,\r\n estimator=None,\r\n n_splits=5):\r\n \r\n if estimator is None:\r\n assert self.estimator_ is not None, \"Either provide an estimator or run fit method first!\"\r\n estimator = deepcopy(self.estimator_)\r\n \r\n # stack features and construct the target (1 if test, 0 if train)\r\n X = np.vstack((X_tr, X_te))\r\n y = np.concatenate((np.zeros(X_tr.shape[0]), np.ones(X_te.shape[0])))\r\n \r\n # split the data into n_splits folds, and for 1 to n_splits,\r\n # train on (n_splits - 1)-folds and use the fitted estimator to\r\n # predict weights for the other fold\r\n w = np.zeros_like(y)\r\n skf = StratifiedKFold(n_splits=n_splits, shuffle=True)\r\n for idx_tr, idx_te in skf.split(X, y):\r\n \r\n # fit on (n_splits - 1)-folds\r\n estimator.fit(X[idx_tr], y[idx_tr])\r\n \r\n # predict probabilities for other fold\r\n p = estimator.predict_proba(X[idx_te])\r\n \r\n # predict weights for other fold \r\n n_tr = (y[idx_tr] == 0).sum()\r\n n_te = (y[idx_tr] == 1).sum()\r\n w[idx_te] = importance_weights(p, n_tr, n_te)\r\n \r\n # split into training and test weights \r\n w_tr = w[y == 0]\r\n w_te = w[y == 1]\r\n \r\n return w_tr, w_te\r\n \r\n\r\ndef importance_weights(p,\r\n n_tr,\r\n n_te,\r\n logits=False):\r\n \r\n if len(p.shape) > 1:\r\n p = p[:, 1]\r\n \r\n if logits:\r\n w = (n_tr / n_te) * np.exp(p)\r\n else:\r\n w = (n_tr / n_te) * (p / (1 - p))\r\n\r\n return w \r\n\r\n \r\nclass uLSIF(object):\r\n \"\"\"\r\n Unconstrained Least Squares Importance Fitting (uLSIF).\r\n \r\n Implementation of uLSIF algorithm as described in \r\n Machine Learning in Non-Stationary Environments - Introduction to Covariate Shift Adaption,\r\n <NAME> and <NAME>, 2012.\r\n \r\n Gaussian kernel basis functions are fit to samples from training and test\r\n distributions to approximate the probability density ratio\r\n \r\n w(x) = p_te(x) / p_tr(x),\r\n \r\n where p_tr(x) and p_te(x) are the probabilities that the feature vector x\r\n comes from the training and test distributions respectively. 
The fitting \r\n is done through minimisation of the squared-loss between the model and \r\n the true probability density ratio function.\r\n \r\n Once fitted the model can be used to estimate the probability density\r\n ratio, or importance, of any x. \r\n \r\n Parameters\r\n ----------\r\n \r\n n_kernels: integer (default=100)\r\n Number of Guassian kernels to use in the model.\r\n \r\n Attributes\r\n ----------\r\n \r\n C_: torch tensor\r\n Kernel centres, where each row is a randomly chosen sample from the \r\n test distribution.\r\n \r\n alpha_: torch tensor\r\n Coefficients of fitted model.\r\n \r\n sigma_: scalar\r\n Kernel width of fitted model. \r\n \"\"\"\r\n\r\n def __init__(self,\r\n n_kernels=100):\r\n \r\n # parameters\r\n self.n_kernels = n_kernels\r\n \r\n # attributes\r\n self.C_ = None\r\n self.alpha_ = None\r\n self.sigma_ = None\r\n \r\n \r\n def fit(self,\r\n X_tr,\r\n X_te,\r\n sigma,\r\n lam,\r\n random_seed=42):\r\n \"\"\"\r\n Fits the model to the input training and test data.\r\n \r\n Gaussian kernel basis functions are fit to the data by minimising\r\n the squared-loss between the model and the true probability density \r\n ratio function.\r\n \r\n - If scalars provided for both kernel width (sigma) and regularisation \r\n strength (lam), the model with these hyperparameters is fit to the data.\r\n \r\n - If more than one value provided for either of the hyperparameters, \r\n a hyperparameter search is performed via leave-on-out cross-validation\r\n and the best parameters are used to fit the model.\r\n \r\n Parameters\r\n ----------\r\n \r\n X_tr: numpy array\r\n Input data from training distribution, where each row is a feature vector.\r\n \r\n X_te: numpy array\r\n Input data from test distribution, where each row is a feature vector.\r\n \r\n sigma: scalar or iterable\r\n Gaussian kernel width. If iterable, hyperparameter search will be run.\r\n \r\n lam: scalar or iterable\r\n Regularisation strength. 
If iterable, hyperparameter search will be run.\r\n \r\n random_seed: integer (default=42)\r\n Numpy random seed.\r\n \"\"\"\r\n \r\n with torch.no_grad():\r\n \r\n np.random.seed(random_seed)\r\n \r\n # convert training and test data to torch tensors\r\n X_tr = torch.from_numpy(X_tr).float().to(DEVICE)\r\n X_te = torch.from_numpy(X_te).float().to(DEVICE)\r\n \r\n # randomly choose kernel centres from X_te without replacement\r\n n_te = X_te.size(0)\r\n t = min(self.n_kernels, X_te.size(0)) \r\n self.C_ = X_te[np.random.choice(n_te, t, replace=False)] # shape (t, d)\r\n \r\n # compute the squared l2-norm of the difference between \r\n # every point in X_tr and every point in C,\r\n # element (l, i) should contain the squared l2-norm\r\n # between C[l] and X_tr[i]\r\n print(\"Computing distance matrix for X_train...\")\r\n D_tr = pairwise_distances_squared(self.C_, X_tr) # shape (t, n_tr)\r\n \r\n # do the same for X_te\r\n print(\"Computing distance matrix for X_test...\")\r\n D_te = pairwise_distances_squared(self.C_, X_te) # shape (t, n_te) \r\n \r\n # check if we need to run a hyperparameter search\r\n search_sigma = isinstance(sigma, (collections.Sequence, np.ndarray)) and \\\r\n (len(sigma) > 1)\r\n search_lam = isinstance(lam, (collections.Sequence, np.ndarray)) and \\\r\n (len(lam) > 1)\r\n if search_sigma | search_lam:\r\n print(\"Running hyperparameter search...\")\r\n sigma, lam = self.loocv(X_tr, D_tr, X_te, D_te, sigma, lam)\r\n else:\r\n if isinstance(sigma, (collections.Sequence, np.ndarray)):\r\n sigma = sigma[0]\r\n if isinstance(lam, (collections.Sequence, np.ndarray)):\r\n lam = lam[0]\r\n \r\n print(\"Computing optimal solution...\") \r\n X_tr = gaussian_kernel(D_tr, sigma) # shape (t, n_tr)\r\n X_te = gaussian_kernel(D_te, sigma) # shape (t, n_te)\r\n H, h = self.kernel_arrays(X_tr, X_te) # shapes (t, t) and (t, 1)\r\n alpha = (H + (lam * torch.eye(t)).to(DEVICE)).inverse().mm(h) # shape (t, 1)\r\n self.alpha_ = torch.max(torch.zeros(1).to(DEVICE), alpha) # shape (t, 1)\r\n self.sigma_ = sigma\r\n print(\"Done!\")\r\n \r\n \r\n def predict(self,\r\n X):\r\n \"\"\"\r\n Estimates importance weights for input data.\r\n \r\n For each feature vector x, uses the fitted model to estimate the \r\n probability density ratio\r\n \r\n w(x) = p_te(x) / p_tr(x),\r\n \r\n where p_tr is the probability density of the training distribution and\r\n p_te is the probability density of the test distribution.\r\n \r\n Parameters\r\n ----------\r\n \r\n X: numpy array\r\n Input data from training distribution, where each row is a feature vector.\r\n \r\n Returns\r\n -------\r\n \r\n w: numpy vector of shape (len(X),)\r\n Estimated importance weight for each input. 
\r\n w[i] corresponds to importance weight of X[i]\r\n \"\"\"\r\n \r\n with torch.no_grad():\r\n \r\n assert self.alpha_ is not None, \"Need to run fit method before calling predict!\"\r\n \r\n # convert data to torch tensors\r\n X = torch.from_numpy(X).float().to(DEVICE)\r\n \r\n # compute the squared l2-norm of the difference between \r\n # every point in X and every point in C,\r\n # element (l, i) should contain the squared l2-norm\r\n # between C[l] and X[i]\r\n D = pairwise_distances_squared(self.C_, X) # shape (t, n)\r\n \r\n # compute gaussian kernel\r\n X = gaussian_kernel(D, self.sigma_) # shape (t, n_tr)\r\n \r\n # compute importance weights\r\n w = self.alpha_.t().mm(X).squeeze().cpu().numpy() # shape (n_tr,) \r\n \r\n return w\r\n \r\n \r\n def loocv(self,\r\n X_tr,\r\n D_tr,\r\n X_te,\r\n D_te,\r\n sigma_range,\r\n lam_range):\r\n \"\"\"\r\n Runs hyperprameter search via leave-one-out cross-validation (LOOCV).\r\n \r\n Computes LOOCV squared-loss for every combination of the Guassian kernel \r\n width and regularisation strength and returns the parameters which \r\n correspond to the smallest loss. \r\n \r\n Parameters\r\n ----------\r\n \r\n X_tr: torch tensor\r\n Input data from training distribution, where each row is a feature vector.\r\n \r\n D_tr: torch tensor\r\n Squared l2-norm of the difference between every kernel centre\r\n and every row in X_tr.\r\n Element (l, i) should contain the squared l2-norm between \r\n the l-th kernel centre and X_tr[i]\r\n \r\n X_te: torch tensor\r\n Input data from test distribution, where each row is a feature vector.\r\n \r\n D_te: torch tensor\r\n Squared l2-norm of the difference between every kernel centre\r\n and every point in X_te.\r\n Element (l, i) should contain the squared l2-norm between \r\n the l-th kernel centre and X_te[i]\r\n \r\n sigma_range: scalar or iterable\r\n Guassian kernel width. If scalar will be converted to list.\r\n \r\n lam_range: scalar or iterable\r\n Regularisation strength. If scalar will be converted to list.\r\n \r\n Returns\r\n -------\r\n \r\n sigma_hat: scalar\r\n Guassian kernel width corresponding to lowest LOOCV loss.\r\n \r\n lam_hat: scalar\r\n Regularisation strength corresponding to lowest LOOCV loss. 
\r\n \"\"\"\r\n \r\n with torch.no_grad():\r\n \r\n # make sure hyperparameter ranges are iterables\r\n if not isinstance(sigma_range, (collections.Sequence, np.ndarray)):\r\n sigma_range = [sigma_range]\r\n if not isinstance(lam_range, (collections.Sequence, np.ndarray)):\r\n lam_range = [lam_range]\r\n \r\n # define some useful variables\r\n n_tr, d = X_tr.size()\r\n n_te = X_te.size(0)\r\n n = min(n_tr, n_te)\r\n t = min(self.n_kernels, n_te) \r\n ones_t = torch.ones((t, 1), device=DEVICE)\r\n ones_n = torch.ones((n, 1), device=DEVICE)\r\n diag_n_idx = torch.cat((torch.range(0, n-1).view(1, -1).long(), torch.range(0, n-1).view(1, -1).long()))\r\n losses = np.zeros((len(sigma_range), len(lam_range)))\r\n \r\n # for each candidate of Gaussian kernel width...\r\n for sigma_idx, sigma in enumerate(sigma_range):\r\n \r\n # apply the Gaussian kernel function to the elements of D_tr and D_te\r\n # reuse variables X_tr and X_te as we won't need the originals again\r\n X_tr = gaussian_kernel(D_tr, sigma) # shape (t, n_tr)\r\n X_te = gaussian_kernel(D_te, sigma) # shape (t, n_te)\r\n \r\n # compute kernel arrays\r\n H, h = self.kernel_arrays(X_tr, X_te) # shapes (t, t) and (t, 1)\r\n \r\n # for what follows X_tr and X_te must have the same shape,\r\n # so choose n points randomly from each\r\n X_tr = X_tr[:, np.random.choice(n_tr, n, replace=False)] # shape (t, n)\r\n X_te = X_te[:, np.random.choice(n_te, n, replace=False)] # shape (t, n)\r\n \r\n # for each candidate of regularisation parameter...\r\n for lam_idx, lam in enumerate(lam_range):\r\n \r\n # compute the t x t matrix B\r\n B = H + torch.eye(t, device=DEVICE) * (lam * (n_tr - 1)) / n_tr # shape (t, t)\r\n \r\n # compute the t x n matrix B_0 \r\n B_inv = B.inverse() # shape (t, t)\r\n B_inv_X_tr = B_inv.mm(X_tr) # shape (t, n)\r\n diag_num = h.t().mm(B_inv_X_tr).squeeze() # shape (n,)\r\n diag_denom = (n_tr * ones_n.t() - ones_t.t().mm(X_tr * B_inv_X_tr)).squeeze() # shape (n,) \r\n diag_sparse = torch.sparse.FloatTensor(diag_n_idx, (diag_num / diag_denom).cpu(), torch.Size([n, n])).to(DEVICE) # sparse (n, n)\r\n B_0 = B_inv.mm(h).mm(ones_n.t()) + (diag_sparse.t().mm(B_inv_X_tr.t())).t() # shape (t, n)\r\n \r\n # compute B_1\r\n diag_num = ones_t.t().mm(X_te * B_inv_X_tr).squeeze() # shape (n,)\r\n diag_sparse = torch.sparse.FloatTensor(diag_n_idx, (diag_num / diag_denom).cpu(), torch.Size([n, n])).to(DEVICE) # sparse (n, n)\r\n B_1 = B_inv.mm(X_te) + (diag_sparse.t().mm(B_inv_X_tr.t())).t() # shape (t, n)\r\n \r\n # compute B_2\r\n B_2 = ((n_tr - 1) / (n_tr * (n_te - 1))) * (n_te * B_0 - B_1) # shape (t, n) \r\n B_2 = torch.max(torch.zeros(1).to(DEVICE), B_2) # shape (t, n) \r\n \r\n # compute leave-one-out CV loss\r\n loss_1 = ((X_tr * B_2).t().mm(ones_t).pow(2).sum() / (2 * n)).item()\r\n loss_2 = (ones_t.t().mm(X_te * B_2).mm(ones_n) / n).item() \r\n losses[sigma_idx, lam_idx] = loss_1 - loss_2 \r\n print(\"sigma = {:0.5f}, lambda = {:0.5f}, loss = {:0.5f}\".format(\r\n sigma, lam, losses[sigma_idx, lam_idx]))\r\n \r\n # get best hyperparameters \r\n sigma_idx, lam_idx = np.unravel_index(np.argmin(losses), losses.shape)\r\n sigma_hat, lam_hat = sigma_range[sigma_idx], lam_range[lam_idx] \r\n print(\"\\nbest loss = {:0.5f} for sigma = {:0.5f} and lambda = {:0.5f}\".format(\r\n losses[sigma_idx, lam_idx], sigma_hat, lam_hat))\r\n \r\n return sigma_hat, lam_hat\r\n \r\n \r\n def kernel_arrays(self,\r\n X_tr,\r\n X_te):\r\n \"\"\"\r\n Computes kernel matrix H and vector h from algorithm.\r\n \r\n H[l, l'] is equal to the sum over 
i=1:n_tr of \r\n \r\n exp(-(||x_i - c_l|| ^ 2 + -||x_i - c_l'|| ^ 2) / (2 * (sigma ^ 2)))\r\n \r\n where n_tr is the number of samples from the training distribution,\r\n x_i is the i-th sample from the training distribution and \r\n c_l is the l-th kernel centre.\r\n \r\n h[l] is equal to the sum over i=1:n_te of \r\n \r\n exp(-(||x_i - c_l|| ^ 2) / (2 * (sigma ^ 2)))\r\n \r\n where n_te is the number of samples from the test distribution,\r\n x_i is the i-th sample from the test distribution and \r\n c_l is the l-th kernel centre.\r\n \r\n Parameters\r\n ----------\r\n \r\n X_tr: torch tensor\r\n X_tr[l, i] is equal to the Gaussian kernel of the squared l2-norm\r\n of the difference between the l-th kernel centre and the\r\n i-th sample from the training distribution:\r\n \r\n exp(-(||x_i - c_l|| ^ 2) / (2 * (sigma ^ 2))).\r\n \r\n X_te: torch tensor\r\n X_te[l, i] is equal to the Gaussian kernel of the squared l2-norm\r\n of the difference between the l-th kernel centre and the\r\n i-th sample from the test distribution:\r\n \r\n exp(-(||x_i - c_l|| ^ 2) / (2 * (sigma ^ 2))).\r\n \r\n Returns\r\n -------\r\n \r\n H: torch tensor\r\n H[l, l'] is equal to the sum over i=1:X_tr.size(1) \r\n of X_tr[l, i] * X_tr[l', i] / X_tr.size(1)\r\n \r\n h: torch tensor\r\n h[l] is equal to the sum over i=1:X_te.size(1) \r\n of X_te[l, i] / X_te.size(1) \r\n \"\"\"\r\n \r\n # compute H\r\n n_tr = X_tr.size(1)\r\n H = X_tr.mm(X_tr.t()) / n_tr # shape (t, t)\r\n \r\n # compute h\r\n n_te = X_te.size(1)\r\n h = X_te.sum(dim=1, keepdim=True) / n_te # shape (t, 1)\r\n \r\n return H, h",
"id": "9610572",
"language": "Python",
"matching_score": 2.339475154876709,
"max_stars_count": 2,
"path": "covariate_shift_adaption/importance_estimation.py"
},
{
"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nNUMPY_MIN_VERSION = '1.8.2'\nTORCH_MIN_VERSION = '0.4.0'\nSKLEARN_MIN_VERSION = '0.19.0'\n\nsetup(\n name='covariate-shift-adaption',\n version='1.0',\n description='Covariate Shift Adaption algorithms',\n license='BSD 3-Clause License',\n author='<NAME> and <NAME>',\n author_email='',\n url='https://github.com/ayeright/covariate-shift-adaptation',\n packages=['covariate_shift_adaption'],\n install_requires=[\n f'numpy>={NUMPY_MIN_VERSION}',\n f'torch>={TORCH_MIN_VERSION}',\n f'scikit-learn>={SKLEARN_MIN_VERSION}',\n ],\n)\n",
"id": "8027672",
"language": "Python",
"matching_score": 0.09623268246650696,
"max_stars_count": 2,
"path": "setup.py"
},
{
"content": "from torch.nn import ReLU\nfrom torch.optim import Adam, SGD\n\n\nOPTIMISER_FACTORY = dict(\n sgd=SGD,\n adam=Adam,\n)\n\nACTIVATION_FACTORY = dict(\n relu=ReLU(),\n)\n",
"id": "4636279",
"language": "Python",
"matching_score": 0.0579855851829052,
"max_stars_count": 0,
"path": "experiments/utils/factory.py"
}
] | 3.112792 |
devalexanderdaza | [
{
"content": "#!/usr/bin/env python\nimport os\nimport shutil\n\nPROJECT_DIRECTORY = os.path.realpath(os.path.curdir)\n\n\ndef remove(filepath):\n fullpath = os.path.join(PROJECT_DIRECTORY, filepath)\n if os.path.isfile(fullpath):\n os.remove(fullpath)\n else:\n shutil.rmtree(fullpath, ignore_errors=True)\n\n\nif __name__ == \"__main__\":\n if \"{{ cookiecutter.open_source_license }}\" == \"Not open source\":\n remove(\".github/workflows/codeql.yml\") # codeql is available for free only for OSS\n remove(\"LICENSE\")\n\n if \"{{ cookiecutter.include_logo }}\" != \"y\":\n remove(\"docs/images/logo.svg\")\n\n if \"{{ cookiecutter.include_screenshots }}\" != \"y\":\n remove(\"docs/images/screenshot.png\")\n\n if \"{{ cookiecutter.include_security }}\" != \"y\":\n remove(\"docs/SECURITY.md\")\n\n if \"{{ cookiecutter.include_code_of_conduct }}\" != \"y\":\n remove(\"docs/CODE_OF_CONDUCT.md\")\n\n if \"{{ cookiecutter.include_workflows }}\" != \"y\":\n remove(\".github/workflows\")\n remove(\".github/labels.yml\")\n\n if \"{{ cookiecutter.use_github_discussions }}\" == \"y\":\n remove(\".github/ISSUE_TEMPLATE/04_SUPPORT_QUESTION.md\")\n\n if \"{{ cookiecutter.use_codeql }}\" != \"y\":\n remove(\".github/workflows/codeql.yml\")",
"id": "11080955",
"language": "Python",
"matching_score": 0,
"max_stars_count": 146,
"path": "hooks/post_gen_project.py"
}
] | 0 |
ben5962 | [
{
"content": "# This file will be ignored by pytddmon since it's begins with a \".\",\r\n# which per UNIX conventions means it is to be treated as hidden.\r\n# This is necessary e.g. since emacs creates dotfiles when saving,\r\n# in same directory as unit test.\r\n\r\nimport unittest\r\n\r\nclass TestClass(unittest.TestCase):\r\n def test1(self):\r\n assert True\r\n\r\n",
"id": "5037720",
"language": "Python",
"matching_score": 2.717941999435425,
"max_stars_count": 12,
"path": "systest/dotfiles_are_ignored/.test_is_hidden.py"
},
{
"content": "# \r\n\r\nimport unittest\r\n\r\nclass TestClass(unittest.TestCase):\r\n def test1(self):\r\n assert True\r\n\r\n",
"id": "5089586",
"language": "Python",
"matching_score": 0.10320361703634262,
"max_stars_count": 12,
"path": "systest/builtin_assert_works/test_user_builtin_assert.py"
},
{
"content": "# coding: utf-8\nimport unittest\nfrom pytddmon import Monitor\n\n\nclass TestChangeDetection(unittest.TestCase):\n\n def _set_up_monitor(self):\n files = ['file']\n file_finder = lambda: files\n get_file_size = lambda x: 1\n get_file_modification_time = lambda x: 1\n monitor = Monitor(file_finder, get_file_size, get_file_modification_time)\n return files, monitor\n\n def test_modification_time_changed(self):\n files = ['file']\n file_finder = lambda: files\n get_file_size = lambda x: 1\n\n modtime = [1]\n get_file_modification_time = lambda x: modtime[0]\n\n monitor = Monitor(file_finder, get_file_size, get_file_modification_time)\n modtime[0] = 2\n change_detected = monitor.look_for_changes()\n assert change_detected\n\n def test_nothing_changed(self):\n files, monitor = self._set_up_monitor()\n change_detected = monitor.look_for_changes()\n assert not change_detected\n\n def test_adding_file(self):\n files, monitor = self._set_up_monitor()\n files.append('file2')\n change_detected = monitor.look_for_changes()\n assert change_detected\n\n def test_renaming_file(self):\n files, monitor = self._set_up_monitor()\n files[0] = 'renamed'\n change_detected = monitor.look_for_changes()\n assert change_detected\n\n def test_change_is_only_detected_once(self):\n files, monitor = self._set_up_monitor()\n files[0] = 'changed'\n change_detected = monitor.look_for_changes()\n change_detected = monitor.look_for_changes()\n assert not change_detected\n\n def test_file_size_changed(self):\n files = ['file']\n filesize = [1]\n file_finder = lambda: files\n get_file_size = lambda x: filesize[0]\n get_file_modification_time = lambda x: 1\n\n monitor = Monitor(file_finder, get_file_size, get_file_modification_time)\n filesize[0] = 5\n change_detected = monitor.look_for_changes()\n assert change_detected\n\n def test_file_order_does_not_matter(self):\n files = ['file', 'file2']\n file_finder = lambda: files\n get_file_size = lambda x: 1\n get_file_modification_time = lambda x: 1\n\n monitor = Monitor(file_finder, get_file_size, get_file_modification_time)\n files[:] = ['file2', 'file']\n change_detected = monitor.look_for_changes()\n assert not change_detected\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "11649605",
"language": "Python",
"matching_score": 1.7605057954788208,
"max_stars_count": 12,
"path": "src/tests/test_monitor.py"
},
{
"content": "# coding: utf-8\n\"\"\"\"\n#- kör tester från början\n#- kör inte tester om ingen förändring\n#- kör tester om förändring\n- summa finns i .total_tests\n- gröna finns i .total_passed_tests\n\"\"\"\n\nimport sys\nsys.path.append('..')\n\nimport unittest\nfrom pytddmon import Pytddmon\n\n\nclass TestPytddmonMonitorCommunication(unittest.TestCase):\n class FakeMonitor:\n def __init__(self, look_for_changes_returns):\n self.returns = list(look_for_changes_returns)\n self.returns.reverse()\n\n def look_for_changes(self):\n return self.returns.pop()\n\n def setUp(self):\n self.number_of_test_runs = 0\n\n def _set_up_pytddmon(self, params):\n fake_monitor = self.FakeMonitor(look_for_changes_returns=params)\n pytddmon = Pytddmon(self.fake_filefinder, fake_monitor)\n pytddmon.main()\n return pytddmon\n\n def fake_filefinder(self):\n self.number_of_test_runs += 1\n return []\n\n def test_runs_tests_at_boot(self):\n self._set_up_pytddmon([False])\n self.assertEqual(1, self.number_of_test_runs)\n\n def test_runs_tests_when_change_detected(self):\n self._set_up_pytddmon([True])\n self.assertEqual(2, self.number_of_test_runs)\n\n def test_doesnt_run_tests_when_no_change(self):\n pytddmon = self._set_up_pytddmon([True, False])\n pytddmon.main()\n self.assertEqual(2, self.number_of_test_runs)\n\n def test_runs_each_time_a_change_is_detected(self):\n runs = 10\n fake_monitor = self.FakeMonitor(look_for_changes_returns=([True] * runs))\n pytddmon = Pytddmon(self.fake_filefinder, fake_monitor)\n for _ in range(runs):\n pytddmon.main()\n self.assertEqual(runs + 1, self.number_of_test_runs)\n\n def test_total_tests_is_zero_if_no_tests_are_run(self):\n pytddmon = self._set_up_pytddmon([False])\n self.assertEqual(0, pytddmon.total_tests_run)\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "3546559",
"language": "Python",
"matching_score": 1.2464162111282349,
"max_stars_count": 12,
"path": "src/tests/test_pytddmon.py"
},
{
"content": "#! /usr/bin/env python\n#coding: utf-8\n\n\"\"\"\nCOPYRIGHT (c) 2009-2014\n\nLICENSE\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport os\nimport sys\nimport platform\nimport optparse\nimport re\nimport unittest\nimport doctest\nimport time\nimport multiprocessing\nimport functools\n\n\nON_PYTHON3 = sys.version_info[0] == 3\nON_WINDOWS = platform.system() == \"Windows\"\n\n\n####\n## Core\n####\n\nclass Pytddmon:\n \"\"\"The core class, all functionality (except UI) is\n combined into this class\"\"\"\n\n def __init__(\n self,\n file_finder,\n monitor,\n project_name=\"<pytddmon>\",\n pulse_disabled=False\n ):\n self.file_finder = file_finder\n self.project_name = project_name\n self.monitor = monitor\n self.pulse_disabled = pulse_disabled\n self.change_detected = False\n\n self.total_tests_run = 0\n self.total_tests_passed = 0\n self.last_test_run_time = -1\n self.log = \"\"\n self.status_message = 'n/a'\n\n self.run_tests()\n\n def run_tests(self):\n \"\"\"Runs all tests and updates state variables with results.\"\"\"\n\n file_paths = self.file_finder()\n\n # We need to run the tests in a separate process, since\n # Python caches loaded modules, and unittest/doctest\n # imports modules to run them.\n # However, we do not want to assume users' unit tests\n # are thread-safe, so we only run one test module at a\n # time, using processes = 1.\n start = time.time()\n if file_paths:\n pool = multiprocessing.Pool(processes=1)\n results = pool.map(run_tests_in_file, file_paths)\n pool.close()\n pool.join()\n else:\n results = []\n self.last_test_run_time = time.time() - start\n\n now = time.strftime(\"%H:%M:%S\", time.localtime())\n self.log = \"\"\n self.log += \"Monitoring folder %s.\\n\" % self.project_name\n self.log += \"Found <TOTALTESTS> tests in %i files.\\n\" % len(results)\n self.log += \"Last change detected at %s.\\n\" % now\n self.log += \"Test run took %.2f seconds.\\n\" % self.last_test_run_time\n self.log += \"\\n\"\n self.total_tests_passed = 0\n self.total_tests_run = 0\n module_logs = [] # Summary for each module with errors first\n for packed in results:\n (module, green, total, logtext) = packed\n self.total_tests_passed += green\n self.total_tests_run += total\n module_log = \"\\nLog from \" + module + \":\\n\" + logtext\n if not isinstance(total, int) or total - green > 0:\n module_logs.insert(0, module_log)\n else:\n module_logs.append(module_log)\n self.log += ''.join(module_logs)\n self.log = self.log.replace('<TOTALTESTS>',\n str(int(self.total_tests_run.real)))\n 
self.status_message = now\n\n def get_and_set_change_detected(self):\n self.change_detected = self.monitor.look_for_changes()\n return self.change_detected\n\n def main(self):\n \"\"\"This is the main loop body\"\"\"\n self.change_detected = self.monitor.look_for_changes()\n if self.change_detected:\n self.run_tests()\n\n def get_log(self):\n \"\"\"Access the log string created during test run\"\"\"\n return self.log\n\n def get_status_message(self):\n \"\"\"Return message in status bar\"\"\"\n return self.status_message\n\n\nclass Monitor:\n \"\"\"Looks for file changes when prompted to\"\"\"\n\n def __init__(self, file_finder, get_file_size, get_file_modtime):\n self.file_finder = file_finder\n self.get_file_size = get_file_size\n self.get_file_modtime = get_file_modtime\n self.snapshot = self.get_snapshot()\n\n def get_snapshot(self):\n snapshot = {}\n for found_file in self.file_finder():\n file_size = self.get_file_size(found_file)\n file_modtime = self.get_file_modtime(found_file)\n snapshot[found_file] = (file_size, file_modtime)\n return snapshot\n\n def look_for_changes(self):\n new_snapshot = self.get_snapshot()\n change_detected = new_snapshot != self.snapshot\n self.snapshot = new_snapshot\n return change_detected\n\n\nclass Kata:\n ''' Generates a logical unit test template file '''\n def __init__(self, kata_name):\n classname = kata_name.title().replace(' ', '') + 'Tests'\n self.content = '''\\\n# coding: utf-8\nimport unittest\n# Unit tests for kata '{0}'.\n\nclass {1}(unittest.TestCase):\n\n def test_something(self):\n self.assertTrue(True)\n\n def test_another_thing(self):\n self.assertEqual([1, 2], [x for x in range(1, 3)])\n\n'''.format(kata_name, classname)\n self.filename = 'test_' + kata_name.lower().replace(' ', '_') + '.py'\n\n\n####\n## Finding files\n####\nclass FileFinder:\n \"\"\"Returns all files matching given regular\n expression from root downwards\"\"\"\n\n def __init__(self, root, regexp):\n self.root = os.path.abspath(root)\n self.regexp = regexp\n\n def __call__(self):\n return self.find_files()\n\n def find_files(self):\n \"\"\"recursively finds files matching regexp\"\"\"\n file_paths = set()\n for path, _folder, filenames in os.walk(self.root):\n for filename in filenames:\n if self.re_complete_match(filename):\n file_paths.add(\n os.path.abspath(os.path.join(path, filename))\n )\n return file_paths\n\n def re_complete_match(self, string_to_match):\n \"\"\"full string regexp check\"\"\"\n return bool(re.match(self.regexp + \"$\", string_to_match))\n\n\n####\n## Finding & running tests\n####\n\ndef log_exceptions(func):\n \"\"\"Decorator that forwards the error message from an exception to the log\n slot of the return value, and also returns a complex number to signal that\n the result is an error.\"\"\"\n wraps = functools.wraps\n\n @wraps(func)\n def wrapper(*a, **k):\n \"\"\"Docstring\"\"\"\n try:\n return func(*a, **k)\n except:\n import traceback\n\n return 'Exception(%s)' % a[0], 0, 1j, traceback.format_exc()\n\n return wrapper\n\n\n@log_exceptions\ndef run_tests_in_file(file_path):\n module = file_name_to_module(\"\", file_path)\n return run_module(module)\n\n\ndef run_module(module):\n suite = find_tests_in_module(module)\n (green, total, log) = run_suite(suite)\n return module, green, total, log\n\n\ndef file_name_to_module(base_path, file_name):\n r\"\"\"Converts file_names of files in packages to import friendly dot\n separated paths.\n\n Examples:\n >>> print(file_name_to_module(\"\",\"pytddmon.pyw\"))\n pytddmon\n >>> 
print(file_name_to_module(\"\",\"pytddmon.py\"))\n pytddmon\n >>> print(file_name_to_module(\"\",\"tests/pytddmon.py\"))\n tests.pytddmon\n >>> print(file_name_to_module(\"\",\"./tests/pytddmon.py\"))\n tests.pytddmon\n >>> print(file_name_to_module(\"\",\".\\\\tests\\\\pytddmon.py\"))\n tests.pytddmon\n >>> print(\n ... file_name_to_module(\n ... \"/User/pytddmon\\\\ geek/pytddmon/\",\n ... \"/User/pytddmon\\\\ geek/pytddmon/tests/pytddmon.py\"\n ... )\n ... )\n tests.pytddmon\n \"\"\"\n symbol_stripped = os.path.relpath(file_name, base_path)\n for symbol in r\"/\\.\":\n symbol_stripped = symbol_stripped.replace(symbol, \" \")\n words = symbol_stripped.split()\n # remove .py/.pyw\n module_words = words[:-1]\n module_name = '.'.join(module_words)\n return module_name\n\n\ndef find_tests_in_module(module):\n suite = unittest.TestSuite()\n suite.addTests(find_unittests_in_module(module))\n suite.addTests(find_doctests_in_module(module))\n return suite\n\n\ndef find_unittests_in_module(module):\n test_loader = unittest.TestLoader()\n return test_loader.loadTestsFromName(module)\n\n\ndef find_doctests_in_module(module):\n try:\n return doctest.DocTestSuite(module, optionflags=doctest.ELLIPSIS)\n except ValueError:\n return unittest.TestSuite()\n\n\ndef run_suite(suite):\n def StringIO():\n if ON_PYTHON3:\n import io as StringIO\n else:\n import StringIO\n return StringIO.StringIO()\n\n err_log = StringIO()\n text_test_runner = unittest.TextTestRunner(stream=err_log, verbosity=1)\n result = text_test_runner.run(suite)\n green = result.testsRun - len(result.failures) - len(result.errors)\n total = result.testsRun\n if green < total:\n log = err_log.getvalue()\n else:\n log = \"All %i tests passed\\n\" % total\n return green, total, log\n\n\n####\n## GUI\n####\n\ndef import_tkinter():\n \"\"\"imports tkinter from python 3.x or python 2.x\"\"\"\n try:\n if not ON_PYTHON3:\n import Tkinter as tkinter\n else:\n import tkinter\n except ImportError as e:\n sys.stderr.write(\n 'Cannot import tkinter. Please install it using your system ' +\n 'package manager, since tkinter is not available on PyPI. 
' +\n ' In Ubuntu: \\n' +\n ' sudo apt-get install python-tk\\n' +\n 'The actual error was \"{0}\"\\n'.format(e))\n raise SystemExit(1)\n return tkinter\n\n\ndef import_tkFont():\n \"\"\"imports tkFont from python 3.x or python 2.x\"\"\"\n if not ON_PYTHON3:\n import tkFont\n else:\n from tkinter import font as tkFont\n return tkFont\n\n\nclass TKGUIButton(object):\n \"\"\"Encapsulate the button(label)\"\"\"\n\n def __init__(self, tkinter, tkFont, toplevel, display_log_callback):\n self.font = tkFont.Font(name=\"Helvetica\", size=28)\n self.label = tkinter.Label(\n toplevel,\n text=\"loading...\",\n relief='raised',\n font=self.font,\n justify=tkinter.CENTER,\n anchor=tkinter.CENTER\n )\n self.bind_click(display_log_callback)\n self.pack()\n\n def bind_click(self, display_log_callback):\n \"\"\"Binds the left mouse button click event to trigger the log_windows\n display method\"\"\"\n self.label.bind(\n '<Button-1>',\n display_log_callback\n )\n\n def pack(self):\n \"\"\"packs the label\"\"\"\n self.label.pack(\n expand=1,\n fill='both'\n )\n\n def update(self, text, color):\n \"\"\"updates the color and displayed text.\"\"\"\n self.label.configure(\n bg=color,\n activebackground=color,\n text=text\n )\n\n\nclass TkGUI(object):\n \"\"\"Connect pytddmon engine to Tkinter GUI toolkit\"\"\"\n\n def __init__(self, pytddmon, tkinter, tkFont):\n self.pytddmon = pytddmon\n self.tkinter = tkinter\n self.tkFont = tkFont\n self.color_picker = ColorPicker(pulse_disabled=pytddmon.pulse_disabled)\n self.root = None\n self.building_root()\n self.title_font = None\n self.building_fonts()\n self.frame = None\n self.building_frame()\n self.button = TKGUIButton(\n tkinter,\n tkFont,\n self.frame,\n self.display_log_message\n )\n self.status_bar = None\n self.building_status_bar()\n self.frame.grid()\n self.message_window = None\n self.text = None\n\n if ON_WINDOWS:\n buttons_width = 25\n else:\n buttons_width = 75\n self.root.minsize(\n width=self.title_font.measure(\n self.pytddmon.project_name\n ) + buttons_width,\n height=0\n )\n self.frame.pack(expand=1, fill=\"both\")\n self.create_text_window()\n self.update_text_window()\n\n def building_root(self):\n \"\"\"take hold of the tk root object as self.root\"\"\"\n self.root = self.tkinter.Tk()\n self.root.wm_attributes(\"-topmost\", 1)\n if ON_WINDOWS:\n self.root.attributes(\"-toolwindow\", 1)\n print(\"Minimize me!\")\n\n def building_fonts(self):\n \"\"\"building fonts\"\"\"\n self.title_font = self.tkFont.nametofont(\"TkCaptionFont\")\n\n def building_frame(self):\n \"\"\"Creates a frame and assigns it to self.frame\"\"\"\n # Calculate the width of the tilte + buttons\n self.frame = self.tkinter.Frame(\n self.root\n )\n # Sets the title of the gui\n self.frame.master.title(self.pytddmon.project_name)\n # Forces the window to not be resizeable\n self.frame.master.resizable(False, False)\n self.frame.pack(expand=1, fill=\"both\")\n\n def building_status_bar(self):\n \"\"\"Add status bar and assign it to self.status_bar\"\"\"\n self.status_bar = self.tkinter.Label(\n self.frame,\n text=\"n/a\"\n )\n self.status_bar.pack(expand=1, fill=\"both\")\n\n def _update_and_get_color(self):\n \"\"\"Calculate the current color and trigger pulse\"\"\"\n self.color_picker.set_result(\n self.pytddmon.total_tests_passed,\n self.pytddmon.total_tests_run,\n )\n light, color = self.color_picker.pick()\n rgb = self.color_picker.translate_color(light, color)\n self.color_picker.pulse()\n return rgb\n\n def _get_text(self):\n \"\"\"Calculates the text to show the 
user(passed/total or Error!)\"\"\"\n if self.pytddmon.total_tests_run.imag != 0:\n text = \"?ERROR\"\n else:\n text = \"%r/%r\" % (\n self.pytddmon.total_tests_passed,\n self.pytddmon.total_tests_run\n )\n return text\n\n def update(self):\n \"\"\"updates the tk gui\"\"\"\n rgb = self._update_and_get_color()\n text = self._get_text()\n self.button.update(text, rgb)\n self.root.configure(bg=rgb)\n self.update_status(self.pytddmon.get_status_message())\n\n if self.pytddmon.change_detected:\n self.update_text_window()\n\n def update_status(self, message):\n self.status_bar.configure(\n text=message\n )\n self.status_bar.update_idletasks()\n\n def get_text_message(self):\n \"\"\"returns the log message from pytddmon\"\"\"\n message = self.pytddmon.get_log()\n return message\n\n def create_text_window(self):\n \"\"\"creates new window and text widget\"\"\"\n win = self.tkinter.Toplevel()\n if ON_WINDOWS:\n win.attributes(\"-toolwindow\", 1)\n win.title('Details')\n win.protocol('WM_DELETE_WINDOW', self.when_message_window_x)\n self.message_window = win\n self.text = self.tkinter.Text(win)\n self.message_window.withdraw()\n\n def when_message_window_x(self):\n self.message_window.withdraw()\n\n def update_text_window(self):\n \"\"\"inserts/replaces the log message in the text widget\"\"\"\n text = self.text\n text['state'] = self.tkinter.NORMAL\n text.delete(1.0, self.tkinter.END)\n text.insert(self.tkinter.INSERT, self.get_text_message())\n text['state'] = self.tkinter.DISABLED\n text.pack(expand=1, fill='both')\n text.focus_set()\n\n def display_log_message(self, _arg):\n \"\"\"displays/close the log message from pytddmon in a window\"\"\"\n if self.message_window.state() == 'normal':\n self.message_window.withdraw()\n else:\n self.message_window.state('normal')\n\n def loop(self):\n \"\"\"the main loop\"\"\"\n if self.pytddmon.get_and_set_change_detected():\n self.update_status('Testing...')\n self.pytddmon.run_tests()\n self.update()\n self.frame.after(750, self.loop)\n\n def run(self):\n \"\"\"starts the main loop and goes into sleep\"\"\"\n self.loop()\n self.root.mainloop()\n\n\nclass ColorPicker:\n \"\"\"\n ColorPicker decides the background color the pytddmon window,\n based on the number of green tests, and the total number of\n tests. 
Also, there is a \"pulse\" (light color, dark color),\n to increase the feeling of continuous testing.\n \"\"\"\n color_table = {\n (True, 'green'): '0f0',\n (False, 'green'): '0c0',\n (True, 'red'): 'f00',\n (False, 'red'): 'c00',\n (True, 'orange'): 'fc0',\n (False, 'orange'): 'ca0',\n (True, 'gray'): '999',\n (False, 'gray'): '555'\n }\n\n def __init__(self, pulse_disabled=False):\n self.color = 'green'\n self.light = True\n self.pulse_disabled = pulse_disabled\n\n def pick(self):\n \"\"\"returns the tuple (light, color) with the types(bool ,str)\"\"\"\n return self.light, self.color\n\n def pulse(self):\n \"\"\"updates the light state\"\"\"\n if self.pulse_disabled:\n return\n self.light = not self.light\n\n def reset_pulse(self):\n \"\"\"resets the light state\"\"\"\n self.light = True\n\n def set_result(self, green, total):\n \"\"\"calculates what color should be used and may reset the lightness\"\"\"\n old_color = self.color\n self.color = 'green'\n if green.imag or total.imag:\n self.color = \"orange\"\n elif green == total - 1:\n self.color = 'red'\n elif green < total - 1:\n self.color = 'gray'\n if self.color != old_color:\n self.reset_pulse()\n\n @classmethod\n def translate_color(cls, light, color):\n \"\"\"helper method to create a rgb string\"\"\"\n return \"#\" + cls.color_table[(light, color)]\n\n\ndef parse_commandline():\n \"\"\"\n returns (files, test_mode) created from the command line arguments\n passed to pytddmon.\n \"\"\"\n usage = \"usage: %prog [options] [static file list]\"\n version = \"%prog \" + '1.0.8'\n parser = optparse.OptionParser(usage=usage, version=version)\n parser.add_option(\n \"--log-and-exit\",\n action=\"store_true\",\n default=False,\n help='Run all tests, write the results to \"pytddmon.log\" and exit.')\n parser.add_option(\n \"--log-path\",\n help='Instead of writing to \"pytddmon.log\" in --log-and-exit, ' +\n 'write to LOG_PATH.')\n parser.add_option(\n \"--gen-kata\",\n help='Generate a stub unit test file appropriate for jump ' +\n 'starting a kata')\n parser.add_option(\n \"--no-pulse\",\n dest=\"pulse_disabled\",\n action=\"store_true\",\n default=False,\n help='Disable the \"heartbeating colorshift\" of pytddmon.')\n (options, args) = parser.parse_args()\n return (\n args,\n options.log_and_exit,\n options.log_path,\n options.pulse_disabled,\n options.gen_kata)\n\n\ndef build_monitor(file_finder):\n try:\n os.stat_float_times(False)\n except AttributeError:\n pass #AttributeError: module 'os' has no attribute 'stat_float_times'\n\n def get_file_size(file_path):\n stat = os.stat(file_path)\n return stat.st_size\n\n def get_file_modtime(file_path):\n stat = os.stat(file_path)\n return stat.st_mtime\n\n return Monitor(file_finder, get_file_size, get_file_modtime)\n\n\ndef run():\n \"\"\"\n The main function: basic initialization and program start\n \"\"\"\n cwd = os.getcwd()\n\n # Include current work directory in Python path\n sys.path[:0] = [cwd]\n\n # Command line argument handling\n (static_file_set, test_mode, test_output,\n pulse_disabled, kata_name) = parse_commandline()\n\n # Generating a kata unit test file? 
Do it and exit ...\n if kata_name:\n kata = Kata(kata_name)\n print('Writing kata unit test template to ' + kata.filename + '.')\n with open(kata.filename, 'w') as f:\n f.write(kata.content)\n return\n\n # What files to monitor?\n if not static_file_set:\n regex = (\"^[^\\\\.].*.py\")\n else:\n regex = '|'.join(static_file_set)\n file_finder = FileFinder(cwd, regex)\n\n # The change detector: Monitor\n monitor = build_monitor(file_finder)\n\n # Python engine ready to be setup\n pytddmon = Pytddmon(\n file_finder,\n monitor,\n project_name=os.path.basename(cwd),\n pulse_disabled=pulse_disabled\n )\n\n # Start the engine\n if not test_mode:\n TkGUI(pytddmon, import_tkinter(), import_tkFont()).run()\n else:\n pytddmon.main()\n\n outputfile = test_output or 'pytddmon.log'\n with open(outputfile, 'w') as log_file:\n log_file.write(\n \"green=%r\\ntotal=%r\\n\" % (\n pytddmon.total_tests_passed,\n pytddmon.total_tests_run\n )\n )\n\n\nif __name__ == '__main__':\n run()\n",
"id": "9619686",
"language": "Python",
"matching_score": 4.386894226074219,
"max_stars_count": 0,
"path": "src/pytddmon.py"
},
{
"content": "#! /usr/bin/env python\n#coding: utf-8\nimport os\nimport shutil\nimport subprocess\nfrom optparse import OptionParser\n\ndef get_log_as_dictionary(path):\n f = open(path, 'r')\n rows = f.readlines()\n f.close()\n dict = {}\n for row in rows:\n (name, splitter, value) = row.partition('=')\n dict[name] = value.strip()\n return dict\n\ndef get_log(testdir, logname):\n fullpath = os.path.join(testdir, logname)\n return get_log_as_dictionary(fullpath)\n\ndef pretty_please(testdir):\n testdir = testdir.replace('\\\\', '/')\n testdir = testdir.split('/')[-1]\n testdir = testdir.replace('_', ' ')\n testdir = testdir.title()\n return testdir\n\ndef compare(testdir, what, gotdict, expdict):\n got = gotdict[what]\n exp = expdict[what]\n pretty = pretty_please(testdir)\n if got != exp:\n print(pretty + \": expected \" + exp + \" \" + what + \" test(s), got \" + got)\n\ndef compare_logs(testdir, got, exp):\n compare(testdir, 'green', got, exp)\n compare(testdir, 'total', got, exp)\n\ndef compare_logs_in_dir(testdir, outputdir):\n gotinfo = get_log(outputdir, \"pytddmon.log\")\n expinfo = get_log(testdir, \"expected.log\")\n compare_logs(testdir, gotinfo, expinfo)\n\ndef get_args(path):\n argspath = os.path.join(path, \"args.txt\")\n if not os.path.exists(argspath):\n return []\n f = open(argspath, \"r\")\n content = f.read().strip()\n f.close()\n return content.split()\n\ndef run_all():\n (tmpdir, cleanup) = parse_commandline()\n cwd = os.getcwd()\n pytddmon_path = os.path.join(cwd, \"../src/pytddmon.py\")\n names = os.listdir(cwd)\n for name in names:\n path = os.path.join(cwd, name)\n if os.path.isdir(path):\n os.chdir(path)\n cmdline = ['python', pytddmon_path, \"--log-and-exit\"]\n log_path = path\n if tmpdir:\n log_path = os.path.join(tmpdir, name)\n log_name = os.path.join(log_path, 'pytddmon.log')\n if not os.path.exists(log_path):\n os.mkdir(log_path)\n touch(log_name)\n cmdline.extend(['--log-path', log_name])\n args = get_args(path)\n cmdline.extend(args)\n try:\n subprocess.check_call(cmdline, stdout=None, stderr=None)\n except:\n print(\" .. in test: \" + path + \"\\n\")\n compare_logs_in_dir(path, log_path)\n if tmpdir and cleanup:\n shutil.rmtree(log_path)\n\n os.chdir(cwd)\n\ndef touch(fname, times=None):\n with open(fname, 'a'):\n os.utime(fname, times)\n\ndef parse_commandline():\n parser = OptionParser()\n parser.add_option('-t', '--tmpdir', help='Write log files to TMPDIR')\n parser.add_option('-c',\n '--clean-up',\n action='store_true',\n default=False,\n help='If TMPDIR is defined, then clean up the temporary files and directories created')\n\n (options, args) = parser.parse_args()\n return options.tmpdir, options.clean_up\n\nif __name__ == \"__main__\":\n run_all()\n",
"id": "7582477",
"language": "Python",
"matching_score": 0.6096998453140259,
"max_stars_count": 12,
"path": "systest/systest.py"
},
{
"content": "#!/usr/bin/env python\n# coding: utf-8\n\nfrom setuptools import setup\n\nVERSION = \"1.0.8\"\nAUTHORS = '''\\\n<NAME>, <NAME>, <NAME>,\n<NAME>, <NAME>, <NAME>,\n<NAME>, <NAME>, <NAME>,\n<NAME>'''\nHOMEPAGE = \"http://pytddmon.org\"\n\nif __name__ == '__main__':\n setup(\n name='pytddmon',\n version=VERSION,\n description='continuous unit testing in Python',\n long_description='Read the pytddmon blog and more documentation at ' +\n 'http://pytddmon.org',\n author=AUTHORS,\n author_email=\"<EMAIL>\",\n license='MIT',\n url=HOMEPAGE,\n scripts=['src/pytddmon.py'],\n test_suite='nose.collector',\n zip_safe=True\n )\n",
"id": "8776876",
"language": "Python",
"matching_score": 0.030571529641747475,
"max_stars_count": 12,
"path": "setup.py"
},
{
"content": "import unittest\nfrom pytddmon import ColorPicker\n\n\nclass TestPulse(unittest.TestCase):\n def setUp(self):\n self.color_picker = ColorPicker()\n\n def test_starts_with_light_color(self):\n light, _ = self.color_picker.pick()\n self.assertTrue(light)\n\n def test_pulses_by_default(self):\n self.assertFalse(self.color_picker.pulse_disabled)\n\n def test_dark_after_pulse(self):\n self.color_picker.pulse()\n light, _ = self.color_picker.pick()\n self.assertFalse(light)\n\n def test_no_light_change_after_disabled_pulse(self):\n color_picker = ColorPicker(pulse_disabled=True)\n original_light, _ = color_picker.pick()\n color_picker.pulse()\n new_light, _ = color_picker.pick()\n self.assertEqual(original_light, new_light)\n\n def test_no_failing_test_picks_green(self):\n self.color_picker.set_result(1, 1)\n (light, color) = self.color_picker.pick()\n self.assertEqual(color, 'green')\n\n def test_one_failing_test_picks_red(self):\n self.color_picker.set_result(1, 2)\n (light, color) = self.color_picker.pick()\n self.assertEqual(color, 'red')\n\n def test_two_failing_tests_picks_gray(self):\n self.color_picker.set_result(1, 3)\n (light, color) = self.color_picker.pick()\n self.assertEqual(color, 'gray')\n\n def test_changing_color_resets_pulse(self):\n self.color_picker.set_result(1, 1)\n self.color_picker.pulse()\n self.color_picker.set_result(1, 2)\n (light, color) = self.color_picker.pick()\n self.assertTrue(light)\n\n def test_default_color_is_green(self):\n self.color_picker = ColorPicker()\n (light, color) = self.color_picker.pick()\n self.assertEqual('green', color)\n\n def test_no_tests_means_green(self):\n self.color_picker.set_result(0, 0)\n (light, color) = self.color_picker.pick()\n self.assertEqual('green', color)\n\n def test_pulse_is_not_reset_if_colors_stays_same(self):\n self.color_picker.pulse()\n self.color_picker.set_result(1, 1)\n self.color_picker.set_result(1, 1)\n (light, color) = self.color_picker.pick()\n self.assertFalse(light)\n",
"id": "12368077",
"language": "Python",
"matching_score": 2.571010112762451,
"max_stars_count": 12,
"path": "src/tests/test_color_picker.py"
},
{
"content": "import unittest\n\nfrom pytddmon import Kata\n\nclass KataGeneratorTests(unittest.TestCase):\n\tdef setUp(self):\n\t\tkata = Kata('bowling')\n\t\tself.result = kata.content\n\t\tself.filename = kata.filename\n\n\tdef test_output_includes_kata_name(self):\n\t\tself.assertTrue('bowling' in self.result)\n\n\tdef test_output_imports_unittest(self):\n\t\tself.assertTrue('import unittest' in self.result)\n\n\tdef test_classname_cased_nicely(self):\n\t\tself.assertTrue('BowlingTests' in self.result)\n\n\tdef test_contains_a_test_def(self):\n\t\tself.assertTrue('def test_something(self):' in self.result)\n\n\tdef test_contains_a_true_assertion(self):\n\t\tself.assertTrue('self.assertTrue(True)' in self.result)\n\n\tdef test_contains_an_list_equality_assertion(self):\n\t\tself.assertTrue('self.assertEqual([1, 2], [x for x in range(1, 3)])' in self.result)\n\n\tdef test_removes_spaces_in_name(self):\n\t\tresult = Kata('a name with spaces').content\n\t\tself.assertTrue('ANameWithSpacesTests' in result, result)\n\n\tdef test_generates_a_nice_filename(self):\n\t\tself.assertEqual('test_bowling.py', self.filename)\n\n\tdef test_filename_is_stripped_from_spaces(self):\n\t\tfilename = Kata('Blair witch project').filename\n\t\tself.assertEqual('test_blair_witch_project.py', filename)\n\n\tdef test_unicode_shebang_included(self):\n\t\tresult = Kata('some nice kata').content\n\t\tself.assertTrue('# coding: utf-8' in result, result)\n",
"id": "12021834",
"language": "Python",
"matching_score": 3.1954081058502197,
"max_stars_count": 12,
"path": "src/tests/test_genkata.py"
},
{
"content": "import unittest\n\nclass TestCase(unittest.TestCase):\n def test_something(self):\n self.assertTrue(True)\n",
"id": "8883636",
"language": "Python",
"matching_score": 0.9195616245269775,
"max_stars_count": 12,
"path": "systest/testfile_from_argument/unit.py"
},
{
"content": "# coding: utf-8\nimport unittest\n\nimport pytddmon\n\n\nclass TestStatusBarFeature(unittest.TestCase):\n\n def _test_gui_should_contain_a_status_bar(self):\n # This test creates an X error in a running pytddmon session\n # FIXME: This code does not seem to work\n import mock\n gui = pytddmon.TkGUI(mock.Mock())\n self.assertTrue(hasattr(gui, 'status_bar'))\n\n def test_pytddmon_should_have_get_status_message_function(self):\n self.assertTrue(hasattr(pytddmon.Pytddmon, 'get_status_message'))\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "11610292",
"language": "Python",
"matching_score": 1,
"max_stars_count": 12,
"path": "src/tests/test_status_bar_feature.py"
},
{
"content": "def id_function(x):\n \"\"\"\n >>> id_function(10)\n 10\n \"\"\"\n return x\n\n",
"id": "504923",
"language": "Python",
"matching_score": 0,
"max_stars_count": 12,
"path": "systest/rexursivescan_for_doctest/test_doctest.py"
},
{
"content": "def fn():\n '''\n >>> fn()\n 5\n '''\n return 5\n",
"id": "9563304",
"language": "Python",
"matching_score": 1,
"max_stars_count": 12,
"path": "systest/one_green_doctest/unit.py"
},
{
"content": "def fn():\n print('.')\n",
"id": "7449657",
"language": "Python",
"matching_score": 1,
"max_stars_count": 12,
"path": "systest/print_dot_in_code_does_not_fool_counting/unit.py"
}
] | 1 |
sandrinemfuranzima | [
{
"content": "from django.apps import AppConfig\n\n\nclass InstConfig(AppConfig):\n name = 'inst'\n",
"id": "2791737",
"language": "Python",
"matching_score": 0.23584435880184174,
"max_stars_count": 4,
"path": "inst/apps.py"
},
{
"content": "from django.db import models\nfrom pyuploadcare.dj.models import ImageField\n\n# Create your models here.\n\n\nclass Location(models.Model):\n name = models.CharField(max_length =50)\n\n @classmethod\n def tag_articles(cls):\n tags = cls.objects.all()\n return tags\n\n def save_location(self):\n self.save()\n\n def delete_location(self):\n self.delete()\n\n def update_location(self, update):\n self.name = update\n self.save()\n\n @classmethod\n def get_location_id(cls, id):\n locate = Location.objects.get(pk = id)\n return locate\n\n def __str__(self):\n return self.name\n\nclass Category(models.Model):\n name = models.CharField(max_length =50)\n\n\n def save_category(self):\n self.save()\n\n def delete_category(self):\n self.delete()\n\n def update_category(self, update):\n self.name = update\n self.save()\n\n @classmethod\n def get_category_id(cls, id):\n category = Category.objects.get(pk = id)\n return category\n\n def __str__(self):\n return self.name\n\nclass Image(models.Model):\n name = models.CharField(max_length = 60)\n pic = models.ImageField(upload_to = 'uploads/')\n picture = ImageField( blank = True, manual_crop = '1920x1080')\n description = models.TextField()\n image_location = models.ForeignKey('Location')\n image_category = models.ForeignKey('Category')\n\n def save_image(self):\n self.save()\n\n def delete_image(self):\n self.delete()\n\n @classmethod\n def update_image(cls, id ,name, description , image_location, image_category):\n update = cls.objects.filter(id = id).update(name = name, description = description ,image_location = image_location,image_category = image_category)\n # return update\n\n @classmethod\n def get_all_images(cls):\n images = cls.objects.all()\n return images\n\n @classmethod\n def get_image_by_id(cls,id):\n image = cls.objects.filter(id= id).all()\n return image\n\n @classmethod\n def search_by_category(cls,image_category):\n images = Image.objects.filter(image_category__name__icontains=image_category)\n return images\n\n @classmethod\n def filter_by_location(cls, image_location):\n images_location = cls.objects.filter(image_location__id=image_location)\n return images_location\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = ['name']\n",
"id": "5928115",
"language": "Python",
"matching_score": 2.0313591957092285,
"max_stars_count": 1,
"path": "gallery/models.py"
},
{
"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-07-15 18:16\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('gallery', '0003_image_pic'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='image',\n name='image_link',\n field=models.CharField(default=12, max_length=500),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='image',\n name='pic',\n field=models.ImageField(upload_to='uploads/'),\n ),\n ]\n",
"id": "1923217",
"language": "Python",
"matching_score": 4.767912864685059,
"max_stars_count": 1,
"path": "gallery/migrations/0004_auto_20180715_2116.py"
},
{
"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11 on 2018-07-13 13:02\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('gallery', '0002_auto_20180713_1332'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='image',\n name='pic',\n field=models.ImageField(default=12, upload_to='uploads'),\n preserve_default=False,\n ),\n ]\n",
"id": "1164567",
"language": "Python",
"matching_score": 0.4335249066352844,
"max_stars_count": 1,
"path": "gallery/migrations/0003_image_pic.py"
},
{
"content": "from django.shortcuts import render,redirect,get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom . forms import ProfileUploadForm,CommentForm,ProfileForm\nfrom django.http import HttpResponse\nfrom . models import Pic ,Profile, Likes, Follow, Comment,Unfollow\nfrom django.conf import settings\n\n\n# Create your views here.\n@login_required(login_url='/accounts/login/')\ndef index(request):\n title = 'Instagram'\n pic_posts = Pic.objects.all()\n # comments = Comment.objects.all()\n\n print(pic_posts)\n return render(request, 'index.html', {\"title\":title,\"pic_posts\":pic_posts})\n\n\n@login_required(login_url='/accounts/login/')\ndef comment(request,id):\n\t\n\tpost = get_object_or_404(Pic,id=id)\t\n\tcurrent_user = request.user\n\tprint(post)\n\n\tif request.method == 'POST':\n\t\tform = CommentForm(request.POST)\n\n\t\tif form.is_valid():\n\t\t\tcomment = form.save(commit=False)\n\t\t\tcomment.user = current_user\n\t\t\tcomment.pic = post\n\t\t\tcomment.save()\n\t\t\treturn redirect('index')\n\telse:\n\t\tform = CommentForm()\n\n\treturn render(request,'comment.html',{\"form\":form}) \n\n\n\n@login_required(login_url='/accounts/login/')\ndef profile(request):\n\t current_user = request.user\n\t profile = Profile.objects.all()\n\t follower = Follow.objects.filter(user = profile)\n\n\t return render(request, 'profile.html',{\"current_user\":current_user,\"profile\":profile,\"follower\":follower})\n\n@login_required(login_url='/accounts/login/')\ndef timeline(request):\n\tcurrent_user = request.user \n\tMyprofile = Profile.objects.order_by('-time_uploaded')\n\tcomment = Comment.objects.order_by('-time_comment')\n\t\n\n\treturn render(request, 'my-inst/timeline.html',{\"Myprofile\":Myprofile,\"comment\":comment})\n\n@login_required(login_url='/accounts/login/')\ndef single_pic(request,pic_id):\n\tpic = pic.objects.get(id= pic_id)\n\n\treturn render(request, 'my-inst/single_pic.html',{\"pic\":pic})\n\n@login_required(login_url='/accounts/login/')\ndef like(request,pic_id):\n\tPic = Pic.objects.get(id=pic_id)\n\tlike +=1\n\tsave_like()\n\treturn redirect(timeline)\n\n\n# @login_required(login_url='/accounts/login/')\n# def search_pic(request):\n\n# \tif \"pic\" in request.GET and request.GET[\"pic\"]:\n# \t\tsearch_pic = request.GET.get(\"pic\")\n# \t\tgot_users = Profile.find_profile(search_pic)\n# \t\tmessage =f\"{search_pic}\"\n\n# \t\treturn render(request,'my-inst/search_pic.html',{\"got_users\":got_users,\"message\":message})\n# \telse:\n# \t\tmessage = \"Invalid username\"\n# \t\treturn render(request,'my-inst/search_pic.html',{\"message\":message})\n\ndef search_results(request):\n if 'pic' in request.GET and request.GET[\"pic\"]:\n search_term = request.GET.get(\"pic\")\n searched_profiles = Profile.search_profile(search_term)\n message = f\"{search_term}\"\n\n return render(request, 'search_pic.html',{\"message\":message,\"pics\": searched_profiles})\n\n else:\n message = \"You haven't searched for any term\"\n return render(request, 'search_pic.html',{\"message\":message})\n\n\n@login_required(login_url='/accounts/login/')\ndef upload_profile(request):\n current_user = request.user \n title = 'Upload Profile'\n try:\n requested_profile = Profile.objects.get(user_id = current_user.id)\n if request.method == 'POST':\n form = ProfileUploadForm(request.POST,request.FILES)\n\n if form.is_valid():\n requested_profile.profile_pic = form.cleaned_data['profile_pic']\n requested_profile.bio = form.cleaned_data['bio']\n requested_profile.username = 
form.cleaned_data['username']\n requested_profile.save_profile()\n return redirect( profile )\n else:\n form = ProfileUploadForm()\n except:\n if request.method == 'POST':\n form = ProfileUploadForm(request.POST,request.FILES)\n\n if form.is_valid():\n new_profile = Profile(profile_pic = form.cleaned_data['profile_pic'],bio = form.cleaned_data['bio'],username = form.cleaned_data['username'])\n new_profile.save_profile()\n return redirect( profile )\n else:\n form = ProfileUploadForm()\n\n\n return render(request,'upload_profile.html',{\"title\":title,\"current_user\":current_user,\"form\":form})\n\n\n@login_required(login_url='/accounts/login/')\ndef send(request):\n '''\n View function that displays a forms that allows users to upload images\n '''\n current_user = request.user\n\n if request.method == 'POST':\n\n form = ImageForm(request.POST ,request.FILES)\n\n if form.is_valid():\n image = form.save(commit = False)\n image.user_key = current_user\n image.likes +=0\n image.save() \n\n return redirect( timeline)\n else:\n form = ImageForm() \n return render(request, 'my-inst/send.html',{\"form\" : form}) \n\n",
"id": "4697380",
"language": "Python",
"matching_score": 2.334287166595459,
"max_stars_count": 0,
"path": "inst/views.py"
},
{
"content": "from django.urls import url, include\nfrom . import views\nfrom rest_framework import routers\n\nrouter = routers.DefaultRouter()\nrouter.register('users', views.UserViewSet)\nrouter.register('posts', views.PostViewSet)\nrouter.register('profile', views.ProfileViewSet)\n\nurlpatterns = [\n url('', views.index, name='index'),\n url('signup/', views.signup, name='signup'),\n url('account/', include('django.contrib.auth.urls')),\n url('api/', include(router.urls)),\n url('<username>/profile', views.user_profile, name='userprofile'),\n url('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n url('profile/<username>/', views.profile, name='profile'),\n url('profile/<username>/settings', views.edit_profile, name='edit'),\n url('project/<post>', views.project, name='project'),\n url('search/', views.search_project, name='search'),\n]\n",
"id": "364873",
"language": "Python",
"matching_score": 1.9202332496643066,
"max_stars_count": 0,
"path": "myawwards/urls.py"
}
] | 1.975796 |
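As a hedged illustration of the query helpers defined in gallery/models.py above, the snippet below assumes a configured Django project with the gallery app installed (for example, run inside manage.py shell); the "travel" and "Kampala" values are invented for the example.

from gallery.models import Category, Image, Location

travel = Category(name="travel")
travel.save_category()              # thin wrapper around Model.save()

kampala = Location(name="Kampala")
kampala.save_location()

# The classmethods on Image wrap common ORM filters:
by_category = Image.search_by_category("travel")    # icontains match on the related Category name
by_location = Image.filter_by_location(kampala.id)  # filter on the Location primary key
print(by_category.count(), by_location.count())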
newtonx-inc | [
{
"content": "import sys\n\nlogging_dict = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n \"json\": {\n '()': \"pysdl.stackdriverjsonlogger.StackdriverJsonFormatter\",\n 'format': \"%(levelname)s %(asctime)s %(module)s %(process)s %(thread)s %(message)s\"\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n 'formatter': 'json',\n 'stream': sys.stdout\n },\n },\n 'loggers': {\n # Root Logger\n '': {\n 'handlers': ['console'],\n 'level': 'INFO',\n },\n # DB events and sql\n 'django.db.backends': {\n 'handlers': ['console'],\n 'level': 'ERROR',\n },\n # Celery tasks will duplicate WARNING and ERROR logs here but it also helps us to catch celery issues\n # IMO duplication of ERROR and WARNING logs aren't a big deal\n 'celery': {\n 'handlers': ['console'],\n 'level': 'WARNING',\n },\n 'celery.beat': {\n 'handlers': ['console'],\n 'level': 'WARNING',\n },\n }\n}",
"id": "2196395",
"language": "Python",
"matching_score": 2.6593093872070312,
"max_stars_count": 0,
"path": "src/pysdl/logger_config.py"
},
{
"content": "# -*- coding: utf-8 -*-\nimport unittest.mock\nimport logging\nimport json\nimport sys\nimport random\n\ntry:\n import xmlrunner # noqa\nexcept ImportError:\n pass\n\ntry:\n from StringIO import StringIO # noqa\nexcept ImportError:\n # Python 3 Support\n from io import StringIO\n\nsys.path.append('pysdl')\nfrom src.pysdl import StackdriverJsonFormatter\n\n\nclass TestStackDriverFormatter(unittest.TestCase):\n def setUp(self):\n self.logger = logging.getLogger(\"logging-test-{}\".format(random.randint(1, 101)))\n self.logger.setLevel(logging.DEBUG)\n self.buffer = StringIO()\n\n self.logHandler = logging.StreamHandler(self.buffer)\n self.logger.addHandler(self.logHandler)\n\n def testFormatKeys(self):\n supported_keys = [\n 'asctime',\n 'created',\n 'filename',\n 'funcName',\n 'levelname',\n 'levelno',\n # Only addition key added is severity\n 'severity',\n 'lineno',\n 'module',\n 'msecs',\n 'message',\n 'name',\n 'pathname',\n 'process',\n 'processName',\n 'relativeCreated',\n 'thread',\n 'threadName'\n ]\n\n log_format = lambda x: ['%({0:s})s'.format(i) for i in x]\n custom_format = ' '.join(log_format(supported_keys))\n\n fr = StackdriverJsonFormatter(custom_format)\n self.logHandler.setFormatter(fr)\n\n msg = \"testing logging format\"\n self.logger.info(msg)\n log_msg = self.buffer.getvalue()\n log_json = json.loads(log_msg)\n\n for supported_key in supported_keys:\n if supported_key in log_json:\n self.assertTrue(True)\n",
"id": "9744002",
"language": "Python",
"matching_score": 2.2129290103912354,
"max_stars_count": 0,
"path": "tests/tests.py"
},
{
"content": "from setuptools import setup, find_packages\n\nfrom os import path\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name='pysdl',\n version='1.0.3',\n url='',\n license='',\n author='NewtonX',\n author_email='',\n python_requires='>=3.4',\n test_suite=\"tests.tests\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n package_dir={'': 'src'},\n packages=find_packages(\"src\", exclude=\"tests\"),\n description='A python library that helps Stackdriver consume python logs appropriately built on madzak/python-json-logger',\n)\n",
"id": "10670532",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "setup.py"
},
{
"content": "from pythonjsonlogger import jsonlogger\n\n\nclass StackdriverJsonFormatter(jsonlogger.JsonFormatter, object):\n def __init__(self, fmt=\"%(levelname) %(message)\", style='%', *args, **kwargs):\n jsonlogger.JsonFormatter.__init__(self, fmt=fmt, *args, **kwargs)\n\n def process_log_record(self, log_record):\n log_record['severity'] = log_record['levelname']\n return super(StackdriverJsonFormatter, self).process_log_record(log_record)\n",
"id": "6798187",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "src/pysdl/stackdriverjsonlogger.py"
}
] | 1.106465 |
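A hedged usage sketch for the pysdl record above: wiring logging_dict into the standard logging machinery via dictConfig. It assumes the package is installed under the import paths the record implies (pysdl.logger_config and pysdl.stackdriverjsonlogger); the request_id and status fields are invented for the example.

import logging
import logging.config

from pysdl.logger_config import logging_dict

logging.config.dictConfig(logging_dict)

log = logging.getLogger(__name__)
# Fields passed via extra= are merged into the JSON payload by python-json-logger,
# and process_log_record() copies levelname into the "severity" key Stackdriver reads.
log.info("request handled", extra={"request_id": "abc123", "status": 200})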
hansoljin | [
{
"content": "from collections import defaultdict\nfrom glob import iglob\nfrom os.path import basename, join\nfrom typing import TYPE_CHECKING\n\nimport discord\nfrom discord.ext import commands\nfrom utils.utils import ConsoleColor\n\nif TYPE_CHECKING:\n from typing import Dict, List, Optional, Tuple, Type\nempty = discord.Embed.Empty\n\nsystems_no_aliases = ('3ds', 'wiiu', 'vwii', 'switch', 'wii', 'dsi')\naliases = {\n 'nx': 'switch',\n 'ns': 'switch'\n}\nname_to_aliases = defaultdict(set)\nfor k, v in aliases.items():\n name_to_aliases[v].add(k)\n\n# compatibility\nsystems = systems_no_aliases + tuple(aliases) + ('legacy',)\n\n\ndef parse_header(header_raw: str):\n header: Dict[str, str] = {\n 'title': empty,\n 'url': empty,\n 'author.name': None,\n 'author.url': empty,\n 'author.icon-url': empty,\n 'help-desc': None,\n 'aliases': '',\n 'color': None,\n 'thumbnail-url': empty,\n 'image-url': empty,\n 'cooldown-rate': '1',\n 'cooldown-per': '30',\n }\n\n for line in header_raw.splitlines():\n if line.startswith('---'):\n continue\n key, value = line.split(':', maxsplit=1)\n key = key.strip()\n value = value.strip()\n header[key] = value\n\n return header\n\n\ndef parse_body(body_raw: str):\n body_raw = body_raw.strip()\n # first one should be the descripton and will have no header\n parts = []\n current_header = ''\n current_body = []\n\n def doadd():\n parts.append((current_header, '\\n'.join(current_body)))\n\n for line in body_raw.splitlines():\n if line.startswith('#'):\n # This does not work entirely how markdown should work.\n # It seems a header requires a space between the # and the text.\n # Example: \"#test\" should not work but \"# test\" does.\n # This isn't really worth trying to check for however.\n doadd()\n current_header = line.lstrip('#').lstrip(' ')\n current_body = []\n else:\n current_body.append(line)\n\n if current_header or current_body:\n doadd()\n\n # special case for an empty body\n if not parts:\n parts.append(('', ''))\n\n return parts\n\n\ndef create_embed(header: 'Dict[str, str]', body: 'List[Tuple[str, str]]', embed_color: discord.Color):\n description = body[0][1]\n embed = discord.Embed(\n title=header['title'],\n description=description,\n url=header['url'],\n color=embed_color,\n )\n if header['author.name']: # this field is required\n embed.set_author(name=header['author.name'], url=header['author.url'], icon_url=header['author.icon-url'])\n embed.set_thumbnail(url=header['thumbnail-url'])\n embed.set_image(url=header['image-url'])\n\n # first one is used as the embed description\n for field in body[1:]:\n embed.add_field(name=field[0], value=field[1], inline=False)\n\n return embed\n\n\ndef parse_md_command(md_text: str, format_map: dict, embed_color: discord.Color):\n parts = md_text.split('\\n\\n', maxsplit=1)\n if len(parts) == 1:\n # in case there is no body\n parts.append('')\n header_raw, body_raw = parts\n\n body_raw = body_raw.format_map(format_map)\n\n header = parse_header(header_raw)\n body = parse_body(body_raw)\n\n if header['color']:\n # override with the custom color\n embed_color = discord.Color(int(header['color'], 16))\n\n return header, create_embed(header, body, embed_color)\n\n\ndef md_file_to_embed(md_path: str, format_map: dict):\n colors = {\n '3ds': ConsoleColor.n3ds(),\n 'wiiu': ConsoleColor.wiiu(),\n 'vwii': ConsoleColor.wiiu(),\n 'wii': ConsoleColor.wii(),\n 'switch': ConsoleColor.switch(),\n 'legacy': ConsoleColor.legacy(),\n 'dsi': ConsoleColor.legacy(),\n 'all': empty # default embed color\n }\n\n with open(md_path, 'r', 
encoding='utf-8') as f:\n fn = basename(md_path)\n name, console, _ = fn.rsplit('.', maxsplit=2)\n return name, console, *parse_md_command(f.read(), format_map, colors[console])\n\n\ndef check_console(message, channel, consoles):\n message = message.lower()\n if isinstance(consoles, str):\n consoles = (consoles,)\n if message in consoles:\n return True\n elif (\"wii\" not in consoles or channel.startswith(\"legacy\")) and channel.startswith(consoles) and message not in systems:\n return True\n return False\n\n\ndef get_console_name(console):\n return aliases.get(console, console)\n\n\ndef add_md_files_as_commands(cog_class: 'Type[commands.Cog]', md_dir: str = None, *, namespace=commands, format_map=None):\n\n def make_cmd(name: str, help_desc: 'Optional[str]', embeds: 'Dict[str, discord.Embed]', cooldown: 'Tuple[int, int]', aliases: list):\n if len(embeds) > 1:\n # multi-console commands require a check\n async def cmd(self, ctx, *, consoles=''):\n supported_consoles = list(embeds)\n for s in supported_consoles:\n if s in {'dsi', 'wii'}:\n # special case for legacy channel\n supported_consoles.append('legacy')\n # replace aliases with the expected console\n requested_consoles = {get_console_name(c) for c in consoles.split()}\n # and then check if any of the consoles are supported here\n requested_consoles = {c for c in requested_consoles if c in supported_consoles}\n channel_name = ctx.channel.name if not isinstance(ctx.channel, discord.DMChannel) else ''\n\n if not requested_consoles:\n if channel_name.startswith(tuple(supported_consoles)):\n requested_consoles = {'auto'}\n else:\n valid = set(supported_consoles)\n for v in supported_consoles:\n valid |= name_to_aliases[v]\n await ctx.send(f'Please specify a console. Valid options are: {\", \".join(sorted(valid))}')\n\n ctx.command.reset_cooldown(ctx)\n return\n\n for console in requested_consoles:\n for embed_console, embed in embeds.items():\n cons = [embed_console]\n if embed_console in {'dsi', 'wii'}:\n # special case for legacy channel\n cons.append('legacy')\n if check_console(console, channel_name, tuple(cons)):\n await ctx.send(embed=embed)\n else:\n # single-console commands can simply print the one embed\n async def cmd(self, ctx):\n # this is kinda ugly, but basically it gets the first (and only) value of the dict\n await ctx.send(embed=next(iter(embeds.values())))\n\n cmd.__name__ = name\n # i figured this was easier than dealing with the multiple attributes for command help\n cmd.__doc__ = help_desc\n\n # this feels _wrong_ but is probably the best way to do this\n cooldown = commands.cooldown(cooldown[0], cooldown[1], commands.BucketType.channel)(cmd)\n cmd_obj = namespace.command(name=name, aliases=aliases)(cooldown)\n return cmd_obj\n\n new_commands = defaultdict(dict)\n aliases = defaultdict(list)\n cooldowns = {}\n helpdescs = defaultdict(lambda: None)\n\n if md_dir is None:\n md_dir = cog_class.data_dir\n\n if format_map is None:\n try:\n format_map = cog_class.format_map\n except AttributeError:\n format_map = None\n\n for md in iglob(join(md_dir, '*.md')):\n command, console, header, embed = md_file_to_embed(md, format_map)\n new_commands[command][console] = embed\n if header['aliases']:\n aliases[command].extend(header['aliases'].split(','))\n if header['help-desc']:\n # in case some don't have a help-desc, don't delete a previous one\n helpdescs[command] = header['help-desc']\n cooldowns[command] = (int(header['cooldown-rate']), int(header['cooldown-per']))\n\n for command, embed_dict in new_commands.items():\n 
new_aliases = list(set(aliases[command]))\n command_obj = make_cmd(command, helpdescs[command], embed_dict, cooldowns[command], new_aliases)\n setattr(cog_class, command, command_obj)\n # there has to be a better way to do this...\n cog_class.__cog_commands__.append(command_obj)\n",
"id": "7532537",
"language": "Python",
"matching_score": 2.8153562545776367,
"max_stars_count": 51,
"path": "utils/mdcmd.py"
},
{
"content": "import re\n\nfrom .ctr_results import modules as ctr_results_modules\nfrom .types import Module, ResultInfo, ConsoleErrorInfo, ConsoleErrorField, \\\n BANNED_FIELD, WARNING_COLOR, UNKNOWN_CATEGORY_DESCRIPTION\n\n\n\"\"\"\nThis file contains all currently known 2DS/3DS support codes.\nThere may be inaccuracies here; we'll do our best to correct them\nwhen we find out more about them.\n\nA \"support\" code, in contrast to a result code, is a human-readable string like\n002-0102. They're meant to be more user-friendly than result codes, which are\ntypically integer values.\n\nNote: the \"modules\" presented here are more like \"categories\". However, this difference\nisn't enough to justify creating a different class with the same logic, so we'll just\nrefer to them as \"modules\" from now on.\n\nTo add a module/category so the code understands it, simply add a new module number\nto the 'modules' dictionary, with a Module variable as the value. If the module\nhas no known error codes, simply add a dummy Module instead (see the dict for\nmore info). See the various module variables for a more in-depth example\n on how to make one.\n\nOnce you've added a module, or you want to add a new support code to an existing\nmodule, add a new description value (for 3DS it's the 4 digit number after the dash)\nas the key, and a ResultInfo variable with a text description of the error or result.\nYou can also add a second string to the ResultInfo to designate a support URL if\none exists. Not all support codes have known error pages.\n\nSimple example of adding a module with a sample support code:\ntest = Module('test', {\n 5: ResultInfo('test', 'https://example.com')\n})\n\nmodules = {\n 9999: test\n}\n\nSources used to compile this information:\nKurisu's previous err.py module\nNinendo's support knowledgebase at https://en-americas-support.nintendo.com/app/answers\n\"\"\"\n\n# 001: friends module, parental controls, online services in general?\nfriends = Module('friends', {\n 102: ResultInfo('This error code indicates you were unexpectedly disconnected from network services.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/17043'),\n 721: ResultInfo('This error code indicates the Parental Controls are set to restrict access to the online feature you\\'re attempting to use.', 'https://www.nintendo.com.au/help/3ds-error-codes')\n})\n\n# 002: bans and other account errors\naccount = Module('account', {\n 102: ResultInfo('This console is permanently banned by Nintendo.', is_ban=True),\n 107: ResultInfo('This console is temporarily (?) banned by Nintendo.', is_ban=True),\n 110: ResultInfo('Error when trying to use the discontinued youtube 3ds app.'),\n 119: ResultInfo('System update is required. This is typically shown when the friends module is outdated.'),\n 120: ResultInfo('Game or title update is required. This is typically shown when the title you\\'re trying to launch is outdated.'),\n 121: ResultInfo('Local friend code SEED has invalid signature. This should only happen if it has been modified.', is_ban=True),\n 123: ResultInfo('This console is permanently banned by Nintendo.', is_ban=True)\n})\n\n# 003: connection related errors\ninternet = Module('internet', {\n 299: ResultInfo('The Wireless Connection is currently deactivated. 
Please activate the wireless connection.'),\n 399: ResultInfo('Accepted EULA version is too low'),\n 1099: ResultInfo('Access point with given SSID not found.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4249/kw/003-1099'),\n 1101: ResultInfo('Incorrect password for access point or configuration is not compatible with the 3DS.'),\n 2001: ResultInfo('DNS error. If you\\'re using a custom DNS server, make sure the settings are correct.'),\n 2103: ResultInfo('Generic connection error(?)')\n})\n\n# Yet another nim hack. Why does this category have so many redundant errors?\nNIM_4069 = ResultInfo('This error may appear if the eShop is unavailable. If the issue persists, you might need to replace your console\\'s SD card.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/14413')\n\n# 005: nim\nnim = Module('nim', {\n # 005-2008 is the same as 007-2920 and 009-2920...\n 2008: ResultInfo('This error is typically displayed when a Nintendo eShop download failed, or when the title has an invalid ticket. Delete the title and/or its ticket in FBI and install it again from a legitimate source like the Nintendo eShop, or from your game cartridges if using cart dumps.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/41692'),\n 4040: ResultInfo('The connection timed out when connecting to the eShop.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4429'),\n 4069: NIM_4069,\n # in HTTP range...\n 4240: ResultInfo('This error code likely indicates a temporary service issue with the Nintendo eShop.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/28399'),\n 4305: ResultInfo('A generic error that may be displayed when the connection times out or you are unable to download software from the eShop.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4346'),\n 4320: ResultInfo('A generic error that may be displayed when formatting your console or performing a system transfer.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/48382'),\n 5602: ResultInfo('Unable to connect to the eShop. This usually occurs when the System\\'s region setting is incorrect. Change it in system settings and try connecting again.'),\n 5687: ResultInfo('A generic error that displays when you\\'re unable to connect to the eShop.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/26251/'),\n # in SOAP range...\n 5704: ResultInfo('A generic error that displays when you\\'re unable to connect to the eShop.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/26252'),\n 5958: ResultInfo('Unknown eShop error. Usually seen on region-changed consoles.'),\n 5964: ResultInfo('Your NNID has been banned from accessing the eShop. 
You will need to contact Nintendo Support if you feel it was unjustified.'),\n 7545: NIM_4069,\n 7550: NIM_4069,\n 8025: NIM_4069,\n 8026: NIM_4069,\n 8029: NIM_4069\n})\n# 006: online matchmaking and gameplay errors\nmatchmaking = Module('matchmaking', {\n 112: ResultInfo('Typically displayed when an issue with connecting to Pokémon Bank occurs.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4203/'),\n 332: ResultInfo('Caused by closed ports when attempting matchmaking(?)'),\n (501, 502): ResultInfo('This may indicate in issue with the network being used blocking traffic necessary for online play.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4204'),\n 612: ResultInfo('This error code generally indicates that your network is not optimal for peer to peer connections, likely due to your network\\'s NAT type.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/25881'),\n 811: ResultInfo('This error code indicates the service you are attempting to use is currently unavailable due to ongoing maintenance.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/25910/'),\n (800, 899): ResultInfo('These are typically shown when there is an error during the matchmaking process and you were unable to connect to the authentication servers.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4328/')\n})\n\n# 007: errors related to (presumably) the eShop API\neshop_mint = Module('eshop (mint/api?)', {\n 200: ResultInfo('Could not access the SD card.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4234'),\n 1221: ResultInfo('The download code you entered can only be redeemed within the relevant software title. It cannot be redeemed in Nintendo eShop.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/14600'),\n 2001: ResultInfo('Error when attempting to access eshop on a region changed console. Fixed by changing back to the console original region.'),\n 2100: ResultInfo('The connection to the Nintendo eShop timed out. This error code is often times caused by slow download times due to interference or a slow Internet connection.', 'See [the support page](https://en-americas-support.nintendo.com/app/answers/detail/a_id/4432) or [Nintendo\\'s network status](https://support.nintendo.com/networkstatus).'),\n 2670: ResultInfo('An error occurred while attempting to connect.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4383'),\n 2720: ResultInfo('eShop SSL error.'),\n 2913: ResultInfo('The server is probably down. Try again later.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/10425'),\n 2916: ResultInfo('This is typically displayed when an error occurs while attempting to download a title from the Nintendo eShop.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/6557'),\n 2920: ResultInfo('This error is typically displayed when a Nintendo eShop download failed, or when the title has an invalid ticket. 
Delete the title and/or its ticket in FBI and install it again from a legitimate source like the Nintendo eShop, or from your game cartridges if using cart dumps.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/41692'),\n 2924: ResultInfo('Happens when opening eshop with a invalid language setting'),\n 3049: ResultInfo('The eShop is down for maintenance.', 'https://support.nintendo.com/networkstatus/'),\n 6106: ResultInfo('Occurs when attempting to re-download software from the eshop with an invalid or fake ticket')\n})\n\n# 009: errors related to (presumably) the eShop application itself\neshop_app = Module('eshop (app?)', {\n # 1001: ResultInfo('The eShop is down for maintenance.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/45399'),\n 1000: ResultInfo('System update required (friends module?).'),\n 1001: eshop_mint.data[3049],\n 2705: ResultInfo('This error code is often the result of the Internet connection timing out or losing connection with the Nintendo eShop.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/14478'),\n 2913: ResultInfo('An eShop or in-game DLC download failed (or the server is down).', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/7243'),\n 2916: eshop_mint.data[2916],\n 2920: eshop_mint.data[2920],\n 2924: eshop_mint.data[2924],\n 2923: ResultInfo('You are unable to use a function which requires internet services, such as software updates or formatting the system memory.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/17014'),\n 2995: ResultInfo('This error can occur if the download code was entered incorrectly, has not yet been activated, has expired, was entered in the wrong place, or is intended for a region other than your own.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/13515'),\n 4077: ResultInfo('Cannot start or continue eShop download. This happens due to insufficient free space being available on the SD Card.'),\n 4079: ResultInfo('Unable to access SD card.'),\n 4998: ResultInfo('Local content is newer. Unknown what causes this.'),\n 6106: ResultInfo('AM error in NIM. Bad ticket is likely.'),\n 8401: ResultInfo('The update data is corrupted. Delete it and reinstall.'),\n 9001: ResultInfo('Caused by trying to download content with low battery percentage.')\n})\n\n# 011: eshop website, or other misc/overlapping errors\neshop_site = Module('eshop (website?)', {\n 3010: ResultInfo('Server timeout due to user inactivity.'),\n 3021: ResultInfo('Cannot find title on Nintendo eShop (incorrect region, or never existed?).'),\n 3136: ResultInfo('Nintendo eShop is currently unavailable. Try again later.'),\n 5998: ResultInfo('Nintendo eShop is currently under maintenance', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/24326/'),\n 6901: ResultInfo('This console is permanently banned by Nintendo (displayed in Japanese for some reason).', is_ban=True)\n})\n\n# 014: system transfers?\ndata_transfer = Module('system transfer', {\n 13: ResultInfo('Attempting to do a system transfer with with a invalid language setting.'),\n 16: ResultInfo('Both consoles have the same movable.sed key. Format the target console and system transfer again.'),\n 62: ResultInfo('An error occurred during system transfer. 
Move closer to the wireless router and try again.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/15664')\n})\n# 012: a category related to the web browser or ssl module considered 1511\nbrowser1 = Module('browser (?)', {\n 1004: ResultInfo('SSL connection failed.'),\n 1511: ResultInfo('Certificate warning.')\n})\n\n# 032: a second category related to the web browser\nbrowser2 = Module('browser (?)', {\n 1820: ResultInfo('Displayed when the browser asks if you want to go to to a potentially dangerous website. Press \\'yes\\' to continue if you feel it is safe.')\n})\n\n# 022: more account stuff?\naccount2 = Module('account', {\n 2452: ResultInfo('Tried to access the eShop with UNITINFO patch enabled. Turn it off in Luma\\'s options.'),\n (2501, 2591): ResultInfo('NNID is already linked to another system. This can be the result of using System Transfer (where all NNIDs associated with the system are moved, whether they are currently linked or not), restoring the source console\\'s NAND, and then attempting to use applications which require an NNID.'),\n 2511: ResultInfo('System update required (displayed by Miiverse?).'),\n 2613: ResultInfo('Incorrect email or password when attempting to link an existing NNID. Can also happen if the NNID is already linked to another system, or if you attempt to download an application from the eShop without a linked NNID on the console.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4314/kw/022-2613'),\n 2631: ResultInfo('The NNID you are attempting to use has been deleted, or is unusable due to a System Transfer. A transferred NNID will only work on the target system.', 'https://en-americas-support.nintendo.com/app/answers/detail/a_id/4285/kw/022-2631'),\n 2633: ResultInfo('NNID is temporarily locked due to too many incorrect password attempts. Try again later.'),\n 2634: ResultInfo('NNID is not correctly linked on this console.', '[To fix it, follow these steps. 
Afterwards, reboot and sign into your NNID again.](https://3ds.hacks.guide/godmode9-usage#removing-an-nnid-without-formatting-your-device)'),\n 2812: ResultInfo('This console is permanently banned by Nintendo for playing Pokémon Sun & Moon online before the release date illegally.', is_ban=True),\n 2815: ResultInfo('This console is banned from accessing Miiverse by Nintendo.'),\n 5363: ResultInfo('Happens when trying to load NNID settings with a invalid language setting.'),\n 5515: ResultInfo('Network timeout.'),\n})\n\n# 090: application defined?\nunknown1 = Module('unknown', {\n 212: ResultInfo('Game is permanently banned from Pokémon Global Link for using altered or illegal save data.', is_ban=True)\n})\n\n# We have some modules partially documented, those that aren't return None.\n# These category names are largely made up based on the types of known errors they have,\n# or based on their Wii U-equivalent, because Wii U is better documented.\nmodules = {\n 1: friends,\n 2: account,\n 3: internet,\n 5: nim,\n 6: matchmaking,\n 7: eshop_mint,\n 9: eshop_app,\n 11: eshop_site,\n 12: browser1,\n 14: data_transfer,\n 22: account2,\n 32: browser2,\n 90: unknown1\n}\n\nRE = re.compile(r'0\\d{2}-\\d{4}')\n\nCONSOLE_NAME = 'Nintendo 2DS/3DS'\n\n# Suggested color to use if displaying information through a Discord bot's embed\nCOLOR = 0xCE181E\n\n\ndef is_valid(error: str):\n return RE.match(error)\n\n\ndef construct_result(ret, mod, desc):\n module = ctr_results_modules.get(mod, Module(''))\n ret.add_field(ConsoleErrorField('Module', message_str=module.name, supplementary_value=mod))\n description = module.get_error(desc)\n if description is None or not description.description:\n description = ctr_results_modules[0].get_error(desc)\n if description is None or not description.description:\n ret.add_field(ConsoleErrorField('Description', supplementary_value=desc))\n else:\n ret.add_field(ConsoleErrorField('Description', message_str=description.description, supplementary_value=desc))\n else:\n ret.add_field(ConsoleErrorField('Description', message_str=description.description, supplementary_value=desc))\n\n return ret\n\n\ndef construct_result_range(ret, mod, range_desc):\n module = ctr_results_modules.get(mod, Module(''))\n ret.add_field(ConsoleErrorField('Module', message_str=module.name, supplementary_value=mod))\n found_descs = []\n unknown_descs = []\n for desc in range_desc:\n if desc < 0 or desc > 1023:\n continue\n\n description = module.get_error(desc)\n if description is None or not description.description:\n description = ctr_results_modules[0].get_error(desc)\n if description is None or not description.description:\n unknown_descs.append(str(desc))\n else:\n found_descs.append(ConsoleErrorField('Description', message_str=description.description, supplementary_value=desc).message)\n else:\n found_descs.append(ConsoleErrorField('Description', message_str=description.description, supplementary_value=desc).message)\n\n if found_descs:\n ret.add_field(ConsoleErrorField('Possible known descriptions', message_str='\\n'.join(found_descs)))\n if unknown_descs:\n ret.add_field(ConsoleErrorField('Possible unknown descriptions', message_str=', '.join(unknown_descs)))\n\n return ret\n\n\ndef construct_support(ret, mod, desc):\n category = modules.get(mod, Module(''))\n if category.name:\n ret.add_field(ConsoleErrorField('Category', message_str=category.name))\n else:\n ret.add_field(ConsoleErrorField('Category', supplementary_value=mod))\n description = category.get_error(desc)\n if description is 
not None and description.description:\n ret.add_field(ConsoleErrorField('Description', message_str=description.description))\n if description.support_url:\n ret.add_field(ConsoleErrorField('Further information', message_str=description.support_url))\n if description.is_ban:\n ret.add_field(BANNED_FIELD)\n ret.color = WARNING_COLOR\n else:\n ret.add_field(UNKNOWN_CATEGORY_DESCRIPTION)\n return ret\n\n\ndef nim_handler(ret, description):\n \"\"\"\n Parses 3ds nim error codes in the following ranges:\n 005-2000 to 005-3023:\n - NIM got a result of its own. Took description and added by 52000.\n 005-4200 to 005-4399:\n - NIM got an HTTP result. Took description and added by 54200, cutting out at 54399 if it was beyond that.\n 005-4400 to 005-4999:\n - Range of HTTP codes, however, can suffer collision.\n 005-5000 to 005-6999:\n - SOAP Error Code range, when <ErrorCode> is not 0 on the SOAP responses.\n 005-7000 to 005-9999:\n - Non specific expected results are formatted to an error code in nim by taking result module and shifting right by 5, and taking the result description and masked with 0x1F, then added both together along with 57000.\n \"\"\"\n # If we have a specific description for it in our knowledgebase,\n # show it instead of doing the rest of the processing.\n error = nim.get_error(description)\n if error is not None and error.description:\n return construct_support(ret, 5, description)\n\n elif 2000 <= description < 3024:\n description -= 2000\n return construct_result(ret, 52, description) # nim result module, not support category\n\n elif 4200 <= description < 4400:\n description -= 4200\n construct_result(ret, 40, description) # http result module, not support category\n if description == 199:\n ret.add_field(ConsoleErrorField('Extra note', message_str='Alternatively, any http description beyond 199.\\nNIM truncates it to 199.'))\n\n elif 4400 <= description < 5000:\n description -= 4400\n ret.add_field(ConsoleErrorField('Category', message_str='nim'))\n if description < 100:\n ret.add_field(ConsoleErrorField('HTTP Status Code', message_str=f'{description + 100}'))\n elif 100 <= description < 500:\n ret.add_field(ConsoleErrorField('HTTP Status Code', message_str=f'{description + 100} or {description} due to a programming mistake in NIM.'))\n else:\n ret.add_field(ConsoleErrorField('HTTP Status Code', message_str=f'{description}'))\n\n elif 5000 <= description < 7000:\n description -= 5000\n ret.add_field(ConsoleErrorField('Category', message_str='nim'))\n ret.add_field(ConsoleErrorField('Description', message_str=f'SOAP message returned result code {description} on a NIM operation.'))\n\n # >= 7000 range is compacted\n elif description >= 7000:\n description -= 7000\n module = description >> 5\n # There are way more than 0x1F descriptions, but this is how Nintendo does it...\n description = description & 0x1F\n return construct_result_range(ret, module, range(0 + description, 1024 + description, 32))\n\n else:\n ret.add_field(ConsoleErrorField('Category', message_str='nim'))\n ret.add_field(UNKNOWN_CATEGORY_DESCRIPTION)\n\n return ret\n\n\ndef get(error: str):\n mod = int(error[:3])\n desc = int(error[4:])\n ret = ConsoleErrorInfo(error, CONSOLE_NAME, COLOR)\n if mod == 5: # 5 is NIM\n return nim_handler(ret, desc)\n return construct_support(ret, mod, desc)\n",
"id": "10426758",
"language": "Python",
"matching_score": 1.6328709125518799,
"max_stars_count": 51,
"path": "cogs/results/ctr_support.py"
},
{
"content": "from gino import Gino\n\ndb = Gino()\n\n\nclass Staff(db.Model):\n __tablename__ = \"staff\"\n id = db.Column(db.BigInteger, db.ForeignKey(\"members.id\"), primary_key=True)\n position = db.Column(db.String(8))\n console = db.Column(db.String(8))\n\n\nclass Warn(db.Model):\n __tablename__ = \"warns\"\n id = db.Column(db.BigInteger(), primary_key=True)\n user = db.Column(db.BigInteger(), db.ForeignKey(\"members.id\"))\n issuer = db.Column(db.BigInteger(), db.ForeignKey(\"members.id\"))\n reason = db.Column(db.Unicode())\n\n\nclass FriendCode(db.Model):\n __tablename__ = \"friendcodes\"\n id = db.Column(db.BigInteger, db.ForeignKey(\"members.id\"), primary_key=True)\n fc_3ds = db.Column(db.BigInteger, default=None)\n fc_switch = db.Column(db.BigInteger, default=None)\n\n\nclass Channel(db.Model):\n __tablename__ = \"channels\"\n id = db.Column(db.BigInteger(), primary_key=True)\n name = db.Column(db.Unicode)\n private = db.Column(db.Boolean(), default=False)\n mod_channel = db.Column(db.Boolean(), default=False)\n default_role = db.Column(db.BigInteger(), db.ForeignKey(\"roles.id\"), default=None)\n lock_level = db.Column(db.Integer, default=0)\n nofilter = db.Column(db.Boolean(), default=False)\n\n @property\n def is_mod_channel(self):\n return self.mod_channel\n\n\nclass Role(db.Model):\n __tablename__ = \"roles\"\n id = db.Column(db.BigInteger, primary_key=True)\n name = db.Column(db.Unicode)\n\n\nclass PermanentRole(db.Model):\n __tablename__ = \"permanentroles\"\n user_id = db.Column(db.BigInteger(), db.ForeignKey(\"members.id\"))\n role_id = db.Column(db.BigInteger(), db.ForeignKey(\"roles.id\"))\n _pk = db.PrimaryKeyConstraint('user_id', 'role_id', name='permanentroles_pkey')\n\n\nclass TimedRestriction(db.Model):\n __tablename__ = \"timed_restriction\"\n id = db.Column(db.BigInteger(), primary_key=True)\n user = db.Column(db.BigInteger, db.ForeignKey(\"members.id\"))\n type = db.Column(db.String(12))\n end_date = db.Column(db.DateTime())\n alerted = db.Column(db.Boolean(), default=False)\n\n\nclass TimedRole(db.Model):\n __tablename__ = \"timedroles\"\n id = db.Column(db.BigInteger(), primary_key=True)\n role_id = db.Column(db.BigInteger(), db.ForeignKey(\"roles.id\"))\n user_id = db.Column(db.BigInteger, db.ForeignKey(\"members.id\"))\n expiring_date = db.Column(db.DateTime())\n\n\nclass Member(db.Model):\n __tablename__ = \"members\"\n id = db.Column(db.BigInteger(), primary_key=True)\n watched = db.Column(db.Boolean(), default=False)\n\n\nclass Softban(db.Model):\n __tablename__ = \"softbans\"\n id = db.Column(db.BigInteger(), primary_key=True)\n user = db.Column(db.BigInteger(), db.ForeignKey(\"members.id\"))\n issuer = db.Column(db.BigInteger(), db.ForeignKey(\"members.id\"))\n reason = db.Column(db.Unicode())\n\n\nclass Flag(db.Model):\n __tablename__ = \"flags\"\n name = db.Column(db.String(20), primary_key=True)\n value = db.Column(db.Boolean(), default=False)\n\n\nclass FilteredWord(db.Model):\n __tablename__ = \"filteredwords\"\n word = db.Column(db.String(), primary_key=True)\n kind = db.Column(db.String())\n\n\nclass LevenshteinWord(db.Model):\n __tablename__ = \"levenshteinwords\"\n word = db.Column(db.String(), primary_key=True)\n threshold = db.Column(db.Integer())\n kind = db.Column(db.String())\n whitelist = db.Column(db.Boolean(), default=True)\n\n\nclass WhitelistWord(db.Model):\n __tablename__ = \"whitelistedwords\"\n word = db.Column(db.String(), primary_key=True)\n\n\nclass ApprovedInvite(db.Model):\n __tablename__ = \"approvedinvites\"\n code = 
db.Column(db.String(), primary_key=True)\n uses = db.Column(db.Integer(), default=-1)\n alias = db.Column(db.String())\n\n @property\n def is_temporary(self):\n return self.uses > 0\n\n\nclass Rule(db.Model):\n __tablename__ = \"rules\"\n id = db.Column(db.Integer(), primary_key=True)\n description = db.Column(db.String())\n\n\nclass RemindMeEntry(db.Model):\n __tablename__ = \"remindmeentries\"\n id = db.Column(db.BigInteger(), primary_key=True)\n date = db.Column(db.TIMESTAMP, nullable=False)\n author = db.Column(db.BigInteger, db.ForeignKey(\"members.id\"), nullable=False)\n reminder = db.Column(db.String, nullable=False)\n\n\nclass Tag(db.Model):\n __tablename__ = \"tags\"\n id = db.Column(db.BigInteger(), primary_key=True)\n title = db.Column(db.String, unique=True, nullable=False)\n content = db.Column(db.String, nullable=False)\n author = db.Column(db.BigInteger, db.ForeignKey(\"members.id\"), nullable=False)\n\n\nclass Citizen(db.Model):\n __tablename__ = \"citizens\"\n id = db.Column(db.BigInteger, db.ForeignKey(\"members.id\"), primary_key=True)\n social_credit = db.Column(db.Integer, nullable=False)\n",
"id": "1161393",
"language": "Python",
"matching_score": 0.5047085285186768,
"max_stars_count": 51,
"path": "utils/models.py"
}
] | 1.632871 |
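A dependency-free sketch of the parsing rules that ctr_support.py above encodes: a support code has the form MMM-DDDD, where MMM selects a category and DDDD a description, and codes in the 005-7000+ range are further compacted by nim_handler(). The value 005-7231 is an arbitrary example, not a code documented in the record.

import re

RE = re.compile(r'0\d{2}-\d{4}')        # same validation pattern the module uses

def split_support_code(code: str):
    if not RE.match(code):
        raise ValueError(f"not a 2DS/3DS support code: {code}")
    return int(code[:3]), int(code[4:])  # (category, description)

mod, desc = split_support_code("005-7231")
compact = desc - 7000                    # nim_handler(): the >= 7000 range is compacted
print("category:", mod,
      "result module:", compact >> 5,    # module = description >> 5
      "description bits:", compact & 0x1F)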
sunarvin | [
{
"content": "#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n\r\n# Author: <NAME>\r\n# E-mail: <EMAIL>\r\n# You use at your own risk. The author is not responsible for any loss or damage the program involved.\r\n#\r\n# MIT license, all text above must be included in any redistribution.\r\n\r\nimport sys\r\nfrom capstone import *\r\nimport struct\r\n\r\n\r\ndef get_text_section(filename: str):\r\n magic_s = ''\r\n\r\n with open(filename, 'rb') as f:\r\n buf = f.read(4)\r\n magic_s += ('%02x' % buf[3])\r\n magic_s += ('%02x' % buf[2])\r\n magic_s += ('%02x' % buf[1])\r\n magic_s += ('%02x' % buf[0])\r\n print(magic_s)\r\n\r\n if magic_s == 'feedfacf':\r\n print('64 LE')\r\n# elif magic_s == 'cffaedfe':\r\n# print('64 BE')\r\n# elif magic_s == 'feedface':\r\n# print('32 LE')\r\n# elif magic_s == 'cefaedfe':\r\n# print('32 BE')\r\n else:\r\n print('Unsupported type')\r\n\r\n # read header\r\n buf = f.read(28)\r\n cputype, cpusubtype, filetype, ncmds, sizeofcmds, flags, rsv0 = struct.unpack('7I', buf)\r\n print('command number: %d' % ncmds)\r\n\r\n found = False\r\n # search for __TEXT segment\r\n for i in range(ncmds):\r\n buf = f.read(8)\r\n cmd, cmdsize = struct.unpack('II', buf)\r\n print('cmd 0x%X size %d' % (cmd, cmdsize))\r\n\r\n if cmd == 25: # segment type 25 (0x19 LC_SEGMENT_64)\r\n buf = f.read(16)\r\n seg_name = buf.decode('utf-8').rstrip('\\0')\r\n print('segment name:', seg_name)\r\n\r\n if seg_name == '__TEXT': # hit the __TEXT segment\r\n buf = f.read(48)\r\n vmaddr, vmsize, fileoff, filesize, maxprot, initprot, nsects, flags = struct.unpack('4Q4I', buf)\r\n\r\n # search for __text section\r\n for j in range(nsects):\r\n buf = f.read(16)\r\n sec_name = buf.decode('utf-8').rstrip('\\0')\r\n print('section name:', sec_name)\r\n buf = f.read(16) # jump off segment name\r\n buf = f.read(48)\r\n if sec_name == '__text': # hit the __text section\r\n addr, size, offset, align, reloff, nreloc, flags, rsv1, rsv2, rsv3 = struct.unpack('2Q8I', buf)\r\n\r\n f.seek(offset, 0)\r\n code = f.read(size)\r\n\r\n return code, addr, offset\r\n\r\n # move to the next segment\r\n f.seek(cmdsize-24, 1)\r\n\r\n\r\ndef search_modify_offset(code_bytes: bytes, text_addr: int):\r\n aarch64_cs = Cs(CS_ARCH_ARM64, CS_MODE_ARM)\r\n\r\n # search for (STP, WZR) instructions\r\n candidate = []\r\n for i in aarch64_cs.disasm(code_bytes, text_addr):\r\n if i.mnemonic == 'str' and i.op_str[0:4] == 'wzr,':\r\n # print(\"0x%x:\\t%s\\t%s\" %(i.address, i.mnemonic, i.op_str))\r\n candidate.append(i)\r\n\r\n # search for two adjoining (STP, WZR) instruction\r\n prev = None\r\n for i in candidate:\r\n if (prev is not None) and (i.address-prev.address == 4):\r\n break\r\n else:\r\n prev = i\r\n\r\n # two adjoining instructions are found\r\n if prev is not None and i is not None:\r\n print(\"0x%x:\\t%s\\t%s\" % (prev.address, prev.mnemonic, prev.op_str))\r\n print(\"0x%x:\\t%s\\t%s\" % (i.address, i.mnemonic, i.op_str))\r\n\r\n # compute the offset in bytes\r\n offset = (prev.address - text_addr)\r\n return offset\r\n else:\r\n return None\r\n\r\n\r\ndef do_modify(filename: str, offset: int):\r\n with open(filename, 'rb+') as f:\r\n # read 8 bytes\r\n f.seek(offset)\r\n eight_bytes = f.read(8)\r\n# for byte in eight_bytes:\r\n# print(hex(byte))\r\n # swap and write back to file\r\n f.seek(offset)\r\n f.write(eight_bytes[4:8])\r\n f.write(eight_bytes[0:4])\r\n\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) < 2:\r\n print('Usage: python %s <FILENAME>' % sys.argv[0])\r\n sys.exit()\r\n\r\n filename = sys.argv[1]\r\n 
code, text_addr, file_offset = get_text_section(filename)\r\n print('__text EA: 0x%X' % text_addr)\r\n print('__text offset in this file: 0x%X' % file_offset)\r\n\r\n offset = search_modify_offset(code, text_addr)\r\n if offset is None:\r\n print('Unable find two adjoining WZR instructions, exit')\r\n sys.exit()\r\n\r\n modify_offset = file_offset + offset\r\n print('Patch address in EA: 0x%X' % (modify_offset))\r\n print('Patch at file offset: 0x%X' % (modify_offset))\r\n do_modify(filename, modify_offset)\r\n",
"id": "592957",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "swap_wzr.py"
}
] | 0 |
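A hedged sketch of the detection step in swap_wzr.py above: disassemble AArch64 bytes with Capstone and flag two adjacent stores of WZR. The eight bytes below are hand-assembled for illustration (str wzr, [x0] followed by str wzr, [x0, #4]); they are not taken from any real Mach-O binary.

from capstone import Cs, CS_ARCH_ARM64, CS_MODE_ARM

code = bytes.fromhex("1f0000b91f0400b9")          # str wzr, [x0] ; str wzr, [x0, #4]
md = Cs(CS_ARCH_ARM64, CS_MODE_ARM)

prev = None
for insn in md.disasm(code, 0x1000):
    print(f"0x{insn.address:x}: {insn.mnemonic}\t{insn.op_str}")
    if insn.mnemonic == 'str' and insn.op_str.startswith('wzr,'):
        if prev is not None and insn.address - prev.address == 4:
            print(f"adjacent WZR stores at 0x{prev.address:x} and 0x{insn.address:x}")
        prev = insn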
mwaliman | [
{
"content": "#!/usr/bin/env python\n\n\"\"\"Detect Ponto-Medullary Junction.\n\nCopyright (c) 2017 Polytechnique Montreal <www.neuro.polymtl.ca>\nAuthor: Charley\nCreated: 2017-07-21\nModified: 2017-09-12\n\nAbout the license: see the file LICENSE.TXT\n\"\"\"\n\nfrom __future__ import print_function, absolute_import, division\n\nimport os\nimport sys\nimport numpy as np\nfrom scipy.ndimage.measurements import center_of_mass\nimport nibabel as nib\nimport argparse\n\nimport spinalcordtoolbox.image as msct_image\nfrom spinalcordtoolbox.image import Image\n\nimport sct_utils as sct\nfrom spinalcordtoolbox.utils import Metavar, SmartFormatter, ActionCreateFolder\n\n\ndef get_parser():\n # Initialize the parser\n\n parser = argparse.ArgumentParser(\n description='Detection of the Ponto-Medullary Junction (PMJ). '\n ' This method is machine-learning based and adapted for T1w-like or '\n ' T2w-like images. '\n ' If the PMJ is detected from the input image, a nifti mask is output '\n ' (\"*_pmj.nii.gz\") with one voxel (value=50) located at the predicted PMJ '\n ' position. If the PMJ is not detected, nothing is output.',\n add_help=None,\n formatter_class=SmartFormatter,\n prog=os.path.basename(__file__).strip(\".py\"))\n mandatory = parser.add_argument_group(\"\\nMANDATORY ARGUMENTS\")\n mandatory.add_argument(\n \"-i\",\n metavar=Metavar.file,\n help='Input image. Example: t2.nii.gz',\n required=True)\n mandatory.add_argument(\n \"-c\",\n help=\"Type of image contrast, if your contrast is not in the available options (t1, t2), \"\n \"use t1 (cord bright/ CSF dark) or t2 (cord dark / CSF bright)\",\n required=True,\n choices=(\"t1\", \"t2\"))\n optional = parser.add_argument_group(\"\\nOPTIONAL ARGUMENTS\")\n optional.add_argument(\n \"-h\",\n \"--help\",\n action=\"help\",\n help=\"Show this help message and exit\")\n optional.add_argument(\n \"-s\",\n metavar=Metavar.file,\n help='SC segmentation or centerline mask. '\n 'Provide this mask helps the detection of the PMJ by indicating the position of the SC '\n 'in the Right-to-Left direction. Example: t2_seg.nii.gz',\n required=False)\n optional.add_argument(\n \"-ofolder\",\n metavar=Metavar.folder,\n help='Output folder. 
Example: My_Output_Folder/',\n action=ActionCreateFolder,\n required=False)\n optional.add_argument(\n '-qc',\n metavar=Metavar.str,\n help='The path where the quality control generated content will be saved.',\n default=None)\n optional.add_argument(\n \"-igt\",\n metavar=Metavar.str,\n help=\"File name of ground-truth PMJ (single voxel).\",\n required=False)\n optional.add_argument(\n \"-r\",\n type=int,\n help=\"Remove temporary files.\",\n required=False,\n default=1,\n choices=(0, 1))\n optional.add_argument(\n \"-v\",\n type=int,\n help=\"Verbose: 0 = nothing, 1 = classic, 2 = expended\",\n required=False,\n choices=(0, 1, 2),\n default=1)\n\n return parser\n\n\nclass DetectPMJ:\n def __init__(self, fname_im, contrast, fname_seg, path_out, verbose):\n\n self.fname_im = fname_im\n self.contrast = contrast\n\n self.fname_seg = fname_seg\n\n self.path_out = path_out\n\n self.verbose = verbose\n\n self.tmp_dir = sct.tmp_create(verbose=self.verbose) # path to tmp directory\n\n self.orientation_im = Image(self.fname_im).orientation # to re-orient the data at the end\n\n self.slice2D_im = sct.extract_fname(self.fname_im)[1] + '_midSag.nii' # file used to do the detection, with only one slice\n self.dection_map_pmj = sct.extract_fname(self.fname_im)[1] + '_map_pmj' # file resulting from the detection\n\n # path to the pmj detector\n self.pmj_model = os.path.join(sct.__data_dir__, 'pmj_models', '{}_model'.format(self.contrast))\n\n self.threshold = -0.75 if self.contrast == 't1' else 0.8 # detection map threshold, depends on the contrast\n\n self.fname_out = sct.extract_fname(self.fname_im)[1] + '_pmj.nii.gz'\n\n self.fname_qc = 'qc_pmj.png'\n\n def apply(self):\n\n self.ifolder2tmp() # move data to the temp dir\n\n self.orient2pir() # orient data to PIR orientation\n\n self.extract_sagital_slice() # extract a sagital slice, used to do the detection\n\n self.detect() # run the detection\n\n self.get_max_position() # get the max of the detection map\n\n self.generate_mask_pmj() # generate the mask with one voxel (value = 50) at the predicted PMJ position\n\n fname_out2return = self.tmp2ofolder() # save results to ofolder\n\n return fname_out2return, self.tmp_dir\n\n def tmp2ofolder(self):\n \"\"\"Copy output files to the ofolder.\"\"\"\n os.chdir(self.curdir) # go back to original directory\n\n if self.pa_coord != -1: # If PMJ has been detected\n sct.printv('\\nSave resulting file...', self.verbose, 'normal')\n sct.copy(os.path.abspath(os.path.join(self.tmp_dir, self.fname_out)),\n os.path.abspath(os.path.join(self.path_out, self.fname_out)))\n\n return os.path.join(self.path_out, self.fname_out)\n else:\n return None\n\n def generate_mask_pmj(self):\n \"\"\"Output the PMJ mask.\"\"\"\n if self.pa_coord != -1: # If PMJ has been detected\n im = Image(''.join(sct.extract_fname(self.fname_im)[1:])) # image in PIR orientation\n im_mask = msct_image.zeros_like(im)\n\n im_mask.data[self.pa_coord, self.is_coord, self.rl_coord] = 50 # voxel with value = 50\n\n im_mask.change_orientation(self.orientation_im).save(self.fname_out)\n\n x_pmj, y_pmj, z_pmj = np.where(im_mask.data == 50)\n sct.printv('\\tx_pmj = ' + str(x_pmj[0]), self.verbose, 'info')\n sct.printv('\\ty_pmj = ' + str(y_pmj[0]), self.verbose, 'info')\n sct.printv('\\tz_pmj = ' + str(z_pmj[0]), self.verbose, 'info')\n\n\n def get_max_position(self):\n \"\"\"Find the position of the PMJ by thresholding the probabilistic map.\"\"\"\n img_pred = Image(self.dection_map_pmj)\n\n if True in np.unique(img_pred.data > self.threshold): # threshold 
the detection map\n img_pred_maxValue = np.max(img_pred.data) # get the max value of the detection map\n self.pa_coord = np.where(img_pred.data == img_pred_maxValue)[0][0]\n self.is_coord = np.where(img_pred.data == img_pred_maxValue)[1][0]\n\n sct.printv('\\nPonto-Medullary Junction detected', self.verbose, 'normal')\n\n else:\n self.pa_coord, self.is_coord = -1, -1\n\n sct.printv('\\nPonto-Medullary Junction not detected', self.verbose, 'normal')\n\n del img_pred\n\n def detect(self):\n \"\"\"Run the classifier on self.slice2D_im.\"\"\"\n sct.printv('\\nRun PMJ detector', self.verbose, 'normal')\n os.environ[\"FSLOUTPUTTYPE\"] = \"NIFTI_PAIR\"\n cmd_pmj = ['isct_spine_detect', self.pmj_model, self.slice2D_im.split('.nii')[0], self.dection_map_pmj]\n print(cmd_pmj)\n sct.run(cmd_pmj, verbose=0, is_sct_binary=True)\n\n img = nib.load(self.dection_map_pmj + '_svm.hdr') # convert .img and .hdr files to .nii\n nib.save(img, self.dection_map_pmj + '.nii')\n\n self.dection_map_pmj += '.nii' # fname of the resulting detection map\n\n def extract_sagital_slice(self):\n \"\"\"Extract the sagital slice where the detection is done.\n\n If the segmentation is provided,\n the 2D sagital slice is choosen accoding to the segmentation.\n\n If the segmentation is not provided,\n the 2D sagital slice is choosen as the mid-sagital slice of the input image.\n \"\"\"\n\n if self.fname_seg is not None:\n img_seg = Image(self.fname_seg)\n\n z_mid_slice = img_seg.data[:, int(img_seg.dim[1] / 2), :]\n if 1 in z_mid_slice: # if SC segmentation available at this slice\n self.rl_coord = int(center_of_mass(z_mid_slice)[1]) # Right_left coordinate\n else:\n self.rl_coord = int(img_seg.dim[2] / 2)\n del img_seg\n\n else:\n img = Image(self.fname_im)\n self.rl_coord = int(img.dim[2] / 2) # Right_left coordinate\n del img\n\n sct.run(['sct_crop_image', '-i', self.fname_im, '-start', str(self.rl_coord), '-end', str(self.rl_coord), '-dim', '2', '-o', self.slice2D_im])\n\n def orient2pir(self):\n \"\"\"Orient input data to PIR orientation.\"\"\"\n if self.orientation_im != 'PIR': # open image and re-orient it to PIR if needed\n Image(self.fname_im).change_orientation(\"PIR\").save(''.join(sct.extract_fname(self.fname_im)[1:]))\n\n if self.fname_seg is not None:\n Image(self.fname_seg).change_orientation('PIR').save(''.join(sct.extract_fname(self.fname_seg)[1:]))\n\n def ifolder2tmp(self):\n \"\"\"Copy data to tmp folder.\"\"\"\n if self.fname_im is not None: # copy input image\n sct.copy(self.fname_im, self.tmp_dir)\n self.fname_im = ''.join(sct.extract_fname(self.fname_im)[1:])\n else:\n sct.printv('ERROR: No input image', self.verbose, 'error')\n\n if self.fname_seg is not None: # copy segmentation image\n sct.copy(self.fname_seg, self.tmp_dir)\n self.fname_seg = ''.join(sct.extract_fname(self.fname_seg)[1:])\n\n self.curdir = os.getcwd()\n os.chdir(self.tmp_dir) # go to tmp directory\n\n\ndef main():\n parser = get_parser()\n arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])\n\n # Set param arguments ad inputted by user\n fname_in = arguments.i\n contrast = arguments.c\n\n # Segmentation or Centerline line\n if arguments.s is not None:\n fname_seg = arguments.s\n if not os.path.isfile(fname_seg):\n fname_seg = None\n sct.printv('WARNING: -s input file: \"' + arguments.s + '\" does not exist.\\nDetecting PMJ without using segmentation information', 1, 'warning')\n else:\n fname_seg = None\n\n # Output Folder\n if arguments.ofolder is not None:\n path_results = arguments.ofolder\n if not 
os.path.isdir(path_results) and os.path.exists(path_results):\n sct.printv(\"ERROR output directory %s is not a valid directory\" % path_results, 1, 'error')\n if not os.path.exists(path_results):\n os.makedirs(path_results)\n else:\n path_results = '.'\n\n path_qc = arguments.qc\n\n # Remove temp folder\n rm_tmp = bool(arguments.r)\n\n verbose = arguments.v\n sct.init_sct(log_level=verbose, update=True) # Update log level\n\n # Initialize DetectPMJ\n detector = DetectPMJ(fname_im=fname_in,\n contrast=contrast,\n fname_seg=fname_seg,\n path_out=path_results,\n verbose=verbose)\n\n # run the extraction\n fname_out, tmp_dir = detector.apply()\n\n # Remove tmp_dir\n if rm_tmp:\n sct.rmtree(tmp_dir)\n\n # View results\n if fname_out is not None:\n if path_qc is not None:\n from spinalcordtoolbox.reports.qc import generate_qc\n generate_qc(fname_in, fname_seg=fname_out, args=sys.argv[1:], path_qc=os.path.abspath(path_qc),process='sct_detect_pmj')\n\n sct.display_viewer_syntax([fname_in, fname_out], colormaps=['gray', 'red'])\n\n\nif __name__ == \"__main__\":\n sct.init_sct()\n main()\n",
"id": "4188219",
"language": "Python",
"matching_score": 3.7244133949279785,
"max_stars_count": 0,
"path": "scripts/sct_detect_pmj.py"
},
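The PMJ detection step in the record above reduces to "keep the location of the map maximum if anything exceeds the contrast-specific threshold, otherwise report not found". A minimal sketch of that logic with plain numpy on a synthetic map (function name is illustrative, not part of the toolbox):

import numpy as np

def max_position_above_threshold(pred, threshold):
    """Return the coordinates of the map maximum, or (-1, -1) if no value exceeds the threshold."""
    if np.any(pred > threshold):
        idx = np.unravel_index(np.argmax(pred), pred.shape)  # flat argmax -> (row, col)
        return tuple(int(i) for i in idx)
    return (-1, -1)

# toy 2-D detection map with a single bright spot at (12, 7)
pred = np.zeros((64, 32))
pred[12, 7] = 0.95
print(max_position_above_threshold(pred, 0.8))   # (12, 7)
print(max_position_above_threshold(pred, 0.99))  # (-1, -1), i.e. "not detected"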
{
"content": "#!/usr/bin/env python\n#########################################################################################\n#\n# Create mask along z direction.\n#\n# ---------------------------------------------------------------------------------------\n# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>\n# Authors: <NAME>\n# Modified: 2014-10-11\n#\n# About the license: see the file LICENSE.TXT\n#########################################################################################\n\n\n# TODO: scale size in mm.\n\nfrom __future__ import division, absolute_import\n\nimport sys\nimport os\nimport argparse\n\nimport numpy as np\n\nimport nibabel\nfrom scipy import ndimage\n\nimport sct_utils as sct\nimport spinalcordtoolbox.image as msct_image\nfrom spinalcordtoolbox.image import Image\nfrom sct_image import concat_data\nfrom spinalcordtoolbox.utils import Metavar, SmartFormatter\n\n\n# DEFAULT PARAMETERS\nclass Param:\n def __init__(self):\n self.debug = 0\n self.fname_data = ''\n self.fname_out = ''\n self.process_list = ['coord', 'point', 'centerline', 'center']\n self.process = 'center' # default method\n self.shape_list = ['cylinder', 'box', 'gaussian']\n self.shape = 'cylinder' # default shape\n self.size = '41' # in voxel. if gaussian, size corresponds to sigma.\n self.even = 0\n self.file_prefix = 'mask_' # output prefix\n self.verbose = 1\n self.remove_temp_files = 1\n self.offset = '0,0'\n\n\ndef main(args=None):\n \"\"\"\n Main function\n :param args:\n :return:\n \"\"\"\n # get parser args\n if args is None:\n args = None if sys.argv[1:] else ['--help']\n parser = get_parser()\n arguments = parser.parse_args(args=args)\n\n param = Param()\n param.fname_data = os.path.abspath(arguments.i)\n\n if arguments.p is not None:\n param.process = (arguments.p).split(',')\n if param.process[0] not in param.process_list:\n sct.printv(parser.usage.generate(error='ERROR: Process ' + param.process[0] + ' is not recognized.'))\n if arguments.size is not None:\n param.size = arguments.size\n if arguments.f is not None:\n param.shape = arguments.f\n if arguments.o is not None:\n param.fname_out = os.path.abspath(arguments.o)\n if arguments.r is not None:\n param.remove_temp_files = arguments.r\n\n param.verbose = arguments.v\n sct.init_sct(log_level=param.verbose, update=True) # Update log level\n\n # run main program\n create_mask(param)\n\n\ndef create_mask(param):\n # parse argument for method\n method_type = param.process[0]\n # check method val\n if not method_type == 'center':\n method_val = param.process[1]\n\n # check existence of input files\n if method_type == 'centerline':\n sct.check_file_exist(method_val, param.verbose)\n\n # Extract path/file/extension\n path_data, file_data, ext_data = sct.extract_fname(param.fname_data)\n\n # Get output folder and file name\n if param.fname_out == '':\n param.fname_out = os.path.abspath(param.file_prefix + file_data + ext_data)\n\n path_tmp = sct.tmp_create(basename=\"create_mask\", verbose=param.verbose)\n\n sct.printv('\\nOrientation:', param.verbose)\n orientation_input = Image(param.fname_data).orientation\n sct.printv(' ' + orientation_input, param.verbose)\n\n # copy input data to tmp folder and re-orient to RPI\n Image(param.fname_data).change_orientation(\"RPI\").save(os.path.join(path_tmp, \"data_RPI.nii\"))\n if method_type == 'centerline':\n Image(method_val).change_orientation(\"RPI\").save(os.path.join(path_tmp, \"centerline_RPI.nii\"))\n if method_type == 'point':\n 
Image(method_val).change_orientation(\"RPI\").save(os.path.join(path_tmp, \"point_RPI.nii\"))\n\n # go to tmp folder\n curdir = os.getcwd()\n os.chdir(path_tmp)\n\n # Get dimensions of data\n im_data = Image('data_RPI.nii')\n nx, ny, nz, nt, px, py, pz, pt = im_data.dim\n sct.printv('\\nDimensions:', param.verbose)\n sct.printv(im_data.dim, param.verbose)\n # in case user input 4d data\n if nt != 1:\n sct.printv('WARNING in ' + os.path.basename(__file__) + ': Input image is 4d but output mask will be 3D from first time slice.', param.verbose, 'warning')\n # extract first volume to have 3d reference\n nii = msct_image.empty_like(Image('data_RPI.nii'))\n data3d = nii.data[:, :, :, 0]\n nii.data = data3d\n nii.save('data_RPI.nii')\n\n if method_type == 'coord':\n # parse to get coordinate\n coord = [x for x in map(int, method_val.split('x'))]\n\n if method_type == 'point':\n # get file name\n # extract coordinate of point\n sct.printv('\\nExtract coordinate of point...', param.verbose)\n # TODO: change this way to remove dependence to sct.run. ProcessLabels.display_voxel returns list of coordinates\n status, output = sct.run(['sct_label_utils', '-i', 'point_RPI.nii', '-display'], verbose=param.verbose)\n # parse to get coordinate\n # TODO fixup... this is quite magic\n coord = output[output.find('Position=') + 10:-17].split(',')\n\n if method_type == 'center':\n # set coordinate at center of FOV\n coord = np.round(float(nx) / 2), np.round(float(ny) / 2)\n\n if method_type == 'centerline':\n # get name of centerline from user argument\n fname_centerline = 'centerline_RPI.nii'\n else:\n # generate volume with line along Z at coordinates 'coord'\n sct.printv('\\nCreate line...', param.verbose)\n fname_centerline = create_line(param, 'data_RPI.nii', coord, nz)\n\n # create mask\n sct.printv('\\nCreate mask...', param.verbose)\n centerline = nibabel.load(fname_centerline) # open centerline\n hdr = centerline.get_header() # get header\n hdr.set_data_dtype('uint8') # set imagetype to uint8\n spacing = hdr.structarr['pixdim']\n data_centerline = centerline.get_data() # get centerline\n # if data is 2D, reshape with empty third dimension\n if len(data_centerline.shape) == 2:\n data_centerline_shape = list(data_centerline.shape)\n data_centerline_shape.append(1)\n data_centerline = data_centerline.reshape(data_centerline_shape)\n z_centerline_not_null = [iz for iz in range(0, nz, 1) if data_centerline[:, :, iz].any()]\n # get center of mass of the centerline\n cx = [0] * nz\n cy = [0] * nz\n for iz in range(0, nz, 1):\n if iz in z_centerline_not_null:\n cx[iz], cy[iz] = ndimage.measurements.center_of_mass(np.array(data_centerline[:, :, iz]))\n # create 2d masks\n file_mask = 'data_mask'\n for iz in range(nz):\n if iz not in z_centerline_not_null:\n # write an empty nifty volume\n img = nibabel.Nifti1Image(data_centerline[:, :, iz], None, hdr)\n nibabel.save(img, (file_mask + str(iz) + '.nii'))\n else:\n center = np.array([cx[iz], cy[iz]])\n mask2d = create_mask2d(param, center, param.shape, param.size, im_data=im_data)\n # Write NIFTI volumes\n img = nibabel.Nifti1Image(mask2d, None, hdr)\n nibabel.save(img, (file_mask + str(iz) + '.nii'))\n\n fname_list = [file_mask + str(iz) + '.nii' for iz in range(nz)]\n im_out = concat_data(fname_list, dim=2).save('mask_RPI.nii.gz')\n\n im_out.change_orientation(orientation_input)\n im_out.header = Image(param.fname_data).header\n im_out.save(param.fname_out)\n\n # come back\n os.chdir(curdir)\n\n # Remove temporary files\n if param.remove_temp_files == 1:\n 
sct.printv('\\nRemove temporary files...', param.verbose)\n sct.rmtree(path_tmp)\n\n sct.display_viewer_syntax([param.fname_data, param.fname_out], colormaps=['gray', 'red'], opacities=['', '0.5'])\n\n\ndef create_line(param, fname, coord, nz):\n \"\"\"\n Create vertical line in 3D volume\n :param param:\n :param fname:\n :param coord:\n :param nz:\n :return:\n \"\"\"\n\n # duplicate volume (assumes input file is nifti)\n sct.copy(fname, 'line.nii', verbose=param.verbose)\n\n # set all voxels to zero\n sct.run(['sct_maths', '-i', 'line.nii', '-mul', '0', '-o', 'line.nii'], param.verbose)\n\n cmd = ['sct_label_utils', '-i', 'line.nii', '-o', 'line.nii', '-create-add']\n for iz in range(nz):\n if iz == nz - 1:\n cmd += [str(int(coord[0])) + ',' + str(int(coord[1])) + ',' + str(iz) + ',1']\n else:\n cmd += [str(int(coord[0])) + ',' + str(int(coord[1])) + ',' + str(iz) + ',1:']\n\n sct.run(cmd, param.verbose)\n\n return 'line.nii'\n\n\ndef create_mask2d(param, center, shape, size, im_data):\n \"\"\"\n Create a 2D mask\n :param param:\n :param center:\n :param shape:\n :param size:\n :param im_data: Image object for input data.\n :return:\n \"\"\"\n # get dim\n nx, ny, nz, nt, px, py, pz, pt = im_data.dim\n # extract offset d = 2r+1 --> r=ceil((d-1)/2.0)\n offset = param.offset.split(',')\n offset[0] = int(offset[0])\n offset[1] = int(offset[1])\n # px, py = spacing[0], spacing[1]\n\n # initialize 2d grid\n xx, yy = np.mgrid[:nx, :ny]\n mask2d = np.zeros((nx, ny))\n xc = center[0]\n yc = center[1]\n if 'mm' in size:\n size = float(size[:-2])\n radius_x = np.ceil((int(np.round(size / px)) - 1) / 2.0)\n radius_y = np.ceil((int(np.round(size / py)) - 1) / 2.0)\n else:\n radius_x = np.ceil((int(size) - 1) / 2.0)\n radius_y = radius_x\n\n if shape == 'box':\n mask2d = ((abs(xx + offset[0] - xc) <= radius_x) & (abs(yy + offset[1] - yc) <= radius_y)) * 1\n\n elif shape == 'cylinder':\n mask2d = (((xx + offset[0] - xc) / radius_x) ** 2 + ((yy + offset[1] - yc) / radius_y) ** 2 <= 1) * 1\n\n elif shape == 'gaussian':\n sigma = float(radius_x)\n mask2d = np.exp(-(((xx + offset[0] - xc)**2) / (2 * (sigma**2)) + ((yy + offset[1] - yc)**2) / (2 * (sigma**2))))\n\n return mask2d\n\n\ndef get_parser():\n # Initialize default parameters\n param_default = Param()\n # Initialize the parser\n\n parser = argparse.ArgumentParser(\n description='Create mask along z direction.',\n add_help=None,\n prog=os.path.basename(__file__).strip(\".py\"),\n formatter_class= SmartFormatter)\n mandatoryArguments = parser.add_argument_group(\"\\nMANDATORY ARGUMENTS\")\n mandatoryArguments.add_argument(\n '-i',\n help='Image to create mask on. Only used to get header. Must be 3D. Example: data.nii.gz',\n metavar=Metavar.file,\n required = False)\n mandatoryArguments.add_argument(\n '-p',\n help='R|Process to generate mask.\\n'\n ' <coord,XxY>: Center mask at the X,Y coordinates. (e.g. \"coord,20x15\")\\n'\n ' <point,FILE>: Center mask at the X,Y coordinates of the label defined in input volume FILE. (e.g. \"point,label.nii.gz\")\\n'\n ' <center>: Center mask in the middle of the FOV (nx/2, ny/2).\\n'\n ' <centerline,FILE>: At each slice, the mask is centered at the spinal cord centerline, defined by the input segmentation FILE. (e.g. 
\"centerline,t2_seg.nii.gz\")',\n metavar=Metavar.str,\n required = False,\n default = param_default.process)\n optional = parser.add_argument_group(\"\\nOPTIONAL ARGUMENTS\")\n optional.add_argument(\n \"-h\",\n \"--help\",\n action=\"help\",\n help=\"Show this help message and exit\")\n optional.add_argument(\n '-size',\n help='Size of the mask in the axial plane, given in pixel (Example: 35) or in millimeter (Example: 35mm). '\n 'If shape=gaussian, size corresponds to \"sigma\" (Example: 45).',\n metavar=Metavar.str,\n required = False,\n default = param_default.size)\n optional.add_argument(\n '-f',\n help='Shape of the mask',\n required = False,\n default = param_default.shape,\n choices=('cylinder', 'box', 'gaussian'))\n optional.add_argument(\n '-o',\n metavar=Metavar.str,\n help='Name of output mask, Example: data.nii',\n required = False)\n optional.add_argument(\n \"-r\",\n type=int,\n help='Remove temporary files',\n required = False,\n default = 1,\n choices = (0, 1))\n optional.add_argument(\n \"-v\",\n type=int,\n help=\"Verbose: 0 = nothing, 1 = classic, 2 = expended \",\n required=False,\n choices=(0, 1, 2),\n default = 1)\n\n return parser\n\n\nif __name__ == \"__main__\":\n sct.init_sct()\n main()\n",
"id": "11472562",
"language": "Python",
"matching_score": 2.6971893310546875,
"max_stars_count": 0,
"path": "scripts/sct_create_mask.py"
},
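create_mask2d in the record above builds each per-slice mask on an np.mgrid grid. A self-contained sketch of the same three shapes with plain numpy (the mm-to-voxel conversion and the offset handling are left out; names are illustrative):

import numpy as np

def mask2d(nx, ny, center, shape='cylinder', radius=5.0):
    """Build a 2-D mask of size nx-by-ny centered at `center`."""
    xx, yy = np.mgrid[:nx, :ny]
    xc, yc = center
    if shape == 'box':
        return ((np.abs(xx - xc) <= radius) & (np.abs(yy - yc) <= radius)).astype(int)
    if shape == 'cylinder':
        return ((((xx - xc) / radius) ** 2 + ((yy - yc) / radius) ** 2) <= 1).astype(int)
    if shape == 'gaussian':
        # here `radius` plays the role of sigma
        return np.exp(-((xx - xc) ** 2 + (yy - yc) ** 2) / (2 * radius ** 2))
    raise ValueError(shape)

m = mask2d(41, 41, center=(20, 20), shape='cylinder', radius=10)
print(m.sum())  # number of voxels inside the disc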
{
"content": "#!/usr/bin/env python\n#\n# This program is a wrapper for the isct_dice_coefficient binary\n#\n# ---------------------------------------------------------------------------------------\n# Copyright (c) 2013 Polytechnique Montreal <www.neuro.polymtl.ca>\n# Authors: <NAME>\n# Modified: 2017-07-05 (charley)\n#\n# About the license: see the file LICENSE.TXT\n#########################################################################################\n\nfrom __future__ import absolute_import\n\nimport sys\nimport os\nimport argparse\n\nimport sct_utils as sct\nfrom spinalcordtoolbox.utils import Metavar, SmartFormatter\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description='Compute the Dice Coefficient. '\n 'N.B.: indexing (in both time and space) starts with 0 not 1! Inputting -1 for a '\n 'size will set it to the full image extent for that dimension.',\n add_help=None,\n formatter_class=SmartFormatter,\n prog=os.path.basename(__file__).strip(\".py\"))\n mandatory = parser.add_argument_group(\"\\nMANDATORY ARGUMENTS\")\n mandatory.add_argument(\n '-i',\n metavar=Metavar.file,\n help='First input image. Example: t2_seg.nii.gz',\n required=True)\n mandatory.add_argument(\n '-d',\n metavar=Metavar.file,\n help='Second input image. Example: t2_manual_seg.nii.gz',\n required=True)\n optional = parser.add_argument_group(\"\\nOPTIONAL ARGUMENTS\")\n optional.add_argument(\n \"-h\",\n \"--help\",\n action=\"help\",\n help=\"Show this help message and exit\")\n optional.add_argument(\n '-2d-slices',\n type=int,\n help='Compute DC on 2D slices in the specified dimension',\n required=False,\n choices=(0, 1, 2))\n optional.add_argument(\n '-b',\n metavar=Metavar.list,\n help='Bounding box with the coordinates of the origin and the size of the box as follow: '\n 'x_origin,x_size,y_origin,y_size,z_origin,z_size. Example: 5,10,5,10,10,15',\n required=False)\n optional.add_argument(\n '-bmax',\n type=int,\n help='Use maximum bounding box of the images union to compute DC.',\n required=False,\n choices=(0, 1))\n optional.add_argument(\n '-bzmax',\n type=int,\n help='Use maximum bounding box of the images union in the \"Z\" direction to compute DC.',\n required=False,\n choices=(0, 1))\n optional.add_argument(\n '-bin',\n type=int,\n help='Binarize image before computing DC. (Put non-zero-voxels to 1)',\n required=False,\n choices=(0, 1))\n optional.add_argument(\n '-o',\n metavar=Metavar.str,\n help='Output file with DC results (.txt). 
Example: dice_coeff.txt',\n required=False)\n optional.add_argument(\n \"-r\",\n type=int,\n help=\"Remove temporary files.\",\n required=False,\n default=1,\n choices=(0, 1))\n optional.add_argument(\n '-v',\n type=int,\n help='Verbose.',\n required=False,\n default=1,\n choices=(0, 1))\n\n return parser\n\nif __name__ == \"__main__\":\n sct.init_sct()\n parser = get_parser()\n arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])\n\n fname_input1 = arguments.i\n fname_input2 = arguments.d\n\n verbose = arguments.v\n sct.init_sct(log_level=verbose, update=True) # Update log level\n\n tmp_dir = sct.tmp_create(verbose=verbose) # create tmp directory\n tmp_dir = os.path.abspath(tmp_dir)\n\n # copy input files to tmp directory\n # for fname in [fname_input1, fname_input2]:\n sct.copy(fname_input1, tmp_dir)\n sct.copy(fname_input2, tmp_dir)\n fname_input1 = ''.join(sct.extract_fname(fname_input1)[1:])\n fname_input2 = ''.join(sct.extract_fname(fname_input2)[1:])\n\n curdir = os.getcwd()\n os.chdir(tmp_dir) # go to tmp directory\n\n if '-bin' in arguments:\n fname_input1_bin = sct.add_suffix(fname_input1, '_bin')\n sct.run(['sct_maths', '-i', fname_input1, '-bin', '0', '-o', fname_input1_bin])\n fname_input1 = fname_input1_bin\n fname_input2_bin = sct.add_suffix(fname_input2, '_bin')\n sct.run(['sct_maths', '-i', fname_input2, '-bin', '0', '-o', fname_input2_bin])\n fname_input2 = fname_input2_bin\n\n # copy header of im_1 to im_2\n from spinalcordtoolbox.image import Image\n im_1 = Image(fname_input1)\n im_2 = Image(fname_input2)\n im_2.header = im_1.header\n im_2.save()\n\n cmd = ['isct_dice_coefficient', fname_input1, fname_input2]\n\n if vars(arguments)[\"2d_slices\"] is not None:\n cmd += ['-2d-slices', str(vars(arguments)[\"2d_slices\"])]\n if arguments.b is not None:\n bounding_box = arguments.b\n cmd += ['-b'] + bounding_box\n if arguments.bmax is not None and arguments.bmax == 1:\n cmd += ['-bmax']\n if arguments.bzmax is not None and arguments.bzmax == 1:\n cmd += ['-bzmax']\n if arguments.o is not None:\n path_output, fname_output, ext = sct.extract_fname(arguments.o)\n cmd += ['-o', fname_output + ext]\n\n rm_tmp = bool(arguments.r)\n\n # # Computation of Dice coefficient using Python implementation.\n # # commented for now as it does not cover all the feature of isct_dice_coefficient\n # #from spinalcordtoolbox.image import Image, compute_dice\n # #dice = compute_dice(Image(fname_input1), Image(fname_input2), mode='3d', zboundaries=False)\n # #sct.printv('Dice (python-based) = ' + str(dice), verbose)\n\n status, output = sct.run(cmd, verbose, is_sct_binary=True)\n\n os.chdir(curdir) # go back to original directory\n\n # copy output file into original directory\n if arguments.o is not None:\n sct.copy(os.path.join(tmp_dir, fname_output + ext), os.path.join(path_output, fname_output + ext))\n\n # remove tmp_dir\n if rm_tmp:\n sct.rmtree(tmp_dir)\n\n sct.printv(output, verbose)\n",
"id": "10744540",
"language": "Python",
"matching_score": 1.7619390487670898,
"max_stars_count": 0,
"path": "scripts/sct_dice_coefficient.py"
},
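The script above is a thin wrapper around the isct_dice_coefficient binary; as a point of reference, the plain whole-volume, binarized Dice score it reports corresponds to this numpy sketch (hypothetical helper, not the toolbox API):

import numpy as np

def dice(a, b):
    """Dice coefficient of two binary masks: 2|A intersect B| / (|A| + |B|)."""
    a = np.asarray(a).astype(bool)
    b = np.asarray(b).astype(bool)
    denom = a.sum() + b.sum()
    if denom == 0:
        return 1.0  # convention: two empty masks are considered identical
    return 2.0 * np.logical_and(a, b).sum() / denom

seg = np.zeros((10, 10)); seg[2:6, 2:6] = 1
ref = np.zeros((10, 10)); ref[3:7, 3:7] = 1
print(dice(seg, ref))  # 0.5625 (overlap of 9 voxels over 16 + 16)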
{
"content": "import os, sys, logging\nimport glob\nimport nibabel as nib\nimport numpy as np\nimport argparse\n\nfrom spinalcordtoolbox.image import Image, change_type, zeros_like\nfrom spinalcordtoolbox.deepseg_sc.core import find_centerline, crop_image_around_centerline\nfrom spinalcordtoolbox.deepseg_sc.core import post_processing_slice_wise, apply_intensity_normalization\nfrom spinalcordtoolbox.deepseg_sc.cnn_models import nn_architecture_seg\nimport sct_utils as sct\nfrom spinalcordtoolbox import resampling\n\n\ndef preprocess_image(image, contrast_type='t1', ctr_algo='svm', ctr_file=None, brain_bool=True,\n\t\t\t\t\t\t\t\t kernel_size='2d', remove_temp_files=1, verbose=1):\n\t\"\"\" Resamples, reorients to RPI, and applies OptiC cropping to an Image and returns the result as an sct Image.\n\tInputs:\n\t\timage - Image to be cropped\n\tReturns:\n\t\tim_nii - resampled Image\n\t\tim_norm_in - resampled, cropped, and normalized Imagect\n\t\tX_CROP_LST, Y_CROP_LST, Z_CROP_LST - coordinates for cropping original image\n\t\"\"\"\n\t\n\tim = image.copy()\n\t\n\t# create temporary folder with intermediate results\n\ttmp_folder = sct.TempFolder(verbose=verbose)\n\ttmp_folder_path = tmp_folder.get_path()\n\tif ctr_algo == 'file': # if the ctr_file is provided\n\t\ttmp_folder.copy_from(ctr_file)\n\t\tfile_ctr = os.path.basename(ctr_file)\n\telse:\n\t\tfile_ctr = None\n\ttmp_folder.chdir()\n\n\t# re-orient image to RPI if necessary...\n\toriginal_orientation = im.orientation\n\tfname_orient = 'image_in_RPI.nii'\n\tim.change_orientation('RPI').save(fname_orient)\n\n\tinput_resolution = im.dim[4:7]\n\n\t# resamples image to 0.5x0.5 resolution and finds the spinal cord centerline - execute OptiC binary\n\tfname_res, centerline_filename, im_labels = find_centerline(algo=ctr_algo,\n\t\t\t\t\t\t\t\t\t\t\t\t\t image_fname=fname_orient,\n\t\t\t\t\t\t\t\t\t\t\t\t\t contrast_type=contrast_type,\n\t\t\t\t\t\t\t\t\t\t\t\t\t brain_bool=brain_bool,\n\t\t\t\t\t\t\t\t\t\t\t\t\t folder_output=tmp_folder_path,\n\t\t\t\t\t\t\t\t\t\t\t\t\t remove_temp_files=remove_temp_files,\n\t\t\t\t\t\t\t\t\t\t\t\t\t centerline_fname=file_ctr)\n\t# could save the ctr_nii later if desired\n\tim_nii, ctr_nii = Image(fname_res), Image(centerline_filename)\n\t\n\t# crop image around the spinal cord centerline\n\tcrop_size = 96 if (kernel_size == '3d' and contrast_type == 't2s') else 64\n\tX_CROP_LST, Y_CROP_LST, Z_CROP_LST, im_crop_nii = crop_image_around_centerline(im_in=im_nii,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t ctr_in=ctr_nii,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t crop_size=crop_size)\n\t# normalize the intensity of the images\n\tim_norm_in = apply_intensity_normalization(im_in=im_crop_nii)\n\treturn im_nii, im_norm_in, X_CROP_LST, Y_CROP_LST, Z_CROP_LST\n\ndef segment_2d_slices(image, seg_model, binary_seg=True, threshold=0.5):\n\t\"\"\"Applies seg_model on 2d slices of a cropped Image.\n\t\n\tInputs:\n\t\timage - Image to be segmented\n\t\tseg_model - 2d segmentation model\n\t\tbinary - whether the segmentation is binary or partial\n\t\tthreshold - threshold for binary segmentation\n\tReturns:\n\t\tseg_crop - output segmentation as an Image\n\t\"\"\"\n\tcropped_seg = zeros_like(image)\n\tcropped_seg_data = np.zeros(image.data.shape)\n\n\tdata_norm = image.data\n\tx_cOm, y_cOm = None, None #??\n\tfor z in range(data_norm.shape[2]):\n\t\tpred_seg = seg_model.predict(np.expand_dims(np.expand_dims(data_norm[:, :, z], -1), 0),\n\t\t\t\t\t\t\t\t\t batch_size=BATCH_SIZE)[0, :, :, 0]\n\t\tif binary_seg:\n\t\t\tpred_seg_th = 
(pred_seg > threshold).astype(int)\n\t\t\tpred_seg_pp = post_processing_slice_wise(pred_seg_th, x_cOm, y_cOm)\n\t\telse:\n\t\t\tpred_seg_pp = pred_seg\n\t\tcropped_seg_data[:, :, z] = pred_seg_pp\n\tcropped_seg.data = cropped_seg_data\n\treturn cropped_seg\n\ndef uncrop_image(ref_in, data_crop, X_CROP_LST, Y_CROP_LST, Z_CROP_LST):\n\t\"\"\" Reconstructs the segmentation from cropped seg_data and returns as an sct Image.\n\t\n\tInputs:\n\t\tref_in - original reference Image with correct dimensions\n\t\tdata_crop - cropped segmentation data\n\t\tX_CROP_LST, Y_CROP_LST, Z_CROP_LST - coordinates for cropping original image\n\tReturns:\n\t\tseg_uncrop - uncropped Image\n\t\"\"\"\n\tseg_uncrop = zeros_like(ref_in, dtype=np.float32)\n\tcrop_size_x, crop_size_y = data_crop.shape[:2]\n\tfor i_z, zz in enumerate(Z_CROP_LST):\n\t\tpred_seg = data_crop[:, :, zz]\n\t\tx_start, y_start = int(X_CROP_LST[i_z]), int(Y_CROP_LST[i_z])\n\t\tx_end = x_start + crop_size_x if x_start + crop_size_x < seg_uncrop.dim[0] else seg_uncrop.dim[0]\n\t\ty_end = y_start + crop_size_y if y_start + crop_size_y < seg_uncrop.dim[1] else seg_uncrop.dim[1]\n\t\tseg_uncrop.data[x_start:x_end, y_start:y_end, zz] = pred_seg[0:x_end - x_start, 0:y_end - y_start]\n\treturn seg_uncrop\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser('This script has 3 inputs: An input directory of niftis, an output directory, and the path to a weights file.')\n\tparser.add_argument(\"input_dir\")\n\tparser.add_argument(\"output_dir\")\n\tparser.add_argument(\"seg_model_fname\")\n\tparser.add_argument(\"contrast_type\")\n\tparser.add_argument(\"binary_seg\")\n\targs = parser.parse_args()\n\n\tinput_dir = args.input_dir\n\toutput_dir = args.output_dir\n\tseg_model_fname = args.seg_model_fname\n\tcontrast_type = args.contrast_type\n\tbinary_seg = True if args.binary_seg == 'True' else False\n\n \t# build model\n\tBATCH_SIZE = 4\n\tinput_size = (64,64)\n\n\tseg_model = nn_architecture_seg(height=input_size[0],\n\t\t\t\t\t\t\t\t\t\twidth=input_size[1],\n\t\t\t\t\t\t\t\t\t\tdepth=2,\n\t\t\t\t\t\t\t\t\t\tfeatures=32,\n\t\t\t\t\t\t\t\t\t\tbatchnorm=True,\n\t\t\t\t\t\t\t\t\t\tdropout=0.0)\n\tseg_model.load_weights(seg_model_fname)\n\n\t# segment image\n\tfor name in os.listdir(input_dir):\n\t\tif not '.DS_Store' in name:\n\t\t\tfname_image = input_dir + name\n\t\t\tparent, stem, ext = sct.extract_fname(fname_image)\n\n\t\t\t# import image\n\t\t\timage = Image(fname_image)\n\t\t\timage.save(output_dir+ stem + '_im' + ext)\n\n\t\t\t# crop image\n\t\t\tresampled_image, cropped_image, X_CROP_LST, Y_CROP_LST, Z_CROP_LST = preprocess_image(image, contrast_type=contrast_type)\n\t\t\tresampled_image.save(output_dir+ stem + '_res_im' + ext)\n\t\t\tif contrast_type == 't2':\n\t\t\t\tcropped_image.data = 255.0 - cropped_image.data\n\t\t\tcropped_image.save(output_dir+ stem + '_im_crop' + ext)\n\n\t\t\t# segment \n\t\t\tcropped_seg = segment_2d_slices(cropped_image, seg_model,binary_seg=binary_seg)\n\t\t\tcropped_seg.save(output_dir+ stem + '_seg_crop' + ext)\n\n\t\t\t# uncrop segmentation\n\t\t\tuncropped_seg = uncrop_image(resampled_image, cropped_seg.data, X_CROP_LST, Y_CROP_LST, Z_CROP_LST)\n\t\t\tuncropped_seg.save(output_dir+ stem + '_seg' + ext)\n\n",
"id": "12450267",
"language": "Python",
"matching_score": 3.920283079147339,
"max_stars_count": 0,
"path": "seg.py"
},
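uncrop_image in the record above pastes each cropped slice back into a full-size volume, clipping the patch when the crop window runs past the image border. A dependency-free 2-D sketch of that bounds-clamped paste (synthetic arrays, illustrative names):

import numpy as np

def paste_patch(full_shape, patch, x_start, y_start):
    """Paste a 2-D patch into a zeroed array of full_shape, clipping at the borders."""
    out = np.zeros(full_shape, dtype=patch.dtype)
    x_end = min(x_start + patch.shape[0], full_shape[0])
    y_end = min(y_start + patch.shape[1], full_shape[1])
    out[x_start:x_end, y_start:y_end] = patch[:x_end - x_start, :y_end - y_start]
    return out

patch = np.ones((64, 64))
print(paste_patch((100, 100), patch, 60, 60).sum())  # 1600.0 -> only a 40x40 corner fits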
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8\n#########################################################################################\n#\n# Function to segment the spinal cord using convolutional neural networks\n#\n# ---------------------------------------------------------------------------------------\n# Copyright (c) 2017 Polytechnique Montreal <www.neuro.polymtl.ca>\n# Authors: <NAME> & <NAME>\n#\n# About the license: see the file LICENSE.TXT\n#########################################################################################\n\nfrom __future__ import division, absolute_import\n\nimport os\nimport sys\nimport argparse\n\nimport sct_utils as sct\nfrom spinalcordtoolbox.utils import Metavar, SmartFormatter, ActionCreateFolder\n\n\ndef get_parser():\n \"\"\"Initialize the parser.\"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Spinal Cord Segmentation using convolutional networks. Reference: Gros et al. Automatic \"\n \"segmentation of the spinal cord and intramedullary multiple sclerosis lesions with convolutional \"\n \"neural networks. Neuroimage. 2018 Oct 6;184:901-915. \",\n formatter_class=SmartFormatter,\n add_help=None,\n prog=os.path.basename(__file__).strip(\".py\"))\n mandatory = parser.add_argument_group(\"\\nMANDATORY ARGUMENTS\")\n mandatory.add_argument(\n \"-i\",\n metavar=Metavar.file,\n help='Input image. Example: t1.nii.gz')\n mandatory.add_argument(\n \"-c\",\n help=\"Type of image contrast.\",\n choices=('t1', 't2', 't2s', 'dwi'))\n optional = parser.add_argument_group(\"\\nOPTIONAL ARGUMENTS\")\n optional.add_argument(\n \"-h\",\n \"--help\",\n action=\"help\",\n help=\"show this help message and exit\")\n optional.add_argument(\n \"-centerline\",\n help=\"R|Method used for extracting the centerline:\\n\"\n \" svm: Automatic detection using Support Vector Machine algorithm.\\n\"\n \" cnn: Automatic detection using Convolutional Neural Network.\\n\"\n \" viewer: Semi-automatic detection using manual selection of a few points with an interactive viewer \"\n \"followed by regularization.\\n\"\n \" file: Use an existing centerline (use with flag -file_centerline)\",\n choices=('svm', 'cnn', 'viewer', 'file'),\n default=\"svm\")\n optional.add_argument(\n \"-file_centerline\",\n metavar=Metavar.str,\n help='Input centerline file (to use with flag -centerline file). Example: t2_centerline_manual.nii.gz')\n optional.add_argument(\n \"-brain\",\n type=int,\n help='Indicate if the input image contains brain sections (to speed up segmentation). This flag is only '\n 'effective with \"-centerline cnn\".',\n choices=(0, 1))\n optional.add_argument(\n \"-kernel\",\n help=\"Choice of kernel shape for the CNN. Segmentation with 3D kernels is longer than with 2D kernels.\",\n choices=('2d', '3d'),\n default=\"2d\")\n optional.add_argument(\n \"-ofolder\",\n metavar=Metavar.str,\n help='Output folder. 
Example: My_Output_Folder/ ',\n action=ActionCreateFolder,\n default=os.getcwd())\n optional.add_argument(\n \"-r\",\n type=int,\n help=\"Remove temporary files.\",\n choices=(0, 1),\n default=1)\n optional.add_argument(\n \"-v\",\n type=int,\n help=\"1: display on (default), 0: display off, 2: extended\",\n choices=(0, 1, 2),\n default=1)\n optional.add_argument(\n '-qc',\n metavar=Metavar.str,\n help='The path where the quality control generated content will be saved',\n default=None)\n optional.add_argument(\n '-qc-dataset',\n metavar=Metavar.str,\n help='If provided, this string will be mentioned in the QC report as the dataset the process was run on',)\n optional.add_argument(\n '-qc-subject',\n metavar=Metavar.str,\n help='If provided, this string will be mentioned in the QC report as the subject the process was run on',)\n optional.add_argument(\n '-igt',\n metavar=Metavar.str,\n help='File name of ground-truth segmentation.',)\n\n return parser\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n parser = get_parser()\n args = parser.parse_args(args=None if sys.argv[1:] else ['--help'])\n\n fname_image = os.path.abspath(args.i)\n contrast_type = args.c\n\n ctr_algo = args.centerline\n\n if args.brain is None:\n if contrast_type in ['t2s', 'dwi']:\n brain_bool = False\n if contrast_type in ['t1', 't2']:\n brain_bool = True\n else:\n brain_bool = bool(args.brain)\n\n kernel_size = args.kernel\n if kernel_size == '3d' and contrast_type == 'dwi':\n kernel_size = '2d'\n sct.printv('3D kernel model for dwi contrast is not available. 2D kernel model is used instead.',\n type=\"warning\")\n\n\n if ctr_algo == 'file' and args.file_centerline is None:\n sct.printv('Please use the flag -file_centerline to indicate the centerline filename.', 1, 'warning')\n sys.exit(1)\n\n if args.file_centerline is not None:\n manual_centerline_fname = args.file_centerline\n ctr_algo = 'file'\n else:\n manual_centerline_fname = None\n\n remove_temp_files = args.r\n verbose = args.v\n sct.init_sct(log_level=verbose, update=True) # Update log level\n\n path_qc = args.qc\n qc_dataset = args.qc_dataset\n qc_subject = args.qc_subject\n output_folder = args.ofolder\n\n algo_config_stg = '\\nMethod:'\n algo_config_stg += '\\n\\tCenterline algorithm: ' + str(ctr_algo)\n algo_config_stg += '\\n\\tAssumes brain section included in the image: ' + str(brain_bool)\n algo_config_stg += '\\n\\tDimension of the segmentation kernel convolutions: ' + kernel_size + '\\n'\n sct.printv(algo_config_stg)\n\n # Segment image\n from spinalcordtoolbox.image import Image\n from spinalcordtoolbox.deepseg_sc.core import deep_segmentation_spinalcord\n from spinalcordtoolbox.reports.qc import generate_qc\n\n im_image = Image(fname_image)\n # note: below we pass im_image.copy() otherwise the field absolutepath becomes None after execution of this function\n im_seg, im_image_RPI_upsamp, im_seg_RPI_upsamp, im_labels_viewer, im_ctr = \\\n deep_segmentation_spinalcord(im_image.copy(), contrast_type, ctr_algo=ctr_algo,\n ctr_file=manual_centerline_fname, brain_bool=brain_bool, kernel_size=kernel_size,\n remove_temp_files=remove_temp_files, verbose=verbose)\n\n # Save segmentation\n fname_seg = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_seg' +\n sct.extract_fname(fname_image)[2]))\n\n # copy q/sform from input image to output segmentation\n im_seg.copy_qform_from_ref(im_image)\n im_seg.save(fname_seg)\n\n if ctr_algo == 'viewer':\n # Save labels\n fname_labels = os.path.abspath(os.path.join(output_folder, 
sct.extract_fname(fname_image)[1] + '_labels-centerline' +\n sct.extract_fname(fname_image)[2]))\n im_labels_viewer.save(fname_labels)\n\n if verbose == 2:\n # Save ctr\n fname_ctr = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_centerline' +\n sct.extract_fname(fname_image)[2]))\n im_ctr.save(fname_ctr)\n\n if path_qc is not None:\n generate_qc(fname_image, fname_seg=fname_seg, args=sys.argv[1:], path_qc=os.path.abspath(path_qc),\n dataset=qc_dataset, subject=qc_subject, process='sct_deepseg_sc')\n sct.display_viewer_syntax([fname_image, fname_seg], colormaps=['gray', 'red'], opacities=['', '0.7'])\n\n\nif __name__ == \"__main__\":\n sct.init_sct()\n main()\n",
"id": "11143736",
"language": "Python",
"matching_score": 3.3175265789031982,
"max_stars_count": 0,
"path": "scripts/sct_deepseg_sc.py"
},
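main() in the record above derives two defaults from the image contrast when the user does not set them: whether a brain section is assumed to be present, and whether the 3-D kernel is allowed. A small sketch of that resolution logic (plain Python, illustrative function name):

def resolve_defaults(contrast_type, brain=None, kernel='2d'):
    """Mirror the contrast-dependent defaults: no brain section assumed for t2s/dwi,
    and no 3-D kernel model available for dwi."""
    if brain is None:
        brain_bool = contrast_type in ('t1', 't2')
    else:
        brain_bool = bool(brain)
    if kernel == '3d' and contrast_type == 'dwi':
        kernel = '2d'  # fall back: 3-D model is not available for dwi
    return brain_bool, kernel

print(resolve_defaults('t2s'))               # (False, '2d')
print(resolve_defaults('dwi', kernel='3d'))  # (False, '2d')
print(resolve_defaults('t1', brain=0))       # (False, '2d')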
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8\n# This command-line tool is the interface for the deepseg_gm API\n# that implements the model for the Spinal Cord Gray Matter Segmentation.\n#\n# Reference paper:\n# <NAME>., <NAME>., & <NAME>. (2017).\n# Spinal cord gray matter segmentation using deep dilated convolutions.\n# URL: https://arxiv.org/abs/1710.01269\n\nfrom __future__ import absolute_import\n\nimport sys\nimport os\nimport argparse\n\nimport sct_utils as sct\nfrom spinalcordtoolbox.utils import Metavar, SmartFormatter\n\nfrom spinalcordtoolbox.reports.qc import generate_qc\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(\n description='Spinal Cord Gray Matter (GM) Segmentation using deep dilated convolutions. The contrast of the '\n 'input image must be similar to a T2*-weighted image: WM dark, GM bright and CSF bright. '\n 'Reference: Perone CS, Calabrese E, <NAME>. Spinal cord gray matter segmentation using deep '\n 'dilated convolutions. Sci Rep 2018;8(1):5966.',\n add_help=None,\n formatter_class=SmartFormatter,\n prog=os.path.basename(__file__).strip(\".py\"))\n mandatory = parser.add_argument_group(\"\\nMANDATORY ARGUMENTS\")\n mandatory.add_argument(\n \"-i\",\n help=\"Image filename to segment (3D volume). Example: t2s.nii.gz.\",\n metavar=Metavar.file)\n optional = parser.add_argument_group(\"\\nOPTIONAL ARGUMENTS\")\n optional.add_argument(\n \"-h\",\n \"--help\",\n action=\"help\",\n help=\"Show this help message and exit\")\n optional.add_argument(\n \"-o\",\n help=\"Output segmentation file name. Example: sc_gm_seg.nii.gz\",\n metavar=Metavar.file,\n default=None)\n misc = parser.add_argument_group('\\nMISC')\n misc.add_argument(\n '-qc',\n help=\"The path where the quality control generated content will be saved.\",\n metavar=Metavar.str,\n default=None)\n misc.add_argument(\n '-qc-dataset',\n help='If provided, this string will be mentioned in the QC report as the dataset the process was run on',\n metavar=Metavar.str)\n misc.add_argument(\n '-qc-subject',\n help='If provided, this string will be mentioned in the QC report as the subject the process was run on',\n metavar=Metavar.str)\n misc.add_argument(\n \"-m\",\n help=\"Model to use (large or challenge). \"\n \"The model 'large' will be slower but \"\n \"will yield better results. The model \"\n \"'challenge' was built using data from \"\n \"the following challenge: goo.gl/h4AVar.\",\n choices=('large', 'challenge'),\n default='large')\n misc.add_argument(\n \"-thr\",\n type=float,\n help='Threshold to apply in the segmentation predictions, use 0 (zero) to disable it. Example: 0.999',\n metavar=Metavar.float,\n default=0.999)\n misc.add_argument(\n \"-t\",\n help=\"Enable TTA (test-time augmentation). 
\"\n \"Better results, but takes more time and \"\n \"provides non-deterministic results.\",\n metavar='')\n misc.add_argument(\n \"-v\",\n type=int,\n help=\"Verbose: 0 = no verbosity, 1 = verbose.\",\n choices=(0, 1),\n default=1)\n\n return parser\n\n\ndef run_main():\n parser = get_parser()\n arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])\n input_filename = arguments.i\n if arguments.o is not None:\n output_filename = arguments.o\n else:\n output_filename = sct.add_suffix(input_filename, '_gmseg')\n\n use_tta = arguments.t\n model_name = arguments.m\n threshold = arguments.thr\n verbose = arguments.v\n sct.init_sct(log_level=verbose, update=True) # Update log level\n\n if threshold > 1.0 or threshold < 0.0:\n raise RuntimeError(\"Threshold should be between 0.0 and 1.0.\")\n\n # Threshold zero means no thresholding\n if threshold == 0.0:\n threshold = None\n\n from spinalcordtoolbox.deepseg_gm import deepseg_gm\n deepseg_gm.check_backend()\n\n out_fname = deepseg_gm.segment_file(input_filename, output_filename,\n model_name, threshold, int(verbose),\n use_tta)\n\n path_qc = arguments.qc\n qc_dataset = arguments.qc_dataset\n qc_subject = arguments.qc_subject\n if path_qc is not None:\n generate_qc(fname_in1=input_filename, fname_seg=out_fname, args=sys.argv[1:], path_qc=os.path.abspath(path_qc),\n dataset=qc_dataset, subject=qc_subject, process='sct_deepseg_gm')\n\n sct.display_viewer_syntax([input_filename, format(out_fname)],\n colormaps=['gray', 'red'],\n opacities=['1', '0.7'],\n verbose=verbose)\n\n\nif __name__ == '__main__':\n sct.init_sct()\n run_main()\n",
"id": "2647598",
"language": "Python",
"matching_score": 1.0602401494979858,
"max_stars_count": 0,
"path": "scripts/sct_deepseg_gm.py"
},
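run_main() above validates the -thr value and maps 0 to "no thresholding". Reduced to a sketch (hypothetical helper name):

def normalize_threshold(thr):
    """Check that the threshold lies in [0, 1]; 0 disables thresholding (returns None)."""
    if thr > 1.0 or thr < 0.0:
        raise RuntimeError("Threshold should be between 0.0 and 1.0.")
    return None if thr == 0.0 else thr

print(normalize_threshold(0.999))  # 0.999
print(normalize_threshold(0.0))    # None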
{
"content": "#!/usr/bin/env python\n##############################################################################\n#\n# Perform operations on images\n#\n# ----------------------------------------------------------------------------\n# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>\n# Authors: <NAME>, <NAME>\n#\n# About the license: see the file LICENSE.TXT\n##############################################################################\n\nfrom __future__ import absolute_import\n\nimport os\nimport sys\nimport argparse\nimport numpy as np\n\nimport spinalcordtoolbox.image as msct_image\nfrom spinalcordtoolbox.image import Image\nfrom spinalcordtoolbox.utils import Metavar, SmartFormatter\n\nimport sct_utils as sct\n\n\ndef get_parser():\n\n parser = argparse.ArgumentParser(\n description='Perform manipulations on images (e.g., pad, change space, split along dimension). '\n 'Inputs can be a number, a 4d image, or several 3d images separated with \",\"',\n formatter_class=SmartFormatter,\n add_help=None,\n prog=os.path.basename(__file__).strip('.py'))\n mandatory = parser.add_argument_group('MANDATORY ARGUMENTS')\n mandatory.add_argument(\n '-i',\n nargs='+',\n metavar=Metavar.file,\n help='Input file(s). If several inputs: separate them by white space. Example: \"data.nii.gz\"',\n required = True)\n optional = parser.add_argument_group('OPTIONAL ARGUMENTS')\n optional.add_argument(\n '-h',\n '--help',\n action='help',\n help='Show this help message and exit')\n optional.add_argument(\n '-o',\n metavar=Metavar.file,\n help='Output file. Example: data_pad.nii.gz',\n required = False)\n\n image = parser.add_argument_group('IMAGE OPERATIONS')\n image.add_argument(\n '-pad',\n metavar=Metavar.list,\n help='Pad 3D image. Specify padding as: \"x,y,z\" (in voxel). Example: \"0,0,1\"',\n required = False)\n image.add_argument(\n '-pad-asym',\n metavar=Metavar.list,\n help='Pad 3D image with asymmetric padding. Specify padding as: \"x_i,x_f,y_i,y_f,z_i,z_f\" (in voxel). '\n 'Example: \"0,0,5,10,1,1\"',\n required = False)\n image.add_argument(\n '-split',\n help='Split data along the specified dimension. The suffix _DIM+NUMBER will be added to the intput file name.',\n required = False,\n choices = ('x', 'y', 'z', 't'))\n image.add_argument(\n '-concat',\n help='Concatenate data along the specified dimension',\n required = False,\n choices = ('x', 'y', 'z', 't'))\n image.add_argument(\n '-remove-vol',\n metavar=Metavar.list,\n help='Remove specific volumes from a 4d volume. Separate with \",\". Example: \"0,5,10\"',\n required=False)\n image.add_argument(\n '-keep-vol',\n metavar=Metavar.list,\n help='Keep specific volumes from a 4d volume (remove others). Separate with \",\". Example: \"1,2,3,11\"',\n required=False)\n image.add_argument(\n '-type',\n help='Change file type',\n required = False,\n choices = ('uint8','int16','int32','float32','complex64','float64','int8','uint16','uint32','int64','uint64'))\n\n header = parser.add_argument_group('HEADER OPERATIONS')\n header.add_argument(\n '-copy-header',\n metavar=Metavar.file,\n help='Copy the header of the source image (specified in -i) to the destination image (specified here). 
'\n 'Example: data_dest.nii.gz',\n required = False)\n\n orientation = parser.add_argument_group('ORIENTATION OPERATIONS')\n orientation.add_argument(\n '-getorient',\n help='Get orientation of the input image',\n action='store_true',\n required=False)\n orientation.add_argument(\n '-setorient',\n help='Set orientation of the input image (only modifies the header).',\n choices='RIP LIP RSP LSP RIA LIA RSA LSA IRP ILP SRP SLP IRA ILA SRA SLA RPI LPI RAI LAI RPS LPS RAS LAS PRI PLI ARI ALI PRS PLS ARS ALS IPR SPR IAR SAR IPL SPL IAL SAL PIR PSR AIR ASR PIL PSL AIL ASL'.split(),\n required = False)\n orientation.add_argument(\n '-setorient-data',\n help='Set orientation of the input image\\'s data (does NOT modify the header, but the data). Use with care !',\n choices='RIP LIP RSP LSP RIA LIA RSA LSA IRP ILP SRP SLP IRA ILA SRA SLA RPI LPI RAI LAI RPS LPS RAS LAS PRI PLI ARI ALI PRS PLS ARS ALS IPR SPR IAR SAR IPL SPL IAL SAL PIR PSR AIR ASR PIL PSL AIL ASL'.split(),\n required = False)\n\n multi = parser.add_argument_group('MULTI-COMPONENT OPERATIONS ON ITK COMPOSITE WARPING FIELDS')\n multi.add_argument(\n '-mcs',\n action='store_true',\n help='Multi-component split: Split ITK warping field into three separate displacement fields. '\n 'The suffix _X, _Y and _Z will be added to the input file name.',\n required=False)\n multi.add_argument(\n '-omc',\n action='store_true',\n help='Multi-component merge: Merge inputted images into one multi-component image. Requires several inputs.',\n required=False)\n\n warping = parser.add_argument_group('WARPING FIELD OPERATIONSWarping field operations:')\n warping.add_argument(\n '-display-warp',\n action='store_true',\n help='Create a grid and deform it using provided warping field.',\n required=False)\n\n misc = parser.add_argument_group('Misc')\n misc.add_argument(\n '-v',\n type=int,\n help='Verbose. 0: nothing. 1: basic. 
2: extended.',\n required=False,\n default=1,\n choices=(0, 1, 2))\n\n return parser\n\n\ndef main(args=None):\n \"\"\"\n Main function\n :param args:\n :return:\n \"\"\"\n # initializations\n output_type = None\n dim_list = ['x', 'y', 'z', 't']\n\n # Get parser args\n if args is None:\n args = None if sys.argv[1:] else ['--help']\n parser = get_parser()\n arguments = parser.parse_args(args=args)\n fname_in = arguments.i\n n_in = len(fname_in)\n verbose = arguments.v\n sct.init_sct(log_level=verbose, update=True) # Update log level\n\n if arguments.o is not None:\n fname_out = arguments.o\n else:\n fname_out = None\n\n # run command\n if arguments.concat is not None:\n dim = arguments.concat\n assert dim in dim_list\n dim = dim_list.index(dim)\n im_out = [concat_data(fname_in, dim)] # TODO: adapt to fname_in\n\n elif arguments.copy_header is not None:\n im_in = Image(fname_in[0])\n im_dest = Image(arguments.copy_header)\n im_dest_new = im_in.copy()\n im_dest_new.data = im_dest.data.copy()\n # im_dest.header = im_in.header\n im_dest_new.absolutepath = im_dest.absolutepath\n im_out = [im_dest_new]\n fname_out = arguments.copy_header\n\n elif arguments.display_warp:\n im_in = fname_in[0]\n visualize_warp(im_in, fname_grid=None, step=3, rm_tmp=True)\n im_out = None\n\n elif arguments.getorient:\n im_in = Image(fname_in[0])\n orient = im_in.orientation\n im_out = None\n\n elif arguments.keep_vol is not None:\n index_vol = (arguments.keep_vol).split(',')\n for iindex_vol, vol in enumerate(index_vol):\n index_vol[iindex_vol] = int(vol)\n im_in = Image(fname_in[0])\n im_out = [remove_vol(im_in, index_vol, todo='keep')]\n\n elif arguments.mcs:\n im_in = Image(fname_in[0])\n if n_in != 1:\n sct.printv(parser.usage.generate(error='ERROR: -mcs need only one input'))\n if len(im_in.data.shape) != 5:\n sct.printv(parser.usage.generate(error='ERROR: -mcs input need to be a multi-component image'))\n im_out = multicomponent_split(im_in)\n\n elif arguments.omc:\n im_ref = Image(fname_in[0])\n for fname in fname_in:\n im = Image(fname)\n if im.data.shape != im_ref.data.shape:\n sct.printv(parser.usage.generate(error='ERROR: -omc inputs need to have all the same shapes'))\n del im\n im_out = [multicomponent_merge(fname_in)] # TODO: adapt to fname_in\n\n elif arguments.pad is not None:\n im_in = Image(fname_in[0])\n ndims = len(im_in.data.shape)\n if ndims != 3:\n sct.printv('ERROR: you need to specify a 3D input file.', 1, 'error')\n return\n\n pad_arguments = arguments.pad.split(',')\n if len(pad_arguments) != 3:\n sct.printv('ERROR: you need to specify 3 padding values.', 1, 'error')\n\n padx, pady, padz = pad_arguments\n padx, pady, padz = int(padx), int(pady), int(padz)\n im_out = [pad_image(im_in, pad_x_i=padx, pad_x_f=padx, pad_y_i=pady,\n pad_y_f=pady, pad_z_i=padz, pad_z_f=padz)]\n\n elif arguments.pad_asym is not None:\n im_in = Image(fname_in[0])\n ndims = len(im_in.data.shape)\n if ndims != 3:\n sct.printv('ERROR: you need to specify a 3D input file.', 1, 'error')\n return\n\n pad_arguments = arguments.pad_asym.split(',')\n if len(pad_arguments) != 6:\n sct.printv('ERROR: you need to specify 6 padding values.', 1, 'error')\n\n padxi, padxf, padyi, padyf, padzi, padzf = pad_arguments\n padxi, padxf, padyi, padyf, padzi, padzf = int(padxi), int(padxf), int(padyi), int(padyf), int(padzi), int(padzf)\n im_out = [pad_image(im_in, pad_x_i=padxi, pad_x_f=padxf, pad_y_i=padyi, pad_y_f=padyf, pad_z_i=padzi, pad_z_f=padzf)]\n\n elif arguments.remove_vol is not None:\n index_vol = 
(arguments.remove_vol).split(',')\n for iindex_vol, vol in enumerate(index_vol):\n index_vol[iindex_vol] = int(vol)\n im_in = Image(fname_in[0])\n im_out = [remove_vol(im_in, index_vol, todo='remove')]\n\n elif arguments.setorient is not None:\n sct.printv(fname_in[0])\n im_in = Image(fname_in[0])\n im_out = [msct_image.change_orientation(im_in, arguments.setorient)]\n\n elif arguments.setorient_data is not None:\n im_in = Image(fname_in[0])\n im_out = [msct_image.change_orientation(im_in, arguments.setorient_data, data_only=True)]\n\n elif arguments.split is not None:\n dim = arguments.split\n assert dim in dim_list\n im_in = Image(fname_in[0])\n dim = dim_list.index(dim)\n im_out = split_data(im_in, dim)\n\n elif arguments.type is not None:\n output_type = arguments.type\n im_in = Image(fname_in[0])\n im_out = [im_in] # TODO: adapt to fname_in\n\n else:\n im_out = None\n sct.printv(parser.usage.generate(error='ERROR: you need to specify an operation to do on the input image'))\n\n # in case fname_out is not defined, use first element of input file name list\n if fname_out is None:\n fname_out = fname_in[0]\n\n # Write output\n if im_out is not None:\n sct.printv('Generate output files...', verbose)\n # if only one output\n if len(im_out) == 1 and not '-split' in arguments:\n im_out[0].save(fname_out, dtype=output_type, verbose=verbose)\n sct.display_viewer_syntax([fname_out], verbose=verbose)\n if arguments.mcs:\n # use input file name and add _X, _Y _Z. Keep the same extension\n l_fname_out = []\n for i_dim in range(3):\n l_fname_out.append(sct.add_suffix(fname_out or fname_in[0], '_' + dim_list[i_dim].upper()))\n im_out[i_dim].save(l_fname_out[i_dim], verbose=verbose)\n sct.display_viewer_syntax(fname_out)\n if arguments.split is not None:\n # use input file name and add _\"DIM+NUMBER\". 
Keep the same extension\n l_fname_out = []\n for i, im in enumerate(im_out):\n l_fname_out.append(sct.add_suffix(fname_out or fname_in[0], '_' + dim_list[dim].upper() + str(i).zfill(4)))\n im.save(l_fname_out[i])\n sct.display_viewer_syntax(l_fname_out)\n\n elif arguments.getorient:\n sct.printv(orient)\n\n elif arguments.display_warp:\n sct.printv('Warping grid generated.', verbose, 'info')\n\n\ndef pad_image(im, pad_x_i=0, pad_x_f=0, pad_y_i=0, pad_y_f=0, pad_z_i=0, pad_z_f=0):\n\n nx, ny, nz, nt, px, py, pz, pt = im.dim\n pad_x_i, pad_x_f, pad_y_i, pad_y_f, pad_z_i, pad_z_f = int(pad_x_i), int(pad_x_f), int(pad_y_i), int(pad_y_f), int(pad_z_i), int(pad_z_f)\n\n if len(im.data.shape) == 2:\n new_shape = list(im.data.shape)\n new_shape.append(1)\n im.data = im.data.reshape(new_shape)\n\n # initialize padded_data, with same type as im.data\n padded_data = np.zeros((nx + pad_x_i + pad_x_f, ny + pad_y_i + pad_y_f, nz + pad_z_i + pad_z_f), dtype=im.data.dtype)\n\n if pad_x_f == 0:\n pad_x_f = None\n elif pad_x_f > 0:\n pad_x_f *= -1\n if pad_y_f == 0:\n pad_y_f = None\n elif pad_y_f > 0:\n pad_y_f *= -1\n if pad_z_f == 0:\n pad_z_f = None\n elif pad_z_f > 0:\n pad_z_f *= -1\n\n padded_data[pad_x_i:pad_x_f, pad_y_i:pad_y_f, pad_z_i:pad_z_f] = im.data\n im_out = im.copy()\n # TODO: Do not copy the Image(), because the dim field and hdr.get_data_shape() will not be updated properly.\n # better to just create a new Image() from scratch.\n im_out.data = padded_data # done after the call of the function\n im_out.absolutepath = sct.add_suffix(im_out.absolutepath, \"_pad\")\n\n # adapt the origin in the sform and qform matrix\n new_origin = np.dot(im_out.hdr.get_qform(), [-pad_x_i, -pad_y_i, -pad_z_i, 1])\n\n im_out.hdr.structarr['qoffset_x'] = new_origin[0]\n im_out.hdr.structarr['qoffset_y'] = new_origin[1]\n im_out.hdr.structarr['qoffset_z'] = new_origin[2]\n im_out.hdr.structarr['srow_x'][-1] = new_origin[0]\n im_out.hdr.structarr['srow_y'][-1] = new_origin[1]\n im_out.hdr.structarr['srow_z'][-1] = new_origin[2]\n\n return im_out\n\n\ndef split_data(im_in, dim, squeeze_data=True):\n \"\"\"\n Split data\n :param im_in: input image.\n :param dim: dimension: 0, 1, 2, 3.\n :return: list of split images\n \"\"\"\n\n dim_list = ['x', 'y', 'z', 't']\n # Parse file name\n # Open first file.\n data = im_in.data\n # in case input volume is 3d and dim=t, create new axis\n if dim + 1 > len(np.shape(data)):\n data = data[..., np.newaxis]\n # in case splitting along the last dim, make sure to remove the last dim to avoid singleton\n if dim + 1 == len(np.shape(data)):\n if squeeze_data:\n do_reshape = True\n else:\n do_reshape = False\n else:\n do_reshape = False\n # Split data into list\n data_split = np.array_split(data, data.shape[dim], dim)\n # Write each file\n im_out_list = []\n for idx_img, dat in enumerate(data_split):\n im_out = msct_image.empty_like(im_in)\n if do_reshape:\n im_out.data = dat.reshape(tuple([ x for (idx_shape, x) in enumerate(data.shape) if idx_shape != dim]))\n else:\n im_out.data = dat\n im_out.absolutepath = sct.add_suffix(im_in.absolutepath, \"_{}{}\".format(dim_list[dim].upper(), str(idx_img).zfill(4)))\n im_out_list.append(im_out)\n\n return im_out_list\n\n\ndef concat_data(fname_in_list, dim, pixdim=None, squeeze_data=False):\n \"\"\"\n Concatenate data\n :param im_in_list: list of Images or image filenames\n :param dim: dimension: 0, 1, 2, 3.\n :param pixdim: pixel resolution to join to image header\n :param squeeze_data: bool: if True, remove the last dim if it is a 
singleton.\n :return im_out: concatenated image\n \"\"\"\n # WARNING: calling concat_data in python instead of in command line causes a non understood issue (results are different with both options)\n # from numpy import concatenate, expand_dims\n\n dat_list = []\n data_concat_list = []\n\n # check if shape of first image is smaller than asked dim to concatenate along\n # data0 = Image(fname_in_list[0]).data\n # if len(data0.shape) <= dim:\n # expand_dim = True\n # else:\n # expand_dim = False\n\n for i, fname in enumerate(fname_in_list):\n # if there is more than 100 images to concatenate, then it does it iteratively to avoid memory issue.\n if i != 0 and i % 100 == 0:\n data_concat_list.append(np.concatenate(dat_list, axis=dim))\n im = Image(fname)\n dat = im.data\n # if image shape is smaller than asked dim, then expand dim\n if len(dat.shape) <= dim:\n dat = np.expand_dims(dat, dim)\n dat_list = [dat]\n del im\n del dat\n else:\n im = Image(fname)\n dat = im.data\n # if image shape is smaller than asked dim, then expand dim\n if len(dat.shape) <= dim:\n dat = np.expand_dims(dat, dim)\n dat_list.append(dat)\n del im\n del dat\n if data_concat_list:\n data_concat_list.append(np.concatenate(dat_list, axis=dim))\n data_concat = np.concatenate(data_concat_list, axis=dim)\n else:\n data_concat = np.concatenate(dat_list, axis=dim)\n # write file\n im_out = msct_image.empty_like(Image(fname_in_list[0]))\n im_out.data = data_concat\n if isinstance(fname_in_list[0], str):\n im_out.absolutepath = sct.add_suffix(fname_in_list[0], '_concat')\n else:\n if fname_in_list[0].absolutepath:\n im_out.absolutepath = sct.add_suffix(fname_in_list[0].absolutepath, '_concat')\n\n if pixdim is not None:\n im_out.hdr['pixdim'] = pixdim\n\n if squeeze_data and data_concat.shape[dim] == 1:\n # remove the last dim if it is a singleton.\n im_out.data = data_concat.reshape(tuple([ x for (idx_shape, x) in enumerate(data_concat.shape) if idx_shape != dim]))\n else:\n im_out.data = data_concat\n\n return im_out\n\n\ndef remove_vol(im_in, index_vol_user, todo):\n \"\"\"\n Remove specific volumes from 4D data.\n :param im_in: [str] input image.\n :param index_vol: [int] list of indices corresponding to volumes to remove\n :param todo: {keep, remove} what to do\n :return: 4d volume\n \"\"\"\n # get data\n data = im_in.data\n nt = data.shape[3]\n # define index list of volumes to keep/remove\n if todo == 'remove':\n index_vol = [i for i in range(0, nt) if i not in index_vol_user]\n elif todo == 'keep':\n index_vol = index_vol_user\n else:\n sct.printv('ERROR: wrong assignment of variable \"todo\"', 1, 'error')\n # define new 4d matrix with selected volumes\n data_out = data[:, :, :, index_vol]\n # save matrix inside new Image object\n im_out = im_in.copy()\n im_out.data = data_out\n return im_out\n\n\ndef concat_warp2d(fname_list, fname_warp3d, fname_dest):\n \"\"\"\n Concatenate 2d warping fields into a 3d warping field along z dimension. 
The 3rd dimension of the resulting warping\n field will be zeroed.\n :param\n fname_list: list of 2d warping fields (along X and Y).\n fname_warp3d: output name of 3d warping field\n fname_dest: 3d destination file (used to copy header information)\n :return: none\n \"\"\"\n from numpy import zeros\n import nibabel as nib\n\n # get dimensions\n # nib.load(fname_list[0])\n # im_0 = Image(fname_list[0])\n nx, ny = nib.load(fname_list[0]).shape[0:2]\n nz = len(fname_list)\n # warp3d = tuple([nx, ny, nz, 1, 3])\n warp3d = zeros([nx, ny, nz, 1, 3])\n for iz, fname in enumerate(fname_list):\n warp2d = nib.load(fname).get_data()\n warp3d[:, :, iz, 0, 0] = warp2d[:, :, 0, 0, 0]\n warp3d[:, :, iz, 0, 1] = warp2d[:, :, 0, 0, 1]\n del warp2d\n # save new image\n im_dest = nib.load(fname_dest)\n affine_dest = im_dest.get_affine()\n im_warp3d = nib.Nifti1Image(warp3d, affine_dest)\n # set \"intent\" code to vector, to be interpreted as warping field\n im_warp3d.header.set_intent('vector', (), '')\n nib.save(im_warp3d, fname_warp3d)\n # copy header from 2d warping field\n #\n # im_dest = Image(fname_dest)\n # im_warp3d = im_dest.copy()\n # im_warp3d.data = warp3d.astype('float32')\n # # add dimension between 3rd and 5th\n # im_warp3d.hdr.set_data_shape([nx, ny, nz, 1, 3])\n #\n # im_warp3d.hdr.set_intent('vector', (), '')\n # im_warp3d.absolutepath = fname_warp3d\n # # save 3d warping field\n # im_warp3d.save()\n # return im_out\n\n\ndef multicomponent_split(im):\n \"\"\"\n Convert composite image (e.g., ITK warping field, 5dim) into several 3d volumes.\n Replaces \"c3d -mcs warp_comp.nii -oo warp_vecx.nii warp_vecy.nii warp_vecz.nii\"\n :param im:\n :return:\n \"\"\"\n data = im.data\n assert len(data.shape) == 5\n data_out = []\n for i in range(data.shape[-1]):\n dat_out = data[:, :, :, :, i]\n '''\n while dat_out.shape[-1] == 1:\n dat_out = reshape(dat_out, dat_out.shape[:-1])\n '''\n data_out.append(dat_out) # .astype('float32'))\n im_out = [im.copy() for j in range(len(data_out))]\n for i, im in enumerate(im_out):\n im.data = data_out[i]\n im.hdr.set_intent('vector', (), '')\n im.absolutepath = sct.add_suffix(im.absolutepath, '_{}'.format(i))\n return im_out\n\n\ndef multicomponent_merge(fname_list):\n from numpy import zeros\n # WARNING: output multicomponent is not optimal yet, some issues may be related to the use of this function\n\n im_0 = Image(fname_list[0])\n new_shape = list(im_0.data.shape)\n if len(new_shape) == 3:\n new_shape.append(1)\n new_shape.append(len(fname_list))\n new_shape = tuple(new_shape)\n\n data_out = zeros(new_shape)\n for i, fname in enumerate(fname_list):\n im = Image(fname)\n dat = im.data\n if len(dat.shape) == 2:\n data_out[:, :, 0, 0, i] = dat.astype('float32')\n elif len(dat.shape) == 3:\n data_out[:, :, :, 0, i] = dat.astype('float32')\n elif len(dat.shape) == 4:\n data_out[:, :, :, :, i] = dat.astype('float32')\n del im\n del dat\n im_out = im_0.copy()\n im_out.data = data_out.astype('float32')\n im_out.hdr.set_intent('vector', (), '')\n im_out.absolutepath = sct.add_suffix(im_out.absolutepath, '_multicomponent')\n return im_out\n\n\ndef visualize_warp(fname_warp, fname_grid=None, step=3, rm_tmp=True):\n if fname_grid is None:\n from numpy import zeros\n tmp_dir = sct.tmp_create()\n im_warp = Image(fname_warp)\n status, out = sct.run(['fslhd', fname_warp])\n curdir = os.getcwd()\n os.chdir(tmp_dir)\n dim1 = 'dim1 '\n dim2 = 'dim2 '\n dim3 = 'dim3 '\n nx = int(out[out.find(dim1):][len(dim1):out[out.find(dim1):].find('\\n')])\n ny = 
int(out[out.find(dim2):][len(dim2):out[out.find(dim2):].find('\\n')])\n nz = int(out[out.find(dim3):][len(dim3):out[out.find(dim3):].find('\\n')])\n sq = zeros((step, step))\n sq[step - 1] = 1\n sq[:, step - 1] = 1\n dat = zeros((nx, ny, nz))\n for i in range(0, dat.shape[0], step):\n for j in range(0, dat.shape[1], step):\n for k in range(dat.shape[2]):\n if dat[i:i + step, j:j + step, k].shape == (step, step):\n dat[i:i + step, j:j + step, k] = sq\n fname_grid = 'grid_' + str(step) + '.nii.gz'\n im_grid = Image(param=dat)\n grid_hdr = im_warp.hdr\n im_grid.hdr = grid_hdr\n im_grid.absolutepath = fname_grid\n im_grid.save()\n fname_grid_resample = sct.add_suffix(fname_grid, '_resample')\n sct.run(['sct_resample', '-i', fname_grid, '-f', '3x3x1', '-x', 'nn', '-o', fname_grid_resample])\n fname_grid = os.path.join(tmp_dir, fname_grid_resample)\n os.chdir(curdir)\n path_warp, file_warp, ext_warp = sct.extract_fname(fname_warp)\n grid_warped = os.path.join(path_warp, sct.extract_fname(fname_grid)[1] + '_' + file_warp + ext_warp)\n sct.run(['sct_apply_transfo', '-i', fname_grid, '-d', fname_grid, '-w', fname_warp, '-o', grid_warped])\n if rm_tmp:\n sct.rmtree(tmp_dir)\n\n\nif __name__ == \"__main__\":\n sct.init_sct()\n main()\n",
"id": "10988341",
"language": "Python",
"matching_score": 4.646671295166016,
"max_stars_count": 0,
"path": "scripts/sct_image.py"
},
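As a companion to the concat_data() helper in scripts/sct_image.py above, here is a minimal sketch of the same idea specialised to the time axis, assuming only numpy and nibabel (no SCT Image class); the file names in the usage line are hypothetical placeholders.

    import numpy as np
    import nibabel as nib

    def concat_along_t(fnames):
        # load every volume, promote 3-D arrays to 4-D, then stack along axis 3
        vols = []
        for fname in fnames:                     # fnames are hypothetical paths
            dat = np.asanyarray(nib.load(fname).dataobj)
            if dat.ndim == 3:
                dat = dat[..., np.newaxis]       # add a singleton time axis
            vols.append(dat)
        affine = nib.load(fnames[0]).affine      # keep the geometry of the first input
        return nib.Nifti1Image(np.concatenate(vols, axis=3), affine)

    # usage (hypothetical file names):
    # nib.save(concat_along_t(['vol1.nii.gz', 'vol2.nii.gz']), 'vols_concat.nii.gz')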
{
"content": "#!/usr/bin/env python\n#########################################################################################\n#\n# Perform mathematical operations on images\n#\n# ---------------------------------------------------------------------------------------\n# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>\n# Authors: <NAME>, <NAME>\n#\n# About the license: see the file LICENSE.TXT\n#########################################################################################\n\nfrom __future__ import division, absolute_import\n\nimport os\nimport sys\nimport numpy as np\nimport argparse\n\nfrom spinalcordtoolbox.image import Image\nfrom spinalcordtoolbox.utils import Metavar, SmartFormatter\n\nfrom sct_utils import printv, extract_fname\nimport sct_utils as sct\n\n\nALMOST_ZERO = 0.000000001\n\n\ndef get_parser():\n\n parser = argparse.ArgumentParser(\n description='Perform mathematical operations on images. Some inputs can be either a number or a 4d image or '\n 'several 3d images separated with \",\"',\n add_help=None,\n formatter_class=SmartFormatter,\n prog=os.path.basename(__file__).strip(\".py\"))\n mandatory = parser.add_argument_group(\"MANDATORY ARGUMENTS\")\n mandatory.add_argument(\n \"-i\",\n metavar=Metavar.file,\n help=\"Input file. Example: data.nii.gz\",\n required=True)\n mandatory.add_argument(\n \"-o\",\n metavar=Metavar.file,\n help='Output file. Example: data_mean.nii.gz',\n required=True)\n\n optional = parser.add_argument_group(\"OPTIONAL ARGUMENTS\")\n optional.add_argument(\n \"-h\",\n \"--help\",\n action=\"help\",\n help=\"Show this help message and exit\")\n\n basic = parser.add_argument_group('BASIC OPERATIONS')\n basic.add_argument(\n \"-add\",\n metavar='',\n nargs=\"+\",\n help='Add following input. Can be a number or multiple images (separated with space).',\n required=False)\n basic.add_argument(\n \"-sub\",\n metavar='',\n nargs=\"+\",\n help='Subtract following input. Can be a number or an image.',\n required=False)\n basic.add_argument(\n \"-mul\",\n metavar='',\n nargs=\"+\",\n help='Multiply by following input. Can be a number or multiple images (separated with space).',\n required=False)\n basic.add_argument(\n \"-div\",\n metavar='',\n nargs=\"+\",\n help='Divide by following input. Can be a number or an image.',\n required=False)\n basic.add_argument(\n '-mean',\n help='Average data across dimension.',\n required=False,\n choices=('x', 'y', 'z', 't'))\n basic.add_argument(\n '-rms',\n help='Compute root-mean-squared across dimension.',\n required=False,\n choices=('x', 'y', 'z', 't'))\n basic.add_argument(\n '-std',\n help='Compute STD across dimension.',\n required=False,\n choices=('x', 'y', 'z', 't'))\n basic.add_argument(\n \"-bin\",\n type=float,\n metavar=Metavar.float,\n help='Binarize image using specified threshold. Example: 0.5',\n required=False)\n\n thresholding = parser.add_argument_group(\"THRESHOLDING METHODS\")\n thresholding.add_argument(\n '-otsu',\n type=int,\n metavar=Metavar.int,\n help='Threshold image using Otsu algorithm.\\nnbins: number of bins. Example: 256',\n required=False)\n thresholding.add_argument(\n \"-otsu-adap\",\n metavar=Metavar.list,\n help=\"Threshold image using Adaptive Otsu algorithm.\\nblock_size:\\noffset:\",\n required=False)\n thresholding.add_argument(\n \"-otsu-median\",\n help='R|Threshold image using Median Otsu algorithm. Separate with \",\" Example: 2,3'\n '\\n Size of the median filter. Example: 2'\n '\\n Number of iterations. 
Example: 3\\n',\n required=False)\n thresholding.add_argument(\n '-percent',\n type=int,\n help=\"Threshold image using percentile of its histogram.\",\n required=False)\n thresholding.add_argument(\n \"-thr\",\n type=float,\n metavar=Metavar.float,\n help='Use following number to threshold image (zero below number).',\n required=False)\n\n mathematical = parser.add_argument_group(\"MATHEMATICAL MORPHOLOGY\")\n mathematical.add_argument(\n '-dilate',\n metavar='',\n help='Dilate binary image. If only one input is given, structured element is a ball with input radius (in '\n 'voxel). If comma-separated inputs are given (Example: 2,4,5 ), structured element is a box with input '\n 'dimensions.',\n required=False)\n mathematical.add_argument(\n '-erode',\n metavar='',\n help='Erode binary image. If only one input is given, structured element is a ball with input radius (in '\n 'voxel). If comma-separated inputs are given (Example: 2,4,5), structured element is a box with input '\n 'dimensions.',\n required=False)\n\n filtering = parser.add_argument_group(\"FILTERING METHODS\")\n filtering.add_argument(\n \"-smooth\",\n metavar='',\n help='Gaussian smoothing filter with specified standard deviations in mm for each axis (Example: 2,2,1) or '\n 'single value for all axis (Example: 2).',\n required = False)\n filtering.add_argument(\n '-laplacian',\n nargs=\"+\",\n metavar='',\n help='Laplacian filtering with specified standard deviations in mm for all axes (Example: 2).',\n required = False)\n filtering.add_argument(\n '-denoise',\n help='R|Non-local means adaptative denoising from P. Coupe et al. as implemented in dipy. Separate with \". Example: p=1,b=3\\n'\n ' p: (patch radius) similar patches in the non-local means are searched for locally, inside a cube of side 2*p+1 centered at each voxel of interest. Default: p=1\\n'\n ' b: (block radius) the size of the block to be used (2*b+1) in the blockwise non-local means implementation. Default: b=5 '\n ' Note, block radius must be smaller than the smaller image dimension: default value is lowered for small images)\\n'\n 'To use default parameters, write -denoise 1',\n required=False)\n\n similarity = parser.add_argument_group(\"SIMILARITY METRIC\")\n similarity.add_argument(\n '-mi',\n metavar=Metavar.file,\n help='Compute the mutual information (MI) between both input files (-i and -mi) as in: '\n 'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mutual_info_score.html',\n required=False)\n similarity.add_argument(\n '-minorm',\n metavar=Metavar.file,\n help='Compute the normalized mutual information (MI) between both input files (-i and -mi) as in: '\n 'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.normalized_mutual_info_score.html',\n required=False)\n similarity.add_argument(\n '-corr',\n metavar=Metavar.file,\n help='Compute the cross correlation (CC) between both input files (-i and -cc).',\n required=False)\n\n misc = parser.add_argument_group(\"MISC\")\n misc.add_argument(\n '-symmetrize',\n type=int,\n help='Symmetrize data along the specified dimension.',\n required=False,\n choices=(0, 1, 2))\n misc.add_argument(\n '-type',\n required=False,\n help='Output type.',\n choices=('uint8', 'int16', 'int32', 'float32', 'complex64', 'float64', 'int8', 'uint16', 'uint32', 'int64',\n 'uint64'))\n misc.add_argument(\n \"-v\",\n type=int,\n help=\"Verbose. 0: nothing. 1: basic. 
2: extended.\",\n required=False,\n default=1,\n choices=(0, 1, 2))\n\n return parser\n\n\n# MAIN\n# ==========================================================================================\ndef main(args=None):\n \"\"\"\n Main function\n :param args:\n :return:\n \"\"\"\n dim_list = ['x', 'y', 'z', 't']\n\n # Get parser args\n if args is None:\n args = None if sys.argv[1:] else ['--help']\n parser = get_parser()\n arguments = parser.parse_args(args=args)\n fname_in = arguments.i\n fname_out = arguments.o\n verbose = arguments.v\n sct.init_sct(log_level=verbose, update=True) # Update log level\n if '-type' in arguments:\n output_type = arguments.type\n else:\n output_type = None\n\n # Open file(s)\n im = Image(fname_in)\n data = im.data # 3d or 4d numpy array\n dim = im.dim\n\n # run command\n if arguments.otsu is not None:\n param = arguments.otsu\n data_out = otsu(data, param)\n\n elif arguments.otsu_adap is not None:\n param = convert_list_str(arguments.otsu_adap, \"int\")\n data_out = otsu_adap(data, param[0], param[1])\n\n elif arguments.otsu_median is not None:\n param = convert_list_str(arguments.otsu_median, \"int\")\n data_out = otsu_median(data, param[0], param[1])\n\n elif arguments.thr is not None:\n param = arguments.thr\n data_out = threshold(data, param)\n\n elif arguments.percent is not None:\n param = arguments.percent\n data_out = perc(data, param)\n\n elif arguments.bin is not None:\n bin_thr = arguments.bin\n data_out = binarise(data, bin_thr=bin_thr)\n\n elif arguments.add is not None:\n from numpy import sum\n data2 = get_data_or_scalar(arguments.add, data)\n data_concat = concatenate_along_4th_dimension(data, data2)\n data_out = sum(data_concat, axis=3)\n\n elif arguments.sub is not None:\n data2 = get_data_or_scalar(arguments.sub, data)\n data_out = data - data2\n\n elif arguments.laplacian is not None:\n sigmas = convert_list_str(arguments.laplacian, \"float\")\n if len(sigmas) == 1:\n sigmas = [sigmas for i in range(len(data.shape))]\n elif len(sigmas) != len(data.shape):\n printv(parser.usage.generate(error='ERROR: -laplacian need the same number of inputs as the number of image dimension OR only one input'))\n # adjust sigma based on voxel size\n sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]\n # smooth data\n data_out = laplacian(data, sigmas)\n\n elif arguments.mul is not None:\n from numpy import prod\n data2 = get_data_or_scalar(arguments.mul, data)\n data_concat = concatenate_along_4th_dimension(data, data2)\n data_out = prod(data_concat, axis=3)\n\n elif arguments.div is not None:\n from numpy import divide\n data2 = get_data_or_scalar(arguments.div, data)\n data_out = divide(data, data2)\n\n elif arguments.mean is not None:\n from numpy import mean\n dim = dim_list.index(arguments.mean)\n if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t\n data = data[..., np.newaxis]\n data_out = mean(data, dim)\n\n elif arguments.rms is not None:\n from numpy import mean, sqrt, square\n dim = dim_list.index(arguments.rms)\n if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t\n data = data[..., np.newaxis]\n data_out = sqrt(mean(square(data.astype(float)), dim))\n\n elif arguments.std is not None:\n from numpy import std\n dim = dim_list.index(arguments.std)\n if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t\n data = data[..., np.newaxis]\n data_out = std(data, dim, ddof=1)\n\n elif arguments.smooth is not None:\n sigmas = convert_list_str(arguments.smooth, \"float\")\n if len(sigmas) == 1:\n 
sigmas = [sigmas[0] for i in range(len(data.shape))]\n elif len(sigmas) != len(data.shape):\n printv(parser.usage.generate(error='ERROR: -smooth need the same number of inputs as the number of image dimension OR only one input'))\n # adjust sigma based on voxel size\n sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]\n # smooth data\n data_out = smooth(data, sigmas)\n\n elif arguments.dilate is not None:\n data_out = dilate(data, convert_list_str(arguments.dilate, \"int\"))\n\n elif arguments.erode is not None:\n data_out = erode(data, convert_list_str(arguments.erode))\n\n elif arguments.denoise is not None:\n # parse denoising arguments\n p, b = 1, 5 # default arguments\n list_denoise = (arguments.denoise).split(\",\")\n for i in list_denoise:\n if 'p' in i:\n p = int(i.split('=')[1])\n if 'b' in i:\n b = int(i.split('=')[1])\n data_out = denoise_nlmeans(data, patch_radius=p, block_radius=b)\n\n elif arguments.symmetrize is not None:\n data_out = (data + data[list(range(data.shape[0] - 1, -1, -1)), :, :]) / float(2)\n\n elif arguments.mi is not None:\n # input 1 = from flag -i --> im\n # input 2 = from flag -mi\n im_2 = Image(arguments.mi)\n compute_similarity(im.data, im_2.data, fname_out, metric='mi', verbose=verbose)\n data_out = None\n\n elif arguments.minorm is not None:\n im_2 = Image(arguments.minorm)\n compute_similarity(im.data, im_2.data, fname_out, metric='minorm', verbose=verbose)\n data_out = None\n\n elif arguments.corr is not None:\n # input 1 = from flag -i --> im\n # input 2 = from flag -mi\n im_2 = Image(arguments.corr)\n compute_similarity(im.data, im_2.data, fname_out, metric='corr', verbose=verbose)\n data_out = None\n\n # if no flag is set\n else:\n data_out = None\n printv(parser.usage.generate(error='ERROR: you need to specify an operation to do on the input image'))\n\n if data_out is not None:\n # Write output\n nii_out = Image(fname_in) # use header of input file\n nii_out.data = data_out\n nii_out.save(fname_out, dtype=output_type)\n # TODO: case of multiple outputs\n # assert len(data_out) == n_out\n # if n_in == n_out:\n # for im_in, d_out, fn_out in zip(nii, data_out, fname_out):\n # im_in.data = d_out\n # im_in.absolutepath = fn_out\n # if \"-w\" in arguments:\n # im_in.hdr.set_intent('vector', (), '')\n # im_in.save()\n # elif n_out == 1:\n # nii[0].data = data_out[0]\n # nii[0].absolutepath = fname_out[0]\n # if \"-w\" in arguments:\n # nii[0].hdr.set_intent('vector', (), '')\n # nii[0].save()\n # elif n_out > n_in:\n # for dat_out, name_out in zip(data_out, fname_out):\n # im_out = nii[0].copy()\n # im_out.data = dat_out\n # im_out.absolutepath = name_out\n # if \"-w\" in arguments:\n # im_out.hdr.set_intent('vector', (), '')\n # im_out.save()\n # else:\n # printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))\n\n # display message\n if data_out is not None:\n sct.display_viewer_syntax([fname_out], verbose=verbose)\n else:\n printv('\\nDone! 
File created: ' + fname_out, verbose, 'info')\n\n\ndef convert_list_str(string_list, type):\n \"\"\"\n Receive a string and then converts it into a list of selected type\n \"\"\"\n new_type_list = (string_list).split(\",\")\n for inew_type_list, ele in enumerate(new_type_list):\n if type is \"int\":\n new_type_list[inew_type_list] = int(ele)\n elif type is \"float\":\n new_type_list[inew_type_list] = float(ele)\n\n return new_type_list\n\n\ndef otsu(data, nbins):\n from skimage.filters import threshold_otsu\n thresh = threshold_otsu(data, nbins)\n return data > thresh\n\n\ndef otsu_adap(data, block_size, offset):\n from skimage.filters import threshold_adaptive\n\n mask = data\n for iz in range(data.shape[2]):\n mask[:, :, iz] = threshold_adaptive(data[:, :, iz], block_size, offset)\n # mask[:, :, iz] = threshold_otsu(data[:, :, iz], 5)\n return mask\n\n\ndef otsu_median(data, size, n_iter):\n from dipy.segment.mask import median_otsu\n data, mask = median_otsu(data, size, n_iter)\n return mask\n\n\ndef threshold(data, thr_value):\n data[data < thr_value] = 0\n return data\n\n\ndef perc(data, perc_value):\n from numpy import percentile\n perc = percentile(data, perc_value)\n return data > perc\n\n\ndef binarise(data, bin_thr=0):\n return data > bin_thr\n\n\ndef dilate(data, radius):\n \"\"\"\n Dilate data using ball structuring element\n :param data: 2d or 3d array\n :param radius: radius of structuring element OR comma-separated int.\n :return: data dilated\n \"\"\"\n from skimage.morphology import dilation, ball\n if len(radius) == 1:\n # define structured element as a ball\n selem = ball(radius[0])\n else:\n # define structured element as a box with input dimensions\n selem = np.ones((radius[0], radius[1], radius[2]), dtype=np.dtype)\n return dilation(data, selem=selem, out=None)\n\n\ndef erode(data, radius):\n \"\"\"\n Erode data using ball structuring element\n :param data: 2d or 3d array\n :param radius: radius of structuring element\n :return: data eroded\n \"\"\"\n from skimage.morphology import erosion, ball\n if len(radius) == 1:\n # define structured element as a ball\n selem = ball(radius[0])\n else:\n # define structured element as a box with input dimensions\n selem = np.ones((radius[0], radius[1], radius[2]), dtype=np.dtype)\n return erosion(data, selem=selem, out=None)\n\n\ndef get_data(list_fname):\n \"\"\"\n Get data from list of file names\n :param list_fname:\n :return: 3D or 4D numpy array.\n \"\"\"\n try:\n nii = [Image(f_in) for f_in in list_fname]\n except Exception as e:\n sct.printv(str(e), 1, 'error') # file does not exist, exit program\n data0 = nii[0].data\n data = nii[0].data\n # check that every images have same shape\n for i in range(1, len(nii)):\n if not np.shape(nii[i].data) == np.shape(data0):\n printv('\\nWARNING: shape(' + list_fname[i] + ')=' + str(np.shape(nii[i].data)) + ' incompatible with shape(' + list_fname[0] + ')=' + str(np.shape(data0)), 1, 'warning')\n printv('\\nERROR: All input images must have same dimensions.', 1, 'error')\n else:\n data = concatenate_along_4th_dimension(data, nii[i].data)\n return data\n\n\ndef get_data_or_scalar(argument, data_in):\n \"\"\"\n Get data from list of file names (scenario 1) or scalar (scenario 2)\n :param argument: list of file names of scalar\n :param data_in: if argument is scalar, use data to get np.shape\n :return: 3d or 4d numpy array\n \"\"\"\n # try to convert argument in float\n try:\n # build data2 with same shape as data\n data_out = data_in[:, :, :] * 0 + float(argument[0])\n # if conversion 
fails, it should be a string (i.e. file name)\n except ValueError:\n data_out = get_data(argument)\n return data_out\n\n\ndef concatenate_along_4th_dimension(data1, data2):\n \"\"\"\n Concatenate two data along 4th dimension.\n :param data1: 3d or 4d array\n :param data2: 3d or 4d array\n :return data_concat: concate(data1, data2)\n \"\"\"\n if len(np.shape(data1)) == 3:\n data1 = data1[..., np.newaxis]\n if len(np.shape(data2)) == 3:\n data2 = data2[..., np.newaxis]\n return np.concatenate((data1, data2), axis=3)\n\n\ndef denoise_nlmeans(data_in, patch_radius=1, block_radius=5):\n \"\"\"\n data_in: nd_array to denoise\n for more info about patch_radius and block radius, please refer to the dipy website: http://nipy.org/dipy/reference/dipy.denoise.html#dipy.denoise.nlmeans.nlmeans\n \"\"\"\n from dipy.denoise.nlmeans import nlmeans\n from dipy.denoise.noise_estimate import estimate_sigma\n from numpy import asarray\n data_in = asarray(data_in)\n\n block_radius_max = min(data_in.shape) - 1\n block_radius = block_radius_max if block_radius > block_radius_max else block_radius\n\n sigma = estimate_sigma(data_in)\n denoised = nlmeans(data_in, sigma, patch_radius=patch_radius, block_radius=block_radius)\n\n return denoised\n\n\ndef smooth(data, sigmas):\n \"\"\"\n Smooth data by convolving Gaussian kernel\n :param data: input 3D numpy array\n :param sigmas: Kernel SD in voxel\n :return:\n \"\"\"\n assert len(data.shape) == len(sigmas)\n from scipy.ndimage.filters import gaussian_filter\n return gaussian_filter(data.astype(float), sigmas, order=0, truncate=4.0)\n\n\ndef laplacian(data, sigmas):\n \"\"\"\n Apply Laplacian filter\n \"\"\"\n assert len(data.shape) == len(sigmas)\n from scipy.ndimage.filters import gaussian_laplace\n return gaussian_laplace(data.astype(float), sigmas)\n # from scipy.ndimage.filters import laplace\n # return laplace(data.astype(float))\n\n\ndef compute_similarity(data1, data2, fname_out='', metric='', verbose=1):\n '''\n Compute a similarity metric between two images data\n :param data1: numpy.array 3D data\n :param data2: numpy.array 3D data\n :param fname_out: file name of the output file. 
Output file should be either a text file ('.txt') or a pickle file ('.pkl', '.pklz' or '.pickle')\n :param metric: 'mi' for mutual information or 'corr' for pearson correlation coefficient\n :return: None\n '''\n assert data1.size == data2.size, \"\\n\\nERROR: the data don't have the same size.\\nPlease use \\\"sct_register_multimodal -i im1.nii.gz -d im2.nii.gz -identity 1\\\" to put the input images in the same space\"\n data1_1d = data1.ravel()\n data2_1d = data2.ravel()\n # get indices of non-null voxels from the intersection of both data\n data_mult = data1_1d * data2_1d\n ind_nonnull = np.where(data_mult > ALMOST_ZERO)[0]\n # set new variables with non-null voxels\n data1_1d = data1_1d[ind_nonnull]\n data2_1d = data2_1d[ind_nonnull]\n # compute similarity metric\n if metric == 'mi':\n res = mutual_information(data1_1d, data2_1d, normalized=False)\n metric_full = 'Mutual information'\n if metric == 'minorm':\n res = mutual_information(data1_1d, data2_1d, normalized=True)\n metric_full = 'Normalized Mutual information'\n if metric == 'corr':\n res = correlation(data1_1d, data2_1d)\n metric_full = 'Pearson correlation coefficient'\n # qc output\n if verbose > 1:\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n plt.plot(data1_1d, 'b')\n plt.plot(data2_1d, 'r')\n plt.grid\n plt.title('Similarity: ' + metric_full + ' = ' + str(res))\n plt.savefig('fig_similarity.png')\n\n printv('\\n' + metric_full + ': ' + str(res), verbose, 'info')\n\n path_out, filename_out, ext_out = extract_fname(fname_out)\n if ext_out not in ['.txt', '.pkl', '.pklz', '.pickle']:\n printv('ERROR: the output file should a text file or a pickle file. Received extension: ' + ext_out, 1, 'error')\n\n elif ext_out == '.txt':\n file_out = open(fname_out, 'w')\n file_out.write(metric_full + ': \\n' + str(res))\n file_out.close()\n\n else:\n import pickle, gzip\n if ext_out == '.pklz':\n pickle.dump(res, gzip.open(fname_out, 'wb'), protocol=2)\n else:\n pickle.dump(res, open(fname_out, 'w'), protocol=2)\n\n\ndef mutual_information(x, y, nbins=32, normalized=False):\n \"\"\"\n Compute mutual information\n :param x: 1D numpy.array : flatten data from an image\n :param y: 1D numpy.array : flatten data from an image\n :param nbins: number of bins to compute the contingency matrix (only used if normalized=False)\n :return: float non negative value : mutual information\n \"\"\"\n import sklearn.metrics\n if normalized:\n mi = sklearn.metrics.normalized_mutual_info_score(x, y)\n else:\n c_xy = np.histogram2d(x, y, nbins)[0]\n mi = sklearn.metrics.mutual_info_score(None, None, contingency=c_xy)\n # mi = adjusted_mutual_info_score(None, None, contingency=c_xy)\n return mi\n\n\ndef correlation(x, y, type='pearson'):\n \"\"\"\n Compute pearson or spearman correlation coeff\n Pearson's R is parametric whereas Spearman's R is non parametric (less sensitive)\n :param x: 1D numpy.array : flatten data from an image\n :param y: 1D numpy.array : flatten data from an image\n :param type: str: 'pearson' or 'spearman': type of R correlation coeff to compute\n :return: float value : correlation coefficient (between -1 and 1)\n \"\"\"\n from scipy.stats import pearsonr, spearmanr\n\n if type == 'pearson':\n corr = pearsonr(x, y)[0]\n if type == 'spearman':\n corr = spearmanr(x, y)[0]\n\n return corr\n\n\n# def check_shape(data):\n# \"\"\"\n# Make sure all elements of the list (given by first axis) have same shape. 
If data is 4d, convert to list and switch first and last axis.\n# :param data_list:\n# :return: data_list_out\n# \"\"\"\n# from numpy import shape\n# # check that element of the list have same shape\n# for i in range(1, shape(data)[0]):\n# if not shape(data[0]) == shape(data[i]):\n# printv('ERROR: all input images must have same dimensions.', 1, 'error')\n# # if data are 4d (hence giving 5d list), rearrange to list of 3d data\n# if len(shape(data)) == 5:\n# from numpy import squeeze\n# from scipy import swapaxes\n# data = squeeze(swapaxes(data, 0, 4)).tolist()\n# return data\n\n # # random_walker\n # from skimage.segmentation import random_walker\n # import numpy as np\n # markers = np.zeros(data.shape, dtype=np.uint)\n # perc = np.percentile(data, 95)\n # markers[data < perc] = 1\n # markers[data > perc] = 2\n # mask = random_walker(data, markers, beta=10, mode='bf')\n\n # # spectral clustering\n # from sklearn.feature_extraction import image\n # from sklearn.cluster import spectral_clustering\n # import numpy as np\n # data2d = data[:, :, 8]\n # graph = image.img_to_graph(data2d)\n # graph.data = np.exp(-graph.data / graph.data.std())\n # mask = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')\n # # label_im = -np.ones(data.shape)\n # # label_im[mask] = labels\n\n # Hough transform for ellipse\n # from skimage import data, color\n # from skimage.feature import canny, morphology\n # from skimage.transform import hough_ellipse\n # # detect edges\n # data2d = data3d[:, :, 8]\n # edges = canny(data2d, sigma=3.0)\n # Perform a Hough Transform\n # The accuracy corresponds to the bin size of a major axis.\n # The value is chosen in order to get a single high accumulator.\n # The threshold eliminates low accumulators\n # result = hough_ellipse(edges, accuracy=20, threshold=250, min_size=100, max_size=120)\n # result = hough_ellipse(edges, accuracy=20, min_size=5, max_size=20)\n # result.sort(order='accumulator')\n # # Estimated parameters for the ellipse\n # best = list(result[-1])\n # yc, xc, a, b = [int(round(x)) for x in best[1:5]]\n # orientation = best[5]\n # # Draw the ellipse on the original image\n # from matplotlib.pylab import *\n # from skimage.draw import ellipse_perimeter\n # cy, cx = ellipse_perimeter(yc, xc, a, b, orientation)\n # # image_rgb[cy, cx] = (0, 0, 255)\n # # Draw the edge (white) and the resulting ellipse (red)\n # # edges = color.gray2rgb(edges)\n # data2d[cy, cx] = 1000\n\n # # detect edges\n # from skimage.feature import canny\n # from skimage import morphology, measure\n # data2d = data3d[:, :, 8]\n # edges = canny(data2d, sigma=3.0)\n # contours = measure.find_contours(edges, 1, fully_connected='low')\n\n # mask = morphology.closing(edges, morphology.square(3), out=None)\n\n # k-means clustering\n # from sklearn.cluster import KMeans\n\n\nif __name__ == \"__main__\":\n sct.init_sct()\n main()\n",
"id": "4973887",
"language": "Python",
"matching_score": 1.4042093753814697,
"max_stars_count": 0,
"path": "scripts/sct_maths.py"
},
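The mutual_information() helper in scripts/sct_maths.py above estimates MI from a joint histogram passed to scikit-learn as a contingency table. A minimal standalone sketch of that computation, assuming only numpy and scikit-learn and using synthetic arrays in place of real image data:

    import numpy as np
    from sklearn.metrics import mutual_info_score

    def histogram_mi(x, y, nbins=32):
        # MI estimated from the joint histogram (contingency table) of x and y
        c_xy, _, _ = np.histogram2d(x, y, bins=nbins)
        return mutual_info_score(None, None, contingency=c_xy)

    # synthetic, correlated data (stand-in for two flattened images)
    rng = np.random.default_rng(0)
    x = rng.normal(size=10000)
    y = x + 0.5 * rng.normal(size=10000)
    print(histogram_mi(x, y))   # correlated signals give a clearly non-zero MI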
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8\n# pytest unit tests for spinalcordtoolbox.resampling\n\n# TODO: add test for 2d image\n\nfrom __future__ import absolute_import\n\nimport sys, os\nimport pytest\n\nimport numpy as np\nimport nibabel as nib\n\nfrom spinalcordtoolbox.utils import __sct_dir__\nsys.path.append(os.path.join(__sct_dir__, 'scripts'))\nfrom spinalcordtoolbox import resampling\n\n\n@pytest.fixture(scope=\"session\")\ndef fake_3dimage_nib():\n \"\"\"\n :return: an empty 3-d nibabel Image\n \"\"\"\n nx, ny, nz = 9, 9, 9 # image dimension\n data = np.zeros((nx, ny, nz), dtype=np.int8)\n data[4, 4, 4] = 1.\n affine = np.eye(4)\n # Create nibabel object\n nii = nib.nifti1.Nifti1Image(data, affine)\n return nii\n\n\n@pytest.fixture(scope=\"session\")\ndef fake_3dimage_nib_big():\n \"\"\"\n :return: an empty 3-d nibabel Image\n \"\"\"\n nx, ny, nz = 29, 39, 19 # image dimension\n data = np.zeros((nx, ny, nz), dtype=np.int8)\n data[14, 19, 9] = 1.\n affine = np.eye(4)\n # Create nibabel object\n nii = nib.nifti1.Nifti1Image(data, affine)\n return nii\n\n\n@pytest.fixture(scope=\"session\")\ndef fake_4dimage_nib():\n \"\"\"\n :return: an empty 4-d nibabel Image\n \"\"\"\n nx, ny, nz, nt = 9, 9, 9, 3 # image dimension\n data = np.zeros((nx, ny, nz, nt), dtype=np.int8)\n data[4, 4, 4, 0] = 1.\n affine = np.eye(4)\n # Create nibabel object\n nii = nib.nifti1.Nifti1Image(data, affine)\n return nii\n\n\n# noinspection 801,PyShadowingNames\ndef test_nib_resample_image_3d(fake_3dimage_nib):\n \"\"\"Test resampling with 3D nibabel image\"\"\"\n img_r = resampling.resample_nib(fake_3dimage_nib, new_size=[2, 2, 1], new_size_type='factor', interpolation='nn')\n assert img_r.get_data().shape == (18, 18, 9)\n assert img_r.get_data()[8, 8, 4] == 1.0 # make sure there is no displacement in world coordinate system\n assert img_r.header.get_zooms() == (0.5, 0.5, 1.0)\n # debug\n # nib.save(img_r, 'test_4.nii.gz')\n\n\n# noinspection 801,PyShadowingNames\ndef test_nib_resample_image_3d_to_dest(fake_3dimage_nib, fake_3dimage_nib_big):\n \"\"\"Test resampling with 3D nibabel image\"\"\"\n img_r = resampling.resample_nib(fake_3dimage_nib, img_dest=fake_3dimage_nib_big, interpolation='linear')\n assert img_r.get_data().shape == (29, 39, 19)\n assert img_r.get_data()[4, 4, 4] == 1.0\n\n\n# noinspection 801,PyShadowingNames\ndef test_nib_resample_image_4d(fake_4dimage_nib):\n \"\"\"Test resampling with 4D nibabel image\"\"\"\n img_r = resampling.resample_nib(fake_4dimage_nib, new_size=[2, 2, 1, 1], new_size_type='factor', interpolation='nn')\n assert img_r.get_data().shape == (18, 18, 9, 3)\n assert img_r.get_data()[8, 8, 4, 0] == 1.0 # make sure there is no displacement in world coordinate system\n assert img_r.get_data()[8, 8, 4, 1] == 0.0\n assert img_r.header.get_zooms() == (0.5, 0.5, 1.0, 1.0)\n",
"id": "11330634",
"language": "Python",
"matching_score": 0.9470016360282898,
"max_stars_count": 0,
"path": "unit_testing/test_resampling.py"
},
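The fixtures in unit_testing/test_resampling.py above all follow the same pattern: build a tiny synthetic nibabel image with one known non-zero voxel and an identity affine, then assert on its geometry. A minimal sketch of that pattern, assuming only numpy, nibabel and pytest (no spinalcordtoolbox import):

    import numpy as np
    import nibabel as nib
    import pytest

    @pytest.fixture
    def tiny_nifti():
        data = np.zeros((9, 9, 9), dtype=np.int8)
        data[4, 4, 4] = 1                        # single bright voxel at the centre
        return nib.Nifti1Image(data, np.eye(4))  # identity affine -> 1 mm isotropic

    def test_tiny_nifti_geometry(tiny_nifti):
        assert tiny_nifti.shape == (9, 9, 9)
        assert tiny_nifti.header.get_zooms() == (1.0, 1.0, 1.0)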
{
"content": "#!/usr/bin/env python\n#########################################################################################\n#\n# Test function for sct_apply_transfo\n#\n# ---------------------------------------------------------------------------------------\n# Copyright (c) 2017 Polytechnique Montreal <www.neuro.polymtl.ca>\n# Author: <NAME>\n#\n# About the license: see the file LICENSE.TXT\n#########################################################################################\n\n# TODO: generate warping field for dmri that makes sense (dmri --> T2).\n\nfrom __future__ import absolute_import\n\nfrom spinalcordtoolbox.image import Image\n\n\ndef init(param_test):\n \"\"\"\n Initialize class: param_test\n \"\"\"\n # initialization\n default_args = [\n '-i template/template/PAM50_small_t2.nii.gz -d t2/t2.nii.gz -w t2/warp_template2anat.nii.gz',\n '-i template/template/PAM50_small_t2.nii.gz -d t2/t2.nii.gz -w t2/warp_template2anat.nii.gz t2/warp_template2anat.nii.gz',\n '-i dmri/dmri.nii.gz -d t2/t2.nii.gz -w t2/warp_template2anat.nii.gz']\n param_test.input = 'template/template/PAM50_small_t2.nii.gz'\n param_test.ref = 't2/t2.nii.gz'\n param_test.out = 'PAM50_small_t2_reg.nii.gz'\n\n # assign default params\n if not param_test.args:\n param_test.args = default_args\n\n return param_test\n\n\ndef test_integrity(param_test):\n \"\"\"\n Test integrity of function\n \"\"\"\n img_src = Image(param_test.input)\n img_ref = Image(param_test.ref)\n img_output = Image(param_test.out)\n\n if img_output.orientation != img_ref.orientation:\n param_test.output += \"\\nImage has wrong orientation (%s -> %s)\" % (img_ref.orientation, img_output.orientation)\n param_test.status = 99\n\n if len(img_src.data.shape) > 3:\n # Allowed failure for now\n return param_test\n\n if not (img_output.data != 0).any():\n param_test.output += \"\\nImage is garbage (all zeros)\"\n param_test.status = 99\n\n return param_test\n",
"id": "2787759",
"language": "Python",
"matching_score": 1.8242805004119873,
"max_stars_count": 0,
"path": "testing/test_sct_apply_transfo.py"
},
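testing/test_sct_apply_transfo.py above checks two things about the warped output: that its orientation matches the destination image and that it is not all zeros. A minimal sketch of those checks with plain nibabel; the file names are hypothetical placeholders:

    import numpy as np
    import nibabel as nib

    def check_warped_output(fname_out, fname_ref):
        out = nib.load(fname_out)
        ref = nib.load(fname_ref)
        # orientation check: compare axis codes derived from the affines
        assert nib.aff2axcodes(out.affine) == nib.aff2axcodes(ref.affine), "wrong orientation"
        # "garbage" check: an output that is identically zero is suspicious
        assert np.asanyarray(out.dataobj).any(), "output is all zeros"

    # check_warped_output('PAM50_small_t2_reg.nii.gz', 't2.nii.gz')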
{
"content": "#!/usr/bin/env python\n#\n# Test major functions.\n#\n# In The following fields should be defined under the init() function of each test script:\n# param_test.list_fname_gt list containing the relative file name for ground truth data. See test_sct_propseg\n#\n# Authors: <NAME>, <NAME>, <NAME>\n\n# TODO: list functions to test in help (do a search in testing folder)\n\nfrom __future__ import print_function, absolute_import\n\nimport sys, os, time, copy, shlex, importlib, multiprocessing, tempfile, shutil\nimport traceback\nimport signal\n\nimport numpy as np\nfrom pandas import DataFrame\n\nimport sct_utils as sct\n\nsys.path.append(os.path.join(sct.__sct_dir__, 'testing'))\n\n\ndef fs_signature(root):\n ret = dict()\n root = os.path.abspath(root)\n for cwd, dirs, files in os.walk(root):\n if cwd == os.path.abspath(tempfile.gettempdir()):\n continue\n if cwd == os.path.join(root, \"testing-qc\"):\n files[:] = []\n dirs[:] = []\n continue\n dirs.sort()\n files.sort()\n for file in files:\n if cwd == root:\n continue\n path = os.path.relpath(os.path.join(cwd, file), root)\n data = os.stat(path)\n ret[path] = data\n return ret\n\n\ndef fs_ok(sig_a, sig_b, exclude=()):\n errors = list()\n for path, data in sig_b.items():\n if path not in sig_a:\n errors.append((path, \"added: {}\".format(path)))\n continue\n if sig_a[path] != data:\n errors.append((path, \"modified: {}\".format(path)))\n errors = [ (x,y) for (x,y) in errors if not x.startswith(exclude) ]\n if errors:\n for error in errors:\n sct.printv(\"Error: %s\", 1, type='error')\n raise RuntimeError()\n\n# Parameters\nclass Param:\n def __init__(self):\n self.download = 0\n self.path_data = 'sct_testing_data' # path to the testing data\n self.path_output = None\n self.function_to_test = None\n self.remove_tmp_file = 0\n self.verbose = 0\n self.args = [] # list of input arguments to the function\n self.args_with_path = '' # input arguments to the function, with path\n # self.list_fname_gt = [] # list of fname for ground truth data\n self.contrast = '' # folder containing the data and corresponding to the contrast. Could be t2, t1, t2s, etc.\n self.output = '' # output string\n self.results = '' # results in Panda DataFrame\n self.redirect_stdout = True # for debugging, set to 0. Otherwise set to 1.\n self.fname_log = None\n\n\n# define nice colors\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n\n\n# PARSER\n# ==========================================================================================\ndef get_parser():\n import argparse\n\n param_default = Param()\n\n parser = argparse.ArgumentParser(\n description=\"Crash and integrity testing for functions of the Spinal Cord Toolbox. Internet connection is required for downloading testing data.\",\n )\n\n parser.add_argument(\"--function\", \"-f\",\n help=\"Test this specific script (eg. 'sct_propseg').\",\n nargs=\"+\",\n )\n\n def arg_jobs(s):\n jobs = int(s)\n if jobs > 0:\n pass\n elif jobs == 0:\n jobs = multiprocessing.cpu_count()\n else:\n raise ValueError()\n return jobs\n\n parser.add_argument(\"--download\", \"-d\",\n choices=(\"0\", \"1\"),\n default=param_default.download,\n )\n parser.add_argument(\"--path\", \"-p\",\n help='Path to testing data. 
NB: no need to set if using \"-d 1\"',\n default=param_default.path_data,\n )\n parser.add_argument(\"--remove-temps\", \"-r\",\n choices=(\"0\", \"1\"),\n help='Remove temporary files.',\n default=param_default.remove_tmp_file,\n )\n parser.add_argument(\"--jobs\", \"-j\",\n type=arg_jobs,\n help=\"# of simultaneous tests to run (jobs). 0 or unspecified means # of available CPU threads ({})\".format(multiprocessing.cpu_count()),\n default=arg_jobs(0),\n )\n parser.add_argument(\"--verbose\", \"-v\",\n choices=(\"0\", \"1\"),\n default=param_default.verbose,\n )\n parser.add_argument(\"--abort-on-failure\",\n help=\"Instead of iterating through all tests, abort at the first one that would fail.\",\n action=\"store_true\",\n )\n parser.add_argument(\"--continue-from\",\n help=\"Instead of running all tests (or those specified by --function, start from this one\",\n )\n parser.add_argument(\"--check-filesystem\",\n help=\"Check filesystem for unwanted modifications\",\n action=\"store_true\",\n )\n parser.add_argument(\"--execution-folder\",\n help=\"Folder where to run tests from (default. temporary)\",\n )\n\n return parser\n\n\ndef process_function(fname, param):\n \"\"\"\n \"\"\"\n param.function_to_test = fname\n # display script name\n # load modules of function to test\n module_testing = importlib.import_module('test_' + fname)\n # initialize default parameters of function to test\n param.args = []\n # param.list_fname_gt = []\n # param.fname_groundtruth = ''\n param = module_testing.init(param)\n # loop over parameters to test\n list_status_function = []\n list_output = []\n for i in range(0, len(param.args)):\n param_test = copy.deepcopy(param)\n param_test.default_args = param.args\n param_test.args = param.args[i]\n param_test.test_integrity = True\n # if list_fname_gt is not empty, assign it\n # if param_test.list_fname_gt:\n # param_test.fname_gt = param_test.list_fname_gt[i]\n # test function\n try:\n param_test = test_function(param_test)\n except sct.RunError as e:\n list_status_function.append(1)\n list_output.append(\"Got SCT exception:\")\n list_output.append(e.args[0])\n except Exception as e:\n list_status_function.append(1)\n list_output.append(\"Got exception: %s\" % e)\n list_output += traceback.format_exc().splitlines()\n else:\n list_status_function.append(param_test.status)\n list_output.append(param_test.output)\n\n return list_output, list_status_function\n\n\ndef process_function_multiproc(fname, param):\n \"\"\" Wrapper that makes ^C work in multiprocessing code \"\"\"\n # Ignore SIGINT, parent will take care of the clean-up\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n return process_function(fname, param)\n\n\n# Main\n# ==========================================================================================\ndef main(args=None):\n\n # initializations\n param = Param()\n\n # check user arguments\n if args is None:\n args = sys.argv[1:]\n\n # get parser info\n parser = get_parser()\n\n arguments = parser.parse_args(args)\n\n param.download = int(arguments.download)\n param.path_data = arguments.path\n functions_to_test = arguments.function\n param.remove_tmp_file = int(arguments.remove_temps)\n jobs = arguments.jobs\n\n param.verbose = arguments.verbose\n sct.init_sct(log_level=param.verbose, update=True) # Update log level\n\n start_time = time.time()\n\n # get absolute path and add slash at the end\n param.path_data = os.path.abspath(param.path_data)\n\n # check existence of testing data folder\n if not os.path.isdir(param.path_data) or param.download:\n 
downloaddata(param)\n\n # display path to data\n sct.printv('\\nPath to testing data: ' + param.path_data, param.verbose)\n\n # create temp folder that will have all results\n path_tmp = os.path.abspath(arguments.execution_folder or sct.tmp_create(verbose=param.verbose))\n\n # go in path data (where all scripts will be run)\n curdir = os.getcwd()\n os.chdir(param.path_data)\n\n functions_parallel = list()\n functions_serial = list()\n if functions_to_test:\n for f in functions_to_test:\n if f in get_functions_parallelizable():\n functions_parallel.append(f)\n elif f in get_functions_nonparallelizable():\n functions_serial.append(f)\n else:\n sct.printv('Command-line usage error: Function \"%s\" is not part of the list of testing functions' % f, type='error')\n jobs = min(jobs, len(functions_parallel))\n else:\n functions_parallel = get_functions_parallelizable()\n functions_serial = get_functions_nonparallelizable()\n\n if arguments.continue_from:\n first_func = arguments.continue_from\n if first_func in functions_parallel:\n functions_serial = []\n functions_parallel = functions_parallel[functions_parallel.index(first_func):]\n elif first_func in functions_serial:\n functions_serial = functions_serial[functions_serial.index(first_func):]\n\n if arguments.check_filesystem and jobs != 1:\n print(\"Check filesystem used -> jobs forced to 1\")\n jobs = 1\n\n print(\"Will run through the following tests:\")\n if functions_serial:\n print(\"- sequentially: {}\".format(\" \".join(functions_serial)))\n if functions_parallel:\n print(\"- in parallel with {} jobs: {}\".format(jobs, \" \".join(functions_parallel)))\n\n list_status = []\n for name, functions in (\n (\"serial\", functions_serial),\n (\"parallel\", functions_parallel),\n ):\n if not functions:\n continue\n\n if any([s for (f, s) in list_status]) and arguments.abort_on_failure:\n break\n\n try:\n if functions == functions_parallel and jobs != 1:\n pool = multiprocessing.Pool(processes=jobs)\n\n results = list()\n # loop across functions and run tests\n for f in functions:\n func_param = copy.deepcopy(param)\n func_param.path_output = f\n res = pool.apply_async(process_function_multiproc, (f, func_param,))\n results.append(res)\n else:\n pool = None\n\n for idx_function, f in enumerate(functions):\n print_line('Checking ' + f)\n if functions == functions_serial or jobs == 1:\n if arguments.check_filesystem:\n if os.path.exists(os.path.join(path_tmp, f)):\n shutil.rmtree(os.path.join(path_tmp, f))\n sig_0 = fs_signature(path_tmp)\n\n func_param = copy.deepcopy(param)\n func_param.path_output = f\n\n res = process_function(f, func_param)\n\n if arguments.check_filesystem:\n sig_1 = fs_signature(path_tmp)\n fs_ok(sig_0, sig_1, exclude=(f,))\n else:\n res = results[idx_function].get()\n\n list_output, list_status_function = res\n # manage status\n if any(list_status_function):\n if 1 in list_status_function:\n print_fail()\n status = (f, 1)\n else:\n print_warning()\n status = (f, 99)\n for output in list_output:\n for line in output.splitlines():\n print(\" %s\" % line)\n else:\n print_ok()\n if param.verbose:\n for output in list_output:\n for line in output.splitlines():\n print(\" %s\" % line)\n status = (f, 0)\n # append status function to global list of status\n list_status.append(status)\n if any([s for (f, s) in list_status]) and arguments.abort_on_failure:\n break\n except KeyboardInterrupt:\n raise\n finally:\n if pool:\n pool.terminate()\n pool.join()\n\n print('status: ' + str([s for (f, s) in list_status]))\n if any([s for (f, 
s) in list_status]):\n print(\"Failures: {}\".format(\" \".join([f for (f, s) in list_status if s])))\n\n # display elapsed time\n elapsed_time = time.time() - start_time\n sct.printv('Finished! Elapsed time: ' + str(int(np.round(elapsed_time))) + 's\\n')\n\n # come back\n os.chdir(curdir)\n\n # remove temp files\n if param.remove_tmp_file and arguments.execution_folder is None:\n sct.printv('\\nRemove temporary files...', 0)\n sct.rmtree(path_tmp)\n\n e = 0\n if any([s for (f, s) in list_status]):\n e = 1\n # print(e)\n\n sys.exit(e)\n\n\ndef downloaddata(param):\n \"\"\"\n Download testing data from internet.\n Parameters\n ----------\n param\n\n Returns\n -------\n None\n \"\"\"\n sct.printv('\\nDownloading testing data...', param.verbose)\n import sct_download_data\n sct_download_data.main(['-d', 'sct_testing_data'])\n\n\ndef get_functions_nonparallelizable():\n return [\n 'sct_deepseg_gm',\n 'sct_deepseg_lesion',\n 'sct_deepseg_sc',\n ]\n\ndef get_functions_parallelizable():\n return [\n 'sct_analyze_lesion',\n 'sct_analyze_texture',\n 'sct_apply_transfo',\n 'sct_convert',\n 'sct_compute_ernst_angle',\n 'sct_compute_hausdorff_distance',\n 'sct_compute_mtr',\n 'sct_compute_mscc',\n 'sct_compute_snr',\n 'sct_concat_transfo',\n # 'sct_convert_binary_to_trilinear', # not useful\n 'sct_create_mask',\n 'sct_crop_image',\n 'sct_dice_coefficient',\n 'sct_detect_pmj',\n 'sct_dmri_compute_dti',\n 'sct_dmri_concat_bvals',\n 'sct_dmri_concat_bvecs',\n 'sct_dmri_create_noisemask',\n 'sct_dmri_compute_bvalue',\n 'sct_dmri_moco',\n 'sct_dmri_separate_b0_and_dwi',\n 'sct_dmri_transpose_bvecs',\n 'sct_extract_metric',\n 'sct_flatten_sagittal',\n 'sct_fmri_compute_tsnr',\n 'sct_fmri_moco',\n 'sct_get_centerline',\n 'sct_image',\n 'sct_label_utils',\n 'sct_label_vertebrae',\n 'sct_maths',\n 'sct_merge_images',\n # 'sct_pipeline', # not useful-- to remove at some point\n 'sct_process_segmentation',\n 'sct_propseg',\n 'sct_qc',\n 'sct_register_multimodal',\n 'sct_register_to_template',\n 'sct_resample',\n 'sct_smooth_spinalcord',\n 'sct_straighten_spinalcord', # deps: sct_apply_transfo\n # 'sct_segment_graymatter',\n 'sct_warp_template',\n ]\n\n\n# print without carriage return\n# ==========================================================================================\ndef print_line(string):\n sys.stdout.write(string + make_dot_lines(string))\n sys.stdout.flush()\n\n\n# fill line with dots\n# ==========================================================================================\ndef make_dot_lines(string):\n if len(string) < 52:\n dot_lines = '.' 
* (52 - len(string))\n return dot_lines\n else:\n return ''\n\n\n# print in color\n# ==========================================================================================\ndef print_ok():\n sct.printv(\"[\" + bcolors.OKGREEN + \"OK\" + bcolors.ENDC + \"]\")\n\n\ndef print_warning():\n sct.printv(\"[\" + bcolors.WARNING + \"WARNING\" + bcolors.ENDC + \"]\")\n\n\ndef print_fail():\n sct.printv(\"[\" + bcolors.FAIL + \"FAIL\" + bcolors.ENDC + \"]\")\n\n\n# init_testing\n# ==========================================================================================\ndef test_function(param_test):\n \"\"\"\n\n Parameters\n ----------\n file_testing\n\n Returns\n -------\n path_output str: path where to output testing data\n \"\"\"\n\n # load modules of function to test\n module_function_to_test = importlib.import_module(param_test.function_to_test)\n module_testing = importlib.import_module('test_' + param_test.function_to_test)\n\n # retrieve subject name\n subject_folder = os.path.basename(param_test.path_data)\n\n # build path_output variable\n path_testing = os.getcwd()\n\n # if not param_test.path_output:\n # param_test.path_output = sct.tmp_create(basename=(param_test.function_to_test + '_' + subject_folder), verbose=0)\n # elif not os.path.isdir(param_test.path_output):\n # os.makedirs(param_test.path_output)\n\n # # get parser information\n # parser = module_function_to_test.get_parser()\n # if '-ofolder' in parser.options and '-ofolder' not in param_test.args:\n # param_test.args += \" -ofolder \" + param_test.path_output\n #\n # dict_args = parser.parse(shlex.split(param_test.args), check_file_exist=False)\n # # TODO: if file in list does not exist, raise exception and assign status=200\n # # add data path to each input argument\n # dict_args_with_path = parser.add_path_to_file(copy.deepcopy(dict_args), param_test.path_data, input_file=True)\n # # add data path to each output argument\n # dict_args_with_path = parser.add_path_to_file(copy.deepcopy(dict_args_with_path), param_test.path_output, input_file=False, output_file=True)\n # # save into class\n # param_test.dict_args_with_path = dict_args_with_path\n # param_test.args_with_path = parser.dictionary_to_string(dict_args_with_path)\n #\n # initialize panda dataframe\n param_test.results = DataFrame(index=[subject_folder],\n data={'status': 0,\n 'duration': 0,\n 'output': '',\n 'path_data': param_test.path_data,\n 'path_output': param_test.path_output})\n #\n # # retrieve input file (will be used later for integrity testing)00\n # if '-i' in dict_args:\n # # check if list in case of multiple input files\n # if not isinstance(dict_args_with_path['-i'], list):\n # list_file_to_check = [dict_args_with_path['-i']]\n # # assign field file_input for integrity testing\n # param_test.file_input = dict_args['-i'].split('/')[-1]\n # # update index of dataframe by appending file name for more clarity\n # param_test.results = param_test.results.rename({subject_folder: os.path.join(subject_folder, dict_args['-i'])})\n # else:\n # list_file_to_check = dict_args_with_path['-i']\n # # TODO: assign field file_input for integrity testing\n # for file_to_check in list_file_to_check:\n # # file_input = file_to_check.split('/')[1]\n # # Check if input files exist\n # if not (os.path.isfile(file_to_check)):\n # param_test.status = 200\n # param_test.output += '\\nERROR: This input file does not exist: ' + file_to_check\n # return update_param(param_test)\n #\n # # retrieve ground truth (will be used later for integrity testing)\n # if '-igt' in 
dict_args:\n # param_test.fname_gt = dict_args_with_path['-igt']\n # # Check if ground truth files exist\n # if not os.path.isfile(param_test.fname_gt):\n # param_test.status = 201\n # param_test.output += '\\nERROR: The following file used for ground truth does not exist: ' + param_test.fname_gt\n # return update_param(param_test)\n\n # run command\n cmd = ' '.join([param_test.function_to_test, param_test.args])\n # param_test.output += '\\nWill run in %s:' % (os.path.join(path_testing, param_test.path_output))\n param_test.output += '\\n====================================================================================================\\n' + cmd + '\\n====================================================================================================\\n\\n' # copy command\n time_start = time.time()\n try:\n # os.chdir(param_test.path_output)\n # if not os.path.exists(param_test.path_output):\n # # in case of relative path, we want a subfolder too\n # os.makedirs(param_test.path_output)\n # os.chdir(path_testing)\n param_test.status, o = sct.run(cmd, verbose=0)\n if param_test.status:\n raise Exception\n except Exception as err:\n param_test.status = 1\n param_test.output += str(err)\n return update_param(param_test)\n\n param_test.output += o\n param_test.results['duration'] = time.time() - time_start\n\n # test integrity\n if param_test.test_integrity:\n param_test.output += '\\n\\n====================================================================================================\\n' + 'INTEGRITY TESTING' + '\\n====================================================================================================\\n\\n' # copy command\n try:\n # os.chdir(param_test.path_output)\n param_test = module_testing.test_integrity(param_test)\n # os.chdir(path_testing)\n except Exception as err:\n # os.chdir(path_testing)\n param_test.status = 2\n param_test.output += str(err)\n return update_param(param_test)\n\n return update_param(param_test)\n\n\ndef update_param(param):\n \"\"\"\n Update field \"results\" in param class\n \"\"\"\n for results_attr in param.results.columns:\n if hasattr(param, results_attr):\n param.results[results_attr] = getattr(param, results_attr)\n return param\n\n\n# START PROGRAM\n# ==========================================================================================\nif __name__ == \"__main__\":\n sct.init_sct()\n # initialize parameters\n param = Param()\n # call main function\n main()\n",
"id": "12173603",
"language": "Python",
"matching_score": 2.319488525390625,
"max_stars_count": 0,
"path": "scripts/sct_testing.py"
},
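scripts/sct_testing.py above snapshots the test data folder with fs_signature() and compares snapshots with fs_ok() to catch tests that modify the filesystem. A reduced sketch of that before/after idea, assuming only the standard library:

    import os

    def fs_signature(root):
        # map each file (relative path) to a cheap fingerprint of its state
        sig = {}
        for cwd, dirs, files in os.walk(os.path.abspath(root)):
            for name in sorted(files):
                path = os.path.join(cwd, name)
                st = os.stat(path)
                sig[os.path.relpath(path, root)] = (st.st_size, st.st_mtime)
        return sig

    def fs_diff(before, after):
        added = [p for p in after if p not in before]
        modified = [p for p in after if p in before and after[p] != before[p]]
        return added, modified

    # usage (hypothetical): snapshot, run a test, then report unwanted changes
    # before = fs_signature('.')
    # ... run a test ...
    # print(fs_diff(before, fs_signature('.')))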
{
"content": "#!/usr/bin/env python\n\n# Analyze lesions\n#\n# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>\n# Author: Charley\n# Modified: 2017-08-19\n#\n# About the license: see the file LICENSE.TXT\n\nfrom __future__ import print_function, absolute_import, division\n\nimport os, math, sys, pickle, shutil\nimport argparse\n\nimport numpy as np\nimport pandas as pd\nfrom skimage.measure import label\n\nfrom spinalcordtoolbox.image import Image\nfrom spinalcordtoolbox.centerline.core import ParamCenterline, get_centerline\nfrom spinalcordtoolbox.utils import Metavar, SmartFormatter, ActionCreateFolder\n\nimport sct_utils as sct\nfrom sct_utils import extract_fname, printv, tmp_create\n\n\ndef get_parser():\n # Initialize the parser\n\n parser = argparse.ArgumentParser(\n description='R|Compute statistics on segmented lesions. The function assigns an ID value to each lesion (1, 2, '\n '3, etc.) and then outputs morphometric measures for each lesion:\\n'\n '- volume [mm^3]\\n'\n '- length [mm]: length along the Superior-Inferior axis\\n'\n '- max_equivalent_diameter [mm]: maximum diameter of the lesion, when approximating\\n'\n ' the lesion as a circle in the axial plane.\\n\\n'\n 'If the proportion of lesion in each region (e.g. WM and GM) does not sum up to 100%, it means '\n 'that the registered template does not fully cover the lesion. In that case you might want to '\n 'check the registration results.',\n add_help=None,\n formatter_class=SmartFormatter,\n prog=os.path.basename(__file__).strip(\".py\")\n )\n\n mandatory_arguments = parser.add_argument_group(\"\\nMANDATORY ARGUMENTS\")\n mandatory_arguments.add_argument(\n \"-m\",\n help='Binary mask of lesions (lesions are labeled as \"1\").',\n metavar=Metavar.file)\n mandatory_arguments.add_argument(\n \"-s\",\n help=\"Spinal cord centerline or segmentation file, which will be used to correct morphometric measures with \"\n \"cord angle with respect to slice. (e.g.'t2_seg.nii.gz')\",\n metavar=Metavar.file)\n\n optional = parser.add_argument_group(\"\\nOPTIONAL ARGUMENTS\")\n optional.add_argument(\n \"-h\",\n \"--help\",\n action=\"help\",\n help=\"show this help message and exit\")\n optional.add_argument(\n \"-i\",\n help='Image from which to extract average values within lesions (e.g. \"t2.nii.gz\"). If provided, the function '\n 'computes the mean and standard deviation values of this image within each lesion.',\n metavar=Metavar.file,\n default=None,\n required=False)\n optional.add_argument(\n \"-f\",\n help=\"Path to folder containing the atlas/template registered to the anatomical image. If provided, the \"\n \"function computes: (i) the distribution of each lesion depending on each vertebral level and on each\"\n \"region of the template (e.g. GM, WM, WM tracts) and (ii) the proportion of ROI (e.g. vertebral level, \"\n \"GM, WM) occupied by lesion.\",\n metavar=Metavar.str,\n default=None,\n required=False)\n optional.add_argument(\n \"-ofolder\",\n help='Output folder (e.g. 
\"./\")',\n metavar=Metavar.folder,\n action=ActionCreateFolder,\n default='./',\n required=False)\n optional.add_argument(\n \"-r\",\n type=int,\n help=\"Remove temporary files.\",\n required=False,\n default=1,\n choices=(0, 1))\n optional.add_argument(\n \"-v\",\n type=int,\n help=\"Verbose: 0 = nothing, 1 = classic, 2 = expended\",\n required=False,\n choices=(0, 1, 2),\n default=1)\n\n return parser\n\n\nclass AnalyzeLeion:\n def __init__(self, fname_mask, fname_sc, fname_ref, path_template, path_ofolder, verbose):\n self.fname_mask = fname_mask\n\n self.fname_sc = fname_sc\n self.fname_ref = fname_ref\n self.path_template = path_template\n self.path_ofolder = path_ofolder\n self.verbose = verbose\n self.wrk_dir = os.getcwd()\n\n if not set(np.unique(Image(fname_mask).data)) == set([0.0, 1.0]):\n if set(np.unique(Image(fname_mask).data)) == set([0.0]):\n printv('WARNING: Empty masked image', self.verbose, 'warning')\n else:\n printv(\"ERROR input file %s is not binary file with 0 and 1 values\" % fname_mask, 1, 'error')\n\n # create tmp directory\n self.tmp_dir = tmp_create(verbose=verbose) # path to tmp directory\n\n # lesion file where each lesion has a different value\n self.fname_label = extract_fname(self.fname_mask)[1] + '_label' + extract_fname(self.fname_mask)[2]\n\n # initialization of measure sheet\n measure_lst = ['label', 'volume [mm3]', 'length [mm]', 'max_equivalent_diameter [mm]']\n if self.fname_ref is not None:\n for measure in ['mean', 'std']:\n measure_lst.append(measure + '_' + extract_fname(self.fname_ref)[1])\n measure_dct = {}\n for column in measure_lst:\n measure_dct[column] = None\n self.measure_pd = pd.DataFrame(data=measure_dct, index=range(0), columns=measure_lst)\n\n # orientation of the input image\n self.orientation = None\n\n # volume object\n self.volumes = None\n\n # initialization of proportion measures, related to registrated atlas\n if self.path_template is not None:\n self.path_atlas = os.path.join(self.path_template, \"atlas\")\n self.path_levels = os.path.join(self.path_template, \"template\", \"PAM50_levels.nii.gz\")\n else:\n self.path_atlas, self.path_levels = None, None\n self.vert_lst = None\n self.atlas_roi_lst = None\n self.distrib_matrix_dct = {}\n\n # output names\n self.pickle_name = extract_fname(self.fname_mask)[1] + '_analyzis.pkl'\n self.excel_name = extract_fname(self.fname_mask)[1] + '_analyzis.xls'\n\n def analyze(self):\n self.ifolder2tmp()\n\n # Orient input image(s) to RPI\n self.orient2rpi()\n\n # Label connected regions of the masked image\n self.label_lesion()\n\n # Compute angle for CSA correction\n self.angle_correction()\n\n # Compute lesion volume, equivalent diameter, (S-I) length, max axial nominal diameter\n # if registered template provided: across vertebral level, GM, WM, within WM/GM tracts...\n # if ref image is provided: Compute mean and std value in each labeled lesion\n self.measure()\n\n # reorient data if needed\n self.reorient()\n\n # print averaged results\n self.show_total_results()\n\n # save results in excel and pickle files\n self.pack_measures()\n\n # save results to ofolder\n self.tmp2ofolder()\n\n def tmp2ofolder(self):\n os.chdir(self.wrk_dir) # go back to working directory\n\n printv('\\nSave results files...', self.verbose, 'normal')\n printv('\\n... 
measures saved in the files:', self.verbose, 'normal')\n for file_ in [self.fname_label, self.excel_name, self.pickle_name]:\n printv('\\n - ' + os.path.join(self.path_ofolder, file_), self.verbose, 'normal')\n sct.copy(os.path.join(self.tmp_dir, file_), os.path.join(self.path_ofolder, file_))\n\n def pack_measures(self):\n writer = pd.ExcelWriter(self.excel_name, engine='xlwt')\n self.measure_pd.to_excel(writer, sheet_name='measures', index=False, engine='xlwt')\n\n # Add the total column and row\n if self.path_template is not None:\n for sheet_name in self.distrib_matrix_dct:\n if '#' in sheet_name:\n df = self.distrib_matrix_dct[sheet_name].copy()\n df = df.append(df.sum(numeric_only=True, axis=0), ignore_index=True)\n df['total'] = df.sum(numeric_only=True, axis=1)\n df.iloc[-1, df.columns.get_loc('vert')] = 'total'\n df.to_excel(writer, sheet_name=sheet_name, index=False, engine='xlwt')\n else:\n self.distrib_matrix_dct[sheet_name].to_excel(writer, sheet_name=sheet_name, index=False, engine='xlwt')\n\n # Save pickle\n self.distrib_matrix_dct['measures'] = self.measure_pd\n with open(self.pickle_name, 'wb') as handle:\n pickle.dump(self.distrib_matrix_dct, handle)\n\n # Save Excel\n writer.save()\n\n def show_total_results(self):\n printv('\\n\\nAveraged measures...', self.verbose, 'normal')\n for stg, key in zip([' Volume [mm^3] = ', ' (S-I) Length [mm] = ', ' Equivalent Diameter [mm] = '], ['volume [mm3]', 'length [mm]', 'max_equivalent_diameter [mm]']):\n printv(stg + str(np.round(np.mean(self.measure_pd[key]), 2)) + '+/-' + str(np.round(np.std(self.measure_pd[key]), 2)), self.verbose, type='info')\n\n printv('\\nTotal volume = ' + str(np.round(np.sum(self.measure_pd['volume [mm3]']), 2)) + ' mm^3', self.verbose, 'info')\n printv('Lesion count = ' + str(len(self.measure_pd['volume [mm3]'].values)), self.verbose, 'info')\n\n def reorient(self):\n if not self.orientation == 'RPI':\n printv('\\nOrient output image to initial orientation...', self.verbose, 'normal')\n self._orient(self.fname_label, self.orientation)\n\n def _measure_within_im(self, im_lesion, im_ref, label_lst):\n printv('\\nCompute reference image features...', self.verbose, 'normal')\n\n for lesion_label in label_lst:\n im_label_data_cur = im_lesion == lesion_label\n im_label_data_cur[np.where(im_ref == 0)] = 0 # if the ref object is eroded compared to the labeled object\n mean_cur, std_cur = np.mean(im_ref[np.where(im_label_data_cur)]), np.std(im_ref[np.where(im_label_data_cur)])\n\n label_idx = self.measure_pd[self.measure_pd.label == lesion_label].index\n self.measure_pd.loc[label_idx, 'mean_' + extract_fname(self.fname_ref)[1]] = mean_cur\n self.measure_pd.loc[label_idx, 'std_' + extract_fname(self.fname_ref)[1]] = std_cur\n printv('Mean+/-std of lesion #' + str(lesion_label) + ' in ' + extract_fname(self.fname_ref)[1] + ' file: ' + str(np.round(mean_cur, 2)) + '+/-' + str(np.round(std_cur, 2)), self.verbose, type='info')\n\n def _measure_volume(self, im_data, p_lst, idx):\n for zz in range(im_data.shape[2]):\n self.volumes[zz, idx - 1] = np.sum(im_data[:, :, zz]) * p_lst[0] * p_lst[1] * p_lst[2]\n\n vol_tot_cur = np.sum(self.volumes[:, idx - 1])\n self.measure_pd.loc[idx, 'volume [mm3]'] = vol_tot_cur\n printv(' Volume : ' + str(np.round(vol_tot_cur, 2)) + ' mm^3', self.verbose, type='info')\n\n def _measure_length(self, im_data, p_lst, idx):\n length_cur = np.sum([np.cos(self.angles[zz]) * p_lst[2] for zz in np.unique(np.where(im_data)[2])])\n self.measure_pd.loc[idx, 'length [mm]'] = length_cur\n printv(' (S-I) 
length : ' + str(np.round(length_cur, 2)) + ' mm', self.verbose, type='info')\n\n def _measure_diameter(self, im_data, p_lst, idx):\n area_lst = [np.sum(im_data[:, :, zz]) * np.cos(self.angles[zz]) * p_lst[0] * p_lst[1] for zz in range(im_data.shape[2])]\n diameter_cur = 2 * np.sqrt(max(area_lst) / (4 * np.pi))\n self.measure_pd.loc[idx, 'max_equivalent_diameter [mm]'] = diameter_cur\n printv(' Max. equivalent diameter : ' + str(np.round(diameter_cur, 2)) + ' mm', self.verbose, type='info')\n\n def ___pve_weighted_avg(self, im_mask_data, im_atlas_data):\n return im_mask_data * im_atlas_data\n\n def __relative_ROIvol_in_mask(self, im_mask_data, im_atlas_roi_data, p_lst, im_template_vert_data=None, vert_level=None):\n #\n # Goal:\n # This function computes the percentage of ROI occupied by binary mask\n # --> ie volume of the intersection between {im_mask and im_roi} divided by the volume of roi\n # If im_template_vert and vert are specified, the ROI is restricted to the given vert_level\n # The PVE is handled by the method 'weighted_average'\n #\n # Inputs:\n # - im_mask_data - type=NumPyArray - binary mask (eg lesions)\n # - im_atlas_roi_data - type=NumPyArray - ROI in the same space as im_mask\n # - p_lst - type=list of float\n # - im_template_vert_data - type=NumPyArray - vertebral template in the same space as im_mask\n # - vert_level - type=int - vertebral level ID to restrict the ROI\n #\n\n if im_template_vert_data is not None and vert_level is not None:\n im_atlas_roi_data[np.where(im_template_vert_data != vert_level)] = 0.0\n im_mask_data[np.where(im_template_vert_data != vert_level)] = 0.0\n\n im_mask_roi_data_wa = self.___pve_weighted_avg(im_mask_data=im_mask_data, im_atlas_data=im_atlas_roi_data)\n vol_tot_roi = np.sum(im_atlas_roi_data) * p_lst[0] * p_lst[1] * p_lst[2]\n vol_mask_roi_wa = np.sum(im_mask_roi_data_wa) * p_lst[0] * p_lst[1] * p_lst[2]\n\n return vol_mask_roi_wa, vol_tot_roi\n\n def _measure_eachLesion_distribution(self, lesion_id, atlas_data, im_vert, im_lesion, p_lst):\n sheet_name = 'lesion#' + str(lesion_id) + '_distribution'\n self.distrib_matrix_dct[sheet_name] = pd.DataFrame.from_dict({'vert': [str(v) for v in self.vert_lst]})\n\n # initialized to 0 for each vertebral level and each PAM50 tract\n for tract_id in atlas_data:\n self.distrib_matrix_dct[sheet_name]['PAM50_' + str(tract_id).zfill(2)] = [0] * len(self.vert_lst)\n\n vol_mask_tot = 0.0 # vol tot of this lesion through the vertebral levels and PAM50 tracts\n for vert in self.vert_lst: # Loop over vertebral levels\n im_vert_cur = np.copy(im_vert)\n im_vert_cur[np.where(im_vert_cur != vert)] = 0.0\n if np.count_nonzero(im_vert_cur * np.copy(im_lesion)): # if there is lesion in this vertebral level\n idx = self.distrib_matrix_dct[sheet_name][self.distrib_matrix_dct[sheet_name].vert == str(vert)].index\n for tract_id in atlas_data: # Loop over PAM50 tracts\n res_lst = self.__relative_ROIvol_in_mask(im_mask_data=np.copy(im_lesion),\n im_atlas_roi_data=np.copy(atlas_data[tract_id]),\n p_lst=p_lst,\n im_template_vert_data=np.copy(im_vert_cur),\n vert_level=vert)\n self.distrib_matrix_dct[sheet_name].loc[idx, 'PAM50_' + str(tract_id).zfill(2)] = res_lst[0]\n vol_mask_tot += res_lst[0]\n\n # convert the volume values in distrib_matrix_dct to percentage values\n for vert in self.vert_lst:\n idx = self.distrib_matrix_dct[sheet_name][self.distrib_matrix_dct[sheet_name].vert == str(vert)].index\n for tract_id in atlas_data:\n val = self.distrib_matrix_dct[sheet_name].loc[idx, 'PAM50_' + 
str(tract_id).zfill(2)].values[0]\n self.distrib_matrix_dct[sheet_name].loc[idx, 'PAM50_' + str(tract_id).zfill(2)] = val * 100.0 / vol_mask_tot\n\n def __regroup_per_tracts(self, vol_dct, tract_limit):\n res_mask = [vol_dct[t][0] for t in vol_dct if t >= tract_limit[0] and t <= tract_limit[1]]\n res_tot = [vol_dct[t][1] for t in vol_dct if t >= tract_limit[0] and t <= tract_limit[1]]\n return np.sum(res_mask) * 100.0 / np.sum(res_tot)\n\n def _measure_totLesion_distribution(self, im_lesion, atlas_data, im_vert, p_lst):\n\n sheet_name = 'ROI_occupied_by_lesion'\n self.distrib_matrix_dct[sheet_name] = pd.DataFrame.from_dict({'vert': [str(v) for v in self.vert_lst] + ['total']})\n\n # initialized to 0 for each vertebral level and each PAM50 tract\n for tract_id in atlas_data:\n self.distrib_matrix_dct[sheet_name]['PAM50_' + str(tract_id).zfill(2)] = [0] * len(self.vert_lst + ['total'])\n\n for vert in self.vert_lst + ['total']: # loop over the vertebral levels\n if vert != 'total':\n im_vert_cur = np.copy(im_vert)\n im_vert_cur[np.where(im_vert_cur != vert)] = 0\n else:\n im_vert_cur = None\n if im_vert_cur is None or np.count_nonzero(im_vert_cur * np.copy(im_lesion)):\n res_perTract_dct = {} # for each tract compute the volume occupied by lesion and the volume of the tract\n idx = self.distrib_matrix_dct[sheet_name][self.distrib_matrix_dct[sheet_name].vert == str(vert)].index\n for tract_id in atlas_data: # loop over the tracts\n res_perTract_dct[tract_id] = self.__relative_ROIvol_in_mask(im_mask_data=np.copy(im_lesion),\n im_atlas_roi_data=np.copy(atlas_data[tract_id]),\n p_lst=p_lst,\n im_template_vert_data=np.copy(im_vert_cur),\n vert_level=vert)\n\n # group tracts to compute involvement in GM, WM, DC, VF, LF\n self.distrib_matrix_dct[sheet_name].loc[idx, 'PAM50_GM'] = self.__regroup_per_tracts(vol_dct=res_perTract_dct, tract_limit=[30, 35])\n self.distrib_matrix_dct[sheet_name].loc[idx, 'PAM50_WM'] = self.__regroup_per_tracts(vol_dct=res_perTract_dct, tract_limit=[0, 29])\n self.distrib_matrix_dct[sheet_name].loc[idx, 'PAM50_DC'] = self.__regroup_per_tracts(vol_dct=res_perTract_dct, tract_limit=[0, 3])\n self.distrib_matrix_dct[sheet_name].loc[idx, 'PAM50_VF'] = self.__regroup_per_tracts(vol_dct=res_perTract_dct, tract_limit=[14, 29])\n self.distrib_matrix_dct[sheet_name].loc[idx, 'PAM50_LF'] = self.__regroup_per_tracts(vol_dct=res_perTract_dct, tract_limit=[4, 13])\n\n # save involvement in each PAM50 tracts\n for tract_id in atlas_data:\n self.distrib_matrix_dct[sheet_name].loc[idx, 'PAM50_' + str(tract_id).zfill(2)] = res_perTract_dct[tract_id][0] * 100.0 / res_perTract_dct[tract_id][1]\n\n def measure(self):\n im_lesion = Image(self.fname_label)\n im_lesion_data = im_lesion.data\n p_lst = im_lesion.dim[4:7] # voxel size\n\n label_lst = [l for l in np.unique(im_lesion_data) if l] # lesion label IDs list\n\n if self.path_template is not None:\n if os.path.isfile(self.path_levels):\n img_vert = Image(self.path_levels)\n im_vert_data = img_vert.data\n self.vert_lst = [v for v in np.unique(im_vert_data) if v] # list of vertebral levels available in the input image\n\n else:\n im_vert_data = None\n printv('ERROR: the file ' + self.path_levels + ' does not exist. 
Please make sure the template was correctly registered and warped (sct_register_to_template or sct_register_multimodal and sct_warp_template)', type='error')\n\n # In order to open atlas images only one time\n atlas_data_dct = {} # dict containing the np.array of the registrated atlas\n for fname_atlas_roi in self.atlas_roi_lst:\n tract_id = int(fname_atlas_roi.split('_')[-1].split('.nii.gz')[0])\n img_cur = Image(fname_atlas_roi)\n img_cur_copy = img_cur.copy()\n atlas_data_dct[tract_id] = img_cur_copy.data\n del img_cur\n\n self.volumes = np.zeros((im_lesion.dim[2], len(label_lst)))\n\n # iteration across each lesion to measure statistics\n for lesion_label in label_lst:\n im_lesion_data_cur = np.copy(im_lesion_data == lesion_label)\n printv('\\nMeasures on lesion #' + str(lesion_label) + '...', self.verbose, 'normal')\n\n label_idx = self.measure_pd[self.measure_pd.label == lesion_label].index\n self._measure_volume(im_lesion_data_cur, p_lst, label_idx)\n self._measure_length(im_lesion_data_cur, p_lst, label_idx)\n self._measure_diameter(im_lesion_data_cur, p_lst, label_idx)\n\n # compute lesion distribution for each lesion\n if self.path_template is not None:\n self._measure_eachLesion_distribution(lesion_id=lesion_label,\n atlas_data=atlas_data_dct,\n im_vert=im_vert_data,\n im_lesion=im_lesion_data_cur,\n p_lst=p_lst)\n\n if self.path_template is not None:\n # compute total lesion distribution\n self._measure_totLesion_distribution(im_lesion=np.copy(im_lesion_data > 0),\n atlas_data=atlas_data_dct,\n im_vert=im_vert_data,\n p_lst=p_lst)\n\n if self.fname_ref is not None:\n # Compute mean and std value in each labeled lesion\n self._measure_within_im(im_lesion=im_lesion_data, im_ref=Image(self.fname_ref).data, label_lst=label_lst)\n\n def _normalize(self, vect):\n norm = np.linalg.norm(vect)\n return vect / norm\n\n def angle_correction(self):\n im_seg = Image(self.fname_sc)\n nx, ny, nz, nt, px, py, pz, pt = im_seg.dim\n data_seg = im_seg.data\n\n # fit centerline, smooth it and return the first derivative (in physical space)\n _, arr_ctl, arr_ctl_der, _ = get_centerline(im_seg, param=ParamCenterline(), verbose=1)\n x_centerline_deriv, y_centerline_deriv, z_centerline_deriv = arr_ctl_der\n\n self.angles = np.full_like(np.empty(nz), np.nan, dtype=np.double)\n\n # loop across x_centerline_deriv (instead of [min_z_index, max_z_index], which could vary after interpolation)\n for iz in range(x_centerline_deriv.shape[0]):\n # normalize the tangent vector to the centerline (i.e. 
its derivative)\n tangent_vect = self._normalize(np.array(\n [x_centerline_deriv[iz] * px, y_centerline_deriv[iz] * py, pz]))\n\n # compute the angle between the normal vector of the plane and the vector z\n angle = np.arccos(np.vdot(tangent_vect, np.array([0, 0, 1])))\n self.angles[iz] = math.degrees(angle)\n\n def label_lesion(self):\n printv('\\nLabel connected regions of the masked image...', self.verbose, 'normal')\n im = Image(self.fname_mask)\n im_2save = im.copy()\n im_2save.data = label(im.data, connectivity=2)\n im_2save.save(self.fname_label)\n\n self.measure_pd['label'] = [l for l in np.unique(im_2save.data) if l]\n printv('Lesion count = ' + str(len(self.measure_pd['label'])), self.verbose, 'info')\n\n def _orient(self, fname, orientation):\n return Image(fname).change_orientation(orientation).save(fname, mutable=True)\n\n def orient2rpi(self):\n # save input image orientation\n self.orientation = Image(self.fname_mask).orientation\n\n if not self.orientation == 'RPI':\n printv('\\nOrient input image(s) to RPI orientation...', self.verbose, 'normal')\n self._orient(self.fname_mask, 'RPI')\n\n if self.fname_sc is not None:\n self._orient(self.fname_sc, 'RPI')\n if self.fname_ref is not None:\n self._orient(self.fname_ref, 'RPI')\n if self.path_template is not None:\n self._orient(self.path_levels, 'RPI')\n for fname_atlas in self.atlas_roi_lst:\n self._orient(fname_atlas, 'RPI')\n\n def ifolder2tmp(self):\n # copy input image\n if self.fname_mask is not None:\n sct.copy(self.fname_mask, self.tmp_dir)\n self.fname_mask = ''.join(extract_fname(self.fname_mask)[1:])\n else:\n printv('ERROR: No input image', self.verbose, 'error')\n\n # copy seg image\n if self.fname_sc is not None:\n sct.copy(self.fname_sc, self.tmp_dir)\n self.fname_sc = ''.join(extract_fname(self.fname_sc)[1:])\n\n # copy ref image\n if self.fname_ref is not None:\n sct.copy(self.fname_ref, self.tmp_dir)\n self.fname_ref = ''.join(extract_fname(self.fname_ref)[1:])\n\n # copy registered template\n if self.path_template is not None:\n sct.copy(self.path_levels, self.tmp_dir)\n self.path_levels = ''.join(extract_fname(self.path_levels)[1:])\n\n self.atlas_roi_lst = []\n for fname_atlas_roi in os.listdir(self.path_atlas):\n if fname_atlas_roi.endswith('.nii.gz'):\n tract_id = int(fname_atlas_roi.split('_')[-1].split('.nii.gz')[0])\n if tract_id < 36: # Not interested in CSF\n sct.copy(os.path.join(self.path_atlas, fname_atlas_roi), self.tmp_dir)\n self.atlas_roi_lst.append(fname_atlas_roi)\n\n os.chdir(self.tmp_dir) # go to tmp directory\n\n\ndef main(args=None):\n \"\"\"\n Main function\n :param args:\n :return:\n \"\"\"\n # get parser args\n if args is None:\n args = None if sys.argv[1:] else ['--help']\n parser = get_parser()\n arguments = parser.parse_args(args=args)\n\n fname_mask = arguments.m\n fname_sc = arguments.s\n fname_ref = arguments.i\n\n # Path to template\n path_template = arguments.f\n # TODO: check this in the parser\n # if not os.path.isdir(path_template) and os.path.exists(path_template):\n # path_template = None\n # printv(\"ERROR output directory %s is not a valid directory\" % path_template, 1, 'error')\n\n # Output Folder\n path_results = arguments.ofolder\n # if not os.path.isdir(path_results) and os.path.exists(path_results):\n # printv(\"ERROR output directory %s is not a valid directory\" % path_results, 1, 'error')\n if not os.path.exists(path_results):\n os.makedirs(path_results)\n\n # Remove temp folder\n if arguments.r is not None:\n rm_tmp = bool(arguments.r)\n else:\n rm_tmp = 
True\n\n # Verbosity\n verbose = arguments.v\n sct.init_sct(log_level=verbose, update=True) # Update log level\n\n # create the Lesion constructor\n lesion_obj = AnalyzeLeion(fname_mask=fname_mask,\n fname_sc=fname_sc,\n fname_ref=fname_ref,\n path_template=path_template,\n path_ofolder=path_results,\n verbose=verbose)\n\n # run the analyze\n lesion_obj.analyze()\n\n # remove tmp_dir\n if rm_tmp:\n sct.rmtree(lesion_obj.tmp_dir)\n\n printv('\\nDone! To view the labeled lesion file (one value per lesion), type:', verbose)\n if fname_ref is not None:\n printv('fsleyes ' + fname_mask + ' ' + os.path.join(path_results, lesion_obj.fname_label) + ' -cm red-yellow -a 70.0 & \\n', verbose, 'info')\n else:\n printv('fsleyes ' + os.path.join(path_results, lesion_obj.fname_label) + ' -cm red-yellow -a 70.0 & \\n', verbose, 'info')\n\n\nif __name__ == \"__main__\":\n sct.init_sct()\n main()\n",
"id": "6806626",
"language": "Python",
"matching_score": 3.646730422973633,
"max_stars_count": 0,
"path": "scripts/sct_analyze_lesion.py"
},
{
"content": "#!/usr/bin/env python\n#########################################################################################\n#\n# Compute SNR in a given ROI according to different methods presented in Dietrich et al.,\n# Measurement of signal-to-noise ratios in MR images: Influence of multichannel coils,\n# parallel imaging, and reconstruction filters (2007).\n#\n# ---------------------------------------------------------------------------------------\n# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>\n# Authors: <NAME>\n#\n# About the license: see the file LICENSE.TXT\n########################################################################################\n\nfrom __future__ import division, absolute_import\n\nimport sys\nimport numpy as np\nimport os\nimport argparse\nfrom spinalcordtoolbox.image import Image, empty_like\nfrom spinalcordtoolbox.utils import parse_num_list\nimport sct_utils as sct\nfrom spinalcordtoolbox.utils import Metavar, SmartFormatter\n\n\n# PARAMETERS\nclass Param(object):\n # The constructor\n def __init__(self):\n self.almost_zero = np.finfo(float).eps\n\n# PARSER\n# ==========================================================================================\ndef get_parser():\n\n # Initialize the parser\n parser = argparse.ArgumentParser(\n description='Compute SNR using methods described in [Dietrich et al., Measurement of'\n ' signal-to-noise ratios in MR images: Influence of multichannel coils, parallel '\n 'imaging, and reconstruction filters. J Magn Reson Imaging 2007; 26(2): 375-385].',\n add_help=None,\n formatter_class=SmartFormatter,\n prog=os.path.basename(__file__).strip(\".py\"))\n mandatoryArguments = parser.add_argument_group(\"\\nMANDATORY ARGUMENTS\")\n mandatoryArguments.add_argument(\n '-i',\n help='4D data to compute the SNR on (along the 4th dimension). Example: b0s.nii.gz',\n required=False,\n metavar=Metavar.file)\n optional = parser.add_argument_group(\"\\nOPTIONAL ARGUMENTS\")\n optional.add_argument(\n \"-h\",\n \"--help\",\n action=\"help\",\n help=\"Show this help message and exit\")\n optional.add_argument(\n '-m',\n help='Binary (or weighted) mask within which SNR will be averaged. Example: dwi_moco_mean_seg.nii.gz',\n metavar=Metavar.file,\n default='')\n optional.add_argument(\n '-method',\n help='R|Method to use to compute the SNR:\\n'\n ' diff (default): Substract two volumes (defined by -vol) and estimate noise variance within the ROI (flag -m is required).\\n'\n ' mult: Estimate noise variance over time across volumes specified with -vol.',\n choices=('diff', 'mult'),\n default='diff')\n optional.add_argument(\n '-vol',\n help='Volumes to compute SNR from. Separate with \",\" (Example: -vol 0,1), or select range '\n 'using \":\" (Example: -vol 2:50). By default, all volumes in series are selected.',\n metavar=Metavar.str,\n default='')\n optional.add_argument(\n '-r',\n type=int,\n help='Remove temporary files.',\n default=1,\n choices=(0, 1))\n optional.add_argument(\n '-v',\n help=\"Verbose. 
0: nothing, 1: basic, 2: extended.\",\n type=int,\n choices=(0, 1, 2),\n default=1)\n\n return parser\n\n\ndef weighted_avg_and_std(values, weights):\n \"\"\"\n Return the weighted average and standard deviation.\n values, weights -- Numpy ndarrays with the same shape.\n Source: https://stackoverflow.com/questions/2413522/weighted-standard-deviation-in-numpy\n \"\"\"\n average = np.average(values, weights=weights)\n # Fast and numerically precise:\n variance = np.average((values - average) ** 2, weights=weights)\n return (average, np.sqrt(variance))\n\n\ndef main():\n\n # Default params\n param = Param()\n\n # Get parser info\n parser = get_parser()\n arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])\n fname_data = arguments.i\n if arguments.m is not None:\n fname_mask = arguments.m\n else:\n fname_mask = ''\n method = arguments.method\n if arguments.vol is not None:\n index_vol_user = arguments.vol\n else:\n index_vol_user = ''\n verbose = arguments.v\n sct.init_sct(log_level=verbose, update=True) # Update log level\n\n # Check parameters\n if method == 'diff':\n if not fname_mask:\n sct.printv('You need to provide a mask with -method diff. Exit.', 1, type='error')\n\n # Load data and orient to RPI\n im_data = Image(fname_data).change_orientation('RPI')\n data = im_data.data\n if fname_mask:\n mask = Image(fname_mask).change_orientation('RPI').data\n\n # Retrieve selected volumes\n if index_vol_user:\n index_vol = parse_num_list(index_vol_user)\n else:\n index_vol = range(data.shape[3])\n\n # Make sure user selected 2 volumes with diff method\n if method == 'diff':\n if not len(index_vol) == 2:\n sct.printv('Method \"diff\" should be used with exactly two volumes (specify with flag \"-vol\").', 1, 'error')\n\n # Compute SNR\n # NB: \"time\" is assumed to be the 4th dimension of the variable \"data\"\n if method == 'mult':\n # Compute mean and STD across time\n data_mean = np.mean(data[:, :, :, index_vol], axis=3)\n data_std = np.std(data[:, :, :, index_vol], axis=3, ddof=1)\n # Generate mask where std is different from 0\n mask_std_nonzero = np.where(data_std > param.almost_zero)\n snr_map = np.zeros_like(data_mean)\n snr_map[mask_std_nonzero] = data_mean[mask_std_nonzero] / data_std[mask_std_nonzero]\n # Output SNR map\n fname_snr = sct.add_suffix(fname_data, '_SNR-' + method)\n im_snr = empty_like(im_data)\n im_snr.data = snr_map\n im_snr.save(fname_snr, dtype=np.float32)\n # Output non-zero mask\n fname_stdnonzero = sct.add_suffix(fname_data, '_mask-STD-nonzero' + method)\n im_stdnonzero = empty_like(im_data)\n data_stdnonzero = np.zeros_like(data_mean)\n data_stdnonzero[mask_std_nonzero] = 1\n im_stdnonzero.data = data_stdnonzero\n im_stdnonzero.save(fname_stdnonzero, dtype=np.float32)\n # Compute SNR in ROI\n if fname_mask:\n mean_in_roi = np.average(data_mean[mask_std_nonzero], weights=mask[mask_std_nonzero])\n std_in_roi = np.average(data_std[mask_std_nonzero], weights=mask[mask_std_nonzero])\n snr_roi = mean_in_roi / std_in_roi\n # snr_roi = np.average(snr_map[mask_std_nonzero], weights=mask[mask_std_nonzero])\n\n elif method == 'diff':\n data_2vol = np.take(data, index_vol, axis=3)\n # Compute mean in ROI\n data_mean = np.mean(data_2vol, axis=3)\n mean_in_roi = np.average(data_mean, weights=mask)\n data_sub = np.subtract(data_2vol[:, :, :, 1], data_2vol[:, :, :, 0])\n _, std_in_roi = weighted_avg_and_std(data_sub, mask)\n # Compute SNR, correcting for Rayleigh noise (see eq. 
7 in Dietrich et al.)\n snr_roi = (2/np.sqrt(2)) * mean_in_roi / std_in_roi\n\n # Display result\n if fname_mask:\n sct.printv('\\nSNR_' + method + ' = ' + str(snr_roi) + '\\n', type='info')\n\n\n# START PROGRAM\n# ==========================================================================================\nif __name__ == \"__main__\":\n sct.init_sct()\n # call main function\n main()\n",
"id": "8312152",
"language": "Python",
"matching_score": 0.963283896446228,
"max_stars_count": 0,
"path": "scripts/sct_compute_snr.py"
},
{
"content": "#!/usr/bin/env python\n#########################################################################################\n#\n# Concatenate bval files in time.\n#\n# ---------------------------------------------------------------------------------------\n# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>\n# Author: <NAME>\n#\n# About the license: see the file LICENSE.TXT\n#########################################################################################\n\nfrom __future__ import absolute_import\n\nimport os, sys, argparse\n\nimport sct_utils as sct\nfrom spinalcordtoolbox.utils import Metavar, SmartFormatter\n\n# PARSER\n# ==========================================================================================\n\n\ndef get_parser():\n # Initialize the parser\n\n parser = argparse.ArgumentParser(\n description='Concatenate bval files in time.',\n formatter_class=SmartFormatter,\n add_help=None,\n prog=os.path.basename(__file__).strip(\".py\"))\n mandatory = parser.add_argument_group(\"\\nMANDATORY ARGUMENTS\")\n mandatory.add_argument(\n \"-i\",\n help='List of the bval files to concatenate. Example: dmri_b700.bval dmri_b2000.bval',\n nargs='+',\n metavar=Metavar.file,\n required=True)\n optional = parser.add_argument_group(\"\\nOPTIONAL ARGUMENTS\")\n optional.add_argument(\n \"-h\",\n \"--help\",\n action=\"help\",\n help=\"Show this help message and exit\")\n optional.add_argument(\n \"-o\",\n help='Output file with bvals merged. Example: dmri_b700_b2000_concat.bval',\n metavar=Metavar.file)\n\n return parser\n\n\n# MAIN\n# ==========================================================================================\ndef main():\n # Get parser info\n parser = get_parser()\n arguments = parser.parse_args(args=None if sys.argv[1:] else ['--help'])\n fname_bval_list = arguments.i\n # Build fname_out\n if arguments.o is not None:\n fname_out = arguments.o\n else:\n path_in, file_in, ext_in = sct.extract_fname(fname_bval_list[0])\n fname_out = path_in + 'bvals_concat' + ext_in\n\n # Open bval files and concatenate\n bvals_concat = ''\n # for file_i in fname_bval_list:\n # f = open(file_i, 'r')\n # for line in f:\n # bvals_concat += line\n # f.close()\n from dipy.data.fetcher import read_bvals_bvecs\n for i_fname in fname_bval_list:\n bval_i, bvec_i = read_bvals_bvecs(i_fname, None)\n bvals_concat += ' '.join(str(v) for v in bval_i)\n bvals_concat += ' '\n\n # Write new bval\n new_f = open(fname_out, 'w')\n new_f.write(bvals_concat)\n new_f.close()\n\n\n# START PROGRAM\n# ==========================================================================================\nif __name__ == \"__main__\":\n sct.init_sct()\n # call main function\n main()\n",
"id": "6458936",
"language": "Python",
"matching_score": 0.5403174161911011,
"max_stars_count": 0,
"path": "scripts/sct_dmri_concat_bvals.py"
}
] | 2.071885 |
thiagopereirasilva | [
{
"content": "from __future__ import print_function\nimport requests\nimport json\nimport uuid\nimport cv2\nfrom datetime import date\n\naddr = 'http://localhost:5000'\ntest_url = addr + '/images/upload'\nimage_name = 'image_teste.jpg'\nuuid_code = str(uuid.uuid1())\nmyDescription = 'Photo description'\ntoday = str(date.today())\nauthor = '<NAME>'\ncounter = 1\nfor x in range(7):\n\n # prepare headers for http request\n content_type = 'image/jpeg'\n\n headers = {'content-type': content_type,\n 'phone_img_name': uuid_code + \"_\" + str(counter) + \".jpg\",\n 'phone_UUID': uuid_code,\n 'phone_description': myDescription,\n 'phone_date': today,\n 'phone_calibration': 'myCalibration',\n 'phone_author': author}\n counter += 1\n img = cv2.imread(image_name)\n # encode image as jpeg\n _, img_encoded = cv2.imencode('.jpg', img)\n # send http request with image and receive response\n response = requests.post(\n test_url, data=img_encoded.tostring(), headers=headers)\n # decode response\n print(json.loads(response.text))\n ",
"id": "5708343",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "client.py"
},
{
"content": "from flask import Flask, request, Response, url_for, send_file, send_from_directory, safe_join, abort\nfrom flask_pymongo import PyMongo\nfrom PIL import Image\nfrom zipfile import ZipFile\nimport numpy as np\nimport pymongo\nimport sys\nimport getopt\nimport glob\nimport piexif\nimport os\nimport werkzeug\nimport jsonpickle\nimport cv2\nimport base64\nimport datetime\nimport time\nimport json\n\n# Read coniguration file\nip = ''\nport = ''\ndatabase = ''\ndatabase_collection = ''\nwith open('config.json') as json_file:\n data = json.load(json_file)\n ip = data['server_ip']\n port = data['server_port']\n database = data['database_url']\n database_collection = data['database_collection']\n\n# Initialize the Flask application\napp = Flask(__name__)\napp.config[\"CLIENT_IMAGES\"] = \"/home/thiago/Desktop/Workspace/PhotoSphere/download\"\napp.config[\"HOST\"] = \"http://10.7.128.14:5000\"\n# app.config[\"HOST\"] = \"http://\" + str(ip) + \":\" + str(port)\n\n\n# Initialize Mongo connector\n# client = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n# db = client[\"photosphere\"]\nclient = pymongo.MongoClient(database)\ndb = client[database_collection]\n\n\ndef procurar_imagens(pasta_fonte='./download'):\n # arquivos = os.listdir(pasta_fonte)\n arquivos = glob.glob(os.path.join(pasta_fonte, '*.jpeg'))\n\n if arquivos:\n for arquivo in arquivos:\n if (arquivo.endswith(\".jpeg\") != True):\n arquivos.remove(arquivo)\n else:\n print('[Alerta]\\tNenhum arquivo JPEG encontrado em \"{0}\"!'.format(\n pasta_fonte))\n return arquivos\n\n\ndef corrigir_imagens(imagens, pasta_destino=''):\n for imagem in imagens:\n try:\n img = Image.open(imagem)\n except Exception:\n print('[Erro]\\t\\tImpossivel ler o arquivo \"{0}\"'.format(imagem))\n sys.exit(2)\n\n exif_dict = piexif.load(imagem)\n # exif_dict = piexif.load(img.info[\"exif\"])\n # exif_dict = img._getexif() //nao funciona\n # print('[Info]\\t\\tEXIF')\n # print(exif_dict)\n\n arquivo = os.path.basename(imagem)\n\n try:\n print('[Info]\\t\\tAdding EXIF in \"{0}\"'.\n format(arquivo))\n exif_dict['Exif'][piexif.ExifIFD.FNumber] = (71, 10)\n except Exception:\n print('[Erro]\\t\\tError ao tentar adicionar informacao no arquivo \"{0}\"'.\n format(imagem))\n sys.exit(2)\n\n exif_bytes = piexif.dump(exif_dict)\n\n if not os.path.exists(pasta_destino):\n os.mkdir(pasta_destino)\n\n nova_imagem = os.path.join(pasta_destino, arquivo)\n\n print('[Info]\\t\\tSalving file \"{0}\"'.\n format(nova_imagem))\n\n img.save(nova_imagem, \"jpeg\", exif=exif_bytes)\n\n\n@app.route('/images/hdr/<uuid_code>/<calibration>', methods=['GET'])\ndef generate_hdr(uuid_code, calibration):\n print('[Info]\\tCalibrating the following images')\n path = os.getcwd()\n path = path + '/download/' + uuid_code\n # Findding for hdr_image\n customers = db[\"imageset\"]\n imageset = customers.find_one(uuid_code)\n if imageset:\n print(\"[Info]\\t\\tThe imageset \" + uuid_code + ' already exist.')\n if 'hdr_image' in imageset:\n hdr_path = \"download/\" + uuid_code + \"/\" + imageset['hdr_image']\n return send_file(hdr_path, mimetype=\"image/gif\")\n else:\n print(\"[Info]\\t\\tGenerating HDR image for imageset \" + uuid_code)\n\n imagens = procurar_imagens(path)\n images_path = ''\n for img in imagens:\n images_path += \" \" + img\n\n corrigir_imagens(imagens, path)\n\n print('[Info]\\tRunning hdrgen')\n\n # call hdrgen\n hdr_path = \"download/\" + uuid_code + \"/\" + uuid_code + \"_output.jpg\"\n \n print(\"./hdrgen/hdrgen -o \" + hdr_path + \" \" + images_path)\n \n 
os.popen('./hdrgen/hdrgen -o ' + hdr_path + \" \" + images_path)\n\n query = {\"_id\": uuid_code}\n value = {\"$set\": {\"hdr_image\": uuid_code + \"_output.jpg\"}}\n customers.update_one(query, value)\n return send_file(hdr_path, mimetype=\"image/gif\")\n\n\n@app.route('/images/upload', methods=['POST'])\ndef upload():\n print(\"[Info]\\tReceived a request\")\n file_name = ''\n uuid_code = ''\n\n # get current directory\n path = os.getcwd()\n if 'phone_img_name' in request.headers:\n file_name = request.headers.get('phone_img_name')\n uuid_code = request.headers.get('phone_UUID')\n\n # nparr = np.fromstring(request.data, np.uint8)\n # decode image\n array_img = base64.b64decode(request.data)\n\n # img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n path = path + '/download/' + uuid_code\n if (os.path.isdir(path) == False):\n os.mkdir(path)\n print(\"[Info]\\t\\tSaving image <\" + file_name + \"> belongs to ImageSet <\" +\n uuid_code + \"> \\n on directory \\n\" + path)\n\n # cv2.imwrite(path +'/'+file_name, img)\n with open(path+'/'+file_name, 'wb') as f_output:\n f_output.write(array_img)\n\n response = {'message': 'created'}\n print(\"[Info]\\t\\tSend response: \" + str(response))\n response_pickled = jsonpickle.encode(response)\n return Response(response=response_pickled, status=201, mimetype=\"application/json\")\n\n\n# docker run -d -p 27017:27017 -p 28017:28017 mongo\n@app.route('/imageset', methods=['POST'])\ndef create_imageset():\n print(\"[Info]\\tSalving imageSet\")\n content = request.json\n uid = content['uuid']\n\n # getting images paths\n path = os.getcwd()\n path = path + '/download/' + uid\n images_path = procurar_imagens(path)\n images_path_str = []\n for img in images_path:\n arr = img.split(\"/\")\n images_path_str.append(arr[len(arr)-1])\n\n # getting actual time (long format)\n actual_time = int(round(time.time()*1000))\n\n doc = {'_id': uid, 'uuid': uid, 'label': content['label'],\n 'description': content['description'],\n 'author': content['author'], 'created_date': actual_time}\n doc['images_paths'] = images_path_str\n\n customers = db[\"imageset\"]\n ids = customers.insert(doc)\n print('[Info]\\t\\tImageset ' + str(ids) + ' created')\n resp = {'message': 'Imageset ' + str(ids)+' created'}\n response_pickled = jsonpickle.encode(resp)\n\n return Response(response=response_pickled, status=201, mimetype=\"application/json\")\n\n\n@app.route('/imageset', methods=['GET'])\ndef getAllImageSet():\n print(\"[Info]\\t\\tGetting all imageSet\")\n all_imageset = []\n customers = db[\"imageset\"]\n for x in customers.find():\n aux = x['created_date']\n x['created_date'] = str(aux)\n # if 'hdr_image' in x:\n # aux2 = x['hdr_image']\n # If not exist hdr_image, then will be created by hdr service\n x['hdr_image'] = app.config[\"HOST\"] + \\\n \"/images/hdr/\" + x['_id'] + \"/999\"\n pathService = []\n for path in x['images_paths']:\n img_url = app.config[\"HOST\"] + \"/images/\" + x['_id'] + \"/\" + path\n pathService.append(img_url)\n\n x['images_paths'] = pathService\n\n all_imageset.append(x)\n\n # print(all_imageset)\n response_pickled = jsonpickle.encode(all_imageset)\n return Response(response=response_pickled, status=200, mimetype=\"application/json\")\n\n\n@app.route('/images/<uuid_code>/<image>', methods=['GET'])\ndef uploadTest(uuid_code, image):\n print(\"[Info]\\t\\tSending image\")\n image_path = \"download/\"+uuid_code+\"/\"+image\n return send_file(image_path, mimetype='image/jpg')\n\n\n# @app.route(\"/image/<image_name>\", methods=['GET'])\n# def 
get_image(image_name):\n# try:\n# return send_from_directory(app.config[\"CLIENT_IMAGES\"], filename=image_name, as_attachment=True)\n# except:\n# abort(404)\n\n\n# start flask app\napp.run(host=\"0.0.0.0\", port=5000)\n",
"id": "7970571",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "server.py"
}
] | 0 |
becorey | [
{
"content": "import sys\nimport time\nimport math\nimport threading\nis_py2 = sys.version[0] == '2'\nif is_py2:\n\timport Queue as queue\nelse:\n\timport queue as queue\nimport RPi.GPIO as GPIO\nimport logging\nfrom ws2801 import ws2801\n\nlogging.basicConfig(level=logging.DEBUG, format='[%(levelname)s (%(threadName)-10s) %(message)s',)\n\nGPIO.setmode(GPIO.BCM)\n\nclass Pathlight(object):\n\tdef __init__(self, sensorPins):\n\t\tself.sendsorPins = sensorPins\n\t\tself.led = ws2801(queue.Queue())\n\n\t\tself.initSensors(sensorPins)\n\t\treturn\n\n\tdef initSensors(self, sensorPins):\n\t\tself.sensors = []\n\t\tfor pin in sensorPins\t:\n\t\t\tself.sensors.append(MotionSensor(self, pin))\n\t\treturn\n\n\n\tdef shutdown(self):\n\t\tself.led.brightness_decrease(.01, 10)\n\t\tGPIO.cleanup()\n\t\treturn\n\n\nclass MotionSensor(object):\n\tdef __init__(self, parent, pin):\n\t\tself.parent = parent\n\t\tself.pin = pin\n\t\tGPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\t\tGPIO.add_event_detect(self.pin, GPIO.BOTH, callback=self.motionSensed)\n\t\treturn\n\n\tdef motionSensed(self, pin):\n\t\tval = GPIO.input(pin)\n\t\tif val:\n\t\t\tprint('++++++++++ motion entered ++++++++++')\n\t\t\t#self.parent.leds[0].queue.put([100, 0, 100])\n\t\t\tself.parent.led.fadeIn(rgb=[255,255,255], transTime = 4.0, steps = 50)\n\t\telse:\n\t\t\tprint('---------- motion exited')\n\t\t\t#self.parent.leds[0].queue.put([100, 100, 100])\n\t\t\tself.parent.led.brightness_decrease(transTime = 2.0, steps = 20)\n\t\t#print(time.time())\n\t\treturn True\n\n\ndef boundedValue(n, smallest, largest):\n\treturn max(smallest, min(n, largest))\n\n\n\nif __name__ == '__main__':\n\n\tmotionSensors = [26]\n\tpa = Pathlight(motionSensors)\n\n\ttry:\n\t\ti = 0\n\t\twhile True:\n\t\t\ti = i+1\n\t\t\ttime.sleep(2)\n\texcept KeyboardInterrupt:\n\t\tprint('Exiting')\n\n\tfinally:\n\t\tpa.shutdown()\n",
"id": "12096657",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "pathlight.py"
},
{
"content": "import sys\nimport time\nimport RPi.GPIO as GPIO\nimport threading\nis_py2 = sys.version[0] == '2'\nif is_py2:\n\timport Queue as queue\nelse:\n\timport queue as queue\n\n# Import the WS2801 module.\nimport Adafruit_WS2801\nimport Adafruit_GPIO.SPI as SPI\n\nmtime = lambda: int(round(time.time()*1000))\n\nclass ws2801(threading.Thread):\n\tdef __init__(self, queue, PIXEL_COUNT = 64):\n\t\tself.PIXEL_COUNT = PIXEL_COUNT\n\t\tPIXEL_CLOCK = 21\n\t\tPIXEL_DOUT = 20\n\t\tSPI_PORT = 0\n\t\tSPI_DEVICE = 0\n\t\t# hardware SPI.. not working\n\t\t#self.pixels = Adafruit_WS2801.WS2801Pixels(self.PIXEL_COUNT, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE), gpio=GPIO)\n\t\t# software SPI\n\t\tself.pixels = Adafruit_WS2801.WS2801Pixels(PIXEL_COUNT, clk=PIXEL_CLOCK, do=PIXEL_DOUT)\n\t\tself.pixels.clear()\n\t\tself.pixels.show()\n\t\treturn\n\n\t# Define the wheel function to interpolate between different hues.\n\tdef wheel(self, pos):\n\t\tif pos < 85:\n\t\t\treturn Adafruit_WS2801.RGB_to_color(pos * 3, 255 - pos * 3, 0)\n\t\telif pos < 170:\n\t\t\tpos -= 85\n\t\t\treturn Adafruit_WS2801.RGB_to_color(255 - pos * 3, 0, pos * 3)\n\t\telse:\n\t\t\tpos -= 170\n\t\t\treturn Adafruit_WS2801.RGB_to_color(0, pos * 3, 255 - pos * 3)\n\n\t# Define rainbow cycle function to do a cycle of all hues.\n\tdef rainbow_cycle_successive(self, wait=0.1):\n\t\tfor i in range(self.pixels.count()):\n\t\t\t# tricky math! we use each pixel as a fraction of the full 96-color wheel\n\t\t\t# (thats the i / strip.numPixels() part)\n\t\t\t# Then add in j which makes the colors go around per pixel\n\t\t\t# the % 96 is to make the wheel cycle around\n\t\t\tself.pixels.set_pixel(i, self.wheel(((i * 256 // self.pixels.count())) % 256) )\n\t\t\tself.pixels.show()\n\t\t\tif wait > 0:\n\t\t\t\ttime.sleep(wait)\n\n\tdef rainbow_cycle(self, wait=0.005):\n\t\tfor j in range(256): # one cycle of all 256 colors in the wheel\n\t\t\tfor i in range(self.pixels.count()):\n\t\t\t\tself.pixels.set_pixel(i, self.wheel(((i * 256 // self.pixels.count()) + j) % 256) )\n\t\t\tself.pixels.show()\n\t\t\tif wait > 0:\n\t\t\t\ttime.sleep(wait)\n\n\tdef rainbow_colors(self, wait=0.05):\n\t\tfor j in range(256): # one cycle of all 256 colors in the wheel\n\t\t\tfor i in range(self.pixels.count()):\n\t\t\t\tself.pixels.set_pixel(i, self.wheel(((256 // self.pixels.count() + j)) % 256) )\n\t\t\tself.pixels.show()\n\t\t\tif wait > 0:\n\t\t\t\ttime.sleep(wait)\n\n\tdef brightness_decrease(self, transTime=1.0, steps=50):\n\t\tdt = transTime/float(steps)\n\t\t#first build an array for the rgb transition values, for each pixel\n\t\til = self.pixels.count()\n\t\trt = [None]*il\n\t\tgt = [None]*il\n\t\tbt = [None]*il\n\t\tfor i in range(il):\n\t\t\tr, g, b = self.pixels.get_pixel_rgb(i)\n\t\t\t#print(rt)\n\t\t\t#print(i)\n\t\t\t#print(rt[i])\n\t\t\trt[i] = [(float(j)/float(steps))*r for j in range(steps+1)]\n\t\t\tgt[i] = [(float(j)/float(steps))*g for j in range(steps+1)]\n\t\t\tbt[i] = [(float(j)/float(steps))*b for j in range(steps+1)]\n\t\t#then step through the arrays\n\t\tfor j in range(steps-1, -1, -1):\n\t\t\tfor i in range(il):\n\t\t\t\t#print(\"brightness_decrease, rgb = \"+str(int(rt[i][j]))+\",\"+str(int(gt[i][j]))+\",\"+str(int(bt[i][j])))\n\t\t\t\tself.pixels.set_pixel(i, Adafruit_WS2801.RGB_to_color( int(rt[i][j]), int(gt[i][j]), int(bt[i][j]) ))\n\t\t\tself.pixels.show()\n\t\t\ttime.sleep(dt)\n\n\tdef fadeIn(self, rgb=[255,255,255], transTime = 4.0, steps=50):\n\t\tdt = transTime/float(steps)\n\t\tN = self.pixels.count()\n\t\tfor i in 
range(steps):\n\t\t\tfor Ni in range (N):\n\t\t\t\tr = int(rgb[0]*i/float(steps))\n\t\t\t\tg = int(rgb[1]*i/float(steps))\n\t\t\t\tb = int(rgb[2]*i/float(steps))\n\t\t\t\tself.pixels.set_pixel(Ni, Adafruit_WS2801.RGB_to_color(r,g,b))\n\t\t\tself.pixels.show()\n\t\t\ttime.sleep(dt)\n\n\tdef appear_from_back(self, color=(255, 0, 255)):\n\t\tpos = 0\n\t\tfor i in range(self.pixels.count()):\n\t\t\tfor j in reversed(range(i, self.pixels.count())):\n\t\t\t\tself.pixels.clear()\n\t\t\t\t# first set all pixels at the begin\n\t\t\t\tfor k in range(i):\n\t\t\t\t\tself.pixels.set_pixel(k, Adafruit_WS2801.RGB_to_color( color[0], color[1], color[2] ))\n\t\t\t\t# set then the pixel at position j\n\t\t\t\tself.pixels.set_pixel(j, Adafruit_WS2801.RGB_to_color( color[0], color[1], color[2] ))\n\t\t\t\tself.pixels.show()\n\t\t\t\ttime.sleep(0.01)\n\n\tdef fadeFromEnd(self, transTime = 4.0, targetColor=(255,200,100), rootPoint=0):\n\t\tN = self.pixels.count()\n\t\tNsteps = 150\n\t\tdt = transTime / float(Nsteps)\n\t\t#print(\"should take \"+str(dt*Nsteps)+\" seconds\")\n\n\t\tfor i in range(Nsteps):\n\t\t\tfor x in range(N):\n\t\t\t\trgb = [0,0,0]\n\t\t\t\tfor j in range(len(rgb)):\n\t\t\t\t\t#m is slope of the color gradient. should be negative\n\t\t\t\t\t#strongly affects the rollout appearance\n\t\t\t\t\t#smaller abs(m) = faster rollout\n\t\t\t\t\tm=-0.75\n\t\t\t\t\tb = (i/float(Nsteps)) * (targetColor[j]-m*N)\n\t\t\t\t\trgb[j] = int(m*x+b)\n\t\t\t\t\tif rgb[j]<=0:\n\t\t\t\t\t\trgb[j]=0\n\t\t\t\t\tif rgb[j]>=targetColor[j]:\n\t\t\t\t\t\trgb[j]=targetColor[j]\n\t\t\t\t#print(rgb)\n\t\t\t\tself.pixels.set_pixel(x, Adafruit_WS2801.RGB_to_color(rgb[0], rgb[1], rgb[2]))\n\t\t\tself.pixels.show() # contains a sleep(0.002), Nsteps affects total time\n\t\t\ttime.sleep(dt)\n\n\nif __name__ == \"__main__\":\n\tprint(\"Starting lightshow\")\n\n\ttry:\n\t\tledstrip = ws2801(queue.Queue())\n\n\t\tledstrip.rainbow_cycle_successive(0.1)\n\t\t#ledstrip.rainbow_cycle(0.01)\n\t\t#ledstrip.brightness_decrease()\n\t\t#ledstrip.rainbow_colors()\n\t\tstarttime = mtime()\n\t\tledstrip.fadeFromEnd()\n\t\tendtime = mtime()\n\t\tlengthtime = (endtime - starttime)/1000.0\n\t\tprint(str(lengthtime)+\" seconds\")\n\t\t#ledstrip.brightness_decrease()\n\n\texcept KeyboardInterrupt:\n\t\tprint('Exiting')\n\tfinally:\n\t\tledstrip.brightness_decrease(1.0, 40)\n",
"id": "5947625",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "ws2801.py"
}
] | 0 |
mmamun1 | [
{
"content": "# Copyright 2019-2021 The AmpliGraph Authors. All Rights Reserved.\n#\n# This file is Licensed under the Apache License, Version 2.0.\n# A copy of the Licence is available in LICENCE, or at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\nimport tensorflow as tf\nimport abc\nimport logging\n\nimport math\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nOPTIMIZER_REGISTRY = {}\n\n\ndef register_optimizer(name, external_params=[], class_params={}):\n def insert_in_registry(class_handle):\n OPTIMIZER_REGISTRY[name] = class_handle\n class_handle.name = name\n OPTIMIZER_REGISTRY[name].external_params = external_params\n OPTIMIZER_REGISTRY[name].class_params = class_params\n return class_handle\n\n return insert_in_registry\n\n\n# Default learning rate for the optimizers\nDEFAULT_LR = 0.0005\n\n# Default momentum for the optimizers\nDEFAULT_MOMENTUM = 0.9\n\nDEFAULT_DECAY_CYCLE = 0\n\nDEFAULT_DECAY_CYCLE_MULTIPLE = 1\n\nDEFAULT_LR_DECAY_FACTOR = 2\n\nDEFAULT_END_LR = 1e-8\n\nDEFAULT_SINE = False\n\n\nclass Optimizer(abc.ABC):\n \"\"\"Abstract class for optimizer .\n \"\"\"\n\n name = \"\"\n external_params = []\n class_params = {}\n\n def __init__(self, optimizer_params, batches_count, verbose):\n \"\"\"Initialize the Optimizer\n\n Parameters\n ----------\n optimizer_params : dict\n Consists of key-value pairs. The initializer will check the keys to get the corresponding params.\n batches_count: int\n number of batches in an epoch\n verbose : bool\n Enable/disable verbose mode\n \"\"\"\n\n self.verbose = verbose\n self._optimizer_params = {}\n self._init_hyperparams(optimizer_params)\n self.batches_count = batches_count\n\n def _display_params(self):\n \"\"\"Display the parameter values\n \"\"\"\n logger.info('\\n------ Optimizer -----')\n logger.info('Name : {}'.format(self.name))\n for key, value in self._optimizer_params.items():\n logger.info('{} : {}'.format(key, value))\n\n def _init_hyperparams(self, hyperparam_dict):\n \"\"\" Initializes the hyperparameters needed by the algorithm.\n\n Parameters\n ----------\n hyperparam_dict : dictionary\n Consists of key value pairs. The optimizer will check the keys to get the corresponding params\n \"\"\"\n\n self._optimizer_params['lr'] = hyperparam_dict.get('lr', DEFAULT_LR)\n if self.verbose:\n self._display_params()\n\n def minimize(self, loss):\n \"\"\"Create an optimizer to minimize the model loss\n\n Parameters\n ----------\n loss: tf.Tensor\n Node which needs to be evaluated for computing the model loss.\n\n Returns\n -------\n train: tf.Operation\n Node that needs to be evaluated for minimizing the loss during training\n \"\"\"\n raise NotImplementedError('Abstract Method not implemented!')\n\n def update_feed_dict(self, feed_dict, batch_num, epoch_num):\n \"\"\"Fills values of placeholders created by the optimizers.\n\n Parameters\n ----------\n feed_dict : dict\n Dictionary that would be passed while optimizing the model loss to sess.run.\n batch_num: int\n current batch number\n epoch_num: int\n current epoch number\n \"\"\"\n raise NotImplementedError('Abstract Method not implemented!')\n\n\n@register_optimizer(\"adagrad\", ['lr'])\nclass AdagradOptimizer(Optimizer):\n \"\"\"Wrapper around adagrad optimizer\n \"\"\"\n\n def __init__(self, optimizer_params, batches_count, verbose=False):\n \"\"\"Initialize the Optimizer\n\n Parameters\n ----------\n optimizer_params : dict\n Consists of key-value pairs. The optimizer will check the keys to get the corresponding params:\n\n - **'lr'**: (float). 
Learning Rate (default: 0.0005)\n\n Example: ``optimizer_params={'lr': 0.001}``\n batches_count: int\n number of batches in an epoch\n verbose : bool\n Enable/disable verbose mode\n \"\"\"\n\n super(AdagradOptimizer, self).__init__(optimizer_params, batches_count, verbose)\n\n def minimize(self, loss):\n \"\"\"Create an optimizer to minimize the model loss\n\n Parameters\n ----------\n loss: tf.Tensor\n Node which needs to be evaluated for computing the model loss.\n\n Returns\n -------\n train: tf.Operation\n Node that needs to be evaluated for minimizing the loss during training\n \"\"\"\n self.optimizer = tf.train.AdagradOptimizer(learning_rate=self._optimizer_params['lr'])\n train = self.optimizer.minimize(loss)\n return train\n\n def update_feed_dict(self, feed_dict, batch_num, epoch_num):\n \"\"\"Fills values of placeholders created by the optimizers.\n\n Parameters\n ----------\n feed_dict : dict\n Dictionary that would be passed while optimizing the model loss to sess.run.\n batch_num: int\n current batch number\n epoch_num: int\n current epoch number\n \"\"\"\n return\n\n\n@register_optimizer(\"adam\", ['lr'])\nclass AdamOptimizer(Optimizer):\n \"\"\"Wrapper around Adam Optimizer\n \"\"\"\n\n def __init__(self, optimizer_params, batches_count, verbose=False):\n \"\"\"Initialize the Optimizer\n\n Parameters\n ----------\n optimizer_params : dict\n Consists of key-value pairs. The optimizer will check the keys to get the corresponding params:\n\n - **'lr'**: (float). Learning Rate (default: 0.0005)\n\n Example: ``optimizer_params={'lr': 0.001}``\n batches_count: int\n number of batches in an epoch\n verbose : bool\n Enable/disable verbose mode\n \"\"\"\n\n super(AdamOptimizer, self).__init__(optimizer_params, batches_count, verbose)\n\n def minimize(self, loss):\n \"\"\"Create an optimizer to minimize the model loss\n\n Parameters\n ----------\n loss: tf.Tensor\n Node which needs to be evaluated for computing the model loss.\n\n Returns\n -------\n train: tf.Operation\n Node that needs to be evaluated for minimizing the loss during training\n \"\"\"\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self._optimizer_params['lr'])\n\n train = self.optimizer.minimize(loss)\n return train\n\n def update_feed_dict(self, feed_dict, batch_num, epoch_num):\n \"\"\"Fills values of placeholders created by the optimizers.\n\n Parameters\n ----------\n feed_dict : dict\n Dictionary that would be passed while optimizing the model loss to sess.run.\n batch_num: int\n current batch number\n epoch_num: int\n current epoch number\n \"\"\"\n return\n\n\n@register_optimizer(\"momentum\", ['lr', 'momentum'])\nclass MomentumOptimizer(Optimizer):\n \"\"\"Wrapper around Momentum Optimizer\n \"\"\"\n\n def __init__(self, optimizer_params, batches_count, verbose=False):\n \"\"\"Initialize the Optimizer\n\n Parameters\n ----------\n optimizer_params : dict\n Consists of key-value pairs. The optimizer will check the keys to get the corresponding params:\n\n - **'lr'**: (float). Learning Rate (default: 0.0005)\n - **'momentum'**: (float). 
Momentum (default: 0.9)\n\n Example: ``optimizer_params={'lr': 0.001, 'momentum':0.90}``\n batches_count: int\n number of batches in an epoch\n verbose : bool\n Enable/disable verbose mode\n \"\"\"\n\n super(MomentumOptimizer, self).__init__(optimizer_params, batches_count, verbose)\n\n def _init_hyperparams(self, hyperparam_dict):\n \"\"\" Initializes the hyperparameters needed by the algorithm.\n\n Parameters\n ----------\n hyperparam_dict : dictionary\n Consists of key value pairs. The optimizer will check the keys to get the corresponding params\n \"\"\"\n\n self._optimizer_params['lr'] = hyperparam_dict.get('lr', DEFAULT_LR)\n self._optimizer_params['momentum'] = hyperparam_dict.get('momentum', DEFAULT_MOMENTUM)\n\n if self.verbose:\n self._display_params()\n\n def minimize(self, loss):\n \"\"\"Create an optimizer to minimize the model loss\n\n Parameters\n ----------\n loss: tf.Tensor\n Node which needs to be evaluated for computing the model loss.\n\n Returns\n -------\n train: tf.Operation\n Node that needs to be evaluated for minimizing the loss during training\n \"\"\"\n self.optimizer = tf.train.MomentumOptimizer(learning_rate=self._optimizer_params['lr'],\n momentum=self._optimizer_params['momentum'])\n\n train = self.optimizer.minimize(loss)\n return train\n\n def update_feed_dict(self, feed_dict, batch_num, epoch_num):\n \"\"\"Fills values of placeholders created by the optimizers.\n\n Parameters\n ----------\n feed_dict : dict\n Dictionary that would be passed while optimizing the model loss to sess.run.\n batch_num: int\n current batch number\n epoch_num: int\n current epoch number\n \"\"\"\n return\n\n\n@register_optimizer(\"sgd\", ['lr', 'decay_cycle', 'end_lr', 'sine_decay', 'expand_factor', 'decay_lr_rate'])\nclass SGDOptimizer(Optimizer):\n '''Wrapper around SGD Optimizer\n '''\n def __init__(self, optimizer_params, batches_count, verbose=False):\n \"\"\"Initialize the Optimizer\n\n Parameters\n ----------\n optimizer_params : dict\n Consists of key-value pairs. The optimizer will check the keys to get the corresponding params:\n\n - **'lr'**: (float). Learning Rate upper bound (default: 0.0005)\n - **'decay_cycle'**: (int). Cycle of epoch over which to decay (default: 0)\n - **'end_lr'**: (float). Learning Rate lower bound (default: 1e-8)\n - **'cosine_decay'**: (bool). Use cosine decay or to fixed rate decay (default: False)\n - **'expand_factor'**: (float). Expand the decay cycle length by this factor after each cycle \\\n (default: 1)\n - **'decay_lr_rate'**: (float). Decay factor to decay the start lr after each cycle \\\n (default: 2)\n\n Example: ``optimizer_params={'lr': 0.01, 'decay_cycle':30, 'end_lr':0.0001, 'sine_decay':True}``\n batches_count: int\n number of batches in an epoch\n verbose : bool\n Enable/disable verbose mode\n \"\"\"\n super(SGDOptimizer, self).__init__(optimizer_params, batches_count, verbose)\n\n def _init_hyperparams(self, hyperparam_dict):\n \"\"\" Initializes the hyperparameters needed by the algorithm.\n\n Parameters\n ----------\n hyperparam_dict : dictionary\n Consists of key value pairs. 
The optimizer will check the keys to get the corresponding params\n \"\"\"\n\n self._optimizer_params['lr'] = hyperparam_dict.get('lr', DEFAULT_LR)\n self._optimizer_params['decay_cycle'] = hyperparam_dict.get('decay_cycle', DEFAULT_DECAY_CYCLE)\n self._optimizer_params['cosine_decay'] = hyperparam_dict.get('cosine_decay', DEFAULT_SINE)\n self._optimizer_params['expand_factor'] = hyperparam_dict.get('expand_factor', DEFAULT_DECAY_CYCLE_MULTIPLE)\n self._optimizer_params['decay_lr_rate'] = hyperparam_dict.get('decay_lr_rate', DEFAULT_LR_DECAY_FACTOR)\n self._optimizer_params['end_lr'] = hyperparam_dict.get('end_lr', DEFAULT_END_LR)\n\n if self.verbose:\n self._display_params()\n\n def minimize(self, loss):\n \"\"\"Create an optimizer to minimize the model loss\n\n Parameters\n ----------\n loss: tf.Tensor\n Node which needs to be evaluated for computing the model loss.\n\n Returns\n -------\n train: tf.Operation\n Node that needs to be evaluated for minimizing the loss during training\n \"\"\"\n\n # create a placeholder for learning rate\n self.lr_placeholder = tf.placeholder(tf.float32)\n # create the optimizer with the placeholder\n self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr_placeholder)\n\n # load the hyperparameters that would be used while generating the learning rate per batch\n # start learning rate\n self.start_lr = self._optimizer_params['lr']\n self.current_lr = self.start_lr\n\n # cycle rate for learning rate decay\n self.decay_cycle_rate = self._optimizer_params['decay_cycle']\n self.end_lr = self._optimizer_params['end_lr']\n\n # check if it is a sinudoidal decay or constant decay\n self.is_cosine_decay = self._optimizer_params['cosine_decay']\n self.next_cycle_epoch = self.decay_cycle_rate + 1\n\n # Get the cycle expand factor\n self.decay_cycle_expand_factor = self._optimizer_params['expand_factor']\n\n # Get the LR decay factor at the start of each cycle\n self.decay_lr_rate = self._optimizer_params['decay_lr_rate']\n self.curr_cycle_length = self.decay_cycle_rate\n self.curr_start = 0\n\n # create the operation that minimizes the loss\n train = self.optimizer.minimize(loss)\n return train\n\n def update_feed_dict(self, feed_dict, batch_num, epoch_num):\n \"\"\"Fills values of placeholders created by the optimizers.\n\n Parameters\n ----------\n feed_dict : dict\n Dictionary that would be passed while optimizing the model loss to sess.run.\n batch_num: int\n current batch number\n epoch_num: int\n current epoch number\n \"\"\"\n # Sinusoidal Decay\n if self.is_cosine_decay:\n # compute the cycle number\n current_cycle_num = \\\n ((epoch_num - 1 - self.curr_start) * self.batches_count + (batch_num - 1)) / \\\n (self.curr_cycle_length * self.batches_count)\n # compute a learning rate for the current batch/epoch\n self.current_lr = \\\n self.end_lr + (self.start_lr - self.end_lr) * 0.5 * (1 + math.cos(math.pi * current_cycle_num))\n\n # Start the next cycle and Expand the cycle/Decay the learning rate\n if epoch_num % (self.next_cycle_epoch - 1) == 0 and batch_num == self.batches_count:\n self.curr_cycle_length = self.curr_cycle_length * self.decay_cycle_expand_factor\n self.next_cycle_epoch = self.next_cycle_epoch + self.curr_cycle_length\n self.curr_start = epoch_num\n self.start_lr = self.start_lr / self.decay_lr_rate\n\n if self.current_lr < self.end_lr:\n self.current_lr = self.end_lr\n\n # fixed rate decay\n elif self.decay_cycle_rate > 0:\n if epoch_num % (self.next_cycle_epoch) == 0 and batch_num == 1:\n if self.current_lr > self.end_lr:\n 
self.next_cycle_epoch = self.decay_cycle_rate + \\\n ((self.next_cycle_epoch - 1) * self.decay_cycle_expand_factor) + 1\n self.current_lr = self.current_lr / self.decay_lr_rate\n\n if self.current_lr < self.end_lr:\n self.current_lr = self.end_lr\n\n # no change to the learning rate\n else:\n pass\n\n feed_dict.update({self.lr_placeholder: self.current_lr})\n",
"id": "7663863",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1794,
"path": "ampligraph/latent_features/optimizers.py"
}
] | 0 |
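The optimizer snippet above (path `ampligraph/latent_features/optimizers.py`) implements a cyclical cosine decay with warm restarts: within a cycle the learning rate is interpolated from `start_lr` down to `end_lr` with a half-cosine, and at each restart the cycle length is multiplied by `expand_factor` while `start_lr` is divided by `decay_lr_rate`. The block below is a minimal standalone sketch of just the per-batch interpolation formula; the function and variable names are illustrative, not taken from the library, and the restart/expansion bookkeeping is omitted.

```python
import math

def cosine_decay_lr(start_lr, end_lr, cycle_fraction):
    # Half-cosine interpolation: cycle_fraction is the completed fraction
    # of the current decay cycle (0.0 at the start, 1.0 at the end).
    return end_lr + (start_lr - end_lr) * 0.5 * (1 + math.cos(math.pi * cycle_fraction))

# Example: one 10-epoch cycle decaying from 0.1 towards 0.001.
for epoch in range(10):
    print(epoch, round(cosine_decay_lr(0.1, 0.001, epoch / 10), 5))
```

At the start of a cycle this returns the full `start_lr` (0.1), roughly the midpoint (about 0.05) halfway through, and approaches `end_lr` as the cycle completes, which is the same behaviour as the `update_feed_dict` logic in the snippet.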
hildeweerts | [
{
"content": "import seaborn as sns; sns.set()\nimport matplotlib.pyplot as plt\n\ndef manifold(df, c='prediction', figsize=(8,6), s=15, title='nMDS'):\n \"\"\"\n Plot points using a 2D manifold embedding.\n\n Parameters\n ----------\n df : pandas dataframe\n 'x' : mds embedding 0\n 'y' : mds embedding 1\n optional:\n 'prediction' : predicted probability by classifier\n 'label' : label assigned by cluster\n c : string\n - prediction : plot manifold scatterplot colored by prediction probability\n - label : plot manifold scatterplot colored by cluster label\n s : int\n scatterplot node size\n \"\"\"\n f, ax = plt.subplots(figsize=figsize)\n if c == 'prediction':\n points = ax.scatter(x='x', y='y', c=c, s=s, cmap='Spectral', data=df)\n cbar = f.colorbar(points)\n cbar.set_label('prediction probability')\n plt.title(\"%s in SHAP space\" % title)\n else:\n for label, group in df.groupby([c]):\n points = ax.scatter(group['x'], group['y'], s=s, label=label, cmap='Spectral')\n return",
"id": "9699905",
"language": "Python",
"matching_score": 0.8142860531806946,
"max_stars_count": 0,
"path": "fpdash/cbr/plot.py"
},
{
"content": "from cbr import cluster, plot, prep",
"id": "10008886",
"language": "Python",
"matching_score": 0.010573756881058216,
"max_stars_count": 0,
"path": "fpdash/cbr/__init__.py"
},
{
"content": "# General\nimport os, sys, pickle, json\nimport pandas as pd\nimport numpy as np\n# Dash and plotly\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport plotly.graph_objs as go\n# colors\nimport matplotlib \nfrom matplotlib import cm\n# Math\nfrom scipy import stats\n# sklearn\nfrom sklearn.manifold import MDS\nfrom sklearn.neighbors import NearestNeighbors\n# add to pythonpath\nsys.path.append(os.getcwd() + '/fpdash')\nimport shapley.shap as shap\n\n\"\"\"\nINITIALIZE GLOBAL STUFF\n\"\"\"\n\n# Import classifier\nwith open(os.getcwd() + '/data/clf.pickle', 'rb') as f:\n clf = pickle.load(f)\n# Import NN\n# with open(os.getcwd() + '/data/nn.pickle', 'rb') as f:\n# nn = pickle.load(f)\n\n# load case base data\nX_base = pd.read_csv(os.getcwd() + '/data/X_base.csv')\nX_base_decoded = pd.read_csv(os.getcwd() + '/data/X_base_decoded.csv')\nmeta_base = pd.read_csv(os.getcwd() + '/data/meta_base.csv')\nSHAP_base = pd.read_csv(os.getcwd() + '/data/SHAP_base.csv')\n\n# load alert data\nX_alert = pd.read_csv(os.getcwd() + '/data/X_alert.csv')\nX_alert_decoded = pd.read_csv(os.getcwd() + '/data/X_alert_decoded.csv')\nmeta_alert = pd.read_csv(os.getcwd() + '/data/meta_alert.csv')\nSHAP_alert = pd.read_csv(os.getcwd() + '/data/SHAP_alert.csv')\n\n# load separate train data\nX_train = pd.read_csv(os.getcwd() + '/data/X_train.csv')\n\n# Initialize SHAP explainer (must use TRAIN data!)\nexplainer = shap.Explainer(X=X_train, f=clf, mc='training')\n\n# Spectral colormap\nspectral_cmap = matplotlib.cm.get_cmap('Spectral')\nspectral_rgb = []\nnorm = matplotlib.colors.Normalize(vmin=0, vmax=255)\nfor i in range(0, 255):\n k = matplotlib.colors.colorConverter.to_rgb(spectral_cmap(norm(i)))\n spectral_rgb.append(k)\nspectral = []\n\nn_entries = 255\nfor k in [x / n_entries for x in range(0, n_entries+1, 1)]:\n C = spectral_rgb[int(np.round(255*k))-1]\n spectral.append([k, 'rgb'+str((C[0], C[1], C[2]))])\n \n# Border colors\nopacity = 0.5\ncat_colors = {'TP' : 'rgba(159, 211, 86, %s)' % opacity,\n 'TN' : 'rgba(13, 181, 230, %s)' % opacity,\n 'FP' : 'rgba(177, 15, 46, %s)' % opacity,\n 'FN' : 'rgba(255, 165, 76, %s)' % opacity}\n\n# Train nearest neighbors\ndef define_distance_function(contr):\n \"\"\"\n Parameters\n ----------\n contr : array like\n shap values of instance\n \n Returns\n -------\n weighted_distance : function\n function that computes the distance weighted by feature contributions\n \"\"\"\n contr = np.abs(np.array(contr))\n def weighted_distance(a, b):\n \"\"\" compute Euclidean distance between a and b, weighted by feature contributions\n Parameters\n ---------\n a : array\n b : array\n\n Returns\n -------\n distance : float\n weighted distance between array a and b\n \"\"\"\n distance = np.sqrt(np.sum(contr * np.square(np.array(a) - np.array(b))))\n return distance\n return weighted_distance\nnn_dict = {}\nfor i in range(len(SHAP_alert)):\n distance_function_i = define_distance_function(SHAP_alert.iloc[i])\n nn_i = NearestNeighbors(n_neighbors = 10, algorithm = 'brute', metric = distance_function_i)\n nn_i.fit(SHAP_base)\n nn_dict[i] = nn_i\nprint('Initialized nearest neighbor.')\n\n\"\"\"\nCOMPUTE SHAP WITH SAMPLES\n\"\"\"\n\ndef retrieve_shap(instance, top):\n # Retrieve SHAP values \n shap_values = SHAP_alert.iloc[instance].to_dict()\n # Retrieve most important features\n df = pd.DataFrame.from_dict(shap_values, orient = 'index').reset_index(level=0)\n df = 
df.reindex(df[0].abs().sort_values(ascending = False).index)\n features = list(df['index'].iloc[0:top])\n importances = list(df[0].iloc[0:top])\n # Retrieve feature value\n values = [X_alert_decoded.iloc[instance][f] for f in features]\n # Retrieve errors\n alpha = 0.05\n return importances, features, values\n\n\n\"\"\"\nCOMPUTATIONS FOR NEIGHBOR PLOT\n\"\"\"\ndef retrieve_neighbors(i, n_neighbors = 10):\n if n_neighbors == 0:\n distances, neighbors = [None], [None]\n else:\n distances, neighbors = nn_dict[i].kneighbors(SHAP_alert.iloc[[i]], n_neighbors=n_neighbors)\n return distances[0], neighbors[0]\n\ndef compute_mds(i, neighbors, space):\n \"\"\"Compute x and y for multi-dimensional scaling plot.\n \n Parameters\n ----------\n i : int\n index of instance in X_test\n neighbors : np array [n_neighbors]\n array with indices of neighbors in X_train\n space : str, one from ['shap', 'feature']\n distances computed based on shap value space or feature value space\n \"\"\"\n if space == 'shap':\n alert = SHAP_alert\n base = SHAP_base\n elif space == 'feature':\n alert = X_alert\n base = X_base\n else:\n raise ValueError(\"space not in ['shap', 'feature']\")\n mds_values = np.vstack((np.array(alert.iloc[i]), np.array(base.iloc[neighbors])))\n mds = MDS(random_state=1, dissimilarity ='euclidean', metric=True)\n mds.fit(mds_values.astype(np.float64))\n x, y = mds.embedding_.transpose()\n return x, y\n\n\"\"\"\nPLOT FUNCTIONS\n\"\"\"\n\n\"\"\"\nFeature importances\n\"\"\"\ndef generate_options():\n return [{'label' : 'Case %s' % nr, 'value' : nr} for nr in range(1,11)]\n\ndef feature_importance_bar_exact(shap_value, lim):\n if shap_value >= 0:\n color = '#0DB5E6'\n else:\n color = '#ffa54c'\n # Trace definition\n hoverlabel = {\n 'bordercolor' : 'white',\n 'font' : {'size' : 10},\n }\n trace = go.Bar(x = [shap_value] , \n y = [''],\n orientation = 'h',\n hoverinfo = 'x',\n hoverlabel = hoverlabel,\n marker = {'color' : color},\n )\n # Layout definition\n xaxis = {\n 'range' : [-lim, lim],\n 'fixedrange' : True,\n 'showgrid' : False,\n 'zeroline' : False,\n 'showline' : False,\n 'showticklabels' : False,\n 'hoverformat': '.2f'\n }\n yaxis = {\n 'fixedrange' : True,\n 'showgrid' : False,\n 'zeroline' : False,\n 'showline' : False,\n 'showticklabels' : False\n }\n margin=go.layout.Margin(l=0, r=0, t=0, b=0, pad=0)\n layout = go.Layout(yaxis = yaxis,\n xaxis = xaxis,\n margin = margin,\n bargap = 0)\n \n # Config definition\n config={'displayModeBar': False,\n 'showLink' : False}\n \n return dcc.Graph(figure = {'data' : [trace],\n 'layout' : layout}, \n config = config,\n style = {'height' : '18px',\n 'width' : '170px'})\n\ndef feature_importance_table_exact(importances, features, values):\n # Add header\n table = [html.Tr([html.Th(col) for col in ['Contribution', 'Feature', 'Value']])]\n # Add body\n lim = np.abs(importances[0]) + 0.2\n for importance, feature, value in zip(importances, features, values):\n table.append(html.Tr([\n html.Td(feature_importance_bar_exact(importance, lim)),\n html.Td(feature),\n html.Td(value),\n ]))\n \n return html.Table(table,\n style={'font-size': '1.5rem',\n 'marginTop' : '10px'}\n )\n\n\"\"\"\nNeighbors plot\n\"\"\"\ndef scatter_neighbors(x, y, neighbors, view, instance, border_width=4):\n \"\"\"\n Parameters\n ----------\n x : array\n mds x with x[0] alert and x[1:] neighbors\n y : array\n mds y, with y[0] being alert and y[1:] neighbors\n neighbors : array\n array with indexes of neighbors\n view : str, one from ['perf', 'pred']\n which view to plot\n instance : 
int\n index of the current alert\n border_width : int\n border width \n \n \"\"\"\n global spectral\n global cat_colors\n global meta_base\n global meta_alert\n \n if view == 'perf':\n showscale = False\n colorscale = [[0,'rgba(75, 75, 75, 1)'], [1, 'rgba(75, 75, 75, 1)']]\n color_alert = 'rgba(255, 255, 0, 0.3)'\n showlegend = True\n elif view == 'pred':\n border_width = 0\n showscale = True\n colorscale = spectral\n color_alert = spectral[int(meta_alert.iloc[instance]['score']*len(spectral))-1][1]\n showlegend = False\n else:\n raise ValueError(\"view must be one of ['pred', 'perf']\")\n \n \"\"\"\n PREP\n \"\"\"\n # Retrieve meta information\n meta_neighbors = pd.DataFrame({'x' : x[1:], 'y' : y[1:], \n 'performance' : meta_base['performance'].iloc[neighbors], \n 'score' : meta_base['score'].iloc[neighbors], \n 'index' : neighbors})\n \"\"\"\n ADD TRACES\n \"\"\"\n traces = []\n # Add neighbors\n for perf in ['TP', 'TN', 'FP', 'FN']:\n group = meta_neighbors[meta_neighbors['performance'] == perf]\n scatter = go.Scatter(\n x = group['x'],\n y = group['y'],\n mode = 'markers',\n marker = {'line' : {'width' : border_width, 'color' : cat_colors[perf]},\n 'color' : group['score'],\n 'colorscale' : colorscale,\n 'cmin' : 0,\n 'cmax' : 1,\n 'size' : 10},\n showlegend = showlegend,\n name=perf,\n hoverinfo = 'text',\n hoveron = 'points',\n text = ['%.2f' % i for i in group['score']])\n traces.append(scatter)\n #Add alert\n traces.append(go.Scatter(\n x = [x[0]],\n y = [y[0]],\n mode = 'markers',\n marker = {'line' : {'width' : 3, 'color' : 'rgba(50, 50, 50, 1)'},\n 'size' : 14,\n 'color' : color_alert,\n 'cmin' : 0,\n 'cmax' : 1},\n name = 'Current alert',\n showlegend = True,\n hoverinfo = 'text',\n hoveron = 'points',\n text = 'Current Alert (p=%.2f)' % meta_alert['score'].iloc[instance]))\n # Add dummy colorbar\n traces.append(go.Scatter(\n x=[None],\n y=[None],\n mode='markers',\n marker=dict(\n colorscale=spectral, \n showscale=showscale,\n cmin=0,\n cmax=1,\n colorbar=dict(thickness=5, ticklen=8, outlinewidth=0, title=\"\"\"Model's Confidence\"\"\", tickfont = {'size' : 8}, titlefont={'size' : 10})),\n showlegend = False,\n hoverinfo='none'))\n\n \"\"\"\n Define layout\n \"\"\"\n xaxis = {'fixedrange' : False,\n 'showgrid' : True,\n 'zeroline' : False,\n 'showline' : False,\n 'showticklabels' : False,\n }\n yaxis = {'fixedrange' : False,\n 'showgrid' : True,\n 'zeroline' : False,\n 'showline' : False,\n 'showticklabels' : False\n }\n margin = go.layout.Margin(l=0, r=0, t=0, b=0, pad=0)\n layout = go.Layout(yaxis = yaxis, xaxis = xaxis, margin = margin, height = 400,\n hovermode = 'closest', legend = dict(y=-0.05, orientation='h'),\n paper_bgcolor='rgba(0,0,0,0)', plot_bgcolor='rgba(0,0,0,0)',title='Hoi')\n \n \"\"\"\n Define config\n \"\"\"\n # Config definition\n config={'displayModeBar': False,\n 'showLink' : False}\n \n return dcc.Graph(id='neighbors-scatter',\n figure = {'data' : traces,\n 'layout' : layout},\n config = config,\n #style = {'height' : '18px',\n # 'width' : '170px'}\n )\n\ndef update_performance(fig, instance, view, border_width=4):\n global spectral, meta_alert\n current_width = fig['data'][0]['marker']['line']['width']\n if ((current_width == 0) and (view == 'perf')):\n #alert\n fig['data'][4]['marker']['color'] = 'rgba(255, 255, 0, 0.3)' \n #scale\n fig['data'][4]['showlegend'] = True\n fig['data'][5]['marker']['showscale'] = False\n #neighbors\n for i in range(4):\n fig['data'][i]['marker']['line']['width'] = border_width\n fig['data'][i]['marker']['colorscale'] = 
[[0,'rgba(75, 75, 75, 1)'], [1, 'rgba(75, 75, 75, 1)']]\n fig['data'][i]['showlegend'] = True\n elif ((current_width != 0) and (view == 'pred')):\n #alert\n fig['data'][4]['marker']['color'] = spectral[int(meta_alert.iloc[instance]['score']*len(spectral))-1][1]\n #scale\n fig['data'][4]['showlegend'] = True\n fig['data'][5]['marker']['showscale'] = True\n #neighbors\n for i in range(4):\n fig['data'][i]['marker']['line']['width'] = 0\n fig['data'][i]['marker']['colorscale'] = spectral\n fig['data'][i]['showlegend'] = False\n return fig\n\n\"\"\"\nSTYLING\n\"\"\"\n\ncolors = {\n 'background': '#f6f6f6',\n 'text-gray' : '#727272'\n}\n\n# DIV STYLES\ncolumnStyle = {'marginLeft': 5,\n 'marginRight' : 5,\n 'backgroundColor': colors['background'],\n 'paddingLeft' : 20,\n 'paddingRight' : 20,\n 'paddingBottom' : 20,\n 'height' : '93vh',\n 'overflow': 'auto'}\n\nmiddleColumnStyle = {'marginLeft': 20,\n 'paddingLeft' : 20,\n 'paddingRight' : 20,\n 'paddingBottom' : 20}\n\nradioStyle = {\n 'margin-right': 10\n}\n\nlabelStyle = {\n 'font-size' : '1.4rem',\n 'color' : colors['text-gray']\n}\n\niconStyle = {'font-size' : '1.5rem',\n 'color' : colors['text-gray']}\n\n\n\"\"\"\nAPPLICATION\n\"\"\"\n\nexternal_stylesheets = [os.getcwd() + '/assets/font-awesome/css/all.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\napp.config.supress_callback_exceptions = True\n\napp.layout = html.Div([\n html.Div([\n # LEFT COLUMN: ALERT SELECTION\n html.Div([html.H1('Alerts'),\n dcc.RadioItems(\n id='alertlist',\n inputStyle = radioStyle,\n labelStyle = labelStyle,\n value=1,\n options=generate_options())\n ], \n className=\"two columns\",\n style = columnStyle),\n \n # MIDDLE COLUMN: ALERT EXPLANATION\n html.Div(\n children = [\n # Title\n html.H1('Case ID', id='alert_id'),\n # Prediction Probability Row\n html.Div(className = 'row', \n children = [\n # Title\n html.H2('Prediction'),\n # Subtitle\n html.P(\"\"\"The model's confidence indicates the \n likelihood of the positive class, as estimated by the model. 
\"\"\"),\n html.H4(id = 'load-probability'),\n html.H4(id = 'prediction-probability')\n ]),\n \n # Number of Samples Row\n html.Div(className = 'row',\n children = [\n # Title\n html.H2('Explanation'),\n # Paragraph\n html.P(\"\"\"The displayed feature contributions indicate how much each feature value contributed to the\n algorithm's prediction.\"\"\"),\n html.Ul([\n html.Li(html.P([\n html.B('Negative contribution: '), \n 'feature value makes it ', html.B('less'), ' likely that the case is fraudulent.']\n )),\n html.Li(html.P([\n html.B('Positive contribution: '), \n 'feature value makes it ', html.B('more'), ' likely that the case is fraudulent.']\n ))\n ])\n ]),\n\n html.Div(id='load-importances'),\n html.Div(id='feature-importance-table')\n ],\n className=\"five columns\",\n id = 'explanation',\n style = middleColumnStyle),\n \n # RIGHT COLUMN: NEIGHBORS\n html.Div(className=\"five columns\",\n style = columnStyle,\n children = [html.H1('Case-Based Performance'),\n html.H2('Most Similar Cases'),\n #html.P('The cases most similar to the alert are retrieved.'),\n html.Div(className='row',\n children = [\n html.H6('Number of Cases'),\n ]),\n html.Div(className = 'row', \n children = [\n html.Div(className=\"eleven columns\",\n children = [dcc.Slider(\n id='neighbors-slider',\n min=10,\n max=100,\n step=5,\n value=30, \n marks={str(n): {'label' : str(n), \n 'style' : {'font-size' : 10}} for n in range(10,110,10)})])\n ]),\n html.Div(className='row',\n children = [html.H6(['View ',\n html.Div([html.I(className=\"fas fa-info-circle\", style=iconStyle),\n html.Span([\"\"\"Display the model's confidence scores or the model's performance.\"\"\"],\n className='tooltiptext')],\n className = \"tooltip\")\n ])\n ], style = {'marginTop' : '3.5rem'}),\n html.Div(className = 'row',\n id='perf-buttons',\n children = [html.Button(\"Confidence\", \n id ='color-button-pred',\n style = {'marginRight' : '0.5rem',\n 'background-color' : '#888', \n 'color' : '#fff'}),\n html.Button('Performance', \n id ='color-button-perf',\n style = {'marginRight' : '0.5rem',\n 'background-color' : 'transparent', \n 'color' : '#555'})]),\n html.Div(className='row',\n children = [html.H6(['Similarity ',\n html.Div([html.I(className=\"fas fa-info-circle\", style=iconStyle),\n html.Span([\"\"\"Display similarity on feature values (= similar attributes) \n or feature contributions (= similar explanation)\"\"\"],\n className='tooltiptext')],\n className = \"tooltip\")\n ])\n ],\n style = {'marginTop' : '2rem' }),\n html.Div(className = 'row',\n id='space-buttons',\n children = [html.Button('Feature Values', \n id ='space-button-val',\n style = {'marginRight' : '0.5rem', \n 'background-color' : '#888', \n 'color' : '#fff'}),\n html.Button('Feature Contributions', \n id ='space-button-contr',\n style = {'marginRight' : '0.5rem', \n 'background-color' : 'transparent', \n 'color' : '#555'})]),\n\n html.Div(className ='row',\n children = [\n html.Div(id='neighbors-plot', style= {'marginTop' : '1.5rem', 'marginBottom' : '1.5rem'})\n ]),\n html.Div(id = 'performance-explanation',\n className = 'row',\n children = [''])\n ]),\n # HIDDEN DIVS WITH INTERMEDIATE VALUES\n html.Div(id='selected-instance', style={'display': 'none'}, children=0),\n html.Div(id='neighbor-dummydiv', style={'display': 'none'}, children=None),\n html.Div(id='color-state', children='perf:0 pred:0 last:pred', style={'display': 'none'}),\n html.Div(id='space-state', children='val:0 contr:0 last:val', style={'display': 'none'}),\n ])\n], style={'paddingTop' : 
5})\n\n\"\"\"\n-------------------------\nCALLBACKS\n-------------------------\n\"\"\"\n\n\"\"\"\nSELECT INSTANCE\n\"\"\"\n# Select instance\n@app.callback(Output('selected-instance', 'children'), \n [Input('alertlist', 'value')])\ndef select_instance(value):\n instance = value\n return instance # or, more generally, json.dumps(cleaned_df)\n\n# Update title\n@app.callback(Output('alert_id', 'children'),\n [Input('alertlist', 'value')])\ndef update_title(value):\n global X_alert\n title = 'Case ID: %s' % value\n return title\n\n\"\"\"\nUPDATE PROBABILITY\n\"\"\"\n@app.callback(Output('load-probability', 'children'),\n [Input('alertlist', 'value')])\ndef update_probability(instance):\n if instance:\n return html.Div([html.P('Loading...')], \n id='prediction-probability',\n style = {'marginTop' : '10px'})\n \n@app.callback(Output('prediction-probability', 'children'),\n [Input('alertlist', 'value')])\n\ndef update_probability(instance):\n if instance:\n probability = clf.predict_proba([X_alert.iloc[instance]])[0][1]\n return [\"Model's confidence: %.2f \" % probability,\n html.Div([html.I(className=\"fas fa-exclamation-circle\", style=iconStyle),\n html.Span([html.B(\"\"\"WARNING: \"\"\"),\n \"\"\"the model's confidence might be inaccurate!\"\"\"],\n className='tooltiptext')],\n className = \"tooltip\")]\n \n\"\"\"\nCOMPUTE SHAP VALUE\n\"\"\"\n\n@app.callback(\n Output('feature-importance-table', 'children'),\n [Input('selected-instance', 'children')])\ndef update_importances(instance):\n if instance:\n top = 15\n nr_samples=10\n importances, features, values = retrieve_shap(int(instance), top)\n return feature_importance_table_exact(importances, features, values)\n \n\"\"\"\nRETRIEVE NEIGHBORS\n\"\"\"\n\n\"\"\"\nsave neighbors\n\"\"\"\n@app.callback(\n Output('neighbor-dummydiv', 'children'),\n [Input('neighbors-slider', 'value'),\n Input('selected-instance', 'children')])\ndef update_dummy_div(n_neighbors, instance):\n global meta_base\n distances, neighbors = retrieve_neighbors(instance, n_neighbors)\n return neighbors\n\n\"\"\"\nscatterplot\n\"\"\"\n\n@app.callback(\n Output('color-state', 'children'), \n [Input('color-button-perf', 'n_clicks'),\n Input('color-button-pred', 'n_clicks')],\n [State('color-state', 'children')])\ndef update_state(perf_clicks, pred_clicks, prev_clicks):\n prev_clicks = dict([i.split(':') for i in prev_clicks.split(' ')])\n \n # Replace None by 0\n if perf_clicks is None:\n perf_clicks = 0\n if pred_clicks is None:\n pred_clicks = 0\n # Check value\n if perf_clicks > int(prev_clicks['perf']):\n last_clicked = 'perf'\n elif pred_clicks > int(prev_clicks['pred']):\n last_clicked = 'pred'\n else:\n last_clicked ='pred'\n # Check changed\n prev_clicked = prev_clicks['last']\n if prev_clicked == last_clicked:\n changed = 'no'\n else:\n changed = 'yes'\n # Update current state\n cur_clicks = 'perf:{} pred:{} last:{} changed:{}'.format(perf_clicks, pred_clicks, last_clicked, changed)\n return cur_clicks\n\n@app.callback(\n Output('space-state', 'children'), \n [Input('space-button-val', 'n_clicks'),\n Input('space-button-contr', 'n_clicks')],\n [State('space-state', 'children')])\ndef update_state(val_clicks, contr_clicks, prev_clicks):\n prev_clicks = dict([i.split(':') for i in prev_clicks.split(' ')])\n # Replace None by 0\n if val_clicks is None:\n val_clicks = 0\n if contr_clicks is None:\n contr_clicks = 0\n # Check value\n if val_clicks > int(prev_clicks['val']):\n last_clicked = 'feature'\n elif contr_clicks > int(prev_clicks['contr']):\n last_clicked = 
'shap'\n else:\n last_clicked ='feature'\n # Check changed\n prev_clicked = prev_clicks['last']\n if prev_clicked == last_clicked:\n changed = 'no'\n else:\n changed = 'yes'\n cur_clicks = 'val:{} contr:{} last:{} changed:{}'.format(val_clicks, contr_clicks, last_clicked, changed)\n return cur_clicks\n\n@app.callback(\n Output('neighbors-plot', 'children'),\n [Input('neighbor-dummydiv', 'children'),\n Input('selected-instance', 'children'),\n Input('space-state', 'children')],\n [State('color-state', 'children'),\n State('neighbors-plot', 'children')])\ndef update_neighbors(neighbors, instance, prev_clicks_space, prev_clicks_color, current_graph):\n global meta_base\n prev_clicks_space = dict([i.split(':') for i in prev_clicks_space.split(' ')])\n # Check whether it has changed\n if prev_clicks_space['changed'] == 'no':\n graph = current_graph\n else:\n # Determine space\n last_clicked_space = prev_clicks_space['last']\n # compute distances\n x, y = compute_mds(instance, neighbors, space=last_clicked_space)\n # Determine color\n last_clicked_color = dict([i.split(':') for i in prev_clicks_color.split(' ')])['last']\n # create graph\n graph = scatter_neighbors(x, y, neighbors, view=last_clicked_color, instance=instance)\n return graph\n\n@app.callback(\n Output('neighbors-scatter', 'figure'),\n [Input('color-state', 'children')],\n [State('neighbors-scatter', 'figure'), \n State('selected-instance', 'children')])\ndef update_scatter(prev_clicks_color, figure, instance):\n prev_clicks_color = dict([i.split(':') for i in prev_clicks_color.split(' ')])\n if prev_clicks_color['changed'] == 'no':\n figure = figure\n else:\n last_clicked_color = prev_clicks_color['last']\n figure = update_performance(figure, instance, view=last_clicked_color)\n return figure\n\n\n\"\"\"\nupdate buttons\n\"\"\"\n\n@app.callback(\n Output('color-button-perf', 'style'),\n [Input('color-state', 'children')],\n [State('color-button-perf', 'style')])\ndef update_perf_button(prev_clicks_color, current_style):\n prev_clicks_color = dict([i.split(':') for i in prev_clicks_color.split(' ')])\n new_background = current_style['background-color']\n new_color = current_style['color']\n if prev_clicks_color['changed'] == 'yes':\n if prev_clicks_color['last'] == 'perf':\n new_background = '#888'\n new_color = '#fff'\n elif prev_clicks_color['last'] == 'pred':\n new_background = 'transparent'\n new_color = '#555'\n current_style['background-color'] = new_background\n current_style['color'] = new_color\n return current_style\n\n@app.callback(\n Output('color-button-pred', 'style'),\n [Input('color-state', 'children')],\n [State('color-button-pred', 'style')])\ndef update_pred_button(prev_clicks_color, current_style):\n prev_clicks_color = dict([i.split(':') for i in prev_clicks_color.split(' ')])\n new_background = current_style['background-color']\n new_color = current_style['color']\n if prev_clicks_color['changed'] == 'yes':\n if prev_clicks_color['last'] == 'pred':\n new_background = '#888'\n new_color = '#fff'\n elif prev_clicks_color['last'] == 'perf':\n new_background = 'transparent'\n new_color = '#555'\n current_style['background-color'] = new_background\n current_style['color'] = new_color\n return current_style\n\n@app.callback(\n Output('space-button-val', 'style'),\n [Input('space-state', 'children')],\n [State('space-button-val', 'style')])\ndef update_val_button(prev_clicks_space, current_style):\n prev_clicks_space = dict([i.split(':') for i in prev_clicks_space.split(' ')])\n new_background = 
current_style['background-color']\n new_color = current_style['color']\n if prev_clicks_space['changed'] == 'yes':\n if prev_clicks_space['last'] == 'feature':\n new_background = '#888'\n new_color = '#fff'\n elif prev_clicks_space['last'] == 'shap':\n new_background = 'transparent'\n new_color = '#555'\n current_style['background-color'] = new_background\n current_style['color'] = new_color\n return current_style\n\n@app.callback(\n Output('space-button-contr', 'style'),\n [Input('space-state', 'children')],\n [State('space-button-contr', 'style')])\ndef update_val_button(prev_clicks_space, current_style):\n prev_clicks_space = dict([i.split(':') for i in prev_clicks_space.split(' ')])\n new_background = current_style['background-color']\n new_color = current_style['color']\n if prev_clicks_space['changed'] == 'yes':\n if prev_clicks_space['last'] == 'shap':\n new_background = '#888'\n new_color = '#fff'\n elif prev_clicks_space['last'] == 'feature':\n new_background = 'transparent'\n new_color = '#555'\n current_style['background-color'] = new_background\n current_style['color'] = new_color\n return current_style\n\n\"\"\"\ndisplay performance explanation\n\"\"\"\n@app.callback(\n Output('performance-explanation', 'children'),\n [Input('color-state', 'children')])\ndef update_performance_explanation(prev_clicks_color):\n prev_clicks_color = dict([i.split(':') for i in prev_clicks_color.split(' ')])\n text = ''\n if prev_clicks_color['changed'] == 'yes':\n if prev_clicks_color['last'] == 'perf':\n text = html.Ul([html.Li(html.P([html.B('TP '),\n '(true positive): triggered an alert and was indeed fraudulent.']\n )),\n html.Li(html.P([html.B('FP '), \n '(false postive): triggered alert but was', html.B(' not '), 'fraudulent.']\n )),\n html.Li(html.P([html.B('TN '),\n '(true negative): did not trigger an alert and was indeed non-fraudulent.']\n )),\n html.Li(html.P([html.B('FN '), \n '(false negative): did not trigger an alert but was actually fraudulent.']\n ))\n ])\n elif prev_clicks_color['last'] == 'pred':\n text = ''\n return text\n\nif __name__ == '__main__':\n app.run_server(debug=True, processes=4)\n ",
"id": "376703",
"language": "Python",
"matching_score": 3.052541494369507,
"max_stars_count": 0,
"path": "app.py"
},
{
"content": "import openml\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import roc_curve\nimport matplotlib.pyplot as plt\nimport seaborn as sns; sns.set()\n\n\ndef openmlwrapper(data_id=31, random_state=1, n_samples = 2000, verbose=True, scale=True, test_size=0.25):\n \"\"\"\n Wrapper for preprocessing OpenML datasets. Train/test split (75/25) and fill missing values with median of\n training set. \n Optional: scale data through normalization (subtract mean, divide by standard deviation).\n \n Parameters\n ----------\n data_id : int\n openml dataset id\n random_state : int\n random state of the train test split\n n_samples : int\n number of samples from the data that will be returned\n \n Returns\n -------\n data_dict : dict\n Dictionary with data, including: X_train, X_test, y_train, y_test, X_train_decoded (original feature values), \n X_test_decoded (original feature values)\n \"\"\"\n dataset = openml.datasets.get_dataset(data_id)\n X, y, cat, att = dataset.get_data(target = dataset.default_target_attribute, \n return_categorical_indicator=True,\n return_attribute_names=True)\n print('Start preprocessing...')\n # Sample at most n_samples samples\n if len(X) > n_samples:\n prng = np.random.RandomState(seed=1)\n rows = prng.randint(0, high=len(X), size=n_samples)\n X = X[rows, :]\n y = y[rows]\n if verbose:\n print(\"...Sampled %s samples from dataset %s.\" % (n_samples, data_id))\n else:\n if verbose:\n print(\"...Used all %s samples from dataset %s.\" % (len(X), data_id))\n \n # Split data in train and test\n X_train, X_test, y_train, y_test = train_test_split(pd.DataFrame(X, columns=att),\n pd.DataFrame(y, columns=['class']),\n random_state = random_state,\n test_size=test_size)\n # Fill missing values with median of X_train\n X_train = X_train.fillna(X_train.median())\n X_test = X_test.fillna(X_train.median())\n if verbose:\n print('...Filled missing values.')\n \n # Create decoded version with original feature values for visualizations\n X_train_decoded = X_train.copy()\n X_test_decoded = X_test.copy()\n for f in att:\n labels = dataset.retrieve_class_labels(target_name=f)\n if labels != 'NUMERIC':\n labels_dict = {i : l for i,l in zip(range(len(labels)), labels)}\n else:\n labels_dict = {}\n X_test_decoded[f] = X_test_decoded[f].replace(labels_dict)\n X_train_decoded[f] = X_train_decoded[f].replace(labels_dict)\n if verbose:\n print('...Decoded to original feature values.')\n\n # Scale data\n if scale:\n scaler = StandardScaler()\n scaler.fit(X_train)\n X_train = pd.DataFrame(scaler.transform(X_train), columns=list(X_train))\n X_test = pd.DataFrame(scaler.transform(X_test), columns=list(X_test))\n if verbose:\n print('...Scaled data.')\n\n print('Preprocessing done.')\n return {'X_train' : X_train,\n 'X_test' : X_test,\n 'y_train' : y_train,\n 'y_test' : y_test,\n 'X_train_decoded' : X_train_decoded,\n 'X_test_decoded' : X_test_decoded}\n\ndef plot_roc(y, y_score, label, max_fpr, xlim, mln = True):\n \"\"\"\n Plot de ROC curve up to a particular maximum false positive rate.\n \n Parameters\n ----------\n y : array like [n_observations]\n true classes\n y_score : array like [n_observations]\n classification probabilities\n label : string\n dataset name\n max_fpr : numerical\n maximum false positive rate\n xlim : numerical\n limit of plot on x axis\n mln : Boolean\n display FPR per million\n\n Returns\n -------\n fpr : array\n fp rates\n tpr : array \n tp rates\n 
thresholds : array\n prediction thresholds\n \"\"\"\n ax = plt.axes()\n fpr, tpr, thresholds = roc_curve(y, y_score, drop_intermediate=False)\n plt.plot([0, 1], [0, 1], '--', linewidth=1, color='0.25')\n plt.plot(fpr, tpr, label = 'Classifier')\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC curve %s data' % label)\n if xlim:\n if mln:\n plt.plot([max_fpr,max_fpr], [0, 1], 'r--', linewidth=1, label = 'FPR $\\leq %.f*10^{-6}$'%(max_fpr*10**6))\n labels = plt.xticks()\n ax.set_xticklabels(['%.0f' %(i*10**6) for i in labels[0]])\n plt.xlabel('False Positive Rate (per 1 mln)')\n else:\n plt.plot([max_fpr, max_fpr], [0, 1], 'r--', linewidth=1, label = 'FPR $\\leq %.2f$'%(max_fpr))\n plt.xlabel('False Positive Rate')\n plt.xlim(-.000001,xlim+.000001)\n plt.legend()\n plt.show()\n return fpr, tpr, thresholds",
"id": "10494811",
"language": "Python",
"matching_score": 2.3038549423217773,
"max_stars_count": 0,
"path": "fpdash/cbr/prep.py"
},
{
"content": "# cluster imports\nfrom sklearn.cluster import AgglomerativeClustering, KMeans, SpectralClustering\nfrom sklearn.mixture import GaussianMixture\n\n# cluster description imports\nfrom sklearn.tree import DecisionTreeClassifier, _tree\nfrom sklearn.utils.class_weight import compute_class_weight\nfrom sklearn.model_selection import GridSearchCV, StratifiedKFold\nfrom scipy.stats import uniform\nfrom sklearn.metrics import roc_auc_score\n\n# prototyping imports\nimport pandas as pd\nimport numpy as np\n\ndef wrapper(alg, n_clusters, instances, clu_params={}):\n \"\"\"\n sklearn cluster wrapper for easy testing.\n \n Parameters\n ----------\n alg : str\n algorithm\n n_clusters : int\n number of clusters/components\n instances : array like size [n_instances, n_features]\n instances that need to be clustered\n \"\"\"\n if alg=='gmm':\n clu = GaussianMixture(n_components=n_clusters, random_state=1, **clu_params)\n if alg=='spec':\n clu = SpectralClustering(n_clusters=n_clusters, random_state=1, **clu_params)\n if alg=='aggl':\n clu = AgglomerativeClustering(n_clusters=n_clusters, affinity='euclidean', **clu_params)\n if alg=='kmeans':\n clu = KMeans(n_clusters=n_clusters, **clu_params)\n labels = clu.fit_predict(instances)\n return clu, labels\n\n\nclass Prototype:\n \"\"\"\n Prototypical explanation.\n \"\"\"\n def __init__(self, label, feature_values):\n \"\"\"\n Initialize prototype.\n Parameters\n ----------\n label : object\n label of the prototype\n feature_values : object of type dict, list or np.ndarray\n feature values of the prototype\n \"\"\"\n self.label = label\n if (type(feature_values) == list) or (type(feature_values) == np.ndarray):\n self.feature_values = {i : v for i, v in zip(range(len(feature_values)), feature_values)}\n elif type(feature_values) == dict:\n self.feature_values = feature_values\n else:\n raise ValueError('Invalid feature values type.')\n return\n \nclass PrototypeSet:\n \"\"\"\n Set of prototypical explanations.\n \"\"\"\n def __init__(self, prototypes = None):\n \"\"\"\n Initialize Prototype set.\n \n Parameters\n ----------\n prototypes : dictionary of Prototype instances\n \"\"\"\n if prototypes is None:\n self.prototypes = {}\n else:\n self.prototypes = {p.label : p for p in prototypes}\n return\n \n def fit(self, X, labels, metric = 'mean'):\n \"\"\"\n Fit prototypes based on a clustering.\n \n Parameters\n ----------\n X : array like [n_instances, n_features]\n actual values of the instances\n labels : array of int of size n_instances\n nearest prototypes\n metric : str\n metric used to compute prototypes\n - 'mean' : average feature values of all instances in cluster\n \"\"\"\n df = pd.DataFrame(X)\n df['label'] = labels\n for label, group in df.groupby('label'):\n if metric == 'mean':\n group = group.drop('label', axis=1)\n values = group.mean().to_dict()\n self.prototypes[label] = Prototype(label, values)\n return\n\ndef prototype_rmse(X, labels, ps):\n \"\"\"\n Compute the RSME of a prototype set. 
This is the root mean squared error in case \n SHAP explanations are predicted based on the SHAP explanation of the prototypes.\n \n Parameters\n ----------\n X : array like [n_instances, n_features]\n labels : array of int of size n_instances\n nearest prototypes\n ps : PrototypeSet\n set of prototypes which we like to evaluate\n feature_rsme : boolean\n retrieve RMSE on a per-feature basis\n \"\"\"\n df = pd.DataFrame(X)\n df['label'] = labels\n se_total = 0\n for label, group in df.groupby('label'):\n group = group.drop('label', axis=1)\n p = ps.prototypes[label]\n # compute squared error for this prototype\n se = np.sum(np.sum(np.square(group - p.feature_values)))\n # add to total rmse, weighted by group size\n se_total += se\n rmse = np.sqrt(se_total/len(X))\n return rmse\n\ndef prototype_r2(X, labels, ps):\n \"\"\"\n Compute the R2 of a prototype set. This is the amount of variance in the prediction probability\n that is explained by the prototype set.\n \n Parameters\n ----------\n X : array like [n_instances, n_features]\n labels : array of int of size n_instances\n nearest prototypes\n ps : PrototypeSet\n set of prototypes which we like to evaluate\n \"\"\"\n df = pd.DataFrame(X)\n # Compute SS_tot\n y_i = df.sum(axis=1)\n y_mean = y_i.mean()\n SS_tot = np.sum(np.square(y_i - y_mean))\n \n # Compute SS_res\n df['label'] = labels\n SS_res = 0\n for label, group in df.groupby('label'):\n f_i = sum(ps.prototypes[label].feature_values.values())\n group = group.drop('label', axis=1)\n y_i_group = group.sum(axis=1)\n SS_res += np.sum(np.square(y_i_group - f_i))\n \n # Compute R2\n R2 = 1 - (SS_res/SS_tot)\n return R2\n\ndef fit_description_tree(clu, cluster, X, feature_names = None, random_state=1, \n param_grid=None):\n \"\"\"\n For a cluster within a clustering, fit a decision tree with target variable cluster membership.\n\n Parameters\n ----------\n clu : cluster object\n sklearn clustering object including labels_\n cluster : int\n cluster id in clu.labels_\n X : array like\n training instances\n feature_names : array like\n array containing feature names (str); if not passed X must be a pandas dataframe\n random_state : int\n random seed passed to decision tree and grid search\n param_distr : dict\n dictionary with parameter distributions for hyperparameter tuning\n \n Returns\n -------\n dt : DecisionTreeClassifier\n Description tree\n score : float\n AUC of decision tree classifier\n labels_dummy : list\n cluster membership (1 if in cluster, else 0)\n \"\"\"\n \n # feature names\n if feature_names is None:\n feature_names = list(X)\n # set param_grid\n if param_grid is None:\n param_grid = {'max_depth' : list(range(1, 2*len(feature_names))), \n 'class_weight' : ['balanced']}\n \n # retrieve labels\n labels_dummy = [1 if l == cluster else 0 for l in clu.labels_]\n \n # perform grid search\n inner_cv = KFold(n_splits=3, shuffle=True, random_state=i)\n outer_cv = KFold(n_splits=3, shuffle=True, random_state=i)\n \n rscv = GridSearchCV(estimator=DecisionTreeClassifier(random_state=random_state),\n param_grid=param_grid, scoring = 'roc_auc', \n iid=False, n_jobs=-1, cv=StratifiedKFold(3, shuffle=True, random_state=random_state), refit=True)\n rscv.fit(X, labels_dummy)\n dt = rscv.best_estimator_\n \n # evaluate estimator\n y_score = [i[1] for i in dt.predict_proba(X)]\n score = roc_auc_score(y_true=labels_dummy, y_score=y_score)\n return dt, score, labels_dummy\n\ndef get_description(tree, feature_names, labels_dummy = None, probability_threshold = 0.5):\n \"\"\"\n Produce 
description for decision tree.\n \n Parameters\n -----------\n tree : scikit-learn DecisionTreeClassifier\n decision tree you want to describe\n feature_names : list \n feature names\n labels_dummy : list\n dummy labels necessary to compute class weights when class_weight is 'balanced'\n 1 : part of cluster, 0 : not part of cluster\n probability_threshold : float\n if proportion of instances belonging to the positive class > probability_threshold, the description\n is added to the set of descriptions\n \n Returns\n -------\n descriptions : dict\n dictionary with the descriptions\n \"\"\"\n class_weight = tree.class_weight\n if class_weight == 'balanced':\n class_weight_vec = compute_class_weight(class_weight, [0,1], labels_dummy)\n class_weight = {0 : class_weight_vec[0], 1 : class_weight_vec[1]}\n descriptions = []\n tree_ = tree.tree_\n feature_name = [feature_names[i] if i != _tree.TREE_UNDEFINED else \"undefined!\"\n for i in tree_.feature]\n \n def recurse(node, depth, descr):\n if tree_.feature[node] != _tree.TREE_UNDEFINED: # if internal node\n name = feature_name[node]\n threshold = tree_.threshold[node]\n descr_left = descr.copy()\n descr_left[node] = {'name' : name, 'threshold' : threshold, 'sign' : '<='}\n descr_right = descr.copy()\n descr_right[node] = {'name' : name, 'threshold' : threshold, 'sign' : '>'}\n recurse(tree_.children_left[node], depth + 1, descr_left)\n recurse(tree_.children_right[node], depth + 1, descr_right)\n else: #if leaf node\n value = tree_.value[node][0]\n value_0 = value[0]/class_weight[0] # number of instances not in cluster\n value_1 = value[1]/class_weight[1] # number of instances in cluster\n if value_1/(value_0 + value_1) > probability_threshold: # if leaf node belongs to target cluster:\n descriptions.append((descr, value_0, value_1)) \n recurse(0,1,{})\n return descriptions\n\n\n\n\n\"\"\"\nOLDER STUFF\n\"\"\"\n\ndef prototype_r2_old(X, labels, ps):\n \"\"\"\n Compute the accuracy of a prototype set.\n \n Parameters\n ----------\n X : array like [n_instances, n_features]\n labels : array of int of size n_instances\n nearest prototypes\n ps : PrototypeSet\n set of prototypes which we like to evaluate\n \"\"\"\n df = pd.DataFrame(X)\n SS_tot = np.sum(np.sum(np.square(df - df.mean())))\n df['label'] = labels\n SS_res = 0\n for label, group in df.groupby('label'):\n group = group.drop('label', axis=1)\n p = ps.prototypes[label]\n SS_res += np.sum(np.sum(np.square(group - p.feature_values)))\n R2 = 1 - (SS_res / SS_tot)\n return R2\n\ndef get_description_old(tree, feature_names, labels_dummy = None):\n \"\"\"\n Produce description for decision tree.\n \n Parameters\n -----------\n tree : scikit-learn DecisionTreeClassifier\n decision tree you want to describe\n feature_names : list \n feature names\n labels_dummy : list\n dummy labels necessary to compute class weights when class_weight is 'balanced'\n 1 : part of cluster, 0 : not part of cluster\n \"\"\"\n class_weight = tree.class_weight\n if class_weight == 'balanced':\n class_weight_vec = compute_class_weight(class_weight, [0,1], labels_dummy)\n class_weight = {0 : class_weight_vec[0], 1 : class_weight_vec[1]}\n descriptions = []\n tree_ = tree.tree_\n feature_name = [feature_names[i] if i != _tree.TREE_UNDEFINED else \"undefined!\"\n for i in tree_.feature]\n \n def recurse(node, depth, descr):\n if tree_.feature[node] != _tree.TREE_UNDEFINED: # if internal node\n name = feature_name[node]\n threshold = tree_.threshold[node]\n descr_left = descr.copy()\n descr_left[node] = {'name' : name, 
'threshold' : threshold, 'sign' : '<='}\n descr_right = descr.copy()\n descr_right[node] = {'name' : name, 'threshold' : threshold, 'sign' : '>'}\n recurse(tree_.children_left[node], depth + 1, descr_left)\n recurse(tree_.children_right[node], depth + 1, descr_right)\n else: #if leaf node\n value = tree_.value[node][0]\n value_0 = value[0]/class_weight[0] # number of instances not in cluster\n value_1 = value[1]/class_weight[1] # number of instances in cluster\n if value_1 > value_0: # if leaf node belongs to target cluster:\n descriptions.append((descr, value_0, value_1)) \n recurse(0,1,{})\n return descriptions\n\ndef describe_old(clu, dt_params, X, feature_names = None, cluster = None):\n \"\"\"\n For each cluster, fit a decision tree to describe the cluster.\n\n Parameters\n ----------\n clu : cluster object\n sklearn clustering object\n dt_params : dict\n decision tree parameters\n cluster : int\n cluster id of cluster you want to describe if you don't want to describe all clusters\n X : array like\n training instances (either in SHAP space or in feature value space)\n \"\"\"\n # default class_weight\n if 'class_weight' not in dt_params:\n dt_params['class_weight'] = {0: 1, 1: 1} \n descriptions = {}\n \n if feature_names is None:\n feature_names = list(X)\n\n # determine range\n if cluster is None:\n c_range = range(clu.n_clusters)\n else:\n c_range = [cluster]\n\n # compute descriptions\n for i in c_range:\n labels_dummy = [1 if l == i else 0 for l in clu.labels_]\n dt = DecisionTreeClassifier(**dt_params)\n dt.fit(X, labels_dummy)\n descriptions[i] = get_description(dt, feature_names, labels_dummy)\n return descriptions",
"id": "11678922",
"language": "Python",
"matching_score": 2.5231099128723145,
"max_stars_count": 0,
"path": "fpdash/cbr/cluster.py"
},
{
"content": "import pandas as pd\nimport numpy as np\nimport itertools\nfrom numba import njit, jit\n\n\"\"\"\n------------------------------------------\nGeneral SHAP computation helper functions.\n------------------------------------------\n\"\"\"\n\n@njit\ndef find_index(array, item):\n \"\"\"\n Accelerated index finder.\n \"\"\"\n for idx, val in np.ndenumerate(array):\n if val == item:\n return idx[0]\n\ndef computeb1b2(x, w, o, i, pre_idx, order=-1, prng=None):\n \"\"\"\n Compute b1 and b2 for order sample o, instance sample w, and feature index i.\n \n Parameters\n ----------\n w : numpy array\n array of size n with feature values of instance (w)\n o : numpy array \n array of size n with order of features\n i : int\n feature index\n pre_idx : numpy array\n arrangement of feature indices\n \"\"\"\n pos_i = find_index(o, i) # pos_i = np.where(o == i)[0][0]\n idx = pre_idx[pos_i + 1:] # positions succeeding i \n o_idx = o[idx] # features succeeding i\n b1 = x.copy()\n b1[o_idx] = w[o_idx] # fill features succeeding i with w\n b2 = b1.copy()\n b2[i] = w[i] # change x_i to w_i \n return b1, b2\n\n\"\"\"\n-----------------------------------\nExact computation helper functions.\n-----------------------------------\n\"\"\"\n\ndef retrieve_instances(mc, X):\n \"\"\"\n Retrieve all \n \"\"\"\n if mc == 'uniform-cat':\n z = [X[c].unique() for c in X.columns]\n instances = list(itertools.product(*z))\n elif mc == 'training':\n instances = X.as_matrix()\n return instances\n\ndef retrieve_permutations(n):\n permutations = list(itertools.permutations(range(n)))\n return permutations\n\n\"\"\"\n-----------------------------------\nAdaptive Sampling helper functions.\n-----------------------------------\n\"\"\"\ndef update(existingAggregate, newValue):\n \"\"\"\n Update the count, mean, and mean square.\n \n Welford's online algorithm for calculating variance.\n \n existingAggretate : tuple\n (count, mean, M2)\n newValue : float\n f(b1) - f(b2) for newest sample\n \"\"\"\n (count, mean, M2) = existingAggregate\n count += 1 \n delta = newValue - mean\n mean += delta / count\n delta2 = newValue - mean\n M2 += delta * delta2\n sampleVariance = M2/(count - 1)\n return count, mean, M2, sampleVariance\n\n\"\"\"\n-------------\nSHAP classes.\n-------------\n\"\"\"\n\nclass Generator:\n \"\"\"\n Generator for instaces (w)\n \"\"\"\n def __init__(self, X, mc):\n \"\"\"\n Paramaters\n ----------\n X : pandas DataFrame\n Training data for sampling instances (w)\n mc : string\n - 'training' : sample instances (w) from training data\n \"\"\"\n self.mc = mc\n self.feature_generators = {}\n self.columns = X.columns.tolist()\n if mc == 'training':\n self.X = X\n else: \n raise ValueError(\"'%s' is an invalid Monte Carlo sampling strategy.\" % self.mc)\n return\n\n def sample(self, n, seed = 1, sample_type = 'dict', replace = True):\n \"\"\"\n Sample n ranodm instances (w).\n\n Parameters\n ----------\n n : int\n number of samples\n seed : int\n pseudorandom seed\n replacement : Boolean\n sample with replacement (True) or without replacement (False)\n\n Returns\n -------\n samples : numpy array\n two dimensional numpy array of feature * instances\n \"\"\"\n if self.mc == 'training':\n samples = self.X.sample(n = n, random_state = seed, replace = replace)\n return samples\n\nclass Values:\n \"\"\"\n Store SHAP values and samples.\n \"\"\"\n def __init__(self, shap_values, samples = None):\n self.shap_values = shap_values\n self.samples = samples\n return\n\nclass Explainer:\n \"\"\"\n Object that can be called to 
compute SHAP values. Stores training data, classifier and sampling parameter.\n \"\"\"\n\n def __init__(self, X, mc = 'training', f = None):\n \"\"\"\n Paramaters\n ----------\n X : pandas DataFrame\n Training data for sampling instances (w)\n mc : string\n - 'training' : sample instances (w) from training data\n \"\"\"\n self.X = X\n self.mc = mc\n self.f = f\n return\n\n def standard(self, x, m, f = None, X = None, mc = None, seed = 1, verbose = False, return_samples=False):\n\n \"\"\"\n Naive Monte Carlo approximation of SHAP values.\n \n Parameters\n ----------\n x : numpy array\n numpy array containing all feature values, features must be ordered according to dataframe\n f : object\n model should have function '.predict_proba()' that takes as input an instance and as \n output a probability for the positive class\n m : int\n number of samples for each feature\n X : pandas DataFrame\n training dataset\n mc : string\n Monte Carlo sampling strategy that indicates how random instances are sampled.\n The sampling strategy affects the computation of the conditional expectation.\n 'training' : random instances will be sampled from the training data\n seed : int\n seed used for generating random instances and choosing random orders\n verbose : Boolean\n controls verbosity\n return_samples : Boolean\n returning samples that were used to commpute SHAP values to allow for SHAP-ICE and SHAP-ICC plots.\n \"\"\"\n\n # Retrieve explainer variables\n X, mc, f = self.X, self.mc, self.f\n \n # Initialize vars\n features = np.arange(len(X.columns)).astype(int) # numpy array with feature indexes\n n = len(features)\n chi = Generator(X=X, mc=mc)\n phi = {}\n pre_idx = np.arange(len(features)) \n\n # Sample all permutations (o)\n prng = np.random.RandomState(seed=seed)\n permutations = [prng.permutation(range(n)) for i in range(m*n)]\n \n # Sample all instances (w)\n samples = np.array(chi.sample(n=m*n, seed=seed, sample_type = 'array'))\n \n #TEMP\n temp_results = {}\n \n # Compute all b1 and b2\n b1_all = [0]*(m*n) # initialize list with all b1's\n b2_all = [0]*(m*n) # initialize list with all b2's\n for i, index_n in zip(features, range(n)): # for each feature\n temp_feature_results = []\n for w, o, index_m in zip(samples[index_n*m:(index_n+1)*m], permutations[index_n*m:(index_n+1)*m], range(m)):\n # for each sample index_m, consisting of instance w and order o:\n b1, b2 = computeb1b2(x, w, o, i, pre_idx)\n all_index = index_n*m + index_m\n b1_all[all_index] = b1\n b2_all[all_index] = b2\n # TEMP\n temp_feature_results.append({'o' : tuple(o), 'w' : w, 'b1' : b1, 'b2' : b2, 'v' : w[i]})\n temp_results[i] = pd.DataFrame(temp_feature_results)\n\n # Make predictions for instances b1 and b2\n predictions = np.array(f.predict_proba(b1_all + b2_all))[:, 1]\n if verbose:\n print(\"Average predictions b1/b2: %.5f\" %(np.mean(predictions)))\n\n # Compute Shapley value based on marginal contributions\n for i, j in zip(X.columns, features):\n b1_sum = sum(predictions[(j*m):(j+1)*m])\n b2_sum = sum(predictions[(n*m+j*m):(n*m+(j+1)*m)])\n phi[i] = (b1_sum - b2_sum)/m\n \n # TEMP\n b1_i = predictions[(j*m):(j+1)*m]\n b2_i = predictions[(n*m+j*m):(n*m+(j+1)*m)]\n temp_results[j]['f(b1)'] = b1_i\n temp_results[j]['f(b2)'] = b2_i\n temp_results[j]['c'] = b1_i - b2_i\n if return_samples:\n return Values(phi, samples = temp_results)\n else:\n return Values(phi)",
"id": "9859222",
"language": "Python",
"matching_score": 0.8790044784545898,
"max_stars_count": 0,
"path": "fpdash/shapley/shap.py"
},
{
"content": "from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n readme = fh.read()\n\nsetup(\n name=\"camelbird\",\n version=\"0.0.1\",\n description=\"Fair Machine Learning\",\n url=\"https://github.com/hildeweerts/camelbird\",\n author=\"<NAME>\",\n author_email=\"<EMAIL>\",\n licence=\"BSD 3-Clause License\",\n long_description=readme,\n long_description_content_type=\"text/markdown\",\n packages=find_packages(),\n install_requires=[\"numpy\",\n \"scikit-learn\",\n ],\n python_requires='>=3.6',\n)",
"id": "12017033",
"language": "Python",
"matching_score": 0.0764203891158104,
"max_stars_count": 2,
"path": "setup.py"
}
] | 0.879004 |
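The `app.py` entry in the row above retrieves similar cases with a nearest-neighbour search whose distance is weighted by the absolute SHAP contributions of the alert being explained (its `define_distance_function` combined with scikit-learn's brute-force `NearestNeighbors`). Below is a minimal, self-contained sketch of that idea on toy data; names such as `make_weighted_distance`, `shap_base`, and `shap_alert` are illustrative stand-ins rather than the app's own objects.

```python
import numpy as np
from sklearn.neighbors import NearestNeighbors

def make_weighted_distance(contributions):
    # Build a Euclidean distance in which each feature is weighted by the
    # absolute SHAP contribution of the alert being explained.
    weights = np.abs(np.asarray(contributions, dtype=float))
    def weighted_distance(a, b):
        return np.sqrt(np.sum(weights * np.square(np.asarray(a) - np.asarray(b))))
    return weighted_distance

# Toy data: 5 case-base explanations and 1 alert explanation, 3 features each.
rng = np.random.RandomState(0)
shap_base = rng.normal(size=(5, 3))    # stand-in for SHAP values of the case base
shap_alert = rng.normal(size=(1, 3))   # stand-in for the alert's SHAP values

# A callable metric requires the brute-force algorithm.
nn = NearestNeighbors(n_neighbors=2, algorithm="brute",
                      metric=make_weighted_distance(shap_alert[0]))
nn.fit(shap_base)
distances, neighbors = nn.kneighbors(shap_alert)
print(neighbors[0], distances[0])
```

Weighting the distance by |SHAP| makes "similar cases" mean cases that are similar for the same reasons the model flagged the alert, which appears to be the intent of the per-alert `nn_dict` construction in the app.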
mrakhsha | [
{
"content": "import numpy as np\nimport scipy.io\nimport Shared_Exp_Beh as seb\nimport os.path as op\nimport pytest\n\ndata_path = op.join(seb.__path__[0], 'data/')\n\n\ndef test_beh_analysis():\n \"\"\"\n\n :return: Test results raise error\n \"\"\"\n\n # Test if the size of all variables of the experiment is same\n file_directory = data_path\n subject_list = ['behav_Shared_ARSubNum21']\n beh_vars = seb.var_extractor(file_directory, subject_list)\n assert beh_vars[0][\"conf_val\"].shape == beh_vars[0][\"conf_val\"].shape == beh_vars[0][\"get_rew\"].shape == \\\n beh_vars[0][\"rew_val\"].shape == beh_vars[0][\"sub_rt\"].shape == beh_vars[0][\"att_first\"].shape == \\\n beh_vars[0][\"num_tar_att\"].shape\n\n # Tests of stay, winstay, and loseswitch\n cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n assert seb.Behavior.performance(cor_vec) == float(0)\n\n cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n assert seb.Behavior.performance(cor_vec) == float(100)\n\n cor_vec = np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0])\n assert seb.Behavior.performance(cor_vec) == float(50)\n\n pre_cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n assert seb.Behavior.prob_stay(cor_vec, pre_cor_vec) == float(1)\n\n pre_cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n assert seb.Behavior.prob_stay(cor_vec, pre_cor_vec) == float(1)\n\n pre_cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n assert seb.Behavior.prob_stay(cor_vec, pre_cor_vec) == float(0)\n\n # when all the trials are correct LoseSwitch should be nan\n # when all the trials are wrong WinStay should be nan\n pre_cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n assert seb.Behavior.prob_winstay(cor_vec, pre_cor_vec) == float(0)\n assert np.isnan(seb.Behavior.prob_loseswitch(cor_vec, pre_cor_vec))\n\n pre_cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n assert seb.Behavior.prob_winstay(cor_vec, pre_cor_vec) == float(1)\n assert np.isnan(seb.Behavior.prob_loseswitch(cor_vec, pre_cor_vec))\n\n pre_cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n assert np.isnan(seb.Behavior.prob_winstay(cor_vec, pre_cor_vec))\n assert seb.Behavior.prob_loseswitch(cor_vec, pre_cor_vec) == float(0)\n\n pre_cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n assert np.isnan(seb.Behavior.prob_winstay(cor_vec, pre_cor_vec))\n assert seb.Behavior.prob_loseswitch(cor_vec, pre_cor_vec) == float(1)\n\n # smoke tests for beh_analysis\n for id_rew in [0, 1, 3]:\n for id_conf in [0, 1, 2]:\n for id_side in [0, 1, 2]:\n for id_att_first in [0, 1, 2]:\n beh = seb.beh_analysis(beh_vars, idx_rew=id_rew, idx_conf=id_conf, idx_side=id_side, idx_att_first=id_att_first)\n assert beh[\"performance\"].shape == beh[\"prob_stay\"].shape == beh[\n \"prob_winstay\"].shape == beh[\"prob_loseswitch\"].shape == beh[\"mean_sub_rt\"].shape\n\n # raise error for not permissible values\n with pytest.raises(ValueError):\n seb.beh_analysis(beh_vars, idx_rew=4, idx_conf=0, idx_side=0, idx_att_first=2)\n with pytest.raises(ValueError):\n seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=4, idx_side=0, idx_att_first=2)\n with pytest.raises(ValueError):\n seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=0, idx_side=4, 
idx_att_first=2)\n with pytest.raises(ValueError):\n seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=0, idx_side=1, idx_att_first=4)\n with pytest.raises(ValueError):\n seb.beh_analysis(beh_vars, idx_rew=4, idx_conf=4, idx_side=4, idx_att_first=4)\n\n",
"id": "1910622",
"language": "Python",
"matching_score": 4.227602005004883,
"max_stars_count": 1,
"path": "Shared_Exp_Beh/tests/test_Shared_Exp_Beh.py"
},
{
"content": "\"\"\"\nThis code is written for behavioral analysis of the EEG shared experiment\nWritten by <NAME> \n\n\"\"\"\n# import necessary libraries and packages\nimport scipy.io\nimport numpy as np\n\n\n# define a class of functions for behavior analysis\nclass Behavior:\n \"\"\"\n Blueprint for behavior\n\n \"\"\"\n\n @staticmethod\n # calculating performance\n def performance(cor_vec):\n\n mean_perf = (np.nanmean(cor_vec)) * 100\n return mean_perf\n\n @staticmethod\n # calculating probability of stay\n def prob_stay(cor_vec, pre_cor_vec):\n\n idx_stay = np.array(pre_cor_vec == cor_vec)\n prob_stay = np.mean(idx_stay)\n return prob_stay\n\n @staticmethod\n # calculating probability of WinStay\n def prob_winstay(cor_vec, pre_cor_vec):\n\n idx_stay = np.array(pre_cor_vec == cor_vec)\n if np.mean(pre_cor_vec) == 0:\n prob_winstay = np.nan\n else:\n prob_winstay = np.mean(idx_stay & pre_cor_vec) / np.mean(pre_cor_vec)\n return prob_winstay\n\n @staticmethod\n # calculating probability of LoseSwitch\n def prob_loseswitch(cor_vec, pre_cor_vec):\n\n idx_switch = np.array(pre_cor_vec != cor_vec)\n pre_false_vec = ~(pre_cor_vec.astype(bool)) * 1\n if np.mean(pre_false_vec) == 0:\n prob_loseswitch = np.nan\n else:\n prob_loseswitch = np.mean(idx_switch & pre_false_vec) / np.mean(pre_false_vec)\n return prob_loseswitch\n\n\n# define the function which extracts the behavioral variables we need\ndef var_extractor(file_directory, subject_list):\n \"\"\"\n \n :param file_directory: A string for the address of the input data\n :param subject_list: A list of inputs\n :return: a dictionary of all the variables of the experiment and behavior\n \"\"\"\n\n beh_vars = []\n\n # main loop for loading the data\n for subject in subject_list:\n\n # Load .mat file\n beh_data = scipy.io.loadmat(file_directory + subject + '.mat')\n num_tar_att = np.array(beh_data[\"NumTargetAttended\"])\n att_side = np.array(beh_data[\"AttendedSide\"])\n att_side = np.transpose(att_side)\n conf_val = np.array(beh_data[\"ConfidenceValue\"])\n get_rew = np.array(beh_data[\"GetReward\"])\n rew_val = np.array(beh_data[\"RewardVecValue\"])\n att_first = np.array(beh_data[\"AttendedFirst\"])\n sub_rt = np.array(beh_data[\"SubjectRT\"])\n\n # make a dictionary for necessary variables\n tmp_beh_vars = {\"att_side\": att_side, \"conf_val\": conf_val, \"get_rew\": get_rew, \"rew_val\": rew_val,\n \"sub_rt\": sub_rt, \"att_first\": att_first, \"num_tar_att\": num_tar_att}\n\n # append counters data together\n beh_vars.append(tmp_beh_vars)\n\n return beh_vars\n\n\n# define the function which makes the data table of subjects\ndef table_maker(beh_vars):\n \"\"\"\n\n :param beh_vars: list of the data of the subjects\n :return: a dictionary of the data\n \"\"\"\n import pandas as pd\n from scipy import stats\n table_data = pd.DataFrame()\n for subject in range(len(beh_vars)):\n\n tmp_sub = beh_vars[subject]\n num_trial = tmp_sub[\"att_first\"].size\n num_block = tmp_sub[\"att_side\"].size\n att_side = tmp_sub[\"att_side\"]\n att_side = np.repeat(att_side, num_trial/num_block)\n sub_num = np.repeat(subject, num_trial)\n\n # make a dictionary for necessary variables\n tmp_data = {\"sub_num\": sub_num, \"att_side\": att_side, \"conf_val\": tmp_sub[\"conf_val\"].flatten(),\n \"get_rew\": tmp_sub[\"get_rew\"].flatten(), \"rew_val\": tmp_sub[\"rew_val\"].flatten(),\n \"sub_rt\": tmp_sub[\"sub_rt\"].flatten(), \"att_first\": tmp_sub[\"att_first\"].flatten(),\n \"num_tar_att\": tmp_sub[\"num_tar_att\"].flatten(), \"z_rt\": 
stats.zscore(tmp_sub[\"sub_rt\"].flatten())}\n\n tmp_table_data = pd.DataFrame(data=tmp_data)\n table_data = table_data.append(tmp_table_data, ignore_index=True)\n\n return table_data\n\n\ndef beh_analysis(beh_vars, idx_rew, idx_conf, idx_side, idx_att_first):\n \"\"\"\n\n :param beh_vars: a dictionary of the inputs of the behavioral parameters\n :param idx_rew: int to show which reward value we need\n :param idx_conf: int to show which confidence results we want\n :param idx_side: int to show which side we want\n :param idx_att_first: int shows whether we want the trials in which target appears in attended stream earlier\n :return:\n a dictionary of all behavioral policies\n \"\"\"\n # check if the inputs are legitimate\n if (idx_rew not in [0, 1, 3]) and (idx_side in [0, 1, 2]) and (idx_conf in [0, 1, 2]) and \\\n (idx_att_first in [0, 1, 2]):\n er_var = 'idx_rew'\n er_exist = True\n elif (idx_rew in [0, 1, 3]) and (idx_side not in [0, 1, 2]) and (idx_conf in [0, 1, 2]) and \\\n (idx_att_first in [0, 1, 2]):\n er_var = 'idx_side'\n er_exist = True\n elif (idx_rew in [0, 1, 3]) and (idx_side in [0, 1, 2]) and (idx_conf not in [0, 1, 2]) and \\\n (idx_att_first in [0, 1, 2]):\n er_var = 'idx_conf'\n er_exist = True\n elif (idx_rew in [0, 1, 3]) and (idx_side in [0, 1, 2]) and (idx_conf in [0, 1, 2]) and \\\n (idx_att_first not in [0, 1, 2]):\n er_var = 'idx_att_first'\n er_exist = True\n elif (idx_rew in [0, 1, 3]) and (idx_side in [0, 1, 2]) and (idx_conf in [0, 1, 2]) and \\\n (idx_att_first in [0, 1, 2]):\n er_exist = False\n else:\n er_var = 'Unknown'\n er_exist = True\n\n if er_exist:\n raise ValueError('Invalid value for {}'.format(er_var))\n\n # separate the blocks we need\n if idx_side == 1 or idx_side == 2:\n num_block = int((beh_vars[0][\"att_side\"].shape[0]) / 2)\n else:\n num_block = int(beh_vars[0][\"att_side\"].shape[0])\n\n # initialization of matrices\n performance = np.nan * np.zeros(shape=(len(beh_vars), num_block))\n prob_stay = np.nan * np.zeros(shape=(len(beh_vars), num_block))\n prob_winstay = np.nan * np.zeros(shape=(len(beh_vars), num_block))\n prob_loseswitch = np.nan * np.zeros(shape=(len(beh_vars), num_block))\n mean_sub_rt = np.nan * np.zeros(shape=(len(beh_vars), num_block))\n sub_rt = []\n\n cnt_sub = 0\n for sub_beh in beh_vars:\n\n tmp_beh_data1 = {}\n\n if idx_side == 1 or idx_side == 2:\n\n idx_side_block = np.where(sub_beh[\"att_side\"] == idx_side)[0]\n\n for key in sub_beh.keys():\n tmp_beh_data1[key] = sub_beh[key][idx_side_block, :]\n else:\n tmp_beh_data1 = sub_beh\n\n for block in range(num_block):\n # calculate the average of correct over reward and confidence conditions\n if (idx_rew == 1 or idx_rew == 3) and (idx_conf != 2 and idx_conf != 1) and (\n idx_att_first != 1 and idx_att_first != 0):\n\n idx_sel_bool = tmp_beh_data1[\"rew_val\"][block, :] == idx_rew\n\n elif (idx_rew != 1 and idx_rew != 3) and (idx_conf == 2 or idx_conf == 1) and (\n idx_att_first != 1 and idx_att_first != 0):\n\n idx_sel_bool = tmp_beh_data1[\"conf_val\"][block, :] == idx_conf\n\n elif (idx_rew != 1 and idx_rew != 3) and (idx_conf != 2 and idx_conf != 1) and (\n idx_att_first == 1 or idx_att_first == 0):\n\n idx_sel_bool = tmp_beh_data1[\"att_first\"][block, :] == idx_att_first\n\n elif (idx_rew == 1 or idx_rew == 3) and (idx_conf == 2 or idx_conf == 1) and (\n idx_att_first != 1 and idx_att_first != 0):\n\n idx_sel_bool = (tmp_beh_data1[\"conf_val\"][block, :] == idx_conf) & \\\n (tmp_beh_data1[\"rew_val\"][block, :] == idx_rew)\n\n elif (idx_rew == 1 or idx_rew == 3) 
and (idx_conf != 2 and idx_conf != 1) and (\n idx_att_first == 1 or idx_att_first == 0):\n\n idx_sel_bool = (tmp_beh_data1[\"rew_val\"][block, :] == idx_rew) & \\\n (tmp_beh_data1[\"att_first\"][block, :] == idx_att_first)\n\n elif (idx_rew != 1 and idx_rew != 3) and (idx_conf == 2 or idx_conf == 1) and (\n idx_att_first == 1 or idx_att_first == 0):\n\n idx_sel_bool = (tmp_beh_data1[\"conf_val\"][block, :] == idx_conf) & \\\n (tmp_beh_data1[\"att_first\"][block, :] == idx_att_first)\n\n elif (idx_rew == 1 or idx_rew == 3) and (idx_conf == 2 or idx_conf == 1) and (\n idx_att_first == 1 or idx_att_first == 0):\n\n idx_sel_bool = (tmp_beh_data1[\"conf_val\"][block, :] == idx_conf) & \\\n (tmp_beh_data1[\"rew_val\"][block, :] == idx_rew) & \\\n (tmp_beh_data1[\"att_first\"][block, :] == idx_att_first)\n else:\n\n idx_sel_bool = np.ones((len(tmp_beh_data1[\"rew_val\"][block, :]), 1), dtype=bool)\n\n # keeping only the trials with one target\n idx_sel_bool = idx_sel_bool.reshape(idx_sel_bool.shape[0], 1)\n tmp_cor_vec = (tmp_beh_data1[\"get_rew\"][block, :])\n tmp_cor_vec = tmp_cor_vec.reshape(tmp_cor_vec.shape[0], 1)\n tmp_num_tar = (tmp_beh_data1[\"num_tar_att\"][block, :])\n tmp_num_tar = tmp_num_tar.reshape(tmp_num_tar.shape[0], 1)\n idx_one_target = tmp_num_tar == 1\n idx_tar = (idx_one_target & idx_sel_bool)\n cor_vec = tmp_cor_vec[idx_tar]\n idx_pre = np.insert(idx_tar[:-1], 0, True)\n # since previous trial could have 2 reward I just make all 2's to be also 1 for stay and winstay\n pre_cor_vec = (np.transpose(tmp_cor_vec[idx_pre]) > 0).astype(int)\n performance[cnt_sub, block] = Behavior.performance(cor_vec)\n prob_stay[cnt_sub, block] = Behavior.prob_stay(cor_vec, pre_cor_vec)\n prob_winstay[cnt_sub, block] = Behavior.prob_winstay(cor_vec, pre_cor_vec)\n prob_loseswitch[cnt_sub, block] = Behavior.prob_loseswitch(cor_vec, pre_cor_vec)\n tmp_rt = tmp_beh_data1[\"sub_rt\"][block, :]\n tmp_rt = tmp_rt.reshape(tmp_rt.shape[0], 1)\n tmp_rt = tmp_rt[idx_tar & tmp_cor_vec > 0]\n tmp_rt = tmp_rt[tmp_rt > 0] # remove the ones which was no answer or negative RT (answering before target)\n\n if any(tmp_rt > 1):\n raise ValueError('RT could not be higher than 1sec')\n sub_rt.append(tmp_rt)\n mean_sub_rt[cnt_sub, block] = np.mean(tmp_rt)\n # add one to the counter of subjects\n cnt_sub += 1\n beh_result = {\"performance\": performance, \"prob_stay\": prob_stay, \"prob_winstay\": prob_winstay,\n \"prob_loseswitch\": prob_loseswitch, \"sub_rt\": sub_rt, \"mean_sub_rt\": mean_sub_rt}\n\n return beh_result\n\n\n",
"id": "8216713",
"language": "Python",
"matching_score": 2.389328956604004,
"max_stars_count": 1,
"path": "Shared_Exp_Beh/Shared_Exp_Beh.py"
},
{
"content": "# # idx_side = 0 means all the trials\n# # idx_side = 1 means the trials of side 1\n# # idx_side = 2 means the trials of side 2\n#\n# # idx_rew = 0 means all the trials\n# # idx_rew = 3 means the trials with reward 3\n# # idx_rew = 1 means the trials with reward 1\n#\n# # idx_conf = 0 means all the trials\n# # idx_conf = 2 means the trials with confidence 2\n# # idx_conf = 1 means the trials with confidence 1\n#\n# # idx_att_first = 2 means all trials\n# # idx_att_first = 1 means trials that target appeared earlier in the attended stream\n# # idx_att_first = 0 means trials that target appeared later in the attended stream\n#\n# beh0002 = seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=0, idx_side=0, idx_att_first=2)\n# beh0012 = seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=0, idx_side=1, idx_att_first=2)\n# beh0022 = seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=0, idx_side=2, idx_att_first=2)\n# beh0001 = seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=0, idx_side=0, idx_att_first=1)\n# beh0000 = seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=0, idx_side=0, idx_att_first=0)\n# beh3002 = seb.beh_analysis(beh_vars, idx_rew=3, idx_conf=0, idx_side=0, idx_att_first=2)\n# beh1002 = seb.beh_analysis(beh_vars, idx_rew=1, idx_conf=0, idx_side=0, idx_att_first=2)\n# beh0202 = seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=2, idx_side=0, idx_att_first=2)\n# beh0102 = seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=1, idx_side=0, idx_att_first=2)\n#\n# beh_all = (beh0002, beh0012, beh0022, beh0001, beh0000, beh3002, beh1002, beh0202, beh0102)\n\nfrom scipy import stats\nimport numpy as np\n# import matplotlib.pyplot as plt\n# Plot mean of each subject in two cases of attend first and attend second (if there are two different type of people)\n# fig = plt.figure(0)\n# x = np.arange(24)\n# y = np.nanmean(beh0001[\"performance\"], 1)\n# yerr = np.nanstd(beh0001[\"performance\"], 1) / np.sqrt(beh0001[\"performance\"].shape[1])\n# plt.errorbar(x, y, yerr=yerr)\n#\n# y = np.nanmean(beh0000[\"performance\"], 1)\n# yerr = np.nanstd(beh0000[\"performance\"], 1) / np.sqrt(beh0000[\"performance\"].shape[1])\n# plt.errorbar(x, y, yerr=yerr)\n#\n# # check if the difference between the performance when target comes earlier or later in attended stream is different\n# print(stats.ttest_rel(beh0001[\"performance\"].flatten(), beh0000[\"performance\"].flatten()))\n#\n# # check the difference between the performance high rew vs low rew\n# print(stats.ttest_rel(beh3002[\"performance\"].flatten(), beh1002[\"performance\"].flatten()))\n#\n# # check the difference between the performance high conf vs low conf (it is significant)\n# id_valid = np.logical_and(beh0102[\"performance\"].flatten()>0, beh0202[\"performance\"].flatten()>0)\n# print(stats.ttest_rel(beh0202[\"performance\"].flatten()[id_valid], beh0102[\"performance\"].flatten()[id_valid]))\n#\n# # check if the difference between the performance for different sides (side bias)\n# print(stats.ttest_rel(beh0022[\"performance\"].flatten(), beh0012[\"performance\"].flatten()))\n",
"id": "12743283",
"language": "Python",
"matching_score": 2.3924713134765625,
"max_stars_count": 1,
"path": "Shared_Exp_Beh/scripts/scratch.py"
},
{
"content": "\nimport Shared_Exp_Beh as seb\nimport os.path as op\nimport numpy as np\nfrom scipy import stats\nimport matplotlib.pyplot as plt\n\ndata_path = op.join(seb.__path__[0], 'data/')\nfile_directory = data_path\nfig_directory = op.join(seb.__path__[0], 'figures/')\n\nsubject_list = ['behav_Shared_ARSubNum21', 'behav_Shared_ESSubNum24', 'behav_Shared_HASubNum20',\n 'behav_Shared_JHSubNum29',\n 'behav_Shared_JSSubNum25', 'behav_Shared_PDSubNum28', 'behav_Shared_SPSubNum27',\n 'behav_Shared_STSubNum26',\n 'behav_Shared_TLSubNum22', 'behav_Shared_TWSubNum30', 'behav_Shared_TZSubNum23',\n 'behav_Shared_AHSubNum12',\n 'behav_Shared_ASSubNum18', 'behav_Shared_BJSSubNum14',\n 'behav_Shared_BSSubNum15',\n 'behav_Shared_JEVSubNum11', 'behav_Shared_JGSubNum19', 'behav_Shared_JSSubNum16',\n 'behav_Shared_MHSubNum17',\n 'behav_Shared_OKSubNum13']\n\nbeh_vars = seb.var_extractor(file_directory, subject_list)\n\n\n# idx_side = 0 means all the trials\n# idx_side = 1 means the trials of side 1\n# idx_side = 2 means the trials of side 2\n\n# idx_rew = 0 means all the trials\n# idx_rew = 3 means the trials with reward 3\n# idx_rew = 1 means the trials with reward 1\n\n# idx_conf = 0 means all the trials\n# idx_conf = 2 means the trials with confidence 2\n# idx_conf = 1 means the trials with confidence 1\n\n# idx_att_first = 2 means all trials\n# idx_att_first = 1 means trials that target appeared earlier in the attended stream\n# idx_att_first = 0 means trials that target appeared later in the attended stream\n\n\n# looking only different rewards\nmean_perf = []\nsem_perf = []\nmean_rt = []\nsem_rt = []\n\nfor id_rew in [0, 1, 3]:\n\n beh = seb.beh_analysis(beh_vars, idx_rew=id_rew, idx_conf=0, idx_side=0, idx_att_first=2)\n avgperf = np.nanmean(beh[\"performance\"])\n semperf = stats.sem(beh[\"performance\"], axis=None, ddof=0, nan_policy='omit')\n avgrt = np.nanmean(beh[\"mean_sub_rt\"])\n semrt = stats.sem(beh[\"mean_sub_rt\"], axis=None, ddof=0, nan_policy='omit')\n mean_perf.append(avgperf)\n sem_perf.append(semperf)\n mean_rt.append(avgrt)\n sem_rt.append(semrt)\n\n# plotting all the performances in different reward conditions\nfig = plt.figure(1)\nplt.errorbar(np.arange(3), mean_perf, yerr=sem_perf)\nax = fig.gca()\n# Hide the right and top spines\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.set_ylabel('Accuracy', fontsize=20)\nax.set_xticks(range(3))\nax.set_xticklabels(('All', 'LowRew', 'HighRew'))\nfor axis in ['bottom', 'left']:\n ax.spines[axis].set_linewidth(3)\nax.tick_params(width=3)\nax.set_ylim([45, 85])\nfig.tight_layout()\nplt.yticks(np.arange(45, 85, 10))\nplt.tick_params(labelsize=15)\nplt.show()\nfig.savefig(fig_directory+\"AccuracyRew.pdf\")\n\n# plotting all the rt in different reward conditions\nfig = plt.figure(2)\nplt.errorbar(np.arange(3), mean_rt, yerr=sem_rt)\nax = fig.gca()\n# Hide the right and top spines\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.set_ylabel('z(RT)', fontsize=20)\nax.set_xticks(range(3))\nax.set_xticklabels(('All', 'LowRew', 'HighRew'))\nfor axis in ['bottom', 'left']:\n ax.spines[axis].set_linewidth(3)\nax.tick_params(width=3)\nax.set_ylim([0.52, 0.57])\nfig.tight_layout()\nplt.yticks(np.arange(0.52, 0.57, 0.01))\nplt.tick_params(labelsize=15)\nplt.show()\nfig.savefig(fig_directory+\"RTRew.pdf\")\n\n# looking only different confidences\nmean_perf = []\nsem_perf = []\nmean_rt = []\nsem_rt = []\n\nfor idx_conf in [0, 1, 2]:\n\n beh = seb.beh_analysis(beh_vars, idx_rew=0, 
idx_conf=idx_conf, idx_side=0, idx_att_first=2)\n avgperf = np.nanmean(beh[\"performance\"])\n semperf = stats.sem(beh[\"performance\"], axis=None, ddof=0, nan_policy='omit')\n avgrt = np.nanmean(beh[\"mean_sub_rt\"])\n semrt = stats.sem(beh[\"mean_sub_rt\"], axis=None, ddof=0, nan_policy='omit')\n mean_perf.append(avgperf)\n sem_perf.append(semperf)\n mean_rt.append(avgrt)\n sem_rt.append(semrt)\n\n# plotting all the performances in different confidence conditions\nfig = plt.figure(1)\nplt.errorbar(np.arange(3), mean_perf, yerr=sem_perf)\nax = fig.gca()\n# Hide the right and top spines\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.set_ylabel('Accuracy', fontsize=20)\nax.set_xticks(range(3))\nax.set_xticklabels(('All', 'LowConf', 'HighConf'))\nfor axis in ['bottom', 'left']:\n ax.spines[axis].set_linewidth(3)\nax.tick_params(width=3)\nax.set_ylim([45, 85])\nfig.tight_layout()\nplt.yticks(np.arange(45, 85, 10))\nplt.tick_params(labelsize=15)\nplt.show()\nfig.savefig(fig_directory+\"AccuracyConf.pdf\")\n\n# plotting all the rt in different confidence conditions\nfig = plt.figure(2)\nplt.errorbar(np.arange(3), mean_rt, yerr=sem_rt)\nax = fig.gca()\n# Hide the right and top spines\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.set_ylabel('z(RT)', fontsize=20)\nax.set_xticks(range(3))\nax.set_xticklabels(('All', 'LowConf', 'HighConf'))\nfor axis in ['bottom', 'left']:\n ax.spines[axis].set_linewidth(3)\nax.tick_params(width=3)\nax.set_ylim([0.52, 0.57])\nfig.tight_layout()\nplt.yticks(np.arange(0.52, 0.57, 0.01))\nplt.tick_params(labelsize=15)\nplt.show()\nfig.savefig(fig_directory+\"RTConf.pdf\")\n\n\n# looking only different side of attention\nmean_perf = []\nsem_perf = []\nmean_rt = []\nsem_rt = []\n\nfor idx_side in [0, 1, 2]:\n\n beh = seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=0, idx_side=idx_side, idx_att_first=2)\n avgperf = np.nanmean(beh[\"performance\"])\n semperf = stats.sem(beh[\"performance\"], axis=None, ddof=0, nan_policy='omit')\n avgrt = np.nanmean(beh[\"mean_sub_rt\"])\n semrt = stats.sem(beh[\"mean_sub_rt\"], axis=None, ddof=0, nan_policy='omit')\n mean_perf.append(avgperf)\n sem_perf.append(semperf)\n mean_rt.append(avgrt)\n sem_rt.append(semrt)\n\n# plotting all the performances in different confidence conditions\nfig = plt.figure(1)\nplt.errorbar(np.arange(3), mean_perf, yerr=sem_perf)\nax = fig.gca()\n# Hide the right and top spines\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.set_ylabel('Accuracy', fontsize=20)\nax.set_xticks(range(3))\nax.set_xticklabels(('All', 'Left', 'Right'))\nfor axis in ['bottom', 'left']:\n ax.spines[axis].set_linewidth(3)\nax.tick_params(width=3)\nax.set_ylim([74, 81])\nfig.tight_layout()\nplt.yticks(np.arange(74, 81, 1))\nplt.tick_params(labelsize=15)\nplt.show()\nfig.savefig(fig_directory+\"AccuracySide.pdf\")\n\n# plotting all the rt in different confidence conditions\nfig = plt.figure(2)\nplt.errorbar(np.arange(3), mean_rt, yerr=sem_rt)\nax = fig.gca()\n# Hide the right and top spines\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.set_ylabel('z(RT)', fontsize=20)\nax.set_xticks(range(3))\nax.set_xticklabels(('All', 'Left', 'Right'))\nfor axis in ['bottom', 'left']:\n ax.spines[axis].set_linewidth(3)\nax.tick_params(width=3)\nax.set_ylim([0.52, 0.57])\nfig.tight_layout()\nplt.yticks(np.arange(0.52, 0.57, 0.01))\nplt.tick_params(labelsize=15)\nplt.show()\nfig.savefig(fig_directory+\"RTSide.pdf\")\n\n\n# 
looking only different order of attention\nmean_perf = []\nsem_perf = []\nmean_rt = []\nsem_rt = []\n\nfor idx_att_first in [2, 1, 0]:\n\n beh = seb.beh_analysis(beh_vars, idx_rew=0, idx_conf=0, idx_side=0, idx_att_first=idx_att_first)\n avgperf = np.nanmean(beh[\"performance\"])\n semperf = stats.sem(beh[\"performance\"], axis=None, ddof=0, nan_policy='omit')\n avgrt = np.nanmean(beh[\"mean_sub_rt\"])\n semrt = stats.sem(beh[\"mean_sub_rt\"], axis=None, ddof=0, nan_policy='omit')\n mean_perf.append(avgperf)\n sem_perf.append(semperf)\n mean_rt.append(avgrt)\n sem_rt.append(semrt)\n\n# plotting all the performances in different confidence conditions\nfig = plt.figure(1)\nplt.errorbar(np.arange(3), mean_perf, yerr=sem_perf)\nax = fig.gca()\n# Hide the right and top spines\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.set_ylabel('Accuracy', fontsize=20)\nax.set_xticks(range(3))\nax.set_xticklabels(('All', 'AttFirst', 'AttSecond'))\nfor axis in ['bottom', 'left']:\n ax.spines[axis].set_linewidth(3)\nax.tick_params(width=3)\nax.set_ylim([74, 81])\nfig.tight_layout()\nplt.yticks(np.arange(74, 81, 1))\nplt.tick_params(labelsize=15)\nplt.show()\nfig.savefig(fig_directory+\"AccuracyOrder.pdf\")\n\n# plotting all the rt in different confidence conditions\nfig = plt.figure(2)\nplt.errorbar(np.arange(3), mean_rt, yerr=sem_rt)\nax = fig.gca()\n# Hide the right and top spines\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.set_ylabel('z(RT)', fontsize=20)\nax.set_xticks(range(3))\nax.set_xticklabels(('All', 'AttFirst', 'AttSecond'))\nfor axis in ['bottom', 'left']:\n ax.spines[axis].set_linewidth(3)\nax.tick_params(width=3)\nax.set_ylim([0.52, 0.57])\nfig.tight_layout()\nplt.yticks(np.arange(0.52, 0.57, 0.01))\nplt.tick_params(labelsize=15)\nplt.show()\nfig.savefig(fig_directory+\"RTOrder.pdf\")\n",
"id": "2654191",
"language": "Python",
"matching_score": 6.369410514831543,
"max_stars_count": 1,
"path": "Shared_Exp_Beh/scripts/Plot_Beh.py"
},
{
"content": "import Shared_Exp_Beh as seb\nimport os.path as op\nimport statsmodels.formula.api as smf\n\ndata_path = op.join(seb.__path__[0], 'data/')\n\nfile_directory = data_path\n# list of subjects (To do: Change this to argument)\nsubject_list = ['behav_Shared_ARSubNum21', 'behav_Shared_ESSubNum24', 'behav_Shared_HASubNum20',\n 'behav_Shared_JHSubNum29',\n 'behav_Shared_JSSubNum25', 'behav_Shared_PDSubNum28', 'behav_Shared_SPSubNum27',\n 'behav_Shared_STSubNum26',\n 'behav_Shared_TLSubNum22', 'behav_Shared_TWSubNum30', 'behav_Shared_TZSubNum23',\n 'behav_Shared_AHSubNum12',\n 'behav_Shared_ASSubNum18', 'behav_Shared_BJSSubNum14',\n 'behav_Shared_BSSubNum15',\n 'behav_Shared_JEVSubNum11', 'behav_Shared_JGSubNum19', 'behav_Shared_JSSubNum16',\n 'behav_Shared_MHSubNum17',\n 'behav_Shared_OKSubNum13']\n\nbeh_vars = seb.var_extractor(file_directory, subject_list)\n# making a table from all the trials of all subjects\ntable_data = seb.table_maker(beh_vars)\ntable_data = table_data[table_data[\"num_tar_att\"] == 1]\n\n# regression for accuracy\nres1 = smf.ols(formula='get_rew ~ att_first * att_side + conf_val * rew_val ', data=table_data)\nres = res1.fit()\nprint(res.summary())\n\n# regression for reaction time only on correct trials\ntable_data_cor = table_data[table_data[\"get_rew\"] == 1]\nres1 = smf.ols(formula='z_rt ~ att_first + att_side + conf_val * rew_val ', data=table_data_cor)\nres = res1.fit()\nprint(res.summary())",
"id": "5278892",
"language": "Python",
"matching_score": 1.9613232612609863,
"max_stars_count": 1,
"path": "Shared_Exp_Beh/scripts/Regress_Beh.py"
}
] | 2.392471 |
Mounika-dev | [
{
"content": "from django.test import TestCase\nfrom django.contrib.auth import get_user_model\nfrom core import models\n\n\ndef sample_user(email='<EMAIL>', password='<PASSWORD>'):\n \"\"\"to create a sample user for testing\"\"\"\n return get_user_model().objects.create_user(email, password)\n\n\nclass ModelTests(TestCase):\n\n def test_create_user_with_email_successful(self):\n \"\"\"test if creating a new user with an email is successful\"\"\"\n email = '<EMAIL>'\n password = '<PASSWORD>'\n user = get_user_model().objects.create_user(email, password)\n self.assertEqual(user.email, email)\n self.assertTrue(user.check_password(password))\n\n def test_new_user_email_normalized(self):\n \"\"\"Test if the new user's email id is normalized\"\"\"\n email = '<EMAIL>'\n user = get_user_model().objects.create_user(email, 'test562')\n\n self.assertEqual(user.email, email.lower())\n\n def test_new_user_email_validity(self):\n \"\"\"test if the new user has an email\"\"\"\n with self.assertRaises(ValueError):\n get_user_model().objects.create_user(None, 'testpw')\n\n def test_create_new_superuser(self):\n \"\"\" test creating a new superuser\"\"\"\n user = get_user_model().objects.create_superuser('<EMAIL>', 'pas')\n self.assertTrue(user.is_superuser)\n self.assertTrue(user.is_staff)\n\n def test_tag_str(self):\n \"\"\"test if tag is in string representation\"\"\"\n tag = models.Tag.objects.create(\n user=sample_user(),\n name='vegan'\n )\n\n self.assertEqual(str(tag), tag.name)\n",
"id": "141323",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "app/core/tests/test_models.py"
}
] | 0 |
jamelsingh | [
{
"content": "# PLEASE STOP!\n# DO NOT EDIT THIS FILE OR DELETE THIS FILE\n# Create a new config.py file in same directory and import, then extend this class.\n\nimport os\n\nfrom telethon.tl.types import ChatBannedRights\n\n\nclass Config(object):\n LOGGER = True\n\n # MUST NEEDED VARS\n # set this value with your name\n ALIVE_NAME = os.environ.get(\"ALIVE_NAME\", None)\n # Get the values for following 2 from my.telegram.org\n APP_ID = int(os.environ.get(\"APP_ID\", 6))\n API_HASH = os.environ.get(\"API_HASH\") or None\n # Get this value by running python3 stringsetup.py or https://repl.it/@ImSaravanakrish/Tamilbot#main.py\n STRING_SESSION = os.environ.get(\"STRING_SESSION\", None)\n # Telegram BOT Token and bot username from @BotFather\n TG_BOT_TOKEN = os.environ.get(\"TG_BOT_TOKEN\") or os.environ.get(\n \"TG_BOT_TOKEN_BF_HER\", None\n )\n TG_BOT_USERNAME = os.environ.get(\"TG_BOT_USERNAME\") or os.environ.get(\n \"TG_BOT_USER_NAME_BF_HER\", None\n )\n # get this value from http://www.timezoneconverter.com/cgi-bin/findzone.tzc\n TZ = os.environ.get(\"TZ\", \"Asia/Kolkata\")\n # set this with required cat repo link\n UPSTREAM_REPO = os.environ.get(\n \"UPSTREAM_REPO\", \"https://github.com/ivetri/Tamilbot.git\"\n )\n\n # BASIC and MAIN CONFIG VARS\n # Set this value with group id of private group(can be found this value by .id)\n PRIVATE_GROUP_BOT_ID = int(os.environ.get(\"PRIVATE_GROUP_BOT_API_ID\") or 0)\n # Set this value same as PRIVATE_GROUP_BOT_API_ID if you need pmgaurd\n PRIVATE_GROUP_ID = int(os.environ.get(\"PRIVATE_GROUP_ID\") or 0)\n # set this value with channel id of private channel use full for .frwd cmd\n PRIVATE_CHANNEL_BOT_API_ID = int(os.environ.get(\"PRIVATE_CHANNEL_BOT_API_ID\") or 0)\n # for heroku plugin you can get this value from https://dashboard.heroku.com/account\n HEROKU_API_KEY = os.environ.get(\"HEROKU_API_KEY\", None)\n # set this with same app name you given for heroku\n HEROKU_APP_NAME = os.environ.get(\"HEROKU_APP_NAME\", None)\n # Owner id to show profile link of given id as owner\n OWNER_ID = int(os.environ.get(\"OWNER_ID\") or 0)\n # Maximum no of pms should be sent before he get block will work only if you set PRIVATE_GROUP_ID\n MAX_FLOOD_IN_PMS = int(os.environ.get(\"MAX_FLOOD_IN_PMS\", 5))\n # remove background api get it from revome.bg \n REM_BG_API_KEY = os.environ.get(\"REM_BG_API_KEY\", None)\n # set this with group id so it keeps notifying about your tagged messages or pms\n PM_LOGGER_GROUP_ID = int(\n os.environ.get(\"PM_LOGGER_GROUP_ID\")\n or os.environ.get(\"PM_LOGGR_BOT_API_ID\")\n or 0\n )\n\n # Custom vars for userbot\n # set this will channel id of your custom plugins\n # for custom thumb image set this with your required thumb telegraoh link\n THUMB_IMAGE = os.environ.get(\n \"THUMB_IMAGE\", \"https://telegra.ph/file/2790938cacb9aa80d478c.jpg\"\n )\n # specify NO_LOAD with plugin names for not loading in userbot\n NO_LOAD = [x for x in os.environ.get(\"NO_LOAD\", \"\").split()]\n # For custom alive pic\n ALIVE_PIC = os.environ.get(\"ALIVE_PIC\", None)\n # for Custom pmpermit pic\n PMPERMIT_PIC = os.environ.get(\"PMPERMIT_PIC\", None)\n # for custom pic for .digitalpfp\n DEFAULT_NAME = os.environ.get(\"DEFAULT_NAME\", None)\n # forcustomizing pmpermit text\n CUSTOM_PMPERMIT_TEXT = os.environ.get(\"CUSTOM_PMPERMIT_TEXT\", None)\n # number of rows of buttons to be displayed in .help command\n # emoji to be displayed in .help\n EMOJI_TO_DISPLAY_IN_HELP = os.environ.get(\"EMOJI_TO_DISPLAY_IN_HELP\", \" \")\n # specify command handler that 
should be used for the plugins\n # this should be a valid \"regex\" pattern\n COMMAND_HAND_LER = os.environ.get(\"COMMAND_HAND_LER\", r\"\\.\")\n # set this with required folder path to act as download folder\n TMP_DOWNLOAD_DIRECTORY = os.environ.get(\"TMP_DOWNLOAD_DIRECTORY\", \"./downloads\")\n # set this with required folder path to act as temparary folder\n TEMP_DIR = os.environ.get(\"TEMP_DIR\", \"./temp/\")\n # For custom stickerpack names\n\n # DO NOT EDIT BELOW THIS LINE IF YOU DO NOT KNOW WHAT YOU ARE DOING\n # TG API limit. A message can have maximum 4096 characters!\n MAX_MESSAGE_SIZE_LIMIT = 4095\n # specify LOAD and NO_LOAD\n LOAD = []\n # warn mode for anti flood\n ANTI_FLOOD_WARN_MODE = ChatBannedRights(\n until_date=None, view_messages=None, send_messages=True\n )\n\n # time.py\n COUNTRY = str(os.environ.get(\"COUNTRY\", \"\"))\n TZ_NUMBER = int(os.environ.get(\"TZ_NUMBER\", 1))\n # For updater plugin\n UPSTREAM_REPO_BRANCH = os.environ.get(\"UPSTREAM_REPO_BRANCH\", \"master\")\n\n\nclass Production(Config):\n LOGGER = False\n\n\nclass Development(Config):\n LOGGER = True\n",
"id": "6086545",
"language": "Python",
"matching_score": 1.3601572513580322,
"max_stars_count": 25,
"path": "sample_config.py"
},
{
"content": "\"\"\"\n✘ Commands Available -\n• `{i}delchat`\n\tDelete the group this cmd is used in.\n• `{i}getlink`\n Get link of group this cmd is used in.\n• `{i}create (b|g|c) <group_name>`\n Create group woth a specific name.\n b - megagroup/supergroup\n g - small group\n c - channel\n\"\"\"\n\n\nfrom telethon.errors import ChatAdminRequiredError as no_admin\nfrom telethon.tl import functions\n\nfrom . import *\n\n\n@ultroid_cmd(\n pattern=\"delchat$\",\n groups_only=True,\n)\nasync def _(e):\n xx = await eor(e, \"`Processing...`\")\n try:\n await e.client(functions.channels.DeleteChannelRequest(e.chat_id))\n except TypeError:\n return await eod(xx, \"`Cant delete this chat`\", time=10)\n except no_admin:\n return await eod(xx, \"`I m not an admin`\", time=10)\n await e.client.send_message(Var.LOG_CHANNEL, f\"#Deleted\\nDeleted {e.chat_id}\")\n\n\n@ultroid_cmd(\n pattern=\"getlink$\",\n groups_only=True,\n)\nasync def _(e):\n xx = await eor(e, \"`Processing...`\")\n try:\n r = await e.client(\n functions.messages.ExportChatInviteRequest(e.chat_id),\n )\n except no_admin:\n return await eod(xx, \"`I m not an admin`\", time=10)\n await eod(xx, f\"Link:- {r.link}\")\n\n\n@ultroid_cmd(\n pattern=\"create (b|g|c)(?: |$)(.*)\",\n)\nasync def _(e):\n type_of_group = e.pattern_match.group(1)\n group_name = e.pattern_match.group(2)\n xx = await eor(e, \"`Processing...`\")\n if type_of_group == \"b\":\n try:\n r = await e.client(\n functions.messages.CreateChatRequest(\n users=[\"@missrose_bot\"],\n title=group_name,\n )\n )\n created_chat_id = r.chats[0].id\n await e.client(\n functions.messages.DeleteChatUserRequest(\n chat_id=created_chat_id,\n user_id=\"@missrose_bot\",\n )\n )\n result = await e.client(\n functions.messages.ExportChatInviteRequest(\n peer=created_chat_id,\n )\n )\n await xx.edit(\n f\"Your [{group_name}]({result.link}) Group Made Boss!\",\n link_preview=False,\n )\n except Exception as ex:\n await xx.edit(str(ex))\n elif type_of_group == \"g\" or type_of_group == \"c\":\n try:\n r = await e.client(\n functions.channels.CreateChannelRequest(\n title=group_name,\n about=\"Join @TeamUltroid\",\n megagroup=False if type_of_group == \"c\" else True,\n )\n )\n created_chat_id = r.chats[0].id\n result = await e.client(\n functions.messages.ExportChatInviteRequest(\n peer=created_chat_id,\n )\n )\n await xx.edit(\n f\"Your [{group_name}]({result.link}) Group/Channel Has been made Boss!\",\n link_preview=False,\n )\n except Exception as ex:\n await xx.edit(str(ex))\n\n\nHELP.update({f\"{__name__.split('.')[1]}\": f\"{__doc__.format(i=HNDLR)}\"})\n",
"id": "8373174",
"language": "Python",
"matching_score": 1.3762890100479126,
"max_stars_count": 0,
"path": "chats.py"
},
{
"content": "import asyncio, subprocess\nimport time, re, io\nfrom userbot import bot, BOTLOG, BOTLOG_CHATID, CMD_HELP\nfrom telethon import events, functions, types\nfrom telethon.events import StopPropagation\nfrom telethon.tl.functions.messages import ExportChatInviteRequest\nfrom telethon.tl.functions.contacts import BlockRequest\nfrom telethon.tl.functions.channels import LeaveChannelRequest, CreateChannelRequest, DeleteMessagesRequest\nfrom collections import deque\nfrom telethon.tl.functions.users import GetFullUserRequest\nfrom userbot.events import register\nfrom userbot.utils import admin_cmd\n\n@borg.on(admin_cmd(\"leave\"))\nasync def leave(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"`நான் இந்த குழுவை விட்டு வெளியேறுகிறேன்!🚶 🚶 🚶`\")\n time.sleep(3)\n if '-' in str(e.chat_id):\n await bot(LeaveChannelRequest(e.chat_id))\n else:\n await e.edit('`இது ஒரு குழு அல்ல`')\n\n@borg.on(admin_cmd(\"hm\"))\n#@register(outgoing=True, pattern=\"^;__;$\")\nasync def fun(e):\n t = \";__;\"\n for j in range(10):\n t = t[:-1] + \"_;\"\n await e.edit(t)\n\n@borg.on(admin_cmd(\"oof\"))\n#@register(outgoing=True, pattern=\"^Oof$\")\nasync def Oof(e):\n t = \"Oof\"\n for j in range(15):\n t = t[:-1] + \"of\"\n await e.edit(t)\n\n@borg.on(admin_cmd(\"cry\"))\n#@register(outgoing=True, pattern=\"^.cry$\")\nasync def cry(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"(;´༎ຶД༎ຶ)\")\n\n@borg.on(admin_cmd(\"fp\"))\n#@register(outgoing=True, pattern=\"^.fp$\")\nasync def facepalm(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"🤦♂\")\n\n@borg.on(admin_cmd(\"moon\"))\n#@register(outgoing=True, pattern=\"^.mmoon$\")\nasync def _(event):\n\tif event.fwd_from:\n\t\treturn\n\tdeq = deque(list(\"🌗🌘🌑🌒🌓🌔🌕🌖\"))\n\tfor _ in range(32):\n\t\tawait asyncio.sleep(0.1)\n\t\tawait event.edit(\"\".join(deq))\n\t\tdeq.rotate(1)\n\n@borg.on(admin_cmd(\"source\"))\n#@register(outgoing=True, pattern=\"^.source$\")\nasync def source(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"github.com/TamilBots/TamilBot\")\n \n@borg.on(admin_cmd(\"readme\"))\n#@register(outgoing=True, pattern=\"^.readme$\")\nasync def reedme(e):\n if not e.text[0].isalpha() and e.text[0] not in (\"/\", \"#\", \"@\", \"!\"):\n await e.edit(\"https://github.com/TamilBots/TamilBot/blob/master/README.md\")\n\n\n\n@borg.on(admin_cmd(\"heart\"))\t\t\n#@register(outgoing=True, pattern=\"^.heart$\")\nasync def _(event):\n\tif event.fwd_from:\n\t\treturn\n\tdeq = deque(list(\"❤️🧡💛💚💙💜🖤\"))\n\tfor _ in range(32):\n\t\tawait asyncio.sleep(0.1)\n\t\tawait event.edit(\"\".join(deq))\n\t\tdeq.rotate(1)\n\t\t\n@borg.on(admin_cmd(\"king\"))\n#@register(outgoing=True, pattern=\"^.king$\")\nasync def _(event):\n\tif event.fwd_from:\n\t\treturn\n\tdeq = deque(list(\"👉🤴🏻👉🙂👉😎\"))\n\tfor _ in range(32):\n\t\tawait asyncio.sleep(0.1)\n\t\tawait event.edit(\"\".join(deq))\n\t\tdeq.rotate(1)\n\n@borg.on(admin_cmd(pattern=r\"hack\"))\n\nasync def _(event):\n\n if event.fwd_from:\n\n return\n\n animation_interval = 2\n\n animation_ttl = range(0, 11)\n\n #input_str = event.pattern_match.group(1)\n\n #if input_str == \"hack\":\n\n if event.reply_to_msg_id:\n reply_message = await event.get_reply_message()\n replied_user = await event.client(GetFullUserRequest(reply_message.from_id))\n firstname = replied_user.user.first_name\n usname = replied_user.user.username\n idd = 
reply_message.from_id\n if idd==813878981:\n await event.edit(\"This is My Master\\nI can't hack my master's Account\\n**How dare you trying to hack my master's account AssKisser!**\\n\\n__Your account has been hacked! Pay 69$ to my master__ [FridayOT](t.me/fridayOT) __to release your account__😏\")\n else:\n await event.edit(\"Hacking..\")\n animation_chars = [\n \n \"`Hacking... 0%\\n▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `\\n\\n\\n TERMINAL:\\nDownloading Bruteforce-Telegram-0.1.tar.gz (9.3 kB)\",\n \"`Hacking... 4%\\n█▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `\\n\\n\\n TERMINAL:\\nDownloading Bruteforce-Telegram-0.1.tar.gz (9.3 kB)\\nCollecting Data Package\",\n \"`Hacking... 8%\\n██▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `\\n\\n\\n TERMINAL:\\nDownloading Bruteforce-Telegram-0.1.tar.gz (9.3 kB)\\nCollecting Data Package\\n Downloading Telegram-Data-Sniffer-7.1.1-py2.py3-none-any.whl (82 kB)\", \n \"`Hacking... 20%\\n█████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `\\n\\n\\n TERMINAL:\\nDownloading Bruteforce-Telegram-0.1.tar.gz (9.3 kB)\\nCollecting Data Package\\n Downloading Telegram-Data-Sniffer-7.1.1-py2.py3-none-any.whl (82 kB)\\nBuilding wheel for Tg-Bruteforcing (setup.py): finished with status 'done'\",\n \"`Hacking... 36%\\n█████████▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒▒ `\\n\\n\\n TERMINAL:\\nDownloading Bruteforce-Telegram-0.1.tar.gz (9.3 kB)\\nCollecting Data Package\\n Downloading Telegram-Data-Sniffer-7.1.1-py2.py3-none-any.whl (82 kB)\\nBuilding wheel for Tg-Bruteforcing (setup.py): finished with status 'done'\\nCreated wheel for telegram: filename=Telegram-Data-Sniffer-0.0.1-py3-none-any.whl size=1306 sha256=cb224caad7fe01a6649188c62303cd4697c1869fa12d280570bb6ac6a88e6b7e\",\n \"`Hacking... 52%\\n█████████████▒▒▒▒▒▒▒▒▒▒▒▒ `\\n\\n\\n TERMINAL:\\nDownloading Bruteforce-Telegram-0.1.tar.gz (9.3 kB)\\nCollecting Data Package\\n Downloading Telegram-Data-Sniffer-7.1.1-py2.py3-none-any.whl (82 kB)\\nBuilding wheel for Tg-Bruteforcing (setup.py): finished with status 'done'\\nCreated wheel for telegram: filename=Telegram-Data-Sniffer-0.0.1-py3-none-any.whl size=1306 sha256=cb224caad7fe01a6649188c62303cd4697c1869fa12d280570bb6ac6a88e6b7e\\n Stored in directory: /app/.cache/pip/wheels/a2/9f/b5/650dd4d533f0a17ca30cc11120b176643d27e0e1f5c9876b5b\",\n \"`Hacking... 84%\\n█████████████████████▒▒▒▒ `\\n\\n\\n TERMINAL:\\nDownloading Bruteforce-Telegram-0.1.tar.gz (9.3 kB)\\nCollecting Data Package\\n Downloading Telegram-Data-Sniffer-7.1.1-py2.py3-none-any.whl (82 kB)\\nBuilding wheel for Tg-Bruteforcing (setup.py): finished with status 'done'\\nCreated wheel for telegram: filename=Telegram-Data-Sniffer-0.0.1-py3-none-any.whl size=1306 sha256=cb224caad7fe01a6649188c62303cd4697c1869fa12d280570bb6ac6a88e6b7e\\n Stored in directory: /app/.cache/pip/wheels/a2/9f/b5/650dd4d533f0a17ca30cc11120b176643d27e0e1f5c9876b5b\\n\\n **Successfully Hacked Telegram Server Database**\",\n \"`Hacking... 
100%\\n█████████HACKED███████████ `\\n\\n\\n TERMINAL:\\nDownloading Bruteforce-Telegram-0.1.tar.gz (9.66 kB)\\nCollecting Data Package\\n Downloading Telegram-Data-Sniffer-7.1.1-py2.py3-none-any.whl (82 kB)\\nBuilding wheel for Tg-Bruteforcing (setup.py): finished with status 'done'\\nCreated wheel for telegram: filename=Telegram-Data-Sniffer-0.0.1-py3-none-any.whl size=1306 sha256=cb224caad7fe01a6649188c62303cd4697c1869fa12d280570bb6ac6a88e6b7e\\n Stored in directory: /app/.cache/pip/wheels/a2/9f/b5/650dd4d533f0a17ca30cc11120b176643d27e0e1f5c9876b5b\\n\\n **Successfully Hacked Telegram Server Database**\\n\\n\\n🔹Output: Generating.....\",\n \"`Targeted Account Hacked...\\n\\nPay 999999999$ To My Boss Remove this hack....`\\n\\nTERMINAL:\\nDownloading Bruteforce-Telegram-0.1.tar.gz (9.3 kB)\\nCollecting Data Package\\n Downloading Telegram-Data-Sniffer-7.1.1-py2.py3-none-any.whl (82 kB)\\nBuilding wheel for Tg-Bruteforcing (setup.py): finished with status 'done'\\nCreated wheel for telegram: filename=Telegram-Data-Sniffer-0.0.1-py3-none-any.whl size=1306 sha256=cb224caad7fe01a6649188c62303cd4697c1869fa12d280570bb6ac6a88e6b7e\\n Stored in directory: /app/.cache/pip/wheels/a2/9f/b5/650dd4d533f0a17ca30cc11120b176643d27e0e1f5c9876b5b\\n\\n **Successfully Hacked this Account From Telegram Database**\\n\\n\\n🔹**Output:** Successful\"\n\n ]\n\n for i in animation_ttl:\n\n await asyncio.sleep(animation_interval)\n\n await event.edit(animation_chars[i % 11])\n else:\n await event.edit(\"No User is Defined\\n are u dumb\\n reply to a user.\")\n\n\t\t\nCMD_HELP.update(\n {\n \"Extra\":\n\n \"\"\"╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.leave`\n ╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Leave a Chat__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.hm`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __You try it!__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.cry`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Send face palm emoji.__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.moon`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Bot will send a cool moon animation.__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.clock`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Bot will send a cool clock animation.__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.readme`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Reedme.__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.source`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Gives the source of your userbot__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.myusernames`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __List of Usernames owned by you.__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.oof`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Same as ;__; but ooof__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.earth`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Sends Earth animation__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.hack`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __hack targeted users database__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.heart`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Try and you'll get your emotions back__\n\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.king`\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Be The Real King__\n\"\"\"\n }\n)\n",
"id": "12332083",
"language": "Python",
"matching_score": 3.0698304176330566,
"max_stars_count": 25,
"path": "userbot/plugins/extra.py"
},
{
"content": "from asyncio import sleep\n\nfrom telethon.errors import rpcbaseerrors\n\nfrom userbot.utils import errors_handler, admin_cmd\nfrom userbot import BOTLOG, BOTLOG_CHATID, CMD_HELP\n\npurgelist = {}\n\n\n@borg.on(admin_cmd(pattern=\"purge(?: |$)(.*)\"))\n@errors_handler\nasync def fastpurger(event):\n if event.fwd_from:\n return\n chat = await event.get_input_chat()\n msgs = []\n count = 0\n input_str = event.pattern_match.group(1)\n reply = await event.get_reply_message()\n if reply:\n if input_str and input_str.isnumeric():\n count += 1\n async for msg in event.client.iter_messages(\n event.chat_id,\n limit=(int(input_str) - 1),\n offset_id=reply.id,\n reverse=True,\n ):\n msgs.append(msg)\n count += 1\n msgs.append(event.reply_to_msg_id)\n if len(msgs) == 100:\n await event.client.delete_messages(chat, msgs)\n msgs = []\n elif input_str:\n return await edit_or_reply(\n event, f\"**Error**\\n`{input_str} is not an integer. Use proper syntax.`\"\n )\n else:\n async for msg in event.client.iter_messages(\n chat, min_id=event.reply_to_msg_id\n ):\n msgs.append(msg)\n count += 1\n msgs.append(event.reply_to_msg_id)\n if len(msgs) == 100:\n await event.client.delete_messages(chat, msgs)\n msgs = []\n else:\n await edit_or_reply(\n event,\n \"`No message specified.`\",\n )\n return\n if msgs:\n await event.client.delete_messages(chat, msgs)\n await event.delete()\n hi = await event.client.send_message(\n event.chat_id,\n \"`Fast purge complete!\\nPurged \" + str(count) + \" messages.`\",\n )\n if BOTLOG:\n await event.client.send_message(\n BOTLOG_CHATID,\n \"#PURGE \\n`Purge of \" + str(count) + \" messages done successfully.`\",\n )\n await sleep(5)\n await hi.delete()\n\n\n@borg.on(admin_cmd(pattern=\"purgefrom$\"))\n@errors_handler\nasync def purge_from(event):\n if event.fwd_from:\n return\n reply = await event.get_reply_message()\n if reply:\n reply_message = await reply_id(event)\n purgelist[event.chat_id] = reply_message\n await edit_delete(\n event,\n \"`This Message marked for deletion. 
Reply to another message with purgeto to delete all messages in between.`\",\n )\n else:\n await edit_delete(event, \"`Reply to a message to let me know what to delete.`\")\n\n\n@borg.on(admin_cmd(pattern=\"purgeto$\"))\n@errors_handler\nasync def purge_to(event):\n chat = await event.get_input_chat()\n if event.fwd_from:\n return\n reply = await event.get_reply_message()\n try:\n from_message = purgelist[event.chat_id]\n except KeyError:\n return await edit_delete(\n event,\n \"`First mark the messsage with purgefrom and then mark purgeto .So, I can delete in between Messages`\",\n )\n if not reply or not from_message:\n return await edit_delete(\n event,\n \"`First mark the messsage with purgefrom and then mark purgeto .So, I can delete in between Messages`\",\n )\n try:\n to_message = await reply_id(event)\n msgs = []\n count = 0\n async for msg in event.client.iter_messages(\n event.chat_id, min_id=(from_message - 1), max_id=(to_message + 1)\n ):\n msgs.append(msg)\n count += 1\n msgs.append(event.reply_to_msg_id)\n if len(msgs) == 100:\n await event.client.delete_messages(chat, msgs)\n msgs = []\n if msgs:\n await event.client.delete_messages(chat, msgs)\n await edit_delete(\n event,\n \"`Fast purge complete!\\nPurged \" + str(count) + \" messages.`\",\n )\n if BOTLOG:\n await event.client.send_message(\n BOTLOG_CHATID,\n \"#PURGE \\n`Purge of \" + str(count) + \" messages done successfully.`\",\n )\n except Exception as e:\n await edit_delete(event, f\"**Error**\\n`{str(e)}`\")\n\n\n@borg.on(admin_cmd(pattern=\"purgeme\"))\n@errors_handler\nasync def purgeme(event):\n if event.fwd_from:\n return\n message = event.text\n count = int(message[9:])\n i = 1\n\n async for message in event.client.iter_messages(event.chat_id, from_user=\"me\"):\n if i > count + 1:\n break\n i += 1\n await message.delete()\n\n smsg = await event.client.send_message(\n event.chat_id,\n \"**Purge complete!**` Purged \" + str(count) + \" messages.`\",\n )\n if BOTLOG:\n await event.client.send_message(\n BOTLOG_CHATID,\n \"#PURGEME \\n`Purge of \" + str(count) + \" messages done successfully.`\",\n )\n await sleep(5)\n await smsg.delete()\n\n\n@borg.on(admin_cmd(pattern=\"del(?: |$)(.*)\"))\n@errors_handler\nasync def delete_it(event):\n if event.fwd_from:\n return\n input_str = event.pattern_match.group(1)\n msg_src = await event.get_reply_message()\n if msg_src:\n if input_str and input_str.isnumeric():\n await event.delete()\n await sleep(int(input_str))\n try:\n await msg_src.delete()\n if BOTLOG:\n await event.client.send_message(\n BOTLOG_CHATID, \"#DEL \\n`Deletion of message was successful`\"\n )\n except rpcbaseerrors.BadRequestError:\n if BOTLOG:\n await event.client.send_message(\n BOTLOG_CHATID,\n \"`Well, I can't delete a message. 
I am not an admin`\",\n )\n elif input_str:\n await edit_or_reply(event, \"`Well the time you mentioned is invalid.`\")\n else:\n try:\n await msg_src.delete()\n await event.delete()\n if BOTLOG:\n await event.client.send_message(\n BOTLOG_CHATID, \"#DEL \\n`Deletion of message was successful`\"\n )\n except rpcbaseerrors.BadRequestError:\n await edit_or_reply(event, \"`Well, I can't delete a message`\")\n else:\n await event.delete()\n\n\nCMD_HELP.update(\n {\n \"purge\": \"**Plugin : **`purge`\\\n \\n\\n• **Syntax : **`.purge <count> reply`\\\n \\n• **Function : **__Deletes the x(count) amount of messages from the replied message if you don't use count then deletes all messages from there.__\\\n \\n\\n• **Syntax : **`.purgefrom reply`\\\n \\n• **Function : **__Will Mark that message as oldest message of interval to delete messages.__\\\n \\n\\n• **Syntax : **`.purgeto reply`\\\n \\n• **Function : **__Will Mark that message as newest message of interval to delete messages and will delete all messages in that interval.__\\\n \\n\\n• **Syntax : **`.purgeme <count>`\\\n \\n• **Function : **__Deletes x(count) amount of your latest messages.__\\\n \\n\\n• **Syntax : **`.del <count> reply`\\\n \\n• **Function : **__Deletes the message you replied to in x(count) seconds if count is not used then deletes immediately.__\"\n }\n)\n",
"id": "10210942",
"language": "Python",
"matching_score": 2.359581232070923,
"max_stars_count": 25,
"path": "userbot/plugins/purge.py"
},
{
"content": "import asyncio\n\nfrom telethon.events import ChatAction\nfrom telethon.tl.functions.contacts import BlockRequest, UnblockRequest\nfrom telethon.tl.types import MessageEntityMentionName\n\nfrom userbot import CMD_HELP\nfrom userbot.plugins.sql_helper.mute_sql import is_muted, mute, unmute\nfrom userbot.utils import admin_cmd\n\nfrom userbot.events import register\n\nasync def get_full_user(event):\n args = event.pattern_match.group(1).split(\":\", 1)\n extra = None\n if event.reply_to_msg_id and not len(args) == 2:\n previous_message = await event.get_reply_message()\n user_obj = await event.client.get_entity(previous_message.from_id)\n extra = event.pattern_match.group(1)\n elif len(args[0]) > 0:\n user = args[0]\n if len(args) == 2:\n extra = args[1]\n if user.isnumeric():\n user = int(user)\n if not user:\n await event.edit(\"`User ID Is Required\")\n return\n if event.message.entities is not None:\n probable_user_mention_entity = event.message.entities[0]\n if isinstance(probable_user_mention_entity, MessageEntityMentionName):\n user_id = probable_user_mention_entity.user_id\n user_obj = await event.client.get_entity(user_id)\n return user_obj\n try:\n user_obj = await event.client.get_entity(user)\n except Exception as err:\n return await event.edit(\"Something Went Wrong\", str(err))\n return user_obj, extra\n\n\nasync def get_user_from_id(user, event):\n if isinstance(user, str):\n user = int(user)\n try:\n user_obj = await event.client.get_entity(user)\n except (TypeError, ValueError) as err:\n await event.edit(str(err))\n return None\n return user_obj\n\n\n@borg.on(admin_cmd(pattern=\"ggban ?(.*)\"))\nasync def gspider(userbot):\n lol = userbot\n sender = await lol.get_sender()\n me = await lol.client.get_me()\n if not sender.id == me.id:\n event = await lol.reply(\"Gbanning This User !\")\n else:\n event = await lol.edit(\"Wait Processing.....\")\n me = await userbot.client.get_me()\n await event.edit(f\"Global Ban Is Coming ! Wait And Watch You Nigga\")\n my_mention = \"[{}](tg://user?id={})\".format(me.first_name, me.id)\n f\"@{me.username}\" if me.username else my_mention\n await userbot.get_chat()\n a = b = 0\n if userbot.is_private:\n user = userbot.chat\n reason = userbot.pattern_match.group(1)\n else:\n userbot.chat.title\n try:\n user, reason = await get_full_user(userbot)\n except:\n pass\n try:\n if not reason:\n reason = \"Private\"\n except:\n return await event.edit(f\"**Something W3NT Wrong 🤔**\")\n if user:\n if user.id == 1169076058 or user.id == 1492186775:\n return await event.edit(\n f\"**Didn't , Your Father Teach You ? That You Cant Gban Dev**\"\n )\n try:\n from userbot.modules.sql_helper.gmute_sql import gmute\n except:\n pass\n try:\n await userbot.client(BlockRequest(user))\n except:\n pass\n testuserbot = [\n d.entity.id\n for d in await userbot.client.get_dialogs()\n if (d.is_group or d.is_channel)\n ]\n for i in testuserbot:\n try:\n await userbot.client.edit_permissions(i, user, view_messages=False)\n a += 1\n await event.edit(f\"**GBANNED⚠️ \\n🚫Total Affected Chats **: `{a}`\")\n except:\n b += 1\n else:\n await event.edit(f\"**Reply to a user !!**\")\n try:\n if gmute(user.id) is False:\n return await event.edit(f\"**Error! 
User probably already gbanned.**\")\n except:\n pass\n return await event.edit(\n f\"**⚠️Gbanned\\nUSER👤[{user.first_name}](tg://user?id={user.id}) \\n🚫Affected Chats : {a} **\"\n )\n\n\n@borg.on(admin_cmd(pattern=\"unggban ?(.*)\"))\nasync def gspider(userbot):\n lol = userbot\n sender = await lol.get_sender()\n me = await lol.client.get_me()\n if not sender.id == me.id:\n event = await lol.reply(\"`Wait Let Me Process`\")\n else:\n event = await lol.edit(\"One Min ! \")\n me = await userbot.client.get_me()\n await event.edit(f\"Trying To Ungban User !\")\n my_mention = \"[{}](tg://user?id={})\".format(me.first_name, me.id)\n f\"@{me.username}\" if me.username else my_mention\n await userbot.get_chat()\n a = b = 0\n if userbot.is_private:\n user = userbot.chat\n reason = userbot.pattern_match.group(1)\n else:\n userbot.chat.title\n try:\n user, reason = await get_full_user(userbot)\n except:\n pass\n try:\n if not reason:\n reason = \"Private\"\n except:\n return await event.edit(\"Someting Went Wrong 🤔\")\n if user:\n if user.id == 1169076058 or user.id == 1492186775:\n return await event.edit(\"**You Cant Ungban A Dev !**\")\n try:\n from userbot.modules.sql_helper.gmute_sql import ungmute\n except:\n pass\n try:\n await userbot.client(UnblockRequest(user))\n except:\n pass\n testuserbot = [\n d.entity.id\n for d in await userbot.client.get_dialogs()\n if (d.is_group or d.is_channel)\n ]\n for i in testuserbot:\n try:\n await userbot.client.edit_permissions(i, user, send_messages=True)\n a += 1\n await event.edit(f\"**UNGBANNING // AFFECTED CHATS - {a} **\")\n except:\n b += 1\n else:\n await event.edit(\"**Reply to a user !!**\")\n try:\n if ungmute(user.id) is False:\n return await event.edit(\"**Error! User probably already ungbanned.**\")\n except:\n pass\n return await event.edit(\n f\"**🔹UNGBANNED\\n🔹USER - [{user.first_name}](tg://user?id={user.id}) \\n🔹CHATS : {a} **\"\n )\n\n\n@borg.on(ChatAction)\nasync def handler(rkG):\n if rkG.user_joined or rkG.user_added:\n try:\n from userbot.modules.sql_helper.gmute_sql import is_gmuted\n\n guser = await rkG.get_user()\n gmuted = is_gmuted(guser.id)\n except:\n return\n if gmuted:\n for i in gmuted:\n if i.sender == str(guser.id):\n chat = await rkG.get_chat()\n admin = chat.admin_rights\n creator = chat.creator\n if admin or creator:\n try:\n await client.edit_permissions(\n rkG.chat_id, guser.id, view_messages=False\n )\n await rkG.reply(\n f\"**Gbanned User Joined!!** \\n\"\n f\"**Victim Id**: [{guser.id}](tg://user?id={guser.id})\\n\"\n f\"**Action ** : `Banned`\"\n )\n except:\n rkG.reply(\"`No Permission To Ban`\")\n return\n\n\n@borg.on(admin_cmd(pattern=r\"gmute ?(\\d+)?\"))\nasync def startgmute(event):\n private = False\n if event.fwd_from:\n return\n elif event.is_private:\n await event.edit(\"Unexpected issues or ugly errors may occur!\")\n await asyncio.sleep(3)\n private = True\n reply = await event.get_reply_message()\n if event.pattern_match.group(1) is not None:\n userid = event.pattern_match.group(1)\n elif reply is not None:\n userid = reply.sender_id\n elif private is True:\n userid = event.chat_id\n else:\n return await event.edit(\n \"Please reply to a user or add their into the command to gmute them.\"\n )\n event.chat_id\n await event.get_chat()\n if is_muted(userid, \"gmute\"):\n return await event.edit(\"`He has Tap Already On His Mouth.`\")\n try:\n mute(userid, \"gmute\")\n except Exception as e:\n await event.edit(\"Error occured!\\nError is \" + str(e))\n else:\n await event.edit(\"Here A Tape, Now Shutup 
\\nGmuteD\")\n\n\n@borg.on(admin_cmd(pattern=r\"ungmute ?(\\d+)?\"))\nasync def endgmute(event):\n private = False\n if event.fwd_from:\n return\n elif event.is_private:\n await event.edit(\"Unexpected issues or ugly errors may occur!\")\n await asyncio.sleep(3)\n private = True\n reply = await event.get_reply_message()\n if event.pattern_match.group(1) is not None:\n userid = event.pattern_match.group(1)\n elif reply is not None:\n userid = reply.sender_id\n elif private is True:\n userid = event.chat_id\n else:\n return await event.edit(\n \"Please reply to a user or add their into the command to ungmute them.\"\n )\n event.chat_id\n if not is_muted(userid, \"gmute\"):\n return await event.edit(\"This user is not gmuted\")\n try:\n unmute(userid, \"gmute\")\n except Exception as e:\n await event.edit(\"Error occured!\\nError is \" + str(e))\n else:\n await event.edit(\"Successfully ungmuted that person\")\n\n\n@command(incoming=True)\nasync def watcher(event):\n if is_muted(event.sender_id, \"gmute\"):\n await event.delete()\n\n@register(outgoing=True, pattern=r\"^\\.gcast(?: |$)(.*)\")\nasync def gcast(event):\n xx = event.pattern_match.group(1)\n if not xx:\n return await event.edit(\"`Berikan aku teks`\")\n tt = event.text\n msg = tt[6:]\n kk = await event.edit(\"`Proses Mengirim Pesan Broadcast...`\")\n er = 0\n done = 0\n async for x in bot.iter_dialogs():\n if x.is_group:\n chat = x.id\n try:\n done += 1\n await bot.send_message(chat, msg)\n except BaseException:\n er += 1\n await kk.edit(f\"Done in {done} chats, error in {er} chat(s)\")\n\n\n@register(outgoing=True, pattern=r\"^\\.gucast(?: |$)(.*)\")\nasync def gucast(event):\n xx = event.pattern_match.group(1)\n if not xx:\n return await event.edit(\"`Berikan aku teks`\")\n tt = event.text\n msg = tt[7:]\n kk = await event.edit(\"`Proses Mengirim Pesan Broadcast...`\")\n er = 0\n done = 0\n async for x in bot.iter_dialogs():\n if x.is_user and not x.entity.bot:\n chat = x.id\n try:\n done += 1\n await bot.send_message(chat, msg)\n except BaseException:\n er += 1\n await kk.edit(f\"Done in {done} chats, error in {er} chat(s)\")\n\n#XBot-Remix \n\nfrom telethon.errors.rpcerrorlist import (UserIdInvalidError,\n MessageTooLongError)\nfrom telethon.tl.functions.channels import (EditAdminRequest,\n EditBannedRequest,\n EditPhotoRequest)\nfrom telethon.tl.functions.messages import UpdatePinnedMessageRequest\nfrom telethon.tl.types import (ChannelParticipantsAdmins,\n ChatAdminRights,\n ChatBannedRights,\n MessageEntityMentionName,\n MessageMediaPhoto)\nfrom userbot.utils import register, errors_handler\nfrom userbot.utils import admin_cmd\n\nasync def get_full_user(event): \n args = event.pattern_match.group(1).split(':', 1)\n extra = None\n if event.reply_to_msg_id and not len(args) == 2:\n previous_message = await event.get_reply_message()\n user_obj = await event.client.get_entity(previous_message.sender_id)\n extra = event.pattern_match.group(1)\n elif len(args[0]) > 0:\n user = args[0]\n if len(args) == 2:\n extra = args[1]\n if user.isnumeric():\n user = int(user)\n if not user:\n await event.edit(\"`Itz not possible without an user ID`\")\n return\n if event.message.entities is not None:\n probable_user_mention_entity = event.message.entities[0]\n if isinstance(probable_user_mention_entity,\n MessageEntityMentionName):\n user_id = probable_user_mention_entity.user_id\n user_obj = await event.client.get_entity(user_id)\n return user_obj\n try:\n user_obj = await event.client.get_entity(user)\n except Exception as err:\n return 
await event.edit(\"Error... Please report at @TamilsuPPorT\", str(err)) \n return user_obj, extra\n\nasync def get_user_from_id(user, event):\n if isinstance(user, str):\n user = int(user)\n try:\n user_obj = await event.client.get_entity(user)\n except (TypeError, ValueError) as err:\n await event.edit(str(err))\n return None\n return user_obj\n@borg.on(admin_cmd(pattern=\"gpromote ?(.*)\"))\nasync def gben(userbot):\n dc = dark = userbot\n i = 0\n sender = await dc.get_sender()\n me = await userbot.client.get_me()\n await dark.edit(\"`promoting...`\")\n my_mention = \"[{}](tg://user?id={})\".format(me.first_name, me.id)\n f\"@{me.username}\" if me.username else my_mention\n await userbot.get_chat()\n if userbot.is_private:\n user = userbot.chat\n rank = userbot.pattern_match.group(1)\n else:\n userbot.chat.title\n try:\n user, rank = await get_full_user(userbot)\n except:\n pass\n if me == user:\n k = await dark.edit(\"U want to promote urself 😑😑 waao..\")\n return\n try:\n if not rank:\n rank = \"ㅤㅤ\"\n except:\n return await dark.edit(f\"**Something W3NT Wrong 🤔**\")\n if user:\n telchanel = [d.entity.id\n for d in await userbot.client.get_dialogs()\n if (d.is_group or d.is_channel)\n ]\n rgt = ChatAdminRights(add_admins=False,\n invite_users=True,\n change_info=False,\n ban_users=True,\n delete_messages=True,\n pin_messages=True)\n for x in telchanel:\n try:\n await userbot.client(EditAdminRequest(x, user, rgt, rank))\n i += 1\n await dark.edit(f\"**Promoted in Chats **: `{i}`\")\n except:\n pass\n else:\n await dark.edit(f\"**Reply to a user you dumbo !!**\")\n return await dark.edit(\n f\"**Globally promoted [{user.first_name}](tg://user?id={user.id})\\n On Chats😏 : {i} **\"\n )\n@borg.on(admin_cmd(pattern=\"gdemote ?(.*)\"))\nasync def gben(userbot):\n dc = dark = userbot\n i = 0\n sender = await dc.get_sender()\n me = await userbot.client.get_me()\n await dark.edit(\"`demoting...`\")\n my_mention = \"[{}](tg://user?id={})\".format(me.first_name, me.id)\n f\"@{me.username}\" if me.username else my_mention\n await userbot.get_chat()\n if userbot.is_private:\n user = userbot.chat\n rank = userbot.pattern_match.group(1)\n else:\n userbot.chat.title\n try:\n user, rank = await get_full_user(userbot)\n except:\n pass\n if me == user:\n k = await dark.edit(\"U want to demote urself 😑😑 waao..\")\n return\n try:\n if not rank:\n rank = \"ㅤㅤ\"\n except:\n return await dark.edit(f\"**Something W3NT Wrong 🤔**\")\n if user:\n telchanel = [d.entity.id\n for d in await userbot.client.get_dialogs()\n if (d.is_group or d.is_channel)\n ]\n rgt = ChatAdminRights(add_admins=None,\n invite_users=None,\n change_info=None,\n ban_users=None,\n delete_messages=None,\n pin_messages=None)\n for x in telchanel:\n try:\n await userbot.client(EditAdminRequest(x, user, rgt, rank))\n i += 1\n await dark.edit(f\"**Demoted in Chats **: `{i}`\")\n except:\n pass\n else:\n await dark.edit(f\"**Reply to a user you dumbo !!**\")\n return await dark.edit(\n f\"**Globally Demoted [{user.first_name}](tg://user?id={user.id})\\n On Chats😏 : {i} **\"\n )\n\nCMD_HELP.update(\n {\n \"Globaltools\":\n \"╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.gmute <replying to user message>`\\\n\\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ Gmute User And Delete His Msg.\\\n\\n\\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.ungmute <replying to user message>`\\\n\\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ UnGmute User And Stops Deleting His Msgs.\\\n\\n\\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.gban <replying to user message>`\\\n\\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ Gban User And Blow Him From Your Groups\\\n\\n\\n╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.ungban <replying to user message>`\\\n\\n╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ 
Ungban User.\"\n\n }\n)\n",
"id": "10100434",
"language": "Python",
"matching_score": 3.7001194953918457,
"max_stars_count": 25,
"path": "userbot/plugins/gtools.py"
},
{
"content": "import asyncio\nfrom datetime import datetime\nfrom random import choice, randint\n\nfrom telethon.tl.functions.channels import EditAdminRequest\nfrom telethon.tl.types import ChatAdminRights\n\nfrom . import ALIVE_NAME\n\nDEFAULTUSER = str(ALIVE_NAME) if ALIVE_NAME else \"cat\"\n\n\n@bot.on(admin_cmd(pattern=\"scam(?: |$)(.*)\"))\n@bot.on(sudo_cmd(pattern=\"scam(?: |$)(.*)\", allow_sudo=True))\nasync def _(event):\n if event.fwd_from:\n return\n options = [\n \"typing\",\n \"contact\",\n \"game\",\n \"location\",\n \"voice\",\n \"round\",\n \"video\",\n \"photo\",\n \"document\",\n ]\n input_str = event.pattern_match.group(1)\n args = input_str.split()\n if len(args) == 0:\n scam_action = choice(options)\n scam_time = randint(300, 360)\n elif len(args) == 1:\n try:\n scam_action = str(args[0]).lower()\n scam_time = randint(200, 300)\n except ValueError:\n scam_action = choice(options)\n scam_time = int(args[0])\n elif len(args) == 2:\n scam_action = str(args[0]).lower()\n scam_time = int(args[1])\n else:\n await edit_delete(event, \"`Invalid Syntax !!`\")\n return\n try:\n if scam_time > 0:\n await event.delete()\n async with event.client.action(event.chat_id, scam_action):\n await asyncio.sleep(scam_time)\n except BaseException:\n return\n\n\n@bot.on(admin_cmd(pattern=\"prankpromote ?(.*)\"))\n@bot.on(sudo_cmd(pattern=\"prankpromote ?(.*)\", allow_sudo=True))\nasync def _(event):\n if event.fwd_from:\n return\n datetime.now()\n to_promote_id = None\n rights = ChatAdminRights(post_messages=True)\n input_str = event.pattern_match.group(1)\n reply_msg_id = event.message.id\n if reply_msg_id:\n r_mesg = await event.get_reply_message()\n to_promote_id = r_mesg.sender_id\n elif input_str:\n to_promote_id = input_str\n try:\n await event.client(EditAdminRequest(event.chat_id, to_promote_id, rights, \"\"))\n except (Exception) as exc:\n await edit_or_reply(event, str(exc))\n else:\n await edit_or_reply(event, \"Successfully Promoted\")\n\n\n@bot.on(admin_cmd(pattern=f\"padmin$\", outgoing=True))\n@bot.on(sudo_cmd(pattern=\"padmin$\", allow_sudo=True))\nasync def _(event):\n if event.fwd_from:\n return\n animation_interval = 1\n animation_ttl = range(20)\n event = await edit_or_reply(event, \"promoting.......\")\n animation_chars = [\n \"**Promoting User As Admin...**\",\n \"**Enabling All Permissions To User...**\",\n \"**(1) Send Messages: ☑️**\",\n \"**(1) Send Messages: ✅**\",\n \"**(2) Send Media: ☑️**\",\n \"**(2) Send Media: ✅**\",\n \"**(3) Send Stickers & GIFs: ☑️**\",\n \"**(3) Send Stickers & GIFs: ✅**\",\n \"**(4) Send Polls: ☑️**\",\n \"**(4) Send Polls: ✅**\",\n \"**(5) Embed Links: ☑️**\",\n \"**(5) Embed Links: ✅**\",\n \"**(6) Add Users: ☑️**\",\n \"**(6) Add Users: ✅**\",\n \"**(7) Pin Messages: ☑️**\",\n \"**(7) Pin Messages: ✅**\",\n \"**(8) Change Chat Info: ☑️**\",\n \"**(8) Change Chat Info: ✅**\",\n \"**Permission Granted Successfully**\",\n f\"**pRoMooTeD SuCcEsSfUlLy bY: {DEFAULTUSER}**\",\n ]\n for i in animation_ttl:\n await asyncio.sleep(animation_interval)\n await event.edit(animation_chars[i % 20])\n\n\nCMD_HELP.update(\n {\n \"fake\": \"**fake**\\\n \\n\\n**Syntax :** `.scam <action> <time>` \\\n \\n**Usage : **Type .scam (action name) This shows the fake action in the group, The actions are typing ,contact ,game, location, voice, round, video,photo,document, cancel.\\\n \\n\\n**Syntax :** `.prankpromote` reply to user to whom you want to prank promote\\\n \\n**Usage : **it promotes him to admin but he will not have any permission to take action that is 
he can see recent actions but can't take any admin action\\\n \\n\\n**Syntax :** `.padmin`\\\n \\n**Usage : ** An animation that shows enabling all permissions for him as if he is admin (fake promotion)\\\n \"\n }\n)\n",
"id": "2711251",
"language": "Python",
"matching_score": 2.445146322250366,
"max_stars_count": 0,
"path": "userbot/plugins/fake.py"
},
{
"content": "\"\"\"\nCreated by @Jisan7509\nmodified by @mrconfused\nUserbot plugin for CatUserbot\n\"\"\"\nimport emoji\n\nfrom . import fonts as emojify\n\n\n@bot.on(admin_cmd(pattern=\"emoji(?: |$)(.*)\"))\n@bot.on(sudo_cmd(pattern=\"emoji(?: |$)(.*)\", allow_sudo=True))\nasync def itachi(event):\n args = event.pattern_match.group(1)\n if not args:\n get = await event.get_reply_message()\n args = get.text\n if not args:\n await edit_or_reply(\n event, \"`What am I Supposed to do with this stupid, Give me a text. `\"\n )\n return\n result = \"\"\n for a in args:\n a = a.lower()\n if a in emojify.kakashitext:\n char = emojify.kakashiemoji[emojify.kakashitext.index(a)]\n result += char\n else:\n result += a\n await edit_or_reply(event, result)\n\n\n@bot.on(admin_cmd(pattern=\"cmoji(?: |$)(.*)\"))\n@bot.on(sudo_cmd(pattern=\"cmoji(?: |$)(.*)\", allow_sudo=True))\nasync def itachi(event):\n args = event.pattern_match.group(1)\n if not args:\n get = await event.get_reply_message()\n args = get.text\n if not args:\n await edit_or_reply(\n event, \"`What am I Supposed to do with this stupid, Give me a text. `\"\n )\n return\n try:\n emoji, arg = args.split(\" \", 1)\n except:\n arg = args\n emoji = \"😺\"\n if not char_is_emoji(emoji):\n arg = args\n emoji = \"😺\"\n result = \"\"\n for a in arg:\n a = a.lower()\n if a in emojify.kakashitext:\n char = emojify.itachiemoji[emojify.kakashitext.index(a)].format(cj=emoji)\n result += char\n else:\n result += a\n await edit_or_reply(event, result)\n\n\ndef char_is_emoji(character):\n return character in emoji.UNICODE_EMOJI\n\n\nCMD_HELP.update(\n {\n \"emojify\": \"**Plugin :** `emojify`\\\n \\n\\n**Syntax :** `.emoji` <text>\\\n \\n****Usage : **Converts your text to big emoji text, with default emoji. \\\n \\n\\n**Syntax :** `.cmoji` <emoji> <text>\\\n \\n****Usage : **Converts your text to big emoji text, with your custom emoji.\\\n \"\n }\n)\n",
"id": "591156",
"language": "Python",
"matching_score": 0.27832502126693726,
"max_stars_count": 0,
"path": "userbot/plugins/emojify.py"
},
{
"content": "# Copyright (C) 2020 \n# <https://www.github.com/TeamUltroid/Ultroid/blob/main/LICENSE/>.\n\n\"\"\"\n✘ Commands Available -\n• `{i}superfban <reply to user/userid/username>`\n FBan the person across all feds in which you are admin.\n• `{i}superunfban <reply to user/userid/username>`\n Un-FBan the person across all feds in which you are admin.\nSpecify FBan Group and Feds to exclude in the assistant.\n• `{i}fstat <username/id/reply to user>`\n Collect fed stat of the person in Rose.\n• `{i}fedinfo <(fedid)>`\n Collect federation info of the given fed id, or of the fed you own, from Rose.\n\"\"\"\n\nimport asyncio\nimport os\n\nfrom telethon.errors.rpcerrorlist import YouBlockedUserError\n\nfrom . import *\n\nbot = \"@MissRose_bot\"\n\n\n@ultroid_cmd(pattern=\"superfban ?(.*)\")\nasync def _(event):\n msg = await eor(event, \"Starting a Mass-FedBan...\")\n fedList = []\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n if previous_message.media:\n downloaded_file_name = await ultroid_bot.download_media(\n previous_message, \"fedlist\"\n )\n file = open(downloaded_file_name, encoding=\"utf8\")\n lines = file.readlines()\n for line in lines:\n try:\n fedList.append(line[:36])\n except BaseException:\n pass\n arg = event.text.split(\" \", maxsplit=2)\n if len(arg) > 2:\n FBAN = arg[1]\n REASON = arg[2]\n else:\n FBAN = arg[1]\n REASON = \" #TBMassBanned \"\n else:\n FBAN = previous_message.sender_id\n try:\n REASON = event.text.split(\" \", maxsplit=1)[1]\n except BaseException:\n REASON = \"\"\n if REASON.strip() == \"\":\n REASON = \" #TBMassBanned \"\n else:\n arg = event.text.split(\" \", maxsplit=2)\n if len(arg) > 2:\n try:\n FBAN = arg[1]\n REASON = arg[2]\n except BaseException:\n return await msg.edit(\"`No user designated!`\")\n else:\n try:\n FBAN = arg[1]\n REASON = \" #TBMassBanned \"\n except BaseException:\n return await msg.edit(\"`No user designated!`\")\n try:\n if str(FBAN) in DEVLIST:\n await msg.edit(\"You can't ban my dev you noob!!\")\n return\n elif FBAN.startswith(\"@\"):\n try:\n x = await ultroid_bot(GetFullUserRequest(FBAN))\n uid = x.user.id\n if str(uid) in DEVLIST:\n await msg.edit(\"You can't ban my dev you noob!!\")\n return\n except Exception as e:\n print(str(e))\n return await msg.edit(str(e))\n except Exception as e:\n print(str(e))\n return await msg.edit(str(e))\n if udB.get(\"FBAN_GROUP_ID\"):\n chat = int(udB.get(\"FBAN_GROUP_ID\"))\n else:\n chat = await event.get_chat()\n if not len(fedList):\n for a in range(3):\n async with ultroid_bot.conversation(\"@MissRose_bot\") as bot_conv:\n await bot_conv.send_message(\"/start\")\n await asyncio.sleep(3)\n await bot_conv.send_message(\"/myfeds\")\n await asyncio.sleep(3)\n try:\n response = await bot_conv.get_response()\n except asyncio.exceptions.TimeoutError:\n return await msg.edit(\n \"`Seems like rose isn't responding, or, the plugin is misbehaving`\"\n )\n await asyncio.sleep(3)\n if \"make a file\" in response.text or \"Looks like\" in response.text:\n await response.click(0)\n await asyncio.sleep(3)\n fedfile = await bot_conv.get_response()\n await asyncio.sleep(3)\n if fedfile.media:\n downloaded_file_name = await ultroid_bot.download_media(\n fedfile, \"fedlist\"\n )\n await asyncio.sleep(6)\n file = open(downloaded_file_name, \"r\", errors=\"ignore\")\n lines = file.readlines()\n for line in lines:\n try:\n fedList.append(line[:36])\n except BaseException:\n pass\n elif \"You can only use fed commands once every 5 minutes\" in (\n await 
bot_conv.get_edit\n ):\n await msg.edit(\"Try again after 5 mins.\")\n return\n if len(fedList) == 0:\n await msg.edit(\n f\"Unable to collect FedAdminList. Retrying ({a+1}/3)...\"\n )\n else:\n break\n else:\n await msg.edit(\"Error\")\n In = False\n tempFedId = \"\"\n for x in response.text:\n if x == \"`\":\n if In:\n In = False\n fedList.append(tempFedId)\n tempFedId = \"\"\n else:\n In = True\n elif In:\n tempFedId += x\n if len(fedList) == 0:\n await msg.edit(\"Unable to collect FedAdminList.\")\n return\n await msg.edit(f\"FBaning in {len(fedList)} feds.\")\n try:\n await ultroid_bot.send_message(chat, f\"/start\")\n except BaseException:\n await msg.edit(\"Specified FBan Group ID is incorrect.\")\n return\n await asyncio.sleep(3)\n if udB.get(\"EXCLUDE_FED\"):\n excludeFed = udB.get(\"EXCLUDE_FED\").split(\" \")\n for n in range(len(excludeFed)):\n excludeFed[n] = excludeFed[n].strip()\n exCount = 0\n for fed in fedList:\n if udB.get(\"EXCLUDE_FED\") and fed in excludeFed:\n await ultroid_bot.send_message(chat, f\"{fed} Excluded.\")\n exCount += 1\n continue\n await ultroid_bot.send_message(chat, f\"/joinfed {fed}\")\n await asyncio.sleep(3)\n await ultroid_bot.send_message(chat, f\"/fban {FBAN} {REASON}\")\n await asyncio.sleep(3)\n try:\n os.remove(\"fedlist\")\n except Exception as e:\n print(f\"Error in removing FedAdmin file.\\n{str(e)}\")\n await msg.edit(\n f\"SuperFBan Completed.\\nTotal Feds - {len(fedlist)}.\\nExcluded - {exCount}.\\n Affected {len(fedList) - exCount} feds.\\n#TB\"\n )\n\n\n@ultroid_cmd(pattern=\"superunfban ?(.*)\")\nasync def _(event):\n msg = await eor(event, \"Starting a Mass-UnFedBan...\")\n fedList = []\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n if previous_message.media:\n downloaded_file_name = await ultroid_bot.download_media(\n previous_message, \"fedlist\"\n )\n file = open(downloaded_file_name, encoding=\"utf8\")\n lines = file.readlines()\n for line in lines:\n try:\n fedList.append(line[:36])\n except BaseException:\n pass\n arg = event.text.split(\" \", maxsplit=2)\n if len(arg) > 2:\n FBAN = arg[1]\n REASON = arg[2] # rose unbans now can have reasons\n else:\n FBAN = arg[1]\n REASON = \"\"\n else:\n FBAN = previous_message.sender_id\n try:\n REASON = event.text.split(\" \", maxsplit=1)[1]\n except BaseException:\n REASON = \"\"\n if REASON.strip() == \"\":\n REASON = \"\"\n else:\n arg = event.text.split(\" \", maxsplit=2)\n if len(arg) > 2:\n try:\n FBAN = arg[1]\n REASON = arg[2]\n except BaseException:\n return await msg.edit(\"`No user designated!`\")\n else:\n try:\n FBAN = arg[1]\n REASON = \" #TBMassUnBanned \"\n except BaseException:\n return await msg.edit(\"`No user designated!`\")\n try:\n if str(FBAN) in DEVLIST:\n await msg.edit(\"You can't ban my dev you noob!!\")\n return\n except Exception as e:\n print(str(e))\n return await msg.edit(str(e))\n if udB.get(\"FBAN_GROUP_ID\"):\n chat = int(udB.get(\"FBAN_GROUP_ID\"))\n else:\n chat = await event.get_chat()\n if not len(fedList):\n for a in range(3):\n async with ultroid_bot.conversation(\"@MissRose_bot\") as bot_conv:\n await bot_conv.send_message(\"/start\")\n await asyncio.sleep(3)\n await bot_conv.send_message(\"/myfeds\")\n await asyncio.sleep(3)\n try:\n response = await bot_conv.get_response()\n except asyncio.exceptions.TimeoutError:\n return await msg.edit(\n \"`Seems like rose isn't responding, or, the plugin is misbehaving`\"\n )\n await asyncio.sleep(3)\n if \"make a file\" in response.text or \"Looks like\" in 
response.text:\n await response.click(0)\n await asyncio.sleep(3)\n fedfile = await bot_conv.get_response()\n await asyncio.sleep(3)\n if fedfile.media:\n downloaded_file_name = await ultroid_bot.download_media(\n fedfile, \"fedlist\"\n )\n await asyncio.sleep(6)\n file = open(downloaded_file_name, \"r\", errors=\"ignore\")\n lines = file.readlines()\n for line in lines:\n try:\n fedList.append(line[:36])\n except BaseException:\n pass\n elif \"You can only use fed commands once every 5 minutes\" in (\n await bot_conv.get_edit\n ):\n await msg.edit(\"Try again after 5 mins.\")\n return\n if len(fedList) == 0:\n await msg.edit(\n f\"Unable to collect FedAdminList. Retrying ({a+1}/3)...\"\n )\n else:\n break\n else:\n await msg.edit(\"Error\")\n In = False\n tempFedId = \"\"\n for x in response.text:\n if x == \"`\":\n if In:\n In = False\n fedList.append(tempFedId)\n tempFedId = \"\"\n else:\n In = True\n elif In:\n tempFedId += x\n if len(fedList) == 0:\n await msg.edit(\"Unable to collect FedAdminList.\")\n return\n await msg.edit(f\"UnFBaning in {len(fedList)} feds.\")\n try:\n await ultroid_bot.send_message(chat, f\"/start\")\n except BaseException:\n await msg.edit(\"Specified FBan Group ID is incorrect.\")\n return\n await asyncio.sleep(3)\n if udB.get(\"EXCLUDE_FED\"):\n excludeFed = udB.get(\"EXCLUDE_FED\").split(\" \")\n for n in range(len(excludeFed)):\n excludeFed[n] = excludeFed[n].strip()\n exCount = 0\n for fed in fedList:\n if udB.get(\"EXCLUDE_FED\") and fed in excludeFed:\n await ultroid_bot.send_message(chat, f\"{fed} Excluded.\")\n exCount += 1\n continue\n await ultroid_bot.send_message(chat, f\"/joinfed {fed}\")\n await asyncio.sleep(3)\n await ultroid_bot.send_message(chat, f\"/unfban {FBAN} {REASON}\")\n await asyncio.sleep(3)\n try:\n os.remove(\"fedlist\")\n except Exception as e:\n print(f\"Error in removing FedAdmin file.\\n{str(e)}\")\n await msg.edit(\n f\"SuperUnFBan Completed.\\nTotal Feds - {len(fedlist)}.\\nExcluded - {exCount}.\\n Affected {len(fedList) - exCount} feds.\\n#TB\"\n )\n\n\n@ultroid_cmd(pattern=\"fstat ?(.*)\")\nasync def _(event):\n ok = await event.edit(\"`Checking...`\")\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n sysarg = str(previous_message.sender_id)\n user = f\"[user](tg://user?id={sysarg})\"\n if event.pattern_match.group(1):\n sysarg += f\" {event.pattern_match.group(1)}\"\n else:\n sysarg = event.pattern_match.group(1)\n user = sysarg\n if sysarg == \"\":\n await ok.edit(\n \"`Give me someones id, or reply to somones message to check his/her fedstat.`\"\n )\n return\n else:\n async with ultroid.conversation(bot) as conv:\n try:\n await conv.send_message(\"/start\")\n await conv.get_response()\n await conv.send_message(\"/fedstat \" + sysarg)\n audio = await conv.get_response()\n if \"Looks like\" in audio.text:\n await audio.click(0)\n await asyncio.sleep(2)\n audio = await conv.get_response()\n await ultroid.send_file(\n event.chat_id,\n audio,\n caption=f\"List of feds {user} has been banned in.\\n\\nCollected using Ultroid.\",\n link_preview=False,\n )\n else:\n await ultroid.send_message(event.chat_id, audio.text)\n await ultroid.send_read_acknowledge(bot)\n await event.delete()\n except YouBlockedUserError:\n await ok.edit(\"**Error**\\n `Unblock` @MissRose_Bot `and try again!\")\n\n\n@ultroid_cmd(pattern=\"fedinfo ?(.*)\")\nasync def _(event):\n ok = await event.edit(\"`Extracting information...`\")\n sysarg = event.pattern_match.group(1)\n async with ultroid.conversation(bot) as conv:\n 
try:\n await conv.send_message(\"/start\")\n await conv.get_response()\n await conv.send_message(\"/fedinfo \" + sysarg)\n audio = await conv.get_response()\n await ultroid.send_read_acknowledge(bot)\n await ok.edit(audio.text + \"\\n\\nFedInfo Extracted by Ultroid\")\n except YouBlockedUserError:\n await ok.edit(\"**Error**\\n `Unblock` @MissRose_Bot `and try again!\")\n\n\nHELP.update({f\"{__name__.split('.')[1]}\": f\"{__doc__.format(i=HNDLR)}\"})\n",
"id": "6607239",
"language": "Python",
"matching_score": 5.2520976066589355,
"max_stars_count": 0,
"path": "userbot/plugins/fedutils.py"
},
{
"content": "# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n\nimport os\nimport asyncio\n\nfrom telethon.errors import ChatAdminRequiredError\nfrom telethon.errors.rpcerrorlist import MessageTooLongError, YouBlockedUserError\nfrom telethon.tl.functions.users import GetFullUserRequest\n\nfrom userbot.utils import admin_cmd\nfrom userbot import ALIVE_NAME, CMD_HELP\n\nbot = \"@MissRose_bot\"\n\nnaam = str(ALIVE_NAME)\n\nBOTLOG_CHATID = Config.PRIVATE_GROUP_ID\n\nG_BAN_LOGGER_GROUP = os.environ.get(\"G_BAN_LOGGER_GROUP\", None)\nif G_BAN_LOGGER_GROUP:\n G_BAN_LOGGER_GROUP = int(G_BAN_LOGGER_GROUP)\n\nPRIVATE_GROUP_ID = os.environ.get(\"PRIVATE_GROUP_ID\", None)\nif PRIVATE_GROUP_ID:\n PRIVATE_GROUP_ID = int(PRIVATE_GROUP_ID)\n\n@borg.on(admin_cmd(pattern=\"fstat ?(.*)\"))\nasync def _(event):\n if event.fwd_from:\n return\n ok = await event.edit(\"`Checking...`\")\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n sysarg = str(previous_message.sender_id)\n user = f\"[user](tg://user?id={sysarg})\"\n else:\n sysarg = event.pattern_match.group(1)\n user = sysarg\n if sysarg == \"\":\n await ok.edit(\n \"`Give me someones id, or reply to somones message to check his/her fedstat.`\"\n )\n return\n else:\n async with borg.conversation(bot) as conv:\n try:\n await conv.send_message(\"/start\")\n await conv.get_response()\n await conv.send_message(\"/fedstat \" + sysarg)\n audio = await conv.get_response()\n if \"Looks like\" in audio.text:\n await audio.click(0)\n await asyncio.sleep(2)\n audio = await conv.get_response()\n await telebot.send_file(\n event.chat_id,\n audio,\n caption=f\"List of feds {user} has been banned in.\\n\\nCollected using TamilBot.\",\n )\n else:\n await borg.send_message(event.chat_id, audio.text)\n await event.delete()\n except YouBlockedUserError:\n await ok.edit(\"**Error**\\n `Unblock` @MissRose_Bot `and try again!\")\n\n\n@borg.on(admin_cmd(pattern=\"fedinfo ?(.*)\"))\nasync def _(event):\n if event.fwd_from:\n return\n ok = await event.edit(\"`Extracting information...`\")\n sysarg = event.pattern_match.group(1)\n async with borg.conversation(bot) as conv:\n try:\n await conv.send_message(\"/start\")\n await conv.get_response()\n await conv.send_message(\"/fedinfo \" + sysarg)\n audio = await conv.get_response()\n await ok.edit(audio.text + \"\\n\\nFedInfo Excracted by TamilBot\")\n except YouBlockedUserError:\n await ok.edit(\"**Error**\\n `Unblock` @MissRose_Bot `and try again!\")\n\n@borg.on(admin_cmd(\"roseinfo ?(.*)\"))\nasync def _(event):\n if event.fwd_from:\n return\n sysarg = event.pattern_match.group(1)\n if sysarg == \"\":\n async with borg.conversation(bots) as conv:\n try:\n await conv.send_message(\"/start\")\n await conv.get_response()\n await conv.send_message(\"/info\")\n audio = await conv.get_response()\n await borg.send_message(event.chat_id, audio.text)\n await event.delete()\n except YouBlockedUserError:\n 
await event.edit(\"**Error:** `unblock` @MissRose_bot `and retry!\")\n elif \"@\" in sysarg:\n async with borg.conversation(bots) as conv:\n try:\n await conv.send_message(\"/start\")\n await conv.get_response()\n await conv.send_message(\"/info \" + sysarg)\n audio = await conv.get_response()\n await borg.send_message(event.chat_id, audio.text)\n await event.delete()\n except YouBlockedUserError:\n await event.edit(\"**Error:** `unblock` @MissRose_Bot `and try again!\")\n elif \"\" in sysarg:\n async with borg.conversation(bots) as conv:\n try:\n await conv.send_message(\"/start\")\n await conv.get_response()\n await conv.send_message(\"/info \" + sysarg)\n audio = await conv.get_response()\n await borg.send_message(event.chat_id, audio.text)\n await event.delete()\n except YouBlockedUserError:\n await event.edit(\"**Error:** `unblock` @MissRose_Bot `and try again!\")\n\n@borg.on(admin_cmd(\"myfeds ?(.*)\"))\nasync def _(event):\n if event.fwd_from:\n return\n async with event.client.conversation(bots) as conv:\n try:\n await conv.send_message(\"/start\")\n await conv.get_response()\n await conv.send_message(\"/myfeds\")\n myfed = await conv.get_response()\n if \"file\" in myfed.text:\n await fedstat.click(0)\n reply = await conv.get_response()\n await event.client.forward_messages(event.chat_id, reply)\n else:\n await event.client.forward_messages(event.chat_id, myfed)\n await event.delete()\n except YouBlockedUserError:\n await event.edit(\"**Error:** `unblock` @MissRose_Bot `and try again!\")\n\n\n@borg.on(admin_cmd(pattern=\"bgban ?(.*)\"))\nasync def _(event):\n if G_BAN_LOGGER_GROUP is None:\n await event.edit(\"ENV VAR is not set. This module will not work.\")\n return\n if event.fwd_from:\n return\n reason = event.pattern_match.group(1)\n if event.reply_to_msg_id:\n r = await event.get_reply_message()\n if r.forward:\n r_from_id = r.forward.from_id or r.from_id\n else:\n r_from_id = r.from_id\n await borg.send_message(\n G_BAN_LOGGER_GROUP,\n \"/gban [user](tg://user?id={}) {}\".format(r_from_id, reason),\n )\n await event.delete()\n\n\n@borg.on(admin_cmd(pattern=\"bungban ?(.*)\"))\nasync def _(event):\n if G_BAN_LOGGER_GROUP is None:\n await event.edit(\"ENV VAR is not set. 
This module will not work.\")\n return\n if event.fwd_from:\n return\n reason = event.pattern_match.group(1)\n if event.reply_to_msg_id:\n r = await event.get_reply_message()\n r_from_id = r.from_id\n await borg.send_message(\n G_BAN_LOGGER_GROUP,\n \"/ungban [user](tg://user?id={}) {}\".format(r_from_id, reason),\n )\n await event.delete()\n\n\n@borg.on(admin_cmd(\"superfban ?(.*)\"))\nasync def _(event):\n if event.fwd_from:\n return\n await event.edit(\"Starting a Mass-UnFedBan...\")\n if event.reply_to_msg_id:\n previous_message = await event.get_reply_message()\n FBAN = previous_message.sender_id\n else:\n FBAN = event.pattern_match.group(1)\n\n if PRIVATE_GROUP_ID:\n chat = PRIVATE_GROUP_ID \n else:\n chat = await event.get_chat()\n fedList = []\n for a in range(3):\n async with event.client.conversation(\"@MissRose_bot\") as bot_conv:\n await bot_conv.send_message(\"/start\")\n await bot_conv.send_message(\"/myfeds\")\n response = await bot_conv.get_response()\n if \"make a file\" in response.text:\n await asyncio.sleep(1)\n await response.click(0)\n fedfile = await bot_conv.get_response()\n if fedfile.media:\n downloaded_file_name = await event.client.download_media(\n fedfile, \"fedlist\"\n )\n file = open(downloaded_file_name, \"r\")\n lines = file.readlines()\n for line in lines:\n fedList.append(line[:36])\n else:\n return\n if len(fedList) == 0:\n await event.edit(f\"Something went wrong. Retrying ({a+1}/3)...\")\n else:\n break\n else:\n await event.edit(f\"Error\")\n if \"You can only use fed commands once every 5 minutes\" in response.text:\n await event.edit(\"Try again after 5 mins.\")\n return\n In = False\n tempFedId = \"\"\n for x in response.text:\n if x == \"`\":\n if In:\n In = False\n fedList.append(tempFedId)\n tempFedId = \"\"\n else:\n In = True\n\n elif In:\n tempFedId += x\n\n await event.edit(f\"UnFbaning in {len(fedList)} feds.\")\n try:\n await event.client.send_message(chat, f\"/feddemoteme {fed}\")\n except:\n await event.edit(\"PRIVATE_GROUP_ID is incorrect.\")\n return\n await asyncio.sleep(3)\n for fed in fedList:\n await event.client.send_message(chat, f\"/start\")\n await asyncio.sleep(3)\n await event.client.send_message(chat, f\"/feddemoteme {FBAN}\")\n await asyncio.sleep(3)\n await event.edit(f\"SuperUnFBan Completed. Affected {len(fedList)} feds.\")\n\n\nCMD_HELP.update(\n {\n \"fed\":\n\n \"\"\"╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.sendto category_name`\n ╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __will send the replied message to all the chats in give category__\n\n ╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.fwdto category_name`\n ╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __will forward the replied message to all the chats in give category__\n\n ╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.addto category_name`\n ╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __It will add this chat/user/channel to the category of the given name__\n\n ╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.rmfrom category_name`\n ╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __To remove the Chat/user/channel from the given category name__\n\n ╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.list category_name`\n ╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Will show the list of all chats in the given category__\n\n ╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.listall`\n ╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Will show the list of all category names__\n\n ╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.frmfrom category_name chat_id`\n ╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __To force remove the given chat_id from the given category name usefull when you left that chat or banned you there__\n\n ╼•∘ 🅲🅼🅽🅳 ∘•╾ : `delc category_name`\n ╼•∘ 🆄🆂🅰️🅶🅴 ∘•╾ __Deletes the category completely in database__\n\"\"\"\n }\n)\n\n\n\n",
"id": "2831502",
"language": "Python",
"matching_score": 2.8079802989959717,
"max_stars_count": 25,
"path": "userbot/plugins/fed.py"
},
{
"content": "\"\"\" Spotify / Deezer downloader plugin by @Sur_vivor | Syntax: .dzd link\"\"\"\nimport asyncio\n\nfrom telethon.errors.rpcerrorlist import YouBlockedUserError\n\nfrom userbot.utils import admin_cmd\nfrom userbot import CMD_HELP\n\n@borg.on(admin_cmd(outgoing=True, pattern=\"dzd(?: |$)(.*)\"))\nasync def DeezLoader(Deezlod):\n if Deezlod.fwd_from:\n return\n d_link = Deezlod.pattern_match.group(1)\n if \".com\" not in d_link:\n await Deezlod.edit(\"` I need a link to download something pro.`**(._.)**\")\n else:\n await Deezlod.edit(\"🎶**Initiating Download!**🎶\")\n chat = \"@DeezLoadBot\"\n async with bot.conversation(chat) as conv:\n try:\n msg_start = await conv.send_message(\"/start\")\n response = await conv.get_response()\n r = await conv.get_response()\n msg = await conv.send_message(d_link)\n details = await conv.get_response()\n song = await conv.get_response()\n \"\"\" - don't spam notif - \"\"\"\n await bot.send_read_acknowledge(conv.chat_id)\n except YouBlockedUserError:\n await Deezlod.edit(\"**Error:** `Unblock` @DeezLoadBot `and retry!`\")\n return\n await bot.send_file(Deezlod.chat_id, song, caption=details.text)\n await Deezlod.client.delete_messages(\n conv.chat_id, [msg_start.id, response.id, r.id, msg.id, details.id, song.id]\n )\n await Deezlod.delete()\n\n\n@borg.on(admin_cmd(outgoing=True, pattern=\"song(?: |$)(.*)\"))\nasync def WooMai(rose):\n if rose.fwd_from:\n return\n song = rose.pattern_match.group(1)\n chat = \"@SongProBot\"\n link = f\"/s {song}\"\n await rose.edit(\"```Getting Your Music```\")\n async with bot.conversation(chat) as conv:\n await asyncio.sleep(2)\n await rose.edit(\"`Downloading...Please wait`\")\n try:\n msg = await conv.send_message(link)\n response = await conv.get_response()\n respond = await conv.get_response()\n \"\"\" - don't spam notif - \"\"\"\n await bot.send_read_acknowledge(conv.chat_id)\n except YouBlockedUserError:\n await netase.reply(\"```Please unblock @SongProBot and try again```\")\n return\n await rose.edit(\"`Sending Your Music...`\")\n await asyncio.sleep(3)\n await bot.send_file(rose.chat_id, respond)\n await rose.client.delete_messages(conv.chat_id, [msg.id, response.id, respond.id])\n await rose.delete()\n\nCMD_HELP.update(\n {\n \"Songs\":\n \"╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.dzd`\"\n \"\\n ╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : get songs from @DeezLoadBot \"\n \"\\n\\n ╼•∘ 🅲🅼🅽🅳 ∘•╾ : `.song`\"\n \"\\n ╼•∘ 🆄🆂🅰🅶🅴 ∘•╾ : get your favourite 😍 song from @SongProBot, \"\n }\n)\n \n",
"id": "12615780",
"language": "Python",
"matching_score": 3.3953733444213867,
"max_stars_count": 25,
"path": "userbot/plugins/songs.py"
},
{
"content": "Created by @Jamelsingh\nplugin for jameluserbot\n\"\"\"\n\nfrom telethon.errors.rpcerrorlist import YouBlockedUserError\n\n\n@bot.on(admin_cmd(pattern=r\"score$\"))\n@bot.on(sudo_cmd(pattern=r\"score$\", allow_sudo=True))\nasync def _(event):\n if event.fwd_from:\n return\n chat = \"@cricbuzz_bot\"\n reply_to_id = event.message\n catevent = await edit_or_reply(event, \"```Gathering info...```\")\n async with event.client.conversation(chat) as conv:\n try:\n msg_start = await conv.send_message(\"/start\")\n response = await conv.get_response()\n msg = await conv.send_message(\"/score\")\n respond = await conv.get_response()\n await event.client.send_read_acknowledge(conv.chat_id)\n except YouBlockedUserError:\n await catevent.edit(\"Unblock @cricbuzz_bot & try again\")\n return\n if respond.text.startswith(\"I can't find that\"):\n await catevent.edit(\"sorry i can't find it\")\n else:\n await catevent.delete()\n await event.client.send_message(\n event.chat_id, respond.message, reply_to=reply_to_id\n )\n await event.client.delete_messages(\n conv.chat_id, [msg_start.id, msg.id, response.id, respond.id]\n )\n\n\n@bot.on(admin_cmd(pattern=r\"cric (.*)\"))\n@bot.on(sudo_cmd(pattern=r\"cric (.*)\", allow_sudo=True))\nasync def _(event):\n if event.fwd_from:\n return\n details = event.pattern_match.group(1)\n chat = \"@cricbuzz_bot\"\n reply_to_id = event.message\n catevent = await edit_or_reply(event, \"```Gathering info...```\")\n async with event.client.conversation(chat) as conv:\n try:\n msg_start = await conv.send_message(\"/start\")\n response = await conv.get_response()\n msg = await conv.send_message(f\"{details}\")\n respond = await conv.get_response()\n await event.client.send_read_acknowledge(conv.chat_id)\n except YouBlockedUserError:\n await catevent.edit(\"Unblock @cricbuzz_bot & try again\")\n return\n if respond.text.startswith(\"I can't find that\"):\n await catevent.edit(\"sorry i can't find it\")\n else:\n await catevent.delete()\n await event.client.send_message(\n event.chat_id, respond.message, reply_to=reply_to_id\n )\n await event.client.delete_messages(\n conv.chat_id, [msg_start.id, msg.id, response.id, respond.id]\n )\n\n\nCMD_HELP.update(\n {\n \"cricket\": \"**Plugin :** `cricket`\\\n \\n\\n** • Syntax : **`.score` \\\n \\n** • Function : **__To see score of ongoing matches.__\\\n \\n\\n** • Syntax : **`.cric <commnd>`\\\n \\n** • Function : **__That will send details like scoreboard or commentary.__\\\n \\n\\n** • Example :-** `.cric /scorecard_30....`\"\n }\n)\n",
"id": "2277712",
"language": "Python",
"matching_score": 2.588603973388672,
"max_stars_count": 0,
"path": "userbot/plugins/cricket.py"
}
] | 2.588604 |
AmasiaNalbandian | [
{
"content": "import urllib.request\nimport json\nimport logging as log\nfrom bs4 import BeautifulSoup\n\ndef main():\n # Number of rows on the logged-out world selector\n MAX_ROWS = 24\n URL = \"https://oldschool.runescape.com/a=13/slu?order=WMLPA\"\n\n worlds_data = dict()\n\n # Open page and setup parser\n page = urllib.request.urlopen(URL)\n soup = BeautifulSoup(page, features=\"html.parser\")\n\n # Find the table rows\n tbody = soup.find(\"tbody\", class_=\"server-list__body\")\n trs = tbody.find_all(\"tr\")\n\n log.info(\"Scraping \" + URL + \"...\")\n\n row = 1\n col = 1\n\n # Iterate each <tr> element\n for tr in trs:\n # Get all <td> elements in the row\n tds = tr.find_all(\"td\")\n\n # Parse out relevant data\n world = tds[0].find(\"a\").get(\"id\").replace(\"slu-world-\", \"\")\n world_members_only = True if \"Members\" == tds[3].get_text() else False\n world_description = tds[4].get_text()\n\n # False and \"None\" by default\n world_pvp = False\n world_skill_requirement = \"None\"\n\n # Check world description\n if \"PvP\" in world_description:\n world_pvp = True\n elif \"skill total\" in world_description:\n world_skill_requirement = tds[4].get_text().replace(\" skill total\", \"\")\n\n worlds_data[world] = {\n \"members_only\": world_members_only,\n \"pvp\": world_pvp,\n \"total_level_requirement\": world_skill_requirement,\n \"row\": row,\n \"column\": col,\n }\n\n row += 1\n\n if row > MAX_ROWS:\n row = 1\n col += 1\n\n # Write to json file\n with open(\"worlds.json\", \"w\") as f:\n json.dump(worlds_data, f, indent=4)\n\n log.info('Wrote worlds to \"worlds.json\"')\n\nif __name__ == \"__main__\":\n main()",
"id": "675671",
"language": "Python",
"matching_score": 0.34207725524902344,
"max_stars_count": 44,
"path": "ocvbot/world_scraper.py"
},
{
"content": "# coding=UTF-8\n\"\"\"\nSets global variables and constants.\n\n\"\"\"\nimport json\nimport logging as log\nimport random as rand\nimport time\n\nimport pyautogui as pag\nfrom ocvbot import config\n\n# TODO: Finish implementing stats.\n# Stats ----------------------------------------------------------------\n\n# Used for tracking how long the script has been running.\nstart_time = round(time.time())\n\n# The number of inventories a script has gone through.\ninventories = 0\n# The number of items gathered, approximately.\nitems_gathered = 0\n# The amount of experience gained since the script started, approximately.\nxp_gained = 0\n# The amount of experience gained since installing this package\nxp_per_hour = 0\n\nore_xp_dict = {\"copper\": 16.5, \"iron\": 35.5}\n\n# ----------------------------------------------------------------------\n# These variables are used to setup behavior.logout_rand_range(). ------\n# ----------------------------------------------------------------------\n\n# Set initial checkpoint_checked values.\ncheckpoint_1_checked = False\ncheckpoint_2_checked = False\ncheckpoint_3_checked = False\ncheckpoint_4_checked = False\n\n# Convert run duration within config file from minutes to seconds.\nmin_session_duration_sec = (int(config[\"main\"][\"min_session_duration\"])) * 60\nmax_session_duration_sec = (int(config[\"main\"][\"max_session_duration\"])) * 60\n\nif min_session_duration_sec > max_session_duration_sec:\n raise Exception(\"min_session_duration must be less than max_session_duration!\")\n\nmin_break_duration = int(config[\"main\"][\"min_break_duration\"])\nmax_break_duration = int(config[\"main\"][\"max_break_duration\"])\n\nif min_break_duration > max_break_duration:\n raise Exception(\"min_break_duration must be less than max_break_duration!\")\n\n# Break the duration of time between the minimum and maximum duration\n# into a set of evenly-sized durations of time. These chunks of time\n# are consecutively added to the start time to create \"checkpoints\".\n# Checkpoints are timestamps at which a logout roll will occur.\ncheckpoint_interval = (max_session_duration_sec - min_session_duration_sec) / 4\n\n# Space each checkpoint evenly between the min duration and the max\n# duration.\ncheckpoint_1 = round(start_time + min_session_duration_sec)\ncheckpoint_2 = round(start_time + min_session_duration_sec + checkpoint_interval)\ncheckpoint_3 = round(start_time + min_session_duration_sec + (checkpoint_interval * 2))\ncheckpoint_4 = round(start_time + min_session_duration_sec + (checkpoint_interval * 3))\ncheckpoint_5 = round(start_time + max_session_duration_sec)\n\n# Determine how many sessions the bot will run for before quitting.\nmin_sessions = int(config[\"main\"][\"min_sessions\"])\nmax_sessions = int(config[\"main\"][\"max_sessions\"])\n\nif min_sessions > max_sessions:\n raise Exception(\"min_sessions must be less than max_sessions!\")\n\nsession_total = rand.randint(min_sessions, max_sessions)\nlog.info(\n \"Checkpoint 1 is at %s, session_total is %s\",\n time.ctime(checkpoint_1),\n session_total,\n)\n\n# The current number of sessions that have been completed.\nsession_num = 0\n\nwith open(\"worlds.json\") as f:\n worlds = json.load(f)\n\n\n# Define custom exception types. 
------------------------------------------------------------------\n\n\nclass BankingError(Exception):\n \"\"\"\n Raised when an unexpected or unrecoverable situation occurs in the\n banking window.\n \"\"\"\n\n\nclass InefficientUseOfInventory(Exception):\n \"\"\"\n Raised when the number of free inventory spaces available would result in\n inefficient or overly arduous gameplay. For example, this exception is\n raised when attempting to drop-mine with only 4 free inventory spaces.\n \"\"\"\n\n\nclass InventoryError(Exception):\n \"\"\"\n Raised when an unexpected or unrecoverable situation occurs with the\n player's inventory.\n \"\"\"\n\n\nclass InventoryFull(Exception):\n \"\"\"\n Raised whenever the player's inventory is too full to perform the desired\n action.\n \"\"\"\n\n\nclass NeedleError(Exception):\n \"\"\"\n A generic exception raised when a necessary needle could not be found.\n \"\"\"\n\n\nclass RockEmpty(Exception):\n \"\"\"\n Raised by skills.Miner when the given rock is empty.\n \"\"\"\n\n\nclass TimeoutException(Exception):\n \"\"\"\n Raised whenever an action takes longer than expected and times out.\n \"\"\"\n",
"id": "8718634",
"language": "Python",
"matching_score": 3.423389434814453,
"max_stars_count": 0,
"path": "ocvbot/startup.py"
},
{
"content": "# coding=UTF-8\n\"\"\"\nContains non-skilling player behaviors.\n\n\"\"\"\nimport logging as log\nimport pathlib\nimport random as rand\nimport sys\nimport time\n\nimport cv2\nimport numpy as np\nimport pyautogui as pag\nfrom ocvbot import banking\nfrom ocvbot import inputs\nfrom ocvbot import interface\nfrom ocvbot import misc\nfrom ocvbot import startup as start\nfrom ocvbot import vision as vis\n\n\n# TODO: Move login and world-switcher functions to login_menu.py.\n# TODO: Add switch_worlds_logged_in()\n\n\n# TODO: Add tests.\n# TODO: Move to login_menu.py\ndef switch_worlds_logged_out(world: str, attempts=5) -> bool:\n MAX_COLUMNS = 7\n X_OFFSET = 93\n Y_OFFSET = 19\n\n # Get world's row and col\n world_info = start.worlds[world]\n column = world_info[\"column\"]\n row = world_info[\"row\"]\n\n # Click world switch button\n switcher_clicked = vis.Vision(\n region=vis.CLIENT, needle=\"needles/login-menu/world-switcher-logged-out.png\"\n ).click_needle()\n\n if switcher_clicked is False:\n log.error(\"Unable to find world switcher!\")\n return False\n\n # Wait for green world filter button, fails if filter is not set correctly\n world_filter = vis.Vision(\n region=vis.CLIENT, needle=\"needles/login-menu/world-filter-enabled.png\"\n ).wait_for_needle()\n\n if world_filter is False:\n enabled_filter = interface.enable_button(\n \"needles/login-menu/world-filter-disabled.png\",\n vis.CLIENT,\n \"needles/login-menu/world-filter-enabled.png\",\n vis.CLIENT,\n )\n if enabled_filter is False:\n return False\n\n # If the world is off screen\n if column > MAX_COLUMNS:\n # Click next page until the world is on screen\n times_to_click = column % MAX_COLUMNS\n next_page_button = vis.Vision(\n region=vis.CLIENT, needle=\"needles/login-menu/next-page.png\"\n ).click_needle(number_of_clicks=times_to_click)\n\n if next_page_button is False:\n log.error(\"Unable to find next page button!\")\n return False\n\n # Set the world's col to max, it'll always be in the last col\n # after it's visible\n col = MAX_COLUMNS\n\n # Coordinates for the first world\n first_world_x = vis.client_left + 110\n first_world_y = vis.client_top + 43\n\n # Apply offsets using the first world as a base\n x = first_world_x + ((col - 1) * X_OFFSET)\n y = first_world_y + ((row - 1) * Y_OFFSET)\n\n # Click a random spot in the world's button\n for _ in range(attempts):\n inputs.Mouse(region=(x, y, 32, 6), move_duration_range=(50, 200)).click_coord()\n\n # Wait for login screen\n login_screen = vis.Vision(\n region=vis.CLIENT, needle=\"needles/login-menu/orient-logged-out.png\"\n ).wait_for_needle()\n if login_screen is True:\n return True\n\n log.error(\"Timed out waiting for login screen!\")\n return False\n\n\n# TODO: Move to inventory.py\ndef check_skills() -> None:\n \"\"\"\n Used to mimic human-like behavior. Checks the stats of a random\n skill.\n\n Returns:\n Returns after hovering mouse over skill.\n\n \"\"\"\n open_side_stone(\"skills\")\n inputs.Mouse(region=vis.INV).move_to()\n misc.sleep_rand(1000, 7000)\n return\n\n\n# TODO: Move to inventory.py\ndef drop_item(\n item,\n random_wait: bool = True,\n shift_click: bool = True,\n) -> None:\n \"\"\"\n Drops all instances of the provided item from the inventory.\n The \"Shift+Click\" setting to drop items MUST be enabled in the OSRS\n client.\n\n Args:\n item (file): Filepath to an image of the item to drop, as it\n appears in the player's inventory.\n random_wait (bool): Whether to roll for a chance to randomly wait\n while dropping items. 
Default is True.\n shift_click (bool): Whether to hold down Shift before clicking the\n item. This arg only exists because it must be\n disabled when running unit tests with PyTest and\n feh -- don't change it unless you know what\n you're doing. Default is True.\n\n Examples:\n drop_item(\"./needles/items/iron-ore.png\")\n\n Returns:\n Returns when all instances of the given item have been dropped, or when\n there were already zero instances of the given item in the inventory.\n\n Raises:\n Raises start.InventoryError if not all instances of the given item could\n be dropped.\n \"\"\"\n # TODO: Create four objects, one for each quadrant of the inventory\n # and rotate dropping items randomly among each quadrant to make\n # item-dropping more randomized.\n\n open_side_stone(\"inventory\")\n\n number_of_items = vis.Vision(region=vis.INV, needle=item).count_needles()\n if number_of_items == 0:\n log.info(\"No instances of item %s exist in the inventory\", item)\n return\n\n log.info(\"Dropping %s instances of %s\", number_of_items, item)\n for _ in range(35):\n\n if shift_click:\n pag.keyDown(\"shift\")\n # Alternate between searching for the item in left half and the\n # right half of the player's inventory. This helps reduce the\n # chances the bot will click on the same item twice.\n vis.Vision(region=vis.INV_RIGHT_HALF, needle=item, loop_num=1).click_needle(\n sleep_range=(10, 50, 10, 50)\n )\n vis.Vision(region=vis.INV_LEFT_HALF, needle=item, loop_num=1).click_needle(\n sleep_range=(10, 50, 10, 50)\n )\n\n # Search the entire inventory to check if the item is still\n # there.\n item_remains = vis.Vision(\n region=vis.INV, loop_num=1, needle=item\n ).wait_for_needle()\n\n # Chance to sleep while dropping items.\n if random_wait:\n misc.sleep_rand_roll(chance_range=(30, 40), sleep_range=(1000, 20000))\n\n if shift_click:\n pag.keyUp(\"shift\")\n if item_remains is False:\n return\n\n raise start.InventoryError(\"Tried dropping item too many times!\")\n\n\ndef human_behavior_rand(chance) -> None:\n \"\"\"\n Randomly chooses from a list of human behaviors if the roll passes.\n This is done to make the bot appear more human.\n\n Args:\n chance (int): The number that must be rolled for a random\n behavior to be triggered. 
For example, if this\n parameter is 25, then there is a 1 in 25 chance\n for the roll to pass.\n\n Returns:\n Returns after random human behavior has been completed.\n \"\"\"\n roll = rand.randint(1, chance)\n log.debug(\"Human behavior rolled %s\", roll)\n\n if roll == chance:\n log.info(\"Attempting to act human.\")\n roll = rand.randint(1, 2)\n if roll == 1:\n check_skills()\n elif roll == 2:\n roll = rand.randint(1, 8)\n if roll == 1:\n open_side_stone(\"attacks\")\n elif roll == 2:\n open_side_stone(\"quests\")\n elif roll == 3:\n open_side_stone(\"equipment\")\n elif roll == 4:\n open_side_stone(\"prayers\")\n elif roll == 5:\n open_side_stone(\"spellbook\")\n elif roll == 6:\n open_side_stone(\"music\")\n elif roll == 7:\n open_side_stone(\"friends\")\n elif roll == 8:\n open_side_stone(\"settings\")\n\n\n# TODO: Move to login_menu.py\ndef login_basic(\n username_file=start.config[\"main\"][\"username_file\"],\n password_file=start.config[\"main\"][\"password_file\"],\n cred_sleep_range: tuple[int, int] = (800, 5000),\n) -> bool:\n \"\"\"\n Performs a login without checking if the login was successful.\n\n Advances to the user credentials screen, enters the user's\n credentials, and submits the user's credentials, that's it.\n\n Args;\n username_file (file): The path to a file containing the user's\n username login, by default reads the\n `username_file` field in the main config\n file.\n password_file (file): The path to a file containing the user's\n password, by default reads the\n `password_file` field in the main config\n file.\n cred_sleep_range (tuple): A 2-tuple containing the minimum and\n maximum number of miliseconds to wait\n between actions while entering account\n credentials, default is (800, 5000).\n Returns:\n Returns True if credentials were entered and a login was\n initiated. 
Returns False otherwise.\n\n \"\"\"\n # Remove line breaks from credential files to make logging in more\n # predictable.\n username = open(username_file, \"r\", encoding=\"utf-8\").read()\n username = str(username.replace(\"\\n\", \"\"))\n password = open(password_file, \"r\", encoding=\"utf-8\").read()\n password = str(password.replace(\"\\n\", \"\"))\n\n for _ in range(1, 3):\n log.info(\"Logging in.\")\n\n # Click the \"Ok\" button if it's present at the login screen.\n # This button appears if the user was disconnected due to\n # inactivity.\n ok_button = vis.Vision(\n region=vis.CLIENT, needle=\"./needles/login-menu/ok-button.png\", loop_num=1\n ).click_needle()\n # If the \"Ok\" button isn't found, look for the \"Existing user\"\n # button.\n existing_user_button = vis.Vision(\n region=vis.CLIENT,\n needle=\"./needles/login-menu/existing-user-button.png\",\n loop_num=1,\n ).click_needle()\n\n if existing_user_button is True or ok_button is True:\n credential_screen = vis.Vision(\n region=vis.CLIENT,\n needle=\"./needles/login-menu/login-cancel-buttons.png\",\n loop_num=5,\n ).wait_for_needle()\n\n if credential_screen is True:\n # Click to make sure the \"Login\" field is active.\n inputs.Mouse(region=(vis.LOGIN_FIELD)).click_coord()\n # Enter login field credentials.\n misc.sleep_rand(cred_sleep_range[0], cred_sleep_range[1])\n inputs.Keyboard(log_keys=False).typewriter(username)\n misc.sleep_rand(cred_sleep_range[0], cred_sleep_range[1])\n\n # Click to make sure the \"Password\" field is active.\n inputs.Mouse(region=(vis.PASS_FIELD)).click_coord()\n # Enter password field credentials and login.\n inputs.Keyboard(log_keys=False).typewriter(password)\n misc.sleep_rand(cred_sleep_range[0], cred_sleep_range[1])\n\n inputs.Keyboard().keypress(key=\"enter\")\n return True\n\n log.critical(\"Could not perform login!\")\n return False\n\n\n# TODO: Move to login_menu.py\ndef login_full(\n login_sleep_range: tuple[int, int] = (500, 5000),\n postlogin_sleep_range: tuple[int, int] = (500, 5000),\n username_file=start.config[\"main\"][\"username_file\"],\n password_file=start.config[\"main\"][\"password_file\"],\n) -> bool:\n \"\"\"\n Logs into the client using the credentials specified in the main\n config file. 
Waits until the login is successful before returning.\n\n Args:\n login_sleep_range (tuple): A 2-tuple containing the minimum and\n maximum number of miliseconds to wait\n after hitting \"Enter\" to login,\n default is (500, 5000).\n postlogin_sleep_range (tuple): The minimum and maximum number of\n miliseconds to wait after clicking\n the \"Click here to play\" button,\n default is (500, 5000).\n\n Raises:\n Raises an exception if the login was not successful for any\n reason.\n\n Returns:\n Returns True if the login was successful.\n\n \"\"\"\n log.info(\"Attempting to login.\")\n for _ in range(1, 3):\n\n login = login_basic(username_file, password_file)\n if login is False:\n raise Exception(\"Could not perform initial login!\")\n\n misc.sleep_rand(login_sleep_range[0], login_sleep_range[1])\n postlogin_screen_button = vis.Vision(\n region=vis.CLIENT,\n needle=\"./needles/login-menu/orient-postlogin.png\",\n conf=0.8,\n loop_num=10,\n loop_sleep_range=(1000, 2000),\n ).click_needle()\n\n if postlogin_screen_button is True:\n misc.sleep_rand(postlogin_sleep_range[0], postlogin_sleep_range[1])\n\n # Wait for the orient function to return true in order to\n # confirm the login.\n logged_in = vis.Vision(\n region=vis.CLIENT,\n needle=\"./needles/minimap/orient.png\",\n loop_num=50,\n loop_sleep_range=(1000, 2000),\n ).wait_for_needle()\n if logged_in is True:\n # Reset the timer that's used to count the number of\n # seconds the bot has been running for.\n start.start_time = time.time()\n # Make sure client camera is oriented correctly after\n # logging in.\n # TODO: move this to a 'configure_camera' function\n pag.keyDown(\"Up\")\n misc.sleep_rand(3000, 7000)\n pag.keyUp(\"Up\")\n return True\n raise Exception(\"Could not detect login after postlogin screen!\")\n # Begin checking for the various non-successful login messages.\n # This includes messages like \"invalid credentials\",\n # \"you must be a member to use this world\", \"cannot\n # connect to server,\" etc.\n log.warning(\"Cannot find postlogin screen!\")\n\n # TODO: Add additional checks to other login messages.\n invalid_credentials = vis.Vision(\n region=vis.CLIENT,\n needle=\"./needles/login-menu/invalid-credentials.png\",\n loop_num=1,\n ).wait_for_needle()\n if invalid_credentials is True:\n raise Exception(\"Invalid user credentials!\")\n log.critical(\"Cannot find postlogin screen!\")\n\n raise Exception(\"Unable to login!\")\n\n\n# TODO: Move to inventory.py\ndef logout() -> None:\n \"\"\"\n If the client is logged in, logs out.\n\n Raises:\n Raises an exception if the client could not logout.\n\n Returns:\n Returns if the logout was successful, or the client is already logged\n out.\n\n \"\"\"\n # Make sure the client is logged in.\n if vis.orient()[0] == \"logged_out\":\n log.warning(\"Client already logged out!\")\n return\n\n log.info(\"Attempting to logout.\")\n banking.close_bank()\n open_side_stone(\"logout\")\n\n def is_logged_out() -> bool:\n logged_out = vis.Vision(\n region=vis.CLIENT,\n needle=\"./needles/login-menu/orient-logged-out.png\",\n loop_num=5,\n loop_sleep_range=(1000, 1200),\n ).wait_for_needle()\n if logged_out is True:\n return True\n\n # Look for any one of the three possible logout buttons.\n for _ in range(5):\n\n # The standard logout button.\n logout_button = vis.Vision(\n region=vis.INV,\n needle=\"./needles/side-stones/logout/logout.png\",\n conf=0.9,\n loop_num=1,\n ).click_needle(move_away=True)\n if logout_button is True:\n if is_logged_out() is True:\n return\n\n # The logout button 
as it appears when the mouse is over it.\n logout_button_highlighted = vis.Vision(\n region=vis.INV,\n needle=\"./needles/side-stones/logout/logout-highlighted.png\",\n conf=0.9,\n loop_num=1,\n ).click_needle(move_away=True)\n if logout_button_highlighted is True:\n if is_logged_out() is True:\n return\n\n # The logout button when the world switcher is open.\n logout_button_world_switcher = vis.Vision(\n region=vis.SIDE_STONES,\n needle=\"./needles/side-stones/logout/logout-world-switcher.png\",\n conf=0.95,\n loop_num=1,\n ).click_needle(move_away=True)\n if logout_button_world_switcher is True:\n if is_logged_out() is True:\n return\n\n raise Exception(\"Could not logout!\")\n\n\n# TODO: Move to misc.py\ndef logout_break_range() -> None:\n \"\"\"\n Triggers a random logout within a specific range of times, set by the user\n in the main config file. Additional configuration for this function is set\n by variables in startup.py.\n\n To determine when a logout roll should occur, this function creates five\n evenly-spaced timestamps at which to roll for a logout. These timestamps\n are called \"checkpoints\". Each roll has a 1/5 chance to pass. The first and\n last checkpoints are based on the user-defined minimum and maximum session\n duration. As a result of this, the last checkpoint's roll always has a 100%\n chance of success. All variables set by this function are reset if a logout\n roll passes.\n\n When called, this function checks if an checkpoint's timestamp has passed\n and hasn't yet been rolled. If true, it rolls for that checkpoint and marks\n it (so it's not rolled again). If the roll passes, a logout is called and\n all checkpoints are reset. If the roll fails or a checkpoint's timestamp\n hasn't yet passed, the function does nothing and returns.\n\n \"\"\"\n current_time = round(time.time())\n\n # If a checkpoint's timestamp has passed, roll for a logout, then set\n # a global variable so that checkpoint isn't rolled again.\n if current_time >= start.checkpoint_1 and start.checkpoint_1_checked is False:\n log.info(\"Rolling for checkpoint 1...\")\n start.checkpoint_1_checked = True\n logout_break_roll(5)\n\n elif current_time >= start.checkpoint_2 and start.checkpoint_2_checked is False:\n log.info(\"Rolling for checkpoint 2...\")\n start.checkpoint_2_checked = True\n logout_break_roll(5)\n\n elif current_time >= start.checkpoint_3 and start.checkpoint_3_checked is False:\n log.info(\"Rolling for checkpoint 3...\")\n start.checkpoint_3_checked = True\n logout_break_roll(5)\n\n elif current_time >= start.checkpoint_4 and start.checkpoint_4_checked is False:\n log.info(\"Rolling for checkpoint 4...\")\n start.checkpoint_4_checked = True\n logout_break_roll(5)\n\n # The last checkpoint's timestamp is based on the maximum session\n # duration, so force a logout and reset all the other checkpoints.\n elif current_time >= start.checkpoint_5:\n start.checkpoint_1_checked = False\n start.checkpoint_2_checked = False\n start.checkpoint_3_checked = False\n start.checkpoint_4_checked = False\n logout_break_roll(1)\n\n # Print the correct logging information according to which checkpoint(s)\n # have been rolled for.\n else:\n if start.checkpoint_1_checked is False:\n log.info(\"Checkpoint 1 is at %s\", time.ctime(start.checkpoint_1))\n elif start.checkpoint_1_checked is True and start.checkpoint_2_checked is False:\n log.info(\"Checkpoint 2 is at %s\", time.ctime(start.checkpoint_2))\n elif start.checkpoint_2_checked is True and start.checkpoint_3_checked is False:\n 
log.info(\"Checkpoint 3 is at %s\", time.ctime(start.checkpoint_3))\n elif start.checkpoint_3_checked is True and start.checkpoint_4_checked is False:\n log.info(\"Checkpoint 4 is at %s\", time.ctime(start.checkpoint_4))\n elif start.checkpoint_4_checked is True:\n log.info(\"Checkpoint 5 is at %s\", time.ctime(start.checkpoint_5))\n\n\n# TODO: Move to misc.py\ndef logout_break_roll(\n chance,\n min_break_duration=int(start.config[\"main\"][\"min_break_duration\"]),\n max_break_duration=int(start.config[\"main\"][\"max_break_duration\"]),\n) -> None:\n \"\"\"\n Rolls for a chance to take a logout break.\n\n Args:\n chance (int): See wait_rand()'s docstring.\n min_break_duration (int): The minimum number of minutes to wait\n if the roll passes, by default reads\n the config file.\n max_break_duration (int): The maximum number of minutes to wait\n if the roll passes, by default reads\n the config file.\n\n \"\"\"\n logout_roll = rand.randint(1, chance)\n log.info(\"Logout roll was %s, needed %s\", logout_roll, chance)\n\n if logout_roll == chance:\n log.info(\"Random logout called.\")\n logout()\n # Make sure all checkpoints are reset.\n start.checkpoint_1_checked = False\n start.checkpoint_2_checked = False\n start.checkpoint_3_checked = False\n start.checkpoint_4_checked = False\n\n # Track the number of play sessions that have occurred so far.\n start.session_num += 1\n log.info(\"Completed session %s/%s\", start.session_num, start.session_total)\n\n # If the maximum number of sessions has been reached, kill the bot.\n if start.session_num >= start.session_total:\n log.info(\"Final session completed! Script done.\")\n sys.exit(0)\n\n else:\n # Convert from minutes to miliseconds.\n min_break_duration *= 60000\n max_break_duration *= 60000\n wait_time_seconds = misc.rand_seconds(\n min_break_duration, max_break_duration\n )\n\n # Convert back to human-readable format for logging.\n wait_time_minutes = wait_time_seconds / 60\n log.info(\"Sleeping for %s minutes.\", round(wait_time_minutes))\n\n time.sleep(wait_time_seconds)\n login_full()\n else:\n return\n\n\n# TODO: Move to inventory.py\ndef open_side_stone(side_stone) -> bool:\n \"\"\"\n Opens a side stone menu.\n\n Args:\n side_stone (str): The name of the side stone to open. Available\n options are `attacks`, `skills`, `quests`,\n `inventory`, `equipment`, `prayers`, `spellbook`,\n `clan`, `friends`, `account`, `logout`,\n `settings`, `emotes`, and `music`.\n\n Returns:\n Returns True if desired side stone was opened or is already open.\n\n Raises:\n Raises an exception if side stone could not be opened.\n\n \"\"\"\n side_stone_open = \"./needles/side-stones/open/\" + side_stone + \".png\"\n side_stone_closed = \"./needles/side-stones/closed/\" + side_stone + \".png\"\n\n try:\n banking.close_bank()\n log.debug(\"Ensuring side stone %s is open\", side_stone)\n interface.enable_button(\n button_disabled=side_stone_closed,\n button_disabled_region=vis.SIDE_STONES,\n button_enabled=side_stone_open,\n button_enabled_region=vis.SIDE_STONES,\n conf=0.98,\n )\n except Exception as error:\n raise Exception(\"Could not open side stone!\") from error\n return True\n\n\n# TODO: Update the terminology used in this function. Make sure to\n# distinguish between \"waypoint\" and \"destination\". 
Probably going to\n# redefine \"waypoint\" to be \"the coordinates that you click on the\n# minimap to tell your character to walk to\", and \"destination\" to be\n# \"the desired coordinates you want your character to be at\".\ndef travel(param_list, haystack_map, attempts=100) -> bool:\n \"\"\"\n Clicks on the minimap until the player has arrived at the desired\n coordinates.\n\n Here's an example of what the arguments might look like for this\n function:\n\n ([((240, 399), 1, (4, 4), (5, 10)), <- This is the first waypoint.\n ((420, 401), 3, (25, 25), (5, 10))], <- This is the second waypoint.\n haystack.png, 150)\n\n (240, 399) = The first waypoint is at X=240 Y=399, relative to\n haystack.png.\n 1 = Issued \"walk\" or \"run\" commands will vary by 1 coordinate\n when travelling to the waypoint.\n (4, 4) = The player will have arrived at the waypoint when they're\n within 4 coordinates of the waypoint's coordinates.\n (5, 10) = The function will wait between 5 and 10 seconds between\n each \"walk\" or \"run\" command.\n 150 = The function will issue a total of 150 \"walk\" or \"run\"\n commands before giving up.\n\n Args:\n param_list (list): A list of tuples containing the parameters that\n describe how to get the player to the wapoint(s).\n Each tuple in the list describes a single\n waypoint with its associated parameters.\n Each tuple in the list containes three tuples\n and an integer in the following order:\n - A 2-tuple of the desired (X, Y) coordinates\n to travel to. This is the waypoint's coordinates\n relative to the haystack map's coordinates.\n - An integer of the coordinate tolerance for\n each minimap click.\n - A 2-tuple of the (X, Y) tolerance allowed\n for determining if the player has reached\n the waypoint.\n - A 2-tuple of the minimum and maximum number of\n seconds to sleep before re-checking position\n while going to that waypoint.\n haystack_map (file): Filepath to the map to use to navigate.\n All waypoint coordinates are relative to\n this map.\n attempts (int): The number of \"walk\" or \"run\" commands the function\n will issue to the player before giving up.\n\n Raises:\n Logs out if any errors occur.\n\n \"\"\"\n # TODO: Make this function travel to a single waypoint only.\n # Create a separate function if multiple waypoints need to be\n # joined together.\n\n # Make sure file path is OS-agnostic.\n haystack_map = str(pathlib.Path(haystack_map))\n haystack = cv2.imread(haystack_map, cv2.IMREAD_GRAYSCALE)\n\n # Loop through each waypoint.\n # TODO: Change param_list to a dictionary so parameter names can be\n # seen when this function is called.\n log.info(\"Travelling to location.\")\n for params in param_list:\n\n # Break down the parameters for the current waypoint.\n waypoint, coord_tolerance, waypoint_tolerance, sleep_range = params\n\n for attempt in range(1, attempts):\n\n if attempt > attempts:\n log.error(\"Could not reach destination!\")\n return False\n\n # Find the minimap position within the haystack map.\n coords = ocv_find_location(haystack)\n (\n coords_map_left,\n coords_map_top,\n coords_map_width,\n coords_map_height,\n ) = coords\n\n # Get center of minimap coordinates within haystack map.\n coords_map_x = int(coords_map_left + (coords_map_width / 2))\n coords_map_y = int(coords_map_top + (coords_map_height / 2))\n\n # Get center of minimap coordinates within client.\n # Absolute coordinates are used rather than using an image\n # search to speed things up.\n coords_client_x = vis.CLIENT[0] + 642\n coords_client_y = vis.CLIENT[1] + 
85\n\n # Figure out how far the waypoint is from the current location.\n waypoint_distance_x = waypoint[0] - coords_map_x\n waypoint_distance_y = waypoint[1] - coords_map_y\n log.debug(\n \"dest_distance is (x=%s, y=%s)\",\n waypoint_distance_x,\n waypoint_distance_y,\n )\n\n # Check if player has reached waypoint before making the click.\n if (\n abs(waypoint_distance_x) <= waypoint_tolerance[0]\n and abs(waypoint_distance_y) <= waypoint_tolerance[1]\n ):\n break\n\n # Generate random click coordinate variation.\n coord_rand = rand.randint(-coord_tolerance, coord_tolerance)\n # If the waypoint's distance is larger than the size of the\n # minimap (about 50 pixels in either direction), reduce\n # the click distance to the edge of the minimap.\n if waypoint_distance_x >= 50:\n click_pos_x = coords_client_x + 50 + coord_rand\n # Since the minimap is circular, if the Y-distance is low\n # enough, we can make the click-position for the X-coordinate\n # farther left/right to take advantage of the extra space.\n if waypoint_distance_y <= 10:\n click_pos_x += 13\n\n # If the waypoint's X distance is negative, we know we\n # need to subtract X coordinates.\n elif abs(waypoint_distance_x) >= 50:\n click_pos_x = coords_client_x - 50 + coord_rand\n if abs(waypoint_distance_y) <= 10:\n click_pos_x -= 13\n else:\n click_pos_x = coords_client_x + waypoint_distance_x + coord_rand\n\n # Do the same thing, but for the Y coordinates.\n coord_rand = rand.randint(-coord_tolerance, coord_tolerance)\n if waypoint_distance_y >= 50:\n click_pos_y = coords_client_y + 50 + coord_rand\n if waypoint_distance_x <= 10:\n click_pos_y += 13\n elif abs(waypoint_distance_y) >= 50:\n click_pos_y = coords_client_y - 50 + coord_rand\n if abs(waypoint_distance_x) <= 10:\n click_pos_y -= 13\n else:\n click_pos_y = coords_client_y + waypoint_distance_y + coord_rand\n\n click_pos_y = abs(click_pos_y)\n click_pos_x = abs(click_pos_x)\n # Holding down CTRL while clicking will cause character to\n # run.\n\n if start.config[\"main\"][\"ctrl_click_run\"] is True:\n pag.keyDown(\"ctrl\")\n inputs.Mouse(\n region=(click_pos_x, click_pos_y, 0, 0),\n sleep_range=(50, 100, 100, 200),\n move_duration_range=(0, 300),\n ).click_coord()\n if start.config[\"main\"][\"ctrl_click_run\"] is True:\n pag.keyUp(\"ctrl\")\n misc.sleep_rand((sleep_range[0] * 1000), (sleep_range[1] * 1000))\n\n if (\n abs(waypoint_distance_x) <= waypoint_tolerance[0]\n and abs(waypoint_distance_y) <= waypoint_tolerance[1]\n ):\n break\n # logout()\n # raise Exception('Could not reach destination!')\n return True\n\n\ndef ocv_find_location(haystack) -> tuple[int, int, int, int]:\n \"\"\"\n OpenCV helper function used by travel() to find the minimap within\n the haystack map.\n\n Currently hard-coded to using the travel() function, so it's not\n very flexible.\n\n Args:\n haystack: The haystack to match the needle within. Must be\n an OpenCV vision object.\n\n Returns:\n Returns the (left, top, width, height) coordinates of the\n needle within the haystack.\n\n \"\"\"\n needle = pag.screenshot(region=vis.MINIMAP_SLICE)\n needle = cv2.cvtColor(np.array(needle), cv2.COLOR_RGB2GRAY)\n w, h = needle.shape[::-1]\n result = cv2.matchTemplate(haystack, needle, cv2.TM_CCOEFF_NORMED)\n loc = cv2.minMaxLoc(result)\n match = loc[3]\n return match[0], match[1], w, h\n",
"id": "7646715",
"language": "Python",
"matching_score": 5.788534164428711,
"max_stars_count": 0,
"path": "ocvbot/behavior.py"
},
{
"content": "# coding=UTF-8\n\"\"\"\nControls the mouse and keyboard.\n\n\"\"\"\nimport logging as log\nimport random as rand\n\nimport pyautogui as pag\nimport pyclick as pyc\nfrom ocvbot import misc\n\n# initialize HumanClicker object\nhc = pyc.HumanClicker()\n\n\nclass Mouse:\n \"\"\"\n Class to move and click the mouse cursor.\n\n Args:\n region (tuple): A 4-tuple containing the left, top, width, and\n height coordinates. The width and height values\n are used for randomizing the location of the mouse\n cursor.\n sleep_range (tuple): A 4-tuple containing the minimum and maximum\n number of miliseconds to wait before\n performing the action, and the minimum and\n maximum number of miliseconds to wait after\n performing the action, default is\n (0, 500, 0, 500).\n action_duration_range (tuple): A 2-tuple containing the\n minimum and maximum number of\n miliseconds during which the\n action will be performed, such as\n holding down the mouse button,\n default is (1, 100).\n move_duration_range (tuple): A 2-tuple containing the\n minimum and maximum number of\n miliseconds to take to move the\n mouse cursor to its destination,\n default is (50, 1500).\n button (str): The mouse button to click with, default is left.\n\n \"\"\"\n\n def __init__(\n self,\n region: tuple[\n int, int, int, int\n ], # In the format of (left, top, width, height).\n sleep_range: tuple[int, int, int, int] = (0, 500, 0, 500),\n move_duration_range: tuple[int, int] = (1, 50),\n action_duration_range: tuple[int, int] = (1, 100),\n button: str = \"left\",\n ) -> None:\n\n self.region = region\n self.sleep_range = sleep_range\n self.move_duration_range = move_duration_range\n self.action_duration_range = action_duration_range\n self.button = button\n\n def click_coord(self, move_away: bool = False, number_of_clicks: int = 1) -> bool:\n \"\"\"\n Clicks within the provided coordinates. If width and height are\n both 0, then this function will click in the exact same location\n every time.\n\n Args:\n move_away (bool): Whether to move the mouse cursor a short\n distance away from the coordinates that\n were just clicked on, default is False.\n number_of_clicks (int): The number of times to click the\n coordinates.\n\n \"\"\"\n self.move_to()\n for _ in range(number_of_clicks):\n self.click()\n if move_away is True:\n self.region = (15, 15, 100, 100)\n self.move_duration_range = (0, 500)\n self.moverel()\n return True\n\n def move_to(self) -> bool:\n \"\"\"\n Moves the mouse pointer to the specified coordinates. Coordinates\n are based on the display's dimensions. Units are in pixels. Uses\n Bezier curves to make mouse movement appear more human-like.\n\n \"\"\"\n left, top, width, height = self.region\n\n # hc.move uses a (x1, x2, y1, y2) coordinate format instead of a\n # (left, top, width, height) format.\n # x2 and y2 are obtained by adding width to left and height to top.\n x_coord = rand.randint(left, (left + width))\n y_coord = rand.randint(top, (top + height))\n\n hc.move((x_coord, y_coord), self.move_duration())\n return True\n\n def moverel(self) -> bool:\n \"\"\"\n Moves the mouse in a random direction, relative to its current\n position. Uses left/width to determinie the minimum and maximum\n X distance to move and top/height to determine the minimum and\n maximum Y distance to move.\n\n Whichever of the two left/width values is lower will be used as\n the minimum X distance and whichever of the two values is higher\n will be used as the maximum X distance. 
Same for top/height.\n\n \"\"\"\n left, top, width, height = self.region\n (x_position, y_position) = pag.position()\n\n # Get min and max values based on the provided ltwh coordinates.\n x_min = min(left, width)\n x_max = max(left, width)\n y_min = min(top, height)\n y_max = max(top, height)\n\n # Get a random distance to move based on min and max values.\n x_distance = rand.randint(x_min, x_max)\n y_distance = rand.randint(y_min, y_max)\n\n y_destination = y_position + y_distance\n x_destination = x_position + x_distance\n\n # Roll for a chance to reverse the direction the mouse moves in.\n if (rand.randint(1, 2)) == 2:\n x_destination = x_position - x_distance\n if (rand.randint(1, 2)) == 2:\n y_destination = y_position - y_distance\n\n hc.move((x_destination, y_destination), self.move_duration())\n return True\n\n def move_duration(self) -> float:\n \"\"\"\n Randomizes the amount of time the mouse cursor takes to move to\n a new location.\n\n Returns:\n Returns a float containing a number in seconds.\n\n \"\"\"\n move_durmin, move_durmax = self.move_duration_range\n move_duration_var = misc.rand_seconds(\n min_seconds=move_durmin, max_seconds=move_durmax\n )\n return move_duration_var\n\n def click(self, hold: bool = False) -> bool:\n \"\"\"\n Clicks the left or right mouse button, waiting both before and\n after for a randomized period of time.\n\n Args:\n hold (bool): Whether to hold down the mouse button rather\n than just clicking it.\n Uses self.action_duration_range to determine\n the minimum and maximum duration to hold down\n the mouse button.\n\n \"\"\"\n # Random sleep before click.\n misc.sleep_rand(sleep_min=self.sleep_range[0], sleep_max=self.sleep_range[1])\n\n if hold is True:\n duration = misc.rand_seconds(\n min_seconds=self.action_duration_range[0],\n max_seconds=self.action_duration_range[1],\n )\n pag.click(button=self.button, duration=duration)\n else:\n pag.click(button=self.button)\n\n # Random sleep after click.\n misc.sleep_rand(sleep_min=self.sleep_range[2], sleep_max=self.sleep_range[3])\n return True\n\n\nclass Keyboard:\n \"\"\"\n Manipulates the keyboard.\n\n Args:\n sleep_range (tuple): A 4-tuple containing the minimum and\n maximum number of miliseconds to wait\n before performing the action, and the\n minimum and maximum number of miliseconds\n to wait after performing the action,\n default is (0, 500, 0, 500).\n action_duration_range (tuple): A 2-tuple containing the\n minimum and maximum number of\n miliseconds during which the\n action will be performed, such as\n holding down a key, default is\n (1, 100).\n log_keys (bool): Whether to log keystrokes at DEBUG level. 
This\n is always set to False when entering user\n credentials, default is True.\n\n \"\"\"\n\n def __init__(\n self,\n sleep_range: tuple[int, int, int, int] = (0, 500, 0, 500),\n action_duration_range: tuple[int, int] = (1, 100),\n log_keys: bool = True,\n ):\n\n self.sleep_range = sleep_range\n self.action_duration_range = action_duration_range\n self.log = log_keys\n\n def typewriter(self, message: str) -> bool:\n \"\"\"\n Types out the specified message with a randomized delay between\n each key press.\n\n Args:\n message (str): The message to type.\n\n \"\"\"\n self.sleep_range = (0, 20, 0, 20)\n self.action_duration_range = (1, 50)\n for key in message:\n self.keypress(key)\n return True\n\n def keypress(self, key: str) -> bool:\n \"\"\"\n Presses the specified key.\n\n Args:\n key (str): The key on the keyboard to press, according to\n PyAutoGUI.\n\n \"\"\"\n if self.log is True:\n log.debug(\"Pressing key: %s.\", key)\n\n misc.sleep_rand(sleep_min=self.sleep_range[0], sleep_max=self.sleep_range[1])\n pag.keyDown(key)\n misc.sleep_rand(\n sleep_min=self.action_duration_range[0],\n sleep_max=self.action_duration_range[1],\n )\n pag.keyUp(key)\n misc.sleep_rand(sleep_min=self.sleep_range[2], sleep_max=self.sleep_range[3])\n return True\n",
"id": "10079611",
"language": "Python",
"matching_score": 2.6638991832733154,
"max_stars_count": 44,
"path": "ocvbot/inputs.py"
},
{
"content": "#!/usr/bin/env python3\n# coding=UTF-8\n\"\"\"\nKeeps the OSRS client logged in by randomly pressing an arrow key every\nfew minutes.\n\n\"\"\"\nimport logging as log\nimport pathlib\nimport random as rand\nimport sys\n\n# Ensure ocvbot files are added to sys.path.\nSCRIPTPATH = str(pathlib.Path(__file__).parent.parent.absolute())\nsys.path.insert(1, SCRIPTPATH)\n\nfrom ocvbot import inputs\nfrom ocvbot import misc\nfrom ocvbot import vision as vis\n\nvis.init()\n\ndef main() -> None:\n    # Focus the client by clicking a random spot on the chat menu.\n    inputs.Mouse(region=vis.CHAT_MENU).click_coord()\n\n    # Every 100-299 seconds, hit an arrow key to move the client's camera.\n    # Auto-logout occurs after 5 minutes (300 seconds) of inactivity.\n    while True:\n        # Units for sleep_rand() are in milliseconds.\n        # 5 min = 300000 milliseconds.\n        misc.sleep_rand(100000, 299000)\n        roll = rand.randint(1, 4)\n\n        if roll == 1:\n            key = \"left\"\n        elif roll == 2:\n            key = \"right\"\n        elif roll == 3:\n            key = \"up\"\n        else:\n            key = \"down\"\n\n        log.info(\"Pressing key %s to remain logged in\", key)\n        inputs.Keyboard().keypress(key)\n\n\nif __name__ == \"__main__\":\n    main()\n",
"id": "5774907",
"language": "Python",
"matching_score": 1.173885464668274,
"max_stars_count": 44,
"path": "tools/idle.py"
},
{
"content": "# coding=UTF-8\n\"\"\"\nUnit tests for the inputs.py module.\n\nLinux only. Requires feh.\n\n\"\"\"\nimport pyautogui as pag\nimport pytest\n\nimport init_tests\n\n# OCVBot modules must be imported after init_tests.\nfrom ocvbot import inputs\n\nmove_to_params = (\n    (100, 100, 100, -100),\n    (200, 200, 100, 100),\n    (300, 300, 100, 100),\n    (400, 400, 100, 100),\n    (500, 5000, 0, 100),\n)\n\n\n# @pytest.mark.parametrize('x,y,xmax,ymax', move_to_params)\n# def test_move_to(x, y, xmax, ymax):\n#     input.move_to(x, y, xmax, ymax)\n#     assert True\n\n\ndef test_click_coord():\n    left = 100\n    top = 100\n    width = 100\n    height = 100\n    # Mouse expects a single (left, top, width, height) region tuple.\n    inputs.Mouse(region=(left, top, width, height)).click_coord()\n",
"id": "8300901",
"language": "Python",
"matching_score": 1.2502636909484863,
"max_stars_count": 44,
"path": "tests/test_inputs.py"
},
{
"content": "# coding=UTF-8\n\"\"\"\nUnit tests for the behavior.py module.\n\nLinux only. Requires feh.\n\n\"\"\"\nimport os\n\nimport pytest\n\nimport init_tests\n\n# This statement exists to prevent the OCVBot imports from being re-ordered.\npass\n\n# OCVBot modules must be imported after init_tests.\nfrom ocvbot import behavior\nfrom ocvbot import startup as start\n\nimage_directory = (os.path.dirname(__file__)) + \"/test_behavior/\"\n\n\n# CHECK_SKILLS ------------------------------------------------------------------------------------\n\ncheck_skills_pass_params = (\"01\",)\n\n\n@pytest.mark.parametrize(\"params\", check_skills_pass_params)\ndef test_check_skills_pass(params) -> None:\n test_number = params\n init_tests.feh(\"check_skills\", \"pass\", test_number, image_directory)\n result = behavior.check_skills()\n assert result is True\n init_tests.kill_feh()\n\n\n# DROP_ITEM ---------------------------------------------------------------------------------------\n\ndrop_item_pass_params = (\n # Must open side stone first.\n (\"./needles/items/iron-ore.png\", \"01\"),\n # No items exist in inventory.\n (\"./needles/items/iron-ore.png\", \"02\"),\n)\n\n\n@pytest.mark.parametrize(\"params\", drop_item_pass_params)\ndef test_drop_item_pass(params) -> None:\n item, test_number = params\n init_tests.feh(\"drop_item\", \"pass\", test_number, image_directory)\n result = behavior.drop_item(item=item, random_wait=False, shift_click=False)\n assert result is None\n init_tests.kill_feh()\n\n\n# Try dropping item too many times.\ndrop_item_fail_params = ((\"./needles/items/iron-ore.png\", \"01\"),)\n\n\n@pytest.mark.parametrize(\"params\", drop_item_fail_params)\ndef test_drop_item_fail(params) -> None:\n item, test_number = params\n init_tests.feh(\"drop_item\", \"fail\", test_number, image_directory)\n with pytest.raises(start.InventoryError, match=\"Tried dropping item too many\"):\n behavior.drop_item(item=item, random_wait=False, shift_click=False)\n init_tests.kill_feh()\n\n\n# LOGOUT ------------------------------------------------------------------------------------------\n\nlogout_pass_params = (\n \"01\", # Logout tab is already open.\n \"02\", # Logout tab is already open, button highlighted.\n \"03\", # Logout tab is already open, world switcher open.\n \"04\", # Already logged out\n \"05\", # Attack tab is open, switching tabs doesn't work at first.\n \"06\", # Must click logout button multiple times.\n)\n\n\n@pytest.mark.parametrize(\"params\", logout_pass_params)\ndef test_logout_pass(params) -> None:\n test_number = params\n init_tests.feh(\"logout\", \"pass\", test_number, image_directory)\n result = behavior.logout()\n assert result is None\n init_tests.kill_feh()\n\n\n# Try too many times to click on logout button.\nlogout_fail_params = (\"01\",)\n\n\n@pytest.mark.parametrize(\"params\", logout_fail_params)\ndef test_logout_fail(params) -> None:\n test_number = params\n init_tests.feh(\"logout\", \"fail\", test_number, image_directory)\n with pytest.raises(Exception, match=\"Could not logout\"):\n behavior.logout()\n init_tests.kill_feh()\n\n\n# OPEN SIDE STONE ---------------------------------------------------------------------------------\n\nopen_side_stone_pass_params = (\n (\"attacks\", \"01\"), # Bank window must be closed first, stone already open.\n (\"skills\", \"02\"), # Must try multiple times to open stone.\n (\"quests\", \"03\"),\n (\"inventory\", \"04\"),\n (\"equipment\", \"05\"),\n (\"prayers\", \"06\"),\n (\"spellbook\", \"07\"),\n (\"logout\", 
\"08\"),\n)\n\n\n@pytest.mark.parametrize(\"params\", open_side_stone_pass_params)\ndef test_open_side_stone_pass(params) -> None:\n side_stone, test_number = params\n init_tests.feh(\"open_side_stone\", \"pass\", test_number, image_directory)\n result = behavior.open_side_stone(side_stone)\n assert result is True\n init_tests.kill_feh()\n\n\nopen_side_stone_fail_params = ((\"settings\", \"01\"),)\n\n\n@pytest.mark.parametrize(\"params\", open_side_stone_fail_params)\ndef test_open_side_stone_fail(params) -> None:\n side_stone, test_number = params\n init_tests.feh(\"open_side_stone\", \"fail\", test_number, image_directory)\n with pytest.raises(Exception, match=\"Could not open side stone!\"):\n behavior.open_side_stone(side_stone)\n init_tests.kill_feh()\n",
"id": "5994491",
"language": "Python",
"matching_score": 3.3595426082611084,
"max_stars_count": 0,
"path": "tests/test_behavior.py"
},
{
"content": "# coding=UTF-8\n\"\"\"\nIntegration tests for the scenarios in main.py.\n\nLinux only. Requires feh.\n\n\"\"\"\nimport os\n\nimport pytest\n\nimport init_tests\n\n# This statement exists to prevent the OCVBot imports from being re-ordered.\npass\n\n# OCVBot modules must be imported after init_tests.\nfrom ocvbot import main\n\nimage_directory = (os.path.dirname(__file__)) + \"/test_main/\"\n\n\n# CHEF --------------------------------------------------------------------------------------------\n\nchef_pass_params = ((\"raw-anchovies\", \"al-kharid\", \"01\"),) # Level-up occurs halfway through\n\n\n@pytest.mark.parametrize(\"params\", chef_pass_params)\ndef test_chef_pass(params) -> None:\n item, location, test_number = params\n init_tests.feh(\"chef\", \"pass\", test_number, image_directory)\n result = main.chef(item=item, location=location, loops=1)\n assert result is None\n init_tests.kill_feh()\n\n\n# Pass an unsupported location.\nchef_fail_01_params = ((\"raw-anchovies\", \"unsupported-location\", \"00\"),)\n\n\n@pytest.mark.parametrize(\"params\", chef_fail_01_params)\ndef test_chef_fail_01(params) -> None:\n item, location, test_number = params\n with pytest.raises(ValueError, match=\"Unsupported value for location\"):\n main.chef(item=item, location=location, loops=1)\n init_tests.kill_feh()\n\n\n# SMITH -------------------------------------------------------------------------------------------\n\nsmith_pass_params = ((\"iron-bar\", \"iron-platebody\", \"varrock\", \"01\"),)\n\n\n@pytest.mark.parametrize(\"params\", smith_pass_params)\ndef test_smith_pass(params) -> None:\n bar_type, item, location, test_number = params\n init_tests.feh(\"smith\", \"pass\", test_number, image_directory)\n result = main.smith(bar=bar_type, item=item, location=location, loops=1)\n assert result is True\n init_tests.kill_feh()\n\n\n# ALCHEMIST ---------------------------------------------------------------------------------------\n\nalchemist_pass_params = (\n (\"bank-note\", \"01\"), # Item is in top left.\n (\"bank-note\", \"02\"), # Item is at bottom.\n (\"bank-note\", \"03\"), # Item is at bottom, side stone must be opened.\n)\n\nalchemist_fail_params = (\n (\"bank-note\", \"01\"), # Item on the right side of inventory.\n (\"bank-note\", \"02\"), # Item on the right side of inventory, near center.\n)\n\n\n@pytest.mark.parametrize(\"params\", alchemist_pass_params)\ndef test_alchemist_pass(params) -> None:\n alch_item_type, test_number = params\n init_tests.feh(\"alchemist\", \"pass\", test_number, image_directory)\n result = main.alchemist(alch_item_type=alch_item_type, loops=1)\n assert result is None\n init_tests.kill_feh()\n\n\n@pytest.mark.parametrize(\"params\", alchemist_fail_params)\ndef test_alchemist_fail(params) -> None:\n alch_item_type, test_number = params\n init_tests.feh(\"alchemist\", \"fail\", test_number, image_directory)\n with pytest.raises(Exception, match=\"Could not find target\"):\n main.alchemist(alch_item_type=alch_item_type, loops=1)\n init_tests.kill_feh()\n\n\n# SPELLCASTER -------------------------------------------------------------------------------------\n\nspellcaster_pass_params = ((\"curse-varrock-castle\", \"01\"),)\n\n\n@pytest.mark.parametrize(\"params\", spellcaster_pass_params)\ndef test_spellcaster_pass(params) -> None:\n scenario, test_number = params\n init_tests.feh(\"spellcaster\", \"pass\", test_number, image_directory)\n result = main.spellcaster(scenario=scenario, loops=1)\n assert result is None\n init_tests.kill_feh()\n",
"id": "5340306",
"language": "Python",
"matching_score": 1.946245789527893,
"max_stars_count": 0,
"path": "tests/test_main.py"
},
{
"content": "# coding=utf-8\n\"\"\"\nModule for invoking main bot scripts.\n\nMost main scripts define a preset list of `scenarios`, which the user\nmust choose from. Each scenario has a predetermined configuration that will\nused for training that skill. For example, the `varrock-east-mine` scenario\nfor the `miner` script is configured to only mine two specific iron rocks\nwithin Varrock East Mine.\n\nSee `config.yaml.example` for more info.\nSee `docs/scenarios/` for the required client configuration settings in\neach scenario.\n\n\"\"\"\nimport glob\nimport logging as log\nimport os\nimport pathlib\nimport random as rand\nimport sys\nimport traceback\n\n# Global TODOs:\n# TODO: Transition to use proper exceptions rather than checking for a False return value.\n\n# TODO: See if these statements are really necessary since they're in init.py\n# Make sure the program's working directory is the directory in which\n# this file is located.\nos.chdir(os.path.dirname(__file__))\n# Ensure ocvbot files are added to sys.path.\nSCRIPTPATH = str(pathlib.Path(__file__).parent.parent.absolute())\nsys.path.insert(1, SCRIPTPATH)\n\nfrom ocvbot import banking\nfrom ocvbot import behavior\nfrom ocvbot import misc\nfrom ocvbot import skills\nfrom ocvbot import startup as start\nfrom ocvbot import vision as vis\n\n\ndef miner(scenario: str, loops: int = 10000) -> None:\n \"\"\"\n Script for mining rocks in a handful of locations. Banking support is\n limited.\n\n Supported scenarios:\n `lumbridge-mine` = Mines copper in Lumbridge Swamp.\n `varrock-east-mine` = Mines iron in Varrock East mine. Banking\n supported.\n\n See `/docs/scenarios/` for the required client\n configuration settings for each scenario.\n\n Raises:\n Raises an exception if an unsupported scenario is passed.\n\n \"\"\"\n # TODO: Function is too large. 
Refactor.\n log.info(\"Launching miner script with scenario %s.\", scenario)\n # Make the path to the rock needles shorter.\n prefix = \"./needles/game-screen/\" + scenario + \"/\"\n haystack_map = \"./haystacks/\" + scenario + \".png\"\n\n # Read the config file to get argument values.\n drop_sapphire_config = start.config[\"mining\"][\"drop_sapphire\"]\n drop_emerald_config = start.config[\"mining\"][\"drop_emerald\"]\n drop_ruby_config = start.config[\"mining\"][\"drop_ruby\"]\n drop_diamond_config = start.config[\"mining\"][\"drop_diamond\"]\n drop_clue_geode_config = start.config[\"mining\"][\"drop_clue_geode\"]\n # Determine if the player will be dropping the ore or banking it.\n # This var is forced to True in scenarios where banking is not\n # supported.\n drop_ore_config = start.config[\"mining\"][\"drop_ore\"]\n\n bank_from_mine = None\n mine_from_bank = None\n\n # CONFIGURE SCENARIO PARAMETERS ---------------------------------------------------------------\n\n if scenario == \"varrock-east-mine\":\n ore = \"./needles/items/iron-ore.png\"\n mining = skills.Mining(\n rocks=[\n (prefix + \"north-full2.png\", prefix + \"north-empty.png\"),\n (prefix + \"west-full.png\", prefix + \"west-empty.png\"),\n ],\n ore=ore,\n drop_sapphire=drop_sapphire_config,\n drop_emerald=drop_emerald_config,\n drop_ruby=drop_ruby_config,\n drop_diamond=drop_diamond_config,\n drop_clue_geode=drop_clue_geode_config,\n )\n\n bank_from_mine = [\n ((253, 181), 5, (35, 35), (1, 6)),\n ((112, 158), 5, (20, 20), (1, 6)),\n ((108, 194), 1, (10, 4), (3, 8)),\n ]\n\n mine_from_bank = [\n ((240, 161), 5, (35, 35), (1, 6)),\n ((262, 365), 5, (25, 25), (1, 6)),\n ((240, 399), 1, (4, 4), (3, 8)),\n ]\n\n elif scenario == \"lumbridge-mine\":\n drop_ore_config = True # Banking not supported.\n ore = \"./needles/items/copper-ore.png\"\n mining = skills.Mining(\n rocks=[\n (prefix + \"east-full.png\", prefix + \"east-empty.png\"),\n (prefix + \"south-full.png\", prefix + \"south-empty.png\"),\n ],\n ore=ore,\n drop_sapphire=drop_sapphire_config,\n drop_emerald=drop_emerald_config,\n drop_ruby=drop_ruby_config,\n drop_diamond=drop_diamond_config,\n drop_clue_geode=drop_clue_geode_config,\n )\n\n elif scenario == \"camdozaal-mine\":\n drop_ore_config = True # Banking not supported.\n ore = \"./needles/items/barronite-deposit.png\"\n mining = skills.Mining(\n rocks=[\n (prefix + \"east-full.png\", prefix + \"east-empty.png\"),\n (prefix + \"west-full.png\", prefix + \"west-empty.png\"),\n ],\n ore=ore,\n drop_sapphire=drop_sapphire_config,\n drop_emerald=drop_emerald_config,\n drop_ruby=drop_ruby_config,\n drop_diamond=drop_diamond_config,\n drop_clue_geode=drop_clue_geode_config,\n )\n else:\n raise Exception(\"Scenario not supported!\")\n\n # MAIN FUNCTION LOOP --------------------------------------------------------------------------\n\n for _ in range(loops):\n try:\n mining.mine_multiple_rocks()\n misc.sleep_rand_roll(chance_range=(200, 300))\n except start.TimeoutException:\n misc.sleep_rand_roll(chance_range=(50, 60))\n except start.InventoryFull:\n misc.sleep_rand_roll(chance_range=(50, 60), sleep_range=(1000, 120000))\n if drop_ore_config is True:\n mining.drop_inv_ore()\n else:\n behavior.travel(bank_from_mine, haystack_map)\n banking.open_bank(\"south\")\n misc.sleep_rand_roll()\n\n # Deposit all possible mined items.\n for item in [\n ore,\n \"./needles/items/uncut-sapphire.png\",\n \"./needles/items/uncut-emerald.png\",\n \"./needles/items/uncut-ruby.png\",\n \"./needles/items/uncut-diamond.png\",\n 
\"./needles/items/clue-geode.png\",\n ]:\n banking.deposit_item(item=item, quantity=\"all\")\n\n behavior.travel(mine_from_bank, haystack_map)\n misc.sleep_rand_roll()\n # Roll for randomized actions when the inventory is full.\n behavior.logout_break_range()\n\n misc.session_duration(human_readable=True)\n # Logout when anything else unexpected occurs.\n except (Exception, start.InefficientUseOfInventory):\n behavior.logout()\n\n\ndef alchemist(alch_item_type, loops: int = 10000) -> None:\n \"\"\"\n Script for training high alchemy.\n\n Args:\n alch_item_type (str): See the `magic` section of `config.yaml.example`\n for the available options.\n loops (int): Number of loops to run the given scenario. Changing this\n is only useful for testing purposes. Default is 10000.\n\n \"\"\"\n spell = \"./needles/side-stones/spellbook/high-alchemy.png\"\n if alch_item_type == \"bank-note\":\n target = \"./needles/items/bank-note.png\"\n else:\n target = \"./needles/items/\" + alch_item_type + \".png\"\n\n behavior.open_side_stone(\"spellbook\")\n for _ in range(loops):\n try:\n skills.Magic(\n spell=spell,\n target=target,\n inventory=True,\n conf=0.5,\n region=vis.INV_LEFT_HALF,\n move_duration_range=(0, 200),\n ).cast_spell()\n misc.sleep_rand_roll(chance_range=(10, 20), sleep_range=(100, 10000))\n\n # Every once in a while, print the amount of time the bot has been running.\n # Also roll for randomized logout.\n if rand.randint(1, 50) == 50:\n misc.session_duration(human_readable=True)\n behavior.logout_break_range()\n\n # This will occur when we're out of runes or out of items to alch.\n except start.NeedleError as error:\n raise error\n\n\ndef spellcaster(scenario: str, loops: int = 10000) -> None:\n \"\"\"\n Script for training magic with combat spells.\n\n Args:\n scenario (str): See the `magic` section of `config.yaml.example` for\n the available options.\n loops (int): Number of loops to run the given scenario. Changing this\n is only useful for testing purposes. Default is 10000.\n\n Raises:\n Raises an exception if an unsupported scenario is passed.\n\n \"\"\"\n log.info(\"Launching spellcaster script with scenario %s.\", scenario)\n\n if scenario == \"curse-varrock-castle\":\n spell = \"./needles/side-stones/spellbook/curse.png\"\n target = \"./needles/game-screen/varrock/monk-of-zamorak.png\"\n haystack_map = \"./haystacks/varrock-castle.png\"\n behavior.travel([((75, 128), 1, (4, 4), (5, 10))], haystack_map)\n else:\n raise Exception(\"Scenario not supported!\")\n\n behavior.open_side_stone(\"spellbook\")\n for _ in range(loops):\n skills.Magic(\n spell=spell,\n target=target,\n conf=0.75,\n region=vis.GAME_SCREEN,\n ).cast_spell()\n misc.sleep_rand_roll(chance_range=(10, 20), sleep_range=(100, 10000))\n\n # Every once in a while, print the amount of time the bot has been running.\n # Also roll for randomized logout.\n if rand.randint(1, 50) == 50:\n misc.session_duration(human_readable=True)\n behavior.logout_break_range()\n\n\ndef chef(item: str, location: str, loops: int = 10000) -> None:\n \"\"\"\n Cooks a given item at a given location.\n\n Args:\n item (str): See the `cooking` section of `config.yaml.example` for\n the available options.\n location (str): See the `cooking` section of `config.yaml.example` for\n the available options.\n loops (int): Number of loops to run the given scenario. Changing this\n is only useful for testing purposes. 
Default is 10000.\n\n Returns:\n\n \"\"\"\n if location == \"al-kharid\":\n bank_coords = [((91, 207), 3, (4, 7), (3, 9))]\n range_coords = [((103, 148), 1, (5, 5), (8, 12))]\n heat_source = \"./needles/game-screen/al-kharid/range.png\"\n # Assumes starting location is the bank.\n banking.open_bank(\"west\")\n else:\n raise ValueError(\"Unsupported value for location!\")\n\n log.info(\"Launching chef script with item %s and location %s.\", item, location)\n # Must have staff of water equipped!\n # TODO: In Al Kharid, deal with the door to the house with the range\n # possibly being shut.\n haystack_map = \"./haystacks/\" + location + \".png\"\n item_inv = \"./needles/items/\" + item + \".png\"\n item_bank = \"./needles/items/\" + item + \"-bank.png\"\n\n for _ in range(loops):\n try:\n # Conf is higher than default because raw food looks very\n # similar to cooked food.\n banking.withdrawal_item(item_bank=item_bank, item_inv=item_inv, conf=0.99)\n log.info(\"Withdrawing raw food.\")\n misc.sleep_rand_roll(chance_range=(10, 20), sleep_range=(100, 10000))\n # Go to range.\n behavior.travel(range_coords, haystack_map)\n # Cook food.\n skills.Cooking(item_inv, item_bank, heat_source).cook_item()\n # Go back to bank and deposit cooked food.\n behavior.travel(bank_coords, haystack_map)\n banking.open_bank(\"west\")\n misc.sleep_rand_roll(chance_range=(10, 20), sleep_range=(100, 10000))\n banking.deposit_inventory()\n # Roll for randomized logout.\n behavior.logout_break_range()\n misc.session_duration(human_readable=True)\n\n except Exception:\n print(traceback.format_exc())\n behavior.logout()\n return\n return\n\n\ndef smith(bar: str, item: str, location: str, loops: int = 10000):\n \"\"\"\n Smiths bars at an anvil.\n\n Args:\n bar (str): See the `smithing` section of `config.yaml.example` for\n the available options.\n location (str): See the `smithing` section of `config.yaml.example` for\n the available options.\n loops (int): Number of loops to run the given scenario. Changing this\n is only useful for testing purposes. 
Default is 10000.\n\n \"\"\"\n if location == \"varrock\":\n haystack_map = \"./haystacks/varrock-west-bank.png\"\n bank_coords = [((88, 93), 1, (4, 5), (7, 9))]\n anvil_coords = [((97, 130), 1, (3, 3), (7, 9))]\n anvil = \"./needles/game-screen/varrock/anvil.png\"\n else:\n raise Exception(\"Unsupported value for location!\")\n\n # We can use banked versions of the smith item because the smithing menu\n # has the same background as the bank menu.\n bar = \"./needles/items/\" + bar + \".png\"\n item = \"./needles/items/\" + item + \"-bank.png\"\n\n # Determine how many bars are needed to smith the given item.\n if \"platebody\" in item:\n bars_required = 5\n elif \"scimitar\" in item:\n bars_required = 2\n elif \"axe\" in item or \"warhammer\" in item:\n bars_required = 3\n else:\n raise Exception(\"Unsupported value of item!\")\n\n behavior.open_side_stone(\"inventory\")\n for _ in range(loops):\n if location == \"varrock\":\n banking.open_bank(\"east\")\n banking.deposit_inventory()\n misc.sleep_rand_roll(chance_range=(20, 30))\n\n # Ensure we have bars in the bank.\n have_bars = vis.Vision(\n region=vis.GAME_SCREEN, needle=bar, conf=0.9999\n ).find_needle()\n # Stop script if we don't\n if have_bars is False:\n log.info(\"Out of bars, stopping script.\")\n return\n\n banking.withdrawal_item(\n item_bank=\"./needles/items/hammer-bank.png\",\n item_inv=\"./needles/items/hammer.png\",\n quantity=\"1\",\n )\n misc.sleep_rand_roll(chance_range=(20, 30))\n banking.withdrawal_item(item_bank=bar, item_inv=bar)\n misc.sleep_rand_roll(chance_range=(20, 30))\n\n # Check if we withdrew a full inventory of bars. Stop script if we didn't\n bars_in_inventory = vis.Vision(region=vis.INV, needle=bar).count_needles()\n if bars_in_inventory != 27:\n log.warning(\"Out of bars, stopping script.\")\n return\n\n behavior.travel(anvil_coords, haystack_map)\n skills.Smithing(\n item_in_menu=item,\n bar_type=bar,\n bars_required=bars_required,\n anvil=anvil,\n ).smith_items()\n misc.sleep_rand_roll(chance_range=(20, 30))\n behavior.travel(bank_coords, haystack_map)\n misc.session_duration(human_readable=True)\n # Roll for randomized logout.\n behavior.logout_break_range()\n misc.sleep_rand_roll(chance_range=(20, 30))\n\n\ndef test():\n banking.deposit_inventory()\n\n\ndef cleanup():\n \"\"\"\n Cleans up leftover screenshots created by PyAutoGUI.\n \"\"\"\n glob_string = \".screenshot2*[0-9][0-9][0-9][0-9][0-9][0-9].png\"\n for filepath in glob.glob(glob_string):\n os.remove(filepath)\n\n\n# TODO: Add basic firemaking script that starts at a bank booth and\n# creates 27 fires, all in a straight line, then returns to the booth.\n\n# TODO: Add oak woodcutting script that waits by an oak tree, clicks on\n# it when it appears, and empties inventory when full -- super simple.\n\n# TODO: Possible location for starting a fishing script where the\n# \"fishing tiles\" don't change much is fly fishing at barbarian village.\n\n\nscript = start.config[\"main\"][\"script\"]\n\n\ndef main():\n \"\"\"\n Calls the main botting script defined in the config file.\n\n \"\"\"\n cleanup()\n vis.init()\n\n if script == \"mining\":\n miner(start.config[script][\"location\"])\n\n elif script == \"magic\":\n if start.config[script][\"scenario\"] == \"high-alchemy\":\n alchemist(start.config[script][\"alch_item_type\"])\n else:\n spellcaster(start.config[script][\"scenario\"])\n\n elif script == \"cooking\":\n chef(\n item=start.config[script][\"item\"],\n location=start.config[script][\"location\"],\n )\n\n elif script == \"smithing\":\n 
smith(\n bar=start.config[script][\"bar\"],\n item=start.config[script][\"item\"],\n location=start.config[script][\"location\"],\n )\n\n elif script == \"test\":\n test()\n\n else:\n log.critical(\"Unknown value provided for 'script' key in config file!\")\n raise RuntimeError(\"Unknown value provided for 'script' key in config file!\")\n\n cleanup()\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "12702704",
"language": "Python",
"matching_score": 7.392195224761963,
"max_stars_count": 0,
"path": "ocvbot/main.py"
},
{
"content": "# coding=UTF-8\n\"\"\"\nContains all functions related to training skills.\n\n\"\"\"\nimport logging as log\n\nfrom ocvbot import behavior\nfrom ocvbot import inputs\nfrom ocvbot import misc\nfrom ocvbot import startup as start\nfrom ocvbot import vision as vis\n\n\ndef wait_for_level_up(wait_time: int):\n \"\"\"\n Waits the specified number of seconds for a level-up message to\n appear in the chat menu.\n\n Args:\n wait_time: Approximately the number of seconds to wait for a\n level-up message to appear. Checks for a level-up\n message about once every second.\n\n Returns:\n If a level-up message appears, returns True.\n Returns False otherwise.\n\n \"\"\"\n log.debug(\"Checking for level-up\")\n level_up = vis.Vision(\n region=vis.CHAT_MENU,\n needle=\"./needles/chat-menu/level-up.png\",\n loop_num=wait_time,\n loop_sleep_range=(900, 1100),\n ).wait_for_needle()\n\n if level_up is True:\n return True\n return False\n\n\nclass Cooking:\n \"\"\"\n Class for all functions related to training the Cooking skill.\n\n Args:\n item_inv (file): Filepath to the food to cook as it appears in\n the player's inventory. This is the raw version.\n item_bank (file): Filepath to the food to cook as it appears in\n the players' bank. Make sure this image doesn't\n include the stack count if this item is stacked.\n heat_source (file): Filepath to the fire or range to cook the\n item with as it appears in the game world.\n\n \"\"\"\n\n def __init__(self, item_inv: str, item_bank: str, heat_source: str):\n self.item_inv = item_inv\n self.item_bank = item_bank\n self.heat_source = heat_source\n\n def cook_item(self) -> bool:\n \"\"\"\n Cooks all instances of the given food in the player's inventory.\n\n Returns:\n Returns True if all items were cooked. Returns False in all\n other cases.\n\n \"\"\"\n log.info(\"Attempting to cook food.\")\n behavior.open_side_stone(\"inventory\")\n # Select the raw food in the inventory.\n # Confidence must be higher than normal since raw food is very\n # similar in appearance to its cooked version.\n item_selected = vis.Vision(\n region=vis.CLIENT, needle=self.item_inv, loop_num=3, conf=0.99\n ).click_needle()\n if item_selected is False:\n log.error(\"Unable to find item %s!\", self.item_inv)\n return False\n\n # Select the range or fire.\n heat_source_selected = vis.Vision(\n region=vis.GAME_SCREEN,\n needle=self.heat_source,\n loop_num=3,\n loop_sleep_range=(500, 1000),\n conf=0.80,\n ).click_needle()\n if heat_source_selected is False:\n log.error(\"Unable to find heat source %s!\", self.heat_source)\n return False\n\n # Wait for the \"how many of this item do you want to cook\" chat\n # menu to appear.\n do_x_screen = vis.Vision(\n region=vis.CHAT_MENU,\n needle=\"./needles/chat-menu/do-x.png\",\n loop_num=30,\n loop_sleep_range=(500, 1000),\n ).wait_for_needle()\n if do_x_screen is False:\n log.error('Timed out waiting for \"Make X\" screen!')\n return False\n\n # Begin cooking food.\n inputs.Keyboard().keypress(key=\"space\")\n misc.sleep_rand(3000, 5000)\n\n # Wait for either a level-up or for the player to stop cooking.\n # To determine when the player is done cooking, look for the\n # bright blue \"Staff of Water\" orb to re-appear (equipped weapons\n # disappear while cooking food). 
The player must have this item\n # equipped.\n for _ in range(1, 60):\n misc.sleep_rand(1000, 3000)\n level_up = wait_for_level_up(1)\n # If the player levels-up while cooking, restart cooking.\n if level_up is True:\n self.cook_item()\n cooking_done = vis.Vision(\n region=vis.GAME_SCREEN,\n needle=\"./needles/game-screen/staff-of-water-top.png\",\n conf=0.9,\n loop_num=1,\n ).wait_for_needle()\n if cooking_done is True:\n log.info(\"Cooking is done.\")\n break\n return True\n\n\nclass Magic:\n \"\"\"\n Class for all activities related to training the Magic skill.\n\n Args:\n spell (file): Filepath to the spell to cast as it appears in the\n player's spellbook (NOT greyed-out).\n target (file): Filepath to the target to cast the spell on as it\n appears in the game world. If the spell is a non-\n combat spell, this would be an item as it appears\n in the player's inventory.\n conf (float): Confidence required to match the target.\n region (tuple): The coordinate region to use when searching for\n the target. This will either be \"vis.inv\" or\n \"vis.game_screen\".\n inventory (bool): Whether the spell is being cast on an item in\n the player's inventory (as opposed to a monster),\n default is False.\n move_duration_range (tuple): A 2-tuple of the minimum and maximum\n number of miliseconds the mouse cursor\n will take while moving to the spell\n icon and the target, default is\n (10, 1000).\n \"\"\"\n\n def __init__(\n self,\n spell: str,\n target: str,\n conf: float,\n region: tuple[int, int, int, int],\n inventory: bool = False,\n move_duration_range: tuple[int, int] = (10, 1000),\n ):\n self.spell = spell\n self.target = target\n self.conf = conf\n self.region = region\n self.inventory = inventory\n self.move_duration_range = move_duration_range\n\n def _select_spell(self) -> None:\n \"\"\"\n Helper function that activates the desired spell.\n\n Returns:\n Returns when the desired spell has been selected.\n\n Raises:\n Raises start.NeedleError if the spell could not be found.\n\n \"\"\"\n for _ in range(5):\n spell_available = vis.Vision(\n needle=self.spell, region=vis.INV, loop_num=2\n ).click_needle(\n sleep_range=(\n 50,\n 800,\n 50,\n 800,\n ),\n move_duration_range=self.move_duration_range,\n )\n if spell_available is False:\n behavior.open_side_stone(\"spellbook\")\n else:\n return\n raise start.NeedleError(\"Could not select spell!\")\n\n def _select_target(self) -> None:\n \"\"\"\n Helper function to select a spell's target. 
Can be either a monster in\n the game world or an item in the inventory.\n\n Returns:\n Returns when the desired target has been selected.\n\n Raises:\n Raises start.NeedleError if the target could not be found.\n\n \"\"\"\n for _ in range(5):\n target = vis.Vision(\n needle=self.target, region=self.region, loop_num=2, conf=self.conf\n ).click_needle(\n sleep_range=(\n 10,\n 500,\n 10,\n 500,\n ),\n move_duration_range=self.move_duration_range,\n )\n if target is False:\n # Make sure the inventory is active when casting on items.\n if self.inventory is True:\n behavior.open_side_stone(\"inventory\")\n else:\n return\n raise start.NeedleError(\"Could not find target!\")\n\n def cast_spell(self) -> None:\n \"\"\"\n Main function of the Magic class to cast a spell at a target.\n\n Returns:\n Returns once spell has been cast.\n\n \"\"\"\n self._select_spell()\n self._select_target()\n\n # Wait for spell to be cast.\n misc.sleep_rand(\n int(start.config[\"magic\"][\"min_cast_delay\"]),\n int(start.config[\"magic\"][\"max_cast_delay\"]),\n )\n\n\nclass Mining:\n \"\"\"\n Class for all activities related to training the Mining skill.\n\n Args:\n rocks (list): A list containing an arbitrary number of 2-tuples. Each\n tuple must contain two filepaths: The first filepath\n must be a needle of the rock in its \"full\" state. The\n second filepath must be a needle of the same rock in its\n \"empty\" state.\n ore (file): Filepath to a needle of the item icon of the ore\n being mined, as it appears in the player's\n inventory.\n\n drop_sapphire (bool): Whether to drop mined sapphires. Ignored if\n banking is enabled.\n drop_emerald (bool): Whether to drop mined emeralds. Ignore if\n banking is enabled.\n drop_ruby (bool): Whether to drop mined rubies. Ignore if\n banking is enabled.\n drop_diamond (bool): Whether to drop mined diamonds. Ignore if\n banking is enabled.\n drop_clue_geode (bool): Whether to drop mined clue geodes. Ignore if\n banking is enabled.\n\n Example:\n skills.Mining(\n rocks=[\n (\"./needles/game-screen/camdozaal-mine/west-full\",\n \"./needles/game-screen/camdozaal-mine/west-empty\"),\n (\"./needles/game-screen/camdozaal-mine/east-full\",\n \"./needles/game-screen/camdozaal-mine/east-empty\"),\n ],\n ore=\"./needles/items/barronite-deposit.png\",\n drop_sapphire=True\n drop_emerald=True\n drop_ruby=True\n drop_diamond=False\n drop_clue_geode=False\n )\n \"\"\"\n\n def __init__(\n self,\n rocks: list,\n ore: str,\n drop_sapphire: bool,\n drop_emerald: bool,\n drop_ruby: bool,\n drop_diamond: bool,\n drop_clue_geode: bool,\n conf: float = 0.85,\n ):\n self.rocks = rocks\n self.ore = ore\n self.drop_sapphire = drop_sapphire\n self.drop_emerald = drop_emerald\n self.drop_ruby = drop_ruby\n self.drop_diamond = drop_diamond\n self.drop_clue_geode = drop_clue_geode\n self.conf = conf\n\n def _is_inventory_full(self) -> bool:\n \"\"\"\n Helper function to determine if the player's inventory is full. 
Looks\n for a \"your inventory is too full to hold any more resources\" chat\n message.\n\n Returns:\n Returns True if the player's inventory is full,\n returns False otherwise.\n \"\"\"\n log.debug(\"Checking for full inventory.\")\n inventory_full = vis.Vision(\n region=vis.CHAT_MENU,\n loop_num=3,\n needle=\"./needles/chat-menu/mining-inventory-full.png\",\n conf=0.85,\n ).wait_for_needle()\n if inventory_full is True:\n return True\n log.debug(\"Inventory is not full.\")\n return False\n\n def _mine_rock(self, rock_full_needle, rock_empty_needle) -> None:\n \"\"\"\n Helper function to mine a given rock until it's been depleted.\n\n Raises:\n Raises start.RockEmpty if the given rock is already depleted.\n\n Raises start.InventoryFull if the player's inventory is too full to\n mine the rock.\n\n Raises start.TimeoutException if it took too long to mine the rock.\n \"\"\"\n # If rock is full, begin mining it.\n # Move the mouse away from the rock so it doesn't interfere with\n # matching the needle.\n rock_full = vis.Vision(\n region=vis.GAME_SCREEN,\n loop_num=1,\n needle=rock_full_needle,\n conf=self.conf,\n ).click_needle(move_away=True)\n\n if rock_full is False:\n raise start.RockEmpty(\"Rock is already empty!\")\n\n # Wait until the rock is empty or the inventory is full.\n # Check for both at the same time since some rocks (e.g. in Camdozaal Mine)\n # provide multiple ore and may create a full inventory before the rock\n # is empty.\n for _ in range(20):\n\n if self._is_inventory_full() is True:\n raise start.InventoryFull(\"Inventory is full!\")\n\n rock_empty = vis.Vision(\n region=vis.GAME_SCREEN,\n loop_num=3,\n conf=self.conf,\n needle=rock_empty_needle,\n loop_sleep_range=(100, 600),\n ).wait_for_needle()\n if rock_empty is True:\n log.info(\"Rock has been mined.\")\n return\n\n raise start.TimeoutException(\"Timeout waiting for rock to be mined!\")\n\n def mine_multiple_rocks(self) -> None:\n \"\"\"\n Main function used in the Mining class to mine multiple rocks in\n sequence. This function alternates mining among the rocks that were\n provided. All rocks must be of the same ore type.\n\n Returns:\n Returns once an attempt has been made to mine all the rocks given.\n\n Raises:\n Raises start.InventoryFull if the player's inventory is too full to\n mine the rock.\n\n Raises start.TimeoutException if it took too long to mine the rock.\n\n \"\"\"\n for rocks in self.rocks:\n # Unpack each tuple in the rocks[] list to obtain the \"full\"\n # and \"empty\" versions of each ore.\n (rock_full_needle, rock_empty_needle) = rocks\n try:\n self._mine_rock(rock_full_needle, rock_empty_needle)\n except start.RockEmpty:\n pass\n except start.InventoryFull as error:\n raise error\n except start.TimeoutException as error:\n raise error\n\n def drop_inv_ore(self) -> None:\n \"\"\"\n Drops mined ore ore and other mined items from inventory.\n\n Returns:\n Returns if ore and/or other mined items were successfully dropped.\n\n Raises:\n Raises start.InefficientUseOfInventory when the number of free\n inventory spaces available would result in inefficient or overly\n arduous gameplay.\n\n Raises Exception if no ore could be found in the inventory to drop.\n \"\"\"\n # Raise an error if we have <=5 ores in the inventory, as it's very\n # inefficient to mine with an inventory so small.\n ores_in_inventory = vis.Vision(region=vis.INV, needle=self.ore).count_needles()\n if ores_in_inventory <= 5:\n raise start.InefficientUseOfInventory(\n \"Free inventory too small! 
Must have at least 5 free spaces!\"\n )\n\n ore_dropped = behavior.drop_item(item=self.ore)\n if ore_dropped is False:\n raise Exception(\"Could not find any ore to drop!\")\n\n # Iterate through the other items that could be dropped. If any of them\n # is true, drop that item.\n non_ore_items = [\n (\n self.drop_sapphire,\n \"./needles/items/uncut-sapphire.png\",\n ),\n (\n self.drop_emerald,\n \"./needles/items/uncut-emerald.png\",\n ),\n (\n self.drop_ruby,\n \"./needles/items/uncut-ruby.png\",\n ),\n (\n self.drop_diamond,\n \"./needles/items/uncut-diamond.png\",\n ),\n (\n self.drop_clue_geode,\n \"./needles/items/clue-geode.png\",\n ),\n ]\n for item in non_ore_items:\n (drop_item_bool, path) = item\n if drop_item_bool is True:\n behavior.drop_item(item=str(path))\n return\n return\n\n\nclass Smithing:\n \"\"\"\n Class for all functions related to training the Smithing skill.\n\n Args:\n item_in_menu (file): Filepath to the item to select in the smithing menu.\n \"-bank.png\" items can be used here.\n bar_type (file): Filepath to the bar to use, as it appears in the inventory.\n bars_required (int): The number of bars required to smith the desired item.\n anvil (file): Filepath to the anvil to use, as it appears in the game world.\n uncompleted_inv (file): Filepath to the uncompleted inventory needle. We\n know we're done smithing when this needle can't\n be found.\n \"\"\"\n\n def __init__(\n self, item_in_menu: str, bar_type: str, bars_required: int, anvil: str\n ):\n self.item_in_menu = item_in_menu\n self.bar_type = bar_type\n self.bars_required = bars_required\n self.anvil = anvil\n\n if self.bars_required > 5:\n raise Exception(\"The value of bars_required must be <= 5!\")\n\n def click_anvil(self) -> bool:\n \"\"\"\n Clicks the given anvil.\n\n Returns:\n Returns True once the smithing menu appears.\n \"\"\"\n log.info(\"Attempting to click anvil.\")\n\n anvil_clicked = vis.Vision(\n region=vis.GAME_SCREEN,\n needle=self.anvil,\n loop_num=3,\n loop_sleep_range=(500, 1000),\n conf=0.85,\n ).click_needle()\n\n if anvil_clicked is False:\n log.error(\"Unable to find anvil %s!\", self.anvil)\n return False\n\n smith_menu_open = vis.Vision(\n region=vis.CLIENT,\n needle=\"./needles/buttons/close.png\",\n loop_num=30,\n ).wait_for_needle()\n\n misc.sleep_rand_roll(chance_range=(20, 35), sleep_range=(1000, 6000))\n\n if smith_menu_open is False:\n log.error(\"Timed out waiting for smithing menu.\")\n return False\n\n return True\n\n def smith_items(self) -> bool:\n \"\"\"\n Smiths an inventory of the given item.\n\n Returns:\n Returns True once done smithing.\n \"\"\"\n clicked_anvil = self.click_anvil()\n if clicked_anvil is False:\n log.error(\"Unable to find anvil %s!\", self.anvil)\n return False\n\n log.info(\"Attempting to select item to smith.\")\n\n menu_clicked = vis.Vision(\n region=vis.GAME_SCREEN,\n needle=self.item_in_menu,\n loop_num=3,\n loop_sleep_range=(500, 1000),\n conf=0.85,\n ).click_needle()\n if menu_clicked is False:\n log.error(\"Unable to click menu item %s!\", self.item_in_menu)\n return False\n\n log.info(\"Smithing...\")\n\n # Wait for either a level-up or for smithing to finish.\n for _ in range(1, 60):\n misc.sleep_rand(1000, 3000)\n\n # Based the number of bars we need to smith the current item, we'll\n # end up with a different number of bars leftover.\n if self.bars_required == 5:\n bars_leftover = 2\n elif self.bars_required == 2:\n bars_leftover = 1\n else:\n bars_leftover = 0\n\n # We're done smithing when the number of bars in our inventory is\n 
# equal to bars_leftover.\n bars_remaining = vis.Vision(\n region=vis.INV, needle=self.bar_type, conf=0.9\n ).count_needles()\n if bars_remaining <= bars_leftover:\n log.info(\"Done smithing.\")\n return True\n\n # If the player levels-up while smithing, restart.\n if wait_for_level_up(1) is True:\n self.smith_items()\n\n return False\n",
"id": "2979507",
"language": "Python",
"matching_score": 3.9011030197143555,
"max_stars_count": 0,
"path": "ocvbot/skills.py"
},
{
"content": "# coding=UTF-8\n\"\"\"\nModule for \"seeing\" the client.\n\n\"\"\"\nimport logging as log\nimport pathlib\n\nimport pyautogui as pag\nfrom ocvbot import inputs\nfrom ocvbot import misc\nfrom ocvbot import startup as start\n\n# -------------------------------------------------------------------------------------------------\n# Setup the necessary region tuples for the Vision class and orient the client.\n# -------------------------------------------------------------------------------------------------\n\n# Set initial values for vision regions.\n# See ./docs/client_anatomy.png for more info.\n# Captures the width and height of various different elements within the\n# game client. Units are in pixels.\n\nBANK_ITEMS_WINDOW_WIDTH = 375\nBANK_ITEMS_WINDOW_HEIGHT = 215\nBANK_ITEMS_WINDOW = (0, 0, 0, 0)\n\nCHAT_MENU_WIDTH = 506\nCHAT_MENU_HEIGHT = 129\nCHAT_MENU = (0, 0, 0, 0)\n\nCHAT_MENU_RECENT_WIDTH = 490\nCHAT_MENU_RECENT_HEIGHT = 17\nCHAT_MENU_RECENT = (0, 0, 0, 0)\n\nCLIENT_WIDTH = 765\nCLIENT_HEIGHT = 503\nCLIENT = (0, 0, 0, 0)\n\nDISPLAY_WIDTH = pag.size().width\nDISPLAY_HEIGHT = pag.size().height\nDISPLAY = (0, 0, DISPLAY_WIDTH, DISPLAY_HEIGHT)\n\nGAME_SCREEN_WIDTH = 512\nGAME_SCREEN_HEIGHT = 340\nGAME_SCREEN = (0, 0, 0, 0)\n\nINV_WIDTH = 186\nINV_HEIGHT = 262\nINV_HALF_WIDTH = round((INV_WIDTH / 2) + 5)\nINV_HALF_HEIGHT = round(INV_HEIGHT / 2)\nINV = (0, 0, 0, 0)\nINV_BOTTOM = (0, 0, 0, 0)\nINV_RIGHT_HALF = (0, 0, 0, 0)\nINV_LEFT_HALF = (0, 0, 0, 0)\n\nLOGIN_FIELD_WIDTH = 258\nLOGIN_FIELD_HEIGHT = 12\nLOGIN_FIELD = (0, 0, 0, 0)\nPASS_FIELD = (0, 0, 0, 0)\n\nMINIMAP_WIDTH = 146\nMINIMAP_HEIGHT = 151\nMINIMAP = (0, 0, 0, 0)\n\nMINIMAP_SLICE_WIDTH = 85\nMINIMAP_SLICE_HEIGHT = 85\nMINIMAP_SLICE = (0, 0, 0, 0)\n\nSIDE_STONES_WIDTH = 249\nSIDE_STONES_HEIGHT = 366\nSIDE_STONES = (0, 0, 0, 0)\n\n# TODO\ndef wait_for_needle_list(\n loops: int,\n needle_list: list[tuple[str, tuple[int, int, int, int]]],\n sleep_range: tuple[int, int],\n):\n \"\"\"\n Works like vision.wait_for_needle(), except multiple needles can be\n searched for simultaneously.\n\n Args:\n loops: The number of tries to look for each needle in needle_list.\n needle_list: A list of filepaths to the needles to look for. Each\n item in the list is a 2-tuple containing:\n - The filepath to the needle.\n - The region in which to search for that needle.\n sleep_range: A 2-tuple containing the minimum and maximum number\n of miliseconds to wait after each loop.\n\n Returns:\n If a needle in needle_list is found, returns a 2-tuple containing\n the ltwh dimensions of the needle and the index of the needle in\n needle_list (This is so the function knows which needle was found).\n\n Returns false if no needles in needle_list could be found.\n\n \"\"\"\n for _ in range(1, loops):\n\n for item in needle_list:\n needle, region = item\n\n needle_found = Vision(\n region=region, needle=needle, loop_num=1\n ).wait_for_needle(get_tuple=True)\n if needle_found is True:\n return needle_found, needle_list.index(needle)\n\n misc.sleep_rand(sleep_range[0], sleep_range[1])\n\n return False\n\n\n# TODO: Add examples of usage.\nclass Vision:\n \"\"\"\n Contains methods for locating images on the display.\n All coordinates are relative to the top left corner of the display.\n All coordinates are in a (left, top, width, height) format.\n\n Args:\n region (tuple): A 4-tuple containing the Left, Top, Width, and\n Height of the region in which to look for the\n needle.\n needle (file): The image to search within the (ltwh) coordinates\n for. 
Must be a filepath.\n loctype (str): Whether to return the needle's (ltwh) coordinates\n or its (X, Y) center.\n regular = Returns the needle's left, top, width, and height\n as a 4-tuple.\n center = Returns the (X, Y) coordinates of the needle's\n center as a 2-tuple (relative to the display's\n dimensions).\n conf (float): The confidence value required to match the needle\n successfully, expressed as a decimal <= 1. This is\n used by PyAutoGUI, default is 0.95.\n loop_num (int): The number of times wait_for_needle() will search\n the given coordinates for the needle, default is\n 10.\n loop_sleep_range (tuple): A 2-tuple containing the minimum and\n maximum number of miliseconds to wait\n between image-search loops. Used by\n the wait_for_needle() method, default\n is (0, 100).\n grayscale (bool): Converts the haystack to grayscale before\n searching within it. Speeds up searching by\n about 30%, default is false.\n\n \"\"\"\n\n def __init__(\n self,\n region: tuple[int, int, int, int],\n needle: str,\n loctype: str = \"regular\",\n conf: float = 0.95,\n # TODO: Move to a parameter of wait_for_needle().\n loop_num: int = 10,\n # TODO: Move to a parameter of wait_for_needle().\n loop_sleep_range: tuple[int, int] = (0, 100),\n grayscale: bool = False,\n ):\n self.grayscale = grayscale\n self.region = region\n self.needle = needle\n self.loctype = loctype\n self.conf = conf\n self.loop_num = loop_num\n self.loop_sleep_range = loop_sleep_range\n\n # TODO: Add examples of usage.\n def find_needle(self):\n \"\"\"\n Searches within the self.ltwh coordinates for self.needle.\n\n Returns:\n If the needle is found and self.loctype is `regular`, returns\n the needle's left/top/width/height dimensions as a 4-tuple.\n\n If the needle is found and self.loctype is `center`, returns\n coordinates of the needle's (X, Y) center as a 2-tuple.\n\n If the needle is not found, returns False.\n\n \"\"\"\n # Make sure file path is OS-agnostic.\n needle = str(pathlib.Path(self.needle))\n\n if self.loctype == \"regular\":\n needle_coords = pag.locateOnScreen(\n needle,\n confidence=self.conf,\n grayscale=self.grayscale,\n region=self.region,\n )\n if needle_coords is not None:\n # log.debug(\"Found regular image %s, %s\", needle, needle_coords)\n return needle_coords\n # log.debug(\"Cannot find regular image %s, conf=%s\", needle, self.conf)\n return False\n\n elif self.loctype == \"center\":\n needle_coords = pag.locateCenterOnScreen(\n needle,\n confidence=self.conf,\n grayscale=self.grayscale,\n region=self.region,\n )\n if needle_coords is not None:\n # log.debug(\"Found center of image %s, %s\", needle, needle_coords)\n return needle_coords\n # log.debug(\"Cannot find center of image %s, conf=%s\", needle, self.conf)\n return False\n\n raise RuntimeError(\n \"self.loctype must be 'regular' or 'center', got '%s'\", self.loctype\n )\n\n # TODO: Add examples of usage.\n def wait_for_needle(self, get_tuple: bool = False):\n \"\"\"\n Repeatedly searches within the self.ltwh coordinates for the needle.\n\n Args:\n get_tuple (bool): Whether to return a tuple containing the\n needle's coordinates, default is False.\n\n Returns:\n If get_tuple is False, returns True if needle was found.\n\n If get_tuple is true and self.loctype is `regular`, returns\n a 4-tuple containing the (left, top, width, height) coordinates\n of the needle. 
If self.loctype is `center`, returns a 2-tuple\n containing the (X, Y) center of the needle.\n\n Returns False if needle was not found.\n\n \"\"\"\n # Add 1 to self.loop_num because if loop_num=1, it won't loop at\n # all.\n for tries in range(1, (self.loop_num + 1)):\n\n needle_coords = Vision.find_needle(self)\n\n if isinstance(needle_coords, tuple) is True:\n log.debug(\"Found %s after trying %s times.\", self.needle, tries)\n if get_tuple is True:\n return needle_coords\n return True\n log.debug(\"Cannot find %s, tried %s times.\", self.needle, tries)\n misc.sleep_rand(self.loop_sleep_range[0], self.loop_sleep_range[1])\n\n log.debug(\"Timed out looking for %s\", self.needle)\n return False\n\n # TODO: Add examples of usage.\n def click_needle(\n self,\n sleep_range: tuple[int, int, int, int] = (50, 200, 50, 200),\n move_duration_range: tuple[int, int] = (1, 50),\n button: str = \"left\",\n move_away: bool = False,\n number_of_clicks: int = 1,\n ) -> bool:\n \"\"\"\n Moves the mouse to the provided needle image and clicks on\n it. Automatically randomizes the location the mouse cursor\n will click to based on the dimensions of the needle image.\n\n Args:\n sleep_range (tuple): Passed to the Mouse class in inputs.py,\n see its docstring for more info.\n move_duration_range (tuple): Passed to the Mouse class in\n inputs.py, see its docstring for\n more info.\n button (str): The mouse button to use when clicking on the\n needle, default is `left`.\n move_away (bool): Whether to move the mouse out of the way\n after clicking on the needle. Useful when\n we needs to determine the status of a button\n that the mouse just clicked.\n number_of_clicks (int): Passed to the click_coord() function of the\n Mouse class, see its docstring for more\n info. Default is 1.\n\n Returns:\n Returns True if the needle was clicked on successfully,\n returns False otherwise.\n\n \"\"\"\n log.debug(\"Looking for %s to click on.\", self.needle)\n\n needle_coords = self.wait_for_needle(get_tuple=True)\n\n if isinstance(needle_coords, tuple) is True:\n # Randomize the location the mouse cursor will move to using\n # the dimensions of needle image.\n # The mouse will click anywhere within the needle image.\n inputs.Mouse(\n region=needle_coords,\n sleep_range=sleep_range,\n move_duration_range=move_duration_range,\n button=button,\n ).click_coord(number_of_clicks=number_of_clicks)\n\n log.debug(\"Clicking on %s\", self.needle)\n\n if move_away is True:\n inputs.Mouse(\n region=(25, 25, 100, 100), move_duration_range=(50, 200)\n ).moverel()\n return True\n return False\n\n def count_needles(self):\n \"\"\"\n Counts the number of needles found within the region specified.\n\n Examples:\n Count the number of iron bars in the player's inventory:\n vision.Vision(region=vis.inv, needle=\"./needles/items/iron-bar.png\").count_needles()\n\n Returns:\n Returns an int.\n \"\"\"\n # Make sure file path is OS-agnostic.\n needle = str(pathlib.Path(self.needle))\n\n try:\n needles_coords = pag.locateAllOnScreen(\n needle,\n confidence=self.conf,\n grayscale=self.grayscale,\n region=self.region,\n )\n needles_coords_list = list(needles_coords)\n number_of_needles = len(needles_coords_list)\n return number_of_needles\n\n # If no needles can be found, then the number of needles is 0.\n except ImageNotFoundException:\n return 0\n\n\n# TODO: Add examples of usage.\n# TODO: Break out an \"is_logged_in\" function.\ndef orient(\n region: tuple[int, int, int, int] = (DISPLAY),\n launch_client: bool = False,\n):\n \"\"\"\n Looks for an 
icon to orient the client. If it's found, use its\n location within the game client to determine the coordinates of the\n game client relative to the display's coordinates.\n\n This function is also used to determine if the client is logged out.\n This is generally one of the first functions that is run upon script\n startup.\n\n Args:\n region (tuple): A 4-tuple containing the left, top, width, and\n height of the coordinate space to search within,\n relative to the display's coordinates. By default\n uses the entire display.\n\n Raises:\n Raises an exception if the client cannot be found, or if the\n function can't determine if the client is logged in or logged\n out.\n\n Returns:\n If client is logged in, returns a 2-tuple containing a string\n with the text \"logged_in\" and a 2-tuple of the center (X, Y)\n coordinates of the orient needle.\n\n If client is logged out, returns a 2-tuple containing a string\n with the text \"logged_out\" and a 2-tuple of the center (X, Y)\n coordinates of the orient-logged-out needle.\n\n \"\"\"\n logged_in = Vision(\n region=region,\n needle=\"needles/minimap/orient.png\",\n loctype=\"center\",\n loop_num=1,\n conf=0.8,\n ).wait_for_needle(get_tuple=True)\n if isinstance(logged_in, tuple) is True:\n log.info(\"Client is logged in\")\n return \"logged_in\", logged_in\n\n # If the client is not logged in, check if it's logged out.\n logged_out = Vision(\n region=region,\n needle=\"needles/login-menu/orient-logged-out.png\",\n loctype=\"center\",\n loop_num=1,\n conf=0.8,\n ).wait_for_needle(get_tuple=True)\n if isinstance(logged_out, tuple) is True:\n log.info(\"Client is logged out\")\n return \"logged_out\", logged_out\n\n if launch_client is True:\n # TODO: Write start_client()\n # start_client()\n # Try 10 times to find the login screen after launching the client.\n for _ in range(1, 10):\n misc.sleep_rand(8000, 15000)\n orient(region=region, launch_client=False)\n log.critical(\"Could not find client! %s\", launch_client)\n raise Exception(\"Could not find client!\")\n\n\n# TODO: add 'configure camera' function that clicks on compass, zooms in camera, and holds down up arrow\n# only click on the compass if it isn't perfectly aligned\n\n\ndef init() -> None:\n \"\"\"\n Locates the client and sets the value of the vision regions.\n This function MUST be run before OCVBot can do anything else.\n \"\"\"\n\n (client_status, anchor) = orient(region=DISPLAY)\n (client_left, client_top) = anchor\n\n if client_status == \"logged_in\":\n client_left -= 735\n client_top -= 21\n elif client_status == \"logged_out\":\n client_left -= 183\n client_top -= 59\n\n # Each of these tuples contains coordinates for the \"region\" parameter\n # of PyAutoGUI's Locate() functions. 
These tuples are used by methods\n # in the Vision class to look for needles within the specified set of\n # coordinates, rather than within the entire display's coordinates,\n # which is much faster.\n\n # All coordinates are in a (left, top, width, height) format, to match\n # PyAutoGUI.\n\n # The fixed-width game client.\n global CLIENT\n CLIENT = (client_left, client_top, CLIENT_WIDTH, CLIENT_HEIGHT)\n\n # The player's inventory.\n inv_left = client_left + 548\n inv_top = client_top + 205\n global INV\n INV = (inv_left, inv_top, INV_WIDTH, INV_HEIGHT)\n\n # Bottom half of the player's inventory.\n inv_bottom_left = inv_left\n inv_bottom_top = inv_top + INV_HALF_HEIGHT\n global INV_BOTTOM\n INV_BOTTOM = (\n inv_bottom_left,\n inv_bottom_top,\n INV_WIDTH,\n INV_HALF_HEIGHT,\n )\n\n # Right half of the player's inventory.\n inv_right_half_left = (inv_left + INV_HALF_WIDTH) - 5\n inv_right_half_top = inv_top\n global INV_RIGHT_HALF\n INV_RIGHT_HALF = (\n inv_right_half_left,\n inv_right_half_top,\n INV_HALF_WIDTH,\n INV_HEIGHT,\n )\n\n # Left half of the player's inventory.\n inv_left_half_left = inv_left\n inv_left_half_top = inv_top\n global INV_LEFT_HALF\n INV_LEFT_HALF = (\n inv_left_half_left,\n inv_left_half_top,\n INV_HALF_WIDTH,\n INV_HEIGHT,\n )\n\n # The \"gameplay screen\". This is the screen that displays the player\n # character and the game world.\n game_screen_left = client_left + 4\n game_screen_top = client_top + 4\n global GAME_SCREEN\n GAME_SCREEN = (\n game_screen_left,\n game_screen_top,\n GAME_SCREEN_WIDTH,\n GAME_SCREEN_HEIGHT,\n )\n\n # Banking window, minus the tabs at the top and other surrounding elements.\n # This is done to prevent the bot from attempting to withdrawal items by\n # clicking on their tab icons\n bank_items_window_left = game_screen_left + 68\n bank_items_window_top = game_screen_top + 77\n global BANK_ITEMS_WINDOW\n BANK_ITEMS_WINDOW = (\n bank_items_window_left,\n bank_items_window_top,\n BANK_ITEMS_WINDOW_WIDTH,\n BANK_ITEMS_WINDOW_HEIGHT,\n )\n\n # The player's inventory, plus the top and bottom \"side stone\" tabs that\n # open all the different menus.\n side_stones_left = client_left + 516\n side_stones_top = client_top + 166\n global SIDE_STONES\n SIDE_STONES = (\n side_stones_left,\n side_stones_top,\n SIDE_STONES_WIDTH,\n SIDE_STONES_HEIGHT,\n )\n\n # Chat menu.\n chat_menu_left = client_left + 7\n chat_menu_top = client_top + 345\n global CHAT_MENU\n CHAT_MENU = (\n chat_menu_left,\n chat_menu_top,\n CHAT_MENU_WIDTH,\n CHAT_MENU_HEIGHT,\n )\n\n # The most recent chat message.\n chat_menu_recent_left = chat_menu_left - 3\n chat_menu_recent_top = chat_menu_top + 98\n global CHAT_MENU_RECENT\n CHAT_MENU_RECENT = (\n chat_menu_recent_left,\n chat_menu_recent_top,\n CHAT_MENU_RECENT_WIDTH,\n CHAT_MENU_RECENT_HEIGHT,\n )\n\n # The \"Login\" field on the main login screen.\n login_field_left = client_left + 273\n login_field_top = client_top + 242\n global LOGIN_FIELD\n LOGIN_FIELD = (\n login_field_left,\n login_field_top,\n LOGIN_FIELD_WIDTH,\n LOGIN_FIELD_HEIGHT,\n )\n\n # The \"Password\" field on the main login screen.\n pass_field_left = client_left + 275\n pass_field_top = client_top + 258\n global PASS_FIELD\n PASS_FIELD = (\n pass_field_left,\n pass_field_top,\n LOGIN_FIELD_WIDTH,\n LOGIN_FIELD_HEIGHT,\n )\n\n # The entire minimap.\n minimap_left = client_left + 571\n minimap_top = client_top + 11\n global MINIMAP\n MINIMAP = (minimap_left, minimap_top, MINIMAP_WIDTH, MINIMAP_HEIGHT)\n\n # The current minimap \"slice\" for locating 
the player on the world map.\n # The largest area of the minimap, centered on the player, that can be\n # used to determine the player's location for the travel() function.\n minimap_slice_left = client_left + 599\n minimap_slice_top = client_top + 43\n global MINIMAP_SLICE\n MINIMAP_SLICE = (\n minimap_slice_left,\n minimap_slice_top,\n MINIMAP_SLICE_WIDTH,\n MINIMAP_SLICE_HEIGHT,\n )\n",
"id": "3896155",
"language": "Python",
"matching_score": 7.484404563903809,
"max_stars_count": 0,
"path": "ocvbot/vision.py"
},
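The Vision class in ocvbot/vision.py above carries several "TODO: Add examples of usage" markers. A minimal usage sketch, assuming vis.init() can locate the client and that the needle image exists on disk (the iron-bar path is borrowed from the count_needles() docstring), might look like this:

from ocvbot import vision as vis

vis.init()  # locates the client and populates CLIENT, INV, GAME_SCREEN, ...

# Look for a needle in the inventory region, click it, then count how many
# copies of it remain.
iron_bar = vis.Vision(
    region=vis.INV,
    needle="./needles/items/iron-bar.png",
    loop_num=10,
    conf=0.95,
)
if iron_bar.wait_for_needle():
    iron_bar.click_needle(move_away=True)
    print("Iron bars in inventory:", iron_bar.count_needles())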
{
"content": "#!/usr/bin/env python3\n# coding=UTF-8\n\"\"\"\nCaptures a screenshot of the OSRS client and overlays rectangles onto the\nscreenshot cooresponding to the various coordinate spaces used by the bot.\n\nThis is only useful for debugging or development purposes.\n\n\"\"\"\nimport logging as log\nimport pathlib\nimport subprocess\nimport sys\n\nimport screenshot\n\n# Ensure ocvbot files are added to sys.path.\nSCRIPTPATH = str(pathlib.Path(__file__).parent.parent.absolute())\nsys.path.insert(1, SCRIPTPATH)\n\nfrom ocvbot import vision as vis, startup as start\n\nvis.init()\n\nlog.basicConfig(\n format=\"%(asctime)s -- %(filename)s.%(funcName)s - %(message)s\", level=\"INFO\"\n)\n\n# We must take a screenshot of the entire display instead of only the client\n# because the Vision objects (e.g. vis.client_left and vis.client_top, etc.)\n# use coordinates that are relative to the entire display.\n# Cropping is performed after the Vision object regions have been highlighted.\nSCREENSHOT_PATH = str(screenshot.main(region=vis.DISPLAY))\n\n\ndef crop_to_client(file_name: str) -> None:\n log.info(\"Creating %s\", file_name)\n try:\n subprocess.run(\n \"convert\"\n + \" \"\n + file_name\n + \" -crop\"\n + \" \"\n + str(vis.CLIENT_WIDTH)\n + \"x\"\n + str(vis.CLIENT_HEIGHT)\n + \"+\"\n + str(vis.client_left)\n + \"+\"\n + str(vis.client_top)\n + \" \"\n + file_name,\n check=True,\n shell=True,\n )\n except FileNotFoundError:\n log.critical(\"ImageMagick not found!\")\n\n\ndef mark_region(\n region_name: str,\n region_left: int,\n region_top: int,\n region_width: int,\n region_height: int,\n) -> None:\n file_name = \"ocvbot_\" + region_name + \".png\"\n log.info(\"Creating %s\", file_name)\n\n region_coordinates = (\n str(region_left)\n + \" \"\n + str(region_top)\n + \" \"\n + str(region_left + region_width)\n + \" \"\n + str(region_top + region_height)\n )\n convert_rectangle_arg = ' -draw \"rectangle ' + region_coordinates + '\"'\n\n try:\n subprocess.run(\n \"convert \"\n + SCREENSHOT_PATH\n + ' -fill \"rgba(255,0,0,0.5)\"'\n + convert_rectangle_arg\n + \" \"\n + file_name,\n check=True,\n shell=True,\n )\n except FileNotFoundError:\n log.critical(\"ImageMagick not found!\")\n\n crop_to_client(file_name)\n pngcrush(file_name)\n\n\ndef pngcrush(filename: str) -> None:\n try:\n subprocess.call([\"pngcrush\", \"-ow \", filename])\n except FileNotFoundError:\n log.warning(\"pngcrush not present!\")\n\n\ndef main() -> None:\n # Unpack vision region tuples, because mark_region() cannot take a tuple as\n # an argument.\n (vis.client_left, vis.client_top, vis.CLIENT_WIDTH, vis.CLIENT_HEIGHT) = vis.CLIENT\n (\n vis.inv_left,\n vis.inv_top,\n vis.INV_WIDTH,\n vis.INV_HEIGHT,\n ) = vis.INV\n (\n vis.inv_bottom_left,\n vis.inv_bottom_top,\n vis.INV_WIDTH,\n vis.INV_HALF_HEIGHT,\n ) = vis.INV_BOTTOM\n (\n vis.inv_right_half_left,\n vis.inv_right_half_top,\n vis.INV_HALF_WIDTH,\n vis.INV_HEIGHT,\n ) = vis.INV_RIGHT_HALF\n (\n vis.inv_left_half_left,\n vis.inv_left_half_top,\n vis.INV_HALF_WIDTH,\n vis.INV_HEIGHT,\n ) = vis.INV_LEFT_HALF\n (\n vis.game_screen_left,\n vis.game_screen_top,\n vis.GAME_SCREEN_WIDTH,\n vis.GAME_SCREEN_HEIGHT,\n ) = vis.GAME_SCREEN\n (\n vis.bank_items_window_left,\n vis.bank_items_window_top,\n vis.BANK_ITEMS_WINDOW_WIDTH,\n vis.BANK_ITEMS_WINDOW_HEIGHT,\n ) = vis.BANK_ITEMS_WINDOW\n (\n vis.side_stones_left,\n vis.side_stones_top,\n vis.SIDE_STONES_WIDTH,\n vis.SIDE_STONES_HEIGHT,\n ) = vis.SIDE_STONES\n (\n vis.chat_menu_left,\n vis.chat_menu_top,\n vis.CHAT_MENU_WIDTH,\n 
vis.CHAT_MENU_HEIGHT,\n ) = vis.CHAT_MENU\n (\n vis.chat_menu_recent_left,\n vis.chat_menu_recent_top,\n vis.CHAT_MENU_RECENT_WIDTH,\n vis.CHAT_MENU_RECENT_HEIGHT,\n ) = vis.CHAT_MENU_RECENT\n (\n vis.minimap_left,\n vis.minimap_top,\n vis.MINIMAP_WIDTH,\n vis.MINIMAP_HEIGHT,\n ) = vis.MINIMAP\n (\n vis.minimap_slice_left,\n vis.minimap_slice_top,\n vis.MINIMAP_SLICE_WIDTH,\n vis.MINIMAP_SLICE_HEIGHT,\n ) = vis.MINIMAP_SLICE\n\n # Import all the coordinate spaces to overlay onto the screenshot.\n # Create a separate file for coordinate space, as some of them\n # overlap.\n\n mark_region(\n \"client\",\n vis.client_left,\n vis.client_top,\n vis.CLIENT_WIDTH,\n vis.CLIENT_HEIGHT,\n )\n mark_region(\n \"inv\",\n vis.inv_left,\n vis.inv_top,\n vis.INV_WIDTH,\n vis.INV_HEIGHT,\n )\n mark_region(\n \"inv_bottom\",\n vis.inv_bottom_left,\n vis.inv_bottom_top,\n vis.INV_WIDTH,\n vis.INV_HALF_HEIGHT,\n )\n mark_region(\n \"inv_right_half\",\n vis.inv_right_half_left,\n vis.inv_right_half_top,\n vis.INV_HALF_WIDTH,\n vis.INV_HEIGHT,\n )\n mark_region(\n \"inv_left_half\",\n vis.inv_left_half_left,\n vis.inv_left_half_top,\n vis.INV_HALF_WIDTH,\n vis.INV_HEIGHT,\n )\n mark_region(\n \"game_screen\",\n vis.game_screen_left,\n vis.game_screen_top,\n vis.GAME_SCREEN_WIDTH,\n vis.GAME_SCREEN_HEIGHT,\n )\n mark_region(\n \"bank_items_window\",\n vis.bank_items_window_left,\n vis.bank_items_window_top,\n vis.BANK_ITEMS_WINDOW_WIDTH,\n vis.BANK_ITEMS_WINDOW_HEIGHT,\n )\n mark_region(\n \"side_stones\",\n vis.side_stones_left,\n vis.side_stones_top,\n vis.SIDE_STONES_WIDTH,\n vis.SIDE_STONES_HEIGHT,\n )\n mark_region(\n \"chat_menu\",\n vis.chat_menu_left,\n vis.chat_menu_top,\n vis.CHAT_MENU_WIDTH,\n vis.CHAT_MENU_HEIGHT,\n )\n mark_region(\n \"chat_menu_recent\",\n vis.chat_menu_recent_left,\n vis.chat_menu_recent_top,\n vis.CHAT_MENU_RECENT_WIDTH,\n vis.CHAT_MENU_RECENT_HEIGHT,\n )\n mark_region(\n \"minimap\",\n vis.minimap_left,\n vis.minimap_top,\n vis.MINIMAP_WIDTH,\n vis.MINIMAP_HEIGHT,\n )\n mark_region(\n \"minimap_slice\",\n vis.minimap_slice_left,\n vis.minimap_slice_top,\n vis.MINIMAP_SLICE_WIDTH,\n vis.MINIMAP_SLICE_HEIGHT,\n )\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "5573336",
"language": "Python",
"matching_score": 1.5114600658416748,
"max_stars_count": 44,
"path": "tools/vision_regions.py"
},
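tools/vision_regions.py above delegates the rectangle overlays to ImageMagick's convert. Where ImageMagick is unavailable, a similar overlay can be drawn with Pillow; this is only an illustrative alternative under that assumption, not part of the tool itself:

from PIL import Image, ImageDraw

def mark_region_pillow(screenshot_path: str, out_path: str,
                       left: int, top: int, width: int, height: int) -> None:
    # Mirrors the 'convert -fill "rgba(255,0,0,0.5)" -draw rectangle' call by
    # compositing a translucent red rectangle over the given region.
    image = Image.open(screenshot_path).convert("RGBA")
    overlay = Image.new("RGBA", image.size, (0, 0, 0, 0))
    ImageDraw.Draw(overlay).rectangle(
        [left, top, left + width, top + height], fill=(255, 0, 0, 128)
    )
    Image.alpha_composite(image, overlay).save(out_path)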
{
"content": "# coding=UTF-8\n\"\"\"\nFunctions for interacting with the banking window.\n\nUsed for:\n - opening the bank\n - closing the bank\n - withdrawing items\n - depositing items\n\n\"\"\"\nimport logging as log\n\nfrom ocvbot import interface\nfrom ocvbot import misc\nfrom ocvbot import startup as start\nfrom ocvbot import vision as vis\n\n# TODO: Add search_for_item() function.\n# TODO: Finish enter_bank_pin() function.\n\n\ndef bank_settings_check(setting: str, value: str) -> None:\n \"\"\"\n Checks for specific bank window configuration settings.\n Currently only the `quantity` setting is supported.\n\n Args:\n setting (str): The setting you wish to configure.\n quantity = Sets the value of the `quantity` setting. Available\n values are `1`, `5`, `10`, and `all`.\n placeholder = Sets the value of the `placeholder` setting.\n Available values are `set` and `unset`.\n value (str): The value you wish the setting to have.\n\n Examples:\n bank_settings_check(\"quantity\", \"all\")\n bank_settings_check(\"placeholder\", \"unset\")\n\n Raises:\n Raises a ValueError if the setting or value is not supported.\n\n Raises an Exception if the setting could not be set.\n\n \"\"\"\n if setting == \"quantity\":\n if value not in (\"1\", \"5\", \"10\", \"all\"):\n raise ValueError(\"Unsupported value for quantity setting!\")\n setting_unset = (\n \"./needles/bank/settings/\" + setting + \"/\" + value + \"-unset.png\"\n )\n setting_set = \"./needles/bank/settings/\" + setting + \"/\" + value + \"-set.png\"\n\n elif setting == \"placeholder\":\n if value == \"set\":\n setting_unset = \"./needles/bank/settings/placeholder/placeholder-unset.png\"\n setting_set = \"./needles/bank/settings/placeholder/placeholder-set.png\"\n elif value == \"unset\":\n setting_unset = \"./needles/bank/settings/placeholder/placeholder-set.png\"\n setting_set = \"./needles/bank/settings/placeholder/placeholder-unset.png\"\n else:\n raise ValueError(\"Unsupported value for placeholder setting!\")\n\n else:\n raise ValueError(\"Unsupported bank setting!\")\n\n try:\n log.debug(\"Checking if bank setting %s is set to %s\", setting, value)\n interface.enable_button(\n button_disabled=setting_unset,\n button_disabled_region=vis.GAME_SCREEN,\n button_enabled=setting_set,\n button_enabled_region=vis.GAME_SCREEN,\n )\n except Exception as error:\n raise Exception(\"Could not set bank setting!\") from error\n\n\ndef close_bank():\n \"\"\"\n Closes the bank window if it is open.\n\n Raises:\n Raises an exception if the bank window could not be closed.\n\n \"\"\"\n # Must use invert_match here because we want to check for the absence of\n # the `close` button.\n try:\n interface.enable_button(\n button_disabled=\"./needles/buttons/close.png\",\n button_disabled_region=vis.GAME_SCREEN,\n button_enabled=\"./needles/buttons/close.png\",\n button_enabled_region=vis.GAME_SCREEN,\n invert_match=True,\n )\n except Exception as error:\n raise Exception(\"Could not close bank window!\") from error\n\n\ndef deposit_inventory() -> None:\n \"\"\"\n Deposits entire inventory into the bank. 
Assumes the bank window is\n open and the \"deposit inventory\" button is visible.\n\n Raises:\n Raises an exception if the inventory could not be deposited.\n\n \"\"\"\n try:\n interface.enable_button(\n button_disabled=\"./needles/bank/deposit-inventory.png\",\n button_disabled_region=vis.GAME_SCREEN,\n button_enabled=\"./needles/side-stones/inventory/empty-inventory.png\",\n button_enabled_region=vis.INV,\n )\n except Exception as error:\n raise Exception(\"Could not deposit inventory!\") from error\n\n\ndef deposit_item(item, quantity) -> None:\n \"\"\"\n Deposits the given item into the bank. Assumes the bank window is\n open.\n\n Args:\n item (str): Filepath to an image of the item to deposit as it\n appears in the player's inventory.\n quantity (str): Quantity of the item to deposit. Available\n values are `1`, `5`, `10`, and `all`.\n\n Returns:\n Returns when item was successfully deposited into bank, or there were\n no items of that type in the inventory to begin with.\n\n Raises:\n Raises a ValueError if `quantity` doesn't match the available values.\n\n Raises a BankingError if too many items were deposited by mistake.\n Raises a BankingError if the item could not be deposited.\n\n \"\"\"\n # Count the initial number of the given item in the inventory.\n initial_number_of_items = vis.Vision(region=vis.INV, needle=item).count_needles()\n # If there are no matching items in the inventory to begin with, return.\n if initial_number_of_items == 0:\n log.debug(\"No items of type %s to deposit\", item)\n return\n\n # Determine the number of of items that should be removed from the inventory.\n if quantity == \"1\":\n items_to_deposit = 1\n elif quantity == \"5\":\n items_to_deposit = 5\n elif quantity == \"10\":\n items_to_deposit = 10\n elif quantity == \"all\":\n items_to_deposit = initial_number_of_items\n else:\n raise ValueError(\"Unsupported value for quantity argument!\")\n desired_number_of_items = initial_number_of_items - items_to_deposit\n\n log.info(\"Attempting to deposit %s of item %s\", quantity, item)\n\n # Make sure the correct quantity is deposited.\n bank_settings_check(\"quantity\", str(quantity))\n\n # Try clicking on the item multiple times.\n for _ in range(5):\n vis.Vision(\n region=vis.INV,\n needle=item,\n loop_num=3,\n ).click_needle(sleep_range=(0, 100, 0, 100), move_away=True)\n\n # Loop and wait until the item has been deposited.\n for _ in range(10):\n misc.sleep_rand(200, 700)\n final_number_of_items = vis.Vision(\n region=vis.INV, needle=item\n ).count_needles()\n if desired_number_of_items == final_number_of_items:\n log.debug(\"Deposited item %s\", item)\n return\n if desired_number_of_items > final_number_of_items:\n raise start.BankingError(\"Deposited too many items!\")\n\n raise start.BankingError(\"Could not deposit items!\")\n\n\ndef enter_bank_pin(pin=(start.config[\"main\"][\"bank_pin\"])) -> bool:\n \"\"\"\n Enters the user's bank PIN. 
Assumes the bank window is open.\n\n Args:\n pin (tuple): A 4-tuple of the player's PIN.\n\n Examples:\n enter_bank_pin(pin=1234)\n\n Returns:\n Returns True if the bank PIN was successfully entered or PIN\n window could not be found, returns False if PIN was incorrect\n\n \"\"\"\n pin = tuple(str(pin))\n # Confirm that the bank PIN screen is actually present.\n bank_pin_screen = vis.Vision(\n region=vis.GAME_SCREEN, needle=\"./needles/.png\", loop_num=1\n ).wait_for_needle(get_tuple=False)\n if bank_pin_screen is False:\n return True\n\n # Loop through the different PIN screens for each of the 4 digits.\n for pin_ordinal in range(1, 4):\n\n # Wait for the first/second/third/fourth PIN prompt screen to\n # appear.\n pin_ordinal_prompt = vis.Vision(\n region=vis.GAME_SCREEN, needle=\"./needles/\" + str(pin_ordinal), loop_num=1\n ).wait_for_needle(get_tuple=False)\n\n # Enter the first/second/third/fourth digit of the PIN.\n if pin_ordinal_prompt is True:\n enter_digit = vis.Vision(\n region=vis.GAME_SCREEN,\n needle=\"./needles/\" + pin[pin_ordinal],\n loop_num=1,\n ).click_needle()\n return True\n\n\ndef open_bank(direction) -> None:\n \"\"\"\n Opens the bank. Assumes the player is within 2 empty tiles of a bank booth.\n\n Args:\n direction (str): The cardinal direction of the bank booth relative to\n the player. Must be `north`, `south`, `east`, or\n `west`.\n\n Examples:\n open_bank(\"west\")\n\n Returns:\n Returns if bank was opened successfully or is already open.\n\n Raises:\n Raises a ValueError if an invalid direction is given.\n\n Raises an Exception if the bank could not be opened.\n\n \"\"\"\n if direction not in (\"north\", \"south\", \"east\", \"west\"):\n raise ValueError(\"Must provide a cardinal direction to open bank!\")\n\n bank_open = vis.Vision(\n region=vis.GAME_SCREEN, needle=\"./needles/buttons/close.png\", loop_num=1\n ).wait_for_needle()\n if bank_open is True:\n log.info(\"Bank window is already open.\")\n return\n\n log.info(\"Attempting to open bank window.\")\n for _ in range(5):\n one_tile = vis.Vision(\n region=vis.GAME_SCREEN,\n needle=\"./needles/game-screen/bank/bank-booth-\" + direction + \"-1-tile.png\",\n loop_num=1,\n conf=0.85,\n ).click_needle()\n\n two_tiles = vis.Vision(\n region=vis.GAME_SCREEN,\n needle=\"./needles/game-screen/bank/bank-booth-\"\n + direction\n + \"-2-tiles.png\",\n loop_num=1,\n conf=0.85,\n ).click_needle()\n\n if one_tile is True or two_tiles is True:\n bank_open = vis.Vision(\n region=vis.GAME_SCREEN,\n needle=\"./needles/buttons/close.png\",\n loop_num=10,\n ).wait_for_needle()\n if bank_open is True:\n return\n misc.sleep_rand(1000, 3000)\n\n raise Exception(\"Unable to open bank window!\")\n\n\ndef withdrawal_item(\n item_bank: str, item_inv: str, conf: float = 0.95, quantity: str = \"all\"\n) -> None:\n \"\"\"\n Withdrawals an item from the bank. Assumes the bank window is open and\n the item to withdrawal is visible. Does NOT check if the correct\n quantity is withdrawn.\n\n Args:\n item_bank (str): Filepath to an image of the item to withdrawal as it\n appears in the bank window.\n item_inv (str): Filepath to an image of the item to withdrawal as it\n appears in the player's inventory.\n conf (float): See the `conf` arg of the vision.Vision class. Default is\n 0.95\n quantity (str): The number of items to withdrawal. Available\n options are `1`, `5`, `10`, or `all`. 
Default is `all`.\n\n Examples:\n withdrawal_item(item_bank=\"./needles/items/raw-anchovies-bank.png\",\n item_inv=\"./needles/items/raw-anchovies.png\",\n conf=0.98)\n\n Returns:\n Returns if the item was successfully withdrawn from bank,\n\n Raises:\n Raises start.BankingError if item could not be withdrawn.\n\n \"\"\"\n log.info(\"Attempting to withdrawal item: %s\", item_bank)\n try:\n\n # Make sure no placeholders are left behind, as this makes image\n # matching much more difficult -- placeholders look very similar\n # to regular \"real\" items.\n bank_settings_check(\"placeholder\", \"unset\")\n\n # Ensure the correct quantity is withdrawn.\n bank_settings_check(\"quantity\", str(quantity))\n\n interface.enable_button(\n button_disabled=item_bank,\n button_disabled_region=vis.BANK_ITEMS_WINDOW,\n button_enabled=item_inv,\n button_enabled_region=vis.INV,\n loop_num=10,\n conf=conf,\n )\n\n except Exception as error:\n raise start.BankingError(\"Could not withdrawal item!\") from error\n",
"id": "2531001",
"language": "Python",
"matching_score": 3.8820574283599854,
"max_stars_count": 0,
"path": "ocvbot/banking.py"
},
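A hedged sketch of how the helpers in ocvbot/banking.py above could be chained into a single banking trip; the needle paths are the ones already shown in the withdrawal_item() docstring, and the player is assumed to be standing next to a bank booth to their west:

from ocvbot import banking

banking.open_bank("west")
banking.deposit_inventory()
banking.withdrawal_item(
    item_bank="./needles/items/raw-anchovies-bank.png",
    item_inv="./needles/items/raw-anchovies.png",
    quantity="all",
)
banking.close_bank()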
{
"content": "# coding=UTF-8\r\n\"\"\"\r\nConvenience functions for interacting with the buttons, toggles,\r\nswitches, and various other game interface elements.\r\n\r\n\"\"\"\r\nimport logging as log\r\n\r\nfrom ocvbot import vision as vis\r\n\r\n\r\n# TODO: Add a set_compass(direction) function, as the client now supports\r\n# right-clicking on the compass to set specific cardinal directions.\r\n\r\n\r\ndef enable_button(\r\n button_disabled: str,\r\n button_disabled_region: tuple[int, int, int, int],\r\n button_enabled: str,\r\n button_enabled_region: tuple[int, int, int, int],\r\n conf: float = 0.95,\r\n loop_num: int = 5,\r\n attempts: int = 5,\r\n invert_match: bool = False,\r\n):\r\n \"\"\"\r\n Enables a button in the interface. Tries multiple times to ensure the\r\n button has been enabled. Assumes the button's \"enabled\" state looks\r\n different from its \"disabled\" state.\r\n\r\n More generically, this function can also be used to confirm certain actions\r\n have taken place (see example #2).\r\n\r\n Args:\r\n button_disabled (str): Filepath to an image of the disabled version\r\n of the button.\r\n button_disabled_region (tuple): Vision region to use to search for the\r\n button_disabled image (e.g. `vis.inv` or\r\n `vis.side_stones` or `vis.game_screen`)\r\n button_enabled (str): Filepath to an image of the enabled version of\r\n the button.\r\n button_enabled_region (tuple): Vision region to use to search for the\r\n button_enabled image.\r\n conf (float): Confidence required to match button images. See the `conf`\r\n arg in the docstring of the `Vision` class for more info.\r\n Default is `0.95`.\r\n loop_num (int): Number of times to search button_enabled after clicking\r\n button_disabled. Default is 5.\r\n attempts (int): Number of times the function will try clicking on\r\n button_disabled. Default is 5.\r\n invert_match (bool): Setting this to True will cause the function to\r\n check for the absence of button_enabled instead\r\n of its presence (see example #3). Default is False.\r\n\r\n Examples:\r\n Open a side stone:\r\n enable_button(\"./needles/side-stones/attacks-deselected.png\",\r\n vis.side_stones,\r\n \"./needles/side-stones/attacks-selected.png\",\r\n vis.side_stones)\r\n\r\n Logout of the game client:\r\n enable_button(\"./needles/buttons/logout.png\", vis.inv,\r\n \"./needles/login-menu/orient.png\", vis.game_screen)\r\n\r\n Close the bank window. 
Since the \"close\" button disappears after\r\n clicking on it, we must invert the match:\r\n enable_button(\"./needles/buttons/close.png\", vis.game_screen,\r\n \"./needles/buttons/close.png\", vis.game_screen,\r\n 0.95, True)\r\n\r\n Returns:\r\n Returns True if the button was enabled or was already enabled.\r\n\r\n Raises:\r\n Raises an exception if the button could not be enabled.\r\n\r\n \"\"\"\r\n # Check if the button has already been enabled first.\r\n button_is_enabled = vis.Vision(\r\n region=button_enabled_region, needle=button_enabled, loop_num=1, conf=conf\r\n ).wait_for_needle()\r\n if invert_match is False:\r\n if button_is_enabled is True:\r\n log.debug(\"Button %s was already enabled\", button_enabled)\r\n return True\r\n elif invert_match is True:\r\n if button_is_enabled is False:\r\n log.debug(\"Button %s was already enabled (invert_match)\", button_enabled)\r\n return True\r\n\r\n # Try multiple times to enable the button.\r\n for _ in range(attempts):\r\n\r\n log.debug(\"Attempting to enable button %s\", button_enabled)\r\n\r\n # Move mouse out of the way after clicking so the function can\r\n # tell if the button is enabled.\r\n vis.Vision(\r\n region=button_disabled_region,\r\n needle=button_disabled,\r\n loop_num=3,\r\n ).click_needle(sleep_range=(0, 100, 0, 100), move_away=True)\r\n\r\n # See if the button has been enabled.\r\n button_is_enabled = vis.Vision(\r\n region=button_enabled_region,\r\n needle=button_enabled,\r\n loop_num=loop_num,\r\n conf=conf,\r\n ).wait_for_needle()\r\n if invert_match is False:\r\n if button_is_enabled is True:\r\n log.debug(\"Button %s has been enabled\", button_enabled)\r\n return True\r\n elif invert_match is True:\r\n if button_is_enabled is False:\r\n log.debug(\"Button %s has been enabled (invert_match)\", button_enabled)\r\n return True\r\n\r\n raise Exception(\"Could not enable button!\", button_enabled)\r\n",
"id": "3430742",
"language": "Python",
"matching_score": 0.20843113958835602,
"max_stars_count": 0,
"path": "ocvbot/interface.py"
},
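The docstring examples inside enable_button() still use the older lowercase region names (vis.side_stones, vis.inv, vis.game_screen), while vision.py above now exposes uppercase module-level constants. Assuming those constants and the needle paths from the docstring, a call would look roughly like:

from ocvbot import interface
from ocvbot import vision as vis

interface.enable_button(
    button_disabled="./needles/side-stones/attacks-deselected.png",
    button_disabled_region=vis.SIDE_STONES,
    button_enabled="./needles/side-stones/attacks-selected.png",
    button_enabled_region=vis.SIDE_STONES,
)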
{
"content": "# coding=UTF-8\n\"\"\"\nCompares OpenCV Template Matching speed of color image vs grayscale\nimage.\n\n\"\"\"\nimport cv2\n\n# Set to 1 to display the matched region of the haystack.\nshow_match = 1\n# The number of iterations from which to determine the average speed.\nruns = 5\n# The base path to use for haystacks and templates.\npath = \"../tests/haystacks/user-interface/maps/\"\n# The path to each image.\nhaystack = path + \"chunks/varrock-east-mine.png\"\nneedle = path + \"minimap/image_001.png\"\n\n# ----------------------------------------------------------------------\n\nhaystack_color = cv2.imread(haystack)\nneedle_color = cv2.imread(needle)\nhaystack_gray = cv2.imread(haystack, cv2.IMREAD_GRAYSCALE)\nneedle_gray = cv2.imread(needle, cv2.IMREAD_GRAYSCALE)\n\ndurations = []\nmatch_color = None\nmatch_gray = None\ncw = None\nch = None\ngw = None\ngh = None\n\n# Run the template match five times.\nfor _ in range(1, runs):\n start_time = cv2.getTickCount()\n rgb, cw, ch = needle_color.shape[::-1]\n result_color = cv2.matchTemplate(haystack_color, needle_color, cv2.TM_CCOEFF_NORMED)\n loc = cv2.minMaxLoc(result_color)\n match_color = loc[3]\n stop_time = cv2.getTickCount()\n duration = (stop_time - start_time) / cv2.getTickFrequency()\n durations.append(duration)\n\n# Get average duration for each run.\ncolor_avg = round((sum(durations) / runs), 3)\n# Convert to miliseconds\ncolor_avg = int(color_avg * 1000)\nprint(\"Color Avg =\", color_avg, \"miliseconds\")\n\ndurations = []\n# Do the same thing for grayscale versions of the images.\nfor _ in range(runs):\n start_time = cv2.getTickCount()\n gw, gh = needle_gray.shape[::-1]\n result_gray = cv2.matchTemplate(haystack_gray, needle_gray, cv2.TM_CCOEFF_NORMED)\n loc = cv2.minMaxLoc(result_gray)\n match_gray = loc[3]\n stop_time = cv2.getTickCount()\n duration = (stop_time - start_time) / cv2.getTickFrequency()\n durations.append(duration)\n\ngray_avg = round((sum(durations) / runs), 3)\ngray_avg = int(gray_avg * 1000)\nprint(\"Grayscale Avg =\", gray_avg, \"miliseconds\")\nprint(\"\\nGrayscale avg / Color avg =\", round((gray_avg / color_avg), 2))\n\nif show_match == 1:\n cv2.rectangle(\n haystack_color,\n match_color,\n (match_color[0] + cw, match_color[1] + ch),\n (0, 255, 0),\n 2,\n )\n cv2.imshow(\"haystack\", haystack_color)\n cv2.waitKey(0)\n\n cv2.rectangle(\n haystack_gray,\n match_gray,\n (match_gray[0] + gw, match_gray[1] + gh),\n (0, 255, 0),\n 2,\n )\n cv2.imshow(\"haystack\", haystack_gray)\n cv2.waitKey(0)\n",
"id": "3456633",
"language": "Python",
"matching_score": 2.884031295776367,
"max_stars_count": 44,
"path": "tools/opencv_benchmark.py"
},
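In tools/opencv_benchmark.py above, the color loop iterates range(1, runs) (i.e. runs - 1 times) while the average still divides by runs, so the color figure is slightly skewed. A small helper that times exactly `runs` iterations, sketched with the same OpenCV calls:

import cv2

def time_template_match(haystack, needle, runs: int = 5):
    """Return (best_location, best_score, average_seconds) over `runs` matches."""
    durations = []
    best_loc = best_score = None
    for _ in range(runs):
        start = cv2.getTickCount()
        result = cv2.matchTemplate(haystack, needle, cv2.TM_CCOEFF_NORMED)
        _, best_score, _, best_loc = cv2.minMaxLoc(result)
        durations.append((cv2.getTickCount() - start) / cv2.getTickFrequency())
    return best_loc, best_score, sum(durations) / len(durations)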
{
"content": "# coding=UTF-8\n\"\"\"\nScript to test OpenCV's Template Matching using different confidence\nlevels.\n\nDisplays the needle matched against the haystack in 0.05 confidence\nintervals from 1.0 to 0.05.\n\n\"\"\"\nimport cv2\nimport numpy as np\n\n# The base path to use for haystacks and templates.\npath = \"../tests/haystacks/user-interface/maps/\"\n# The path to each image.\nhaystack = path + \"chunks/varrock-east-mine.png\"\nneedle = path + \"minimap/image_001.png\"\n\n# Specify a confidence threshold that each match must exceed to qualify.\nconfidence = 1.0\n\n# ----------------------------------------------------------------------\n\n# Show the match at each confidence interval, from 0.9 to 0.1.\nwhile confidence >= 0.01:\n haystack_color = cv2.imread(haystack)\n needle_color = cv2.imread(needle)\n\n # Store width and height of template in w and h.\n rgb, w, h = needle_color.shape[::-1]\n\n # Perform template match.\n match_color = cv2.matchTemplate(haystack_color, needle_color, cv2.TM_CCOEFF_NORMED)\n\n # Store the coordinates of matched area in a numpy array.\n match_array = np.where(match_color >= confidence)\n\n # Draw a rectangle around the matched region.\n for pt in zip(*match_array[::-1]):\n cv2.rectangle(haystack_color, pt, (pt[0] + w, pt[1] + h), (0, 255, 255), 1)\n\n # Show the final image with the matched area.\n cv2.imshow(str(round(confidence, 2)), haystack_color)\n cv2.waitKey(0)\n\n # Lower the confidence rating for the next loop.\n confidence = confidence - 0.05\n",
"id": "4132104",
"language": "Python",
"matching_score": 0.6680266261100769,
"max_stars_count": 44,
"path": "tools/opencv_match_test.py"
},
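The confidence sweep in tools/opencv_match_test.py needs a display for cv2.imshow(). A headless variant that only reports how many regions pass each threshold (the image paths here are placeholders):

import cv2
import numpy as np

haystack = cv2.imread("haystack.png")  # placeholder paths
needle = cv2.imread("needle.png")
result = cv2.matchTemplate(haystack, needle, cv2.TM_CCOEFF_NORMED)

confidence = 1.0
while confidence >= 0.05:
    matches = np.where(result >= confidence)
    print(f"confidence {confidence:.2f}: {len(matches[0])} match(es)")
    confidence -= 0.05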
{
"content": "#!/usr/bin/env python3\n# coding=UTF-8\n\"\"\"\nQuickly logs the user in. Pass a name for \"user\" on the command line\nto specify the user to use.\n\nSETUP:\nYour user credentials file must have the format of:\n`username-{USER}.txt`\nWhere {USER} is to be replaced with your account's username.\n\nYour user password credentials file must have the format of:\n`password-{USER}.txt`\nWhere {USER} is to be replaced with your account's username.\n\nRUNNING:\nIf your username is `alice123`, run the script like this:\n`python3 login_user.py alice123`\n\"\"\"\nimport pathlib\nimport sys\n\n# Ensure ocvbot files are added to sys.path.\nSCRIPTPATH = str(pathlib.Path(__file__).parent.parent.absolute())\nsys.path.insert(1, SCRIPTPATH)\n\nfrom ocvbot import vision as vis\nfrom ocvbot import behavior as behav\n\nvis.init()\nuser = sys.argv[1]\n\n\ndef main():\n \"\"\"\n Quickly logs in the desired user.\n \"\"\"\n behav.login_basic(\n username_file=\"../ocvbot/credentials/username-\" + user + \".txt\",\n password_file=\"../ocvbot/credentials/password-\" + user + \".txt\",\n cred_sleep_range=(50, 100),\n )\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "6187903",
"language": "Python",
"matching_score": 1.925744891166687,
"max_stars_count": 44,
"path": "tools/login_user.py"
},
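tools/login_user.py expects the credential files described in its SETUP section to already exist. A small sketch for creating them, assuming the ocvbot/credentials/ directory implied by the paths above and reusing the hypothetical account name from the docstring:

from pathlib import Path

user = "alice123"  # hypothetical account name from the docstring
cred_dir = Path("ocvbot/credentials")
cred_dir.mkdir(parents=True, exist_ok=True)
(cred_dir / f"username-{user}.txt").write_text(user + "\n")
(cred_dir / f"password-{user}.txt").write_text("your-password-here\n")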
{
"content": "#!/usr/bin/env python3\n# coding=UTF-8\n\"\"\"\nSimple screenshot tool for quickly capturing the OSRS client window.\nCompresses screenshot with pngcrush if it's available.\nAutomatically censors player's username with ImageMagick if it's available.\n\nProduces an image in the format of `osrs_$(date +%Y-%m-%d_%H-%M-%S).png`\nin the current directory.\n\nSyntax:\n python3 screnshot.py [DELAY]\n\nExample:\n python3 screenshot.py 5 = Wait 5 seconds before taking screenshot.\n\nOptional positional arguments:\n DELAY (int): The number of seconds to wait before taking the\n screenshot, default is 0.\n\n\"\"\"\nimport datetime\nimport logging as log\nimport os\nimport pathlib\nimport subprocess\nimport sys\nimport time\n\nimport pyautogui as pag\n\ncurrent_dir = os.getcwd()\n\n# Ensure ocvbot files are added to sys.path so they can be imported.\nSCRIPTPATH = str(pathlib.Path(__file__).parent.parent.absolute())\nsys.path.insert(1, SCRIPTPATH)\n# Importing ocvbot modules changes the current dir to the directory the files are in.\nfrom ocvbot import vision as vis, startup as start\n\nvis.init()\nlog.basicConfig(\n format=\"%(asctime)s -- %(filename)s.%(funcName)s - %(message)s\", level=\"INFO\"\n)\nARGUMENTS: int = len(sys.argv)\n\n# If the name of the script is the only argument given, set the optional\n# arguments to their default values.\nif ARGUMENTS == 1:\n DELAY = 0\nelif ARGUMENTS == 2:\n DELAY = int(sys.argv[1])\nelse:\n raise Exception(\"Unsupported arguments!\")\n\n\ndef pngcrush(filename: str) -> None:\n try:\n subprocess.call([\"pngcrush\", \"-ow \", filename])\n except FileNotFoundError:\n log.warning(\"pngcrush not present!\")\n\n\ndef censor_username(filename: str) -> None:\n try:\n subprocess.run(\n (\n \"convert\"\n + \" \"\n + filename\n + \" -fill black\"\n + ' -draw \"rectangle 7 458 190 473\"'\n + \" \"\n + filename\n ),\n check=True,\n shell=True,\n )\n except FileNotFoundError:\n log.warning(\"ImageMagick not present!\")\n\n\ndef main(region: tuple[int, int, int, int] = vis.CLIENT) -> str:\n \"\"\"\n Takes a screenshot of the OSRS client window.\n\n Returns:\n Returns the filepath to the screenshot.\n \"\"\"\n if DELAY > 0:\n log.info(\"Waiting %s seconds ...\", DELAY)\n time.sleep(DELAY)\n\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n file_name = str(\"osrs_\" + timestamp + \".png\")\n pag.screenshot(file_name, region=region)\n\n # Determine if we're logged in or logged out.\n client_status = vis.orient()[0]\n\n if client_status == \"logged_in\":\n # If the client is logged in, censor the player's username\n # by drawing a black rectangle over it with ImageMagick.\n censor_username(file_name)\n pngcrush(file_name)\n # Move the file into the current dir.\n new_file_name = current_dir + \"/\" + file_name\n os.rename(file_name, new_file_name)\n return new_file_name\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "7042654",
"language": "Python",
"matching_score": 3.1903254985809326,
"max_stars_count": 0,
"path": "tools/screenshot.py"
},
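Both tools/screenshot.py and tools/vision_regions.py invoke pngcrush with the argument "-ow " (note the trailing space), which pngcrush is unlikely to accept as its overwrite flag. A corrected wrapper, otherwise identical, would pass "-ow" without the space:

import logging as log
import subprocess

def pngcrush(filename: str) -> None:
    # "-ow" (no trailing space) is pngcrush's overwrite-in-place flag.
    try:
        subprocess.call(["pngcrush", "-ow", filename])
    except FileNotFoundError:
        log.warning("pngcrush not present!")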
{
"content": "# coding=UTF-8\n\"\"\"\nSetup a few global configurations before script is run.\n\n\"\"\"\nimport logging as log\nimport os\nimport sys\n\nimport pathlib\nimport pyautogui as pag\nimport yaml\n\npag.PAUSE = 0\npag.FAILSAFE = False\nsys.setrecursionlimit(9999)\n\n# Make sure the program's working directory is the directory in which\n# this file is located.\nos.chdir(os.path.dirname(__file__))\n\n# Ensure ocvbot files are added to sys.path.\nSCRIPTPATH = str(pathlib.Path(__file__).parent.parent.absolute())\nsys.path.insert(1, SCRIPTPATH)\n\n# Read in the config file.\nwith open(\"config.yaml\", encoding=\"utf-8\") as config_file:\n config = yaml.safe_load(config_file)\n\n# Configure logging.\nlog_level = config[\"main\"][\"log_level\"]\nlog.basicConfig(\n format=\"%(asctime)s %(filename)s.%(funcName)s - %(message)s\", level=log_level\n)",
"id": "9698260",
"language": "Python",
"matching_score": 1.9422608613967896,
"max_stars_count": 44,
"path": "ocvbot/__init__.py"
}
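ocvbot/__init__.py reads config.yaml, and other modules reference at least config["main"]["log_level"] and config["main"]["bank_pin"]. A minimal illustration of that expected shape, kept in Python so it stays self-contained (the real config.yaml presumably carries more settings):

import yaml

example_config = """
main:
  log_level: INFO
  bank_pin: 1234
"""
config = yaml.safe_load(example_config)
print(config["main"]["log_level"])  # INFO
print(config["main"]["bank_pin"])   # 1234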
] | 2.663899 |
lukasjelonek | [
{
"content": "from enum import Enum\n\n# source https://www.ddbj.nig.ac.jp/ddbj/code-e.html\n_dna_unamb_chars = ['A','T','G','C']\n_rna_unamb_chars = ['A','U','G','C']\n_aa_unamb_chars = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y']\n_aa_nc_unamb_chars = ['A','C','D','E','F','G','H','I','K','L','M','N','P','Q','R','S','T','V','W','Y','O','U']\n_dna_amb_chars = ['N','R','Y','S','W','K','M','B','D','H','V']\n_rna_amb_chars = _dna_unamb_chars\n_aa_amb_chars = ['X','B','Z','J']\n_dna_special_chars = ['-'] # gap\n_rna_special_chars = _dna_special_chars\n_aa_special_chars = ['*', '-'] # stop and gap\n\nclass Alphabet(Enum):\n DNA = (_dna_unamb_chars, _dna_amb_chars, _dna_special_chars)\n RNA = (_rna_unamb_chars, _rna_amb_chars, _rna_special_chars)\n AA = (_aa_unamb_chars, _aa_amb_chars, _aa_special_chars )\n AA_NC = (_aa_nc_unamb_chars, _aa_amb_chars, _aa_special_chars)\n # other contains all characters from above, but everything from the ambiguous section\n # is added to the non-ambiguous section\n OTHER = (\n list(set(_dna_unamb_chars + _rna_unamb_chars + _aa_unamb_chars + _dna_amb_chars + _rna_amb_chars )),\n _aa_amb_chars,\n list(set(_dna_special_chars + _rna_special_chars + _aa_special_chars))\n )\n\n def __init__(self, unambiguous_chars, ambiguous_chars, special_chars):\n self.unambiguous_chars = unambiguous_chars\n self.ambiguous_chars = ambiguous_chars\n self.special_chars = special_chars\n self.all_chars = unambiguous_chars + ambiguous_chars + special_chars\n",
"id": "2866905",
"language": "Python",
"matching_score": 1.8279114961624146,
"max_stars_count": 2,
"path": "fastaqc/alphabet.py"
},
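A short usage sketch for the Alphabet enum above, showing how the unambiguous/ambiguous/special character lists can classify a sequence (the sequence literal is made up for illustration):

from fastaqc.alphabet import Alphabet

chars = set("ATGCNNAT-".upper())

print(chars.issubset(Alphabet.DNA.all_chars))           # True: DNA incl. ambiguity codes and gaps
print(bool(chars & set(Alphabet.DNA.ambiguous_chars)))  # True: contains 'N'
print(chars.issubset(Alphabet.DNA.unambiguous_chars))   # False: 'N' and '-' are not plain bases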
{
"content": "#!/usr/bin/env python3\nimport argparse\nfrom argparse import RawTextHelpFormatter\nimport logging\nfrom pbr.version import VersionInfo\nfrom Bio import SeqIO\nimport itertools\nfrom prettytable import PrettyTable\nimport tabulate\nfrom xopen import xopen\nfrom fastaqc.alphabet import Alphabet\nimport pprint\n\n__version__ = VersionInfo('fastaqc').semantic_version().release_string()\n\ndef main():\n parser = argparse.ArgumentParser(description='Version ' + __version__ + '\\nCheck fasta file', formatter_class=RawTextHelpFormatter)\n parser.set_defaults(func=help)\n parser.add_argument('--verbose', '-v', action=\"store_true\")\n\n subparsers = parser.add_subparsers()\n info_parser = subparsers.add_parser('info')\n info_parser.add_argument('fasta', nargs=argparse.REMAINDER)\n info_parser.set_defaults(func=info)\n\n args = parser.parse_args()\n config = {} # implement when needed\n if ('verbose' in vars(args) and args.verbose):\n logging.basicConfig(level=logging.INFO)\n args.parser = parser\n args.func(args, config)\n\ndef help(args, config):\n args.parser.print_help()\n\ndef info(args, cfg):\n if not args.fasta:\n help(args, cfg)\n return\n # Concept\n #\n # The base method iterates through all sequences in the fasta file and applies multiple \n # actions on it. Each action is modelled as a function with two parameters, the sequence\n # record and the stats-dictionary. The stats-dictionary holds the results of each action.\n # Subsequent actions can access the results from previous actions.\n #\n # Coonvetions for stats dictionary\n # * Intermediary results for only one sequence (those that will not be presented to the user)\n # start with an underscore.\n checks = [\n count, \n collect_lengths,\n compute_character_distribution,\n compute_character_positions,\n detect_sequence_type,\n detect_ambiguous_and_special_characters,\n set_sequence_category_name,\n count_sequence_types,\n count_sequences_with_special_characters,\n count_sequences_with_ambiguous_characters,\n count_sequences_with_unknown_characters,\n clear_temporary_fields\n ]\n\n for filename in args.fasta:\n with xopen(filename) as fh:\n stats = {\n 'filename': filename\n }\n for record in SeqIO.parse(fh, \"fasta\"):\n for c in checks:\n c(record, stats)\n print_stats(stats)\n\ndef clear_temporary_fields(record, stats):\n for_removal = []\n for k in stats.keys():\n if k.startswith('_'):\n for_removal.append(k)\n for k in for_removal:\n del stats[k]\n\ndef print_stats(stats):\n header = [stats['filename'], 'count']\n table = []\n table.append(['sequences', stats['sequences']])\n logging.info(pprint.pformat(stats))\n for k,v in sorted(stats['type_counts'].items()):\n table.append([' ' + k, v])\n if 'special_char_count' in stats:\n if k in stats['special_char_count']:\n for k2,v2 in sorted(stats['special_char_count'][k].items()):\n table.append([' ' + k2,v2])\n if 'ambiguous_char_count' in stats:\n if k in stats['ambiguous_char_count']:\n for k2,v2 in sorted(stats['ambiguous_char_count'][k].items()):\n table.append([' ' + k2,v2])\n if 'unknown_char_count' in stats:\n if k in stats['unknown_char_count']:\n for k2,v2 in sorted(stats['unknown_char_count'][k].items()):\n table.append([' ' + k2,v2])\n\n tabulate.PRESERVE_WHITESPACE = True\n print(tabulate.tabulate(table, headers=header, tablefmt='pretty', colalign=('left', 'right')))\n if merge(stats['unknown_char_count']):\n print(\"WARNING: The file contains unknown characters for DNA, RNA and AA sequences. 
\")\n print(\" It will probably fail in applications with strict alphabet checking.\")\n\n if 'seq_lenghts' in stats:\n import plotille\n print('')\n print('Sequence length distribution')\n print(plotille.histogram(stats['seq_lenghts'], height=25, x_min=0))\n print('')\n\ndef merge(dict_of_dicts):\n merged = {}\n for name, subdict in dict_of_dicts.items():\n for k,v in subdict.items():\n if k not in merged:\n merged[k] = v\n else:\n merged[k] = merged[k] + v\n return merged\n\ndef count(record, stats):\n '''counts the number of processed sequences in the field \"sequences\".'''\n if 'sequences' not in stats:\n stats['sequences'] = 0\n stats['sequences'] = stats['sequences'] + 1\n\ndef compute_character_distribution(record, stats):\n distribution = {}\n for c in record.seq.upper():\n if c not in distribution:\n distribution[c] = 0\n distribution[c] = distribution[c] + 1\n\n stats['_character_distribution'] = distribution\n\ndef compute_character_positions(record, stats):\n '''computes a dictionary with all positions per character of the current \n sequence and stores it in \"_character_positions\"'''\n positions = {}\n for i, c in enumerate(record.seq.upper()):\n if c not in positions:\n positions[c] = []\n positions[c].append(i)\n stats['_character_positions'] = positions\n\ndef count_sequences_with_special_characters(record, stats):\n '''counts the sequences with special characters (depends on the alphabet)\n and stores them in \"special_char_count.<sequence_category>.<character>\"'''\n alphabet = assert_sequence_type_available(stats)\n _count(stats, 'special_char_count', alphabet.special_chars)\n\ndef count_sequences_with_ambiguous_characters(record, stats):\n '''counts the sequences with ambiguous characters (depends on the alphabet)\n and stores them in \"ambiguous_char_count.<sequence_category>.<character>\"'''\n alphabet = assert_sequence_type_available(stats)\n _count(stats, 'ambiguous_char_count', alphabet.ambiguous_chars)\n\ndef _count(stats, fieldname, chars):\n category_name = assert_sequence_category_name_available(stats)\n c_dist = assert_character_distribution_available(stats)\n if fieldname not in stats:\n stats[fieldname] = {}\n if category_name not in stats[fieldname]:\n stats[fieldname][category_name] = {}\n counts = stats[fieldname][category_name]\n for c in chars:\n if c in c_dist:\n if c not in counts:\n counts[c] = 0\n counts[c] = counts[c] + 1\n\ndef count_sequences_with_unknown_characters(record, stats):\n '''counts the sequences with unknown characters (depends on the alphabet)\n and stores them in \"ambiguous_char_count.<sequence_category>.<character>\"'''\n category_name = assert_sequence_category_name_available(stats)\n alphabet = assert_sequence_type_available(stats)\n c_dist = assert_character_distribution_available(stats)\n if 'unknown_char_count' not in stats:\n stats['unknown_char_count'] = {}\n if category_name not in stats['unknown_char_count']:\n stats['unknown_char_count'][category_name] = {}\n counts = stats['unknown_char_count'][category_name]\n chars = set(alphabet.all_chars)\n for c in c_dist.keys():\n if c not in chars:\n if c not in counts:\n counts[c] = 0\n counts[c] = counts[c] + 1\n\ndef detect_sequence_type(record, stats):\n c_dist = assert_character_distribution_available(stats)\n if _contains_only(c_dist, Alphabet.DNA.all_chars):\n stats['_type'] = Alphabet.DNA\n elif _contains_only(c_dist, Alphabet.RNA.all_chars):\n stats['_type'] = Alphabet.RNA\n elif _contains_only(c_dist, Alphabet.AA.all_chars):\n stats['_type'] = Alphabet.AA\n elif 
_contains_only(c_dist, Alphabet.AA_NC.all_chars):\n stats['_type'] = Alphabet.AA_NC\n else:\n stats['_type'] = Alphabet.OTHER\n\ndef detect_ambiguous_and_special_characters(record, stats):\n c_dist = assert_character_distribution_available(stats)\n alphabet = assert_sequence_type_available(stats)\n type_flags = set()\n if _contains_only(c_dist, alphabet.unambiguous_chars):\n type_flags.add('unambiguous')\n else:\n if _contains(c_dist, alphabet.ambiguous_chars):\n type_flags.add('ambiguous')\n if _contains(c_dist, alphabet.special_chars):\n type_flags.add('special')\n stats['_type_flags'] = type_flags\n\ndef set_sequence_category_name(record, stats):\n alphabet = assert_sequence_type_available(stats)\n flags = assert_sequence_type_flags_available(stats)\n name = \"{} ({})\".format(alphabet.name, \",\".join(sorted(flags)))\n stats['_category_name'] = name\n\ndef count_sequence_types(record, stats):\n alphabet = assert_sequence_type_available(stats)\n flags = assert_sequence_type_flags_available(stats)\n type = assert_sequence_category_name_available(stats)\n if 'type_counts' not in stats:\n stats['type_counts'] = {}\n if type not in stats['type_counts']:\n stats['type_counts'][type] = 0\n stats['type_counts'][type] = stats['type_counts'][type] + 1\n\ndef collect_lengths(record, stats):\n if 'seq_lenghts' not in stats:\n stats['seq_lenghts'] = []\n stats['seq_lenghts'].append(len(record.seq))\n\ndef _contains(dist, characters):\n for c in characters:\n if c in dist:\n return True\n return False\n\ndef _contains_only(dist, characters):\n remaining = len(dist)\n for c in characters:\n if c in dist:\n remaining = remaining - 1\n return remaining == 0\n\ndef assert_character_distribution_available(stats):\n assert '_character_distribution' in stats, 'Sequence character distribution not availabe. It must be computed before this check.'\n return stats['_character_distribution']\n\ndef assert_sequence_type_available(stats):\n assert '_type' in stats, 'Sequence type information not availabe. It must be computed before this check.'\n return stats['_type']\n\ndef assert_sequence_type_flags_available(stats):\n assert '_type_flags' in stats, 'Sequence type flag information not availabe. It must be computed before this check.'\n return stats['_type_flags']\n\ndef assert_sequence_category_name_available(stats):\n assert '_category_name' in stats, 'Sequence category_name information not availabe. It must be computed before this check.'\n return stats['_category_name']\n\nif __name__ == \"__main__\":\n main()\n",
"id": "5938475",
"language": "Python",
"matching_score": 4.387250900268555,
"max_stars_count": 2,
"path": "fastaqc/main.py"
},
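fastaqc/main.py's info() builds its report from a pipeline of check functions that each take (record, stats) and share one stats dictionary. A stripped-down sketch of that pattern, using a stand-in Record class instead of Bio.SeqIO records:

class Record:
    def __init__(self, seq):
        self.seq = seq

def count(record, stats):
    stats["sequences"] = stats.get("sequences", 0) + 1

def collect_lengths(record, stats):
    stats.setdefault("seq_lengths", []).append(len(record.seq))

checks = [count, collect_lengths]
stats = {}
for record in [Record("ATGC"), Record("ATGCATGC")]:
    for check in checks:
        check(record, stats)

print(stats)  # {'sequences': 2, 'seq_lengths': [4, 8]}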
{
"content": "#!/usr/bin/env python3\nimport argparse\nfrom argparse import RawTextHelpFormatter\nimport os\nimport logging\nfrom dbxref import resolver, config\nfrom pbr.version import VersionInfo\nimport json\n\n__version__ = VersionInfo('dbxref').semantic_version().release_string()\n\ndef main():\n parser = argparse.ArgumentParser(description='Version ' + __version__ + '\\nLookup locations of database cross references and retrieve them as json', formatter_class=RawTextHelpFormatter)\n parser.set_defaults(func=help)\n\n subparsers = parser.add_subparsers()\n info_parser = subparsers.add_parser('info')\n info_parser.set_defaults(func=info)\n\n resolve_parser = subparsers.add_parser('resolve')\n resolve_parser.add_argument('dbxrefs', nargs=argparse.REMAINDER)\n resolve_parser.add_argument('--no_check', '-n', action='store_false', default=True, help=\"Do not check existence of cross reference\")\n resolve_parser.add_argument('--verbose', '-v', action='store_true', default=False, help=\"Show debug output\")\n resolve_parser.set_defaults(func=resolve)\n\n retrieve_parser = subparsers.add_parser('retrieve')\n retrieve_parser.set_defaults(func=retrieve)\n retrieve_parser.add_argument('dbxrefs', nargs=argparse.REMAINDER)\n retrieve_parser.add_argument('--ignore_cache', '-C', action='store_true', default=False, help=\"Ignore entries from cache. Fetched entries are still stored in cache.\")\n retrieve_parser.add_argument('--verbose', '-v', action='store_true', default=False, help=\"Show debug output\")\n\n args = parser.parse_args()\n config = {} # implement when needed\n if ('verbose' in vars(args) and args.verbose):\n logging.basicConfig(level=logging.INFO)\n args.parser = parser\n args.func(args, config)\n\ndef help(args, config):\n args.parser.print_help()\n\ndef info(args, cfg):\n print ('dbxref Version ' + __version__)\n print ('')\n print ('Supported dbxref databases:')\n providers = config.load_providers()\n for provider in providers:\n print (' ' + provider['name'])\n print (' Prefixes: ' + str.join(', ', [x for x in provider['prefixes']]))\n print (' Formats : ' + str.join(', ', [x for x in provider['resources']]))\n\ndef resolve(args, config):\n print(json.dumps(resolver.resolve(resolver.convert_to_dbxrefs(args.dbxrefs), check_existence=args.no_check)))\n\ndef retrieve(args, config):\n from dbxref import retriever\n print(\n json.dumps(\n retriever.retrieve(\n resolver.convert_to_dbxrefs(args.dbxrefs),\n ignore_cache = args.ignore_cache\n )\n )\n )\n\nif __name__ == \"__main__\":\n main()\n",
"id": "9538671",
"language": "Python",
"matching_score": 1.976820707321167,
"max_stars_count": 1,
"path": "dbxref/main.py"
},
{
"content": "import logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nfrom dbxref import config\nfrom itertools import groupby\nfrom diskcache import Cache\nfrom appdirs import user_cache_dir\n\nimport json\n\ndef retrieve(dbxrefs, ignore_cache=False):\n cache = init_cache()\n\n # normalize db notation\n normalize_db_notation(dbxrefs)\n dbxrefs = sorted(dbxrefs, key=lambda x: x['db'])\n\n # lookup from cache\n uncached = []\n cached = []\n if ignore_cache:\n uncached = dbxrefs\n else :\n (cached, uncached) = find_cached_entries(cache, dbxrefs)\n\n # load uncached\n loaded_uncached = load_uncached_entries(uncached)\n cache_entries(cache, loaded_uncached)\n\n # compile results\n results = []\n results.extend(cached)\n results.extend(loaded_uncached)\n return results\n\ndef normalize_db_notation(dbxrefs):\n # take first prefix that matches the db\n for dbxref in dbxrefs:\n key = dbxref['db']\n if config.has_provider(key):\n provider = config.get_provider(key)\n for prefix in provider['prefixes']:\n if key.lower() == prefix.lower():\n dbxref['db'] = prefix\n logger.debug(\"'{}' -> '{}'\".format(key, dbxref['db']))\n\ndef load_with_external_provider(provider, dbxrefs):\n logger.debug('Loading {0} via external provider'.format(dbxrefs))\n script = provider['retriever']['location']\n call = '{} {}'.format(script, ' '.join(list(map(toString, dbxrefs))))\n logger.debug(\"Running '{}'\".format(call))\n import subprocess\n result = subprocess.check_output(call, shell=True)\n return json.loads(result.decode('utf-8'))\n\ndef load_with_internal_provider(provider, dbxrefs):\n import importlib\n retrieve_method = getattr(importlib.import_module(provider['retriever']['location']), 'retrieve')\n retrieved = retrieve_method(dbxrefs)\n return retrieved\n\ndef toString(dbxref):\n return '{}:{}'.format(dbxref['db'], dbxref['id'])\n\ndef init_cache():\n cachedir = user_cache_dir('dbxref')\n cache = Cache(cachedir)\n return cache\n\ndef cache_entries(cache, entries):\n expiration_time = 86400 # one day\n for e in entries:\n logger.debug('Caching {}'.format(e['id']))\n cache.set(e['id'], e, expire=expiration_time)\n\ndef find_cached_entries(cache, dbxrefs):\n cached = []\n uncached = []\n for d in dbxrefs:\n key = toString(d)\n if key in cache:\n logger.debug(\"Found {} in cache\".format(key))\n cached.append(cache[key])\n else:\n uncached.append(d)\n return (cached, uncached)\n\ndef load_uncached_entries(dbxrefs):\n results = []\n for key, dbxrefs in groupby(dbxrefs, lambda x: x['db']):\n if config.has_provider(key):\n provider = config.get_provider(key)\n logger.debug('{0} is supported'.format(key))\n if 'retriever' in provider:\n if provider['retriever']['type'] == 'external':\n results.extend( load_with_external_provider(provider, list(dbxrefs)))\n elif provider['retriever']['type'] == 'internal':\n results.extend(load_with_internal_provider(provider, list(dbxrefs)))\n else:\n raise Exception('Unknown retriever type', provider['retriever']['type'])\n else:\n logger.debug('{0} is not supported'.format(key))\n results.extend( map(lambda x: {'id': toString(x), 'status': 'not supported'}, dbxrefs))\n else:\n logger.debug('{0} is not supported'.format(key))\n results.extend( map(lambda x: {'id': toString(x), 'status': 'not supported'}, dbxrefs))\n return (results)\n",
"id": "5203841",
"language": "Python",
"matching_score": 1.995010495185852,
"max_stars_count": 1,
"path": "dbxref/retriever.py"
},
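A minimal usage sketch for the retriever module above, assuming the dbxref package from this listing is installed and a PFAM provider is configured; the dict-style dbxref mirrors the one used in tests/test_retriever.py further down, and ignore_cache is a keyword accepted by retriever.retrieve as defined above.

    import json
    from dbxref import retriever

    # Fetch a single Pfam entry, bypassing the on-disk cache.
    documents = retriever.retrieve([{'db': 'PFAM', 'id': 'PF00002'}], ignore_cache=True)
    print(json.dumps(documents, indent=2))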
{
"content": "import requests\nfrom cachecontrol import CacheControl\nfrom cachecontrol.caches.file_cache import FileCache\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom dbxref import config\n\ncache = FileCache(\".web_cache\", forever=True)\nsess = CacheControl(requests.Session(), cache=cache)\n\nSTATUS_EXISTS='found'\nSTATUS_NOT_EXISTS='not found'\nSTATUS_UNKNOWN='status unknown'\nSTATUS_NOT_CHECKED='status not checked'\nSTATUS_CHECK_NOT_SUPPORTED='check of status not supported'\nSTATUS_CHECK_TIMEOUT='status check timed out'\nSTATUS_UNSUPPORTED_DB='database unsupported'\n\ndef resolve(dbxrefs, check_existence=True):\n results = []\n for dbxref in dbxrefs:\n status = STATUS_NOT_CHECKED\n if check_existence:\n status = check_dbxref_exists(dbxref)\n if config.has_provider(dbxref['db']):\n provider = config.get_provider(dbxref['db'])\n locations = {}\n for _type in provider['resources']:\n urls = []\n for url_template in provider['resources'][_type]:\n urls.append(compile_url(url_template, dbxref))\n locations[_type] = urls\n results.append({'dbxref': dbxref['db'] + ':' + dbxref['id'], 'locations': locations, 'status': status})\n else:\n results.append({'dbxref': dbxref['db'] + ':' + dbxref['id'], 'status': STATUS_UNSUPPORTED_DB})\n return results\n\ndef convert_to_dbxrefs(strings):\n '''convert a list of strings to dbxref maps with db and id attribute'''\n return list(map(convert_string_to_dbxref, strings))\n\ndef check_dbxref_exists(dbxref):\n if config.has_provider(dbxref['db']):\n provider = config.get_provider(dbxref['db'])\n urls = []\n exists = STATUS_NOT_CHECKED\n if 'check_existence' in provider:\n url = compile_url(provider['check_existence'], dbxref)\n logger.debug('Checking existence of dbxref at \"%s\"', url)\n exists = check_url_exists(url)\n return exists\n else:\n return STATUS_CHECK_NOT_SUPPORTED\n return STATUS_UNSUPPORTED_DB\n\ndef compile_url(template, dbxref):\n return template.replace('%i', dbxref['id']).replace('%d', dbxref['db'])\n\ndef check_url_exists(url):\n try:\n r = sess.head(url, allow_redirects=True, timeout=5)\n r.close()\n if r.status_code < 400:\n return STATUS_EXISTS\n else:\n logger.debug('The server responded with status code: %s', r.status_code)\n return STATUS_NOT_EXISTS\n except requests.exceptions.Timeout as ex:\n logger.info('Timeout for URL: \"%s\"', url)\n return STATUS_CHECK_TIMEOUT\n except:\n return STATUS_NOT_EXISTS\n\ndef convert_string_to_dbxref(string):\n \"\"\"\n A dbxref is dictionary with two keys: db and id.\n \"\"\"\n split = string.split(':', 1)\n if len(split) > 1:\n return {'db': split[0], 'id': split[1]}\n else:\n # invalid dbxref. nevertheless return a valid dbxref object with the value as the db and a empty id.\n return {'db': split[0], 'id': ''}\n",
"id": "10782264",
"language": "Python",
"matching_score": 3.4220492839813232,
"max_stars_count": 1,
"path": "dbxref/resolver.py"
},
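A short sketch of the resolver above, assuming the same installed package; the keys 'dbxref', 'locations' and 'status' follow the dictionaries built in resolve(), and check_existence=False skips the HTTP HEAD probe.

    from dbxref import resolver

    dbxrefs = resolver.convert_to_dbxrefs(['GO:0097281', 'taxon:452271'])
    for entry in resolver.resolve(dbxrefs, check_existence=False):
        # each result carries the original dbxref, its resolved URLs per format,
        # and a status string such as 'status not checked'
        print(entry['dbxref'], entry['status'], entry.get('locations', {}))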
{
"content": "import unittest\nfrom dbxref import resolver\n\nvalid_ids = [\n 'GO:0097281',\n 'EC:1.1.1.1',\n 'UniProtKB/Swiss-Prot:P12345',\n 'UniProtKB/TrEMBL:A2VB99',\n 'taxon:452271',\n 'pubmed:19037750',\n 'PDB:4AJY',\n 'http://www.google.de',\n 'https://www.google.de',\n 'GeneID:956582',\n 'GI:731497',\n 'PFAM:PF00002',\n 'RFAM:RF00360',\n 'InterPro:IPR002928',\n 'SO:0000704',\n]\n\ninvalid_ids = [\n 'GO:123',\n 'EC:hoho',\n 'UniProtKB/Swiss-Prot:45',\n 'UniProtKB/TrEMBL:99',\n 'taxon:hoho',\n 'pubmed:hoho',\n 'PDB:hoho',\n 'http://wurst',\n 'https://wurst',\n #'InterPro:hoho',\n #'GI:hoho',\n #'GeneID:hoho',\n #'PFAM:hoho',\n #'RFAM:hoho',\n #'SO:123',\n]\n\nclass TestDbxrefResolve(unittest.TestCase):\n\n def test_conversion_of_string_to_dbxref(self):\n\n data = [\n ('GO:1234', {'db': 'GO', 'id': '1234'}),\n ('https://www.google.de', {'db': 'https', 'id': '//www.google.de'}),\n ('db:sub:id', {'db': 'db', 'id': 'sub:id'}),\n \n ]\n for d in data:\n with self.subTest(d=d):\n self.assertEqual(resolver.convert_string_to_dbxref(d[0]), d[1])\n\n\n def test_resolve_enzyme(self):\n self.assertNotEqual(resolver.resolve(resolver.convert_to_dbxrefs([\"EC:1.1.1.1\"])), [])\n\n def test_check_dbxref_exists(self):\n import logging\n from dbxref.resolver import STATUS_EXISTS, STATUS_NOT_EXISTS, STATUS_UNSUPPORTED_DB, STATUS_UNKNOWN\n logging.basicConfig(level=logging.DEBUG)\n logging.getLogger().setLevel(logging.WARNING)\n resolver.logger.setLevel(logging.DEBUG)\n data = [\n # existent ids\n ('GO:0097281', STATUS_EXISTS),\n ('EC:1.1.1.1', STATUS_EXISTS),\n ('UniProtKB/Swiss-Prot:P12345', STATUS_EXISTS),\n ('UniProtKB/TrEMBL:A2VB99', STATUS_EXISTS),\n ('taxon:452271', STATUS_EXISTS),\n ('pubmed:19037750', STATUS_EXISTS),\n ('PDB:4AJY', STATUS_EXISTS),\n ('http://www.google.de', STATUS_EXISTS),\n ('https://www.google.de', STATUS_EXISTS),\n\n # non existent ids\n ('GO:123', STATUS_NOT_EXISTS),\n #('EC:hoho', STATUS_NOT_EXISTS),\n ('UniProtKB/Swiss-Prot:45', STATUS_NOT_EXISTS),\n ('UniProtKB/TrEMBL:99', STATUS_NOT_EXISTS),\n ('taxon:hoho', STATUS_NOT_EXISTS),\n ('pubmed:hoho', STATUS_NOT_EXISTS),\n ('PDB:hoho', STATUS_NOT_EXISTS),\n ('http://wurst', STATUS_NOT_EXISTS),\n ('https://wurst', STATUS_NOT_EXISTS),\n\n # currently unsupported\n #('GeneID:956582', FOUND),\n #('GI:731497', FOUND),\n #('PFAM:PF00002', FOUND),\n #('RFAM:RF00360', FOUND),\n #('InterPro:IPR002928', FOUND),\n #('SO:0000704', FOUND),\n\n #('InterPro:hoho', NOT_FOUND),\n #('GI:hoho', NOT_FOUND),\n #('GeneID:hoho', NOT_FOUND),\n #('PFAM:hoho', NOT_FOUND),\n #('RFAM:hoho', NOT_FOUND),\n #('SO:123', NOT_FOUND),\n ]\n\n for d in data:\n with self.subTest(d=d):\n self.assertEqual(resolver.check_dbxref_exists(resolver.convert_string_to_dbxref(d[0])), d[1] )\n\n def test_check_urls(self):\n import requests\n dbxrefs = resolver.convert_to_dbxrefs(valid_ids)\n resolved = resolver.resolve(dbxrefs, check_existence=False)\n for r in resolved:\n for k in r['locations']:\n for url in r['locations'][k]:\n with self.subTest(url=url):\n try:\n with requests.get(url, allow_redirects=True, timeout=3) as req:\n self.assertLess(req.status_code, 400)\n except:\n self.assertTrue(False)\n\n\n",
"id": "11815645",
"language": "Python",
"matching_score": 3.5827009677886963,
"max_stars_count": 1,
"path": "tests/test_resolver.py"
},
{
"content": "import unittest\nfrom dbxref import retriever, resolver\n\nclass TestDbxrefResolve(unittest.TestCase):\n\n def test_different_case_database_prefix(self):\n entries = resolver.convert_to_dbxrefs(['PFAM:PF00002','Pfam:PF00002','pfam:PF00002'])\n documents = retriever.retrieve(entries)\n for d in documents:\n with self.subTest(d=d):\n self.assertTrue('description' in d)\n",
"id": "4241970",
"language": "Python",
"matching_score": 0.7846153974533081,
"max_stars_count": 1,
"path": "tests/test_retriever.py"
},
{
"content": "import unittest\nfrom dbxref.retrieve import uniprot\n\nclass TestPfam(unittest.TestCase):\n\n def test_no_position(self):\n '''regression test for missing position parameter in uniprot entry'''\n documents = uniprot.retrieve([{'db': 'UniProtKB/Swiss-Prot', 'id': 'P0CM58'}])\n # this test failed due to an error due to missing None handling, \n # so no assertions here. Once fixed. this should suffice\n",
"id": "1503188",
"language": "Python",
"matching_score": 3.45978045463562,
"max_stars_count": 1,
"path": "tests/test_uniprot.py"
},
{
"content": "import unittest\nfrom dbxref.retrieve import pfam\n\nclass TestPfam(unittest.TestCase):\n\n def test_no_position(self):\n '''regression test for missing comment in pfam entry'''\n documents = pfam.retrieve([{'db': 'PFAM', 'id': 'PF00083.23'}])\n # this test failed due to an error due to missing None handling, \n # so no assertions here. Once fixed. this should suffice\n\n def test_renamed_family(self):\n '''regression test for missing comment in pfam entry'''\n documents = pfam.retrieve([{'db': 'PFAM', 'id': 'Tiny_TM_bacill'}])\n # this test failed due to a redirect when a family was renamed\n # unfortunately the redirect was not encoded in http headers, but in \n # html markup (<meta http-equiv=\"Refresh\" content=\"5; URL=/family/PF09680\" />)\n # so no assertions here. Once fixed. this should suffice\n",
"id": "5858935",
"language": "Python",
"matching_score": 0.2601068913936615,
"max_stars_count": 1,
"path": "tests/test_pfam.py"
},
{
"content": "#!/usr/bin/env python3\nimport dbxref.resolver\nimport requests\nimport xml.etree.ElementTree as ET\nimport lxml.html as HTML\nimport logging\nimport json\nimport argparse\n#logging.basicConfig(level=logging.DEBUG)\n#logging.getLogger().setLevel(logging.WARNING)\nlogger = logging.getLogger(__name__)\n#logger.setLevel(logging.DEBUG)\n\nns = {'uniprot': 'http://uniprot.org/uniprot'}\n\ndef main():\n parser = argparse.ArgumentParser(description='Retrieve uniprot xml documents for dbxrefs and convert them into json')\n parser.add_argument('--basic', '-b', action='store_true', help='Include id and description')\n parser.add_argument('--sequence', '-s', action='store_true', help='Include sequence')\n parser.add_argument('--organism', '-o', action='store_true', help='Include organism info')\n parser.add_argument('--annotation', '-a', action='store_true', help='Include annotation')\n parser.add_argument('--features', '-f', action='store_true', help='Include features')\n parser.add_argument('dbxrefs', nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n if not (args.basic or args.sequence or args.organism or args.annotation or args.features):\n args.basic = True\n args.sequence = True\n args.organism = True\n args.annotation = True\n args.features = True\n\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)\n\n documents = retrieve(dbxrefs, basic=args.basic, sequence=args.sequence, organism=args.organism, annotation=args.annotation, features=args.features)\n print(json.dumps(documents))\n\ndef lookup(xml_url, retries):\n requests.get(xml_url)\n\ndef retrieve(dbxrefs, basic=True, sequence=True, organism=True, annotation=True, features=True):\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n xml_url = entry['locations']['xml'][0]\n logger.debug('URL: %s', xml_url)\n retries = 10\n r = None\n while retries > 0 and r == None:\n try:\n r = requests.get(xml_url)\n break\n except requests.exceptions.SSLError:\n retries = retries - 1\n if retries > 0:\n logger.warning(\"Request failed, retrying\")\n else:\n raise\n logger.debug('Content: %s', r.text)\n\n output = {'id': entry['dbxref']}\n try:\n root = ET.fromstring(r.text)\n for child in root.findall('uniprot:entry', ns):\n if basic:\n output.update(read_basic(child))\n if sequence:\n output.update(read_sequence(child))\n if organism:\n output.update(read_taxonomy(child))\n if annotation:\n output.update(read_annotation(child))\n if features:\n output['features'] = read_features(child)\n except (RuntimeError, ET.ParseError) as e:\n output['message'] = 'an error occurred'\n try:\n html = HTML.document_fromstring(r.text.replace('\\n', ' '))\n if html.get_element_by_id('noResultsMessage') is not None:\n output['message'] = 'no results found; probably invalid ID'\n except:\n pass\n except:\n logger.warn('Error in retrieving %s', str(entry))\n logger.warn('Document:\\n%s', r.text)\n raise\n documents.append(output)\n return documents\n\ndef read_basic(entry):\n protein = entry.find('uniprot:protein', ns)\n recname = protein.find('uniprot:recommendedName', ns)\n if recname is None:\n # use submittedName if recommendedName is not available\n recname = protein.find('uniprot:submittedName', ns)\n fullName = recname.find('uniprot:fullName', ns).text\n shortName = recname.find('uniprot:shortName', ns)\n\n output = {}\n if shortName is not None:\n return {'description': fullName + '(' + shortName.text + ')'}\n else:\n return {'description': fullName }\n\ndef 
read_sequence(entry):\n sequence = entry.find('uniprot:sequence', ns).text\n # remove whitespaces\n sequence = ''.join(sequence.split())\n return {'sequence': sequence}\n\ndef read_taxonomy(entry):\n organism = entry.find('uniprot:organism', ns)\n taxid = organism.find('uniprot:dbReference', ns).attrib\n return {'organism': 'Taxon:' + taxid['id'] }\n\ndef read_annotation(entry):\n annotation = {\n 'accessions': read_accessions(entry),\n 'dbxrefs' : read_dbrefs(entry),\n 'keywords': read_keywords(entry)\n }\n annotation.update(read_names(entry))\n return annotation\n\ndef read_dbrefs(entry):\n dbrefs = entry.findall('uniprot:dbReference', ns)\n refs = []\n for dbref in dbrefs:\n type = dbref.attrib['type']\n id = dbref.attrib['id']\n if type == 'GO':\n id = id.split(':')[1]\n refs.append(type + ':' + id)\n return refs\n\ndef read_names(entry):\n output = {}\n protein = entry.find('uniprot:protein', ns)\n recname = protein.find('uniprot:recommendedName', ns)\n if recname is not None:\n output['recommended_name'] = { 'full' : recname.find('uniprot:fullName', ns).text }\n short = recname.find('uniprot:shortName', ns)\n if short is not None:\n output['recommended_name']['short'] = short.text\n subname = protein.find('uniprot:submittedName', ns)\n if subname is not None:\n output['submitted_name'] = { 'full' : subname.find('uniprot:fullName', ns).text }\n short = subname.find('uniprot:shortName', ns)\n if short is not None:\n output['submitted_name']['short'] = short.text\n\n alternative_names = []\n altnames = protein.findall('uniprot:alternativeName', ns)\n for altname in altnames:\n alternative_name = {'full': altname.find('uniprot:fullName', ns).text}\n short = altname.find('uniprot:shortName', ns)\n if short is not None:\n alternative_name['short'] = short.text\n alternative_names.append(alternative_name)\n output['alternative_names'] = alternative_names\n\n return output\n\ndef read_accessions(entry):\n accessions = []\n for acc in entry.findall('uniprot:accession', ns):\n accessions.append(acc.text)\n return accessions\n\ndef read_keywords(entry):\n keywords = []\n for kw in entry.findall('uniprot:keyword', ns):\n keywords.append(kw.text)\n return keywords\n\ndef read_features(entry):\n features = []\n for f in entry.findall('uniprot:feature', ns):\n feature = {}\n if 'description' in f.attrib:\n feature['description'] = f.attrib['description']\n feature['type'] = f.attrib['type']\n if f.find('uniprot:location', ns).find('uniprot:position', ns) is not None:\n feature['position'] = f.find('uniprot:location', ns).find('uniprot:position', ns).attrib['position']\n else:\n begin = f.find('uniprot:location', ns).find('uniprot:begin', ns)\n if 'position' in begin.attrib:\n feature['begin'] = begin.attrib['position']\n else:\n feature['begin'] = begin.attrib['status']\n\n end = f.find('uniprot:location', ns).find('uniprot:end', ns)\n if 'position' in end.attrib:\n feature['end'] = end.attrib['position']\n else:\n feature['end'] = end.attrib['status']\n\n if feature['begin'] is not 'unknown':\n feature['begin'] = None\n else:\n feature['begin'] = int(feature['begin'])\n if feature['end'] is not 'unknown':\n feature['end'] = None\n else:\n feature['end'] = int(feature['end'])\n features.append (feature)\n return features\n\nif __name__ == '__main__':\n main()\n",
"id": "3525372",
"language": "Python",
"matching_score": 4.529444217681885,
"max_stars_count": 1,
"path": "dbxref/retrieve/uniprot.py"
},
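A hedged example of calling the uniprot retriever above directly; the keyword arguments mirror its retrieve() signature, and P12345 is one of the accessions already used in tests/test_resolver.py.

    from dbxref.retrieve import uniprot

    # Request only description and sequence; organism, annotation and
    # feature parsing are switched off via the retrieve() keywords.
    docs = uniprot.retrieve([{'db': 'UniProtKB/Swiss-Prot', 'id': 'P12345'}],
                            basic=True, sequence=True, organism=False,
                            annotation=False, features=False)
    print(docs[0].get('description'), len(docs[0].get('sequence', '')))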
{
"content": "#!/usr/bin/env python3\nimport dbxref.resolver\nimport requests\nimport xml.etree.ElementTree as ET\nfrom xml.etree.ElementTree import ParseError\nimport logging\nimport json\nimport argparse\n#logging.basicConfig(level=logging.DEBUG)\n#logging.getLogger().setLevel(logging.WARNING)\nlogger = logging.getLogger(__name__)\n#logger.setLevel(logging.DEBUG)\n\nns = {'pfam': 'https://pfam.xfam.org/'}\n\ndef main():\n parser = argparse.ArgumentParser(description='Retrieve pfam xml documents for dbxrefs and convert them into json')\n parser.add_argument('--basic', '-b', action='store_true', help='Include dbxref and description')\n parser.add_argument('--annotation', '-a', action='store_true', help='Include annotation')\n parser.add_argument('dbxrefs', nargs=argparse.REMAINDER)\n args = parser.parse_args()\n if not (args.basic or args.annotation):\n args.basic = True\n args.annotation = True\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)\n\n documents = retrieve(dbxrefs, basic=args.basic, annotation=args.annotation)\n print(json.dumps(documents))\n\ndef retrieve(dbxrefs, basic=True, annotation=True):\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n if 'xml' in entry['locations']:\n xml_url = entry['locations']['xml'][0]\n logger.debug('URL: %s', xml_url)\n r = requests.get(xml_url)\n logger.debug('Content: %s', r.text)\n\n output = {'id': entry['dbxref']}\n\n try:\n root = ET.fromstring(r.text)\n\n tree = str(ET.tostring(root))\n if '<error>' in tree:\n output['message'] = tree[tree.find('<error>')+7:tree.rfind('</error>')]\n else:\n for child in root.findall('pfam:entry', ns):\n if basic:\n output.update(read_basic(child))\n if annotation:\n output.update(read_annotation(child))\n except (KeyError, AttributeError) as e:\n logger.warn('Error in retrieving %s', str(entry))\n raise\n except (ParseError, RuntimeError) as e:\n output['message'] = 'an error occurred'\n try:\n html = HTML.document_fromstring(r.text.replace('\\n', ' '))\n if html.get_element_by_id('noResultsMessage') is not None:\n output['message'] = 'no results found; probably invalid ID'\n except:\n pass\n documents.append(output)\n return documents\n\ndef read_basic(entry):\n description = entry.find('pfam:description', ns).text.strip()\n return {'description': description}\n\ndef read_annotation(entry):\n annotation = {\n 'domain': entry.attrib['id'],\n 'accession': entry.attrib['accession'],\n 'terms' : []\n }\n\n comment = entry.find('pfam:comment', ns)\n if comment:\n annotation['comment'] = comment.text.strip()\n\n go_terms = entry.find('pfam:go_terms', ns)\n if go_terms:\n categories = go_terms.findall('pfam:category', ns)\n for category in categories:\n terms = category.findall('pfam:term', ns)\n for term in terms:\n annotation['terms'].append({\n 'id': term.attrib['go_id'],\n 'description': term.text\n })\n return annotation\n\nif __name__ == \"__main__\":\n main()\n",
"id": "2033110",
"language": "Python",
"matching_score": 1.5040595531463623,
"max_stars_count": 1,
"path": "dbxref/retrieve/pfam.py"
},
{
"content": "#!/usr/bin/env python3\n\nimport dbxref.resolver\nimport requests\nimport logging\nimport json\nimport argparse\nimport xml.etree.ElementTree as ET\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"main()method for script usage\"\"\"\n parser = argparse.ArgumentParser(description=\"Retrieves Protein Information from NCBIs Gene Identifier. \"\n \"Database: Protein\")\n parser.add_argument(\"--basics\", \"-b\", action=\"store_true\", help=\"Include basic informations such as \"\n \"dbxref/accession-nr., locus, source organism and \"\n \"definition.\")\n parser.add_argument(\"--dbsource\", \"-db\", action=\"store_true\", help=\"Include source database information.\")\n parser.add_argument(\"--references\", \"-r\", action=\"store_true\", help=\"Include reference information.\")\n parser.add_argument(\"dbxref\", nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n # When not specified, include all information available\n if None not in (args.basics, args.dbsource, args.references):\n args.basics = True\n args.dbsource = True\n args.references = True\n\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxref)\n documents = retrieve(dbxrefs, basics=args.basics, dbsource=args.dbsource, references=args.references)\n print(json.dumps(documents, sort_keys=True, indent=4))\n\n\ndef retrieve(dbxrefs, basics=True, dbsource=True, references=True):\n \"\"\"Retrieve Protein data as xml and parse into json format\"\"\"\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n xml_url = entry[\"locations\"][\"xml\"][0]\n logger.debug(\"URL: %s\", xml_url)\n gi = requests.get(xml_url)\n logger.debug(\"Content: %s\", gi.text)\n output = {\"id\": entry[\"dbxref\"]}\n try:\n root = ET.fromstring(gi.text)\n if basics:\n try:\n output.update(read_basics(root))\n except KeyError:\n print(\"One ore more of the basic information were not available for given dbxref. \"\n \"Please check the source data.\")\n raise\n if dbsource:\n try:\n output.update(read_dbsource(root))\n except KeyError:\n print(\"Source database information wasn't or wasn't fully available. Please check the source data\")\n raise\n if references:\n try:\n output.update(read_references(root))\n except KeyError:\n print(\"reference information wasn't or wasn't fully available. 
Please check the source data\")\n raise\n except (RuntimeError, ET.ParseError):\n print(\"An error occurred\")\n raise\n documents.append(output)\n return documents\n\n\ndef read_basics(root):\n \"\"\"Finds basic information such as locus, dbxref, definition, organism, molecular information and representational\n structure, if available, and puts out a dictionary containing the information\"\"\"\n locus = root.find(\"Seq-entry_seq/Bioseq/Bioseq_id/Seq-id/Seq-id_swissprot/Textseq-id/Textseq-id_name\").text\n dbxref_id = \"GI:\" + root.find(\"Seq-entry_seq/Bioseq/Bioseq_id/Seq-id/Seq-id_swissprot/Textseq-id/\"\n \"Textseq-id_accession\").text\n definition = root.find(\"Seq-entry_seq/Bioseq/Bioseq_descr/Seq-descr/Seqdesc/Seqdesc_title\").text\n organism = {\"name\": root.find(\"Seq-entry_seq/Bioseq/Bioseq_descr/Seq-descr/Seqdesc/Seqdesc_source/BioSource/\"\n \"BioSource_org/Org-ref/Org-ref_orgname/OrgName/OrgName_name/\"\n \"OrgName_name_binomial/BinomialOrgName/BinomialOrgName_genus\").text + \" \" +\n root.find(\"Seq-entry_seq/Bioseq/Bioseq_descr/Seq-descr/Seqdesc/Seqdesc_source/BioSource/\"\n \"BioSource_org/Org-ref/Org-ref_orgname/OrgName/OrgName_name/OrgName_name_binomial/\"\n \"BinomialOrgName/BinomialOrgName_species\").text,\n \"taxonomy\": root.find(\"OrgName_lineage\")}\n mol_info = root.find(\"MolInfo_biomol\")\n structure = root.find(\"Seqdesc_comment\")\n return {\"locus\": locus, \"dbxref\": dbxref_id, \"definition\": definition, \"organism\": organism,\n \"molecular_info\": mol_info, \"structure\": structure}\n\n\ndef read_dbsource(root):\n \"\"\"Finds databank sources in the xmland puts out a list with all dbxrefs found.\"\"\"\n dbxref_list = []\n for dbtag in root.findall(\"Seq-entry_seq/Bioseq/Bioseq_descr/Seq-descr/Seqdesc/Seqdesc_sp/SP-block/SP-block_dbref/\"\n \"Dbtag\"):\n dbxref_list.append(dbtag.find(\"Dbtag_db\").text + \":\" + dbtag.find(\"Dbtag_tag/Object-id/Object-id_str\").text)\n return {\"source databases\": dbxref_list}\n\n\ndef read_references(root):\n \"\"\"Finds reference information in the xml and puts out a list containing information for authors, title, journal\n and pubmed DOI\"\"\"\n references = []\n for cit_art in root.findall(\"Seq-entry_seq/Bioseq/Bioseq_descr/Seq-descr/Seqdesc/Seqdesc_pub/Pubdesc/Pubdesc_pub/\"\n \"Pub-equiv/Pub/Pub_article/Cit-art\"):\n author_list = []\n journal = {}\n title = \"\"\n doi = \"\"\n # Find Authors\n for author in cit_art.findall(\"Cit-art_authors/Auth-list/Auth-list_names/Auth-list_names_std/Author\"):\n author_list.append(author.find(\"Author_name/Person-id/Person-id_name/Name-std/Name-std_last\").text + \", \" +\n author.find(\"Author_name/Person-id/Person-id_name/Name-std/Name-std_initials\").text)\n # Find Title\n title = cit_art.find(\"Cit-art_title/Title/Title_E/Title_E_name\").text\n # Find Journal\n journal = {\"name\": cit_art.find(\"Cit-art_from/Cit-art_from_journal/Cit-jour/Cit-jour_title/Title/Title_E/\"\n \"Title_E_iso-jta\").text,\n \"date\": cit_art.find(\"Cit-art_from/Cit-art_from_journal/Cit-jour/Cit-jour_imp/Imprint/Imprint_date/\"\n \"Date/Date_std/Date-std/Date-std_day\").text + \".\" +\n cit_art.find(\"Cit-art_from/Cit-art_from_journal/Cit-jour/Cit-jour_imp/Imprint/Imprint_date/\"\n \"Date/Date_std/Date-std/Date-std_month\").text + \".\" +\n cit_art.find(\"Cit-art_from/Cit-art_from_journal/Cit-jour/Cit-jour_imp/Imprint/Imprint_date/\"\n \"Date/Date_std/Date-std/Date-std_year\").text\n }\n # Find Pubmed DOI\n doi = cit_art.find(\"Cit-art_ids/ArticleIdSet/ArticleId/ArticleId_doi/DOI\").text\n # Put 
into dictionary\n references.append({\"authors\": author_list,\n \"title\": title,\n \"journal\": journal,\n \"doi\": doi\n })\n return {\"references\": references}\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "1356916",
"language": "Python",
"matching_score": 5.00596809387207,
"max_stars_count": 1,
"path": "dbxref/retrieve/gi.py"
},
{
"content": "#!/usr/bin/env python3\n\nimport dbxref.resolver\nimport requests\nimport logging\nimport json\nimport argparse\nimport xml.etree.ElementTree as ET\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"main()method for script usage\"\"\"\n parser = argparse.ArgumentParser(description=\"Retrieves Nucleotide or Protein Sequences data from RefSeq\")\n parser.add_argument(\"--basics\", \"-b\", action=\"store_true\", help=\"Include basic information\")\n parser.add_argument(\"--taxonomy\", \"-ta\", action=\"store_true\", help=\"Include taxonomy\")\n parser.add_argument(\"--references\", \"-r\", action=\"store_true\", help=\"Include references\")\n parser.add_argument(\"--source_db\", \"-s\", action=\"store_true\", help=\"Include source database\")\n parser.add_argument(\"--features_table\", \"-f\", action=\"store_true\", help=\"Include table of features\")\n parser.add_argument(\"dbxrefs\", nargs=argparse.REMAINDER)\n args = parser.parse_args()\n # when not specified include all data available\n if None not in (args.basics, args.taxonomy, args.references, args.source_db, args.features_table):\n args.basics = True\n args.taxonomy = True\n args.references = True\n args.source_db = True\n args.features_table = True\n\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)\n documents = retrieve(dbxrefs, basics=args.basics, taxonomy=args.taxonomy,\n references=args.references, source_db=args.source_db, features_table=args.features_table)\n print(json.dumps(documents, sort_keys=True, indent=4))\n\n\ndef retrieve(dbxrefs, basics=True, taxonomy=False, references=False, source_db=False, features_table=False):\n \"\"\"Retrieves Nucleotide or Protein Sequence data from RefSeq as xml and convert it to json format.\"\"\"\n # expected xml input (example):\n # <GBSet>\n # <GBSeq>\n # <GBSeq_locus>X52740</GBSeq_locus>\n # <GBSeq_length>1762</GBSeq_length>\n # <GBSeq_strandedness>single</GBSeq_strandedness>\n # [.....]\n # <GBSeq_moltype>mRNA</GBSeq_moltype>\n # <GBSeq_sequence></GBSeq_sequence>\n # </GBSeq>\n # </GBSet>\n #\n # expected xml output (example):\n # [\n # {\n # \"locus\": \"3269\",\n # \"sequence_length\": \"322\",\n # \"molecular_type\": \"AA\",\n # }\n # ]\n\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n xml_url = \"\"\n r = None\n for entry in resolved:\n nucleotide_xml_url = entry[\"locations\"][\"xml\"][0]\n logger.debug(\"URL: %s\", nucleotide_xml_url)\n protein_xml_url = entry[\"locations\"][\"xml\"][1]\n logger.debug(\"URL: %s\", protein_xml_url)\n r_n = requests.get(nucleotide_xml_url)\n r_p = requests.get(protein_xml_url)\n if r_n.status_code == 200:\n r = r_n\n elif r_p.status_code == 200:\n r = r_p\n else:\n return print(\"There is no entry for the given ID. Please check the ID.\")\n refseq = r.text\n logger.debug(\"Content: %s\", refseq)\n output = {\"id\": entry[\"dbxref\"]}\n try:\n root = ET.fromstring(refseq)\n for child in root.findall(\"./GBSeq\"): # iterate over children and perform parsing tasks\n if basics:\n try:\n output.update(read_basics(child))\n except AttributeError:\n logger.warning(\"One ore more of the basic information were not available for given dbxref. 
\"\n \"Please check the dbxref.\")\n raise\n try:\n output.update(read_topology(child))\n except AttributeError:\n logger.warning(\"No topology available for given dbxref\")\n raise\n if taxonomy:\n try:\n output.update(read_taxonomy(child))\n except AttributeError:\n logger.warning(\"No taxonomy available for given dbxref\")\n raise\n if references:\n try:\n output.update(read_references(child))\n except AttributeError:\n logger.warning(\"No references available for given dbxref\")\n raise\n if source_db:\n try:\n output.update(read_source_db(child))\n except AttributeError:\n logger.warning(\"No source database available for given dbxref\")\n raise\n if features_table:\n try:\n output.update(read_features(child))\n except AttributeError:\n logger.warning(\"No table of features available for given dbxref\")\n raise\n except (RuntimeError, ET.ParseError):\n logger.warning(\"An error occurred\")\n raise\n documents.append(output)\n return documents\n\n\ndef read_basics(entry):\n \"\"\"Receives child (xml) and converts information into a dictionary (json format compatible)\"\"\"\n locus = entry.find(\"GBSeq_locus\").text\n seq_length = entry.find(\"GBSeq_length\").text\n mol_type = entry.find(\"GBSeq_moltype\").text\n definition = entry.find(\"GBSeq_definition\").text\n other_seq_ids = []\n for child in entry.findall(\"GBSeq_other-seqids/GBSeqid\"):\n other_seq_ids.append(child.text)\n dbxref_id = entry.find(\"GBSeq_primary-accession\").text\n organism = entry.find(\"GBSeq_organism\").text\n accession_version = entry.find(\"GBSeq_accession-version\").text\n return {\"locus\": locus, \"sequence_length\": seq_length, \"molecular_type\": mol_type, \"definition\": definition,\n \"other_sequence_ids\": other_seq_ids, \"dbxref\": \"RefSeq:\" + dbxref_id, \"organism\": organism,\n \"accession_version\": accession_version}\n\n\ndef read_references(entry):\n \"\"\"Receives child (xml) and converts information into a dictionary (json format compatible)\"\"\"\n references_list = []\n authors = []\n for child in entry.findall(\"GBSeq_references/GBReference\"):\n for grandchild in child.find(\"GBReference_authors\"):\n authors.append(grandchild.text)\n single_reference = {\"authors\": authors,\n \"title\": child.find(\"GBReference_title\").text,\n \"journal\": child.find(\"GBReference_journal\").text}\n references_list.append(single_reference)\n single_reference = {}\n authors = []\n return {\"references\": references_list}\n\n\ndef read_features(entry):\n \"\"\"Receives child (xml) and converts information into a dictionary (json format compatible)\"\"\"\n features_table = []\n for feature in entry.findall(\"GBSeq_feature-table/GBFeature\"):\n key = feature.find(\"GBFeature_key\").text\n location = feature.find(\"GBFeature_location\").text\n intervals = []\n for child in feature.find(\"GBFeature_intervals\"):\n single_interval = {\"from\": child.find(\"GBInterval_from\").text, \"to\": child.find(\"GBInterval_to\").text,\n \"accession\": child.find(\"GBInterval_accession\").text}\n intervals.append(single_interval)\n qualifier = []\n for child in feature.find(\"GBFeature_quals\"):\n single_qualifier = {\"name\": child.find(\"GBQualifier_name\").text, \"value\": child.find(\"GBQualifier_value\").text}\n qualifier.append(single_qualifier)\n features_table.append({\"key\": key, \"location\": location, \"intervals\": intervals, \"qualifier\": qualifier})\n return {\"features_table\": features_table}\n\n\n# Following functions could also be written in retrieve()-function for more compact code\ndef 
read_taxonomy(entry):\n return {\"taxonomy\": entry.find(\"GBSeq_taxonomy\").text}\n\n\ndef read_topology(entry):\n return {\"topology\": entry.find(\"GBSeq_topology\").text}\n\n\ndef read_source_db(entry):\n return {\"source_databank\": entry.find(\"GBSeq_source-db\").text}\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "10035353",
"language": "Python",
"matching_score": 3.3351528644561768,
"max_stars_count": 1,
"path": "dbxref/retrieve/refseq.py"
},
{
"content": "#!/usr/bin/env python3\nimport dbxref.resolver\nimport requests\nimport logging\nimport json\nimport argparse\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"main()method for script usage\"\"\"\n parser = argparse.ArgumentParser(description=\"Retrieve Pubmed json documents and parse into dbxref json format\")\n parser.add_argument(\"--basics\", \"-b\", action=\"store_true\", help=\"Include basic information such as title, language,\"\n \" dbxref-id, and day of publishment on pubmed.\")\n parser.add_argument(\"dbxrefs\", nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n if None not in (args.basics):\n args.basics = True\n\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)\n documents = retrieve(dbxrefs, basics=args.basics)\n print(json.dumps(documents, sort_keys=True, indent=4))\n\ndef _get(result, field, mandatory=False, default=\"\", transform=lambda x: x):\n \"\"\"Retrieve a given field if available, return default or exception otherwise. Result may be manipulated by transformation function\"\"\"\n if field in result:\n return transform(result[field])\n else:\n if mandatory:\n raise KeyError(\"Field '\"+field+\"' not found in dictionary\")\n else:\n return default\n\ndef find_id(list, type):\n \"\"\"Find id of given type in pubmed islist\"\"\"\n matches = [x for x in list if x['idtype'] == type]\n if matches:\n return matches[0][\"value\"]\n else:\n raise KeyError(\"Id of type '\" + type + \"' not found in idlist.\")\n\ndef join_authors(list):\n \"\"\"Joins pubmed entry authors to a single string\"\"\"\n return \", \".join([x[\"name\"] for x in list])\n\ndef retrieve(dbxrefs, basics=True):\n \"\"\"Retrieve Pubmed json documents and parse into dbxref json format\"\"\"\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n # Construct URL for retrival\n json_url = entry[\"locations\"][\"json\"][0]\n logger.debug(\"URL: %s\", json_url)\n r = requests.get(json_url)\n logger.debug(\"Content: %s\", r.text)\n pm = json.loads(r.text)\n output = {\"id\": entry[\"dbxref\"]}\n entry_id = dbxrefs[0][\"id\"]\n # Parse basic information\n result = pm[\"result\"][entry_id]\n if basics:\n output[\"publication-date\"] = _get(result, \"epubdate\")\n output[\"dbxref\"] = \"Pubmed:\" + _get(result, \"uid\")\n output[\"title\"] = _get(result, \"title\")\n output[\"language\"] = _get(result, \"lang\", transform=lambda x: \", \".join(x))\n output[\"authors\"] = _get(result, \"authors\", transform=lambda x: join_authors(x))\n output[\"source\"] = _get(result, \"source\")\n output[\"volume\"] = _get(result, \"volume\")\n output[\"issue\"] = _get(result, \"issue\")\n output[\"doi\"] = _get(result, \"articleids\", transform=lambda x: find_id(x, \"doi\"))\n documents.append(output)\n return documents\n\n\nif __name__ == '__main__':\n main()\n",
"id": "6292475",
"language": "Python",
"matching_score": 4.438246250152588,
"max_stars_count": 1,
"path": "dbxref/retrieve/pubmed.py"
},
{
"content": "#!/usr/bin/env python3\n\nimport dbxref.resolver\nimport requests\nimport logging\nimport json\nimport argparse\nimport xml.etree.ElementTree as ET\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"main()method for script usage\"\"\"\n parser = argparse.ArgumentParser(description=\"Retrieve Rfam json documents and parse them into dbxref json format\")\n parser.add_argument(\"--basics\", \"-b\", action=\"store_true\", help=\"Include basic informations such as dbxref_id, \"\n \"name, description and comment.\")\n parser.add_argument(\"--references\", \"-r\", action=\"store_true\", help=\"Include reference information.\")\n parser.add_argument(\"dbxref\", nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n if None not in (args.basics, args.references):\n args.basics = True\n args.references = True\n\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxref)\n documents = retrieve(dbxrefs, basics=args.basics, references=args.references)\n print(json.dumps(documents, sort_keys=True, indent=4))\n\n\ndef retrieve(dbxrefs, basics=True, references=True):\n \"\"\"Retrieve rfam json documents and parse into dbxref json format\"\"\"\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n # Construct URL for retrival\n json_url = entry[\"locations\"][\"json\"][0]\n logger.debug(\"URL: %s\", json_url)\n r = requests.get(json_url)\n logger.debug(\"Content: %s\", r.text)\n rfam = json.loads(r.text)\n output = {\"id\": entry[\"dbxref\"]}\n # Parse basic information\n if basics:\n try:\n output.update({\"dbxref\": \"RFAM:\" + rfam[\"rfam\"][\"acc\"],\n \"name\": rfam[\"rfam\"][\"id\"],\n \"description\": rfam[\"rfam\"][\"description\"],\n \"comment\": rfam[\"rfam\"][\"comment\"]\n })\n except KeyError:\n print(\"Basic information weren't fully or only partly available. \"\n \"Please check the dbxref and the Rfam-site.\")\n raise\n # Parse reference information\n if references:\n try:\n output.update({\"references\": {\"author\": rfam[\"rfam\"][\"curation\"][\"author\"],\n \"DOI\": rfam[\"rfam\"][\"curation\"][\"structure_source\"],\n \"type\": rfam[\"rfam\"][\"curation\"][\"type\"]\n }\n })\n except KeyError:\n print(\"References weren't fully or only partly available. \"\n \"Please check the dbxref and the Rfam-site\")\n raise\n documents.append(output)\n\n return documents\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "4048644",
"language": "Python",
"matching_score": 2.3140032291412354,
"max_stars_count": 1,
"path": "dbxref/retrieve/rfam.py"
},
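An illustrative call of the Rfam retriever above, assuming an RFAM provider is configured; RF00360 is taken from the id list in tests/test_resolver.py, and the keywords match the retrieve() signature shown in the entry.

    from dbxref.retrieve import rfam

    docs = rfam.retrieve([{'db': 'RFAM', 'id': 'RF00360'}], basics=True, references=True)
    for doc in docs:
        # 'name' and 'description' come from the basics block parsed above
        print(doc.get('name'), '-', doc.get('description'))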
{
"content": "#!/usr/bin/env python3\n\nimport dbxref.resolver\nimport requests\nimport logging\nimport json\nimport argparse\n\nlogger = logging.getLogger(__name__)\n\ndef main():\n \"\"\"main()method for script usage\"\"\"\n # AVAILABLE for implementation:\n # 'go_terms', 'member_databases', 'integrated', 'entry_annotations', ''\n #\n # USED:\n # basics: 'accession', 'type', 'description', 'counters', 'entry_id', 'source_database', 'name'\n # hierarchy\n # wikipedia\n # literature\n # cross_references\n # overlaps_with\n\n parser = argparse.ArgumentParser(description=\"Retrieve InterPro documents and convert them into json\")\n parser.add_argument(\"--basics\", \"-b\", action=\"store_true\", help=\"Include basic information such as accession, \"\n \"type, name, description, counters, entry_id and \"\n \"source_database\")\n parser.add_argument(\"--hierarchy\", \"-hi\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--wikipedia\", \"-w\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--literature\", \"-l\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--cross_references\", \"-cr\", action=\"store_true\", help=\"\")\n parser.add_argument(\"--overlaps\", \"-o\", action=\"store_true\", help=\"\")\n parser.add_argument(\"dbxrefs\", nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n # if nothing specified, output all available information for the entry\n if None not in (args.basics, args.hierarchy, args.wikipedia, args.literature, args.cross_references, args.overlaps):\n args.basics = True\n args.hierarchy = True\n args.wikipedia = True\n args.literature = True\n args.cross_references = True\n args.overlaps = True\n\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)\n\n documents = retrieve(dbxrefs, basics=args.basics, hierarchy=args.hierarchy, wikipedia=args.wikipedia,\n literature=args.literature, cross_references=args.cross_references, overlaps=args.overlaps)\n print(json.dumps(documents, sort_keys=True, indent=4))\n\n\ndef retrieve(dbxrefs, basics=True, hierarchy=True, wikipedia=True, literature=True, cross_references=True, overlaps=True):\n \"\"\"Retrieve json document from InterPro REST api, filter information by selected Options and parse into new json\"\"\"\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n # Construct URL for retrieve\n json_url = entry['locations']['json'][0]\n logger.debug('URL: %s', json_url)\n r = requests.get(json_url)\n logger.debug('Content: %s', r.text)\n ipro = json.loads(r.text)\n\n # Parse retrieved json file by selected Options\n output = {\"id\": entry[\"dbxref\"]}\n if basics:\n try:\n output.update(accession=ipro[\"metadata\"][\"accession\"], entry_type=ipro[\"metadata\"][\"type\"],\n description=ipro[\"metadata\"][\"description\"], counters=ipro[\"metadata\"][\"counters\"],\n entry_id=ipro[\"metadata\"][\"entry_id\"], name=ipro[\"metadata\"][\"name\"],\n source_database=ipro[\"metadata\"][\"source_database\"])\n except KeyError:\n logger.warning(\"One or more basic information were not available for the given entry. 
Please check your output.\")\n if hierarchy:\n try:\n output.update(hierarchy=ipro[\"metadata\"][\"hierarchy\"])\n except KeyError:\n logger.warning(\"Hierarchy information was not available for the given entry.\")\n if wikipedia:\n try:\n output.update(wikipedia=ipro[\"metadata\"][\"wikipedia\"])\n except KeyError:\n logger.warning(\"Wikipedia articel were not available for the given entry.\")\n if literature:\n try:\n output.update(literature=ipro[\"metadata\"][\"literature\"])\n except KeyError:\n logger.warning(\"Literature was not available for the given entry.\")\n if cross_references:\n try:\n output.update(cross_references=ipro[\"metadata\"][\"cross_references\"])\n except KeyError:\n logger.warning(\"Cross_references were not available for the given entry.\")\n if overlaps:\n try:\n output.update(overlaps=ipro[\"metadata\"][\"overlaps_with\"])\n except KeyError:\n logger.warning(\"Overlap information was not available for the given entry.\")\n documents.append(output)\n return documents\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "3271124",
"language": "Python",
"matching_score": 3.0079078674316406,
"max_stars_count": 1,
"path": "dbxref/retrieve/interpro.py"
},
{
"content": "import unittest\nfrom dbxref.retrieve import interpro\n\n\nclass TestIPro(unittest.TestCase):\n\n # Test if ipro retriever gives any output\n def test_output(self):\n documents = interpro.retrieve([{'db': 'InterPro', 'id': 'IPR000003'}], basics=True, hierarchy=True, wikipedia=True,\n literature=True, cross_references=True, overlaps=True)\n self.assertTrue(documents)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "2165153",
"language": "Python",
"matching_score": 0.06617185473442078,
"max_stars_count": 1,
"path": "tests/test_interpro.py"
},
{
"content": "#!/usr/bin/env python3\n\nimport dbxref.resolver\nimport requests\nimport logging\nimport json\nimport argparse\n\nlogger = logging.getLogger(__name__)\nNO_INFO = \"NULL\"\n\ndef main():\n \"\"\"main()method for script usage\"\"\"\n parser = argparse.ArgumentParser(description=\"Retrieve Kegg text documents and convert them into json\")\n parser.add_argument(\"--basics\", \"-b\", action=\"store_true\", help=\"Include ID/Entry, names/aliases and definition\")\n parser.add_argument(\"--pathway\", \"-p\", action=\"store_true\", help=\"Include metabolic pathway\")\n parser.add_argument(\"--brite\", \"-br\", action=\"store_true\", help=\"Include hierarchical classifications\")\n parser.add_argument(\"--dbxref_links\", \"-db\", action=\"store_true\", help=\"Include database links in dbxref format\")\n parser.add_argument(\"--genes\", \"-g\", action=\"store_true\", help=\"Include associated genes\")\n parser.add_argument(\"--reference\", \"-ref\", action=\"store_true\", help=\"Include paper reference ID, authors,title \"\n \"and published journal\")\n parser.add_argument(\"--orthology\", \"-o\", action=\"store_true\", help=\"Include ortholog genes\")\n parser.add_argument(\"--motif\", \"-m\", action=\"store_true\", help=\"Include motif\")\n parser.add_argument(\"--formula\", \"-f\", action=\"store_true\", help=\"Include chemical formula\")\n parser.add_argument(\"--reaction\", \"-r\", action=\"store_true\", help=\"Include chemical reaction partners\")\n parser.add_argument(\"dbxrefs\", nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n if None not in (args.basics, args.pathway, args.brite, args.dbxref_links, args.genes, args.reference,\n args.orthology, args.motif, args.formula, args.reaction):\n # if nothing specified, output all available information for the entry\n args.basics = True\n args.pathway = True\n args.brite = True\n args.dbxref_links = True\n args.genes = True\n args.reference = True\n args.orthology = True\n args.motif = True\n args.formula = True\n args.reaction = True\n\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)\n\n documents = retrieve(dbxrefs, basics=args.basics, pathway=args.pathway, brite=args.brite, genes=args.genes,\n reference=args.reference, orthology=args.orthology, motif=args.motif, formula=args.formula,\n reaction=args.reaction, dbxrefs_links=args.dbxref_links)\n print(json.dumps(documents))\n\n\ndef retrieve(dbxrefs, basics = True, pathway = True, brite = True, dbxrefs_links = True, genes = False, reference = True, orthology = True, motif = True, formula = True, reaction = True):\n \"\"\"Parse kegg text file and return a list \"documents\" including the extracted information of the given entries. 
\"\"\"\n\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n text_url = entry['locations']['text'][0]\n logger.debug('URL: %s', text_url)\n r = requests.get(text_url)\n logger.debug('Content: %s', r.text)\n lines = r.text.strip().split('\\n')\n output = {} # dictionary with terms as keys and the information of given term as values\n # Sorting the received list 'line' in a dictionary with the terms (f.e.: 'ENTRY', 'NAMES') as keys\n kegg_information = parse_entry(lines)\n # Search dictionary for existence of keywords requested by user.\n # If the keyword is present the data receiving function() is started and is put into the output file\n # Every keyword is a single search, to make the code more robust if keywords are missing\n if basics:\n if \"ENTRY\" in kegg_information:\n entry_information = read_id(kegg_information[\"ENTRY\"])\n output.update({\"id\": entry_information[0]})\n output.update({\"type\": entry_information[1]})\n if len(entry_information) > 3:\n output.update({\"associated organism\": entry_information[2]})\n if \"NAME\" in kegg_information:\n output.update({\"names\": read_information(kegg_information[\"NAME\"])[0].replace(\",\", \"\").split()})\n if \"DEFINITION\" in kegg_information:\n output.update({\"definition\": read_information(kegg_information[\"DEFINITION\"])[0]})\n if \"ORGANISM\" in kegg_information:\n output.update({\"organism\": read_information(kegg_information[\"ORGANISM\"])[0]})\n else:\n logger.warn(\"No Entry\")\n if pathway:\n if \"PATHWAY\" in kegg_information:\n output.update({\"pathways\": read_information(kegg_information[\"PATHWAY\"])})\n else:\n logger.warn(\"No Pathway\")\n if genes:\n if \"GENES\" in kegg_information:\n output.update({\"genes\": read_information(kegg_information[\"GENES\"])})\n else:\n logger.warn(\"No Genes\")\n if orthology:\n if \"ORTHOLOGY\" in kegg_information:\n output.update({\"ortholog genes\": read_information(kegg_information[\"ORTHOLOGY\"])})\n else:\n logger.warn(\"No orthology\")\n if motif:\n if \"MOTIF\" in kegg_information:\n output.update({\"motif\": read_information(kegg_information[\"MOTIF\"])})\n else:\n logger.warn(\"No motif\")\n if formula:\n if \"FORMULA\" in kegg_information:\n output.update({\"chemical formula\": read_information(kegg_information[\"FORMULA\"][0])})\n else:\n logger.warn(\"No formula\")\n if reaction:\n if \"REACTION\" in kegg_information:\n output.update({\"reaction partners\": read_information(kegg_information[\"REACTION\"])})\n else:\n logger.warn(\"No reaction\")\n if brite:\n if \"BRITE\" in kegg_information:\n output.update({\"brite\": read_brite(kegg_information[\"BRITE\"])})\n else:\n logger.warn(\"No brite\")\n if reference:\n if \"REFERENCE\" in kegg_information:\n output.update({\"reference\": read_reference(kegg_information[\"REFERENCE\"])})\n else:\n logger.warn(\"No reference\")\n if dbxrefs_links:\n if \"DBLINKS\" in kegg_information:\n output.update({\"dbxref_links\": read_dbxrefs(kegg_information[\"DBLINKS\"])})\n else:\n logger.warn(\"No dbxref_links\")\n documents.append(output)\n return documents\n\n\ndef parse_entry(lines):\n \"\"\"Parses the entire entry document (text) and returns a dictionary containing the left indented titles as keys with\n the corresponding lines in a list of strings as values. \"kegg_information\" contains the entire information of the\n given text document, no information is dismissed, even when it might not be used later (f.e. 
it was not requested).\n \"\"\"\n # expected input (example):\n # ENTRY K00768 KO\n # NAME E2.4.2.21, cobU, cobT\n # DEFINITION nicotinate-nucleotide--dimethylbenzimidazole phosphoribosyltransferase [EC:2.4.2.21]\n # PATHWAY ko00860 Porphyrin and chlorophyll metabolism\n # ko01100 Metabolic pathways\n # MODULE M00122 Cobalamin biosynthesis, cobinamide => cobalamin\n # BRITE KEGG Orthology (KO) [BR:ko00001]\n # 09100 Metabolism\n # 09108 Metabolism of cofactors and vitamins\n # 00860 Porphyrin and chlorophyll metabolism\n # K00768 E192.168.127.12, cobU, cobT; nicotinate-nucleotide--dimethylbenzimidazole phosphoribosyltransferase\n #\n # expected output (example):\n # kegg_information = {\n # 'ENTRY': ['ENTRY K00768 KO']\n # 'NAME': ['NAME E2.4.2.21, cobU, cobT']\n # 'DEFINITION': [DEFINITION nicotinate-nucleotide--dimethylbenzimidazole phosphoribosyltransferase [EC:2.4.2.21]]\n # 'PATHWAY': ['PATHWAY ko00860 Porphyrin and chlorophyll metabolism',\n # ' ko01100 Metabolic pathways']\n # 'MODULE': ['MODULE M00122 Cobalamin biosynthesis, cobinamide => cobalamin']\n # 'BRITE': ['BRITE KEGG Orthology (KO) [BR:ko00001',\n # ' 09100 Metabolism',\n # ' 09108 Metabolism of cofactors and vitamins',\n # ' 00860 Porphyrin and chlorophyll metabolism',\n # ' K00768 E2.4.2.21, cobU, cobT; nicotinate-nucleotide--dimethylbenzimidazole phosphoribosyltransferase']\n # }\n\n kegg_information = {}\n keyword = \"\"\n information = []\n for line in lines:\n if line[:2].isspace():\n information.append(line)\n else:\n if line.startswith(\"///\"): # last line of document is always \"///\"\n if keyword in kegg_information:\n kegg_information[keyword].append(information)\n else:\n kegg_information.update({keyword: [information]})\n elif line[0:4].isupper():\n if len(keyword) and len(information) != 0:\n if keyword in kegg_information:\n kegg_information[keyword].append(information)\n else:\n kegg_information.update({keyword: [information]})\n else:\n pass\n split_line = line.split(\" \", 1) # slice the keyword\n keyword = split_line[0]\n information = [line]\n return kegg_information\n\n\ndef read_id(entry):\n \"\"\"Parse entry information (id, type and associated organism) as dictionaries\"\"\"\n # expected input (example):\n # kegg_information['ENTRY']: [\n # 'ENTRY 10458 CDS T01001'\n # ]\n #\n # expected output (example):\n # entry_id = ['10458']\n # entry_type = ['CDS']\n # associated_organism = \"T01001\"\n\n information = read_information(entry)[0].split()\n entry_id = information[1]\n entry_type = information[2]\n associated_organism = \"\"\n if len(information) > 3:\n associated_organism = information[3]\n return entry_id, entry_type, associated_organism\n else:\n return entry_id, entry_type\n\n\ndef read_reference(entry):\n \"\"\"Parse reference information(pmid, authors, title and journal as keys with corresponding value) as a dictionary\"\"\"\n # expected input (example):\n # kegg_information['REFERENCE']:\n # [\n # 'REFERENCE PMID:11939774'\n # 'AUTHORS <NAME>, <NAME>, <NAME>, <NAME>, <NAME>'\n # 'TITLE Three-dimensional structure of the L-threonine-O-3-phosphate\n # decarboxylase (CobD) enzyme from Salmonella enterica.'\n # 'JOURNAL Biochemistry 41:4798-808 (2002)'\n # 'DOI:10.1021/bi012111w'\n # ],\n # [\n # 'REFERENCE PMID:11939774',\n # 'AUTHORS <NAME>, <NAME>, <NAME>, <NAME>, <NAME>'\n # 'TITLE Three-dimensional structure of the L-threonine-O-3-phosphate\n # decarboxylase (CobD) enzyme from Salmonella enterica.'\n # 'JOURNAL Biochemistry 41:4798-808 (2002)'\n # 'DOI:10.1021/bi012111w'\n # ]\n #\n # 
expected output (example):\n # reference output = [\n # {\n # 'dbxref': 'PMID:11939774',\n # 'authors': ['<NAME>', '<NAME>', '<NAME>', 'Escalante-Semerena JC', '<NAME>']\n # 'title': 'Three-dimensional structure of the L-threonine-O-3-phosphate decarboxylase (CobD)\n # enzyme from Salmonella enterica.'\n # 'journal': 'Biochemistry 41:4798-808 (2002)'\n # 'DOI': '10.1021/bi012111w'\n # },\n # {\n # 'dbxref': 'PMID:11939774',\n # 'authors': ['<NAME>', '<NAME>', '<NAME>', 'Escalante-Semerena JC', '<NAME>']\n # 'title': 'Three-dimensional structure of the L-threonine-O-3-phosphate decarboxylase (CobD)\n # enzyme from Salmonella enterica.'\n # 'journal': 'Biochemistry 41:4798-808 (2002)'\n # 'DOI': '10.1021/bi012111w'\n # }\n # ]\n\n reference_output = []\n for lines in entry:\n next_reference = {\"dbxref\": \"\", \"authors\": \"\", \"title\": \"\", \"journal\": \"\",\n \"doi\": \"\"} # Create a new Dictionary with empty values\n for line in lines:\n if line.startswith(\"REFERENCE\"):\n next_reference[\"dbxref\"] = \"\".join(line.strip().split(\" \", )[-1].replace(\"[\", \"\").replace(\"]\", \"\"))\n if line.startswith(\" AUTHORS\"):\n next_reference[\"authors\"] = \" \".join(line.split()[1:])\n if line.startswith(\" TITLE\"):\n next_reference[\"title\"] = \" \".join(line.split()[1:])\n if line.startswith(\" JOURNAL\"):\n next_reference[\"journal\"] = \" \".join(line.split()[1:])\n if line.strip().startswith(\"DOI:\"):\n next_reference[\"DOI\"] = line.split(\":\")[1:]\n reference_output.append(next_reference)\n return reference_output\n\n\ndef read_brite(entry):\n \"\"\"Parse brite information as an adjacency dictionary containing a list of vertices and a list of edges.\n The combination of the two lists yields a directed, unweighted, acyclic and labeled Graph g=(v,e). 
The labels of\n the vertices are included in the list of vertices (\"vertices) and include the scientific name\"\"\"\n # expected input (example):\n # kegg_information[\"BRITE\"] = [\n # [\"BRITE KEGG Orthology (KO) [BR:hsa00001]\"],\n # [\" 09140 Cellular Processes\"],\n # [\" 09144 Cellular community - eukaryotes\"],\n # [\" 04520 Adherens junction\"],\n # [\" 10458 (BAIAP2)\"],\n # [\" 09142 Cell motility\"]\n # [\" 04810 Regulation of actin cytoskeleton\"]\n # [\" 10458 (BAIAP2)\"]\n # ]\n #\n # expected output (example):\n # tree = {\"vertices\": [\"KEGG Orthology (KO) [BR:hsa00001]\", \"09140 Cellular Processes\",\n # \"09144 Cellular community - eukaryotes\", \"04520 Adherens junction\",\n # \"04520 Adherens junction\", \"10458 (BAIAP2)\", \"09142 Cell motility\",\n # \"04810 Regulation of actin cytoskeleton\", \"10458 (BAIAP2)\"],\n # \"edges\": {\"0\": [\"1\"],\n # \"1\": [\"2\", \"5\"],\n # \"2\": [\"3\"],\n # \"3\": [\"4\"],\n # \"4\": [\"],\n # \"5\": [\"6\"]\n # \"6\": [\"7\"]\n # \"7\": []\n # }\n # }\n\n tree = {}\n # create list of vertices containing the labels of the graph\n vertices = []\n for lines in entry:\n for line in lines:\n vertices.append(\" \".join(line.replace(\"BRITE\", \"\").split()))\n\n # create a dictionary 'edges' containing a key for every label in 'vertices' with an empty list[] as value that\n # gets filled in the following progress\n edges = {str(i): [] for i, _ in enumerate(vertices)}\n stack = [] # create a list that will be used as a stack (first in, last out)\n for lines in entry:\n for line in lines:\n depth = get_depth(line)-12 # save amount of whitespace as depth, depth 0 means 12 whitespaces in front\n if depth <= 0: # new root\n stack = [(\" \".join(line.replace(\"BRITE\", \"\").split()), len(stack))] # empty entire stack, set new root\n else: # not a root = is a branch\n new_branch = (\" \".join(line.split()), len(stack)) # save branch with label and depth\n if depth <= len(stack): # line is a branch not from the line above\n stack = stack[:depth] # stack is emptied until depth > len(stack)\n else: # line is a new branch of the branch above\n pass\n stack.append(new_branch)\n if len(stack) == 1: # only root in stack\n pass\n else: # more than root in stack\n # new adjacency is saved in 'edges' under the corresponding key that has the connection to a new branch\n edges[str(vertices.index(stack[-2][0]))].append(str(vertices.index(stack[-1][0])))\n tree.update({\"vertices\": vertices})\n tree.update({\"edges\": edges})\n return tree\n\n\ndef read_dbxrefs(entry):\n \"\"\"Parse db_links and return a list of dbxrefs\"\"\"\n # expected input (example):\n # kegg_information[\"DBLINKS\"] = [\n # 'DBLINKS PubChem: 4509',\n # 'ChEBI: 17950',\n # 'LIPIDMAPS: LMSP0501AB00',\n # 'LipidBank: GSG1147'\n # ]\n #\n # expected output (example):\n # dbxref_id = [\n # 'PubChem:4509',\n # 'ChEBI:17950',\n # 'LIPIDMAPS:LMSP0501AB00',\n # 'LipidBank:GSG1147'\n # ]\n\n dbxref_id = []\n for lines in entry:\n for line in lines:\n line = line.strip().split()\n if \"DBLINKS\" in line[0]:\n for word in line[2:]:\n dbxref_tuple = (line[1], word)\n dbxref_id.append(\"\".join(dbxref_tuple))\n else:\n for word in line[1:]:\n dbxref_tuple = (line[0], word)\n dbxref_id.append(\"\".join(dbxref_tuple))\n return dbxref_id\n\n\ndef read_information(entry):\n \"\"\"Parse given key-values information by deleting whitespace and joining the information into a list\"\"\"\n # expected input (example):\n # kegg_information[\"\"]: [\n # 'PATHWAY ko00860 Porphyrin and chlorophyll 
metabolism'\n # ' ko01100 Metabolic pathways'\n # ]\n #\n # expected output (example):\n # information = ['ko00860 Porphyrin and chlorophyll metabolism',\n # 'ko01100 Metabolic pathways'\n # ]\n\n information = []\n for lines in entry:\n for line in lines:\n information.append(\" \".join(line.replace(\"NAME\", \"\").replace(\"DEFINITION\", \"\").replace(\"\", \"\")\n .replace(\"ORGANISM\", \"\").replace(\"PATHWAY\", \"\").replace(\"GENES\", \"\")\n .replace(\"ORTHOLOGY\", \"\").replace(\"MOTIF\", \"\").replace(\"FORMULA\", \"\")\n .replace(\"REACTION\", \"\").split()))\n return information\n\n\ndef get_depth(string):\n \"\"\"Calculates amount of whitespaces leading the given string and returns int\"\"\"\n # expected input (example):\n # string = [' 09140 Cellular Processes']\n #\n # expected output (example):\n # depth = 13\n\n depth = len(string) - len(string.lstrip(' '))\n return depth\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "804850",
"language": "Python",
"matching_score": 3.1034882068634033,
"max_stars_count": 1,
"path": "dbxref/retrieve/kegg.py"
},
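Editor's note: the kegg.py record above builds its BRITE tree from indentation depth (see read_brite/get_depth). The following is a minimal standalone sketch of that indentation-to-adjacency idea; the function and sample lines are illustrative only and are not part of the dbxref package.

def indent_tree_to_adjacency(lines):
    """Turn indented lines into {'vertices': [...], 'edges': {index: [child indices]}}."""
    vertices = [line.strip() for line in lines]
    edges = {str(i): [] for i in range(len(vertices))}
    stack = []  # (vertex index, indent) of the current ancestor chain
    for i, line in enumerate(lines):
        indent = len(line) - len(line.lstrip(' '))
        while stack and stack[-1][1] >= indent:
            stack.pop()                      # drop siblings and deeper branches
        if stack:                            # current line is a child of the stack top
            edges[str(stack[-1][0])].append(str(i))
        stack.append((i, indent))
    return {'vertices': vertices, 'edges': edges}

# Example (mirrors the docstring above):
# indent_tree_to_adjacency(['Root1', '  branch1', '    branch2', '  branch3'])
# -> edges {'0': ['1', '3'], '1': ['2'], '2': [], '3': []}

Unlike read_brite, this sketch keys edges by position rather than by label, which also keeps duplicate labels unambiguous.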
{
"content": "import unittest\nfrom dbxref.retrieve import kegg\n\n\nclass TestKegg(unittest.TestCase):\n\n def test_output(self):\n \"\"\"Test if kegg.py gives any output\"\"\"\n documents = kegg.retrieve([{\"db\": \"KEGG\", \"id\": \"K00121\"}], basics=True, brite=True, pathway=True,\n dbxrefs_links=True, formula=True, reaction=True, genes=True, motif=True,\n orthology=True, reference=True)\n self.assertTrue(documents)\n\n def test_brite_output_1(self):\n # Test parsing and saving of a graph(v,e) in an adjacency list. Tree with one root and one continuous branch\n brite_example_1 = [[\"BRITE Root1\"],\n [\" branch1\"],\n [\" branch2\"],\n [\" Branch3\"],\n [\" BRANCH4\"],\n [\" branch5\"]\n ]\n brite_example_output_1 = {\"vertices\": [\"Root1\", \"branch1\", \"branch2\", \"Branch3\", \"BRANCH4\", \"branch5\"],\n \"edges\": {\"0\": [\"1\"],\n \"1\": [\"2\"],\n \"2\": [\"3\"],\n \"3\": [\"4\"],\n \"4\": [\"5\"],\n \"5\": []\n }\n }\n self.assertEqual(kegg.read_brite(brite_example_1), brite_example_output_1)\n\n # Test parsing and saving of a graph(v,e) in an adjacency list. Tree with one root but two branches.\n brite_example_2 = [[\"BRITE Root1\"],\n [\" branch1\"],\n [\" branch2\"],\n [\" Branch3\"],\n [\" BRANCH4\"],\n [\" branch5\"]\n ]\n brite_example_output_2 = {\"vertices\": [\"Root1\", \"branch1\", \"branch2\", \"Branch3\", \"BRANCH4\", \"branch5\"],\n \"edges\": {\"0\": [\"1\", \"3\"],\n \"1\": [\"2\"],\n \"2\": [],\n \"3\": [\"4\"],\n \"4\": [\"5\"],\n \"5\": []\n }\n }\n self.assertEqual(kegg.read_brite(brite_example_2), brite_example_output_2)\n\n # Test parsing and saving of a graph(v,e) in an adjacency list. Tree with a second root and separate branches\n brite_example_3 = [[\"BRITE Root1\"],\n [\" branch1\"],\n [\" branch2\"],\n [\" Root2\"],\n [\" BRANCH4\"],\n [\" branch5\"]\n ]\n brite_example_output_3 = {\"vertices\": [\"Root1\", \"branch1\", \"branch2\", \"Root2\", \"BRANCH4\", \"branch5\"],\n \"edges\": {\"0\": [\"1\"],\n \"1\": [\"2\"],\n \"2\": [],\n \"3\": [\"4\"],\n \"4\": [\"5\"],\n \"5\": []\n }\n }\n self.assertEqual(kegg.read_brite(brite_example_3), brite_example_output_3)\n\n # Test parsing and saving of a graph(v,e) in an adjacency list. Tree with one root and branch, bu multiple leafs\n brite_example_4 = [[\"BRITE Root1\"],\n [\" branch1\"],\n [\" branch2\"],\n [\" Branch3\"],\n [\" BRANCH4\"],\n [\" branch5\"]\n ]\n brite_example_output_4 = {\"vertices\": [\"Root1\", \"branch1\", \"branch2\", \"Branch3\", \"BRANCH4\", \"branch5\"],\n \"edges\": {\"0\": [\"1\"],\n \"1\": [\"2\", \"3\", \"4\", \"5\"],\n \"2\": [],\n \"3\": [],\n \"4\": [],\n \"5\": []\n }\n }\n self.assertEqual(kegg.read_brite(brite_example_4), brite_example_output_4)\n\n # Test parsing and saving of a graph(v,e) in an adjacency list. Tree with a mix of above testing methods\n brite_example_5 = [[\"BRITE Root1\"],\n [\" branch1\"],\n [\" branch2\"],\n [\" Branch3\"],\n [\" BRANCH4\"],\n [\" branch5\"],\n [\" Branch6\"],\n [\" Branch7\"],\n [\" Branch8\"],\n [\" Branch9\"]\n ]\n brite_example_output_5 = {\"vertices\": [\"Root1\", \"branch1\", \"branch2\", \"Branch3\", \"BRANCH4\", \"branch5\",\n \"Branch6\", \"Branch7\", \"Branch8\", \"Branch9\"],\n \"edges\": {\"0\": [\"1\"],\n \"1\": [\"2\", \"8\"],\n \"2\": [\"3\"],\n \"3\": [\"4\", \"6\", \"7\"],\n \"4\": [\"5\"],\n \"5\": [],\n \"6\": [],\n \"7\": [],\n \"8\": [\"9\"],\n \"9\": []\n }\n }\n self.assertEqual(kegg.read_brite(brite_example_5), brite_example_output_5)\n\n\nif __name__ == '__main__':\n unittest.main()",
"id": "10626070",
"language": "Python",
"matching_score": 0.7010523676872253,
"max_stars_count": 1,
"path": "tests/test_kegg.py"
},
{
"content": "#!/usr/bin/env python3\n\nimport dbxref.resolver\nimport requests\nimport logging\nimport json\nimport argparse\nimport re\nimport lxml.html as HTML\n\nlogger = logging.getLogger(__name__)\n\ndef main():\n parser = argparse.ArgumentParser(description='Retrieve enzyme text documents for dbxrefs and convert them into json')\n parser.add_argument('--basic', '-b', action='store_true', help='Include id, definition, name and synonyms')\n parser.add_argument('--references', '-r', action='store_true', help='Include id, uniprot dbxrefs')\n parser.add_argument('dbxrefs', nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n # Enable all options by default if they are not set\n if not args.basic and not args.references:\n args.basic = True\n args.references = True\n\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)\n\n documents = retrieve(dbxrefs, basic=args.basic, references=args.references)\n print(json.dumps(documents))\n\ndef retrieve(dbxrefs, basic=True, references=True):\n \"\"\"Retrieve the data for the dbxrefs and return a list\"\"\"\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n txt_url = entry['locations']['text'][0]\n logger.debug('URL: %s', txt_url)\n r = requests.get(txt_url)\n logger.debug('Content: %s', r.text)\n try:\n # We expect a plain text document\n # check if the document returned is a html document\n # if it is something went from and we assume that\n # it is a error page.\n ls = r.text.replace('\\n', ' ')\n html = HTML.document_fromstring(ls).head.text_content()\n # when everything is fine an exception was thrown for\n # the last line\n output = {'id': entry['dbxref']}\n output['status'] = 'not found'\n documents.append(output)\n except:\n retrieved_entry = parse_flat_file(r.text)\n retrieved_entry['id'] = entry['dbxref']\n documents.append(retrieved_entry)\n return documents\n\n\ndef parse_flat_file(text): \n lines = text.split('\\n')\n\n comment = \"\"\n reaction = \"\"\n output = {}\n refs = []\n for line in lines:\n line_elements = line.strip().split(' ')\n if line_elements[0] == 'DE':\n output['name'] = line_elements[1]\n if line_elements[0] == 'AN':\n if 'alternative_names' in output:\n output['alternative_names'].append(line_elements[1])\n else:\n output['alternative_names'] = [line_elements[1]]\n if line_elements[0] == 'CA':\n if re.match(re.compile('^\\(\\d+\\) '), line_elements[1]):\n if len(reaction) == 0:\n reaction += line_elements[1][line_elements[1].find(' ')+1:]\n else:\n if 'reaction_catalyzed' in output:\n output['reaction_catalyzed'].append(reaction)\n else:\n output['reaction_catalyzed'] = [reaction]\n reaction = line_elements[1][line_elements[1].find(' ')+1:]\n else:\n if len(reaction) == 0:\n reaction = line_elements[1]\n else:\n reaction = reaction + \" \" + line_elements[1]\n if line_elements[0] == 'CF':\n if 'cofactors' in output:\n output['cofactors'].append(line_elements[1])\n else:\n output['cofactors'] = [line_elements[1]]\n if line_elements[0] == 'CC':\n if \"-!-\" in line_elements[1]:\n if len(comment) == 0:\n comment += line_elements[1][4:]\n else:\n if 'comments' in output:\n output['comments'].append(comment)\n else:\n output['comments'] = [comment]\n comment = line_elements[1][4:]\n else:\n comment += line_elements[2]\n if line_elements[0] == 'PR':\n link = line_elements[1].replace(';', '').split()\n if 'prosite' in output:\n output['prosite'].append(link[1])\n else:\n output['prosite'] = [link[1]]\n if line_elements[0] == 'DR':\n for i in range(1, 
len(line_elements)):\n for e in line_elements[i].split('; '):\n if len(e) > 1:\n l = e.split(', ')\n l[1] = l[1].replace(' ', '')\n l[1] = l[1].replace(';', '')\n refs.append('UniProtKB/Swiss-Prot:' + l[0])\n output['dbxrefs'] = refs\n if len(reaction) > 0:\n if 'reaction_catalyzed' in output:\n output['reaction_catalyzed'].append(reaction)\n else:\n output['reaction_catalyzed'] = [reaction]\n if len(comment) > 0:\n if 'comments' in output:\n output['comments'].append(comment)\n else:\n output['comments'] = [comment]\n return output\n\n\ndef read_basic(d):\n out = {}\n definition = {}\n if 'message' in d:\n out['message'] = d['message']\n if 'name' in d:\n out['name'] = d['name']\n if 'alternative_names' in d:\n out['synonyms'] = d.pop('alternative_names')\n if 'reaction_catalyzed' in d:\n definition['reaction_catalyzed'] = d['reaction_catalyzed']\n if 'cofactors' in d:\n definition['cofactors'] = d['cofactors']\n if 'comments' in d:\n definition['comments'] = d['comments']\n if len(definition) == 1:\n out['definition'] = definition[0]\n elif len(definition) > 1:\n out['definition'] = definition\n return (out)\n\ndef format_output(d, basic, references):\n out = {'id': d['dbxref']}\n if basic:\n out.update(read_basic(d))\n if references:\n out['dbxrefs'] = d['dbxrefs']\n if not basic and not references:\n out.update(read_basic(d))\n if 'dbxrefs' in d:\n out['dbxrefs'] = d['dbxrefs']\n return (out)\n\nif __name__ == '__main__':\n main()\n",
"id": "7255765",
"language": "Python",
"matching_score": 2.685051918029785,
"max_stars_count": 1,
"path": "dbxref/retrieve/enzyme.py"
},
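Editor's note: a hedged usage sketch for parse_flat_file() in the enzyme.py record above, run on a tiny hand-written ENZYME-style snippet (illustrative values, not real database output). It assumes the dbxref package from this repository and its dependencies are importable.

from dbxref.retrieve import enzyme

sample = "\n".join([
    "DE   Alcohol dehydrogenase.",
    "AN   Aldehyde reductase.",
    "CA   A primary alcohol + NAD(+) = an aldehyde + NADH.",
    "CF   Zn(2+).",
    "CC   -!- Acts on a wide range of primary alcohols.",
    "//",
])

parsed = enzyme.parse_flat_file(sample)
# The keys present depend on which line types occur in the input, e.g.
# 'name', 'alternative_names', 'reaction_catalyzed', 'cofactors', 'comments'.
print(parsed)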
{
"content": "#!/usr/bin/env python3\n\nimport dbxref.resolver\nimport requests\nimport logging\nimport json\nimport argparse\nimport lxml.html as HTML\n\nlogger = logging.getLogger(__name__)\n\ndef main():\n parser = argparse.ArgumentParser(description='Retrieve sequence ontology csv documents for dbxrefs and convert them into json')\n parser.add_argument('--basic', '-b', action='store_true', help='Include id, definition, name and synonyms')\n parser.add_argument('--relations', '-r', action='store_true', help='Include id, parents and children')\n parser.add_argument('dbxrefs', nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n if not (args.basic or args.relations):\n args.basic = True\n args.relations = False\n\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)\n\n documents = retrieve(dbxrefs, basic=args.basic, relations=args.relations)\n print(json.dumps(documents))\n\ndef retrieve(dbxrefs, basic=True, relations=False):\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n obo_url = entry['locations']['obo'][0]\n logger.debug('URL: %s', obo_url)\n r = requests.get(obo_url)\n logger.debug('Content: %s', r.text)\n lines = r.text.strip().split('\\n')\n elements = []\n output = {}\n d = {}\n for line in lines:\n if line == '[Term]' and len(elements) > 0:\n d = resolve_elements(elements)\n if 'id' in d and d['id'] == entry['dbxref']:\n output = format_output(d)\n else:\n d = {}\n elements = []\n else:\n elements.append(line.strip())\n output = {'id': entry['dbxref']}\n html = HTML.document_fromstring(r.text.replace('\\n', ' '))\n if len(html) > 1:\n output['message'] = 'an error occurred'\n if html.head.text_content() == ' 500 Internal Server Error ':\n output['message'] = '500 Internal Server Error; probably invalid ID'\n else:\n d = resolve_elements(elements)\n if basic:\n output.update(format_output(d))\n if relations:\n output['relations'] = resolve_relations(entry)\n documents.append(output)\n return documents\n\ndef resolve_relations(entry):\n tsv_url = entry['locations']['tsv'][0]\n r = requests.get(tsv_url)\n lines = r.text.strip().split('\\n')\n lines[0] = lines[0].split('\\t')\n lines[1] = lines[1].split('\\t')\n dic = {'parents': []}\n if lines[1][3] != '':\n dic['parents'] = lines[1][3].split(',')\n if len(lines[1]) == 5:\n dic['children'] = lines[1][4].split(',')\n else:\n dic['children'] = []\n return (dic)\n\ndef resolve_elements(es):\n dict = {}\n for element in es:\n if len(element) > 0:\n if element.split(': ')[0] in dict:\n dict[element.split(': ')[0]].append(element.split(': ')[1])\n else:\n dict[element.split(': ')[0]] = [element.split(': ')[1]]\n for key in dict.keys():\n if key != 'synonym' and len(dict[key]) == 1:\n dict[key] = dict[key][0]\n return (dict)\n\ndef format_output(d):\n out = {}\n if 'def' in d:\n de = d['def'].split('\" ')\n de = de[0].replace('\"', '')\n de = de.replace('\\\\', '')\n out['definition'] = de\n else:\n out['definition'] = \"\"\n if 'name' in d:\n out['name'] = d['name'].replace('_', ' ')\n else:\n out['name'] = \"\"\n if 'namespace' in d:\n out['namespace'] = d['namespace']\n else:\n out['namespace'] = \"\"\n if 'synonym' in d:\n out['synonyms'] = []\n for synonym in d['synonym']:\n sy = synonym.split('\" ')\n sy[0] = sy[0].replace('\\\\', '')\n sy[0] = sy[0].replace('\"', '')\n sy[1] = sy[1].replace('[', '')\n sy[1] = sy[1].replace(']', '')\n sy[1] = sy[1].replace(' ', '')\n out['synonyms'].append({'name': sy[0], 'type': sy[1].lower()})\n return out\n\nif __name__ 
== \"__main__\":\n main()\n",
"id": "3987951",
"language": "Python",
"matching_score": 3.7864174842834473,
"max_stars_count": 1,
"path": "dbxref/retrieve/sequence_ontology.py"
},
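Editor's note: a hedged usage sketch for resolve_elements() in the sequence_ontology.py record above, applied to a hand-written OBO [Term] stanza (illustrative values, not real Sequence Ontology data). Assumes the dbxref package and its dependencies are importable.

from dbxref.retrieve import sequence_ontology

term_lines = [
    "id: SO:0000110",
    "name: sequence_feature",
    "namespace: sequence",
    'synonym: "located sequence feature" [EXACT]',
    'synonym: "sequence feature" [EXACT]',
]

d = sequence_ontology.resolve_elements(term_lines)
# Single-valued keys are unpacked from their one-element lists, while every
# 'synonym' stays a list -- which is what format_output() above relies on.
print(d["name"], len(d["synonym"]))  # sequence_feature 2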
{
"content": "#!/usr/bin/env python3\nimport dbxref.resolver\nimport requests\nimport logging\nimport json\nimport argparse\n\nlogger = logging.getLogger(__name__)\n\ndef main():\n parser = argparse.ArgumentParser(description='Retrieve gene ontology documents for dbxrefs and convert them into json')\n parser.add_argument('--basic', '-b', action='store_true', help='Include id, definition, name and synonyms')\n parser.add_argument('--relations', '-r', action='store_true', help='Include id, parents and children')\n parser.add_argument('dbxrefs', nargs=argparse.REMAINDER)\n args = parser.parse_args()\n if not args.basic and not args.relations:\n args.basic = True\n args.relations = False\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)\n\n documents = retrieve(dbxrefs, basic=args.basic, relations=args.relations)\n print(json.dumps(documents))\n\ndef retrieve(dbxrefs, basic=True, relations=False):\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n json_url = entry['locations']['json'][0]\n logger.debug('URL: %s', json_url)\n r = requests.get(json_url)\n logger.debug('Content: %s', r.text)\n d = json.loads(r.text)\n output = {'id': entry['dbxref']}\n if 'messages' in d:\n output['message'] = '; '.join(d['messages'])\n else:\n if len(d['results']) > 0:\n if basic:\n output.update(read_basic(d))\n if relations:\n output.update(read_relations(d))\n else: \n output['message'] = \"no results found, probably invalid ID\"\n documents.append(output)\n return documents\n\ndef read_basic(d):\n out = {'definition': d['results'][0]['definition']['text'], 'synonyms': []}\n out['name'] = d['results'][0]['name']\n if 'aspect' in d['results'][0]:\n out['aspect'] = d['results'][0]['aspect']\n if 'synonyms' in d['results'][0]:\n out['synonyms'] = d['results'][0]['synonyms']\n return (out)\n\ndef read_relations(d):\n out = {'relations': {'children': [], 'parents': []}}\n if 'children' in d['results'][0]:\n out['relations']['children'] = d['results'][0]['children']\n for child in out['relations']['children']:\n child['type'] = child.pop('relation')\n if 'history' in d['results'][0]:\n out['relations']['parents'] = parse_history(d['results'][0]['history'])\n return (out)\n\ndef parse_history(h):\n out = []\n for history in reversed(h):\n if history['category'] == \"RELATION\":\n if history['action'] == \"Updated\" or history['action'] == \"Added\":\n out.append(history)\n if history['action'] == \"Deleted\":\n for i in reversed(range(len(out))):\n if out[i]['text'] == history['text']:\n del out[i]\n break\n for i in range(len(out)):\n out[i] = parse_text(out[i]['text'])\n return (out)\n\ndef parse_text(t):\n words = t.split(' ')\n type = ''\n out = {}\n for word in words:\n if 'GO:' in word:\n out['id'] = word\n break\n else:\n if type == '':\n type = word\n else:\n type += \"_\" + word\n out['type'] = type\n return (out)\n\nif __name__ == '__main__':\n main()\n",
"id": "2418374",
"language": "Python",
"matching_score": 1.947119951248169,
"max_stars_count": 1,
"path": "dbxref/retrieve/gene_ontology.py"
},
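Editor's note: a hedged usage sketch for parse_text() in the gene_ontology.py record above, which turns a free-text relation entry from the QuickGO history block into a {'id', 'type'} pair. The input string is illustrative.

from dbxref.retrieve import gene_ontology

rel = gene_ontology.parse_text("is a GO:0008150 biological_process")
# Words before the first GO id are joined with underscores into the relation type.
print(rel)  # {'id': 'GO:0008150', 'type': 'is_a'}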
{
"content": "#!/usr/bin/env python3\n\nimport dbxref.resolver\nimport requests\nimport logging\nimport json\nimport argparse\n\nlogger = logging.getLogger(__name__)\n\ndef main():\n \"\"\"For script usage of this retriever\"\"\"\n parser = argparse.ArgumentParser(description='Retrieve taxonomy xml documents for dbxrefs and convert them into json')\n parser.add_argument('--basic', '-b', action='store_true', help='Include dbxref, scientificName, commonName, lineage and rank')\n parser.add_argument('--geneticcodes', '-g', action='store_true', help='Include geneticCode and mitochondrialGeneticCode')\n parser.add_argument('dbxrefs', nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n # set default values\n if not args.basic and not args.geneticcodes:\n args.basic = True\n args.geneticcodes = True\n\n documents = retrieve(args.dbxrefs, basic=args.basic, geneticcodes=args.geneticcodes)\n print (json.dumps(documents))\n\ndef retrieve(dbxrefs, basic=True, geneticcodes=True):\n \"\"\"Retrieve the data for the dbxrefs and return a list\"\"\"\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n json_url = entry['locations']['json'][0]\n logger.debug('URL: %s', json_url)\n r = requests.get(json_url)\n logger.debug('Content: %s', r.text)\n output = {'id': entry['dbxref']}\n d = {}\n try:\n d = json.loads(r.text)\n except:\n pass \n if len(d) > 0:\n if basic:\n output.update(read_basic(d))\n if geneticcodes:\n output.update(read_geneticCodes(d))\n else:\n output['message'] = \"An error occurred! probably invalid ID\"\n documents.append(output)\n return documents\n\ndef read_basic(d):\n out = {}\n if 'scientificName' in d:\n out['scientificName'] = d['scientificName']\n if 'commonName' in d:\n out['commonName'] = d['commonName']\n if 'lineage' in d:\n # the lineage string contains a ';' separated list, that is converted here\n # into a true list\n lineage_list = d['lineage'].split(\"; \")\n if not lineage_list[-1].strip():\n lineage_list = lineage_list[:-1]\n out['lineage'] = lineage_list\n if 'rank' in d:\n out['rank'] = d['rank']\n return (out)\n\ndef read_geneticCodes(d):\n out = {'geneticCodes': {}}\n if 'geneticCode' in d:\n out['geneticCodes']['geneticCode'] = d['geneticCode']\n if 'mitochondrialGeneticCode' in d:\n out['geneticCodes']['mitochondrialGeneticCode'] = d['mitochondrialGeneticCode']\n return (out)\n\nif __name__ == '__main__':\n main()\n",
"id": "1298551",
"language": "Python",
"matching_score": 1.5741220712661743,
"max_stars_count": 1,
"path": "dbxref/retrieve/taxonomy.py"
},
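Editor's note: a hedged usage sketch for read_basic() in the taxonomy.py record above, fed a hand-written ENA-style taxonomy record (illustrative values only).

from dbxref.retrieve import taxonomy

record = {
    "scientificName": "Escherichia coli",
    "commonName": "E. coli",
    "lineage": "Bacteria; Pseudomonadota; Gammaproteobacteria; ",
    "rank": "species",
}

basic = taxonomy.read_basic(record)
# The '; '-separated lineage string becomes a real list, and the trailing empty
# element produced by the final '; ' is dropped.
print(basic["lineage"])  # ['Bacteria', 'Pseudomonadota', 'Gammaproteobacteria']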
{
"content": "#!/usr/bin/env python3\n\nimport dbxref.resolver\nimport requests\nimport logging\nimport json\nimport argparse\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n \"\"\"main()method for script usage\"\"\"\n parser = argparse.ArgumentParser(description=\"Retrieve HAMAP text documents and convert them into dbxref json \"\n \"format.\")\n parser.add_argument(\"--basics\", \"-b\", action=\"store_true\", help=\"Include basic information such as name, type, \"\n \"dbxref, definition and dates\")\n parser.add_argument(\"--matrix\", \"-m\", action=\"store_true\", help=\"Include matrix data\")\n parser.add_argument(\"dbxrefs\", nargs=argparse.REMAINDER)\n args = parser.parse_args()\n\n dbxrefs = dbxref.resolver.convert_to_dbxrefs(args.dbxrefs)\n documents = retrieve(dbxrefs, basics=args.basics, matrix=args.matrix)\n print(json.dumps(documents, sort_keys=True, indent=4))\n\n\ndef retrieve(dbxrefs, matrix=False, basics=True):\n \"\"\"Retrieve text document from expasy/hamap api and parse into json format.\"\"\"\n # example list input:\n # [\n # 'ID 16SrRNA_methyltr_A; MATRIX.',\n # 'AC MF_00607;',\n # 'DT 28-FEB-2005 CREATED; 10-MAY-2017 DATA UPDATE; 01-DEC-2013 INFO UPDATE.',\n # 'DE Ribosomal RNA small subunit methyltransferase A [rsmA].',\n # 'CC /VERSION=10;',\n # 'MA /GENERAL_SPEC: ALPHABET='ACDEFGHIKLMNPQRSTVWY'; LENGTH=311; LOG_BASE=1.071779; P0=0.9972;\n # 'MA P= 7.552363, 1.698108, 5.303439, 6.320015, 4.078187, 6.844419, 2.240667, 5.731561,'\n # 5.941916, 9.343274, 2.356961, 4.531310, 4.927747, 4.024831, 5.158416, 7.224652, 5.747474,\n # 6.524775, 1.251734, 3.199681;',\n # 'MA /DISJOINT: DEFINITION=PROTECT; N1=6; N2=306;'\n # ]\n #\n # example json output:\n # [\n # {\n # \"dates\": {\n # \"created\": \"28-FEB-2005\",\n # \"last_data_update\": \"10-MAY-2017\",\n # \"last_info_update\": \"01-DEC-2013\"\n # },\n # \"dbxref\": \"HM:MF_00607;\",\n # \"definition\": \"Ribosomal RNA small subunit methyltransferase A [rsmA].\",\n # \"matrix\": [\n # \"/GENERAL_SPEC: ALPHABET='ACDEFGHIKLMNPQRSTVWY'; LENGTH=311; LOG_BASE=1.071779; P0=0.9972;\",\n # \"P= 7.552363, 1.698108, 5.303439, 6.320015, 4.078187, 6.844419, 2.240667, 5.731561,\n # 5.941916, 9.343274, 2.356961, 4.531310, 4.927747, 4.024831, 5.158416, 7.224652,\n # 5.747474, 6.524775, 1.251734, 3.199681;\",\n # ],\n # \"name\": \"16SrRNA_methyltr_A\",\n # \"type\": \"MATRIX\"\n # }\n # ]\n\n resolved = dbxref.resolver.resolve(dbxrefs, check_existence=False)\n documents = []\n for entry in resolved:\n text_url = entry['locations']['text'][0]\n logger.debug('URL: %s', text_url)\n r = requests.get(text_url)\n logger.debug('Content: %s', r.text)\n lines = r.text.strip().split('\\n')\n output = {\"id\": entry[\"dbxref\"]}\n matrix_list = []\n for line in lines:\n if basics:\n try:\n if line.startswith(\"ID\"):\n output.update({\"name\": line[3:].split(\";\", 1)[0].strip(),\n \"type\": line[3:].split(\";\", 1)[1].replace(\".\", \"\").strip()\n })\n if line.startswith(\"AC\"):\n output.update({\"dbxref\": \"HM:\" + line[3:].strip()})\n if line.startswith(\"DE\"):\n output.update({\"definition\": line[3:].strip()})\n if line.startswith(\"DT\"):\n output.update({\"dates\": read_date(line)})\n except RuntimeError:\n logger.warning(\"Basic information were not or only partly available.\")\n raise\n if matrix:\n try:\n if line.startswith(\"MA\") and matrix:\n matrix_list.append(line.replace(\"MA\", \"\").strip())\n except RuntimeError:\n logger.warning(\"Matrix was not available.\")\n raise\n if matrix and matrix_list:\n try:\n 
output.update({\"matrix\": matrix_list})\n except RuntimeError:\n logger.warning(\"An error occurred regarding the matrix.\")\n raise\n documents.append(output)\n return documents\n\n\ndef read_date(line):\n \"\"\"Function that reads the lines given and parses date of creation and last updates.\"\"\"\n # example string input:\n #\n # DT 28-FEB-2005 CREATED; 10-MAY-2017 DATA UPDATE; 01-DEC-2013 INFO UPDATE.\n #\n # example dictionary output:\n # dates: {\n # \"created\": \"28-FEB-2005\",\n # \"last_data_update\": \"10-MAY-2017\",\n # \"last_info_update\": \"01-DEC-2013\"\n # }\n\n dates = line.split(\";\")\n dates_dic = {}\n for date in dates:\n if \"CREATED\" in date:\n dates_dic.update({\"created\": date.replace(\"DT\", \" \").replace(\"CREATED\", \" \").strip()})\n if \"DATA UPDATE\" in date:\n dates_dic.update({\"last_data_update\": date.split(\"DATA\", 1)[0].strip()})\n if \"INFO UPDATE\" in date:\n dates_dic.update({\"last_info_update\": date.split(\"INFO\", 1)[0].strip()})\n return dates_dic\n\n\nif __name__ == \"__main__\":\n main()\n",
"id": "11597498",
"language": "Python",
"matching_score": 1.8033418655395508,
"max_stars_count": 1,
"path": "dbxref/retrieve/hamap.py"
},
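Editor's note: a hedged usage sketch for read_date() in the hamap.py record above, using the same DT line format shown in its docstring.

from dbxref.retrieve import hamap

dates = hamap.read_date("DT   28-FEB-2005 CREATED; 10-MAY-2017 DATA UPDATE; 01-DEC-2013 INFO UPDATE.")
print(dates)
# {'created': '28-FEB-2005', 'last_data_update': '10-MAY-2017', 'last_info_update': '01-DEC-2013'}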
{
"content": "#!/usr/bin/env python3\n\nimport unittest\nfrom dbxref.retrieve import hamap\n\n\nclass TestHAMAP(unittest.TestCase):\n\n def test_output(self):\n \"\"\"test if HAMAP retriever gives any output, ergo functions in any way\"\"\"\n documents = hamap.retrieve([{\"db\": \"HAMAP\", \"id\": \"HM:MF_00607\"}], matrix=True)\n\n self.assertTrue(documents)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"id": "11718015",
"language": "Python",
"matching_score": 0.9766349792480469,
"max_stars_count": 1,
"path": "tests/test_hamap.py"
},
{
"content": "import unittest\nfrom dbxref.retrieve import pubmed\n\n\nclass TestPubmed(unittest.TestCase):\n\n def test_output(self):\n documents = pubmed.retrieve([{\"db\": \"Pubmed\", \"id\": \"19393038\"}], basics=True)\n self.assertTrue(documents)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"id": "7932596",
"language": "Python",
"matching_score": 1.3688781261444092,
"max_stars_count": 1,
"path": "tests/test_pubmed.py"
},
{
"content": "import unittest\nfrom dbxref.retrieve import rfam\n\n\nclass TestRfam(unittest.TestCase):\n\n def test_output(self):\n documents = rfam.retrieve([{\"db\": \"Rfam\", \"id\": \"RF03094\"}], basics=True, references=True)\n self.assertTrue(documents)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"id": "1030669",
"language": "Python",
"matching_score": 2.947979688644409,
"max_stars_count": 1,
"path": "tests/test_rfam.py"
},
{
"content": "import unittest\nfrom dbxref.retrieve import gi\n\n\nclass TestGI(unittest.TestCase):\n\n def test_output(self):\n documents = gi.retrieve([{\"db\": \"GI\", \"id\": \"P0ABT0\"}], basics=True, dbsource=True, references=True)\n self.assertTrue(documents)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"id": "2310800",
"language": "Python",
"matching_score": 1.1359976530075073,
"max_stars_count": 1,
"path": "tests/test_gi.py"
},
{
"content": "import unittest\nfrom dbxref.retrieve import refseq\n\n\nclass TestRefSeq(unittest.TestCase):\n\n def test_output(self):\n \"\"\"Test if refseq.py gives any output\"\"\"\n documents = refseq.retrieve([{\"db\": \"RefSeq\", \"id\": \"3269\"}], basics=True, taxonomy=True,\n references=True, source_db=True, features_table=True)\n self.assertTrue(documents)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "12630402",
"language": "Python",
"matching_score": 0.47972941398620605,
"max_stars_count": 1,
"path": "tests/test_refseq.py"
},
{
"content": "import unittest\nfrom dbxref import config\n\nclass TestConfig(unittest.TestCase):\n\n def test_load_providers_works(self):\n self.assertNotEqual(config.load_providers(), [])\n\n def test_index_providers(self):\n data = [{'name': 'test', 'prefixes':['a', 'b']}]\n self.assertEqual(config.index_providers(data), {'a': data[0], 'b': data[0]})\n\n def test_normalize_index(self):\n index = {'A': 'some value', 'b': 'some other value'}\n self.assertEqual(config.normalize_index(index), {'a' : 'some value', 'b':'some other value'})\n\n def test_has_provider(self):\n index = config.normalize_index({'A': 'some value', 'b': 'some other value'})\n self.assertTrue(config._has_provider(index, 'B'))\n self.assertTrue(config._has_provider(index, 'a'))\n",
"id": "11760204",
"language": "Python",
"matching_score": 2.4901576042175293,
"max_stars_count": 1,
"path": "tests/test_config.py"
},
{
"content": "def get_providers_path():\n import pkg_resources\n return pkg_resources.resource_filename('dbxref', 'providers.yaml')\n\ndef load_providers():\n return _load_providers(get_providers_path())\n\ndef _load_providers(path):\n import yaml\n data = []\n with open(path) as data_file:\n data = yaml.load(data_file, Loader=yaml.Loader)\n return data\n\ndef load_indexed_providers():\n return normalize_index(index_providers(load_providers()))\n\ndef index_providers(providers):\n index = {}\n for p in providers:\n for db in p['prefixes']:\n index[db] = p\n return index\n\ndef normalize_index(index):\n 'create a new index with lowercase keys'\n return {k.lower():v for (k,v) in index.items()}\n\ndef has_provider(provider):\n return _has_provider(load_indexed_providers(), provider)\n\ndef _has_provider(providers, provider):\n return provider.lower() in providers\n\ndef get_provider(provider):\n return load_indexed_providers()[provider.lower()]\n",
"id": "4326935",
"language": "Python",
"matching_score": 2.179596424102783,
"max_stars_count": 1,
"path": "dbxref/config.py"
}
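Editor's note: a hedged sketch of how the provider index in the config.py record above is built: every prefix of a provider maps back to that provider, and keys are lowercased for case-insensitive lookup. The provider entries here are made up for illustration; only the functions defined above are used.

from dbxref import config

providers = [
    {"name": "ExampleDB", "prefixes": ["XDB", "ExampleDB"]},
    {"name": "OtherDB", "prefixes": ["ODB"]},
]

index = config.normalize_index(config.index_providers(providers))
print(config._has_provider(index, "xdb"))   # True, lookup is case-insensitive
print(index["odb"]["name"])                 # 'OtherDB'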
] | 2.179596 |
npatellumeta | [
{
"content": "# Copyright 2018 BlueCat Networks (USA) Inc. and its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# By: <NAME> (<EMAIL>)\n# <NAME> (<EMAIL>)\n# Date: 06-09-2018\n# Gateway Version: 18.9.1\n# Description: This workflow will provide access to a REST based API for Gateway.\n# Once imported, documentation for the various end points available can\n# be viewed by navigating to /api/v1/.\n\nfrom flask import g, jsonify\nfrom flask_restplus import fields, reqparse, Resource\n\nfrom bluecat import util\nimport config.default_config as config\nfrom .configuration_page import config_doc, config_defaults, entity_parser, entity_model, entity_return_model\nfrom main_app import api\n\n\nview_default_ns = api.namespace('views', path='/views/', description='View operations')\nview_ns = api.namespace('views', path='/configurations/<string:configuration>/views/', description='View operations')\n\nzone_default_root_ns = api.namespace('zones', description='Zone operations')\nzone_root_ns = api.namespace(\n 'zones',\n path='/configurations/<string:configuration>/views/<string:view>/zones/',\n description='Zone operations',\n)\n\nzone_default_ns = api.namespace('zones', description='Zone operations')\nzone_ns = api.namespace(\n 'zones',\n path='/configurations/<string:configuration>/views/<string:view>/zones/',\n description='Zone operations',\n)\n\nhost_default_ns = api.namespace('host_records', description='Host Record operations')\nhost_ns = api.namespace(\n 'host_records',\n path='/configurations/<string:configuration>/views/<string:view>/host_records/',\n description='Host Record operations',\n)\n\nexternal_host_default_ns = api.namespace('host_records', description='External Host Record operations')\nexternal_host_ns = api.namespace(\n 'host_records',\n path='/configurations/<string:configuration>/views/<string:view>/external_host_records/',\n description='External Host Record operations',\n)\n\nhost_zone_default_ns = api.namespace(\n 'host_records',\n path='/zones/<path:zone>/host_records',\n description='Host Record operations',\n)\nhost_zone_ns = api.namespace(\n 'host_records',\n path='/configurations/<string:configuration>/views/<string:view>/zones/<path:zone>/host_records/',\n description='Host Record operations',\n)\n\ncname_default_ns = api.namespace('cname_records', description='CName Record operations')\ncname_ns = api.namespace(\n 'cname_records',\n path='/configurations/<string:configuration>/views/<string:view>/cname_records/',\n description='CName Record operations',\n)\n\ncname_zone_default_ns = api.namespace(\n 'cname_records',\n path='/zones/<path:zone>/cname_records',\n description='CName Record operations',\n)\ncname_zone_ns = api.namespace(\n 'cname_records',\n path='/configurations/<string:configuration>/views/<string:view>/zones/<path:zone>/cname_records/',\n description='CName Record operations',\n)\n\nview_doc = dict(config_doc, view={'in': 'path', 'description': 'View name'})\nzone_doc = dict(view_doc, zone={'in': 'path', 'description': 'Recursive Zone 
name and subzone name'})\nabsolute_name_doc = {'absolute_name': {'in': 'path', 'description': 'The FQDN of the record'}}\nhost_doc = dict(view_doc, **absolute_name_doc)\n\nhost_parser = reqparse.RequestParser()\nhost_parser.add_argument('absolute_name', location=\"json\", required=True, help='The FQDN of the record')\nhost_parser.add_argument(\n 'ip4_address',\n location=\"json\",\n required=True,\n help='The IPv4 addresses associated with the host record',\n)\nhost_parser.add_argument('ttl', type=int, location=\"json\", help='The TTL of the record')\nhost_parser.add_argument('properties', location=\"json\", help='The properties of the record')\n\nhost_patch_parser = host_parser.copy()\nhost_patch_parser.replace_argument(\n 'ip4_address',\n location=\"json\",\n required=False,\n help='The IPv4 addresses associated with the host record',\n)\nhost_patch_parser.remove_argument('absolute_name')\n\ncname_parser = host_parser.copy()\ncname_parser.remove_argument('ip4_address')\ncname_parser.add_argument('linked_record', location=\"json\", help='The name of the record to which this alias will link')\n\ncname_patch_parser = cname_parser.copy()\ncname_patch_parser.remove_argument('absolute_name')\n\nzone_model = api.clone(\n 'zones',\n entity_model\n)\n\nexternal_host_parser = host_parser.copy()\nexternal_host_parser.remove_argument('ip4_address')\nexternal_host_parser.remove_argument('properties')\nexternal_host_parser.remove_argument('ttl')\n\n\nexternal_host_model = api.model(\n 'external_host_records',\n {\n 'absolute_name': fields.String(required=True, description='The FQDN of the external host record')\n },\n)\n\nhost_model = api.model(\n 'host_records',\n {\n 'absolute_name': fields.String(required=True, description='The FQDN of the host record'),\n 'ip4_address': fields.String(description='The IPv4 addresses associated with the host record'),\n 'ttl': fields.Integer(description='The TTL of the host record'),\n 'properties': fields.String(description='The properties of the host record', default='attribute=value|'),\n },\n)\n\nhost_patch_model = api.model(\n 'host_records_patch',\n {\n 'ip4_address': fields.String(description='The IPv4 addresses associated with the host record'),\n 'ttl': fields.Integer(description='The TTL of the host record'),\n 'properties': fields.String(description='The properties of the host record', default='attribute=value|'),\n },\n)\n\ncname_model = api.model(\n 'cname_records',\n {\n 'absolute_name': fields.String(required=True, description='The FQDN of the CName record'),\n 'linked_record': fields.String(\n required=True,\n description='The name of the record to which this alias will link',\n ),\n 'ttl': fields.Integer(description='The TTL of the CName record'),\n 'properties': fields.String(description='The properties of the CName record', default='attribute=value|'),\n },\n)\n\ncname_patch_model = api.model(\n 'cname_records_patch',\n {\n 'linked_record': fields.String(description='The name of the record to which this alias will link'),\n 'ttl': fields.Integer(description='The TTL of the CName record'),\n 'properties': fields.String(description='The properties of the CName record', default='attribute=value|'),\n },\n)\n\ndns_defaults = {'configuration': config.default_configuration, 'view': config.default_view}\n\n\n\n@view_ns.route('/<string:view>/')\n@view_default_ns.route('/<string:view>/', defaults=config_defaults)\n@view_ns.doc(params=view_doc)\n@view_ns.response(200, 'View found.', model=entity_return_model)\nclass View(Resource):\n\n 
@util.rest_workflow_permission_required('rest_page')\n def get(self, configuration, view):\n \"\"\" Get View belonging to default or provided Configuration. \"\"\"\n configuration = g.user.get_api().get_configuration(configuration)\n view = configuration.get_view(view)\n result = view.to_json()\n return result\n\n @util.rest_workflow_permission_required('rest_page')\n def delete(self, configuration, view):\n \"\"\" Delete View belonging to default or provided Configuration. \"\"\"\n configuration = g.user.get_api().get_configuration(configuration)\n view = configuration.get_view(view)\n view.delete()\n return '', 204\n\n\n@zone_ns.route('/<path:zone>/')\n@zone_default_ns.route('/<path:zone>/', defaults=dns_defaults)\n@zone_ns.doc(params=zone_doc)\nclass Zone(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n @zone_ns.response(200, 'Zone found.', model=entity_return_model)\n def get(self, configuration, view, zone):\n \"\"\"\n Get a subzone belonging to default or provided Configuration and View plus Zone hierarchy.\n Subzones can be recursively retrieved by specifying extra \"zones\" parameters.\n Zones should be of the format:\n\n 1. zone_name\n 2. zone_name1/zones/subzone_name2/zones/subzone_name3\n \"\"\"\n configuration = g.user.get_api().get_configuration(configuration)\n zone_parent = configuration.get_view(view)\n zone_hierarchy = zone.split('/zones')\n zone_entity = zone_parent.get_zone(zone_hierarchy[0])\n zone = check_zone_in_path(zone_entity, zone_hierarchy[0], zone_hierarchy[1:], zone_parent)\n if zone is None:\n return 'No matching Zone(s) found', 404\n return zone.to_json()\n\n @util.rest_workflow_permission_required('rest_page')\n def delete(self, configuration, view, zone):\n \"\"\"\n Delete subzone belonging to default or provided Configuration and View plus Zone hierarchy.\n Subzones can be recursively retrieved by specifying extra \"zones\" parameters.\n Zones should be of the format:\n\n 1. zone_name\n 2. zone_name1/zones/subzone_name2/zones/subzone_name3\n \"\"\"\n configuration = g.user.get_api().get_configuration(configuration)\n zone_parent = configuration.get_view(view)\n zone_hierarchy = zone.split('/zones')\n zone_entity = zone_parent.get_zone(zone_hierarchy[0])\n zone = check_zone_in_path(zone_entity, zone_hierarchy[0], zone_hierarchy[1:], zone_parent)\n if zone is None:\n return 'No matching Zone(s) found', 404\n zone.delete()\n return '', 204\n\n\n@zone_root_ns.route('/')\n@zone_ns.route('/<path:zone>/zones/')\n@zone_default_root_ns.route('/', defaults=dns_defaults)\n@zone_default_ns.route('/<path:zone>/zones/', defaults=dns_defaults)\n@zone_ns.doc(params=zone_doc)\nclass ZoneCollection(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n def get(self, configuration, view, zone=None):\n \"\"\"\n Get all direct subzones belonging to default or provided Configuration and View plus Zone hierarchy.\n Subzones can be recursively retrieved by specifying extra \"zones\" parameters.\n Zones should be of the format:\n\n 1. zone_name\n 2. 
zone_name1/zones/zone_name2\n \"\"\"\n configuration = g.user.get_api().get_configuration(configuration)\n zone_parent = configuration.get_view(view)\n leaf_zone = zone_parent\n if zone:\n zone_hierarchy = zone.split('/zones')\n zone_entity = zone_parent.get_zone(zone_hierarchy[0])\n leaf_zone = check_zone_in_path(zone_entity, zone_hierarchy[0], zone_hierarchy[1:], zone_parent)\n if leaf_zone is None:\n return 'No matching Zone(s) found', 404\n zones = leaf_zone.get_zones()\n result = [zone_entity.to_json() for zone_entity in zones]\n return jsonify(result)\n\n @util.rest_workflow_permission_required('rest_page')\n @zone_ns.response(200, 'Zone created.', model=entity_return_model)\n @zone_ns.expect(zone_model)\n def post(self, configuration, view, zone):\n \"\"\"\n Create a zone or subzone belonging to default or provided Configuration and View plus Zone hierarchy.\n Subzones can be recursively retrieved by specifying extra \"zones\" parameters.\n Zones should be of the format:\n\n 1. zone_name\n 2. zone_name1/zones/zone_name2\n \"\"\"\n data = entity_parser.parse_args()\n configuration = g.user.get_api().get_configuration(configuration)\n view = configuration.get_view(view)\n zone_parent = view\n zone_hierarchy = zone.split('/zones')\n zone_entity = zone_parent.get_zone(zone_hierarchy[0])\n zone = check_zone_in_path(zone_entity, zone_hierarchy[0], zone_hierarchy[1:], zone_parent)\n if zone is None:\n return 'No matching Zone(s) found', 404\n zone_name = data['name']\n kwargs = util.properties_to_map(data['properties'])\n zone = view.add_zone('%s.%s' % (zone_name, zone.get_full_name()), **kwargs)\n return zone.to_json(), 201\n\n\ndef check_zone_in_path(zone_entity, pre_path, post_path, zone_parent, delimiter='/zones'):\n \"\"\"Because \"/\" characters can be in the zone name, need to check if the \"/\" in the path is part of zone name or not\n \"\"\"\n if not post_path:\n return zone_entity\n\n if zone_entity is not None:\n new_pre_path = post_path[0]\n if new_pre_path.startswith('/'):\n new_pre_path = new_pre_path[1:]\n sub_zone = zone_entity.get_zone(new_pre_path)\n final_result = check_zone_in_path(sub_zone, new_pre_path, post_path[1:], zone_entity)\n if final_result is not None:\n return final_result\n pre_path += delimiter + post_path.pop(0)\n\n zone = zone_parent.get_zone(pre_path)\n if zone is not None:\n final_result = check_zone_in_path(zone, pre_path, post_path, zone_parent)\n if final_result is not None:\n return final_result\n else:\n return None\n\n\n@host_ns.route('/')\n@host_zone_ns.route('/')\n@host_default_ns.route('/', defaults=dns_defaults)\n@host_zone_default_ns.route('/', defaults=dns_defaults)\nclass HostRecordCollection(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n @host_ns.response(201, 'Host Record successfully created.')\n def get(self, configuration, view, zone=None):\n \"\"\" Get all host records belonging to default or provided Configuration and View plus Zone hierarchy. 
\"\"\"\n configuration = g.user.get_api().get_configuration(configuration)\n view = configuration.get_view(view)\n zone_parent = view\n zone_hierarchy = zone.split('/zones')\n zone_entity = zone_parent.get_zone(zone_hierarchy[0])\n zone = check_zone_in_path(zone_entity, zone_hierarchy[0], zone_hierarchy[1:], zone_parent)\n\n host_records = zone.get_children_of_type(zone.HostRecord)\n result = [host.to_json() for host in host_records]\n return jsonify(result)\n\n @util.rest_workflow_permission_required('rest_page')\n @host_ns.response(201, 'Host Record successfully created.', model=entity_return_model)\n @host_ns.expect(host_model, validate=True)\n def post(self, configuration, view, zone=None):\n \"\"\" Create a host record belonging to default or provided Configuration and View plus Zone hierarchy. \"\"\"\n data = host_parser.parse_args()\n configuration = g.user.get_api().get_configuration(configuration)\n view = configuration.get_view(view)\n\n if zone is None:\n absolute_name = data['absolute_name']\n else:\n zone_parent = view\n zone_hierarchy = zone.split('/zones')\n zone_entity = zone_parent.get_zone(zone_hierarchy[0])\n zone = check_zone_in_path(zone_entity, zone_hierarchy[0], zone_hierarchy[1:], zone_parent)\n absolute_name = data['absolute_name'] + '.' + zone.get_full_name()\n ip4_address_list = data['ip4_address'].split(',')\n ttl = data.get('ttl', -1)\n properties = data.get('properties', '')\n host_record = view.add_host_record(absolute_name, ip4_address_list, ttl, properties)\n result = host_record.to_json()\n return result, 201\n\n\n@external_host_ns.route('/')\nclass ExternalHostRecordCollection(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n @external_host_ns.response(201, 'External Host Record successfully created.', model=entity_return_model)\n @external_host_ns.expect(external_host_model, validate=True)\n def post(self, configuration, view):\n \"\"\" Create an external host record belonging to default or provided Configuration and View. \"\"\"\n data = external_host_parser.parse_args()\n configuration = g.user.get_api().get_configuration(configuration)\n view = configuration.get_view(view)\n\n absolute_name = data.get('absolute_name', '')\n external_host_record = view.add_external_host_record(absolute_name)\n result = external_host_record.to_json()\n return result, 201\n\n\n@external_host_ns.route('/<string:absolute_name>/')\n@external_host_ns.doc(params=host_doc)\n@external_host_default_ns.route('/<string:absolute_name>/', defaults=dns_defaults)\n@external_host_default_ns.doc(params=absolute_name_doc)\n@external_host_ns.response(200, 'External Host Record found.', model=entity_return_model)\nclass ExternalHostRecord(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n def get(self, configuration, view, absolute_name):\n \"\"\" Get specified external host record belonging to default or provided Configuration and View plus Zone hierarchy. 
\"\"\"\n config = g.user.get_api().get_configuration(configuration)\n view = config.get_view(view)\n\n host_record = view.get_external_host_record(absolute_name)\n if host_record is None:\n return 'No matching External Host Record(s) found', 404\n result = host_record.to_json()\n return jsonify(result)\n\n @util.rest_workflow_permission_required('rest_page')\n def delete(self, configuration, view, absolute_name):\n \"\"\"\n Delete specified external host record belonging to default or provided Configuration and View plus Zone hierarchy.\n \"\"\"\n config = g.user.get_api().get_configuration(configuration)\n view = config.get_view(view)\n\n try:\n host_record = view.get_external_host_record(absolute_name)\n except:\n host_record = None\n if host_record is None:\n return 'No matching External Host Record(s) found', 404\n host_record.delete()\n return '', 204\n\n\n@host_ns.route('/<string:absolute_name>/')\n@host_ns.doc(params=host_doc)\n@host_default_ns.route('/<string:absolute_name>/', defaults=dns_defaults)\n@host_default_ns.doc(params=absolute_name_doc)\n@host_ns.response(200, 'Host Record found.', model=entity_return_model)\nclass HostRecord(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n def get(self, configuration, view, absolute_name):\n \"\"\" Get specified host record belonging to default or provided Configuration and View plus Zone hierarchy. \"\"\"\n config = g.user.get_api().get_configuration(configuration)\n view = config.get_view(view)\n\n host_record = view.get_host_record(absolute_name)\n if host_record is None:\n return 'No matching Host Record(s) found', 404\n result = host_record.to_json()\n return jsonify(result)\n\n @util.rest_workflow_permission_required('rest_page')\n def delete(self, configuration, view, absolute_name):\n \"\"\"\n Delete specified host record belonging to default or provided Configuration and View plus Zone hierarchy.\n \"\"\"\n config = g.user.get_api().get_configuration(configuration)\n view = config.get_view(view)\n\n host_record = view.get_host_record(absolute_name)\n if host_record is None:\n return 'No matching Host Record(s) found', 404\n host_record.delete()\n return '', 204\n\n @util.rest_workflow_permission_required('rest_page')\n @host_ns.expect(host_patch_model, validate=True)\n def patch(self, configuration, view, absolute_name):\n \"\"\"\n Update specified host record belonging to default or provided Configuration and View plus Zone hierarchy.\n \"\"\"\n data = host_patch_parser.parse_args()\n configuration = g.user.get_api().get_configuration(configuration)\n view = configuration.get_view(view)\n\n absolute_name = data.get('absolute_name', absolute_name)\n host_record = view.get_host_record(absolute_name)\n if host_record is None:\n return 'No matching Host Record(s) found', 404\n if data['properties'] is not None:\n properties = data.get('properties')\n host_record.properties = util.properties_to_map(properties)\n if data['ip4_address'] is not None:\n host_record.set_property('addresses', data['ip4_address'])\n if data['ttl'] is not None:\n host_record.set_property('ttl', str(data.get('ttl')))\n host_record.update()\n result = host_record.to_json()\n return result\n\n\n@cname_zone_ns.route('/')\n@cname_zone_default_ns.route('/', defaults=dns_defaults)\nclass CNameRecordCollection(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n @cname_ns.response(200, 'Found CName records.')\n def get(self, configuration, view, zone=None):\n \"\"\" Get all cname records belonging to default or provided 
Configuration and View plus Zone hierarchy. \"\"\"\n configuration = g.user.get_api().get_configuration(configuration)\n view = configuration.get_view(view)\n if zone is None:\n return 'No matching Zone(s) found', 404\n zone_parent = view\n zone_hierarchy = zone.split('/zones')\n zone_entity = zone_parent.get_zone(zone_hierarchy[0])\n zone = check_zone_in_path(zone_entity, zone_hierarchy[0], zone_hierarchy[1:], zone_parent)\n\n host_records = zone.get_children_of_type(zone.AliasRecord)\n result = [host.to_json() for host in host_records]\n return jsonify(result)\n\n @util.rest_workflow_permission_required('rest_page')\n @cname_ns.response(201, 'CName Record successfully created.', model=entity_return_model)\n @cname_ns.expect(cname_model, validate=True)\n def post(self, configuration, view, zone=None):\n \"\"\" Create a cname record belonging to default or provided Configuration and View plus Zone hierarchy. \"\"\"\n data = cname_parser.parse_args()\n configuration = g.user.get_api().get_configuration(configuration)\n view = configuration.get_view(view)\n\n if zone is None:\n absolute_name = data['absolute_name']\n else:\n zone_parent = view\n zone_hierarchy = zone.split('/zones')\n zone_entity = zone_parent.get_zone(zone_hierarchy[0])\n zone = check_zone_in_path(zone_entity, zone_hierarchy[0], zone_hierarchy[1:], zone_parent)\n absolute_name = data['absolute_name'] + '.' + zone.get_full_name()\n ip4_address_list = data['linked_record']\n ttl = data.get('ttl', -1)\n properties = data.get('properties', '')\n cname_record = view.add_alias_record(absolute_name, ip4_address_list, ttl, properties)\n result = cname_record.to_json()\n return result, 201\n\n\n@cname_ns.route('/<string:absolute_name>/')\n@cname_ns.doc(params=host_doc)\n@cname_default_ns.route('/<string:absolute_name>/', defaults=dns_defaults)\n@cname_default_ns.doc(params=absolute_name_doc)\n@cname_ns.response(200, 'CName Record found.', model=entity_return_model)\nclass CNameRecord(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n def get(self, configuration, view, absolute_name):\n \"\"\" Get specified cname record belonging to default or provided Configuration and View plus Zone hierarchy. 
\"\"\"\n config = g.user.get_api().get_configuration(configuration)\n view = config.get_view(view)\n\n cname_record = view.get_alias_record(absolute_name)\n if cname_record is None:\n return 'No matching CName Record(s) found', 404\n result = cname_record.to_json()\n return jsonify(result)\n\n @util.rest_workflow_permission_required('rest_page')\n def delete(self, configuration, view, absolute_name):\n \"\"\"\n Delete specified cname record belonging to default or provided Configuration and View plus Zone hierarchy.\n \"\"\"\n config = g.user.get_api().get_configuration(configuration)\n view = config.get_view(view)\n\n cname_record = view.get_alias_record(absolute_name)\n if cname_record is None:\n return 'No matching CName Record(s) found', 404\n cname_record.delete()\n return '', 204\n\n @util.rest_workflow_permission_required('rest_page')\n @cname_ns.expect(cname_patch_model, validate=True)\n def patch(self, configuration, view, absolute_name):\n \"\"\"\n Update specified cname record belonging to default or provided Configuration and View plus Zone hierarchy.\n \"\"\"\n data = cname_patch_parser.parse_args()\n configuration = g.user.get_api().get_configuration(configuration)\n view = configuration.get_view(view)\n\n absolute_name = data.get('absolute_name', absolute_name)\n cname_record = view.get_alias_record(absolute_name)\n if cname_record is None:\n return 'No matching CName Record(s) found', 404\n if data['properties'] is not None:\n properties = data.get('properties')\n cname_record.properties = util.properties_to_map(properties)\n if data['linked_record'] is not None:\n cname_record.set_property('linkedRecordName', data['linked_record'])\n if data['ttl'] is not None:\n cname_record.set_property('ttl', str(data.get('ttl')))\n\n cname_record.update()\n result = cname_record.to_json()\n return result\n\n",
"id": "11708850",
"language": "Python",
"matching_score": 3.909395694732666,
"max_stars_count": 0,
"path": "Community/rest_api/dns_page.py"
},
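Editor's note: check_zone_in_path() in the dns_page.py record above exists because zone names may themselves contain '/', so a URL path split on '/zones' is ambiguous. Below is a standalone sketch of that idea using a dict-backed FakeZone instead of BAM API objects; all names here are illustrative and not part of the Gateway workflow.

class FakeZone:
    def __init__(self, name, children=None):
        self.name = name
        self.children = children or {}

    def get_zone(self, name):
        return self.children.get(name)

def find_zone(root, path):
    parts = path.split('/zones')
    return _walk(root.get_zone(parts[0]), parts[0], parts[1:], root)

def _walk(zone, pre, post, parent, sep='/zones'):
    if not post:
        return zone
    if zone is not None:                      # treat the next piece as a child zone
        nxt = post[0].lstrip('/')
        found = _walk(zone.get_zone(nxt), nxt, post[1:], zone)
        if found is not None:
            return found
    pre += sep + post.pop(0)                  # or fold '/zones' back into the zone name
    zone = parent.get_zone(pre)
    if zone is not None:
        return _walk(zone, pre, post, parent)
    return None

# Usage: a zone literally named 'a/zones/b' is still resolved correctly.
root = FakeZone('view', {'a/zones/b': FakeZone('a/zones/b', {'c': FakeZone('c')})})
print(find_zone(root, 'a/zones/b/zones/c').name)  # 'c'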
{
"content": "# Copyright 2018 BlueCat Networks (USA) Inc. and its affiliates\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# By: <NAME> (<EMAIL>)\n# <NAME> (<EMAIL>)\n# Date: 06-09-2018\n# Gateway Version: 18.9.1\n# Description: This workflow will provide access to a REST based API for Gateway.\n# Once imported, documentation for the various end points available can\n# be viewed by navigating to /api/v1/.\n\nfrom flask import g, jsonify\nfrom flask_restplus import fields, reqparse, Resource\n\nfrom bluecat import util\nfrom .configuration_page import config_defaults, entity_return_model\nfrom main_app import api\n\n\nip4_address_root_default_ns = api.namespace('ipv4_addresses', description='IPv4 Address operations')\nip4_address_root_ns = api.namespace(\n 'ipv4_addresses',\n path='/configurations/<string:configuration>/ipv4_networks/',\n description='IPv4 Address operations',\n)\n\nip4_address_default_ns = api.namespace('ipv4_addresses', description='IPv4 Address operations')\nip4_address_ns = api.namespace(\n 'ipv4_addresses',\n path='/configurations/<string:configuration>/ipv4_networks/',\n description='IPv4 Address operations',\n)\n\n\nip4_block_root_default_ns = api.namespace('ipv4_blocks', description='IPv4 Block operations')\nip4_block_root_ns = api.namespace(\n 'ipv4_blocks',\n path='/configurations/<string:configuration>/ipv4_blocks/',\n description='IPv4 Block operations',\n)\n\nip4_block_default_ns = api.namespace('ipv4_blocks', description='IPv4 Block operations')\nip4_block_ns = api.namespace(\n 'ipv4_blocks',\n path='/configurations/<string:configuration>/ipv4_blocks/',\n description='IPv4 Block operations',\n)\n\nip4_network_default_ns = api.namespace('ipv4_networks', description='IPv4 Network operations')\nip4_network_ns = api.namespace(\n 'ipv4_networks',\n path='/configurations/<string:configuration>/ipv4_networks/',\n description='IPv4 Network operations',\n)\n\nip4_network_block_ns = api.namespace('ipv4_networks', path='/ipv4_blocks/', description='IPv4 Network operations')\nip4_network_config_block_ns = api.namespace(\n 'ipv4_networks',\n path='/configurations/<string:configuration>/ipv4_blocks/',\n description='IPv4 Network operations',\n)\n\n\nip4_address_post_model = api.model(\n 'ip4_address_post',\n {\n 'mac_address': fields.String(description='MAC Address value'),\n 'hostinfo': fields.String(\n description='A string containing host information for the address in the following format: '\n 'hostname,viewId,reverseFlag,sameAsZoneFlag'\n ),\n 'action': fields.String(\n description='Desired IP4 address state: MAKE_STATIC / MAKE_RESERVED / MAKE_DHCP_RESERVED'\n ),\n 'properties': fields.String(description='The properties of the IP4 Address', default='attribute=value|'),\n },\n)\n\n\nnetwork_patch_model = api.model(\n 'ipv4_networks_patch',\n {\n 'name': fields.String(description='The name associated with the IP4 Network.'),\n 'properties': fields.String(description='The properties of the IP4 Network', default='attribute=value|'),\n },\n)\n\nnetwork_post_model = 
api.model(\n 'ipv4_networks_post',\n {\n 'name': fields.String(description='The name associated with the IP4 Network.'),\n 'size': fields.String(\n description='The number of addresses in the network expressed as a power of 2 (i.e. 2, 4, 8, 16, ... 256)',\n default='attribute=value|'\n ),\n 'properties': fields.String(description='The properties of the IP4 Network', default='attribute=value|'),\n },\n)\n\nnetwork_patch_parser = reqparse.RequestParser()\nnetwork_patch_parser.add_argument('name', location=\"json\", help='The name of the network')\nnetwork_patch_parser.add_argument('properties', location=\"json\", help='The properties of the record')\n\nnetwork_post_parser = reqparse.RequestParser()\nnetwork_post_parser.add_argument('name', location=\"json\", help='The name of the network')\nnetwork_post_parser.add_argument('properties', location=\"json\", help='The properties of the network')\nnetwork_post_parser.add_argument(\n 'size',\n location=\"json\",\n help='The number of addresses in the network expressed as a power of 2 (i.e. 2, 4, 8, 16, ... 256)'\n)\n\n\nip4_address_post_parser = reqparse.RequestParser()\nip4_address_post_parser.add_argument('mac_address', location=\"json\", help='The MAC address')\nip4_address_post_parser.add_argument('hostinfo', location=\"json\", help='The hostinfo of the address')\nip4_address_post_parser.add_argument('action', location=\"json\", help='The action for address assignment')\nip4_address_post_parser.add_argument('properties', location=\"json\", help='The properties of the record')\n\n\n@ip4_address_ns.route('/<string:network>/get_next_ip/')\n@ip4_address_default_ns.route('/<string:network>/get_next_ip/', defaults=config_defaults)\n@ip4_address_ns.response(404, 'IPv4 address not found')\nclass IPv4NextIP4Address(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n @ip4_address_ns.response(201, 'Next IP successfully created.', model=entity_return_model)\n @ip4_address_ns.expect(ip4_address_post_model, validate=True)\n def post(self, configuration, network):\n \"\"\"\n Create the next available IP4 Address\n\n Network can be of the format of network address:\n 1. 10.1.0.0\n\n \"\"\"\n data = ip4_address_post_parser.parse_args()\n mac = data.get('mac_address', '')\n hostinfo = data.get('hostinfo', '')\n action = data.get('action', '')\n properties = data.get('properties', '')\n\n configuration = g.user.get_api().get_configuration(configuration)\n network = configuration.get_ip_range_by_ip(\"IP4Network\", network)\n\n ip = network.assign_next_available_ip4_address(mac, hostinfo, action, properties)\n result = ip.to_json()\n\n return result, 201\n\n\n@ip4_block_ns.route('/<path:block>/get_next_network/')\n@ip4_block_default_ns.route('/<path:block>/get_next_network/', defaults=config_defaults)\n@ip4_block_ns.response(404, 'IPv4 network not found')\nclass IPv4NextNetworkCollection(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n @ip4_block_ns.response(201, 'Next network successfully created.', model=entity_return_model)\n @ip4_block_ns.expect(network_post_model, validate=True)\n def post(self, configuration, block):\n \"\"\"\n Create the next available IP4 Network\n\n Blocks can be of the format:\n 1. 10.1.0.0/16\n 2. 
10.0.0.0/8/ipv4_blocks/10.1.0.0/24\n\n \"\"\"\n data = network_post_parser.parse_args()\n name = data.get('name', '')\n size = data.get('size', '')\n properties = data.get('properties', '')\n range = g.user.get_api().get_configuration(configuration)\n block_hierarchy = []\n if block:\n block_hierarchy = block.split('ipv4_blocks')\n\n for block in block_hierarchy:\n block_cidr = block.strip('/')\n range = range.get_entity_by_cidr(block_cidr, range.IP4Block)\n network = range.get_next_available_ip_range(size, \"IP4Network\", properties)\n network.set_name(name)\n network.update()\n result = network.to_json()\n\n return result, 201\n\n\n@ip4_block_root_ns.route('/')\n@ip4_block_ns.route('/<path:block>/ipv4_blocks/')\n@ip4_block_root_default_ns.route('/', defaults=config_defaults)\n@ip4_block_default_ns.route('/<path:block>/ipv4_blocks/', defaults=config_defaults)\n@ip4_block_ns.response(404, 'IPv4 Blocks not found')\nclass IPv4BlockCollection(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n def get(self, configuration, block=None):\n \"\"\"\n Get all direct child IPv4 Blocks belonging to default or provided Configuration and Block hierarchy.\n Blocks can be recursively retrieved by specifying extra ipv4_block parameters.\n Blocks can be of the format:\n\n 1. 10.1.0.0/16\n 2. 10.1.0.0/16/ipv4_blocks/10.1.1.0/24/ipv4_blocks/\n \"\"\"\n range = g.user.get_api().get_configuration(configuration)\n block_hierarchy = []\n if block:\n block_hierarchy = block.split('ipv4_blocks')\n\n for block in block_hierarchy:\n block_cidr = block.strip('/')\n range = range.get_entity_by_cidr(block_cidr, range.IP4Block)\n blocks = range.get_ip4_blocks()\n\n result = [block_entity.to_json() for block_entity in blocks]\n return jsonify(result)\n\n\n@ip4_block_ns.route('/<path:block>/')\n@ip4_block_default_ns.route('/<path:block>/', defaults=config_defaults)\n@ip4_block_ns.param('block', 'Recursive')\n@ip4_block_ns.response(404, 'No matching IPv4 Block(s) found')\nclass IPv4Block(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n def get(self, configuration, block):\n \"\"\"\n Get IPv4 Block belonging to default or provided Configuration and Block hierarchy.\n Blocks can be recursively retrieved by specifying extra ipv4_block parameters.\n Blocks can be of the format:\n\n 1. 10.1.0.0/16\n 2. 10.1.0.0/16/ipv4_blocks/10.1.1.0/24\n \"\"\"\n range = g.user.get_api().get_configuration(configuration)\n block_hierarchy = block.split('ipv4_blocks')\n\n for block in block_hierarchy:\n block_cidr = block.strip('/')\n range = range.get_entity_by_cidr(block_cidr, range.IP4Block)\n\n result = range.to_json()\n return jsonify(result)\n\n\n@ip4_network_config_block_ns.route('/<path:block>/ipv4_networks/')\n@ip4_network_block_ns.route('/<path:block>/ipv4_networks/', defaults=config_defaults)\n@ip4_network_config_block_ns.param('block', 'Recursive')\n@ip4_block_ns.response(404, 'No matching IPv4 Network(s) found')\nclass IPv4NetworkCollection(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n def get(self, configuration, block):\n \"\"\"\n Get all IPv4 Networks belonging to default or provided Configuration and Block hierarchy.\n Path can be of the format:\n\n 1. 
ipv4_blocks/10.1.0.0/16/ipv4_blocks/10.1.1.0/24/ipv4_networks/\n \"\"\"\n range = g.user.get_api().get_configuration(configuration)\n block_hierarchy = block.split('ipv4_blocks')\n for block in block_hierarchy:\n block_cidr = block.strip('/')\n\n range = range.get_entity_by_cidr(block_cidr, range.IP4Block)\n networks = range.get_children_of_type(range.IP4Network)\n\n result = [network.to_json() for network in networks]\n return jsonify(result)\n\n\n@ip4_network_ns.route('/<path:network>/')\n@ip4_network_default_ns.route('/<path:network>/', defaults=config_defaults)\n@ip4_network_ns.response(404, 'No matching IPv4 Network(s) found')\nclass IPv4Network(Resource):\n\n @util.rest_workflow_permission_required('rest_page')\n def get(self, configuration, network):\n \"\"\"\n Get IPv4 Network belonging to default or provided Configuration.\n Network can be of the format:\n\n 1. 10.1.1.0\n 2. 10.1.1.0/24\n \"\"\"\n configuration = g.user.get_api().get_configuration(configuration)\n network_ip = network.split('/')[0]\n network_range = configuration.get_ip_range_by_ip(configuration.IP4Network, network_ip)\n\n if '/' in network and network_range.get_property('CIDR') != network:\n return 'No matching IPv4 Network(s) found', 404\n result = {'id': network_range.get_id(), 'name': network_range.get_name()}\n return jsonify(result)\n\n @util.rest_workflow_permission_required('rest_page')\n def delete(self, configuration, network):\n \"\"\"\n Delete IPv4 Network belonging to default or provided Configuration.\n Network can be of the format:\n\n 1. 10.1.1.0\n 2. 10.1.1.0/24\n \"\"\"\n configuration = g.user.get_api().get_configuration(configuration)\n network_ip = network.split('/')[0]\n network_range = configuration.get_ip_range_by_ip(configuration.IP4Network, network_ip)\n\n if '/' in network and network_range.get_property('CIDR') != network:\n return 'No matching IPv4 Network(s) found', 404\n network_range.delete()\n return '', 204\n\n @util.rest_workflow_permission_required('rest_page')\n @ip4_network_ns.expect(network_patch_model, validate=True)\n def patch(self, configuration, network):\n \"\"\"\n Update IPv4 Network belonging to default or provided Configuration.\n Network can be of the format:\n\n 1. 10.1.1.0\n 2. 10.1.1.0/24\n \"\"\"\n data = network_patch_parser.parse_args()\n configuration = g.user.get_api().get_configuration(configuration)\n network_ip = network.split('/')[0]\n network_range = configuration.get_ip_range_by_ip(configuration.IP4Network, network_ip)\n\n if network_range is None:\n return 'No matching IPv4 Network(s) found', 404\n if '/' in network and network_range.get_property('CIDR') != network:\n return 'No matching IPv4 Network(s) found', 404\n\n if data['properties'] is not None:\n properties = data.get('properties')\n network_range.set_properties(util.properties_to_map(properties))\n if data['name'] is not None:\n network_range.set_name(data['name'])\n network_range.update()\n result = network_range.to_json()\n return jsonify(result)\n\n",
"id": "2807378",
"language": "Python",
"matching_score": 2.8473987579345703,
"max_stars_count": 0,
"path": "Community/rest_api/ip_space_page.py"
},
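Illustrative client sketch for the record above (not part of the dataset row): it shows how the get_next_ip endpoint defined in ip_space_page.py might be called over HTTP with the requests library. The base URL, configuration name, credentials and response handling are assumptions; the /api/v1/ prefix and the payload fields come from the module's own header comment and its ip4_address_post_model.

    # Hypothetical call to POST /configurations/<configuration>/ipv4_networks/<network>/get_next_ip/
    import requests

    BASE = "https://gateway.example.com/api/v1"   # assumed Gateway host; /api/v1/ is cited in the module header
    CONFIG = "default"                            # assumed configuration name
    NETWORK = "10.1.0.0"                          # network address format shown in the endpoint docstring

    payload = {
        "mac_address": "00:11:22:33:44:55",
        "hostinfo": "",
        "action": "MAKE_STATIC",
        "properties": "attribute=value|",
    }

    resp = requests.post(
        f"{BASE}/configurations/{CONFIG}/ipv4_networks/{NETWORK}/get_next_ip/",
        json=payload,
        auth=("user", "password"),                # assumed auth scheme; a real Gateway may use API keys or sessions
    )
    print(resp.status_code, resp.json())          # 201 plus the created address entity on success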
{
"content": "# Copyright 2019 BlueCat Networks. All rights reserved.\n\nimport ipaddress\nfrom flask import request, g, abort, jsonify\nfrom bluecat.api_exception import PortalException, APIException\nfrom bluecat import route, util\nfrom main_app import app\n\n# application config\n\n# Define global variable to hold handle to API object\napi = None\n\n#\n# GET, PUT or POST\n#\n@route(app, '/lumeta/getnetworklist', methods=['GET', 'PUT', 'POST'])\n@util.rest_workflow_permission_required('lumeta_workflow_page')\n@util.rest_exception_catcher\ndef get_networks_get_networks_page():\n # are we authenticated?\n g.user.logger.info('SUCCESS')\n configurations = None\n configurations_json = []\n if g.user:\n configurations = g.user.get_api().get_configurations()\n for c in configurations:\n print (c)\n configuration_json = {\"id\": c.get_id(), \"name\": c.get_name()}\n configurations_json.append(configuration_json)\n return jsonify(configurations_json)\n\n\n@route(app, '/lumeta/getiplist', methods=['GET', 'PUT', 'POST'])\n@util.rest_workflow_permission_required('lumeta_workflow_page')\n@util.rest_exception_catcher\ndef getiplist_getiplist_page():\n # are we authenticated?\n g.user.logger.info('SUCCESS')\n networks = []\n\n # Return object that contains all the networks (and eventually all ip addresses)\n # list of all properties objects\n ip_addresses = []\n # If name is given, use get_configuration(name)\n if g.user:\n configurations = g.user.get_api().get_configurations()\n for c in configurations:\n print(c)\n configuration_json = {\"id\": c.get_id(), \"name\": c.get_name()}\n\n # FIXME - need code to get network list from configuration id. Is there a call to get children_of_types\n # (['IP4Block', 'IP4Network', 'IP6Block', 'IP6Network'\n # use get_by_object_types(*, ['IP4Block', 'IP4Network', 'IP6Block', 'IP6Network']) - returns flat list\n # We might want to request IP4Network, IP6Network\n # FIXME - extract below code in a function and call it for IP4Block and IP6Block\n try:\n for nw in c.get_children_of_type('IP4Block'):\n print(nw)\n # get all blocks and networks for block\n for n in g.user.get_api().get_by_object_types(nw.get_property('CIDR'),\n ['IP4Network', 'IP4Block', 'IP6Network', 'IP6Block']):\n if '6' in n.get_type():\n networks.append({'network_id': n.get_id(), 'display_text': n.get_properties()['prefix']})\n ip_addresses.extend(calculate_block_stats(n, c.get_id(), c.get_name()))\n else:\n networks.append({'network_id': n.get_id(), 'display_text': n.get_properties()['CIDR']})\n ip_addresses.extend(calculate_block_stats(n, c.get_id(), c.get_name()))\n\n except Exception as e:\n app.loggererror('get_subnets: ' + e.message)\n return jsonify(ip_addresses)\n\n\ndef calculate_network_stats(bam_network, config_id, config_name):\n if bam_network.get_type() == 'IP4Network':\n network_address = bam_network.get_property('CIDR')\n network = ipaddress.ip_network(network_address)\n else:\n network_address = bam_network.get_property('prefix')\n network = ipaddress.ip_network(network_address)\n\n ip_addresses = []\n ip_data = {}\n\n if bam_network.get_type() == 'IP4Network':\n\n # run below for IP4Address, IP6Address - properties will be populated as well\n for n in bam_network.get_children_of_type('IP4Address'):\n # Sometimes below list contains all ip addresses and sometimes only one for gateway address\n # Look through n.get_properties() and add them to ip_data\n ip_data = {}\n ip_data.update({'ip_address': n.get_address()})\n ip_data.update({'properties': n.get_properties()})\n 
ip_data.update({'config_id': config_id})\n ip_data.update({'config_name': config_name})\n ip_data.update({'id': n.get_id()})\n ip_addresses.append(ip_data)\n\n next_address = bam_network.get_next_available_ip4_address()\n\n else:\n for n in bam_network.get_children_of_type('IP6Address'):\n ip_data = {}\n ip_data.update({'ip_address': n.get_address()})\n ip_data.update({'properties': n.get_properties()})\n ip_data.update({'config_id': config_id})\n ip_data.update({'config_name': config_name})\n ip_data.update({'id': n.get_id()})\n ip_addresses.append(ip_data)\n\n #return network_data\n return ip_addresses\n\n\ndef calculate_block_stats(bam_block, config_id, config_name):\n if bam_block.get_type() == 'IP6Block':\n block_address = bam_block.get_property('prefix')\n block = ipaddress.ip_network(block_address)\n else:\n block_address = bam_block.get_property('CIDR')\n # block = ipaddress.ip_network(block_address, config_id, config_name)\n block = ipaddress.ip_network(block_address)\n block_data = {}\n block_data_list = []\n\n if bam_block.get_type() == 'IP4Block':\n for network in bam_block.get_ip4_networks():\n return_data = calculate_network_stats(network, config_id, config_name)\n # This constructs adding network as key with all values that were returned from calculate network stats\n block_data_list.extend(return_data)\n\n for found_block in bam_block.get_ip4_blocks():\n return_data = calculate_block_stats(found_block, config_id, config_name)\n block_data_list.extend(return_data)\n\n next_address = bam_block.get_next_available_ip4_address()\n if next_address != '':\n block_data.update({'next_available_address': next_address})\n try:\n next_available = bam_block.get_next_available_ip4_network(256, auto_create=False)\n block_data.update({'next_available_network': next_available})\n except APIException as e:\n # Nothing to do here since we aren't adding anything to the object\n next_available = ''\n elif bam_block.get_type() == 'IP6Block':\n for network in bam_block.get_ip6_networks():\n return_data = calculate_network_stats(network, config_id, config_name)\n\n for found_block in bam_block.get_ip6_blocks():\n return_data = calculate_block_stats(found_block, config_id, config_name)\n\n else:\n next_available = ''\n\n return block_data_list\n\n# to tag address, add_ip4 - get back IP4Address object. Call object.link_entity(entity id of the tag)\n#\n# GET, PUT or POST\n@route(app, '/lumeta/addiplist', methods=['GET', 'PUT', 'POST'])\n# @util.rest_workflow_permission_required('addiplist_page')\n@util.rest_workflow_permission_required('lumeta_workflow_page')\n@util.rest_exception_catcher\ndef addiplist_addiplist_page():\n\n # are we authenticated?\n g.user.logger.info('SUCCESS')\n rdata_arr = request.get_json()\n stats = {}\n global api\n\n for rdata in rdata_arr:\n config_name = rdata[\"config_name\"]\n add_network = rdata[\"add_network_block\"]\n device_list = rdata[\"deviceList\"]\n added_ips = 0\n dup_ips = 0\n # Get API object up front and use it going forward. 
That way, auth key doesn't expire on us\n # when we are midway in processing\n api = g.user.get_api()\n print(add_network)\n print(device_list)\n config = api.get_configuration(config_name)\n for device in device_list:\n print(device[\"ip\"])\n (added_ip, dup_ip, ip) = add_device(device, config, add_network)\n added_ips += added_ip\n dup_ips += dup_ip\n\n # Add tag if ip was added\n if added_ip == 1:\n add_tag(ip)\n stats.update({config_name: {\"added_ips\": added_ips, \"dup_ips\": dup_ips}})\n return jsonify(stats)\n\n\ndef add_device(device, config, add_network):\n # Algorithm to add ip to BAM\n # check if block exists for this ip address.\n try:\n ip = device[\"ip\"]\n mac = ''\n mac = device[\"mac\"]\n family = device[\"family\"]\n blk_data = None\n dup_ip = 0\n added_ip = 0\n ip_obj = None\n\n if family == '4':\n blk_data = config.get_ip_range_by_ip('IP4Block', ip)\n else:\n blk_data = config.get_ip_range_by_ip('IP6Block', ip)\n # if block exists, check for network\n network_data = None\n\n if family == '4':\n network_data = config.get_ip_range_by_ip('IP4Network', ip)\n else:\n network_data = config.get_ip_range_by_ip('IP6Network', ip)\n\n # If Block and Network exists, add ip address\n # currently, assigning ip address is throwing API exception:Server raised fault: \"Duplicate of another item\"\n # Need to see how we can catch it\n if blk_data is not None and network_data is not None:\n # Add ip address\n ip_obj = assign_ip(network_data, ip, mac, family)\n added_ip += 1\n\n # If no block exists and add_network is set to true, create Block with /32, create Network with /32 and then\n # create ip with /32\n except PortalException as e:\n # No block address containing input ip address exists. Check the flag and create one\n if add_network:\n try:\n # Add Block, then network and finally add ip\n # Below line is returning BAMException - IPv4 Blocks cannot be in size of /31 and /32\n # So, at this point, if there is no container, do not add ip address\n # config.add_ip4_block_by_cidr(ip)\n if blk_data is None:\n # add /30 for addressblock\n block_network = ipaddress.ip_network(ip + '/30', strict=False)\n config.add_ip4_block_by_cidr(block_network.exploded)\n blk_data = config.get_ip_range_by_ip('IP4Block', ip)\n\n if blk_data is not None:\n # create network in block\n blk_data.add_ip4_network(ip + '/32')\n\n # create ip under above created network\n network_data = config.get_ip_range_by_ip('IP4Network', ip)\n if network_data is not None:\n # Add ip address\n ip_obj = assign_ip(network_data, ip, mac, family)\n added_ip += 1\n except APIException as ex:\n if \"Duplicate\" in ex.get_message():\n dup_ip += 1\n # else:\n # Seeing intermittent error while adding address block, so had to stop logging error\n # app.loggererror('add_ip: ' + ex.message)\n except APIException as ex:\n # when ip address already exists, it returns BAMException with message 'Server raised fault: \"Duplicate of another item\"'\n # \"Duplicate\" in ex.get_message()\n if \"Duplicate\" in ex.get_message():\n dup_ip += 1\n else:\n # TODO - how to log info message and not error?\n app.loggererror('add_ip: ' + ex.get_message())\n return (added_ip, dup_ip, ip_obj)\n\n\ndef assign_ip(network_data, ip, mac, family):\n if mac is not '':\n if family == '4':\n ip = network_data.assign_ip4_address(ip, mac, '', 'MAKE_DHCP_RESERVED')\n else:\n ip = network_data.assign_ip6_address(ip, mac, '', 'MAKE_DHCP_RESERVED')\n else:\n if family == '4':\n ip = network_data.assign_ip4_address(ip, '', '', 'MAKE_STATIC')\n else:\n ip = 
network_data.assign_ip6_address(ip, '', '', 'MAKE_STATIC')\n return ip\n\n\ndef add_tag(ip):\n tag_group = None\n tag = None\n try:\n tag_group = api.get_tag_group_by_name(\"Lumeta\")\n\n # If tag group exists, chances are that tag exists as well, but just in case if it doesn't\n tag = tag_group.get_tag_by_name(\"Discovered Device\")\n\n except PortalException as e:\n if tag_group is None:\n # Tag group does not exist, create one\n tag_group = api.add_tag_group(\"Lumeta\")\n if tag is None:\n # Get tag group object. above API to add tag group is only returning object id instead of entire object\n # Calling add_tag on it is throwing exception 'int' object has no attribute 'add_tag'\n tag_group = api.get_tag_group_by_name(\"Lumeta\")\n # Create Tag under Lumeta\n tag = tag_group.add_tag(\"Discovered Device\")\n try:\n # assign tag to ip\n ip.link_entity(tag)\n except APIException as ex:\n print(ex.get_message())\n",
"id": "3808730",
"language": "Python",
"matching_score": 1.7781741619110107,
"max_stars_count": 0,
"path": "Community/AssetManagement/lumeta_workflow_page.py"
},
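A similarly hedged sketch for the Lumeta workflow record above: the route path /lumeta/getnetworklist and the returned [{"id": ..., "name": ...}] shape are taken from the code itself, while the host and credentials are placeholders.

    import requests

    BASE = "https://gateway.example.com"            # assumed Gateway address
    resp = requests.get(BASE + "/lumeta/getnetworklist",
                        auth=("user", "password"))  # assumed authentication
    for conf in resp.json():                        # one entry per BAM configuration
        print(conf["id"], conf["name"])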
{
"content": "# Copyright 2019 BlueCat Networks. All rights reserved.\n# -*- coding: utf-8 -*-\n\ntype = 'api'\nsub_pages = [\n {\n 'name' : 'lumeta_workflow_page',\n 'title' : u'lumeta_workflows',\n 'endpoint' : 'lumeta/getstatus',\n 'description' : u'Lumeta integration workflows endpoints'\n }\n]\n",
"id": "9958370",
"language": "Python",
"matching_score": 0.1267109215259552,
"max_stars_count": 0,
"path": "Community/AssetManagement/__init__.py"
}
] | 2.312786 |
jecky100000 | [
{
"content": "import markov_speaking\r\nimport re\r\nimport random\r\nimport numpy as np\r\nimport os\r\nimport keras\r\nfrom rhyme_searching import *\r\nfrom keras.models import Sequential\r\nfrom keras.layers import LSTM\r\nfrom keras.layers.core import Dense\r\n\r\n# training depth\r\ndepth = 4 \r\n\r\ntrain_mode = False\r\nartist = \"chinese_rappers\" \r\nrap_file = \"demo.txt\"\r\n\r\ndef create_network(depth):\r\n\tmodel = Sequential()\r\n\tmodel.add(LSTM(4, input_shape=(2, 2), return_sequences=True))\r\n\tfor i in range(depth):\r\n\t\tmodel.add(LSTM(8, return_sequences=True))\r\n\tmodel.add(LSTM(2, return_sequences=True))\r\n\tmodel.summary()\r\n\tmodel.compile(optimizer='rmsprop',\r\n loss='mse')\r\n\tif artist + \".rap\" in os.listdir(\".\") and train_mode == False:\r\n\t\tmodel.load_weights(str(artist + \".rap\"))\r\n\t\tprint(\"loading saved network: \" + str(artist) + \".rap\")\r\n\treturn model\r\n\r\n# split the text\r\ndef split_lyrics_file(text_file):\r\n\ttext = open(text_file, encoding='UTF8', errors='ignore').read()\r\n\ttext = text.replace(\" \",\"\").split(\"\\n\")\r\n\twhile \"\" in text:\r\n\t\ttext.remove(\"\")\r\n\treturn text\r\n\r\n# build the dataset for training\r\ndef build_dataset(lines):\r\n\tprint(\"Start biulding,you have to wait\")\r\n\t# print(lines)\r\n\tdataset = []\r\n\tline_list = []\r\n\tj = 0\r\n\tfor line in lines:\r\n\t\tline_list = [line, len(line), rhyme(line)]\r\n\t\tdataset.append(line_list)\r\n\t\tj += 1\r\n\t\tprint(j)\r\n\tx_data = []\r\n\ty_data = []\r\n\tfor i in range(len(dataset) - 3):\r\n\t\tprint(i)\r\n\t\tline1 = dataset[i][1:]\r\n\t\tline2 = dataset[i + 1][1:]\r\n\t\tline3 = dataset[i + 2][1:]\r\n\t\tline4 = dataset[i + 3][1:]\r\n\t\tx = [line1[0], line1[1], line2[0], line2[1]]\r\n\t\tx = np.array(x)\r\n\t\tx = x.reshape(2, 2)\r\n\t\tx_data.append(x)\r\n\t\ty = [line3[0], line3[1], line4[0], line4[1]]\r\n\t\ty = np.array(y)\r\n\t\ty = y.reshape(2, 2)\r\n\t\ty_data.append(y)\r\n\tx_data = np.array(x_data)\r\n\ty_data = np.array(y_data)\r\n\tprint(\"Finished building the dataset\")\r\n\treturn x_data, y_data\r\n\r\n# use for predicting the next bar\r\ndef compose_rap(lines, lyrics_file, model):\r\n\trap_vectors = []\r\n\thuman_lyrics = split_lyrics_file(lyrics_file)\r\n\r\n\tinitial_index = random.choice(range(len(human_lyrics) - 1))\r\n\tinitial_lines = human_lyrics[initial_index:initial_index + 2]\r\n\r\n\tstarting_input = []\r\n\tfor line in initial_lines:\r\n\t\tstarting_input.append([len(line), rhyme(line)])\r\n\r\n\tstarting_vectors = model.predict(\r\n\t\tnp.array([starting_input]).flatten().reshape(1, 2, 2))\r\n\trap_vectors.append(starting_vectors)\r\n\r\n\tfor i in range(100):\r\n\t\trap_vectors.append(model.predict(\r\n\t\t\tnp.array([rap_vectors[-1]]).flatten().reshape(1, 2, 2)))\r\n\r\n\treturn rap_vectors\r\n\r\n# use the vectors to make songs\r\ndef vectors_into_song(vectors, generated_lyrics):\r\n\tprint(\"\\n\\n\")\r\n\tprint(\"About to write rap (this could take a moment)...\")\r\n\tprint(\"\\n\\n\")\r\n\r\n\r\n\tdef calculate_score(vector_half, syllables, rhyme):\r\n\t\tdesired_syllables = vector_half[0]\r\n\t\tdesired_rhyme = vector_half[1]\r\n\t\tdesired_rhyme = desired_rhyme * len(rhyme_list)\r\n\r\n\t\tscore = 1.0 - (abs((float(desired_syllables) - float(syllables))) +\r\n\t\t abs((float(desired_rhyme) - float(rhyme))))\r\n\t\treturn score\r\n\tdataset = []\r\n\tfor line in generated_lyrics:\r\n\t\tline_list = [line, len(line), rhyme(line)]\r\n\t\tdataset.append(line_list)\r\n\trap = []\r\n\tvector_halves = 
[]\r\n\tfor vector in vectors:\r\n\t\tvector_halves.append(list(vector[0][0]))\r\n\t\tvector_halves.append(list(vector[0][1]))\r\n\tfor vector in vector_halves:\r\n\t\tscorelist = []\r\n\t\tfor item in dataset:\r\n\t\t\tline = item[0]\r\n\t\t\ttotal_score = calculate_score(vector, item[1], item[2])\r\n\t\t\tscore_entry = [line, total_score]\r\n\t\t\tscorelist.append(score_entry)\r\n\t\tfixed_score_list = []\r\n\t\tfor score in scorelist:\r\n\t\t\tfixed_score_list.append(float(score[1]))\r\n\t\tif len(fixed_score_list) == 0:\r\n\t\t\treturn rap\r\n\t\tmax_score = max(fixed_score_list)\r\n\t\tfor item in scorelist:\r\n\t\t\tif item[1] == max_score:\r\n\t\t\t\trap.append(item[0])\r\n\t\t\t\tprint(str(item[0]))\r\n\r\n\t\t\t\tfor i in dataset:\r\n\t\t\t\t\tif item[0] == i[0]:\r\n\t\t\t\t\t\tdataset.remove(i)\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tbreak\r\n\treturn rap\r\n\r\n# start training\r\ndef train(x_data, y_data, model):\r\n\tmodel.fit(np.array(x_data), np.array(y_data),\r\n batch_size=2,\r\n epochs=5,\r\n verbose=1)\r\n\tmodel.save_weights(artist + \".rap\")\r\n\tprint(\"Finished training\")\r\n\r\n# the main function\r\ndef main(depth, train_mode):\r\n\t# create the network\r\n\tmodel = create_network(depth)\r\n\ttext_file = \"chinese_lyrics.txt\"\r\n\tif train_mode == True:\r\n\t\tbars = split_lyrics_file(text_file)\r\n\tif train_mode == False:\r\n\t\tp = markov_speaking.Markov(text_file, 1)\r\n\t\tbars = []\r\n\t\tfor _ in range(10000):\r\n\t\t\tbars.append(p.say())\r\n\tif train_mode == True:\r\n\t\tx_data, y_data = build_dataset(bars)\r\n\t\ttrain(x_data, y_data, model)\r\n\tif train_mode == False:\r\n\t\tvectors = compose_rap(bars,text_file, model)\r\n\t\trap = vectors_into_song(vectors, bars)\r\n\t\tf = open(rap_file, \"w\",encoding=\"UTF8\")\r\n\t\tfor bar in rap:\r\n\t\t\tf.write(bar)\r\n\t\t\tf.write(\"\\n\")\r\nmain(depth, train_mode)\r\n",
"id": "9525739",
"language": "Python",
"matching_score": 2.223435401916504,
"max_stars_count": 71,
"path": "Chinese_lyrics_flow.py"
},
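Shape sketch for the LSTM defined in Chinese_lyrics_flow.py above. It mirrors create_network with depth = 4 as standalone code rather than importing the module, because the module executes main(depth, train_mode) at import time; keras and numpy are assumed to be installed.

    import numpy as np
    from keras.models import Sequential
    from keras.layers import LSTM

    model = Sequential()
    model.add(LSTM(4, input_shape=(2, 2), return_sequences=True))
    for _ in range(4):                        # depth = 4, as in the module
        model.add(LSTM(8, return_sequences=True))
    model.add(LSTM(2, return_sequences=True))
    model.compile(optimizer="rmsprop", loss="mse")

    x = np.zeros((1, 2, 2))                   # two (line_length, rhyme_class) pairs
    print(model.predict(x).shape)             # -> (1, 2, 2): the next two predicted pairs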
{
"content": "#coding=utf8\n\n# last edit date: 2016/10/19\n# author: Forec\n# LICENSE\n# Copyright (c) 2015-2017, Forec <<EMAIL>>\n\n# Permission to use, copy, modify, and/or distribute this code for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\nimport jieba\nimport codecs\nimport random\nimport re\n\nclass Markov:\n\tdef __init__(self, filepath = None, mode = 0, coding=\"utf8\"):\n\t\tself.dictLen = 0\n\t\tself.Cap = []\n\t\tself.mode = mode\n\t\tself.coding = coding\n\t\tself.dic = {}\n\t\tif filepath is not None:\n\t\t\tself.train(filepath, mode, coding)\n\tdef train(self, filepath = '', mode = 0, coding=\"utf8\"):\n\t\tself.dic = {}\n\t\tself.Cap = []\n\t\tself.mode = mode\n\t\tself.coding = coding\n\t\tif filepath is None or filepath == '':\n\t\t\treturn\n\t\teg_puncmark = re.compile('[\\,\\.\\!\\;\\?\\~\\`\\#\\$\\%\\@\\^&\\*\\(\\)\\]\\[]')\n\t\tzh_puncmark = re.compile('[,。!;]')\n\t\twith codecs.open(filepath, \"r\", coding) as f:\n\t\t\tfor line in f.readlines():\n\t\t\t\twords = []\n\t\t\t\tline = re.sub('[\\r\\n]', \"\", line)\n\t\t\t\tif mode == 0:\n\t\t\t\t\tsentences = eg_puncmark.split(line)\n\t\t\t\t\tsentences_words = []\n\t\t\t\t\tfor sentence in sentences:\n\t\t\t\t\t\tsentences_words.append(filter(lambda x:x != '',sentence.split(\" \")))\n\t\t\t\t\tfor words in sentences_words:\n\t\t\t\t\t\tfor i in range(len(words)-2):\n\t\t\t\t\t\t\tkeypair = words[i] + \" \" + words[i+1]\n\t\t\t\t\t\t\tif keypair[0].isupper():\n\t\t\t\t\t\t\t\tself.Cap.append(keypair)\n\t\t\t\t\t\t\tif self.dic.get(keypair) is None:\n\t\t\t\t\t\t\t\tself.dic[keypair] = [words[i+2]]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.dic[keypair].append(words[i+2])\n\t\t\t\telse:\n\t\t\t\t\tsentences = zh_puncmark.split(line)\n\t\t\t\t\tfor sentence in sentences:\n\t\t\t\t\t\tjwords = jieba.cut(sentence, cut_all=False)\n\t\t\t\t\t\tfor word in jwords:\n\t\t\t\t\t\t\tif len(word) >= 2:\n\t\t\t\t\t\t\t\twords.append(word)\n\t\t\t\t\t\tif len(words) > 2:\n\t\t\t\t\t\t\tself.Cap.append(words[0] + \" \" + words[1])\n\t\t\t\t\t\t\t#print(words)\n\t\t\t\t\t\t\t#words = filter(lambda x:x != '', words)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tfor i in range(len(words)-2):\n\t\t\t\t\t\t\t\tkeypair = words[i] + \" \" + words[i+1]\n\t\t\t\t\t\t\t\tif self.dic.get(keypair) is None:\n\t\t\t\t\t\t\t\t\tself.dic[keypair] = [words[i+2]]\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tself.dic[keypair].append(words[i+2])\n\t\t\tself.dictLen = len(self.dic)\n\tdef getDic(self):\n\t\treturn self.dic\n\tdef say(self, length = 10):\n\t\tif self.dictLen <= 2:\n\t\t\tprint(\"I feel tired and I need food to say something.\")\n\t\telse:\n\t\t\tkeypair = self.Cap[random.randint(0, len(self.Cap)-1)]\n\t\t\tfst, snd = keypair.split(\" \")[0], keypair.split(\" \")[1]\n\t\t\tpairlen = len(self.dic[keypair])\n\t\t\tif self.mode == 0:\n\t\t\t\tsentence = fst + \" \" + snd\n\t\t\telse:\n\t\t\t\tsentence = fst + snd\n\t\t\twhile length > 0 and pairlen > 
0:\n\t\t\t\ttemp = self.dic[keypair][random.randint(0, pairlen-1)]\n\t\t\t\tfst = snd\n\t\t\t\tsnd = temp\n\t\t\t\tif self.mode == 0:\n\t\t\t\t\tsentence = sentence + \" \" + snd\n\t\t\t\telse:\n\t\t\t\t\tsentence = sentence + snd\n\t\t\t\tkeypair = fst + \" \" + snd\n\t\t\t\tif self.dic.get(keypair) is not None:\n\t\t\t\t\tpairlen = len(self.dic[keypair])\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\t\tlength -= 1\n\t\t\tif self.mode == 0:\n\t\t\t\tprint(sentence + \".\")\n\t\t\telse:\n\t\t\t\t\t\t\t#sentence + \"。\".encode(\"utf8\")\n\t\t\t\t\t\t\t\t#print(sentence + \"。\".decode(\"utf8\"))\n\t\t\t\tprint(sentence)\n\t\treturn sentence\n\t\n",
"id": "5621603",
"language": "Python",
"matching_score": 0.23125851154327393,
"max_stars_count": 71,
"path": "markov_speaking.py"
},
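Usage sketch for the Markov class above; mode=1 selects the jieba (Chinese) branch and mode=0 the whitespace-split (English) branch. The corpus path is an assumption, mirroring how Chinese_lyrics_flow.py instantiates the class.

    from markov_speaking import Markov

    m = Markov("chinese_lyrics.txt", mode=1)  # builds the bigram table on construction
    bar = m.say(10)                           # prints and returns one generated line of up to ~10 extra words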
{
"content": "from xpinyin import Pinyin\r\nrhyme_list = [\r\n ['a','ba','ca','cha','da','fa','ga','gua','ha','hua','jia','ka','kua','la','lia','ma','na','pa','qia','sa','sha','shua','ta','wa','xia','ya','za','zha','zhua'],\r\n ['ai','bai','cai','chai','dai','gai','guai','hai','huai','kai','kuai','lai','mai','nai','pai','sai','shai','shuai','tai','wai','zai','zhai'],\r\n ['an','ban','can','chan','chuan','cuan','dan','duan','fan','gan','guan','han','huan','kan','kuan','lan','luan','man','nan','nuan','pan','ran','ruan','san','shan','shuan','suan','tan','tuan','wan','zan','zhan','zhuan','zuan'],\r\n ['ang','bang','cang','chang','chuang','dang','fang','gang','guang','hang','huang','jiang','kang','kuang','lang','liang','mang','nang','niang','pang','qiang','rang','sang','shang','shuang','tang','wang','xiang','yang','zang','zhang','zhuang'],\r\n ['ao','bao','biao','cao','chao','dao','diao','gao','hao','jiao','kao','lao','liao','mao','miao','nao','niao','pao','piao','qiao','rao','sao','shao','tao','tiao','xiao','yao','zao','zhao'],\r\n ['bei','cui','chui','dei','dui','ei','fei','gei','gui','hei','hui','kui','lei','mei','nei','pei','rui','shui','sui','tui','wei','zei','zhui','zui'],\r\n ['ben','cen','ceng','chen','cheng','chun','cun','dun','en','fen','gen','gun','hen','heng','hun','jun','ken','keng','kun','lun','men','nen','neng','pen','ren','reng','run','sen','seng','shen','sheng','shun','sun','teng','tun','wen','zen','zeng','zhen','zheng','zhun','zun'],\r\n ['beng','chong','cong','deng','dong','eng','feng','geng','gong','hong','jiong','kong','leng','long','meng','nong','peng','qiong','rong','song','tong','weng','xiong','yong','zhong','zong'],\r\n ['bi','di','ji','ju','li','lv','mi','ni','nv','pi','qi','qu','ti','xi','xu','yi','yu'],\r\n ['bian','dian','jian','juan','lian','mian','nian','pian','qian','quan','tian','xian','xuan','yan','yuan'],\r\n ['bie','die','jie','jue','lie','lve','mie','nie','nve','pie','qie','que','tie','xie','xue','ye','yue'],\r\n ['bin','bing','ding','jin','jing','lin','ling','min','ming','nin','ning','pin','ping','qin','qing','qun','ting','xin','xing','xun','yin','ying','yun'],\r\n ['bo','chou','chou','cou','cuo','diu','dou','duo','fo','fou','gou','guo','hou','huo','jiu','kou','kuo','liu','lou','luo','miu','mo','mou','niu','nou','nuo','o','ou','po','pou','qiu','rou','ruo','shou','shuo','sou','suo','tou','tuo','wo','xiu','you','zhou','zhuo','zou','zuo'],\r\n ['bu','chu','cu','du','fu','gu','hu','ku','lu','mu','nu','pu','ru','shu','su','tu','wu','zhu','zu'],\r\n ['ce','che','de','e','er','ge','he','ke','le','me','ne','re','se','she','te','ze','zhe'],\r\n ['chi','ci','ri','shi','si','zhi','zi']\r\n]\r\ndef rhyme(line):\r\n test = Pinyin()\r\n b=str(test.get_pinyin(line[-1]))\r\n number = 0\r\n for rhymes in range(len(rhyme_list)):\r\n if b in rhyme_list[rhymes]:\r\n number = rhymes + 1\r\n break\r\n number /= len(rhyme_list)\r\n # print(number)\r\n return number\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n",
"id": "6665532",
"language": "Python",
"matching_score": 0.8138420581817627,
"max_stars_count": 71,
"path": "rhyme_searching.py"
},
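Quick check of rhyme() from the record above: it looks up the pinyin of a line's final character in the 16 rhyme classes and returns the 1-based class index divided by 16 (0.0 when the final is not listed). The expected value below assumes xpinyin renders 友 as "you".

    from rhyme_searching import rhyme

    print(rhyme("朋友"))   # "you" sits in class 13, so this should print 13/16 = 0.8125
    print(rhyme("abc"))    # final "c" has no pinyin rhyme class -> 0.0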
{
"content": "from rhyme_searching import *\r\ntext = open('demo.txt', encoding='UTF8', errors='ignore').read()\r\ntext = text.split(\"\\n\")\r\nlenth = 16\r\ntot = 0\r\nvisit = []\r\nans = []\r\ncnt = 0\r\nfor _ in range(lenth):\r\n ans.append(0)\r\nwhile 1:\r\n if tot == lenth:\r\n break\r\n st = 0\r\n for line in range(lenth):\r\n if rhyme(text[line]) not in visit:\r\n visit.append(rhyme(text[line]))\r\n st = rhyme(text[line])\r\n cnt += 1\r\n break\r\n for line in range(lenth):\r\n if rhyme(text[line]) == st:\r\n ans[line] = cnt\r\n tot += 1\r\nprint(ans)",
"id": "12206377",
"language": "Python",
"matching_score": 1.199421763420105,
"max_stars_count": 71,
"path": "flow_finding.py"
},
{
"content": "import subprocess\r\nimport time\r\nimport win32com.client as win\r\nsubprocess.Popen(\"beat.mp3\",shell=True).wait()\r\nspeak = win.Dispatch(\"SAPI.SpVoice\")\r\ntext = open('demo.txt', encoding='UTF8', errors='ignore').read()\r\ntext = text.split(\"\\n\")\r\nspeak.Speak(\"第一代说唱机器即将开始\")\r\nfor lines in range(len(text)):\r\n speak.Speak(text[lines])\r\n \r\n",
"id": "483517",
"language": "Python",
"matching_score": 0.517223060131073,
"max_stars_count": 71,
"path": "start.py"
}
] | 0.813842 |
Vladimir-125 | [
{
"content": "#-----------------------------------------------------------------------------\n# Written by <NAME> <<EMAIL>> 2019\n# Copyright(C) 2019, All Rights Reserved\n# This file is subject to the terms and conditions defined in\n# file 'LICENSE.txt', which is part of this source code package.\n#-----------------------------------------------------------------------------\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nimport sys\nfrom calc import Ui_Form\n\n#Create app\napp = QtWidgets.QApplication(sys.argv)\n\n#Create form and init UI\nForm = QtWidgets.QWidget()\nui = Ui_Form()\nui.setupUi(Form)\nForm.show()\n\n# Globals\noperator = ''\nexp1 = ''\nisFirst = True; # first digit\n\n#Hook logic\ndef btn_0():\n\tglobal isFirst;\n\tif not isFirst:\n\t\t# if first digit is 0: do nothing\n\t\tif ui.lineEdit.text() != '0':\n\t\t\tui.lineEdit.setText(ui.lineEdit.text() + str(0));\n\telse:\n\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(0));\n\t\tisFirst = False;\n\ndef btn_1():\n\tglobal isFirst;\n\tif not isFirst:\n\t\t# if first digit is 0 remove it\n\t\tif ui.lineEdit.text() == '0':\n\t\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(1));\n\telse:\n\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(1));\n\t\tisFirst = False;\n\ndef btn_2():\n\tglobal isFirst;\n\tif not isFirst:\n\t\t# if first digit is 0 remove it\n\t\tif ui.lineEdit.text() == '0':\n\t\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(2));\n\telse:\n\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(2));\n\t\tisFirst = False;\n\ndef btn_3():\n\tglobal isFirst;\n\tif not isFirst:\n\t\t# if first digit is 0 remove it\n\t\tif ui.lineEdit.text() == '0':\n\t\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(3));\n\telse:\n\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(3));\n\t\tisFirst = False;\n\ndef btn_4():\n\tglobal isFirst;\n\tif not isFirst:\n\t\t# if first digit is 0 remove it\n\t\tif ui.lineEdit.text() == '0':\n\t\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(4));\n\telse:\n\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(4));\n\t\tisFirst = False;\n\ndef btn_5():\n\tglobal isFirst;\n\tif not isFirst:\n\t\t# if first digit is 0 remove it\n\t\tif ui.lineEdit.text() == '0':\n\t\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(5));\n\telse:\n\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(5));\n\t\tisFirst = False;\n\ndef btn_6():\n\tglobal isFirst;\n\tif not isFirst:\n\t\t# if first digit is 0 remove it\n\t\tif ui.lineEdit.text() == '0':\n\t\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(6));\n\telse:\n\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(6));\n\t\tisFirst = False;\n\ndef btn_7():\n\tglobal isFirst;\n\tif not isFirst:\n\t\t# if first digit is 0 remove it\n\t\tif ui.lineEdit.text() == '0':\n\t\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(7));\n\telse:\n\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(7));\n\t\tisFirst = False;\n\ndef btn_8():\n\tglobal isFirst;\n\tif not isFirst:\n\t\t# if first digit is 0 remove it\n\t\tif ui.lineEdit.text() == '0':\n\t\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + 
str(8));\n\telse:\n\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(8));\n\t\tisFirst = False;\n\ndef btn_9():\n\tglobal isFirst;\n\tif not isFirst:\n\t\t# if first digit is 0 remove it\n\t\tif ui.lineEdit.text() == '0':\n\t\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(9));\n\telse:\n\t\tui.lineEdit.setText(\"\")\n\t\tui.lineEdit.setText(ui.lineEdit.text() + str(9));\n\t\tisFirst = False;\n\ndef erase():\n\tglobal operator;\n\tglobal exp1;\n\tglobal isFirst;\n\tisFirst = True;\n\toperator = ''\n\texp1 = ''\n\tui.lineEdit.setText(\"\")\n\ndef equal():\n\tglobal operator;\n\tglobal exp1;\n\tglobal isFirst;\n\tisFirst = True;\n\tif operator is not '' and exp1 is not'':\n\t\tif operator=='+':\n\t\t\tui.lineEdit.setText(str(float(exp1) + float(ui.lineEdit.text()))); # do math\n\t\t\toperator==''; # clear operator\n\t\telif operator=='-':\n\t\t\tui.lineEdit.setText(str(float(exp1) - float(ui.lineEdit.text()))); # do math\n\t\t\toperator==''; # clear operator\n\t\telif operator=='*':\n\t\t\tui.lineEdit.setText(str(float(exp1) * float(ui.lineEdit.text()))); # do math\n\t\t\toperator==''; # clear operator\n\t\telif operator=='/':\n\t\t\tui.lineEdit.setText(str(float(exp1) / float(ui.lineEdit.text()))); # do math\n\t\t\toperator==''; # clear operator\n\ndef mult():\n\tglobal operator;\n\tglobal exp1;\n\toperator='*';\n\texp1=ui.lineEdit.text();\n\tglobal isFirst;\n\tisFirst = True;\ndef div():\n\tglobal operator;\n\tglobal exp1;\n\tglobal isFirst;\n\tisFirst = True;\n\toperator='/';\n\texp1=ui.lineEdit.text();\ndef sub():\n\tglobal operator;\n\tglobal exp1;\n\tglobal isFirst;\n\tisFirst = True;\n\toperator='-';\n\texp1=ui.lineEdit.text();\ndef add():\n\tglobal operator;\n\tglobal exp1;\n\tglobal isFirst;\n\tisFirst = True;\n\toperator='+';\n\texp1=ui.lineEdit.text();\ndef back():\n\tui.lineEdit.setText(ui.lineEdit.text()[:-1]);\ndef brackets():\n\tprint()\ndef dot():\n\tglobal isFirst;\n\tif not isFirst:\n\t\tui.lineEdit.setText(ui.lineEdit.text() + '.');\n\telse: # was first\n\t\tui.lineEdit.setText(\"\")\n\t\tisFirst = False; # not a first now\n\t\tui.lineEdit.setText('0.');\n\n\n# Connectors to functions\t\nui.btn_0.clicked.connect(btn_0)\nui.btn_1.clicked.connect(btn_1)\nui.btn_2.clicked.connect(btn_2)\nui.btn_3.clicked.connect(btn_3)\nui.btn_4.clicked.connect(btn_4)\nui.btn_5.clicked.connect(btn_5)\nui.btn_6.clicked.connect(btn_6)\nui.btn_7.clicked.connect(btn_7)\nui.btn_8.clicked.connect(btn_8)\nui.btn_9.clicked.connect(btn_9)\nui.btn_erase.clicked.connect(erase)\nui.btn_equal.clicked.connect(equal)\t\nui.btn_mult.clicked.connect(mult)\t\nui.btn_div.clicked.connect(div)\t\nui.btn_sub.clicked.connect(sub)\t\nui.btn_add.clicked.connect(add)\t\nui.btn_back.clicked.connect(back)\t\nui.btn_brackets.clicked.connect(brackets)\t\nui.btn_dot.clicked.connect(dot)\t\n\n# Run main loop\nsys.exit(app.exec_())\n\n\n \n",
"id": "2360896",
"language": "Python",
"matching_score": 4.445559978485107,
"max_stars_count": 0,
"path": "main.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'calc.ui'\n#\n# Created by: PyQt5 UI code generator 5.9.2\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\nclass Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(302, 393)\n Form.setStyleSheet(\"QPushButton{\\n\"\n\"width: 40px;\\n\"\n\"height: 30px; \\n\"\n\"}\\n\"\n\"QPushButton:hover{\\n\"\n\"background-color: silver;\\n\"\n\"}\")\n self.gridLayoutWidget = QtWidgets.QWidget(Form)\n self.gridLayoutWidget.setGeometry(QtCore.QRect(20, 100, 261, 251))\n self.gridLayoutWidget.setObjectName(\"gridLayoutWidget\")\n self.gridLayout = QtWidgets.QGridLayout(self.gridLayoutWidget)\n self.gridLayout.setContentsMargins(0, 0, 0, 0)\n self.gridLayout.setObjectName(\"gridLayout\")\n self.btn_1 = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_1.setFont(font)\n self.btn_1.setObjectName(\"btn_1\")\n self.gridLayout.addWidget(self.btn_1, 3, 0, 1, 1)\n self.btn_mult = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_mult.setFont(font)\n self.btn_mult.setObjectName(\"btn_mult\")\n self.gridLayout.addWidget(self.btn_mult, 0, 2, 1, 1)\n self.btn_div = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_div.setFont(font)\n self.btn_div.setObjectName(\"btn_div\")\n self.gridLayout.addWidget(self.btn_div, 0, 1, 1, 1)\n self.btn_brackets = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_brackets.setFont(font)\n self.btn_brackets.setObjectName(\"btn_brackets\")\n self.gridLayout.addWidget(self.btn_brackets, 3, 3, 1, 1)\n self.btn_8 = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_8.setFont(font)\n self.btn_8.setObjectName(\"btn_8\")\n self.gridLayout.addWidget(self.btn_8, 1, 1, 1, 1)\n self.btn_4 = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_4.setFont(font)\n self.btn_4.setObjectName(\"btn_4\")\n self.gridLayout.addWidget(self.btn_4, 2, 0, 1, 1)\n self.btn_2 = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_2.setFont(font)\n self.btn_2.setObjectName(\"btn_2\")\n self.gridLayout.addWidget(self.btn_2, 3, 1, 1, 1)\n self.btn_5 = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_5.setFont(font)\n self.btn_5.setObjectName(\"btn_5\")\n self.gridLayout.addWidget(self.btn_5, 2, 1, 1, 1)\n self.btn_3 = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_3.setFont(font)\n self.btn_3.setObjectName(\"btn_3\")\n self.gridLayout.addWidget(self.btn_3, 3, 2, 1, 1)\n self.btn_6 = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_6.setFont(font)\n self.btn_6.setObjectName(\"btn_6\")\n self.gridLayout.addWidget(self.btn_6, 2, 2, 1, 1)\n self.btn_9 = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_9.setFont(font)\n self.btn_9.setObjectName(\"btn_9\")\n self.gridLayout.addWidget(self.btn_9, 1, 2, 1, 1)\n self.btn_7 = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_7.setFont(font)\n 
self.btn_7.setObjectName(\"btn_7\")\n self.gridLayout.addWidget(self.btn_7, 1, 0, 1, 1)\n self.btn_equal = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_equal.setFont(font)\n self.btn_equal.setObjectName(\"btn_equal\")\n self.gridLayout.addWidget(self.btn_equal, 4, 3, 1, 1)\n self.btn_erase = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_erase.setFont(font)\n self.btn_erase.setObjectName(\"btn_erase\")\n self.gridLayout.addWidget(self.btn_erase, 0, 0, 1, 1)\n self.btn_add = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_add.setFont(font)\n self.btn_add.setObjectName(\"btn_add\")\n self.gridLayout.addWidget(self.btn_add, 2, 3, 1, 1)\n self.btn_0 = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_0.setFont(font)\n self.btn_0.setStyleSheet(\"\")\n self.btn_0.setObjectName(\"btn_0\")\n self.gridLayout.addWidget(self.btn_0, 4, 0, 1, 1)\n self.btn_dot = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_dot.setFont(font)\n self.btn_dot.setObjectName(\"btn_dot\")\n self.gridLayout.addWidget(self.btn_dot, 4, 1, 1, 1)\n self.btn_sub = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_sub.setFont(font)\n self.btn_sub.setObjectName(\"btn_sub\")\n self.gridLayout.addWidget(self.btn_sub, 1, 3, 1, 1)\n self.btn_reserved = QtWidgets.QPushButton(self.gridLayoutWidget)\n self.btn_reserved.setEnabled(False)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_reserved.setFont(font)\n self.btn_reserved.setText(\"\")\n self.btn_reserved.setObjectName(\"btn_reserved\")\n self.gridLayout.addWidget(self.btn_reserved, 4, 2, 1, 1)\n self.btn_back = QtWidgets.QPushButton(self.gridLayoutWidget)\n font = QtGui.QFont()\n font.setPointSize(14)\n self.btn_back.setFont(font)\n self.btn_back.setObjectName(\"btn_back\")\n self.gridLayout.addWidget(self.btn_back, 0, 3, 1, 1)\n self.lineEdit = QtWidgets.QLineEdit(Form)\n self.lineEdit.setEnabled(True)\n self.lineEdit.setGeometry(QtCore.QRect(20, 30, 261, 41))\n font = QtGui.QFont()\n font.setFamily(\"Maiandra GD\")\n font.setPointSize(19)\n self.lineEdit.setFont(font)\n self.lineEdit.setText(\"\")\n self.lineEdit.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)\n self.lineEdit.setReadOnly(True)\n self.lineEdit.setObjectName(\"lineEdit\")\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Form\"))\n self.btn_1.setText(_translate(\"Form\", \"1\"))\n self.btn_1.setShortcut(_translate(\"Form\", \"1\"))\n self.btn_mult.setText(_translate(\"Form\", \"X\"))\n self.btn_mult.setShortcut(_translate(\"Form\", \"*\"))\n self.btn_div.setText(_translate(\"Form\", \"÷\"))\n self.btn_div.setShortcut(_translate(\"Form\", \"/\"))\n self.btn_brackets.setText(_translate(\"Form\", \"( )\"))\n self.btn_8.setText(_translate(\"Form\", \"8\"))\n self.btn_8.setShortcut(_translate(\"Form\", \"8\"))\n self.btn_4.setText(_translate(\"Form\", \"4\"))\n self.btn_4.setShortcut(_translate(\"Form\", \"4\"))\n self.btn_2.setText(_translate(\"Form\", \"2\"))\n self.btn_2.setShortcut(_translate(\"Form\", \"2\"))\n self.btn_5.setText(_translate(\"Form\", \"5\"))\n 
self.btn_5.setShortcut(_translate(\"Form\", \"5\"))\n self.btn_3.setText(_translate(\"Form\", \"3\"))\n self.btn_3.setShortcut(_translate(\"Form\", \"3\"))\n self.btn_6.setText(_translate(\"Form\", \"6\"))\n self.btn_6.setShortcut(_translate(\"Form\", \"6\"))\n self.btn_9.setText(_translate(\"Form\", \"9\"))\n self.btn_9.setShortcut(_translate(\"Form\", \"9\"))\n self.btn_7.setText(_translate(\"Form\", \"7\"))\n self.btn_7.setShortcut(_translate(\"Form\", \"7\"))\n self.btn_equal.setText(_translate(\"Form\", \"=\"))\n self.btn_equal.setShortcut(_translate(\"Form\", \"Return\"))\n self.btn_erase.setText(_translate(\"Form\", \"C\"))\n self.btn_erase.setShortcut(_translate(\"Form\", \"Ctrl+Z\"))\n self.btn_add.setText(_translate(\"Form\", \"+\"))\n self.btn_add.setShortcut(_translate(\"Form\", \"+\"))\n self.btn_0.setText(_translate(\"Form\", \"0\"))\n self.btn_0.setShortcut(_translate(\"Form\", \"0\"))\n self.btn_dot.setText(_translate(\"Form\", \",\"))\n self.btn_sub.setText(_translate(\"Form\", \"-\"))\n self.btn_sub.setShortcut(_translate(\"Form\", \"-\"))\n self.btn_back.setText(_translate(\"Form\", \"←\"))\n self.btn_back.setShortcut(_translate(\"Form\", \"Backspace\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n Form = QtWidgets.QWidget()\n ui = Ui_Form()\n ui.setupUi(Form)\n Form.show()\n sys.exit(app.exec_())\n\n",
"id": "1324785",
"language": "Python",
"matching_score": 0.03345950320363045,
"max_stars_count": 0,
"path": "calc.py"
},
{
"content": "import numpy as np\nimport pandas as pd\n\nmovies = pd.read_csv('./data/movies_w_imgurl.csv')\ntags_csv = pd.read_csv('./data/tags.csv')\n\n# count number of movies\n# count number of unique id\nmovie_count = {}\n# how many times each unique tag has occured in all documents\nunique_tags = {} \nfor i in range(len(tags_csv)):\n # sepatare string into tags\n tags = tags_csv['tag'][i].split(',')\n movie = str(tags_csv['movieId'][i])\n for tag in tags:\n tag = tag.strip()\n unique_tags.setdefault(tag, 0)\n unique_tags[tag] +=1\n \n movie_count.setdefault(movie, [])\n movie_count[movie].append(tag)\n\ndef count_IDF():\n 'count IDF for each tag'\n IDF = {}\n # total number of movies with tags\n total_movie_count = len(movie_count)\n for tag in unique_tags.keys():\n IDF[tag] = np.log10(total_movie_count/unique_tags[tag])\n return IDF\n\ndef create_TF():\n 'count TF for each movie in tags csv'\n TF = {}\n for movie in movie_count.keys():\n row = {}\n for tag in unique_tags.keys():\n total_tags = len(movie_count[movie])\n tags_inmovie = 0\n for m_tag in movie_count[movie]:\n # count how many times current tag appeared at the movie\n if tag == m_tag:\n tags_inmovie+=1\n row[tag] = tags_inmovie/total_tags\n TF[movie] = row\n return TF\n\ndef tags_representation(): \n 'returns movieID*tag TF-IDF representation'\n IDF = count_IDF()\n TF = create_TF()\n TFIDF = {}\n for movie in TF:\n row = {}\n for tag in TF[movie]:\n row[tag] = TF[movie][tag] * IDF[tag]\n\n TFIDF[movie] = row\n \n return TFIDF\n\ndef jenres_representation(movies):\n 'Returns genres movie representation'\n movie_representation = {} # final movie representation\n total_count = len(movies)\n genre_count = {}\n \n for genres in movies.genres:\n genre_list = genres.split('|')\n for genre in genre_list:\n genre_count.setdefault(genre, 0) # create new element if not exist\n genre_count[genre]+=1 # increment if exist\n\n genre_list = list(genre_count.keys()) # create a list of keys\n genre_list.sort()\n dict_movies = dict(movies)\n \n for i in range(len(dict_movies['movieId'])):\n row = {}\n for g in genre_list:\n if g in dict_movies['genres'][i].split('|'):\n IDF = np.log10((total_count/genre_count[g]))\n row[g] = IDF\n else:\n row[g] = 0\n movie_representation[str(dict_movies['movieId'][i])] = row\n \n return movie_representation \n\ndef final_representation():\n 'Returns movieId*(jenres+tags) representation'\n movie_repres = jenres_representation(movies)\n tag_repres = tags_representation()\n # list of tagged movieIds\n tag_movieIds = list(tag_repres.keys())\n # list of tags\n tags = list(tag_repres[tag_movieIds[0]].keys())\n # initiate new tags to 0\n for movie in movie_repres:\n for tag in tags:\n # check if movie already has genre=tag\n if tag in movie_repres[movie].keys():\n # rename old genre to old_genre+randint (mydict[new_key] = mydict.pop(old_key))\n movie_repres[movie][tag+str(np.random.randint(999999))] = movie_repres[movie][tag]\n movie_repres[movie][tag] = 0.0\n \n # add movie tags to genre representation\n for movieId in tag_movieIds:\n for tag in tags:\n movie_repres[movieId][tag] = tag_repres[movieId][tag]\n return movie_repres\n\ndef movie_similarity():\n 'Finds cosine similarity between movies'\n movie_repres = final_representation()\n array2d = []\n # dict to 2d array movie representation\n for movie in movie_repres:\n row = list(movie_repres[movie].values())\n array2d.append(row)\n \n # numpy matrix movie representation\n mat_movie_repres = np.matrix(array2d)\n return mat_movie_repres\n\n# def 
mat_mat_cos_similarity(mat):\n# result = []\n# print('Started calculating movie-movie cosine similarity...')\n# for rows in range(len(mat)):\n# row = []\n# for cols in range(len(mat)):\n# a = mov_sim[rows,:]\n# b = mov_sim.T[:,cols]\n# cosine = np.dot(a, b)/(np.linalg.norm(a)*np.linalg.norm(b))\n# row.append(float(cosine))\n# result.append(row)\n# if rows%100==0:\n# print(str(round(rows*100/len(mat), 2)) + '% is done.')\n# # save intermidiate movie similarity\n# save_obj(np.matrix(result), 'movie-cos-similarity')\n# # save final movie similarity\n# save_obj(np.matrix(result), 'movie-cos-similarity')\n\ndef get_partial_similarity(user_movieIds):\n user_sim = False\n for movie in user_movieIds:\n if type(user_sim) == type(False):\n user_sim = cos_sim[movieIds[str(movie)], :]\n else:\n user_sim = np.vstack([user_sim, cos_sim[movieIds[str(movie)], :]])\n return user_sim\n\nmov_sim = movie_similarity()\n#mat_mat_cos_similarity(mov_sim)\n#tfidf = np.concatenate((genre_tfidf, tag_tfidf), axis=1)\nnorm = np.linalg.norm(mov_sim, axis=1, keepdims=True)\nnormalized_tfidf = mov_sim / norm\ncos_sim = np.matmul(normalized_tfidf, normalized_tfidf.T)\n\n# make id dictionarie to accelerate id access\nmovieIds = {}\nindex = 0\nfor movie in movies['movieId']:\n movieIds[str(movie)] = index\n index+=1 #incremrent index\n\ndef get_topN(userId, topN):\n 'Returns top N recommendations'\n ratings = pd.read_csv('./data/ratings.csv')\n ratings = ratings[ratings['userId'] == userId]\n \n user_rating = np.matrix(ratings['rating']).T\n user_sim = get_partial_similarity(ratings['movieId'])\n \n # sum across colums\n sim_sum = np.sum(user_sim, axis=0)\n \n recommendation = np.divide(np.matmul(user_sim.T, user_rating), np.add(sim_sum, 1).T)\n\n # add movie keys row\n recommendation = np.concatenate((np.matrix(np.ones((len(recommendation),), dtype=int)*userId).T, np.matrix(list(movieIds.keys())).T.astype(int), recommendation), axis=1)\n # sort recommendations\n idx = np.lexsort((recommendation[:, 1].squeeze(), -recommendation[:, 2].squeeze()))\n recommendation = recommendation[idx].squeeze()\n \n #leave only topN terms\n recommendation = recommendation[:topN, :]\n return recommendation\n",
"id": "2291876",
"language": "Python",
"matching_score": 2.5383002758026123,
"max_stars_count": 2,
"path": "Movie recommendation/topNrecommender.py"
},
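Example invocation of the content-based recommender above. It assumes the project's ./data/movies_w_imgurl.csv, ./data/tags.csv and ./data/ratings.csv files are present and that the code runs from the "Movie recommendation" directory, since importing topNrecommender triggers the TF-IDF and similarity computation at module load.

    from topNrecommender import get_topN

    top10 = get_topN(userId=1, topN=10)   # matrix rows of [userId, movieId, predicted score], best first
    print(top10)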
{
"content": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\n\ntest_ratings = pd.read_csv('./data/ratings_test.csv')\ntrain_ratings = pd.read_csv('./data/ratings_train.csv')\n\ndef get_movie_sim(train_ratings, fillMean=False):\n \"\"\" Pass trainig raitings that contains movieId, userId, rating columns\n Returns movie-movie similarity martix with movieId as indexes\n Cosine similarity is used\n Pass second argument as True if empty entries of sparce matrix should be filled by movie mean.\n Pass second argument as False if emoty entries of sparce matrix should be filled by 0.\"\"\"\n movie_user = train_ratings.pivot('movieId', 'userId', 'rating')\n if fillMean:\n # set unrated movies to movie mean\n movie_means = movie_user.mean(axis=1)\n for i, col in enumerate(movie_user):\n # using i allows for duplicate columns\n movie_user.iloc[:, i] = movie_user.iloc[:, i].fillna(movie_means)\n else:\n # Fill NaNs with 0\n movie_user = movie_user.fillna(value=0)\n # Calculate cosine similarity\n # create normalazing vector\n norm = np.linalg.norm(movie_user, axis=1, keepdims=True)\n # normalize rates\n normalized_movie_repres = movie_user.div(norm)\n movie_sim = normalized_movie_repres.dot(normalized_movie_repres.T)\n # normalized_movie_repres = movie_user.dot(movie_user.T)\n # movie_sim = normalized_movie_repres.div(norm)\n return movie_sim\n\ndef get_movie_sim_v2(train_ratings, fillMean=False):\n \"\"\" Pass trainig raitings that contains movieId, userId, rating columns\n Returns movie-movie similarity martix with movieId as indexes\n Cosine similarity is used\n Pass second argument as True if empty entries of sparce matrix should be filled by movie mean.\n Pass second argument as False if emoty entries of sparce matrix should be filled by 0.\"\"\"\n movie_user = train_ratings.pivot('movieId', 'userId', 'rating')\n if fillMean:\n # set unrated movies to movie mean\n movie_means = movie_user.mean(axis=1)\n for i, col in enumerate(movie_user):\n # using i allows for duplicate columns\n movie_user.iloc[:, i] = movie_user.iloc[:, i].fillna(movie_means)\n else:\n # Fill NaNs with 0\n movie_user = movie_user.fillna(value=0)\n # Calculate cosine similarity\n # create normalazing vector\n norm = np.linalg.norm(movie_user, axis=1, keepdims=True)\n # normalize rates\n # normalized_movie_repres = movie_user.div(norm)\n # movie_sim = normalized_movie_repres.dot(normalized_movie_repres.T)\n normalized_movie_repres = movie_user.dot(movie_user.T)\n movie_sim = normalized_movie_repres.div(norm)\n return movie_sim\n\ndef get_prediction(train_ratings, movie_sim, userId):\n \"\"\"Returns predictions for a given user\n Requires: training ratings, movie_similarity, userId\"\"\"\n user_ratings = train_ratings[train_ratings.userId == userId]\n # retunr null of user does not exist in training set\n if user_ratings.empty:\n return user_ratings\n # get movie similarity user_rated_movieId x all_movies\n user_sim = movie_sim.loc[list(user_ratings.movieId),:]\n # drop if there are any mismatch\n user_sim = user_sim.dropna(how='any')\n # create pandas dataframe with 'movieId' as indexes and user ratings as 'rating' column\n user_ratings = pd.DataFrame({'rating': list(user_ratings.rating)},\n index=user_ratings.movieId)\n # add one to ratings sum to prevent division by 0\n sim_sum = user_sim.sum() + 1\n # create pandas dataframe with 'movieId' as indexes and user ratings sum as 'rating' column\n sim_sum = pd.DataFrame({'rating': sim_sum.tolist()},\n index=sim_sum.index)\n # multiply user_sim by user_ratings\n 
unnorm_ratings = user_sim.T.dot(user_ratings)\n # normalize user ratings\n user_all_movie_ratings = unnorm_ratings.div(sim_sum)\n # return user_rating predictions\n return user_all_movie_ratings\n\ndef calc_rmse(train_ratings, test_ratings, movie_sim, userId):\n \"\"\"Calculate RMSE score for a single user\n Return: RMSE score for a user\"\"\"\n # get user predictions\n user_predicted_ratings = get_prediction(train_ratings, movie_sim, userId)\n # return None if unknown user\n if user_predicted_ratings.empty:\n return None\n # get user actual ratings\n test_user_ratings = test_ratings[test_ratings.userId == userId]\n \n # remove movies where predictions are not known\n unique = []\n for i in test_user_ratings.movieId:\n if i in movie_sim.index:\n unique.append(i)\n test_user_ratings = test_user_ratings[test_user_ratings.movieId.isin(unique)]\n # remove predictions that will not be used\n user_predicted_ratings = user_predicted_ratings[user_predicted_ratings.index.isin(test_user_ratings.movieId)]\n n = len(user_predicted_ratings)\n #err_square = (user_predicted_ratings.rating - test_user_ratings.rating)**2\n err_square = (np.array(user_predicted_ratings.rating) - np.array(test_user_ratings.rating))**2\n return (err_square.sum()/n)**(1/2)\n\ndef build_movie_repres(train_ratings):\n movie_user = train_ratings.pivot('movieId', 'userId', 'rating')\n movie_means = movie_user.mean(axis=1)\n for i, col in enumerate(movie_user):\n # using i allows for duplicate columns\n # inplace *may* not always work here, so IMO the next line is preferred\n # df.iloc[:, i].fillna(m, inplace=True)\n movie_user.iloc[:, i] = movie_user.iloc[:, i].fillna(movie_means)\n return movie_user\ndef get_factors(movie_user):\n u, s, vh = np.linalg.svd(movie_user, full_matrices=True)\n # take k factors\n K = 400\n U = u[:,:K]\n S = np.diag(s[:K])\n VH = vh[:K, :]\n P = U.dot(S)\n return P, VH\n\ndef get_prediction_svd(P, VH, movie_user, userId, movieId):\n if not int(userId) in list(movie_user.columns):\n print(\"Cannon predict for userId=\" + str(userId))\n return 'unknown'\n elif not int(movieId) in list(movie_user.index):\n print(\"Cannot predict for movieId=\"+ str(movieId))\n return 'unknown'\n else:\n user = movie_user.columns.get_loc(int(userId))\n movie = movie_user.index.get_loc(int(movieId))\n user_predicted_ratings = P.dot(VH[:,user])\n return '{:.4f}'.format(user_predicted_ratings[movie])\n\ndef predict_movie_rate(train_ratings, movie_sim, userId, movieId):\n \"\"\"Predict a rate for specific movieId\"\"\"\n user_recomendations = get_prediction(train_ratings, movie_sim, int(userId))\n if user_recomendations.empty:\n print(\"Cannon predict for userId=\" + str(userId))\n return 'unknown'\n elif not int(movieId) in list(user_recomendations.index):\n print(\"Cannot predict for movieId=\"+ str(movieId))\n return 'unknown'\n rating = user_recomendations.loc[int(movieId)].rating\n return '{:.4f}'.format(rating)\n\ndef read_user_id():\n with open('input.txt', 'r') as f:\n return [l.strip().split(',') for l in f.readlines()]\n\n\ndef write_output(prediction):\n with open('output.txt', 'w') as f:\n for p in prediction:\n f.write(p + \"\\n\")\n\ndef do(ids):\n # test implementation\n movie_sim = get_movie_sim(train_ratings)\n movie_sim2 = get_movie_sim_v2(train_ratings)\n movie_user = build_movie_repres(train_ratings)\n P, VH = get_factors(movie_user)\n prediction = []\n for i in ids:\n rate1 = predict_movie_rate(train_ratings, movie_sim, i[0], i[1])\n prediction.append('{},{},{}'.format(i[0], i[1], rate1))\n rate2 = 
get_prediction_svd(P, VH, movie_user, i[0], i[1])\n prediction.append('{},{},{}'.format(i[0], i[1], rate2))\n rate3 = predict_movie_rate(train_ratings, movie_sim2, i[0], i[1])\n prediction.append('{},{},{}'.format(i[0], i[1], rate3))\n return prediction\n\nif __name__ == \"__main__\":\n user_ids = read_user_id()\n result = do(user_ids)\n write_output(result)",
"id": "12576991",
"language": "Python",
"matching_score": 3.7427093982696533,
"max_stars_count": 2,
"path": "neighborhood-model-based RS/main.py"
},
{
"content": "import numpy as np\nfrom scipy.sparse import rand as sprand\nimport torch\nimport pandas as pd\n\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n# open training file\ntrain = pd.read_csv('./data/ratings_train.csv')\n# open validation file\nvali = pd.read_csv('./data/ratings_vali.csv') \n\n# create user-movie matrix\nuser_movie = train.pivot('userId', 'movieId', 'rating')\n# fill empty values with 0\nuser_movie = user_movie.fillna(0.0)\n\n# Make up some random explicit feedback ratings\n# and convert to a numpy array\nn_users = len(train.userId.unique())\nn_items = len(train.movieId.unique())\nuserIds = list(user_movie.index)\nmovieIds = list(user_movie.columns)\n# convert to numpy\nratings = user_movie.to_numpy()\n\nclass DenseNet(torch.nn.Module):\n\n def __init__(self, n_users, n_items):\n super().__init__()\n \t # user and item embedding layers\n factor_len = 30\n self.user_factors = torch.nn.Embedding(n_users, factor_len)\n self.item_factors = torch.nn.Embedding(n_items, factor_len)\n \t # linear layers\n self.inputs = torch.nn.Linear(factor_len*2, 50)\n # hidden liers\n self.linear1 = torch.nn.Linear(50, 30)\n self.linear2 = torch.nn.Linear(30, 20)\n # output lyer\n self.outputs = torch.nn.Linear(20, 1)\n\n self.to(DEVICE)\n\n def forward(self, users, items, dim):\n users_embedding = self.user_factors(users)\n items_embedding = self.item_factors(items)\n\t# concatenate user and item embeddings to form input\n x = torch.cat([users_embedding, items_embedding], dim)\n x = torch.relu(self.inputs(x))\n x = torch.relu(self.linear1(x))\n x = torch.relu(self.linear2(x))\n output_scores = self.outputs(x)\n return output_scores\n\n\ndef batch(iterable1, iterable2, n=1):\n l = len(iterable1)\n for ndx in range(0, l, n):\n yield iterable1[ndx:min(ndx + n, l)], iterable2[ndx:min(ndx + n, l)]\n\n# model instance\nmodel = DenseNet(n_users, n_items)\nmodel.load_state_dict(torch.load('./param.data'))\nmodel.eval()\n\ndef train():\n # loss function\n loss_func = torch.nn.MSELoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-2) # learning rate\n # Get indexes of nonzero elements (row indexes, col indexes)\n rows, cols = ratings.nonzero()\n # randomly shuffle array\n p = np.random.permutation(len(rows)) # returns shuffled indexes\n rows, cols = rows[p], cols[p]\n for epoch in range(50):\n loss_sum = 0\n batch_num = 1\n for row, col in batch(rows, cols, 256):\n\t # Always clear any previously calculated gradients before performing a\n\t # backward pass. PyTorch doesn't do this automatically because \n\t # accumulating the gradients is \"convenient while training RNNs\". 
\n\t # (source: https://stackoverflow.com/questions/48001598/why-do-we-need-to-call-zero-grad-in-pytorch)\n\t optimizer.zero_grad()\n # Turn data into tensors\n rating = torch.FloatTensor(ratings[row, col]).to(DEVICE)\n #print(rating)\n row = torch.LongTensor([row]).to(DEVICE)\n col = torch.LongTensor([col]).to(DEVICE)\n \n # Predict and calculate loss\n prediction = model(row, col, 2)\n loss = loss_func(prediction.squeeze(), rating)\n # save total loss\n loss_sum += loss.item()\n batch_num += 1\n # Backpropagate\n loss.backward()\n # Update the parameters\n optimizer.step()\n print('Epoch: {}, loss: {}'.format(epoch+1, loss_sum/batch_num))\n\n torch.save(model.state_dict(), './param.data')\n\ndef predict(uid, mid):\n try:\n row = userIds.index(uid)\n col = movieIds.index(mid)\n except:\n return 'unknown'\n row = torch.LongTensor([row]).to(DEVICE)\n col = torch.LongTensor([col]).to(DEVICE)\n prediction = model(row, col, 1)\n return '{:.4f}'.format(prediction.item())\n\n\ndef read_user_id():\n with open('input.txt', 'r') as f:\n return [l.strip().split(',') for l in f.readlines()]\n\n\ndef write_output(prediction):\n with open('output.txt', 'w') as f:\n for p in prediction:\n f.write(p + \"\\n\")\n\ndef do(ids):\n # test implementation\n prediction = []\n for i in ids:\n rate1 = predict(int(i[0]), int(i[1]))\n prediction.append('{},{},{}'.format(i[0], i[1], rate1))\n return prediction\n\nif __name__ == \"__main__\":\n user_ids = read_user_id()\n result = do(user_ids)\n write_output(result)\n# train()\n",
"id": "12728470",
"language": "Python",
"matching_score": 2.524510622024536,
"max_stars_count": 2,
"path": "DNN/main.py"
},
{
"content": "# -*- coding: utf-8 -*-\nimport topNrecommender as rec\n\ndef read_user_id():\n with open('input.txt', 'r') as f:\n return [l.strip() for l in f.readlines()]\n\n\ndef write_output(prediction):\n with open('output.txt', 'w') as f:\n for p in prediction:\n f.write(p + \"\\n\")\n\ndef do(ids):\n # test implementation\n #prediction = [['{},{},{}'.format(i, 5, 3.5)]*30 for i in ids]\n prediction = []\n for i in ids:\n recommends = rec.get_topN(int(i), 30)\n predics = [['{},{},{:.4f}'.format(int(e[0,0]), int(e[0,1]), e[0,2])] for e in recommends]\n for p in predics:\n prediction.append(str(p[0]))\n return prediction\n\n\nif __name__ == \"__main__\":\n user_ids = read_user_id()\n result = do(user_ids)\n write_output(result)",
"id": "2485895",
"language": "Python",
"matching_score": 0.7642566561698914,
"max_stars_count": 2,
"path": "Movie recommendation/main.py"
}
] | 2.531405 |
sorkhemiri | [
{
"content": "BAD_REQUEST_400 = 400\nUNAUTHORIZED_401 = 401\nPAYMENT_REQUIRED_402 = 402\nFORBIDDEN_403 = 403\nNOT_FOUND_404 = 404\nMETHOD_NOT_ALLOWED_405 = 405\nNOT_ACCEPTABLE_406 = 406\nPROXY_AUTHENTICATION_REQUIRED_407 = 407\nREQUEST_TIMEOUT_408 = 408\nCONFLICT_409 = 409\nGONE_410 = 410\nLENGTH_REQUIRED_411 = 411\nPRECONDITION_FAILED_412 = 412\nPAYLOAD_TOO_LARGE_413 = 413\nURI_TOO_LONG_414 = 414\nUNSUPPORTED_MEDIA_TYPE_415 = 415\nRANGE_NOT_SATISFIABLE_416 = 416\nEXPECTATION_FAILED_417 = 417\nIMATEAPOT_418 = 418\nMISDIRECTED_REQUEST_421 = 421\nUNPROCESSABLE_ENTITY_422 = 422\nLOCKED_423 = 423\nFAILED_DEPENDENCY_424 = 424\nTOO_EARLY_425 = 425\nUPGRADE_REQUIRED_426 = 426\nPRECONDITION_REQUIRED_428 = 428\nTOO_MANY_REQUESTS = 429\nREQUEST_HEADER_FIELDS_TOO_LARGE_431 = 431\nNO_RESPONSE_444 = 444\nUNAVAILABLE_FOR_LEGAL_REASONS_451 = 451\n\nINTERNAL_SERVER_ERROR_500 = 500\nNOT_IMPLEMENTED_501 = 501\nBAD_GATEWAY_502 = 502\nSERVICE_UNAVAILABLE_503 = 503\nGATEWAY_TIMEOUT_504 = 504\nHTTP_VERSION_NOT_SUPPORTED_505 = 505\nVARIANT_ALSO_NEGOTIATES_506 = 506\nINSUFFICIENT_STORAGE_507 = 507\nLOOP_DETECTED_508 = 508\nNOT_EXTENDED_510 = 510\nNETWORK_AUTHENTICATION_REQUIRED_511 = 511\n",
"id": "4050272",
"language": "Python",
"matching_score": 1.3770027160644531,
"max_stars_count": 2,
"path": "jexcept/status.py"
},
{
"content": "from fastapi import FastAPI\nfrom starlette.middleware.cors import CORSMiddleware\nimport http.cookies\n\n\nhttp.cookies._is_legal_key = lambda _: True\n\napp = FastAPI()\napp.add_middleware(\n CORSMiddleware, allow_origins=[\"*\"], allow_methods=[\"*\"], allow_headers=[\"*\"]\n)\n\n\nif __name__ == \"__main__\":\n import uvicorn\n\n uvicorn.run(app, host=\"0.0.0.0\", port=3000, debug=True)\n",
"id": "5288295",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "src/app.py"
},
{
"content": "import pytest\n\nfrom jexcept import JException, status\nfrom jexcept import BaseJException\n\n\nclass BaseExceptionTestCase:\n\n @staticmethod\n def test_input():\n err = JException()\n assert err.HTTP_STATUS == 500\n assert err.MESSAGE == \"\"\n\n err = JException(message=\"error message\", http_status=status.BAD_REQUEST_400)\n assert err.HTTP_STATUS == 400\n assert err.MESSAGE == \"error message\"\n\n @staticmethod\n def test_dict():\n err = JException(message=\"error message\", http_status=status.BAD_REQUEST_400, detail=\"error detail\")\n assert err.dict() == {\"Exception\": \"error message\", \"Detail\": \"error detail\"}\n\n @staticmethod\n def test_raise():\n with pytest.raises(BaseJException):\n raise JException()\n\n try:\n raise JException(message=\"error message\", http_status=status.BAD_REQUEST_400, detail=\"error detail\")\n except BaseJException as err:\n assert err.HTTP_STATUS == 400\n assert err.MESSAGE == \"error message\"\n assert err.DETAIL == \"error detail\"\n",
"id": "10334590",
"language": "Python",
"matching_score": 4.519054412841797,
"max_stars_count": 2,
"path": "test/test_module/test_exception.py"
},
{
"content": "import pytest\n\nfrom jexcept import BaseJException, status\nfrom jexcept import JAPIException\n\n\nclass BaseExceptionTestCase:\n\n @staticmethod\n def test_dict():\n class MyAPIException(JAPIException):\n MESSAGE = \"error message\"\n DETAIL = \"error detail\"\n HTTP_STATUS = status.BAD_REQUEST_400\n\n assert MyAPIException.dict() == {\"Exception\": \"error message\", \"Detail\": \"error detail\"}\n\n @staticmethod\n def test_raise():\n class MyAPIException(JAPIException):\n pass\n\n with pytest.raises(BaseJException):\n raise MyAPIException\n\n class MyAPIException(JAPIException):\n MESSAGE = \"error message\"\n DETAIL = \"error detail\"\n HTTP_STATUS = status.BAD_REQUEST_400\n\n try:\n raise MyAPIException\n except BaseJException as err:\n assert err.HTTP_STATUS == 400\n assert err.MESSAGE == \"error message\"\n assert err.DETAIL == \"error detail\"\n",
"id": "2276723",
"language": "Python",
"matching_score": 2.5069448947906494,
"max_stars_count": 2,
"path": "test/test_module/test_api_exception.py"
},
{
"content": "from .base_exception import BaseJException\nfrom .status import INTERNAL_SERVER_ERROR_500\n\n\nclass JAPIException(BaseJException):\n HTTP_STATUS = INTERNAL_SERVER_ERROR_500\n MESSAGE = \"\"\n DETAIL = \"\"\n\n @classmethod\n def http_status(cls):\n return cls.HTTP_STATUS\n\n @classmethod\n def message(cls):\n return cls.MESSAGE\n\n @classmethod\n def detail(cls):\n return cls.DETAIL\n\n @classmethod\n def dict(cls):\n return {\n \"Exception\": cls.MESSAGE,\n \"Detail\": cls.DETAIL,\n }\n\n def __str__(self):\n return f\"{self.__class__.__name__}:{self.MESSAGE}__HTTP STATUS({self.HTTP_STATUS})\"\n\n def __repr__(self):\n return f\"{self.__class__.__name__}:<HTTP STATUS({self.HTTP_STATUS})>\"\n",
"id": "5756307",
"language": "Python",
"matching_score": 5.218903064727783,
"max_stars_count": 2,
"path": "jexcept/api_exception.py"
},
{
"content": "from .base_exception import BaseJException\nfrom .status import INTERNAL_SERVER_ERROR_500\n\n\nclass JException(BaseJException):\n def __init__(self, message: str = \"\", http_status: int = INTERNAL_SERVER_ERROR_500, detail: str = \"\"):\n super(JException, self).__init__(message)\n self.HTTP_STATUS = http_status\n self.MESSAGE = message\n self.DETAIL = detail\n\n def dict(self):\n return {\n \"Exception\": self.MESSAGE,\n \"Detail\": self.DETAIL,\n }\n\n def __str__(self):\n return f\"{self.__class__.__name__}:{self.MESSAGE}__HTTP STATUS({self.HTTP_STATUS})\"\n\n def __repr__(self):\n return f\"{self.__class__.__name__}:<HTTP STATUS({self.HTTP_STATUS})>\"\n",
"id": "4543133",
"language": "Python",
"matching_score": 3.977593421936035,
"max_stars_count": 2,
"path": "jexcept/exception.py"
},
{
"content": "import abc\n\n\nclass BaseJException(Exception):\n HTTP_STATUS = None\n MESSAGE = None\n DETAIL = None\n\n @abc.abstractmethod\n def dict(self):\n pass\n",
"id": "7407355",
"language": "Python",
"matching_score": 1.8692500591278076,
"max_stars_count": 2,
"path": "jexcept/base_exception.py"
},
{
"content": "\"\"\"\n Json Serializable Exceptions for Python.\n :copyright: 2021 by <NAME>.\n :license: ISC, see LICENSE for more details.\n\"\"\"\nfrom .base_exception import BaseJException\nfrom .exception import JException\nfrom . import status\nfrom .api_exception import JAPIException\n__all__ = [JException, JAPIException, BaseJException, status]\n",
"id": "8229951",
"language": "Python",
"matching_score": 1.207371711730957,
"max_stars_count": 2,
"path": "jexcept/__init__.py"
},
{
"content": "from distutils.core import setup\nsetup(\n name='JExcept',\n packages=['jexcept'],\n version='0.1.1',\n license='ISC',\n description='Json Serializable Exceptions for Python',\n author='<NAME>',\n author_email='<EMAIL>',\n url='https://github.com/sorkhemiri/jexcept',\n download_url='https://github.com/sorkhemiri/jexcept/archive/refs/tags/0.1.1.tar.gz',\n keywords=['EXCEPTION', 'HTTP_EXCEPTION', 'JSON'],\n install_requires=[],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: ISC License (ISCL)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n)\n",
"id": "2581499",
"language": "Python",
"matching_score": 5.417664527893066,
"max_stars_count": 2,
"path": "setup.py"
},
{
"content": "from distutils.core import setup\n\nwith open(\"README.rst\", \"r\") as fl:\n long_description = fl.read()\n\nsetup(\n name = 'ahura',\n packages = ['ahura'],\n version = '0.1.12',\n license='MIT',\n description = 'A God Like Serializer For God Like Developers.',\n long_description=long_description,\n author = '<NAME>',\n author_email = '<EMAIL>',\n url = 'https://github.com/sorkhemiri/ahura',\n download_url = 'https://github.com/sorkhemiri/ahura/archive/0.1.tar.gz',\n keywords = ['python', 'django', 'serializer', 'json', 'model', 'orm'],\n install_requires=[],\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)",
"id": "2385970",
"language": "Python",
"matching_score": 0.2536886930465698,
"max_stars_count": 3,
"path": "setup.py"
},
{
"content": "import json\nimport yaml\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.shortcuts import render\nfrom django.conf import settings\n\ndef yaml_to_html(request):\n if hasattr(settings, 'SWAGGER_YAML_FILE'):\n file = open(settings.SWAGGER_YAML_FILE)\n spec = yaml.load(file.read())\n return render(request, template_name=\"swagger_base.html\", context={'data': json.dumps(spec)})\n else:\n raise ImproperlyConfigured('You should define SWAGGER_YAML_FILE in your settings')\n",
"id": "5326772",
"language": "Python",
"matching_score": 2.392305374145508,
"max_stars_count": 0,
"path": "swagger_ui/views.py"
},
{
"content": "\n\nfrom .views import yaml_to_html\n\ntry:\n from django.urls import path\n\n urlpatterns = [\n path('api-doc/', yaml_to_html, name=\"api-doc\"),\n ]\nexcept:\n from django.conf.urls import url\n\n urlpatterns = [\n url(r'^api-doc/', yaml_to_html, name=\"api-doc\"),\n ]\n",
"id": "154823",
"language": "Python",
"matching_score": 1.6128216981887817,
"max_stars_count": 38,
"path": "swagger_ui/urls.py"
}
] | 2.130778 |
JonAWhite | [
{
"content": "\"\"\"\nThis module contains a class to make requests to the Gemini API.\n\nAuthor: <NAME>\n\"\"\"\nimport time\nimport json\nimport hmac\nimport base64\nimport hashlib\nimport requests\n\n\nclass Geminipy(object):\n \"\"\"\n A class to make requests to the Gemini API.\n\n Make public or authenticated requests according to the API documentation:\n https://docs.gemini.com/\n \"\"\"\n\n live_url = 'https://api.gemini.com'\n sandbox_url = 'https://api.sandbox.gemini.com'\n base_url = sandbox_url\n api_key = ''\n secret_key = ''\n\n def __init__(self, api_key='', secret_key='', live=False):\n \"\"\"\n Initialize the class.\n\n Arguments:\n api_key -- your Gemini API key\n secret_key -- your Gemini API secret key for signatures\n live -- use the live API? otherwise, use the sandbox (default False)\n \"\"\"\n self.api_key = api_key\n self.secret_key = secret_key\n\n if live:\n self.base_url = self.live_url\n\n # public requests\n def symbols(self):\n \"\"\"Send a request for all trading symbols, return the response.\"\"\"\n url = self.base_url + '/v1/symbols'\n\n return requests.get(url)\n\n def book(self, symbol='btcusd', limit_bids=0, limit_asks=0):\n \"\"\"\n Send a request to get the public order book, return the response.\n\n Arguments:\n symbol -- currency symbol (default 'btcusd')\n limit_bids -- limit the number of bids returned (default 0)\n limit_asks -- limit the number of asks returned (default 0)\n \"\"\"\n url = self.base_url + '/v1/book/' + symbol\n params = {\n 'limit_bids': limit_bids,\n 'limit_asks': limit_asks\n }\n\n return requests.get(url, params)\n\n def trades(self, symbol='btcusd', since=0, limit_trades=50,\n include_breaks=0):\n \"\"\"\n Send a request to get all public trades, return the response.\n\n Arguments:\n symbol -- currency symbol (default 'btcusd')\n since -- only return trades after this unix timestamp (default 0)\n limit_trades -- maximum number of trades to return (default 50).\n include_breaks -- whether to display broken trades (default False)\n \"\"\"\n url = self.base_url + '/v1/trades/' + symbol\n params = {\n 'since': since,\n 'limit_trades': limit_trades,\n 'include_breaks': include_breaks\n }\n\n return requests.get(url, params)\n\n # authenticated requests\n def new_order(self, amount, price, side, client_order_id=None,\n symbol='btcusd', type='exchange limit'):\n \"\"\"\n Send a request to place an order, return the response.\n\n Arguments:\n amount -- quoted decimal amount of BTC to purchase\n price -- quoted decimal amount of USD to spend per BTC\n side -- 'buy' or 'sell'\n client_order_id -- an optional client-specified order id (default None)\n symbol -- currency symbol (default 'btcusd')\n type -- the order type (default 'exchange limit')\n \"\"\"\n request = '/v1/order/new'\n url = self.base_url + request\n params = {\n 'request': request,\n 'nonce': self.get_nonce(),\n 'symbol': symbol,\n 'amount': amount,\n 'price': price,\n 'side': side,\n 'type': type\n }\n\n if client_order_id is not None:\n params['client_order_id'] = client_order_id\n\n return requests.post(url, headers=self.prepare(params))\n\n def cancel_order(self, order_id):\n \"\"\"\n Send a request to cancel an order, return the response.\n\n Arguments:\n order_id - the order id to cancel\n \"\"\"\n request = '/v1/order/cancel'\n url = self.base_url + request\n params = {\n 'request': request,\n 'nonce': self.get_nonce(),\n 'order_id': order_id\n }\n\n return requests.post(url, headers=self.prepare(params))\n\n def cancel_session(self):\n \"\"\"Send a request to 
cancel all session orders, return the response.\"\"\"\n request = '/v1/order/cancel/session'\n url = self.base_url + request\n params = {\n 'request': request,\n 'nonce': self.get_nonce()\n }\n\n return requests.post(url, headers=self.prepare(params))\n\n def cancel_all(self):\n \"\"\"Send a request to cancel all orders, return the response.\"\"\"\n request = '/v1/order/cancel/all'\n url = self.base_url + request\n params = {\n 'request': request,\n 'nonce': self.get_nonce()\n }\n\n return requests.post(url, headers=self.prepare(params))\n\n def order_status(self, order_id):\n \"\"\"\n Send a request to get an order status, return the response.\n\n Arguments:\n order_id -- the order id to get information on\n \"\"\"\n request = '/v1/order/status'\n url = self.base_url + request\n params = {\n 'request': request,\n 'nonce': self.get_nonce(),\n 'order_id': order_id\n }\n\n return requests.post(url, headers=self.prepare(params))\n\n def active_orders(self):\n \"\"\"Send a request to get active orders, return the response.\"\"\"\n request = '/v1/orders'\n url = self.base_url + request\n params = {\n 'request': request,\n 'nonce': self.get_nonce()\n }\n\n return requests.post(url, headers=self.prepare(params))\n\n def past_trades(self, symbol='btcusd', limit_trades=50, timestamp=0):\n \"\"\"\n Send a trade history request, return the response.\n\n Arguements:\n symbol -- currency symbol (default 'btcusd')\n limit_trades -- maximum number of trades to return (default 50)\n timestamp -- only return trades after this unix timestamp (default 0)\n \"\"\"\n request = '/v1/mytrades'\n url = self.base_url + request\n params = {\n 'request': request,\n 'nonce': self.get_nonce(),\n 'symbol': symbol,\n 'limit_trades': limit_trades,\n 'timestamp': timestamp\n }\n\n return requests.post(url, headers=self.prepare(params))\n\n def balances(self):\n \"\"\"Send an account balance request, return the response.\"\"\"\n request = '/v1/balances'\n url = self.base_url + request\n params = {\n 'request': request,\n 'nonce': self.get_nonce()\n }\n\n return requests.post(url, headers=self.prepare(params))\n\n def heartbeat(self):\n \"\"\"Send a heartbeat message, return the response.\"\"\"\n request = '/v1/heartbeat'\n url = self.base_url + request\n params = {\n 'request': request,\n 'nonce': self.get_nonce()\n }\n\n return requests.post(url, headers=self.prepare(params))\n\n def withdraw(self, currency, address, amount):\n request = '/v1/withdraw/' + currency\n url = self.base_url + request\n params = {\n 'request': request,\n 'nonce': self.get_nonce(),\n 'address': address,\n 'amount': amount\n }\n\n return requests.post(url, headers=self.prepare(params))\n\n def get_nonce(self):\n \"\"\"Return the current millisecond timestamp as the nonce.\"\"\"\n return int(round(time.time() * 1000))\n\n def prepare(self, params):\n \"\"\"\n Prepare, return the required HTTP headers.\n\n Base 64 encode the parameters, sign it with the secret key,\n create the HTTP headers, return the whole payload.\n\n Arguments:\n params -- a dictionary of parameters\n \"\"\"\n jsonparams = json.dumps(params)\n payload = base64.b64encode(jsonparams)\n signature = hmac.new(self.secret_key, payload,\n hashlib.sha384).hexdigest()\n\n return {'X-GEMINI-APIKEY': self.api_key,\n 'X-GEMINI-PAYLOAD': payload,\n 'X-GEMINI-SIGNATURE': signature}\n",
"id": "10076158",
"language": "Python",
"matching_score": 3.1505537033081055,
"max_stars_count": 0,
"path": "geminipy.py"
},
{
"content": "from geminipy import Geminipy\nimport argparse\nimport sys\nfrom decimal import *\n\ngetcontext().prec = 28 \ngetcontext().mode = ROUND_HALF_EVEN\n\ndef find_balance(balances, currency):\n for x in range(len(balances)):\n balance = balances[x]\n if balance[u'currency'] == currency:\n return balance\n\ndef balance_minus_fees(balance, fee):\n one_plus_fee = Decimal('1')+fee\n balance_minus_fee = balance / one_plus_fee \n return balance_minus_fee.quantize(Decimal('1.00'))\n\ndef get_trade_and_fees(usd_to_spend, fee=Decimal('0.0025')):\n usd_to_trade = balance_minus_fees(usd_to_spend, fee)\n usd_in_fees = usd_to_spend - usd_to_trade\n trade_and_fees = {\"trade\": usd_to_trade, \"fees\": usd_in_fees}\n return trade_and_fees \n\ndef get_ask_price_and_amount(con):\n book = con.book(limit_bids=1, limit_asks=1)\n book.raise_for_status()\n current_ask = book.json()[u'asks'][0]\n current_ask_amount = Decimal(current_ask[u'amount'])\n current_ask_price = Decimal(current_ask[u'price'])\n return {\"price\":current_ask_price, \"amount\":current_ask_amount}\n\ndef get_currency_available_for_withdrawl(con, currency):\n balances = con.balances()\n balances.raise_for_status()\n currency_balance = find_balance(balances.json(), currency=currency) \n currency_balance_available = Decimal(currency_balance[u'availableForWithdrawal'])\n return currency_balance_available\n\ndef validate_usd_to_spend(con, usd_to_spend): \n # Get USD Available for Withdrawl\n usd_balance_available = get_currency_available_for_withdrawl(con, u'USD')\n print 'Available: $' + str(usd_balance_available)\n \n # Confirm that you have enough USD in your account\n if usd_to_spend > usd_balance_available:\n usd_missing = usd_to_spend - usd_balance_available\n print '[ERROR] You need an additional $' + str(usd_missing) + ' to make that trade'\n sys.exit(1)\n\ndef validate_btc_to_buy(btc_to_buy, current_ask_amount): \n if btc_to_buy > current_ask_amount:\n btc_unavailable = btc_to_buy - current_ask_amount\n print '[ERROR] There is ' + str(btc_unavailable) + ' BTC missing at that price'\n sys.exit(1)\n\ndef purchase_btc(con, btc_to_buy, current_ask_price):\n print \"Buying \" + str(btc_to_buy) + \" BTC @ $\" + str(current_ask_price)\n order = con.new_order(amount=str(btc_to_buy), price=str(current_ask_price), side='buy')\n print order.json()\n return\n\ndef spend_usd(con, usd_to_spend):\n # Validate USD to spend \n validate_usd_to_spend(con, usd_to_spend)\n\n # Get Trade and Fees\n trade_and_fees = get_trade_and_fees(usd_to_spend)\n usd_to_trade = trade_and_fees[\"trade\"]\n usd_in_fees = trade_and_fees[\"fees\"] \n print 'Spend: $' + args.usd_to_spend + ' => Trade: $' + str(usd_to_trade) + '; Fees: $' + str(usd_in_fees)\n \n # Get Current Ask\n ask_price_and_amount = get_ask_price_and_amount(con)\n current_ask_price = ask_price_and_amount[\"price\"]\n current_ask_amount = ask_price_and_amount[\"amount\"]\n btc_to_buy = (usd_to_trade / current_ask_price).quantize(Decimal('1.00000000'))\n print 'BTC to Buy: ' + str(btc_to_buy) \n print 'Ask: ' + str(current_ask_amount) + ' @ $' + str(current_ask_price) \n \n # Make sure there is enough BTC to cover trade\n validate_btc_to_buy(btc_to_buy, current_ask_amount)\n \n #Place Trade\n purchase_btc(con, btc_to_buy, current_ask_price)\n\ndef validate_btc_to_withdraw(con, btc_amount): \n btc_balance_available = get_currency_available_for_withdrawl(con, u'BTC')\n if btc_amount > btc_balance_available:\n print '[ERROR] You only have ' + str(btc_balance_available) + ' BTC available to withdraw.'\n 
sys.exit(1)\n\ndef withdraw_btc(con, btc_amount, withdrawl_address):\n validate_btc_to_withdraw(con, btc_amount)\n print \"Withdrawing \" + str(btc_amount) + \" BTC to \" + withdrawl_address\n withdrawl = con.withdraw(currency='btc', address=withdrawl_address, amount=str(btc_amount))\n print withdrawl.json() \n return\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--usd_to_spend\", help='The amount of USD to use to buy BTC')\n parser.add_argument(\"--btc_to_withdraw\", nargs=2, help='The amount of BTC to withdraw and the address to withdraw to (ex: .01 1HDLnpVFgqgnfiKjxJUPmb82P7XamASuZf)')\n parser.add_argument(\"gemini_api_key\", help='Get this from gemini.com')\n parser.add_argument(\"gemini_api_secret\", help='Get this from gemini.com')\n args = parser.parse_args()\n return args\n\nargs = get_args()\ncon = Geminipy(api_key=args.gemini_api_key, secret_key=args.gemini_api_secret, live=True)\nif args.usd_to_spend:\n usd_to_spend = Decimal(args.usd_to_spend)\n spend_usd(con, usd_to_spend)\n\nif args.btc_to_withdraw:\n btc_amount = Decimal(args.btc_to_withdraw[0]).quantize(Decimal('1.00000000'))\n withdrawl_address = args.btc_to_withdraw[1]\n withdraw_btc(con, btc_amount, withdrawl_address) \n\n",
"id": "4566388",
"language": "Python",
"matching_score": 0.4330703318119049,
"max_stars_count": 0,
"path": "gemini_manager.py"
},
{
"content": "#!/usr/bin/python\n\n# Creates a TXF file for LibraTax Form to populate Form 8949.\n# https://turbotax.intuit.com/txf/TXF042.jsp\n# Look for codes 712, 714.\n\n# Export LibraTax for the Appropriate Calendar Year\n# Remove the first three lines and last four lines.\n# Essentially, you're looking for something like the following over and over:\n# 0.4227 BTC,9/3/2015,1/3/2016,182.15,97.53,84.62,Short\n\n# python create-txf-2015.py Libra-Tax_report-2016.csv > 2016_form_8949.txf\n\n# Import the .txf file into TurboTax via \n# \"File > Import > From TXF Files\".\n# You should see this:\n# These Documents Are Now Ready for Import:\n# - 1099-B (number of transactions)\n\n# If you don't like what you see, you can remove the imported data via\n# \"File > Remove Imported Data\".\n\nimport sys\nimport csv\nimport datetime\n\nbox_dict = {'Short': 712, 'Long': 714}\n\ntoday = datetime.date.today()\ntoday_formatted = today.strftime('%m/%d/%Y')\n\nprint 'V042'\nprint 'ALibraTax'\nprint 'D ' + today_formatted \nprint '^'\n\nwith open(sys.argv[1], 'r') as csvfile:\n for row in csv.reader(csvfile): \n amount = row[0]\n print 'Amount: ' + amount\n symbol = row[1]\n print 'Symbol: ' + symbol \n acquired = row[2]\n print 'Acquired: ' + acquired \n disposed = row[3]\n proceeds = row[4]\n base = row[5]\n gain = row[6]\n term = row[7]\n taxref = box_dict[term]\n descr = amount + ' ' + symbol \n print 'TD'\n print 'N' + str(taxref)\n print 'C1'\n print 'L1'\n print 'P' + descr\n print 'D' + acquired\n print 'D' + disposed\n print '$' + base\n print '$' + proceeds\n print '^'\n",
"id": "10796300",
"language": "Python",
"matching_score": 0.4420890808105469,
"max_stars_count": 0,
"path": "create-txf-2015.py"
},
{
"content": "from coinbase.wallet.client import Client\nimport argparse\nimport json\n\ndef find_account(accounts, name):\n for x in range(len(accounts)):\n account = accounts[x] \n if account.name == name:\n return account\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"budget_file\", help='JSON file in the form [{\"name\":\"Budget Item 1\", \"amount_usd\":5.50}]')\nparser.add_argument(\"bitcoin_paid_price\", help='The price you paid for the coins')\nparser.add_argument(\"coinbase_api_key\", help='Get this from coinbase.com')\nparser.add_argument(\"coinbase_api_secret\", help='Get this from coinbase.com')\nargs = parser.parse_args()\n\nwith open(args.budget_file) as data_file:\n budget_accounts = json.load(data_file)\n\nclient = Client(args.coinbase_api_key, args.coinbase_api_secret)\nprimary_account = client.get_primary_account()\nbitcoin_spot_price_in_usd = client.get_spot_price(currency_pair = 'BTC-USD')[\"amount\"]\nbitcoin_paid_price_in_usd = args.bitcoin_paid_price\naccounts_obj = client.get_accounts(limit=\"100\")\nassert (accounts_obj.pagination is None) or isinstance(accounts_obj.pagination, dict)\naccounts = accounts_obj[::]\n\ntotal_usd = 0\nfor budget_account in budget_accounts:\n total_usd += budget_account[\"amount_usd\"]\n\ntotal_btc = 0\nfor budget_account in budget_accounts:\n budget_account_name = budget_account[\"name\"]\n budget_account_id = find_account(accounts, budget_account_name).id\n budget_account_amount_usd = budget_account[\"amount_usd\"]\n budget_account_amount_btc = float(\"{0:.8f}\".format(budget_account_amount_usd / float(bitcoin_paid_price_in_usd)))\n total_btc += budget_account_amount_btc\n print 'Transfering ' + str(budget_account_amount_btc) + ' BTC from ' + primary_account.name + ' (' + primary_account.id + ') to ' + budget_account_name + ' (' + budget_account_id + ')' \n #client.transfer_money(primary_account.id, to=budget_account_id, amount=str(budget_account_amount_btc), currency=\"BTC\")\n\nprint 'BTC-USD Spot Price: ' + str(bitcoin_spot_price_in_usd)\nprint 'BTC-USD Paid Price: ' + bitcoin_paid_price_in_usd\nprint 'Budget Total: $' + str(\"%.2f\" % total_usd) \nprint 'Budget Total: ' + str(\"%.8f\" % total_btc) + ' BTC' \n",
"id": "6084824",
"language": "Python",
"matching_score": 1.1683005094528198,
"max_stars_count": 0,
"path": "coinbase_execute_budget.py"
},
{
"content": "from dateutil import parser\n\ndates = [\n\"Sun Jan 03 2016 01:39:39 GMT+0000 (UTC)\",\n\"Sat Jan 09 2016 18:59:47 GMT+0000 (UTC)\",\n]\n\nfor date in dates:\n date_obj = parser.parse(date)\n print str(date_obj.date()) + ' ' + str(date_obj.time())\n",
"id": "11923687",
"language": "Python",
"matching_score": 0.38295429944992065,
"max_stars_count": 0,
"path": "circle_date_convert_simple.py"
}
] | 0.442089 |
BartekMatuszewski01 | [
{
"content": "import math\nfrom typing import Any, Optional, Tuple\n\nimport numpy as np\nfrom numpy import ndarray\n\n\ndef cylinder_area(r: float, h: float) -> float:\n \"\"\"Obliczenie pola powierzchni walca. \n Szczegółowy opis w zadaniu 1.\n \n Parameters:\n r (float): promień podstawy walca \n h (float): wysokosć walca\n \n Returns:\n float: pole powierzchni walca \n \"\"\"\n nan = float('nan')\n if r > 0 and h > 0:\n p = 2 * math.pi * r * r + 2 * math.pi * r * h\n return p\n else:\n return nan\n\n\ndef fib(n: int) -> np.ndarray or None:\n \"\"\"Obliczenie pierwszych n wyrazów ciągu Fibonnaciego. \n Szczegółowy opis w zadaniu 3.\n \n Parameters:\n n (int): liczba określająca ilość wyrazów ciągu do obliczenia \n \n Returns:\n np.ndarray: wektor n pierwszych wyrazów ciągu Fibonnaciego.\n \"\"\"\n if type(n) is int:\n vector_of_fib_numbers = np.empty(0, int)\n a_1 = 1\n a_2 = 1\n a_wyn = 0\n\n if n > 1:\n vector_of_fib_numbers = np.append(vector_of_fib_numbers, [a_1, a_2])\n for i in range(1, n):\n a_wyn = a_2 + a_1\n\n a_1 = a_2\n a_2 = a_wyn\n\n vector_of_fib_numbers = np.append(vector_of_fib_numbers, a_2)\n\n return vector_of_fib_numbers\n elif n == 1:\n vector_of_fib_numbers = np.append(vector_of_fib_numbers, a_1)\n return vector_of_fib_numbers\n else:\n return None\n\n else:\n return None\n\n\ndef matrix_calculations(a: float) -> Tuple[Any]:\n \"\"\"Funkcja zwraca wartości obliczeń na macierzy stworzonej \n na podstawie parametru a. \n Szczegółowy opis w zadaniu 4.\n \n Parameters:\n a (float): wartość liczbowa \n \n Returns:\n touple: krotka zawierająca wyniki obliczeń \n (Minv, Mt, Mdet) - opis parametrów w zadaniu 4.\n \"\"\"\n matrix = np.array([[a, 1, -a],\n [0, 1, 1],\n [-a, a, 1]])\n matrix_det = np.linalg.det(matrix)\n matrix_transponsed = np.transpose(matrix)\n\n if matrix_det != 0:\n matrix_inv = np.linalg.inv(matrix)\n t = matrix_inv, matrix_transponsed, matrix_det\n else:\n t = float('nan'), matrix_transponsed, matrix_det\n\n print(t)\n return t\n\n\ndef custom_matrix(m: int, n: int) -> Optional[ndarray]:\n \"\"\"Funkcja zwraca macierz o wymiarze mxn zgodnie\n z opisem zadania 7.\n\n Parameters:\n m (int): ilość wierszy macierzy\n n (int): ilość kolumn macierzy\n\n Returns:\n np.ndarray: macierz zgodna z opisem z zadania 7.\n \"\"\"\n if m > 0 and n > 0 and type(m) is int and type(n) is int:\n matrix = np.zeros((m, n))\n for i in range(m):\n for j in range(n):\n if i > j:\n matrix[i, j] = i\n else:\n matrix[i, j] = j\n\n return matrix\n else:\n return None\n\n\n\n\n",
"id": "11022083",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "Metody numeryczne 2021/Laboratorium 1/main.py"
},
{
"content": "# -*- coding: ISO-8859-2 -*-\n\nimport pytest\nimport main\nimport pickle\nimport math\nimport numpy as np\n\nexpected = pickle.load(open('expected','rb'))\n\nresults_cylinder_area = expected['cylinder_area']\nresults_fib = expected['fib']\nresults_matrix_calculations = expected['matrix_calculations']\nresults_custom_matrix = expected['custom_matrix']\n\n@pytest.mark.parametrize(\"r,h,result\", results_cylinder_area)\ndef test_cylinder_area(r:float,h:float,result):\n if math.isnan(result):\n assert math.isnan(main.cylinder_area(r,h)), 'Spodziewany wynik: {0}, aktualny {1}. Błedy wejścia.'.format(result, main.cylinder_area(r,h))\n else:\n assert main.cylinder_area(r,h) == pytest.approx(result), 'Spodziewany wynik: {0}, aktualny {1}. Błędy implementacji.'.format(result, main.cylinder_area(r,h))\n\n@pytest.mark.parametrize(\"n,result\", results_fib)\ndef test_fib(n:int,result):\n if result is None:\n assert main.fib(n) is None, 'Spodziewany wynik: {0}, aktualny {1}. Błedy wejścia.'.format(result, main.fib(n))\n else:\n assert main.fib(n) == pytest.approx(result), 'Spodziewany wynik: {0}, aktualny {1}. Błędy implementacji.'.format(result, main.fib(n))\n\n@pytest.mark.parametrize(\"a,result\", results_matrix_calculations)\ndef test_matrix_calculations(a:float,result):\n test_result = main.matrix_calculations(a)\n if not isinstance(result[0], np.ndarray):\n assert math.isnan(test_result[0]) and test_result[1] == pytest.approx(result[1]) and test_result[2] == pytest.approx(result[2])\n else:\n assert test_result[0] == pytest.approx(result[0]) and test_result[1] == pytest.approx(result[1]) and test_result[2] == pytest.approx(result[2])\n\n@pytest.mark.parametrize(\"m,n,result\", results_custom_matrix)\ndef test_custom_matrix(m:int, n:int,result):\n if result is None:\n assert main.custom_matrix(m,n) is None, 'Spodziewany wynik: {0}, aktualny {1}. Błedy wejścia.'.format(result, main.custom_matrix(m,n))\n else:\n assert main.custom_matrix(m,n) == pytest.approx(result), 'Spodziewany wynik: {0}, aktualny {1}. Błędy implementacji.'.format(result, main.custom_matrix(m,n))\n\n ",
"id": "8961362",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "Metody numeryczne 2021/Laboratorium 1/test_main.py"
}
] | 0 |
umrashrf | [
{
"content": "from .wsgi import *\n",
"id": "2268682",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "saleor/wsgi/__init__.py"
},
{
"content": "\"\"\"\n{{cookiecutter.repo_name}}\n-----------------\n\n{{cookiecutter.short_description}}\n\"\"\"\n\nimport datetime\nimport gettext\nimport sys\nimport time\nimport tkinter\nimport tkinter.ttk as ttk\nfrom tkinter.filedialog import askopenfilename\n\n# All translations provided for illustrative purposes only.\n{% if cookiecutter.language == 'german' %}\nde = gettext.translation('messages', localedir='locale', languages=['de'])\nde.install()\n{% elif cookiecutter.language == 'spanish' %}\nes = gettext.translation('messages', localedir='locale', languages=['es'])\nes.install()\n{% else %} # english\n_ = lambda s: s\n{% endif %}\n\n\nclass PopupDialog(ttk.Frame):\n \"Sample popup dialog implemented to provide feedback.\"\n\n def __init__(self, parent, title, body):\n ttk.Frame.__init__(self, parent)\n self.top = tkinter.Toplevel(parent)\n _label = ttk.Label(self.top, text=body, justify=tkinter.LEFT)\n _label.pack(padx=10, pady=10)\n _button = ttk.Button(self.top, text=_(\"OK\"), command=self.ok_button)\n _button.pack(pady=5)\n self.top.title(title)\n\n def ok_button(self):\n \"OK button feedback.\"\n\n self.top.destroy()\n\n\n{% if cookiecutter.insert_navigation == 'y' %}\nclass NavigationBar(ttk.Frame):\n \"Sample navigation pane provided by cookiecutter switch.\"\n\n def __init__(self, parent):\n ttk.Frame.__init__(self, parent)\n self.config(border=1, relief=tkinter.GROOVE)\n\n self.scrollbar = ttk.Scrollbar(self, orient=tkinter.VERTICAL)\n self.scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y, expand=1)\n\n self.listbox = tkinter.Listbox(self, bg='white')\n self.listbox.pack(side=tkinter.LEFT, fill=tkinter.BOTH, expand=1)\n for i in range(1, 100):\n self.listbox.insert(tkinter.END, _('Navigation ') + str(i))\n self.listbox.config(yscrollcommand=self.scrollbar.set)\n self.scrollbar.config(command=self.listbox.yview)\n self.bind_all('<<ListboxSelect>>', self.onselect)\n self.pack()\n\n def onselect(self, event):\n \"\"\"Sample function provided to show how navigation commands may be \\\n received.\"\"\"\n\n widget = event.widget\n _index = int(widget.curselection()[0])\n _value = widget.get(_index)\n print(_('List item'), ' %d / %s' % (_index, _value))\n{% endif %}\n\n\n{% if cookiecutter.insert_status == 'y' %}\nclass StatusBar(ttk.Frame):\n \"Sample status bar provided by cookiecutter switch.\"\n _status_bars = 4\n\n def __init__(self, parent):\n ttk.Frame.__init__(self, parent)\n self.labels = []\n self.config(border=1, relief=tkinter.GROOVE)\n for i in range(self._status_bars):\n _label_text = _('Unset status ') + str(i + 1)\n self.labels.append(ttk.Label(self, text=_label_text))\n self.labels[i].config(relief=tkinter.GROOVE)\n self.labels[i].pack(side=tkinter.LEFT, fill=tkinter.X)\n self.pack()\n\n def set_text(self, status_index, new_text):\n self.labels[status_index].config(text=new_text)\n{% endif %}\n\n\n{% if cookiecutter.insert_toolbar == 'y' %}\nclass ToolBar(ttk.Frame):\n \"Sample toolbar provided by cookiecutter switch.\"\n\n def __init__(self, parent):\n ttk.Frame.__init__(self, parent)\n self.buttons = []\n self.config(border=1, relief=tkinter.GROOVE)\n for i in range(1, 5):\n _button_text = _('Tool ') + str(i)\n self.buttons.append(ttk.Button(self, text=_button_text,\n command=lambda i=i:\n self.run_tool(i)))\n self.buttons[i - 1].pack(side=tkinter.LEFT, fill=tkinter.X)\n self.pack()\n\n def run_tool(self, number):\n \"Sample function provided to show how a toolbar command may be used.\"\n\n print(_('Toolbar button'), number, _('pressed'))\n{% endif 
%}\n\n\nclass MainFrame(ttk.Frame):\n \"Main area of user interface content.\"\n\n past_time = datetime.datetime.now()\n _advertisement = 'Cookiecutter: Open-Source Project Templates'\n _product = _('Template') + ': {{cookiecutter.display_name}}'\n _boilerplate = _advertisement + '\\n\\n' + _product + '\\n\\n'\n\n def __init__(self, parent):\n ttk.Frame.__init__(self, parent)\n self.display = ttk.Label(parent, anchor=tkinter.CENTER,\n foreground='green', background='black')\n self.display.pack(fill=tkinter.BOTH, expand=1)\n self.tick()\n\n def tick(self):\n \"Invoked automatically to update a clock displayed in the GUI.\"\n\n this_time = datetime.datetime.now()\n if this_time != self.past_time:\n self.past_time = this_time\n _timestamp = this_time.strftime('%Y-%m-%d %H:%M:%S')\n self.display.config(text=self._boilerplate + _timestamp)\n self.display.after(100, self.tick)\n\n\nclass MenuBar(tkinter.Menu):\n \"Menu bar appearing with expected components.\"\n\n def __init__(self, parent):\n tkinter.Menu.__init__(self, parent)\n\n filemenu = tkinter.Menu(self, tearoff=False)\n filemenu.add_command(label=_('New'), command=self.new_dialog)\n filemenu.add_command(label=_('Open'), command=self.open_dialog)\n filemenu.add_separator()\n filemenu.add_command(label=_('Exit'), underline=1,\n command=self.quit)\n\n helpmenu = tkinter.Menu(self, tearoff=False)\n helpmenu.add_command(label=_('Help'), command=lambda:\n self.help_dialog(None), accelerator=\"F1\")\n helpmenu.add_command(label=_('About'), command=self.about_dialog)\n self.bind_all('<F1>', self.help_dialog)\n\n self.add_cascade(label=_('File'), underline=0, menu=filemenu)\n self.add_cascade(label=_('Help'), underline=0, menu=helpmenu)\n\n def quit(self):\n \"Ends toplevel execution.\"\n\n sys.exit(0)\n\n def help_dialog(self, event):\n \"Dialog cataloging results achievable, and provided means available.\"\n\n _description = _('Help not yet created.')\n PopupDialog(self, '{{cookiecutter.display_name}}', _description)\n\n def about_dialog(self):\n \"Dialog concerning information about entities responsible for program.\"\n\n _description = '{{cookiecutter.short_description}}'\n if _description == '':\n _description = _('No description available')\n _description += '\\n'\n _description += '\\n' + _('Author') + ': {{cookiecutter.full_name}}'\n _description += '\\n' + _('Email') + ': {{cookiecutter.email}}'\n _description += '\\n' + _('Version') + ': {{cookiecutter.version}}'\n _description += '\\n' + _('GitHub Package') + \\\n ': {{cookiecutter.repo_name}}'\n PopupDialog(self, _('About') + ' {{cookiecutter.display_name}}',\n _description)\n\n def new_dialog(self):\n \"Non-functional dialog indicating successful navigation.\"\n\n PopupDialog(self, _('New button pressed'), _('Not yet implemented'))\n\n def open_dialog(self):\n \"Standard askopenfilename() invocation and result handling.\"\n\n _name = tkinter.filedialog.askopenfilename()\n if isinstance(_name, str):\n print(_('File selected for open: ') + _name)\n else:\n print(_('No file selected'))\n\n\nclass Application(tkinter.Tk):\n \"Create top-level Tkinter widget containing all other widgets.\"\n\n def __init__(self):\n tkinter.Tk.__init__(self)\n menubar = MenuBar(self)\n self.config(menu=menubar)\n self.wm_title('{{cookiecutter.display_name}}')\n self.wm_geometry('640x480')\n\n{% if cookiecutter.insert_status == 'y' %}# Status bar selection == 'y'\n self.statusbar = StatusBar(self)\n self.statusbar.pack(side='bottom', fill='x')\n self.bind_all('<Enter>', lambda e: 
self.statusbar.set_text(0,\n 'Mouse: 1'))\n self.bind_all('<Leave>', lambda e: self.statusbar.set_text(0,\n 'Mouse: 0'))\n self.bind_all('<Button-1>', lambda e: self.statusbar.set_text(1,\n 'Clicked at x = ' + str(e.x) + ' y = ' + str(e.y)))\n self.start_time = datetime.datetime.now()\n self.uptime()\n{% endif %}\n\n{% if cookiecutter.insert_navigation == 'y' %}# Navigation selection == 'y'\n self.navigationbar = NavigationBar(self)\n self.navigationbar.pack(side='left', fill='y')\n{% endif %}\n\n{% if cookiecutter.insert_toolbar == 'y' %}# Tool bar selection == 'y'\n self.toolbar = ToolBar(self)\n self.toolbar.pack(side='top', fill='x')\n{% endif %}\n\n self.mainframe = MainFrame(self)\n self.mainframe.pack(side='right', fill='y')\n\n{% if cookiecutter.insert_status == 'y' %}# Status bar selection == 'y'\n def uptime(self):\n _upseconds = str(int(round((datetime.datetime.now() - self.start_time).total_seconds())))\n self.statusbar.set_text(2, _('Uptime') + ': ' + _upseconds)\n self.after(1000, self.uptime)\n{% endif %}\n\nif __name__ == '__main__':\n APPLICATION_GUI = Application()\n APPLICATION_GUI.mainloop()\n",
"id": "2177211",
"language": "Python",
"matching_score": 1.5851072072982788,
"max_stars_count": 19,
"path": "{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}.py"
},
{
"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9 on 2018-11-25 09:36\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dynamic_scraper', '0025_new_follow_pages_page_xpath_pagination_attribute'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='checker',\n name='checker_type',\n field=models.CharField(choices=[('4', '404'), ('X', '404_OR_X_PATH')], default='4', max_length=1),\n ),\n migrations.AlterField(\n model_name='checker',\n name='scraped_obj_attr',\n field=models.ForeignKey(help_text='Attribute of type DETAIL_PAGE_URL, several checkers for same DETAIL_PAGE_URL attribute possible.', on_delete=django.db.models.deletion.CASCADE, to='dynamic_scraper.ScrapedObjAttr'),\n ),\n migrations.AlterField(\n model_name='log',\n name='level',\n field=models.IntegerField(choices=[(50, 'CRITICAL'), (40, 'ERROR'), (30, 'WARNING'), (20, 'INFO'), (10, 'DEBUG')]),\n ),\n migrations.AlterField(\n model_name='logmarker',\n name='mark_with_type',\n field=models.CharField(choices=[('PE', 'Planned Error'), ('DD', 'Dirty Data'), ('IM', 'Important'), ('IG', 'Ignore'), ('MI', 'Miscellaneous'), ('CU', 'Custom')], help_text='Choose \"Custom\" and enter your own type in the next field for a custom type', max_length=2),\n ),\n migrations.AlterField(\n model_name='requestpagetype',\n name='content_type',\n field=models.CharField(choices=[('H', 'HTML'), ('X', 'XML'), ('J', 'JSON')], default='H', help_text='Data type format for scraped pages of page type (for JSON use JSONPath instead of XPath)', max_length=1),\n ),\n migrations.AlterField(\n model_name='requestpagetype',\n name='dont_filter',\n field=models.BooleanField(default=False, help_text='Do not filter duplicate requests, useful for some scenarios with requests falsely marked as being duplicate (e.g. 
uniform URL + pagination by HTTP header).'),\n ),\n migrations.AlterField(\n model_name='requestpagetype',\n name='meta',\n field=models.TextField(blank=True, help_text='Optional Scrapy meta attributes as JSON dict (use double quotes!), see Scrapy docs for reference.'),\n ),\n migrations.AlterField(\n model_name='requestpagetype',\n name='method',\n field=models.CharField(choices=[('GET', 'GET'), ('POST', 'POST')], default='GET', help_text='HTTP request via GET or POST.', max_length=10),\n ),\n migrations.AlterField(\n model_name='requestpagetype',\n name='render_javascript',\n field=models.BooleanField(default=False, help_text='Render Javascript on pages (ScrapyJS/Splash deployment needed, careful: resource intense)'),\n ),\n migrations.AlterField(\n model_name='requestpagetype',\n name='request_type',\n field=models.CharField(choices=[('R', 'Request'), ('F', 'FormRequest')], default='R', help_text='Normal (typically GET) request (default) or form request (typically POST), using Scrapys corresponding request classes (not used for checker).', max_length=1),\n ),\n migrations.AlterField(\n model_name='requestpagetype',\n name='scraped_obj_attr',\n field=models.ForeignKey(blank=True, help_text='Empty for main page, attribute of type DETAIL_PAGE_URL scraped from main page for detail pages.', null=True, on_delete=django.db.models.deletion.CASCADE, to='dynamic_scraper.ScrapedObjAttr'),\n ),\n migrations.AlterField(\n model_name='schedulerruntime',\n name='runtime_type',\n field=models.CharField(choices=[('S', 'SCRAPER'), ('C', 'CHECKER')], default='P', max_length=1),\n ),\n migrations.AlterField(\n model_name='scrapedobjattr',\n name='attr_type',\n field=models.CharField(choices=[('S', 'STANDARD'), ('T', 'STANDARD (UPDATE)'), ('B', 'BASE'), ('U', 'DETAIL_PAGE_URL'), ('I', 'IMAGE')], max_length=1),\n ),\n migrations.AlterField(\n model_name='scrapedobjclass',\n name='checker_scheduler_conf',\n field=models.TextField(default='\"MIN_TIME\": 1440,\\n\"MAX_TIME\": 10080,\\n\"INITIAL_NEXT_ACTION_FACTOR\": 1,\\n\"ZERO_ACTIONS_FACTOR_CHANGE\": 5,\\n\"FACTOR_CHANGE_FACTOR\": 1.3,\\n'),\n ),\n migrations.AlterField(\n model_name='scrapedobjclass',\n name='scraper_scheduler_conf',\n field=models.TextField(default='\"MIN_TIME\": 15,\\n\"MAX_TIME\": 10080,\\n\"INITIAL_NEXT_ACTION_FACTOR\": 10,\\n\"ZERO_ACTIONS_FACTOR_CHANGE\": 20,\\n\"FACTOR_CHANGE_FACTOR\": 1.3,\\n'),\n ),\n migrations.AlterField(\n model_name='scraper',\n name='max_items_read',\n field=models.IntegerField(blank=True, help_text='Max number of items to be read (empty: unlimited).', null=True),\n ),\n migrations.AlterField(\n model_name='scraper',\n name='max_items_save',\n field=models.IntegerField(blank=True, help_text='Max number of items to be saved (empty: unlimited).', null=True),\n ),\n migrations.AlterField(\n model_name='scraper',\n name='pagination_append_str',\n field=models.CharField(blank=True, help_text='Syntax: /somepartofurl/{page}/moreurlstuff.html', max_length=200),\n ),\n migrations.AlterField(\n model_name='scraper',\n name='pagination_page_replace',\n field=models.TextField(blank=True, help_text=\"RANGE_FUNCT: uses Python range funct., syntax: [start], stop[, step], FREE_LIST: 'Replace text 1', 'Some other text 2', 'Maybe a number 3', ...\"),\n ),\n ]\n",
"id": "216616",
"language": "Python",
"matching_score": 1.5748432874679565,
"max_stars_count": 0,
"path": "dynamic_scraper/migrations/0026_auto_20181125_1001.py"
}
] | 1.574843 |
yang-junhwan | [
{
"content": "import cv2\nimport numpy as np\nimport copy\nfrom image_registration import cross_correlation_shifts\nfrom src.utils import draw_ball_curve, fill_lost_tracking\nfrom src.FrameInfo import FrameInfo\n\n\ndef generate_overlay(video_frames, width, height, fps, outputPath):\n print('Saving overlay result to', outputPath)\n codec = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter(outputPath, codec, fps / 2, (width, height))\n\n frame_lists = sorted(video_frames, key=len, reverse=True)\n balls_in_curves = [[] for i in range(len(frame_lists))]\n shifts = {}\n times = []\n\n # Take the longest frames as background\n for idx, base_frame in enumerate(frame_lists[0]):\n # Overlay frames\n background_frame = base_frame.frame.copy()\n for list_idx, frameList in enumerate(frame_lists[1:]):\n if(idx < len(frameList)):\n overlay_frame = frameList[idx]\n else:\n overlay_frame = frameList[len(frameList) - 1]\n\n alpha = 1.0 / (list_idx + 2)\n beta = 1.0 - alpha\n corrected_frame = image_registration(background_frame, overlay_frame, shifts, list_idx, width, height)\n background_frame = cv2.addWeighted(corrected_frame, alpha, background_frame, beta, 0)\n\n # Prepare balls to draw\n if(overlay_frame.ball_in_frame):\n balls_in_curves[list_idx+1].append([overlay_frame.ball[0], overlay_frame.ball[1], overlay_frame.ball_color])\n\n if(base_frame.ball_in_frame):\n balls_in_curves[0].append([base_frame.ball[0], base_frame.ball[1], base_frame.ball_color])\n\n # # Emphasize base frame\n # base_frame_weight = 0.55\n # background_frame = cv2.addWeighted(base_frame.frame, base_frame_weight, background_frame, 1-base_frame_weight, 0)\n\n # Draw transparent curve and non-transparent balls\n for trajectory in balls_in_curves:\n background_frame = draw_ball_curve(background_frame, trajectory)\n\n result_frame = cv2.cvtColor(background_frame, cv2.COLOR_RGB2BGR)\n cv2.imshow('result_frame', result_frame)\n out.write(result_frame)\n if cv2.waitKey(60) & 0xFF == ord('q'):\n break\n\n\ndef image_registration(ref_image, offset_image, shifts, list_idx, width, height):\n # The shift is calculated once for each video and stored\n if(list_idx not in shifts):\n xoff, yoff = cross_correlation_shifts(\n ref_image[:, :, 0], offset_image.frame[:, :, 0])\n shifts[list_idx] = (xoff, yoff)\n else:\n xoff, yoff = shifts[list_idx]\n\n offset_image.ball = tuple([offset_image.ball[0] - int(xoff), offset_image.ball[1] - int(yoff)])\n matrix = np.float32([[1, 0, -xoff], [0, 1, -yoff]])\n corrected_image = cv2.warpAffine(offset_image.frame, matrix, (width, height))\n\n return corrected_image\n",
"id": "838094",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "src/generate_overlay.py"
}
] | 0 |
Worrarat1 | [
{
"content": "'''Python Code to Convert OpenImage Dataset into VOC XML format. \r\n\r\nAuthor: https://github.com/AtriSaxena\r\nPlease see Read me file to know how to use this file.\r\n\r\n'''\r\n\r\nfrom xml.etree.ElementTree import Element, SubElement, Comment\r\nimport xml.etree.cElementTree as ET\r\n#from ElementTree_pretty import prettify\r\nimport cv2\r\nimport os\r\nfrom pathlib import Path\r\nfrom shutil import move\r\nimport argparse\r\n\r\nparser = argparse.ArgumentParser(description = 'Convert OIDV4 dataset to VOC XML format')\r\nparser.add_argument('--sourcepath',type = str, default = 'dataset/', help ='Path of class to convert')\r\nparser.add_argument('--dest_path',type=str, required=True, default='Annotation/',help='Path of Dest XML files')\r\nargs = parser.parse_args()\r\n\r\nids = []\r\nfor file in os.listdir(args.sourcepath): #Save all images in a list\r\n filename = os.fsdecode(file)\r\n if filename.endswith('.jpg'):\r\n ids.append(filename[:-4])\r\n\r\nfor fname in ids: \r\n myfile = os.path.join(args.dest_path,fname +'.xml')\r\n myfile = Path(myfile)\r\n if not myfile.exists(): #if file is not existing \r\n txtfile = os.path.join(args.sourcepath, 'Label', fname + '.txt') #Read annotation of each image from txt file\r\n f = open(txtfile,\"r\")\r\n imgfile = os.path.join(args.sourcepath, fname +'.jpg')\r\n img = cv2.imread(imgfile, cv2.IMREAD_UNCHANGED) #Read image to get image width and height\r\n top = Element('annotation')\r\n child = SubElement(top,'folder')\r\n child.text = os.path.join(args.sourcepath, fname + '.jpg')\r\n\r\n child_filename = SubElement(top,'filename')\r\n child_filename.text = fname +'.jpg'\r\n\r\n child_path = SubElement(top,'path')\r\n child_path.text = os.path.join(args.sourcepath, fname + '.jpg')\r\n\r\n child_source = SubElement(top,'source')\r\n child_database = SubElement(child_source, 'database')\r\n child_database.text = 'Unknown'\r\n\r\n child_size = SubElement(top,'size')\r\n child_width = SubElement(child_size,'width')\r\n child_width.text = str(img.shape[1])\r\n\r\n child_height = SubElement(child_size,'height')\r\n child_height.text = str(img.shape[0])\r\n\r\n child_depth = SubElement(child_size,'depth')\r\n if len(img.shape) == 3: \r\n child_depth.text = str(img.shape[2])\r\n else:\r\n child_depth.text = '3'\r\n\r\n child_seg = SubElement(top, 'segmented')\r\n child_seg.text = '0'\r\n\r\n for x in f: #Iterate for each object in an image.\r\n x = list(x.split())\r\n x_name = \"\"\r\n x_idx = []\r\n for i in range(len(x)):\r\n try:\r\n float(x[i])\r\n except ValueError:\r\n x_name += x[i]\r\n x_idx.append(i)\r\n x = [j for i, j in enumerate(x) if i not in x_idx]\r\n x = [x_name] + x\r\n\r\n child_obj = SubElement(top, 'object')\r\n\r\n child_name = SubElement(child_obj, 'name')\r\n child_name.text = x[0] #name\r\n\r\n child_pose = SubElement(child_obj, 'pose')\r\n child_pose.text = 'Unspecified'\r\n\r\n child_trun = SubElement(child_obj, 'truncated')\r\n child_trun.text = '0'\r\n\r\n child_diff = SubElement(child_obj, 'difficult')\r\n child_diff.text = '0'\r\n\r\n child_bndbox = SubElement(child_obj, 'bndbox')\r\n\r\n child_xmin = SubElement(child_bndbox, 'xmin')\r\n child_xmin.text = str(int(float(x[1]))) #xmin\r\n\r\n child_ymin = SubElement(child_bndbox, 'ymin')\r\n child_ymin.text = str(int(float(x[2]))) #ymin\r\n\r\n child_xmax = SubElement(child_bndbox, 'xmax')\r\n child_xmax.text = str(int(float(x[3]))) #xmax\r\n\r\n child_ymax = SubElement(child_bndbox, 'ymax')\r\n child_ymax.text = str(int(float(x[4]))) #ymax\r\n\r\n tree = 
ET.ElementTree(top)\r\n save = fname+'.xml'\r\n tree.write(save)\r\n move(fname+'.xml', myfile)\r\n",
"id": "4700103",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "OIDv4_to_VOC.py"
}
] | 0 |
iancraz | [
{
"content": "#!/usr/bin/env python3\n\"\"\"Pass input directly to output.\nhttps://app.assembla.com/spaces/portaudio/git/source/master/test/patest_wire.c\n\"\"\"\nimport argparse\n\nimport sounddevice as sd\nimport numpy # Make sure NumPy is loaded before it is used in the callback\nassert numpy # avoid \"imported but unused\" message (W0611)\nimport numpy as np\n\ndef callback(indata, outdata, frames, time, status):\n if status:\n print(status)\n outdata[:] = np.array(indata)\n\nwith sd.Stream(channels=1, callback=callback):\n print('#' * 80)\n print('press Return to quit')\n print('#' * 80)\n input()",
"id": "1100127",
"language": "Python",
"matching_score": 2.103203296661377,
"max_stars_count": 0,
"path": "Examples sounddevice/wire.py"
},
{
"content": "import sounddevice as sd\nimport matplotlib.pyplot as plt\nimport numpy as np\nduration = 5.5 # seconds\n\ndata = []\ndef callback(indata, outdata, frames, time, status):\n if status:\n print(status)\n outdata[:] = indata\n data.append(outdata)\n\nwith sd.Stream(channels=2, callback=callback):\n sd.sleep(int(duration * 1000))\n\ndata = np.array(data)\nprint(data.shape)\nprint(data.flatten())\nplt.plot(data[:,0].flatten())\nplt.show()",
"id": "6832177",
"language": "Python",
"matching_score": 0.05362920090556145,
"max_stars_count": 0,
"path": "Examples sounddevice/playback_wire.py"
},
{
"content": "import matplotlib.pyplot as plt\nimport numpy as np\n\n#Recibe parametros en la data del tipo\n# Fs,NFFT,Window\\n\n#datos\n\nf = open(\"data.ian\",\"r\")\nname = f.readline()\nname = name.strip()\nf1 = f.readline()\nf2 = f.readline()\ntime = []\ntimevec = []\ny = f1.split(',')\nfs = int(y[0])\nnfft = int(y[1])\noverlap = int(y[3])\nsave = int(y[4])\nshowTime = int(y[5])\n\nif int(y[2]) == 0:\n window = None\nelif int(y[2]) == 1:\n window = 'BALCKMAN'\nelif int(y[2]) == 2:\n window = 'HAMMING'\nelif int(y[2]) == 3:\n window = 'BARTLETT'\n\n\n\ny = f2.split(',')\nfor i in y:\n try:\n a = float(i)\n time.append(a)\n except:\n time.pop()\n pass\n\nfor i in range(len(time)):\n timevec.append(i)\ntimevec = [x * 1/fs for x in timevec]\n\nif showTime:\n fig, (ax1, ax2) = plt.subplots(2, 1)\n ax1.plot(timevec,time,'k')\n ax1.set_xlim(left=0,right=timevec[len(timevec)-1])\n ax1.set_ylabel(\"Amplitud\")\n ax1.minorticks_on()\n ax1.grid(b=True, which='major', color='#666666', linestyle='-')\n ax1.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\nelse:\n fig, ax2 = plt.subplots(1, 1)\n#ax1.grid(b=True,which='major')\n\nif window == None:\n ax2.specgram(time,Fs=fs,NFFT=nfft,cmap='Greys',noverlap= overlap)\nif window == 'BALCKMAN':\n ax2.specgram(time,Fs=fs,NFFT=nfft,window= np.blackman(nfft),cmap='Greys',noverlap= overlap)\nelif window == 'HAMMING':\n ax2.specgram(time,Fs=fs,NFFT=nfft, window= np.hamming(nfft),cmap='Greys',noverlap= overlap)\nelif window == 'BARTLETT':\n ax2.specgram(time,Fs=fs,NFFT=nfft, window= np.bartlett(nfft),cmap='Greys',noverlap= overlap)\n\nax2.set_ylabel(\"Frecuencias\")\nax2.set_xlabel(\"tiempo\")\nax2.set_ylim(0,20e3)\n#ax2.grid(b=True,which='major')\n##ax2.minorticks_on()\nax2.grid(b=True, which='major', color='#666666', linestyle='-', alpha = 0.5)\n##ax2.grid(b=True, which='minor', color='#999999', linestyle='-', alpha=0.2)\nif save:\n plt.savefig(name)\nelse:\n plt.show()\n",
"id": "8504988",
"language": "Python",
"matching_score": 2.9917163848876953,
"max_stars_count": 0,
"path": "Leandro/Spectrogram/plot.py"
},
{
"content": "import matplotlib.pyplot as plt\nimport numpy as np\nf = open(\"fft.ian\",\"r\")\nl1 = f.readline()\nl2 = f.readline()\ninp = []\nout = []\n\nx = l2.split(',')\nfor i in x:\n try:\n a = float(i)\n out.append(a)\n except:\n pass\n\n\n\n\n######################################\n\nfrom scipy.fft import fft\n# Number of sample points\nN = 4096\n# sample spacing\nT = 1.0 / 800.0\nx = np.linspace(0.0, N*T, N)\ny = 10 * np.sin(100.0 * 2.0*np.pi*x) + 5*np.sin(200.0 * 2.0*np.pi*x) + 2.5*np.sin(300.0 * 2.0*np.pi*x)\n\nyf = fft(y)\nxf = np.linspace(0.0, 1.0/(2.0*T), N//2)\ntemp = []\nfor i in range(len(out)//2):\n temp.append(out[i] * 2.0 / N)\n\nfig, (ax1, ax2) = plt.subplots(2, 1)\nax1.plot(xf, 2.0/N * np.abs(yf[0:N//2]),'k')\nax1.grid()\nax1.set_ylabel(\"FFT Python\")\n\nax2.plot(xf, temp, 'k')\nax2.grid()\nax2.set_ylabel(\"FFT Cooley-Tukey\")\n\nplt.xlabel(\"Frecuencias\")\nplt.show()\n\n#import matplotlib.pyplot as plt\n#plt.plot(xf, 2.0/N * np.abs(yf[0:N//2]))\n#plt.grid()\n#plt.show()\n\n#plt.plot(xf, temp)\n#plt.grid()\n#plt.show()",
"id": "10305946",
"language": "Python",
"matching_score": 0.054461169987916946,
"max_stars_count": 0,
"path": "Leandro/fft/test.py"
},
{
"content": "import numpy as np\nfrom scipy.signal import hann, lfilter, butter\nfrom numpy import array, double, amax, absolute, zeros, floor, arange, mean\nfrom numpy import correlate, dot, append, divide, argmax, int16, sqrt, power\nfrom numpy.random import randn\nfrom scipy.linalg import solve_toeplitz\nimport matplotlib.pyplot as plt\nfrom scipy import signal\n\n# Glottal Pulses Definition\n\ndef glotal_triangular(len_block, p_coverage=0.1, r1_start=0, r1_stop=3, r2_start=1, r2_stop=0):\n vocoded = np.zeros(len_block)\n ramp_len=int(len_block*p_coverage)//2\n ramp_up = np.linspace(r1_start, r1_stop,ramp_len,endpoint=False)\n ramp_down = np.linspace(r2_start,r2_stop,ramp_len*2)\n ramp = np.hstack((ramp_up, ramp_down))\n vocoded[len(vocoded)//2-ramp_len:len(vocoded)//2+ramp_len*2] = ramp\n return vocoded\n\ndef glotal_hamming(len_block, p_coverage=0.1):\n vocoded = np.zeros(len_block)\n len_hamming = int(len_block*p_coverage)\n if len_hamming%2 != 0:\n len_hamming = len_hamming + 1\n vocoded[len(vocoded)//2-len_hamming//2:len(vocoded)//2 + len_hamming//2] = np.hamming(len_hamming)\n return vocoded\n\ndef glotal_square(len_block, p_coverage=0.1):\n vocoded = np.zeros(len_block)\n square_len=int(len_block*p_coverage)//2\n vocoded[len(vocoded)//2-square_len:len(vocoded)//2+square_len] = 1\n return vocoded\n\ndef glotal_exp_rising(len_block, p_coverage=0.1, th= 0.1, amplitude= 1.0):\n vocoded = np.zeros(len_block)\n alpha = (-2/(p_coverage*len_block)) * np.log(th/amplitude)\n t = np.arange(-len_block//2, len_block//2)\n vocoded = amplitude * np.exp(-alpha * np.abs(t))\n return vocoded\n\n# Pitch Maker Definition\n\ndef pitch_maker(len_block, T_samples, prev_delta, overlap=0.5):\n block = np.zeros(len_block)\n current_pos = int(len_block*overlap) + prev_delta\n if current_pos >= len_block:\n return block, prev_delta-len_block\n block[current_pos] = 1\n new_delta = 0\n finish = False\n temp_pos = current_pos\n while temp_pos >= 0:\n temp_pos = temp_pos - T_samples\n if temp_pos >= 0:\n block[temp_pos] = 1\n while not finish:\n dist = len_block-current_pos\n new_delta = T_samples-dist\n if new_delta < 0:\n current_pos = current_pos+T_samples\n block[current_pos] = 1\n else:\n finish = True\n return block, new_delta\n\n\n# Vocoder\n\nglotales = {\"square\": glotal_square, \"triang\": glotal_triangular, \"exp\": glotal_exp_rising, \"hamming\": glotal_hamming}\n\ndef block_process(data, fs, block_len, overlap):\n \"\"\"\n A generator that slices an input array into overlapping blocks.\n data the input array as a one dimensional numpy array.\n fs the sample rate as integer number.\n block_len the length of one block in seconds.\n overlap the percentage of overlap between blocks.\n \"\"\"\n block_samples = round(block_len * fs)\n overlap_samples = round(block_samples * overlap)\n shift_samples = block_samples - overlap_samples\n num_blocks = int(floor((len(data) - overlap_samples) / shift_samples))\n for idx in range(0, num_blocks):\n samples = data[idx * shift_samples:idx * shift_samples + block_samples]\n yield (array(samples, copy=True), idx * shift_samples)\n\ndef fundamental_period_estimate(rxx, fs):\n \"\"\"\n Calculates the fundamental frequency of an auto correlation array.\n rxx the auto correlation array.\n fs the sample rate in hertz.\n \"\"\"\n f_low, f_high = 50, 250\n f_low_idx = round(fs / f_low)\n f_high_idx = round(fs / f_high)\n period_idx = argmax(rxx[f_high_idx:f_low_idx]) + f_high_idx\n is_voiced = max(rxx) > 0.20\n return (period_idx, is_voiced)\n\ndef vocode(signal, fs, 
f_custom, block_len, overlap, order, prev_block, p_coverage=0.01, unvoiced2zeros=True, glotal_type=\"triang\"):\n \"\"\"\n Analyzes a speech signal and synthesizes a vocoded speech signal.\n The speech signal is analyzed using the levinson-durben algorithm\n of the given order. Then, an corresponding output signal is\n synthesized from the levinson-durben coefficients.\n signal the speech signal as a one dimensional numpy array.\n fs the sample rate in hertz.\n block_len the block processing block length in seconds.\n overlap the block processing block overlap in percent (0..1).\n order the number of coefficients to use.\n returns a vocoded signal of the same sample rate as the original.\n \"\"\"\n\n b_butter, a_butter = butter(1, 200 / fs, 'high')\n glottal_lowpass = lambda signal, b, a: lfilter(b, a, signal)\n\n out = zeros(len(signal))\n out[:len(prev_block)] = prev_block\n # pitch tunning, ignore period samples\n prev_delta = 0\n T_samples = int((fs / f_custom)) # (muestras/segundo) / (1/segundo)\n\n for block, idx in block_process(signal, fs, block_len, overlap):\n gain_correction = (1 - overlap) * 2 # *2 due to hann window\n block *= hann(len(block)) * gain_correction\n\n rxx = correlate(block, block, mode='full')\n rxx = rxx[len(rxx) // 2:]\n period_samples, is_voiced = fundamental_period_estimate(rxx, fs)\n # LPC coefficients\n #block = preemphasis(block)\n #rxx = correlate(block, block, mode='full')\n #rxx = rxx[len(rxx) // 2:]\n a = -solve_toeplitz(rxx[:order], rxx[1:order + 1])\n a = np.concatenate(([1], a))\n error_power = rms(lfilter(a, (1,), block))\n if is_voiced:\n try:\n vocoded, new_delta = pitch_maker(len(block), T_samples, prev_delta, overlap=overlap)\n prev_delta = new_delta\n impulse_response = glotales[glotal_type](len(block), p_coverage=p_coverage)\n vocoded = np.convolve(vocoded, impulse_response, mode=\"same\")\n except:\n continue\n else:\n if unvoiced2zeros:\n vocoded = np.zeros(len(block)) # randn(len(block))/2\n else:\n vocoded = randn(len(block)) / 2\n\n vocoded = lfilter((error_power,), a, vocoded)\n vocoded *= hann(len(block))\n out[idx:idx + len(block)] += preemphasis(vocoded) # deemphasis(vocoded)\n return out\n\ndef preemphasis(signal):\n return lfilter([1, -0.70], 1, signal)\n\ndef deemphasis(signal):\n return lfilter([1, 0.70], 1, signal)\n\ndef rms(signal):\n return sqrt(mean(power(signal, 2)))\n\n",
"id": "5487294",
"language": "Python",
"matching_score": 3.608034133911133,
"max_stars_count": 0,
"path": "Test/Vocoder.py"
},
{
"content": "import numpy as np\n\nglotal_types = [\"triangular\", \"hamming\", \"square\", \"exp\"]\n\nclass ArtificialGlotal:\n def __init__(self) -> None:\n pass\n\n @staticmethod\n def glotal_triangular(len_block: int, p_coverage=0.1, r1_start=0, r1_stop=3, r2_start=1, r2_stop=0):\n \"\"\"\n Generardor de pulso glotal con base a una señal triangular no necesariamente simetrica\ns\n Args:\n len_block (int): largo del bloque\n p_coverage (float, optional): proporción del bloque que cubre el pulso glotal. Defaults to 0.1.\n r1_start (int, optional): comienzo de rampa 1. Defaults to 0.\n r1_stop (int, optional): Fin de rampa 1. Defaults to 3.\n r2_start (int, optional): comienzo de rampa 2. Defaults to 1.\n r2_stop (int, optional): Fin de rampa 2. Defaults to 0.\n\n Returns:\n glotal pulse: pulso glotal artificial de duración len_block\n \"\"\"\n vocoded = np.zeros(len_block)\n ramp_len=int(len_block*p_coverage)//2\n ramp_up = np.linspace(r1_start, r1_stop,ramp_len,endpoint=False)\n ramp_down = np.linspace(r2_start,r2_stop,ramp_len*2)\n ramp = np.hstack((ramp_up, ramp_down))\n vocoded[len(vocoded)//2-ramp_len:len(vocoded)//2+ramp_len*2] = ramp\n return vocoded\n\n @staticmethod\n def glotal_hamming(len_block:int, p_coverage=0.1):\n \"\"\"\n Generardor de pulso glotal con base a una ventana de hamming\n\n Args:\n len_block (int): largo del bloque\n p_coverage (float, optional): [description]. Defaults to 0.1.\n\n Returns:\n glotal pulse: pulso glotal artificial de duración len_block\n \"\"\"\n vocoded = np.zeros(len_block)\n len_hamming = int(len_block*p_coverage)\n if len_hamming%2 != 0:\n len_hamming = len_hamming + 1\n vocoded[len(vocoded)//2-len_hamming//2:len(vocoded)//2 + len_hamming//2] = np.hamming(len_hamming)\n return vocoded\n\n @staticmethod\n def glotal_square(len_block: int, p_coverage=0.1):\n \"\"\"\n Generardor de pulso glotal con base a una ventana rectangular\n\n Args:\n len_block (int): largo del bloque\n p_coverage (float, optional): [description]. Defaults to 0.1.\n\n Returns:\n glotal pulse: pulso glotal artificial de duración len_block\n \"\"\"\n vocoded = np.zeros(len_block)\n square_len=int(len_block*p_coverage)//2\n vocoded[len(vocoded)//2-square_len:len(vocoded)//2+square_len] = 1\n return vocoded\n\n @staticmethod\n def glotal_exp_rising(len_block:int, p_coverage=0.1, th= 0.1, amplitude= 1.0):\n \"\"\"\n Generardor de pulso glotal con base a e^(-alfa*abs(x))\n\n Args:\n len_block (int): largo del bloque\n p_coverage (float, optional): [description]. Defaults to 0.1.\n th (float, optional): [description]. Defaults to 0.1.\n amplitude (float, optional): [description]. Defaults to 1.0.\n\n Returns:\n Generardor de pulso glotal con base a una ventana de hamming\n \"\"\"\n vocoded = np.zeros(len_block)\n alpha = (-2/(p_coverage*len_block)) * np.log(th/amplitude) \n t = np.arange(-len_block//2, len_block//2)\n vocoded = amplitude * np.exp(-alpha * np.abs(t))\n return vocoded",
"id": "496933",
"language": "Python",
"matching_score": 0.6850249171257019,
"max_stars_count": 0,
"path": "Vocoder/ArtificialGlotal.py"
},
{
"content": "import numpy as np\n\nclass PitchMaker:\n def __init__(self, len_block:int, f0:float, fs:float, overlap:float,name:str=None) -> None:\n \"\"\"\n Unidad generadora de una sola fundamental\n Args:\n len_block (int): [description]\n f0 (float): [description]\n fs (float): [description]\n overlap (float): [description]\n name (str, optional): [description]. Defaults to None.\n \"\"\"\n self.len_block = len_block\n self.prev_delta = 0\n self.overlap = overlap\n self.f0 = f0\n self.fs = fs\n self.T_samples = int(fs/self.f0)\n self.name = name if name is not None else \"nameless Pitch\"\n\n def get_next_block(self):\n block = np.zeros(self.len_block)\n self.current_pos = int(self.len_block*self.overlap) + self.prev_delta \n if self.current_pos >= self.len_block:\n return block, self.prev_delta-self.len_block\n block[self.current_pos] = 1\n new_delta = 0\n finish = False\n temp_pos = self.current_pos\n while temp_pos >= 0:\n temp_pos = temp_pos - self.T_samples\n if temp_pos >= 0:\n block[temp_pos] = 1\n while not finish:\n dist = self.len_block-self.current_pos\n new_delta = self.T_samples-dist\n if new_delta < 0:\n self.current_pos = self.current_pos+self.T_samples \n block[self.current_pos] = 1\n else:\n finish = True\n self.prev_delta = new_delta\n return block\n \n def __repr__(self) -> str:\n return f\"fundamental frequency of {self.f0}\"\n\n \n def set_fundamental(self, freq:float):\n self.T_samples = int(self.fs/freq)\n\nclass ChordMaker():\n\n def __init__(self, len_block:int, fs:float, overlap:float, name:str=None) -> None:\n \"\"\"ChordMaker permite combinar diferentes notas\n\n Args:\n len_block (int): [description]\n fs (float): [description]\n overlap (float): [description]\n \"\"\"\n self.len_block = len_block\n self.overlap = overlap\n self.fs = fs\n self.name = name if name is not None else \"nameless Chord\"\n self.notes = {}\n \n def add_note(self, f0):\n new_note = PitchMaker(self.len_block, f0, self.fs, self.overlap)\n if f0 in self.notes.keys():\n print(\"f0 already in Chord\") \n else:\n self.notes[f0] = new_note\n\n def remove_note(self, f0):\n try:\n self.notes.pop(f0)\n except KeyError:\n print(f\"tried to delte {f0} but no key was found with that name\")\n\n def generate_block(self):\n all_notes = np.zeros(self.len_block)\n for pitch_gen in self.notes.values():\n all_notes = all_notes + pitch_gen.get_next_block()\n return all_notes\n \n def __repr__(self) -> str:\n a = f\"Este acorde contiene {len(self.notes)} \\n\"\n b = \"Las notas son\"\n c = \"\"\n for f0 in self.notes.keys():\n c += f\"f0: {f0}\\n\"\n \n return a+b+c\n",
"id": "8581987",
"language": "Python",
"matching_score": 1.560131311416626,
"max_stars_count": 0,
"path": "Vocoder/FundamentalMaker.py"
},
{
"content": "import pyaudio\nimport msvcrt\nimport Vocoder as gp\nimport numpy as np\n# import matplotlib.pyplot as plt\nimport Vocoder as vc\nfrom scipy.io.wavfile import write\n\n# Definition of chords\n\nfrecuencies = [500,500,500]\nchords = {'A': [110,138.5913,164.8138] , 'B':[123.4708,155.5635,184.9972], 'C':[130.8128,164.8138,195.9977],\n 'D':[146.8324,184.9972,220.0000], 'E':[164.8138,207.6523,246.9417], 'F':[174.6141,220.0000,261.6256],\n 'G':[195.9977,246.9417,293.6648], 'Am':[110,130.8128,164.8138], 'Bm':[123.4708,146.8324,184.9972],\n 'Cm':[130.8128,155.5635,195.9977], 'Dm':[146.8324,174.6141,220.0000], 'Em':[164.8138,195.9977,246.9417],\n 'Fm':[174.6141,207.6523,261.6256], 'Gm':[195.9977,233.0819,293.6648]}\nkeyboard2chords = {'a':chords['A'], 's':chords['B'], 'd':chords['C'], 'f':chords['D'], 'g':chords['E'], 'h':chords['F'],\n 'j':chords['G'], 'z':chords['Am'], 'x':chords['Bm'], 'c':chords['Cm'], 'v':chords['Dm'], 'b':chords['Em'],\n 'n':chords['Fm'], 'm':chords['Gm']}\n\n#PyAudio Stuff\n\nCHUNK = 2**14\nWIDTH = 2\nCHANNELS = 1\nRATE = 48000\nRECORD_SECONDS = 999999\np = pyaudio.PyAudio()\nstream = p.open(format=p.get_format_from_width(WIDTH),\n channels=CHANNELS,\n rate=RATE,\n input=True,\n output=True,\n input_device_index=2,\n output_device_index=5,\n frames_per_buffer=CHUNK)\n\n#Start processing\noverlp = 0.5\nblock_secs = 0.04\nfs = RATE\nglob_arr = np.zeros(CHUNK)\nprev_half_block_sintetized = np.zeros(int(overlp*block_secs*fs))\n\n\nWAV_TEST = []\n\nfor i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n\n if msvcrt.kbhit():\n frecuencies = keyboard2chords[msvcrt.getwch()]\n print(f\"Las frecuencias utilizadas son: {frecuencies[0]}, {frecuencies[1]}, {frecuencies[2]}\")\n data = stream.read(CHUNK)\n input = np.fromstring(data,'int16')\n input = input/(2**15)\n\n\n out1 = vc.vocode(input, fs, f_custom=frecuencies[0], block_len=block_secs, overlap=overlp, order=16, prev_block=prev_half_block_sintetized)\n out2 = vc.vocode(input, fs, f_custom=frecuencies[1], block_len=block_secs, overlap=overlp, order=16, prev_block=prev_half_block_sintetized)\n out3 = vc.vocode(input, fs, f_custom=frecuencies[2], block_len=block_secs, overlap=overlp, order=16, prev_block=prev_half_block_sintetized)\n out = (out1 + out2 + out3)/3\n out = (out/np.max(np.abs(out))*0.75) * 2**15\n out = out.astype(np.int16)\n asd = out.tobytes()\n stream.write(asd, CHUNK)\n # prev_half_block_sintetized = out[len(out)-len(prev_half_block_sintetized):]\n\n\nWAV_TEST = np.array(WAV_TEST).flatten()\nWAV_TEST = (WAV_TEST + 1)/2 *255\n\nwrite(\"example.wav\", RATE,WAV_TEST)\n\n\n#PyAudio Terminators\n\nstream.stop_stream()\nstream.close()\np.terminate()",
"id": "9038440",
"language": "Python",
"matching_score": 4.4190545082092285,
"max_stars_count": 0,
"path": "Test/main.py"
},
{
"content": "import pyaudio\nimport struct\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n CHUNK = 1024*4\n FORMAT = pyaudio.paInt16\n CHANNELS = 2\n RATE = 44100\n RECORD_SECONDS = 5\n p = pyaudio.PyAudio()\n # stream = p.open(format=FORMAT,\n # channels=CHANNELS,\n # rate=RATE,\n # input=True,\n # output=True,\n # frames_per_buffer=CHUNK)\n\n for i in range(p.get_device_count()):\n print(p.get_device_info_by_index(i))\n\n# frames = []\n\n# for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n# print(f\"recording {i}\", end='\\r')\n# data = stream.read(CHUNK)\n# # print(data)\n# # data_int = np.array(struct.unpack(str(2*CHUNK)+'B', data), dtype='b')[::2] + 127\n# data = np.fromstring(data, 'int16');\n# frames.append(data)\n\n# stream.stop_stream()\n# stream.close()\n# p.terminate()\n# frames = np.array(frames)\n# print(frames.shape)\n\n# plt.plot(frames.flatten())\n# plt.show()\n\n\n# # stream.write(signal.tobytes())\n# # https://stackoverflow.com/questions/64801274/sound-played-with-pyaudio-seems-correct-but-too-short",
"id": "7992537",
"language": "Python",
"matching_score": 1.236201524734497,
"max_stars_count": 0,
"path": "Vocoder/main.py"
},
{
"content": "import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom scipy.io import wavfile\n\nwith open('debug.txt') as data:\n y = data.readlines()\n y = [float(i) for i in y]\n\nt = np.arange(0, len(y))/44100\ny = np.array(y, dtype=np.float32)\nwavfile.write('pirates_clarinete.wav', 44100, y)\nplt.plot(y)\nplt.show()",
"id": "7648225",
"language": "Python",
"matching_score": 0.6210678815841675,
"max_stars_count": 0,
"path": "Leandro/test.py"
}
] | 1.398166 |
yuhasem | [
{
"content": "#!/usr/bin/env python3\n\n\nimport cv2\nimport torch\nimport torch.nn.functional\n\n\n# Initialize the board randomly\ngeneration = 0\nX_SIZE, Y_SIZE = (100, 100)\nboard = torch.rand(1, 1, X_SIZE, Y_SIZE)\n\n\n# Initialize our convolution kernel\nh = torch.tensor([[1., 1., 1.,],\n [1., 0.5, 1.,],\n [1., 1., 1.,]]).view(1, 1, 3, 3)\n\n\nwhile generation < 10000:\n generation += 1\n\n # Apply convolution\n board = torch.nn.functional.conv2d(board, h, padding=1)\n\n # Apply activation function\n board = torch.logical_and(board >= 2.5, board <= 3.5).float()\n\n # Draw the current iteration\n cv2.imshow('😎', cv2.resize(board.view(X_SIZE, Y_SIZE).numpy(), (400, 400)))\n cv2.waitKey(10)\n\n\ncv2.waitKey(0)\n",
"id": "12322513",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "game_of_life.py"
}
] | 0 |
LucaRibeiro | [
{
"content": "#!/usr/bin/python3\n\nlist = [\"Binwalk\",\"bulk-extractor\",\"Capstone\",\"chntpw\",\"Cuckoo\",\n\"dc3dd\",\"ddrescue\",\"DFF\",\"diStorm3\",\"Dumpzilla\",\"extundelete\",\n\"Foremost\",\"Galleta\",\"Guymager\",\"iPhone Backup Analyzer\",\"p0f\",\n\"pdf-parser\",\"pdfid\",\"pdgmail\",\"peepdf\",\"RegRipper\",\"Volatility\",\"Xplico\"]",
"id": "140749",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "tools/forensics.py"
},
{
"content": "#!/usr/python3\n\nlist = [\"CryptCat\",\"Cymothoa\",\"dbd\",\"dns2tcp\",\"HTTPTunnel\",\n\"Intersect\",\"Nishang\",\"polenum\",\"PowerSploit\",\"pwnat\",\"RidEnum\",\n\"sbd\",\"shellter\",\"U3-Pwn\",\"Webshells\",\"Weevely\",\"Winexe\"]",
"id": "9578780",
"language": "Python",
"matching_score": 0.6313691735267639,
"max_stars_count": 1,
"path": "tools/maintaning_access.py"
},
{
"content": "#!/usr/bin/python3\n\nlist = [\"ace-voip\",\"Amap\",\"APT2\",\"arp-scan\",\"Automater\",\n\"bing-ip2hosts\",\"braa\",\"CaseFile\",\"CDPSnarf\",\"cisco-torch\",\n\"copy-router-config\",\"DMitry\",\"dnmap\",\"dnsenum\",\"dnsmap\",\"DNSRecon\",\n\"dnstracer\",\"dnswalk\",\"DotDotPwn\",\"enum4linux\",\"enumIAX\",\"EyeWitness\",\n\"Faraday\",\"Fierce\",\"Firewalk\",\"fragroute\",\"fragrouter\",\"Ghost Phisher\",\n\"GoLismero\",\"goofile\",\"hping3\",\"ident-user-enum\",\"InSpy\",\"InTrace\",\n\"iSMTP\",\"lbd\",\"Mal<NAME>\",\"masscan\",\"Metagoofil\",\"Miranda\",\n\"nbtscan-unixwiz\",\"Nikto\",\"Nmap\",\"ntop\",\"OSRFramework\",\"p0f\",\n\"Parsero\",\"Recon-ng\",\"SET\",\"SMBMap\",\"smtp-user-enum\",\"snmp-check\",\n\"SPARTA\",\"sslcaudit\",\"SSLsplit\",\"sslstrip\",\"SSLyze\",\"Sublist3r\",\"THC-IPV6\",\n\"theHarvester\",\"TLSSLed\",\"twofi\",\"Unicornscan\",\"URLCrazy\",\"Wireshark\",\"WOL-E\",\"Xplico\"]",
"id": "4931636",
"language": "Python",
"matching_score": 2.2190871238708496,
"max_stars_count": 1,
"path": "tools/information_gathering.py"
},
{
"content": "#!/usr/bin/python3\n\nlist = [\"bettercap\",\"Burp Suite\",\"DNSChef\",\"fiked\",\"hamster-sidejack\",\n\"HexInject\",\"iaxflood\",\"inviteflood\",\"iSMTP\",\"isr-evilgrade\",\"mitmproxy\",\n\"ohrwurm\",\"protos-sip\",\"rebind\",\"responder\",\"rtpbreak\",\"rtpinsertsound\",\n\"rtpmixsound\",\"sctpscan\",\"SIPArmyKnife\",\"SIPp\",\"SIPVicious\",\"SniffJoke\",\n\"SSLsplit\",\"sslstrip\",\"THC-IPV6\",\"VoIPHopper\",\"WebScarab\",\"Wifi Honey\",\n\"Wireshark\",\"xspy\",\"Yersinia\",\"zaproxy\",\"Password Attacks\"]",
"id": "6120173",
"language": "Python",
"matching_score": 2.0450146198272705,
"max_stars_count": 1,
"path": "tools/sniffing_spoofing.py"
},
{
"content": "#!/usr/bin/python3\nlist = [\"BruteSpray\",\"Burp Suite\",\"CeWL\",\"chntpw\",\"cisco-auditing-tool\",\n\"CmosPwd\",\"creddump\",\"crowbar\",\"crunch\",\"findmyhash\",\"gpp-decrypt\",\"hash-identifier\",\n\"Hashcat\",\"HexorBase\",\"THC-Hydra\",\"<NAME>\",\"Johnny\",\"keimpx\",\"<NAME>\",\n\"Maskprocessor\",\"multiforcer\",\"Ncrack\",\"oclgausscrack\",\"ophcrack\",\"PACK\",\"patator\",\n\"phrasendrescher\",\"polenum\",\"RainbowCrack\",\"rcracki-mt\",\"RSMangler\",\"SecLists\",\"SQLdict\",\n\"Statsprocessor\",\"THC-pptp-bruter\",\"TrueCrack\",\"WebScarab\",\"wordlists\",\"zaproxy\"]",
"id": "10513851",
"language": "Python",
"matching_score": 1.5292623043060303,
"max_stars_count": 1,
"path": "tools/password_atacks.py"
},
{
"content": "#!/usr/bin/python3\n\nlist = [\"BBQSQL\",\"BED\",\"cisco-auditing-tool\",\"cisco-global-exploiter\",\n\"cisco-ocs\",\"cisco-torch\",\"copy-router-config\",\"Doona\",\"DotDotPwn\",\n\"HexorBase\",\"jSQL Injection\",\"Lynis\",\"Nmap\",\"ohrwurm\",\"openvas\",\"Oscanner\",\n\"Powerfuzzer\",\"sfuzz\",\"SidGuesser\",\"SIPArmyKnife\",\"sqlmap\",\"Sqlninja\",\n\"sqlsus\",\"THC-IPV6\",\"tnscmd10g\",\"unix-privesc-check\",\"Yersinia\"]",
"id": "4138427",
"language": "Python",
"matching_score": 2.1012449264526367,
"max_stars_count": 1,
"path": "tools/vulnerability_analysis.py"
},
{
"content": "#!/usr/bin/python3\n\nlist = [\"Armitage\", \"Backdoor Factory\", \"BeEF\",\"cisco-auditing-tool\",\n\"cisco-global-exploiter\",\"cisco-ocs\",\"cisco-torch\",\"Commix\",\"crackle\",\n\"exploitdb\",\"jboss-autopwn\",\"Linux Exploit Suggester\",\"Maltego Teeth\",\n\"Metasploit Framework\",\"MSFPC\",\"RouterSploit\",\"SET\",\"ShellNoob\",\"sqlmap\",\n\"THC-IPV6\",\"Yersinia\"]\n",
"id": "24787",
"language": "Python",
"matching_score": 1.3583569526672363,
"max_stars_count": 1,
"path": "tools/exploitation_tools.py"
},
{
"content": "#!/usr/bin/env python3\n\nimport platform\nimport sys\nimport subprocess\n\nBanner = '''\n\n ******* ******** **** ** ************ ******** ******** ** ********\n ** *** ** ** ** ** ** *** *** *** *** ** ***\n ** *** ** ** ** ** ** *** *** *** *** ** ***\n ****** ******** ** ** ** ** *** *** *** *** ** ********\n ** ** ** ** ** ** *** *** *** *** ** ***\n ** ** ** ** ** ** *** *** *** *** ** ***\n ** ******** ** ***** ** ******** ******** ********* ********\n \n'''\nprint(Banner)\n\nOS = platform.system()\n\nif OS == 'Windows':\n sys.exit(\"Sorry :(, but Pentools is only compatible with Unix based Systems.\")\nelif OS == 'Linux':\n pass\nelse :\n print(\"Unidentifield OS....\")\n r = chr(input(' Pentools is only compatible with Linux System Operation, continue? (y)'))\n if r.upper != 'Y':\n sys.exit()\n\nsubprocess.run([\"sudo\",\"bash\", \"./core.sh\"])\n",
"id": "1787634",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "pentools.py"
},
{
"content": "#!/usr/bin/python3\n\nlist = [\"apktool\",\"dex2jar\",\"diStorm3\",\"edb-debugger\",\"jad\",\"javasnoop\",\n\"JD-GUI\",\"OllyDbg\",\"smali\",\"Valgrind\",\"YARA\",\"Reporting Tools\"]",
"id": "1692004",
"language": "Python",
"matching_score": 2,
"max_stars_count": 1,
"path": "tools/reverse_engineering.py"
},
{
"content": "#!/usr/bin/python3\n\nlist = [\"android-sdk\",\"apktool\",\"Arduino\",\"dex2jar\",\"Sakis3G\",\"smali\"]",
"id": "7230782",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "tools/hardware_hacking.py"
},
{
"content": "#!/usr/bin/python3\n\nlist = [\"Airbase-ng\",\"Aircrack-ng\",\"Airdecap-ng and Airdecloak-ng\",\"Aireplay-ng\",\n\"airgraph-ng\",\"Airmon-ng\",\"Airodump-ng\",\"airodump-ng-oui-update\",\"Airolib-ng\",\n\"Airserv-ng\",\"Airtun-ng\",\"Asleap\",\"Besside-ng\",\"Bluelog\",\"BlueMaho\",\"Bluepot\",\n\"BlueRanger\",\"Bluesnarfer\",\"Bully\",\"coWPAtty\",\"crackle\",\"eapmd5pass\",\"Easside-ng\",\n\"Fern Wifi Cracker\",\"FreeRADIUS-WPE\",\"Ghost Phisher\",\"GISKismet\",\"Gqrx\",\"gr-scan\",\n\"hostapd-wpe\",\"ivstools\",\"kalibrate-rtl\",\"KillerBee\",\"Kismet\",\"makeivs-ng\",\"mdk3\",\n\"mfcuk\",\"mfoc\",\"mfterm\",\"Multimon-NG\",\"Packetforge-ng\",\"PixieWPS\",\"Pyrit\",\"Reaver\",\n\"redfang\",\"RTLSDR Scanner\",\"Spooftooph\",\"Tkiptun-ng\",\"Wesside-ng\",\"Wifi Honey\",\"wifiphisher\",\n\"Wifitap\",\"Wifite\",\"wpaclean\"]",
"id": "11129786",
"language": "Python",
"matching_score": 0.7069661617279053,
"max_stars_count": 1,
"path": "tools/wireless_attacks.py"
},
{
"content": "#usr/bin/python3\n\nlist = [\"DHCPig\",\"FunkLoad\",\"iaxflood\",\"Inundator\",\"inviteflood\",\n\"ipv6-toolkit\",\"mdk3\",\"Reaver\",\"rtpflood\",\"SlowHTTPTest\",\"t50\",\n\"Termineter\",\"THC-IPV6\",\"THC-SSL-DOS\"]",
"id": "8540149",
"language": "Python",
"matching_score": 0.9822307825088501,
"max_stars_count": 1,
"path": "tools/stress_testing.py"
},
{
"content": "#!/usr/bin/python3\n\nlist = [\"apache-users\",\"Arachni\",\"BBQSQL\",\"BlindElephant\",\n\"Burp Suite\",\"CutyCapt\",\"DAVTest\",\"deblaze\",\"DIRB\",\"DirBuster\",\n\"fimap\",\"FunkLoad\",\"Gobuster\",\"Grabber\",\"hURL\",\"jboss-autopwn\",\n\"joomscan\",\"jSQL Injection\",\"Maltego Teeth\",\"Nikto\",\"PadBuster\",\n\"Paros\",\"Parsero\",\"plecost\",\"Powerfuzzer\",\"ProxyStrike\",\"Recon-ng\",\n\"Skipfish\",\"sqlmap\",\"Sqlninja\",\"sqlsus\",\"ua-tester\",\"Uniscan\",\"w3af\",\n\"WebScarab\",\"Webshag\",\"WebSlayer\",\"WebSploit\",\"Wfuzz\",\"WhatWeb\",\"WPScan\",\"XSSer\",\"zaproxy\"]",
"id": "5845528",
"language": "Python",
"matching_score": 1.4449063539505005,
"max_stars_count": 1,
"path": "tools/web_application_analysis.py"
},
{
"content": "#!/usr/bin/python3\n\nlist = [\"CaseFile\",\"cherrytree\",\"CutyCapt\",\"dos2unix\",\"Dradis\",\n\"MagicTree\",\"Metagoofil\",\"Nipper-ng\",\"pipal\",\"RDPY\"]",
"id": "5722874",
"language": "Python",
"matching_score": 0.7027186155319214,
"max_stars_count": 1,
"path": "tools/reporting_tools.py"
}
] | 1.170294 |
GeorgianStan | [
{
"content": "# BASE SETUP ( require modules)\n# ==============================================\nimport smbus\nimport time\nimport sys\nfrom datetime import datetime\nimport schedule\nimport pymongo\n\n#database setup\ntry:\n # set connection uri\n uri = 'mongodb://sensor:<EMAIL>:59305/ccs811'\n\n client = pymongo.MongoClient(uri)\n print('Connection successful to mLab')\n\n #get db and collection\n db = client.get_default_database()\n collection = db['data']\nexcept pymongo.errors.ConnectionFailure:\n print (\"Could not connect to MongoDB\")\n sys.exit()\n\n# set i2c address as 1\nbus = smbus.SMBus(1)\n# slave address\naddress = 0x5b\n\n#file where the results will be printed\nfile = open('data_log.txt','a')\n\ndef get_status():\n bus.write_byte(address,0x00)\n return bus.read_byte(address)\n\ndef get_hw_id():\n bus.write_byte(address,0x20)\n return bus.read_byte(address)\n\ndef reset_sensor():\n reset_vals = [0x11,0xE5,0x72,0x8A]\n bus.write_i2c_block_data(address,0XFF,reset_vals)\n\n#combine hex\n#/////////////////\ndef combine_hex(byte_1,byte_2):\n combined = byte_1 << 8 | byte_2\n return combined\n\n#get eCO2\n#/////////////////\ndef get_eCO2(eCO2_high_byte,eCO2_low_byte):\n eCO2 = combine_hex(eCO2_high_byte,eCO2_low_byte)\n return eCO2\n\n#get TVOC\n#/////////////////\ndef get_TVOC(TVOC_high_byte,TVOC_low_byte):\n TVOC = combine_hex(TVOC_high_byte,TVOC_low_byte)\n return TVOC\n\n#Pretty prin the data in a file and console\n#/////////////////\ndef print_data(eCO2,TVOC,status):\n date_str = str(\"Sample data on \" + str(time.strftime(\"%Y-%m-%d %H:%M:%S\")))\n line_separator = \"------------------------------------------\"\n data_array = [[line_separator,\"\",\"\"],[date_str,'',\"\"],[\"eC02\",\"TVOC\",\"status\"],[str(eCO2) + 'ppm' ,str(TVOC) + 'ppb',str(status)]]\n for row in data_array:\n file.write(\"{: >10} {: >10} {: >10}\\n\".format(*row))\n\tprint(\"{: >10} {: >10} {: >10}\\n\".format(*row))\n\ndef save_data(eCO2,TVOC):\n #post data in the collection\n date_object = datetime.now()\n collection.insert_one({\n \"eCO2\":eCO2,\n \"TVOC\":TVOC,\n \"time\":date_object\n })\n# Read data\n#/////////////////\ndef read_routine():\n data_status = get_status()\n if(data_status is not int('0x98',16)):\n print('failed to set a new data sample in ALG_RESULT_DATA status is',hex(data_status))\n # sys.exit()\n else:\n print('a new data sample is ready in ALG_RESULT_DATA')\n bus.write_byte(address,0x02)\n block_data_eCO2_TVOC = bus.read_i2c_block_data(address,0x02,8)\n\n eCO2 = get_eCO2(block_data_eCO2_TVOC[0],block_data_eCO2_TVOC[1])\n TVOC = get_TVOC(block_data_eCO2_TVOC[2],block_data_eCO2_TVOC[3])\n status = hex(block_data_eCO2_TVOC[4])\n print_data(eCO2,TVOC,status)\n save_data(eCO2,TVOC)\n\n#main\n#/////////////////\ndef run():\n hw_id = get_hw_id()\n if(hw_id is not int('0x81',16)):\n print('hw id invalid')\n sys.exit()\n else:\n print('hw id is OK, 0x81')\n\n # Read status to check if app is valid\n status_app_valid = get_status()\n if(status_app_valid is not int('0x10',16)):\n print('app not valid')\n sys.exit()\n else:\n print('status is OK, 0x10')\n\n # Switch from boot mode to application mode\n bus.write_byte(address,0xF4)\n status_fw_mode = get_status()\n if(status_fw_mode is not int('0x90',16)):\n print('failed to set in application mode, current status is',hex(status_fw_mode))\n sys.exit()\n else:\n print('status is OK(application mode), 0x90')\n\n # Set drive mode to x10 = measurements every second\n bus.write_byte_data(address, 0x01, 0x10)\n print('Sensor is reading data')\n 
time.sleep(1) #wait one secont so the sensor read data\n\n # schedule the read routine to one second\n schedule.every(1).seconds.do(read_routine)\n\n while(True):\n try:\n schedule.run_pending()\n except (KeyboardInterrupt,SystemExit):\n print('KeyboardInterrupt')\n file.close()\n reset_sensor()\n bus.close()\n\nif(__name__ == \"__main__\"):\n run()\n",
"id": "7770133",
"language": "Python",
"matching_score": 3.2546236515045166,
"max_stars_count": 1,
"path": "CCS811_python_node/ccs811_setup/css811.py"
},
{
"content": "\n#info https://github.com/mongolab/mongodb-driver-examples/blob/master/python/pymongo_simple_example.py\n\nimport pymongo\nimport time\nfrom datetime import datetime\nimport schedule\n\n\n# set connection uri\nuri = 'mongodb://sensor:<EMAIL>:59305/ccs811'\nclient = pymongo.MongoClient(uri)\n\ndb = client.get_default_database()\ncoll = db['data']\n\ndef save_data():\n date_object = datetime.now()\n coll.insert_one({\n \"eCO2\":'400ppm',\n \"TVOC\":'69ppb',\n \"time\":date_object\n })\n\n\nschedule.every(10).seconds.do(save_data)\n\nwhile(True):\n schedule.run_pending()\n",
"id": "4401828",
"language": "Python",
"matching_score": 2.196533441543579,
"max_stars_count": 1,
"path": "CCS811_python_node/ccs811_setup/dev/mlab_demo.py"
},
{
"content": "import pymongo\nimport time\nfrom datetime import datetime\nimport schedule\n\n#database setup\ntry:\n # set connection uri\n uri = 'mongodb://sensor:<EMAIL>:59305/ccs811'\n client = pymongo.MongoClient(uri)\n print('Connection successful to mLab')\n\n #get db and collection\n db = client.get_default_database()\n collection = db['data']\nexcept pymongo.errors.ConnectionFailure:\n print (\"Could not connect to MongoDB\")\n\ndate_object = datetime.now()\ncollection.insert_one({\n \"eCO2\":'asfd',\n \"TVOC\":'asfd',\n \"time\":date_object\n})\n",
"id": "10152926",
"language": "Python",
"matching_score": 3.235703468322754,
"max_stars_count": 1,
"path": "CCS811_python_node/ccs811_setup/dev/mlab_post.py"
},
{
"content": "import pymongo\nuri = None\ndef do_connection():\n try:\n # set connection uri\n uri = 'mongodb://interface:<EMAIL>:59305/ccs811'\n client = pymongo.MongoClient(uri)\n print('Connection successful to mLab')\n\n #get db and collection\n db = client.get_default_database()\n # coll = db['data']\n except pymongo.errors.ConnectionFailure:\n print (\"Could not connect to MongoDB\")\n\nprint(uri)\nif(__name__ == \"__main__\"):\n do_connection()\n",
"id": "1151068",
"language": "Python",
"matching_score": 1.828768253326416,
"max_stars_count": 1,
"path": "CCS811_python_node/ccs811_setup/dev/mlab_conn.py"
}
] | 2.716118 |
Hawrkw | [
{
"content": "import numpy as np\nimport json\nimport sys\nimport os\nfrom collections import OrderedDict\n\n\ndef ltwh2ltrb(ltwh, width, height):\n ltrb = np.asarray([ltwh[0], ltwh[1], ltwh[0] + ltwh[2], ltwh[1] + ltwh[3]])\n ltrb[ltrb <= 0] = 0\n ltrb[0] = ltrb[0] if ltrb[0] <= width else width\n ltrb[1] = ltrb[1] if ltrb[1] <= height else height\n ltrb[2] = ltrb[2] if ltrb[2] <= width else width\n ltrb[3] = ltrb[3] if ltrb[3] <= height else height\n return ltrb.tolist()\n\n\ndef GtToJson(inputfile, width=768, height=576):\n \"\"\"\n This function transform Gt file format to coco data format.\n Gt file is download in \"https://motchallenge.net/\".\n Usage:\n ex) $ python3 make.py gt/gt.txt\n \"\"\"\n f = open(inputfile, 'r')\n lines = f.readlines()\n entire = []\n last_index = -1\n is_first = True\n folder_name = []\n folder_name = inputfile.split('/')[-3]\n folder_name = folder_name if folder_name else './'\n\n for i, line in enumerate(lines):\n line_list = line.split(',')\n line_list = [int(float(x)) for x in line_list]\n if (line_list[7] != 1 and line_list[7] != 2 and line_list[7] != 7) and line_list[8] < 0.3:\n continue\n new_index = line_list[0]\n ltwh = line_list[2:6]\n\n if last_index != new_index:\n if is_first:\n is_first = False\n else:\n group_data[\"ann\"] = ann\n entire.append(group_data)\n\n group_data = OrderedDict()\n #file_name = MOT17-02/img1/000002.jpg---需要修改\n file_name = folder_name + \"/img1/{0:06}.jpg\".format(int(new_index)) # file format\n group_data[\"filename\"] = file_name\n group_data[\"width\"] = width\n group_data[\"height\"] = height\n\n ann = OrderedDict()\n\n ann[\"bboxes\"] = []\n ann[\"labels\"] = []\n ann[\"bboxes_ignore\"] = []\n ann[\"labels_ignore\"] = []\n\n ann[\"bboxes\"].append(ltwh2ltrb(ltwh, width, height))\n ann[\"labels\"].append(7)\n last_index = new_index\n\n else:\n ann[\"bboxes\"].append(ltwh2ltrb(ltwh, width, height))\n ann[\"labels\"].append(7)\n last_index = new_index\n\n if i + 1 == len(lines):\n group_data[\"ann\"] = ann\n entire.append(group_data)\n\n f.close()\n with open('json_from_{}.json'.format(inputfile.split('/')[-3]), 'w', encoding=\"utf-8\") as make_file:\n json.dump(entire, make_file, ensure_ascii=False, indent=\"\\t\")\n print(f\"\\nfile_name : {make_file.name}\\nwidth : {width}, height : {height}\\n\")\n os.chmod(make_file.name, 0o777)\n\n\nif __name__ == '__main__':\n # if (len(sys.argv) < 2):\n # print(\"please run 'make.py gt.txt [width, height]' format\")\n # sys.exit(1)\n # if len(sys.argv) == 4:\n # GtToJson(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))\n # elif len(sys.argv) == 2:\n # GtToJson(sys.argv[1])\n # else:\n # print(\"please run 'make.py gt.txt [width, height]' format\")\n # sys.exit(1)\n GtToJson(inputfile='/home/ivo/PycharmProjects/CenterNet-Track/data/MOT17Det/train/MOT17-02/gt/gt.txt',\n width=1920,height=1080)\n",
"id": "6828557",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "src/lib/datasets/dataset/mot2coco.py"
},
{
"content": "import os\nimport cv2\nimport os\nimport numpy as np\nimport time\nimport argparse\nimport shutil\nimport codecs\nimport progressbar\nimport sys\n#sys.path.append('/home/ivo/PycharmProjects/CenterNet-Track/')\ntrain_17 = ['/home/ivo/PycharmProjects/CenterNet-Track/data/MOT17Det/train/MOT17-02/',\n '/home/ivo/PycharmProjects/CenterNet-Track/data/MOT17Det/train/MOT17-04/',\n '/home/ivo/PycharmProjects/CenterNet-Track/data/MOT17Det/train/MOT17-05/',\n '/home/ivo/PycharmProjects/CenterNet-Track/data/MOT17Det/train/MOT17-11/',\n '/home/ivo/PycharmProjects/CenterNet-Track/data/MOT17Det/train/MOT17-13/']\n\nval_17 = ['/home/ivo/PycharmProjects/CenterNet-Track/data/MOT17Det/val/MOT17-09/',\n '/home/ivo/PycharmProjects/CenterNet-Track/data/MOT17Det/val/MOT17-10/',]\n\ntrain_20 = ['data/MOT20Det/train/MOT20-01/',\n 'data/MOT20Det/train/MOT20-02/',\n 'data/MOT20Det/train/MOT20-03/',\n 'data/MOT20Det/train/MOT20-05/']\n\nval_20 = ['MOT20Det/test/MOT20-04/',\n 'MOT20Det/test/MOT20-06/',\n 'MOT20Det/test/MOT20-07/',\n 'MOT20Det/test/MOT20-08/']\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Convert MOT2VOC format')\n parser.add_argument('--year',choices=['17', '20'],default='17',type=str,help='year of MOT dataset')\n args = parser.parse_args()\n return args\n\n\ndef parse_ini(dir):\n ini_fp = open(dir + 'seqinfo.ini', 'r')\n seq_info = ini_fp.readlines()\n seqLenth = int(seq_info[4][10:])\n imWidth = int(seq_info[5][8:])\n imHeight = int(seq_info[6][9:])\n return seqLenth, imWidth, imHeight\n\n\ndef gennerate_gt(gt, Annotation, frame, filename, width, height):\n fp_gt = open(gt)\n gt_lines = fp_gt.readlines()\n\n gt_fram = []\n for line in gt_lines:\n fram_id = int(line.split(',')[0])\n if fram_id == frame:\n visible = float(line.split(',')[8])\n label_class = line.split(',')[7]\n if (label_class == '1' or label_class == '2' or label_class == '7') and visible > 0.3:\n gt_fram.append(line)\n\n with codecs.open(Annotation + filename + '.xml', 'w') as xml:\n xml.write('<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n')\n xml.write('<annotation>\\n')\n xml.write('\\t<folder>' + 'voc' + '</folder>\\n')\n xml.write('\\t<filename>' + filename + '.jpg' + '</filename>\\n')\n # xml.write('\\t<path>' + path + \"/\" + info1 + '</path>\\n')\n xml.write('\\t<source>\\n')\n xml.write('\\t\\t<database> The MOT-Det </database>\\n')\n xml.write('\\t</source>\\n')\n xml.write('\\t<size>\\n')\n xml.write('\\t\\t<width>' + str(width) + '</width>\\n')\n xml.write('\\t\\t<height>' + str(height) + '</height>\\n')\n xml.write('\\t\\t<depth>' + '3' + '</depth>\\n')\n xml.write('\\t</size>\\n')\n xml.write('\\t\\t<segmented>0</segmented>\\n')\n for bbox in gt_fram:\n x1 = int(bbox.split(',')[2])\n y1 = int(bbox.split(',')[3])\n x2 = int(bbox.split(',')[4])\n y2 = int(bbox.split(',')[5])\n\n xml.write('\\t<object>\\n')\n xml.write('\\t\\t<name>person</name>\\n')\n xml.write('\\t\\t<pose>Unspecified</pose>\\n')\n xml.write('\\t\\t<truncated>0</truncated>\\n')\n xml.write('\\t\\t<difficult>0</difficult>\\n')\n xml.write('\\t\\t<bndbox>\\n')\n xml.write('\\t\\t\\t<xmin>' + str(x1) + '</xmin>\\n')\n xml.write('\\t\\t\\t<ymin>' + str(y1) + '</ymin>\\n')\n xml.write('\\t\\t\\t<xmax>' + str(x1 + x2) + '</xmax>\\n')\n xml.write('\\t\\t\\t<ymax>' + str(y1 + y2) + '</ymax>\\n')\n xml.write('\\t\\t</bndbox>\\n')\n xml.write('\\t</object>\\n')\n xml.write('</annotation>')\n\n\n# 用于校验图片数量和标注数量是否一致\ndef check_num(data_dir, JPEGImage_dir, Annotations_dir=None, ori_num=0):\n num = 0\n for folder in data_dir:\n 
folder_len, _, _ = parse_ini(folder)\n num += folder_len\n img_list = os.listdir(JPEGImage_dir)\n if ori_num == 0:\n img_num = len(img_list)\n else:\n img_num = len(img_list) - ori_num\n # print('img_num:',img_num)\n if Annotations_dir:\n ann_list = os.listdir(Annotations_dir)\n ann_num = len(ann_list)\n assert ann_num == num\n assert img_num == num, 'if it is the second time run this demo, please delete the JPEGImages folder and retry'\n # print('num:', num)\n print('folders {} have been succeed checked'.format(data_dir))\n return num\n\n\ndef segment_dataset(ImageSets, Main, thr1=0.8, thr2=0.9):\n fp_all = open(ImageSets + 'train_all.txt', 'r')\n fp_train = open(Main + 'train.txt', 'w')\n fp_test = open(Main + 'test.txt', 'w')\n fp_val = open(Main + 'val.txt', 'w')\n train_list = fp_all.readlines()\n print(len(train_list))\n\n for line in train_list:\n rand_a = np.random.rand(1)\n if rand_a <= thr1:\n fp_train.writelines(line)\n if rand_a > thr1:\n fp_val.writelines(line)\n # if rand_a > thr1 and rand_a <= thr2:\n # fp_val.writelines(line)\n # if rand_a > thr2 and rand_a <= 1:\n # fp_test.writelines(line)\n fp_train.close()\n fp_val.close()\n #fp_test.close()\n\n print('segment the MOT dataset into train,val,test subsets')\n\n\ndef transform_train():\n args = parse_args()\n if args.year == '17':\n train_dirs = train_17\n val_dirs = val_17\n if args.year == '20':\n train_dirs = train_20\n val_dirs = val_20\n\n motyear = args.year\n folder = '/home/ivo/PycharmProjects/CenterNet-Track/data/' + 'MOT' + motyear + 'Det' + '/train/'\n Annotations = folder + 'Annotations/'\n ImageSets = folder + 'ImageSets/'\n JPEGImages = folder + 'JPEGImages/'\n Main = ImageSets + 'Main/'\n if not os.path.exists(Annotations):\n os.makedirs(Annotations)\n if not os.path.exists(ImageSets):\n os.makedirs(ImageSets)\n if not os.path.exists(JPEGImages):\n os.makedirs(JPEGImages)\n if not os.path.exists(Main):\n os.makedirs(Main)\n\n fp_txt = open(ImageSets + 'train_all.txt', 'w')\n\n for train_ in train_dirs:\n seqLenth, imWidth, imHeight = parse_ini(train_)\n img1 = train_ + 'img1/'\n gt = train_ + 'gt/gt.txt'\n folder_id = train_[-3:-1]\n img_list = os.listdir(img1)\n\n assert len(img_list) == seqLenth\n print('start')\n for img in img_list:\n\n format_name = args.year + folder_id + img\n fp_txt.writelines(format_name[:-4] + '\\n') # 将生成的新的文件名写入train_all.txt,用于后续数据集拆分\n shutil.copy(img1 + img, JPEGImages + '/' + format_name) # 将文件移动到指定文件夹并重新命名\n frame = int(img[:-4])\n gennerate_gt(gt, Annotation=Annotations, frame=frame, filename=format_name[:-4], width=imWidth,\n height=imHeight) # 生成标注文件\n\n fp_txt.close()\n\n train_num = check_num(train_dirs, JPEGImages, Annotations)\n #segment_dataset(ImageSets, Main)\n\ndef transform_val():\n args = parse_args()\n if args.year == '17':\n train_dirs = train_17\n val_dirs = val_17\n if args.year == '20':\n train_dirs = train_20\n val_dirs = val_20\n\n motyear = args.year\n folder = 'data/' + 'MOT' + motyear + 'Det' + '/val/'\n Annotations = folder + 'Annotations/'\n ImageSets = folder + 'ImageSets/'\n JPEGImages = folder + 'JPEGImages/'\n Main = ImageSets + 'Main/'\n if not os.path.exists(Annotations):\n os.makedirs(Annotations)\n if not os.path.exists(ImageSets):\n os.makedirs(ImageSets)\n if not os.path.exists(JPEGImages):\n os.makedirs(JPEGImages)\n if not os.path.exists(Main):\n os.makedirs(Main)\n\n fp_txt = open(ImageSets + 'val_all.txt', 'w')\n\n for val_ in val_dirs:\n seqLenth, imWidth, imHeight = parse_ini(val_)\n img1 = val_ + 'img1/'\n gt = val_ + 'gt/gt.txt'\n 
folder_id = val_[-3:-1]\n img_list = os.listdir(img1)\n\n assert len(img_list) == seqLenth\n print('start')\n for img in img_list:\n format_name = args.year + folder_id + img\n fp_txt.writelines(format_name[:-4] + '\\n') # 将生成的新的文件名写入train_all.txt,用于后续数据集拆分\n shutil.copy(img1 + img, JPEGImages + '/' + format_name) # 将文件移动到指定文件夹并重新命名\n frame = int(img[:-4])\n gennerate_gt(gt, Annotation=Annotations, frame=frame, filename=format_name[:-4], width=imWidth,\n height=imHeight) # 生成标注文件\n\n fp_txt.close()\n\n val_num = check_num(val_dirs, JPEGImages, Annotations)\n print(val_num)\n\nif __name__ == '__main__':\n transform_train()\n #transform_val()",
"id": "4871518",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "src/lib/datasets/dataset/mot2voc.py"
}
] | 0 |
swathisa | [
{
"content": "\"\"\"Helper functions required to set up the environment for the example integration tests.\n\"\"\"\n\nimport random\nfrom datetime import datetime\nfrom unittest.mock import MagicMock\n\nimport pytest\nfrom elasticsearch_dsl.connections import connections\nfrom elasticsearch.helpers import bulk\nfrom elasticsearch.helpers.test import get_test_client\nimport docker\n\nfrom ebr_connector.schema.build_results import BuildResults\nfrom ebr_connector.index.generate_template import generate_template\nfrom tests import get_test_data_for_failed_build, get_test_data_for_successful_build\n\n\ndef get_index_name():\n \"\"\"Return name of index.\n \"\"\"\n return \"test_index\"\n\n\ndef get_alias_name():\n \"\"\"Return name of alias.\n \"\"\"\n return \"test_index_alias\"\n\n\ndef get_index_data():\n \"\"\"Return data to be stored in index.\n \"\"\"\n job_data_set = [\n {\n \"callback\": get_test_data_for_failed_build,\n \"buildstatus\": \"FAILURE\",\n \"buildid\": 1234,\n \"jobname\": \"cpp-reflection-tests-BB-baseline\",\n },\n {\n \"callback\": get_test_data_for_successful_build,\n \"buildstatus\": \"SUCCESS\",\n \"buildid\": 3,\n \"jobname\": \"cpp-reflection-tests-BB/PR-1234\",\n },\n {\n \"callback\": get_test_data_for_successful_build,\n \"buildstatus\": \"SUCCESS\",\n \"buildid\": 12345,\n \"jobname\": \"cpp-reflection-tests-BB-baseline\",\n },\n ]\n\n index_data = []\n for job_data in job_data_set:\n date_time = (datetime.utcnow()).isoformat()\n build_results = BuildResults.create(\n job_name=job_data.get(\"jobname\"),\n job_link=\"http://ci/%s\" % job_data.get(\"jobname\"),\n build_date_time=str(date_time),\n build_id=job_data.get(\"buildid\"),\n platform=\"Linux-x86_64\",\n product=\"MyProduct\",\n )\n mock_status_callback = MagicMock()\n mock_status_callback.return_value = job_data.get(\"buildstatus\")\n build_results.store_tests(job_data.get(\"callback\"))\n build_results.store_status(mock_status_callback)\n index_data.append(\n {\n \"_index\": get_index_name(),\n \"_type\": \"doc\",\n \"_id\": random.getrandbits(128),\n \"_source\": build_results.to_dict(),\n }\n )\n\n return index_data\n\n\n@pytest.fixture(scope=\"session\")\ndef elasticsearch_instance():\n \"\"\"Creates an elasticsearch docker container\n \"\"\"\n docker_client = docker.from_env()\n try:\n es_container = docker_client.containers.list(filters={\"name\": \"ebr_elasticsearch\"})\n if es_container and len(es_container) == 1:\n es_container[0].stop()\n es_container[0].remove()\n except docker.errors.DockerException:\n pass\n es_container = docker_client.containers.run(\n \"docker.elastic.co/elasticsearch/elasticsearch-oss:6.5.1\",\n detach=True,\n auto_remove=True,\n ports={\"9200\": \"9200\"},\n name=\"ebr_elasticsearch\",\n )\n yield\n # try:\n # es_container.stop()\n # except docker.errors.DockerException:\n # pass\n\n\n# pylint: disable=redefined-outer-name,unused-argument\n@pytest.fixture(scope=\"session\")\ndef client(elasticsearch_instance):\n \"\"\"Return a connection to the elasticsearch server\n \"\"\"\n connection = get_test_client(nowait=True)\n connections.add_connection(\"default\", connection)\n return connection\n\n\ndef create_index(connection):\n \"\"\"Creates an test index based on the BuildResults meta data (eg. 
mapping)\n \"\"\"\n connection.indices.create(index=get_index_name(), body=generate_template(get_alias_name()))\n\n\n# pylint: disable=redefined-outer-name\n@pytest.fixture(scope=\"session\")\ndef data_client(client):\n \"\"\"Connects to client and stores some test index data in elasticsearch\n \"\"\"\n create_index(client)\n bulk(client, get_index_data(), raise_on_error=True, refresh=True)\n yield client\n # client.indices.delete(get_index_name())\n",
"id": "2783000",
"language": "Python",
"matching_score": 2.3391153812408447,
"max_stars_count": 5,
"path": "tests/integration/examples/conftest.py"
},
{
"content": "\"\"\"\nTests for the examples.\n\"\"\"\n\nfrom ebr_connector.examples.query import query_failed_tests, query_for_successful_job\nfrom .conftest import get_index_name\n\n\n# pylint: disable=unused-argument\ndef test_query_failed_tests(data_client):\n \"\"\"Test `query_failed_tests` example.\n \"\"\"\n response = query_failed_tests(get_index_name())\n assert len(response) == 1 # 1 documents\n assert len(response[0][\"br_tests_object\"][\"br_tests_failed_object\"]) == 5\n\n\n# pylint: disable=unused-argument\ndef test_query_for_successful_job(data_client):\n \"\"\"Test `query_for_successful_job` example.\n \"\"\"\n response = query_for_successful_job(get_index_name())\n assert len(response) == 1\n assert not \"br_tests_failed_object\" in response[0][\"br_tests_object\"]\n assert not \"br_tests_skipped_object\" in response[0][\"br_tests_object\"]\n assert len(response[0][\"br_tests_object\"][\"br_tests_passed_object\"]) == 15\n",
"id": "1423353",
"language": "Python",
"matching_score": 1.832786202430725,
"max_stars_count": 5,
"path": "tests/integration/examples/test_query.py"
},
{
"content": "\"\"\"\nTests for the Tests innerdoc class.\n\"\"\"\n\nfrom unittest.mock import Mock\nfrom ebr_connector.schema.build_results import Tests\n\n\ndef test_default_ctor():\n \"\"\"Test default constructor\n \"\"\"\n summary = Tests()\n assert summary.__dict__ == {\"_d_\": {}, \"meta\": {}}\n\n\ndef test_create_factory_method():\n \"\"\"TestSummary create factory method\n \"\"\"\n summary = Mock()\n suites = Mock()\n tests_passed = Mock()\n tests_failed = Mock()\n tests_skipped = Mock()\n\n tests = Tests.create(\n suites=suites,\n tests_passed=tests_passed,\n tests_failed=tests_failed,\n tests_skipped=tests_skipped,\n summary=summary,\n )\n assert tests.br_suites_object == suites\n assert tests.br_summary_object == summary\n assert tests.br_tests_failed_object == tests_failed\n assert tests.br_tests_passed_object == tests_passed\n assert tests.br_tests_skipped_object == tests_skipped\n",
"id": "11029328",
"language": "Python",
"matching_score": 2.3925442695617676,
"max_stars_count": 0,
"path": "tests/unit/schema/test_tests_innerdoc.py"
},
{
"content": "\"\"\"\nTests for the TestSummary innerdoc class.\n\"\"\"\n\nfrom ebr_connector.schema.build_results import TestSummary\n\n\ndef test_default_ctor():\n \"\"\"Test default constructor\n \"\"\"\n summary = TestSummary()\n assert summary.__dict__ == {\"_d_\": {}, \"meta\": {}}\n\n\ndef test_create_factory_method():\n \"\"\"TestSummary create factory method\n \"\"\"\n test_summary = TestSummary.create(\n total_passed_count=10, total_failed_count=2, total_skipped_count=3, total_count=15\n )\n assert test_summary.br_total_passed_count == 10\n assert test_summary.br_total_failed_count == 2\n assert test_summary.br_total_skipped_count == 3\n assert test_summary.br_total_count == 15\n",
"id": "5991445",
"language": "Python",
"matching_score": 2.717994451522827,
"max_stars_count": 0,
"path": "tests/unit/schema/test_test_summary_innerdoc.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\nSerialization library to/from ElasticSearch for build results.\n\nThe classes :class:`ebr_connector.schema.Test`, :class:`ebr_connector.schema.TestSuite` and :class:`ebr_connector.schema.BuildResults`\nexpose factory methods that create instances of these types.\n\nWe cannot use Python constructors (`__init__`) since the underlying elasticsearch_dsl components make heavily usage\nof them, for example to add meta data attributes to document instances while using the search API.\n\nIn order to still have protection against violations of the schema fields we make use of a\nfactory method instead to create instances of these types.\n\"\"\"\n\nimport socket\nimport ssl\nimport json\nimport traceback\nimport warnings\n\nfrom enum import Enum\nfrom elasticsearch_dsl import Document, Text, InnerDoc, Float, Integer, Nested, Date, Keyword, MetaField, Object\n\nimport ebr_connector\nfrom ebr_connector.schema.dynamic_template import DYNAMIC_TEMPLATES\n\n\nclass Test(InnerDoc):\n \"\"\"\n Provides serialization for a single test\n\n Args:\n br_suite: Set (suite) the test is a part of\n br_classname: Class that the test is from\n br_test: Name of the test\n br_result: Result of the test (e.g. passed)\n br_message: Any output from the test\n br_duration: Duration in milliseconds (float) of the test\n br_reportset: (Optional) Report set the test is a part of\n br_context: (Optional) The runtime context of the test required to reproduce this execution\n \"\"\"\n\n br_suite = Text(fields={\"raw\": Keyword()})\n br_classname = Text(fields={\"raw\": Keyword()})\n br_test = Text(fields={\"raw\": Keyword()})\n br_result = Text()\n br_message = Text()\n br_duration = Float()\n br_reportset = Text()\n br_context = Text()\n br_fullname = Text(fields={\"raw\": Keyword()})\n\n class Result(Enum):\n \"\"\"Enum for keeping the test results in sync across CI hooks.\n \"\"\"\n\n FAILED = 1\n PASSED = 2\n SKIPPED = 3\n\n @staticmethod\n def create(result_str):\n \"\"\"Converts a test result string into a :class:`ebr_connector.schema.Test.Result` enum.\n \"\"\"\n upper_result_str = result_str.upper()\n if upper_result_str in [\"PASS\", \"PASSED\", \"SUCCESS\", \"FIXED\"]:\n return Test.Result.PASSED\n if upper_result_str in [\"FAILURE\", \"ERROR\", \"REGRESSION\", \"FAILED\"]:\n return Test.Result.FAILED\n if upper_result_str in [\"SKIP\", \"SKIPPED\"]:\n return Test.Result.SKIPPED\n raise ValueError(\"Unknown test result value '%s'\" % result_str)\n\n @staticmethod\n def create(suite, classname, test, result, message, duration, reportset=None, context=None):\n \"\"\"\n Factory method for creating a new instance of :class:`ebr_connector.schema.Test`.\n \"\"\"\n\n return Test(\n br_suite=suite,\n br_classname=classname,\n br_test=test,\n br_result=result,\n br_message=message,\n br_duration=duration,\n br_reportset=reportset,\n br_context=context,\n br_fullname=suite + \".\" + test,\n )\n\n\nclass TestSuite(InnerDoc):\n \"\"\"\n Provides serialization for Test Sets (test suites)\n\n Args:\n br_name: Name of the suite\n br_failures_count: Number of failing tests\n br_skipped_count: Number of skipped tests\n br_passed_count: Number of passed tests\n br_total_count: Total number of tests\n br_duration: Duration in milliseconds (float) of the entire test suite\n br_package: (Optional) package the test set is associated with\n \"\"\"\n\n br_name = Text(fields={\"raw\": Keyword()})\n br_failures_count = Integer()\n br_skipped_count = Integer()\n br_passed_count = Integer()\n 
br_total_count = Integer()\n br_duration = Float()\n br_package = Text(fields={\"raw\": Keyword()})\n\n @staticmethod\n def create(name, failures_count, skipped_count, passed_count, total_count, duration, package=None):\n \"\"\"\n Factory method for creating a new instance of :class:`ebr_connector.schema.TestSuite`.\n \"\"\"\n return TestSuite(\n br_name=name,\n br_failures_count=failures_count,\n br_skipped_count=skipped_count,\n br_passed_count=passed_count,\n br_total_count=total_count,\n br_duration=duration,\n br_package=package,\n )\n\n\nclass TestSummary(InnerDoc):\n \"\"\"\n Class summarizing all passed/failed/skipped tests across all test sets.\n\n Args:\n br_total_passed_count: Total number of all passed test cases\n br_total_failed_count: Total number of all failed test cases\n br_total_skipped_count: Total number of all skipped test cases\n br_total_count: Total number of all passed/failed/skipped test cases\n \"\"\"\n\n br_total_passed_count = Integer()\n br_total_failed_count = Integer()\n br_total_skipped_count = Integer()\n br_total_count = Integer()\n\n @staticmethod\n def create(total_passed_count, total_failed_count, total_skipped_count, total_count):\n \"\"\"\n Factory method for creating a new instance of :class:`ebr_connector.schema.TestSummary`.\n \"\"\"\n return TestSummary(\n br_total_passed_count=total_passed_count,\n br_total_failed_count=total_failed_count,\n br_total_skipped_count=total_skipped_count,\n br_total_count=total_count,\n )\n\n\nclass Tests(InnerDoc):\n \"\"\"\n Class used to group nested objects of failed/passed/skipped tests, suites, etc\n\n Args:\n br_suites_object: Set of test suites\n br_tests_passed_object: Set of passed test cases\n br_tests_failed_object: Set of failed test cases\n br_tests_skipped_object: Set of skipped test cases\n br_summary_object: Summary over all tests\n \"\"\"\n\n br_suites_object = Nested(TestSuite)\n br_tests_passed_object = Nested(Test)\n br_tests_failed_object = Nested(Test)\n br_tests_skipped_object = Nested(Test)\n br_summary_object = Object(TestSummary)\n\n @staticmethod\n def create(suites, tests_passed, tests_failed, tests_skipped, summary):\n \"\"\"\n Factory method for creating a new instance of :class:`ebr_connector.schema.Tests`.\n \"\"\"\n return Tests(\n br_suites_object=suites,\n br_tests_passed_object=tests_passed,\n br_tests_failed_object=tests_failed,\n br_tests_skipped_object=tests_skipped,\n br_summary_object=summary,\n )\n\n\nclass _BuildResultsMetaDocument(Document):\n \"\"\"Base class for the BuildResults document describing the index structure.\n \"\"\"\n\n # pylint: disable=too-few-public-methods\n class Meta:\n \"\"\"Stores the plain template version in the generated index template. We cannot use the builtin `version` field\n since it is of type `integer` and we use semantic versioning.\n This data is for pure information purposes and won't be used at all by Elasticsearch.\n See as well https://www.ebr_connector.co/guide/en/elasticsearch/reference/current/mapping-meta-field.html#mapping-meta-field.\n \"\"\"\n\n meta = MetaField(template_version=ebr_connector.__version__)\n dynamic_templates = MetaField(DYNAMIC_TEMPLATES)\n\n\nclass BuildResults(_BuildResultsMetaDocument):\n \"\"\"\n Top level serialization for build results\n\n Args:\n br_job_name: Name of the job that owns the build being recorded (eg. Jenkins job name or QuickBuild configuration name)\n br_job_url_key: Link to the job on the CI system that executed it\n br_job_info: Additional information about the job. (eg. 
'B.1234.COMMIT-1234')\n br_source: The source which caused the job to be triggered (eg. PR id or branch name)\n br_build_date_time: Execution time of the build (ISO-8601 format recommended)\n br_build_id_key: ID of the build\n br_platform: Platform of the build\n br_product: Product the build is associated with\n br_status_key: Status of the build (eg. if one test failed the overall build status should as well be failed)\n br_tests_object: A container for storing failed/passed/skipped tests, total summary, etc. See :class:`ebr_connector.schema.Tests` for more details\n br_version_key: Version of the BuildResults schema\n br_product_version_key: Version of the product (eg. Commit hash or semantic version)\n \"\"\"\n\n br_job_name = Text(fields={\"raw\": Keyword()})\n br_job_url_key = Keyword()\n br_job_info = Text(fields={\"raw\": Keyword()})\n br_source = Text(fields={\"raw\": Keyword()})\n br_build_date_time = Date()\n br_build_id_key = Keyword()\n br_platform = Text(fields={\"raw\": Keyword()})\n br_product = Text(fields={\"raw\": Keyword()})\n br_status_key = Keyword()\n br_tests_object = Object(Tests)\n br_version_key = Keyword()\n br_product_version_key = Keyword()\n\n class BuildStatus(Enum):\n \"\"\"\n Status of a build\n \"\"\"\n\n ABORTED = 1\n FAILURE = 2\n NOT_BUILT = 3\n RUNNING = 4\n SUCCESS = 5\n TIMEOUT = 6\n UNSTABLE = 7\n\n @staticmethod\n def create(build_status_str):\n \"\"\"\n Converts a build status string into a :class:`ebr_connector.schema.BuildResults.BuildStatus` enum.\n \"\"\"\n upper_build_status_str = build_status_str.upper()\n if upper_build_status_str in [\"SUCCESS\", \"SUCCESSFUL\"]:\n status = BuildResults.BuildStatus.SUCCESS\n elif upper_build_status_str in [\"FAILURE\", \"FAILED\"]:\n status = BuildResults.BuildStatus.FAILURE\n elif upper_build_status_str in [\"ABORT\", \"ABORTED\", \"CANCEL\", \"CANCELLED\"]:\n status = BuildResults.BuildStatus.ABORTED\n elif upper_build_status_str in [\"NOT_BUILT\", \"SKIPPED\"]:\n status = BuildResults.BuildStatus.NOT_BUILT\n elif upper_build_status_str in [\"UNSTABLE\"]:\n status = BuildResults.BuildStatus.UNSTABLE\n elif upper_build_status_str in [\"TIMEOUT\", \"TIMEDOUT\"]:\n status = BuildResults.BuildStatus.TIMEOUT\n elif upper_build_status_str in [\"RUNNING\", \"BUILDING\"]:\n status = BuildResults.BuildStatus.RUNNING\n else:\n raise ValueError(\"Unknown build status string '%s'\" % build_status_str)\n return status\n\n @staticmethod\n def create(\n job_name, job_link, build_date_time, build_id, platform, product=None, job_info=None, product_version=None\n ):\n \"\"\"\n Creates an immutable instance of :class:`ebr_connector.schema.BuildResults`.\n \"\"\"\n return BuildResults(\n br_job_name=job_name,\n br_job_url_key=job_link,\n br_build_date_time=build_date_time,\n br_build_id_key=build_id,\n br_platform=platform,\n br_product=product,\n br_job_info=job_info,\n br_status_key=None,\n br_tests_object={},\n br_version_key=ebr_connector.__version__,\n br_product_version_key=product_version,\n )\n\n def store_tests(self, retrieve_function, *args, **kwargs):\n \"\"\"\n Retrieves the test results of a build and adds them to the :class:`ebr_connector.schema.BuildResults` object\n\n Args:\n retrieve_function: Callback function which provides test and suite data in dictionaries\n (see Test and TestSuite documentation for format)\n \"\"\"\n try:\n results = retrieve_function(*args, **kwargs)\n self.br_tests_object = Tests()\n\n for test in results.get(\"tests\", None):\n test_result = Test.Result[test.get(\"result\", 
Test.Result.FAILED)]\n if test_result == Test.Result.PASSED:\n self.br_tests_object.br_tests_passed_object.append(Test.create(**test))\n elif test_result == Test.Result.FAILED:\n self.br_tests_object.br_tests_failed_object.append(Test.create(**test))\n else:\n self.br_tests_object.br_tests_skipped_object.append(Test.create(**test))\n\n total_passed_count = len(self.br_tests_object.br_tests_passed_object)\n total_failed_count = len(self.br_tests_object.br_tests_failed_object)\n total_skipped_count = len(self.br_tests_object.br_tests_skipped_object)\n self.br_tests_object.br_summary_object = TestSummary.create(\n total_passed_count=total_passed_count,\n total_failed_count=total_failed_count,\n total_skipped_count=total_skipped_count,\n total_count=total_passed_count + total_failed_count + total_skipped_count,\n )\n\n for suite in results.get(\"suites\", None):\n self.br_tests_object.br_suites_object.append(TestSuite.create(**suite))\n\n except (KeyError, TypeError):\n warnings.warn(\"Failed to retrieve test data.\")\n traceback.print_exc()\n\n def store_status(self, status_function, *args, **kwargs):\n \"\"\"\n Retrieves the status of a build and adds it to the :class:`ebr_connector.schema.BuildResults` object\n\n Args:\n status_function: Callback function which provides status information\n \"\"\"\n try:\n self.br_status_key = status_function(*args, **kwargs)\n except (KeyError, TypeError):\n warnings.warn(\"Failed to retrieve status information.\")\n traceback.print_exc()\n\n def save_logcollect(self, dest, port, cafile=None, clientcert=None, clientkey=None, keypass=\"\", timeout=10):\n \"\"\"\n Saves the :class:`ebr_connector.schema.BuildResults` object to a LogCollector instance.\n\n Args:\n dest: URL/IP of the LogCollector server\n port: port of the raw intake on the LogCollector server\n cafile: (optional) file location of the root CA certificate that signed the\n LogCollector's certificate (or the LogCollector's certificate if self-signed)\n clientcert: (optional) file location of the client certificate\n clientkey: (optional) file location of the client key\n keypass: (optional) password of the client key (leave blank if unset)\n timeout: (optional) socket timeout in seconds for the write operation (10 seconds if unset)\n \"\"\"\n\n result = str.encode(json.dumps(self.to_dict()))\n\n bare_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n bare_socket.settimeout(timeout)\n context = ssl.create_default_context(cafile=cafile)\n\n if clientcert:\n context.verify_mode = ssl.CERT_REQUIRED\n context.load_cert_chain(clientcert, clientkey, keypass)\n\n with context.wrap_socket(bare_socket, server_hostname=dest) as secure_socket:\n secure_socket.connect((dest, port))\n secure_socket.send(result)\n",
"id": "3751751",
"language": "Python",
"matching_score": 4.007740497589111,
"max_stars_count": 0,
"path": "ebr_connector/schema/build_results.py"
},
{
"content": "\"\"\"\nTests for the Test innerdoc class.\n\"\"\"\n\nimport pytest\n\nfrom ebr_connector.schema.build_results import Test\n\n\n@pytest.mark.parametrize(\n \"test_input,expected\",\n [\n (\"Success\", Test.Result.PASSED),\n (\"Fixed\", Test.Result.PASSED),\n (\"Pass\", Test.Result.PASSED),\n (\"Passed\", Test.Result.PASSED),\n (\"Failure\", Test.Result.FAILED),\n (\"Error\", Test.Result.FAILED),\n (\"Regression\", Test.Result.FAILED),\n (\"Failed\", Test.Result.FAILED),\n (\"Skip\", Test.Result.SKIPPED),\n (\"Skipped\", Test.Result.SKIPPED),\n ],\n)\ndef test_create_valid_test_result(test_input, expected):\n \"\"\"Test various valid test result strings that can be converted\n to proper :class:`ebr_connector.schema.Test.Result` objects.\n \"\"\"\n assert Test.Result.create(test_input) == expected\n assert Test.Result.create(test_input.lower()) == expected\n assert Test.Result.create(test_input.upper()) == expected\n\n\ndef test_create_test_result_throws_exception():\n \"\"\"Test that unknown status strings should result in exception.\n \"\"\"\n with pytest.raises(ValueError):\n Test.Result.create(\"unknown_result\")\n\n\ndef test_default_ctor():\n \"\"\"Test default constructor\n \"\"\"\n test = Test()\n assert test.__dict__ == {\"_d_\": {}, \"meta\": {}}\n\n\ndef test_create_factory_method():\n \"\"\"Test create factory method\n \"\"\"\n test = Test.create(\n suite=\"my_suitename\",\n classname=\"my_classname\",\n test=\"my_testname\",\n result=\"SUCCESS\",\n message=\"my_message\",\n duration=100.12,\n reportset=\"my_unittests\",\n )\n assert test.br_suite == \"my_suitename\"\n assert test.br_classname == \"my_classname\"\n assert test.br_test == \"my_testname\"\n assert test.br_result == \"SUCCESS\"\n assert test.br_message == \"my_message\"\n assert test.br_duration == 100.12\n assert test.br_reportset == \"my_unittests\"\n assert test.br_fullname == \"my_suitename.my_testname\"\n",
"id": "392799",
"language": "Python",
"matching_score": 2.045187473297119,
"max_stars_count": 0,
"path": "tests/unit/schema/test_test_innerdoc.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Module providing some test data.\n\"\"\"\n\nfrom ebr_connector.schema.build_results import Test\n\n\ndef get_test_data_for_successful_build():\n \"\"\"Returns a test data set of test suites and cases that passed.\n \"\"\"\n return _get_test_data([\"PASSED\", \"PASSED\", \"PASSED\"])\n\n\ndef get_test_data_for_failed_build():\n \"\"\"Returns a test data set of test suites and cases with some failed/passed/skipped tests.\n \"\"\"\n return _get_test_data([\"SKIPPED\", \"FAILED\", \"PASSED\"])\n\n\ndef _get_test_data(test_case_results):\n \"\"\"Returns suites with `len(test_case_results)` test cases per suite.\n \"\"\"\n results = {\"tests\": [], \"suites\": []}\n\n for suite_index in range(0, 5):\n failed_case_no = 0\n passed_case_no = 0\n skipped_case_no = 0\n suite_duration = 0\n suite_name = \"MySuite_%s\" % str(suite_index)\n\n for test_index, result in enumerate(test_case_results):\n test_result = Test.Result.create(result)\n test = {\n \"suite\": suite_name,\n \"classname\": \"org.acme.MyTest_%s_%s\" % (str(suite_index), str(test_index)),\n \"test\": \"test_case_%s\" % str(test_index),\n \"result\": test_result.name,\n \"message\": \"Some test output message - %s\" % str(test_index),\n \"duration\": float(158 + suite_index + test_index),\n }\n\n if test_result == Test.Result.FAILED:\n failed_case_no += 1\n elif test_result == Test.Result.SKIPPED:\n skipped_case_no += 1\n else:\n passed_case_no += 1\n suite_duration += test[\"duration\"]\n results[\"tests\"].append(test)\n\n suite_result = {\n \"failures_count\": failed_case_no,\n \"skipped_count\": skipped_case_no,\n \"passed_count\": passed_case_no,\n \"total_count\": len(test_case_results),\n \"name\": suite_name,\n \"duration\": suite_duration,\n }\n\n results[\"suites\"].append(suite_result)\n\n return results\n",
"id": "1065513",
"language": "Python",
"matching_score": 0.6096992492675781,
"max_stars_count": 5,
"path": "tests/__init__.py"
},
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nGenerates an index template for ElasticSearch from the BuildResults document.\n\"\"\"\n\nimport argparse\nimport json\nimport sys\n\nfrom elasticsearch_dsl import Index\nfrom ebr_connector.schema.build_results import _BuildResultsMetaDocument\n\n\ndef generate_template(index_name):\n \"\"\"\n Generates the index template associated with the structure of the BuildResults\n document, allowing it to be uploaded to an ElasticSearch instance.\n\n Args:\n index_name: index name to generate the template with, should be the index the module will upload to\n output_file: (optional) file path to write template to\n \"\"\"\n\n document = _BuildResultsMetaDocument()\n index = Index(name=index_name)\n index.document(document)\n index.settings(refresh_interval=\"30s\", number_of_shards=\"1\", number_of_replicas=\"1\")\n index.aliases(**{index_name: {}})\n index_template = index.as_template(template_name=\"template_\" + index_name, pattern=\"%s-*\" % index_name)\n return index_template.to_dict()\n\n\ndef main():\n \"\"\"\n CLI interface to generate the index template for BuildResults\n \"\"\"\n parser = argparse.ArgumentParser(description=\"Script for generating an index template out of a document\")\n parser.add_argument(\"INDEX_NAME\", help=\"Name of index\")\n parser.add_argument(\"--output_file\", help=\"File to write schema to\")\n args = parser.parse_args()\n\n output = generate_template(args.INDEX_NAME)\n if args.output_file:\n with open(args.output_file, \"w\") as file:\n json.dump(output, file, ensure_ascii=False, indent=4, sort_keys=True)\n else:\n print(json.dumps(output, ensure_ascii=False, indent=4, sort_keys=True))\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n",
"id": "10201021",
"language": "Python",
"matching_score": 2.4460110664367676,
"max_stars_count": 5,
"path": "ebr_connector/index/generate_template.py"
},
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Example how to query ElasticSearch for test reports.\n\"\"\"\n\n\nimport argparse\nfrom getpass import getpass\nimport json\nimport sys\nimport urllib\nfrom elasticsearch_dsl import connections\nfrom ebr_connector.prepacked_queries.multi_jobs import successful_jobs, failed_tests\n\n\ndef main():\n \"\"\"Main entrypoint for example script querying ElasticSearch.\n \"\"\"\n\n parser = argparse.ArgumentParser(description=\"Script runnig several example queries against Elasticsearch.\")\n parser.add_argument(\n \"--host\", default=\"localhost\", help=\"[Optional] Elasticsearch host to connect to (default: localhost)\"\n )\n parser.add_argument(\"--port\", default=9200, help=\"[Optional] Elasticsearch port to connect to (default: 9200)\")\n parser.add_argument(\"--cacert\", help=\"[Optional] CA cert file in PEM format (if required)\")\n parser.add_argument(\"--ssl\", default=True, type=bool, help=\"[Optional] Set to false to use plaintext HTTP\")\n parser.add_argument(\"--user\", default=\"elastic\", help=\"[Optional] User account to bind to (default: elastic)\")\n parser.add_argument(\n \"--password\", default=None, help=\"[Optional] Password for user account to bind to (default: None)\"\n )\n parser.add_argument(\n \"--index\", default=\"staging*\", help=\"[Optional] Name of Elasticsearch index (default: staging*)\"\n )\n args = parser.parse_args()\n\n if not args.password and args.user:\n args.password = getpass(\"Password for \" + args.user + \": \")\n\n # URL encode the user and password to enable it to used with HTTP BASIC auth safely\n enc_user = urllib.parse.quote_plus(args.user)\n enc_password = urllib.parse.quote_plus(args.password)\n\n # Create default connection to Elasticsearch instance\n connections.create_connection(\n hosts=[\n {\n \"host\": args.host,\n \"http_auth\": enc_user + \":\" + enc_password,\n \"port\": args.port,\n \"timeout\": 20,\n \"use_ssl\": args.ssl,\n \"verify_certs\": bool(args.cacert),\n \"ca_certs\": args.cacert,\n }\n ]\n )\n\n query_failed_tests(args.index)\n query_for_successful_job(args.index)\n\n\ndef query_for_successful_job(index):\n \"\"\"Queries for successful tests\n \"\"\"\n response = successful_jobs(index, \"cpp-reflection-tests-BB.*PR-.*\", size=5)\n\n # Iterate over the search results\n for hit in response:\n dump_formatted(hit)\n print(\"Hits: %d\" % len(response))\n\n return response\n\n\ndef query_failed_tests(index):\n \"\"\"Queries for failed tests\n \"\"\"\n response = failed_tests(index, job_name=\"cpp-reflection-tests-BB-baseline\", size=5)\n\n # Iterate over the response and print only the hits\n for hit in response:\n dump_formatted(hit)\n print(\"Hits: %d\" % len(response))\n\n return response\n\n\ndef dump_formatted(json_value):\n \"\"\"Dump the json value formatted on the console.\n \"\"\"\n print(json.dumps(json_value.to_dict(), indent=2, sort_keys=True, default=str))\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n",
"id": "3700277",
"language": "Python",
"matching_score": 1.5274325609207153,
"max_stars_count": 0,
"path": "ebr_connector/examples/query.py"
},
{
"content": "\"\"\"\nModule with basic wrapper for making a query to elastic search, as well as default field lists for including/excluding in results\n\"\"\"\n\nfrom deprecated.sphinx import deprecated\nfrom ebr_connector.schema.build_results import BuildResults\nfrom ebr_connector.prepacked_queries import DEPRECATION_MESSAGE\n\n\n# Provides common job details, without all passing and skipped tests\nDETAILED_JOB = {\n \"includes\": [\n \"br_build_date_time\",\n \"br_job_name\",\n \"br_job_url_key\",\n \"br_source\",\n \"br_build_id_key\",\n \"br_platform\",\n \"br_product\",\n \"br_status_key\",\n \"br_version_key\",\n \"br_tests_object\",\n ],\n \"excludes\": [\n \"lhi*\",\n \"br_tests_object.br_tests_passed_object.*\",\n \"br_tests_object.br_tests_skipped_object.*\",\n \"br_tests_object.br_suites_object.*\",\n ],\n}\n\nJOB_MINIMAL = {\"includes\": [\"br_job_name\", \"br_build_id_key\", \"br_status_key\", \"br_build_date_time\"], \"excludes\": []}\n\n\n@deprecated(version=\"0.1.1\", reason=DEPRECATION_MESSAGE)\ndef make_query(index, combined_filter, includes, excludes, agg=None, size=1):\n \"\"\"\n Simplifies the execution and usage of a typical query, including cleaning up the results.\n\n Args:\n index: index to search on\n combined_filter: combined set of filters to run the query with\n includes: list of fields to include on the results (keep as small as possible to improve execution time)\n excludes: list of fields to explicitly exclude from the results\n size: [Optional] number of results to return. Defaults to 1.\n Returns:\n List of dicts with results of the query.\n \"\"\"\n search = BuildResults().search(index=index)\n search = search.source(includes=includes, excludes=excludes)\n if agg:\n search = search.aggs.metric(\"fail_count\", agg)\n search = search.query(\"bool\", filter=[combined_filter])[0:1] # pylint: disable=no-member\n search = search[0:size]\n response = search.execute()\n results = []\n\n if agg:\n results = response[\"aggregations\"][\"fail_count\"][\"buckets\"]\n else:\n for hit in response[\"hits\"][\"hits\"]:\n results.append(hit[\"_source\"])\n return results\n",
"id": "4237853",
"language": "Python",
"matching_score": 4.4553680419921875,
"max_stars_count": 5,
"path": "ebr_connector/prepacked_queries/query.py"
},
{
"content": "\"\"\"\nA collection of queries that provide multiple results as an array of dicts\n\"\"\"\n\nfrom elasticsearch_dsl import Q, A\nfrom deprecated.sphinx import deprecated\n\nfrom ebr_connector.schema.build_results import BuildResults\nfrom ebr_connector.prepacked_queries import DEPRECATION_MESSAGE\nfrom ebr_connector.prepacked_queries.query import make_query, DETAILED_JOB, JOB_MINIMAL\n\n\n@deprecated(version=\"0.1.1\", reason=DEPRECATION_MESSAGE)\ndef successful_jobs(index, job_name_regex, size=10, start_date=\"now-7d\", end_date=\"now\"):\n \"\"\"\n Get the results of jobs matching the job name regex provided.\n\n Args:\n index: Elastic search index to use\n job_name_regex: Regex for elastic search to match against\n size: [Optional] Number of results to return. Default is 10.\n start_date: [Optional] Specify start date (string in elastic search format). Default is 7 days ago.\n end_data: [Optional] Specify end date (string in elastic search format). Default is now.\n Returns:\n An array of dicts of the matching jobs\n \"\"\"\n ## Search for all jobs that fullfil the regex. The regex is evaluated on the keyword field (`raw`) of the field `br_job_name`.\n match_jobname = Q(\"regexp\", br_job_name__raw=job_name_regex)\n ## and have the following build status\n match_status = Q(\"match\", br_status_key=BuildResults.BuildStatus.SUCCESS.name)\n\n range_time = Q(\"range\", **{\"br_build_date_time\": {\"gte\": start_date, \"lt\": end_date}})\n\n combined_filter = match_jobname & match_status & range_time\n\n result = make_query(\n index, combined_filter, includes=DETAILED_JOB[\"includes\"], excludes=DETAILED_JOB[\"excludes\"], size=size\n )\n return result\n\n\n@deprecated(version=\"0.1.1\", reason=DEPRECATION_MESSAGE)\ndef failed_tests(\n index,\n job_name,\n size=10,\n fail_count=5,\n duration_low=162.38,\n duration_high=320,\n start_date=\"now-7d\",\n end_date=\"now\",\n agg=False,\n): # pylint: disable=too-many-locals\n \"\"\"\n Get jobs with failed tests matching certain parameters\n\n Args:\n index: Elastic search index to use\n job_name: Job name to evaluate\n size: [Optional] Number of results to return. Default is 10.\n fail_count: [Optional] Minimum number of failures for inclusion. Default is 5.\n duration_low: [Optional] Minimum test duration for inclusion in results. Default is 162.38\n duration_high: [Optional] Maximum test duration for inclusion in results. Default is 320.\n start_date: [Optional] Specify start date (string in elastic search format). Default is 7 days ago.\n end_data: [Optional] Specify end date (string in elastic search format). 
Default is now.\n agg: [Optional] Converts the query to an aggregation query over the tests.\n Returns:\n An array of dicts of the matching jobs\n \"\"\"\n ## Search for \"failure\", \"FAILURE\", \"unstable\", \"UNSTABLE\"\n match_status = Q(\"match\", br_status_key=BuildResults.BuildStatus.FAILURE.name) | Q(\n \"match\", br_status_key=BuildResults.BuildStatus.UNSTABLE.name\n )\n ## Search for documents within the last 7 days\n range_time = Q(\"range\", **{\"br_build_date_time\": {\"gte\": start_date, \"lt\": end_date}})\n ## Search for documents where the total fail count >= 5\n more_than_one_failures = Q(\n \"range\", **{\"br_tests_object.br_summary_object.br_total_failed_count\": {\"gte\": fail_count}}\n )\n\n ## Filter out the test cases running between 162.38 and 320 seconds\n duration_between = Q(\n \"range\", **{\"br_tests_object.br_tests_failed_object.br_duration\": {\"gte\": duration_low, \"lte\": duration_high}}\n )\n\n # Combine them\n combined_filter = match_status & range_time & more_than_one_failures & duration_between\n\n if job_name:\n ## Search for the exact job name\n combined_filter &= Q(\"term\", br_job_name__raw=job_name)\n\n # Setup aggregation\n test_agg = None\n if agg:\n test_agg = A(\"terms\", field=\"br_tests_object.br_tests_failed_object.br_fullname.raw\")\n\n return make_query(\n index,\n combined_filter,\n includes=DETAILED_JOB[\"includes\"],\n excludes=DETAILED_JOB[\"excludes\"],\n size=size,\n agg=test_agg,\n )\n\n\n@deprecated(version=\"0.1.1\", reason=DEPRECATION_MESSAGE)\ndef job_matching_test(\n index,\n test_name,\n passed=True,\n failed=True,\n skipped=False,\n job_name=None,\n size=10,\n start_date=\"now-7d\",\n end_date=\"now\",\n):\n \"\"\"\n Get information on a given test\n\n Args:\n index: Elastic search index to use\n test_name: Test name to look up, can include wildcards\n passed: Set to true to include passed tests while searching\n failed: Set to true to include failed tests while searching\n skipped: Set to true to include skipped tests while searching\n job_name: Job name to evaluate\n size: [Optional] Number of results to return. Default is 10.\n start_date: [Optional] Specify start date (string in elastic search format). Default is 7 days ago.\n end_date: [Optional] Specify end date (string in elastic search format). 
Default is now.\n Returns:\n An array of dicts of the matching information\n \"\"\"\n # Over the specified time\n combined_filter = Q(\"range\", **{\"br_build_date_time\": {\"gte\": start_date, \"lt\": end_date}})\n test_status_filter = None\n\n if passed:\n match_testname_passed = Q(\"wildcard\", br_tests_object__br_tests_passed_object__br_fullname__raw=test_name)\n test_status_filter = match_testname_passed\n\n if failed:\n match_testname_failed = Q(\"wildcard\", br_tests_object__br_tests_failed_object__br_fullname__raw=test_name)\n test_status_filter = (\n match_testname_failed if not test_status_filter else test_status_filter | match_testname_failed\n )\n\n if skipped:\n match_testname_skipped = Q(\"wildcard\", br_tests_object__br_tests_skipped_object__br_fullname__raw=test_name)\n test_status_filter = (\n match_testname_skipped if not test_status_filter else test_status_filter | match_testname_skipped\n )\n\n if test_status_filter:\n combined_filter &= test_status_filter\n\n # Add job_name restriction of set\n if job_name:\n match_jobname = Q(\"term\", br_job_name__raw=job_name)\n combined_filter &= match_jobname\n\n return make_query(\n index, combined_filter, includes=JOB_MINIMAL[\"includes\"], excludes=JOB_MINIMAL[\"excludes\"], size=size\n )\n\n\n@deprecated(version=\"0.1.1\", reason=DEPRECATION_MESSAGE)\ndef get_job(index, job_name, wildcard=False, size=10, start_date=\"now-7d\", end_date=\"now\"):\n \"\"\"\n Get a list of all the builds recorded for a given job\n\n Args:\n index: Elastic search index to use\n job_name: Name of job to search within\n wildcard: When true, search with wildcard instead of exact match\n Returns:\n A list of the results from the job requested\n \"\"\"\n search_type = \"term\"\n if wildcard:\n search_type = \"wildcard\"\n match_job_name = Q(search_type, br_job_name__raw=job_name)\n range_time = Q(\"range\", **{\"br_build_date_time\": {\"gte\": start_date, \"lt\": end_date}})\n\n combined_filters = match_job_name & range_time\n\n return make_query(\n index, combined_filters, includes=DETAILED_JOB[\"includes\"], excludes=DETAILED_JOB[\"excludes\"], size=size\n )\n",
"id": "9372664",
"language": "Python",
"matching_score": 5.082693576812744,
"max_stars_count": 5,
"path": "ebr_connector/prepacked_queries/multi_jobs.py"
},
{
"content": "\"\"\"\nCollection of queries that return a single result (in dictionary form)\n\"\"\"\n\nfrom elasticsearch_dsl import Q\nfrom deprecated.sphinx import deprecated\n\nfrom ebr_connector.prepacked_queries import DEPRECATION_MESSAGE\nfrom ebr_connector.prepacked_queries.query import make_query, DETAILED_JOB\n\n\n@deprecated(version=\"0.1.1\", reason=DEPRECATION_MESSAGE)\ndef get_build(index, job_name, build_id, wildcard=False):\n \"\"\"\n Get result of a single build from the elastic search database by its ID and the name of the job it belongs to.\n\n Args:\n index: Elastic search index to use\n job_name: Name of job to search within\n build_id: ID of the build\n wildcard: When true, search with wildcard instead of exact match\n Returns:\n A single dict of the results from the build requested\n \"\"\"\n search_type = \"term\"\n if wildcard:\n search_type = \"wildcard\"\n match_job_name = Q(search_type, br_job_name__raw=job_name)\n match_build_id = Q(search_type, br_build_id_key=build_id)\n combined_filter = match_job_name + match_build_id\n result = make_query(\n index, combined_filter, includes=DETAILED_JOB[\"includes\"], excludes=DETAILED_JOB[\"excludes\"], size=1\n )\n\n return result[0]\n",
"id": "4626756",
"language": "Python",
"matching_score": 1.2194756269454956,
"max_stars_count": 5,
"path": "ebr_connector/prepacked_queries/single_jobs.py"
},
{
"content": "\"\"\"The dynamic template configuration used when creating index templates.\nWe prefer dynamic templates over fixed ones to avoid updating the Elasticsearch cluster.\n\"\"\"\n\nDYNAMIC_TEMPLATES = [\n {\"nested_fields\": {\"match\": \"br_*_nested\", \"mapping\": {\"type\": \"nested\"}}},\n {\"count_fields\": {\"match\": \"br_*_count\", \"mapping\": {\"type\": \"integer\"}}},\n {\"duration_fields\": {\"match\": \"br_*duration*\", \"mapping\": {\"type\": \"float\"}}},\n {\n \"keyword_only_fields\": {\n \"match\": \"br_*_key\",\n \"match_mapping_type\": \"string\",\n \"mapping\": {\"type\": \"keyword\", \"ignore_above\": 256},\n }\n },\n {\n \"message_field\": {\n \"mapping\": {\"norms\": False, \"type\": \"text\"},\n \"match_mapping_type\": \"string\",\n \"path_match\": \"message\",\n }\n },\n {\n \"string_fields\": {\n \"mapping\": {\"fields\": {\"raw\": {\"ignore_above\": 256, \"type\": \"keyword\"}}, \"norms\": False, \"type\": \"text\"},\n \"match\": \"*\",\n \"match_mapping_type\": \"string\",\n }\n },\n]\n",
"id": "11163452",
"language": "Python",
"matching_score": 0.02499815635383129,
"max_stars_count": 5,
"path": "ebr_connector/schema/dynamic_template.py"
},
{
"content": "\"\"\"\nprepacked queries\n\"\"\"\n\nimport warnings\n\nwarnings.warn(\"prepacked_queries are deprecated\", DeprecationWarning, stacklevel=2)\n\n\nDEPRECATION_MESSAGE = (\n \"The prepacked_queries modules will be removed. A replacement is under consideration but not guaranteed.\"\n)\n",
"id": "3269693",
"language": "Python",
"matching_score": 0,
"max_stars_count": 5,
"path": "ebr_connector/prepacked_queries/__init__.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Top-level package for ebr-connector.\"\"\"\n\n__project__ = \"ebr-connector\"\n__author__ = \"<NAME>\"\n__email__ = \"<EMAIL>\"\n__version__ = \"0.1.6-dev\"\n",
"id": "7714074",
"language": "Python",
"matching_score": 0,
"max_stars_count": 5,
"path": "ebr_connector/__init__.py"
}
] | 2.045187 |
disturb16 | [
{
"content": "\"\"\"\nDoes the following:\n1. Inits\n\"\"\"\nfrom __future__ import print_function\nimport os\nfrom subprocess import Popen\n\n# Get the root project directory\nPROJECT_DIRECTORY = os.path.realpath(os.path.curdir)\n\ndef init_git():\n \"\"\"\n Initialises git on the new project folder\n \"\"\"\n GIT_COMMANDS = [\n [\"git\", \"init\"],\n [\"git\", \"add\", \".\"],\n [\"git\", \"commit\", \"-a\", \"-m\", \"Initial Commit.\"]\n ]\n\n for command in GIT_COMMANDS:\n git = Popen(command, cwd=PROJECT_DIRECTORY)\n git.wait()\n\n\n\n# 1. Initialize Git\ninit_git()\n\n# 2. Format the project\nos.system(\"gofmt -s -w .\")\n",
"id": "9440398",
"language": "Python",
"matching_score": 0,
"max_stars_count": 3,
"path": "hooks/post_gen_project.py"
}
] | 0 |
jainakshay91 | [
{
"content": "#!/usr/bin/env python \n\n# =============================\n# Import the necessary binaries\n# =============================\n\nimport subprocess \nimport time\nimport os, sys\nimport json, requests\nfrom multiprocessing import Pool, Process\nimport numpy as np\nimport signal \nfrom scenario_var import scenario_var \nimport logging as lg\n\n# =====================================\n# Check Presence of Storage Directories\n# =====================================\n\ndef path_checker():\n\t#print \"here\"\n\t#print os.getcwd()\n\tflag = -1; # Initialize the flag variable \n\tpath = os.getcwd() + '/Data'; # This is the path we have to check for\n\tsubpath = os.getcwd() + '/Data/Temp'; # This is the subdirectory to store data \n\tif os.path.isdir(path):\n\t\tif os.path.isdir(subpath):\n\t\t\tflag = 0; # We do not need to generate the scenario data again\n\t\telse:\n\t\t\tflag = 1; # Generate the Data if the Subpath does not exist\n\telse:\n\t\tflag = 1; # Generate the Data if the Path does not exist \n\t#print flag\n\treturn flag \n\n# ==================================\n# Create a Telegram Bot Communicator\n# ==================================\n\nTOKEN = \"\"\nURL = \"https://api.telegram.org/bot{}/\".format(TOKEN)\n\ndef get_url(url):\n\tresponse = requests.get(url)\n\tcontent = response.content.decode(\"utf8\")\n\treturn content\n\ndef get_json_from_url(url):\n\tcontent = get_url(url)\n\tjs = json.loads(content)\n\treturn js\n\ndef get_updates():\n\turl = URL + \"getUpdates\"\n\tjs = get_json_from_url(url)\n\treturn js\n\ndef last_chat_id(updates):\n\tnum_updates = len(updates[\"result\"])\n\tlast_update = num_updates - 1\n\tchat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\n\treturn chat_id\n\ndef send_message(text, chat_id):\n\turl = URL + \"sendMessage?text={}&chat_id={}\".format(text,chat_id)\n\tget_url(url)\n\n# ==========================\n# Parallel Process Function\n# ==========================\n\ndef parallel_executor(iter_num):\n\tprint (\"Iteration number:\", iter_num)\n\tsubprocess.call(['python',os.path.join(os.getcwd(),\"main.py\"), '-iter', str(iter_num), '-interf', str(0)])\n\ndef Single_assoc(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\tprint MCMC_iter\n\t\t\tprint chat_frequency\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '0','-dual', '0','-bhaul', '0','-latency', '0', '-mipGP', '0'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for SA\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\n\ndef Dual_assoc(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # 
Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '0','-dual', '1','-bhaul', '0','-latency', '0', '-mipGP', '0'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for DA\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\n\ndef DA_MRT(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '1','-dual', '1','-bhaul', '0','-latency', '0', '-mipGP', '1'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for DA + MRT\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\n\ndef DA_BHCAP(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '0','-dual', '1','-bhaul', '1','-latency', '0', '-mipGP', '1'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for DA + BHCAP\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\n\ndef DA_BHCAP_LAT(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '0','-dual', '1','-bhaul', '1','-latency', '1', '-mipGP', 
'1'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for DA + BHCAP + LAT\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\n\ndef DA_LAT(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '0','-dual', '1','-bhaul', '0','-latency', '1', '-mipGP', '0'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for DA + LAT\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\t\n\ndef DA_MRT_LAT(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '1','-dual', '1','-bhaul', '0','-latency', '1', '-mipGP', '0'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for DA + MRT + LAT\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\t\n\ndef SA_MRT(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '1','-dual', '0','-bhaul', '0','-latency', '0', '-mipGP', '0'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for SA + 
MRT\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\t\n\ndef SA_BHCAP(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '0','-dual', '0','-bhaul', '1','-latency', '0', '-mipGP', '1'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for SA + BHCAP\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\t\n\ndef SA_BHCAP_LAT(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '0','-dual', '0','-bhaul', '1','-latency', '1', '-mipGP', '1'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for SA + BHCAP + LAT\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\t\n\ndef SA_LAT(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '0','-dual', '0','-bhaul', '0','-latency', '1', '-mipGP', '0'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for SA + LAT\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has 
encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\t\n\n\ndef SA_MRT_LAT(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '1','-dual', '0','-bhaul', '0','-latency', '1', '-mipGP', '0'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for SA + MRT + LAT\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\t\n\ndef DA_MRT_BHCAP(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '1','-dual', '1','-bhaul', '1','-latency', '0', '-mipGP', '1'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for DA + MRT + BHCAP\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\t\n\ndef DA_MRT_BHCAP_LAT(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '1','-dual', '1','-bhaul', '1','-latency', '1', '-mipGP', '1'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for DA + MRT + BHCAP + LAT\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing 
for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\t\n\n\ndef SA_MRT_BHCAP(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '1','-dual', '0','-bhaul', '1','-latency', '0', '-mipGP', '0'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for SA + MRT + BHCAP\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\t\n\ndef SA_MRT_BHCAP_LAT(MCMC_iter, chat_frequency):\n\tfor i in range(MCMC_iter):\n\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\ttry:\n\t\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t\t\tsubprocess.call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '1','-dual', '0','-bhaul', '1','-latency', '1', '-mipGP', '0'])\n\t\t\t#chat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t\t\tif i%chat_frequency == 0:\n\t\t\t\ttry:\n\t\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed for SA + MRT + BHCAP + LAT\"\n\t\t\t\t\tsend_message(message,chat) # Send the Message \n\t\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t\t\t\t\tpass\n\t\texcept:\n\t\t\tmessage = \"Programme has encountered an error\"\n\t\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t\t\tmessage = \"Ending the Processing for Debugging\"\n\t\t\tsend_message(message, chat) # Send the End process message\n\t\t\tsys.exit('Error Encountered')\t\n\n# ======================\n# Monte Carlo Simulation\n# ======================\n\n\nsys.path.append(os.getcwd()); # Add current working directory to python path\nos.chdir(os.getcwd()); # Change to the current working directory\nchat_frequency = 10; # Select the divider so as to obtain timely update messages\n#num_processors = int(int(subprocess.check_output(['nproc']))/2)*2; # Number of Processors to be utilized \nnum_processors = 2\nscn = scenario_var();\nMCMC_iter = scn.MCMC_iter; # Number of Monte Carlo Iterations\n\n\n# =============\n# Main Function \n\nif __name__ == '__main__':\n\n\tdat_gen_flag = path_checker(); # Get the Data generation flag value\n\n\tif dat_gen_flag == 1:\n\t\t#print \"In the Generator\"\n\t\tfile_indexer = 0; # For File Indexing\n\t\tpool = Pool(processes = num_processors); # Creates a pool of 10 parallel processes to be done\n\t\tfor i in range(0, MCMC_iter/num_processors):\n\t\t\tprint \"Entering Round \" + str(i) + \" of Processing\"\n\t\t\tprint \"------------------------------\"\n\t\t\tprint \"\"\n\t\t\tidx_range = np.arange(file_indexer, file_indexer + num_processors); # Data file Index 
numbers\n\t\t\tpool.map(parallel_executor,idx_range.tolist()); # Maps the function to parallel processes. \n\t\t\tfile_indexer = file_indexer + num_processors; # Increase the Iterator number\n\t\t\tprint file_indexer\n\t\tpool.close()\n\t\tpool.join()\n\t\t\n\tprint \"Entering the Optimizer\"\n\t\n\t# =====================================================\n\t# Multiple Processes for Parallel Scenario Optimization\n\n\tp1 = Process(target = Single_assoc, args = (MCMC_iter,chat_frequency))\n\tp2 = Process(target = Dual_assoc, args = (MCMC_iter, chat_frequency))\n\tp3 = Process(target = DA_MRT, args = (MCMC_iter, chat_frequency))\n\tp4 = Process(target = DA_BHCAP, args = (MCMC_iter, chat_frequency))\n\tp5 = Process(target = DA_BHCAP_LAT, args = (MCMC_iter, chat_frequency))\n\tp6 = Process(target = DA_LAT, args = (MCMC_iter, chat_frequency))\n\tp7 = Process(target = SA_MRT, args = (MCMC_iter, chat_frequency))\n\tp8 = Process(target = SA_LAT, args = (MCMC_iter, chat_frequency))\n\tp9 = Process(target = SA_BHCAP_LAT, args = (MCMC_iter, chat_frequency))\n\tp10 = Process(target = SA_BHCAP, args = (MCMC_iter, chat_frequency))\n\tp11 = Process(target = DA_MRT_LAT, args = (MCMC_iter, chat_frequency))\n\tp12 = Process(target = SA_MRT_LAT, args = (MCMC_iter, chat_frequency))\n\tp13 = Process(target = DA_MRT_BHCAP, args = (MCMC_iter, chat_frequency))\n\tp14 = Process(target = DA_MRT_BHCAP_LAT, args = (MCMC_iter, chat_frequency))\n\tp15 = Process(target = SA_MRT_BHCAP, args = (MCMC_iter, chat_frequency))\n\tp16 = Process(target = SA_MRT_BHCAP_LAT, args = (MCMC_iter, chat_frequency))\n\n\n\tp1.start()\n\tp2.start()\n\tp3.start()\n\tp4.start()\n\tp5.start()\n\tp6.start()\n\tp7.start()\n\tp8.start()\n\tp9.start()\n\tp10.start()\n\tp11.start()\n\tp12.start()\n\tp13.start()\n\tp14.start()\n\tp15.start()\n\tp16.start()\n\n\tp1.join()\n\tp2.join()\n\tp3.join()\n\tp4.join()\n\tp5.join()\n\tp6.join()\n\tp7.join()\n\tp8.join()\n\tp9.join()\n\tp10.join()\n\tp11.join()\n\tp12.join()\n\tp13.join()\n\tp14.join()\n\tp15.join()\n\tp16.join()\n\t\n\t\n\n\t#for i in range(MCMC_iter):\n\t#\ttry:\n\t#\t\t#subprocess.check_call(['python',os.path.join(os.getcwd(),\"main.py\")]); # Open Main File for Generating the scenario\n\t#\t\tsubprocess.check_call(['python',os.path.join(os.getcwd(),\"optimizer_func.py\"),'-iter', str(i) ,'-minRate', '0','-dual', '0','-bhaul', '0','-latency', '1'])\n\t#\t\tchat = last_chat_id(get_updates()) # Get the Bot Chat ID\n\t#\t\tif i%chat_frequency == 0:\n\t#\t\t\ttry:\n\t#\t\t\t\tmessage = \"Execution of Iteration \" + str(i) + \" Completed\"\n\t#\t\t\t\tsend_message(message,chat) # Send the Message \n\t#\t\t\texcept(RuntimeError, TypeError, NameError, IndexError):\n\t#\t\t\t\tpass\n\t#\texcept:\n\t#\t\tmessage = \"Programme has encountered an error\"\n\t#\t\tsend_message(message, chat) # Send the message if an error has been encountered in the code\n\t#\t\tmessage = \"Ending the Processing for Debugging\"\n\t#\t\tsend_message(message, chat) # Send the End process message\n\t#\t\tsys.exit('Error Encountered')\n",
"id": "1663867",
"language": "Python",
"matching_score": 2.172032356262207,
"max_stars_count": 1,
"path": "MCMC.py"
},
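The sixteen per-configuration runners in MCMC.py above differ only in the flag values they pass to optimizer_func.py before fanning out as separate processes. The block below is a minimal sketch, not the original code: it keeps only the flag combinations visible above (all with -minRate 1), omits the Telegram helpers (send_message, last_chat_id, get_updates) to stay self-contained, and assumes optimizer_func.py is on the current working directory as in MCMC.py.

# Minimal sketch, assuming optimizer_func.py's CLI exactly as invoked in MCMC.py above.
# One parameterized worker per flag combination replaces the near-identical functions;
# Telegram progress messages are left out to keep the sketch runnable on its own.
import os
import subprocess
from multiprocessing import Process

CONFIGS = {
    # label: (dual, bhaul, latency, mipGP) -- values taken from the calls above
    "SA_MRT_LAT":       ('0', '0', '1', '0'),
    "SA_MRT_BHCAP":     ('0', '1', '0', '0'),
    "SA_MRT_BHCAP_LAT": ('0', '1', '1', '0'),
    "DA_MRT_BHCAP":     ('1', '1', '0', '1'),
    "DA_MRT_BHCAP_LAT": ('1', '1', '1', '1'),
}

def run_config(label, flags, mcmc_iter):
    # Run every Monte Carlo iteration of one optimizer configuration sequentially
    dual, bhaul, latency, mipgp = flags
    for i in range(mcmc_iter):
        subprocess.call(['python', os.path.join(os.getcwd(), 'optimizer_func.py'),
                         '-iter', str(i), '-minRate', '1', '-dual', dual,
                         '-bhaul', bhaul, '-latency', latency, '-mipGP', mipgp])

if __name__ == '__main__':
    # One process per configuration, mirroring the Process/start/join pattern above
    procs = [Process(target=run_config, args=(lbl, flg, 2)) for lbl, flg in CONFIGS.items()]
    for p in procs:
        p.start()
    for p in procs:
        p.join()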
{
"content": "#!/usr/bin/env python\n\n# =============================\n# Import the necessary binaries\n# =============================\n\nfrom gurobipy import *\nimport numpy as np \nimport plotter\nimport os\nfrom scenario_var import scenario_var \nfrom argparse import ArgumentParser\nfrom rssi_assoc import baseline_assoc\nimport time\nimport csv\nimport csvsaver\n\n# =======================\n# Optimizer Configuration\n# =======================\n\nparser = ArgumentParser(description = 'The Optimizer Function for User Association'); # Initializing the class variable\n\n# =========================================================\n# Add the Command Line Arguments to Configure the Optimizer\n\nparser.add_argument('-iter', type = int, help = 'Iteration Number of the Simulation');\nparser.add_argument('-minRate', type = int, help = 'Minimum Rate Constraint Flag');\nparser.add_argument('-dual', type = int, help = 'Dual Connectivity Flag');\nparser.add_argument('-bhaul', type = int, help = 'Backhaul Capacity Constraint Flag');\nparser.add_argument('-latency', type = int, help = 'Path Latency Constraint Flag');\nparser.add_argument('-mipGP', type = int, help = 'Optimizer bound Interval'); \n\nargs = parser.parse_args(); # Parse the Arguments\n\n#print vars(args)['iter']\n\n# =====================================\n# Check Presence of Storage Directories\n# =====================================\n\npath = os.getcwd() + '/Data'; # This is the path we have to check for\nsubpath = os.getcwd() + '/Data/Process'; # This is the subdirectory to store data \nif os.path.isdir(path):\n\tprint \"Directory to save data found\"\n\tprint \"----------------------------\"\n\tprint \"\"\n\tif os.path.isdir(subpath):\n\t\tprint \"Subdirectory found\"\n\t\tprint \"------------------\"\n\t\tprint \"\"\n\telse: \n\t\tos.mkdir(subpath)\n\t\tprint \"Subdirectory Created\"\n\t\tprint \"--------------------\"\n\t\tprint \"\"\nelse:\n\tos.mkdir(path); # Create this directory \n\tos.mkdir(subpath); # Created the Subdirectory \n\tprint \"Created the Directory to save data\"\n\tprint \"----------------------------------\"\n\tprint \"\"\n\n# ==============================\n# Create the Model and Variables\n# ==============================\nscn = scenario_var(); # Initializing the class variables\n\nData = {}; # Dictionary that holds the data \nnum_iter = ((scn.num_users_max - scn.num_users_min)/scn.user_steps_siml); \n\n# ==============================\n# Load Massive Machine Type Data\n# ==============================\n\noptim_data_mMTC = np.load(os.getcwd() + '/Data/Temp/optim_var_mMTC'+ str(vars(args)['iter']) +'.npz', allow_pickle = True); # Extracting the mMTC data \n#Rx_power_mMTC = optim_data_mMTC['arr_11']; # Received Power from Small cells for all the mMTC devices\nnum_AP_mMTC = optim_data_mMTC['arr_0']; # Number of mMTC devices per AP (MC first and then SC)\ndata_rate_consum_BH_mMTC = np.empty((num_AP_mMTC.shape[0],1)) # Consumed Data Rate \n\n#print np.sum(num_AP_mMTC)\n# ====> The bandwidth consumption at each cell by the mMTC devices (In the Backhaul)\nfor i in range(num_AP_mMTC.shape[0]):\n\tdata_rate_consum_BH_mMTC[i] = np.sum(np.random.random_sample((num_AP_mMTC[i],1))*(scn.mMTC_maxrate[1] - scn.mMTC_maxrate[0]) + scn.mMTC_maxrate[0])\n\n#RX_power_mc_mMTC = optim_data_mMTC['arr_12']; # Received Power from Macro cells for all the mMTC devices\n#RX_power_mMTC = np.hstack((Rx_power_sc_mMTC, RX_power_mc_mMTC)); # Stack all the received powers for the mMTC users\n\nfor N in range(0,num_iter):\n\t#k = 1\n\tprint 
\"==============================================\"\n\tprint \"Dataset # \" + str(N) + \": Collecting the Stored Variables\"\n\n\toptim_data = np.load(os.getcwd() + '/Data/Temp/optim_var_'+ str(N) + str(vars(args)['iter']) +'.npz', allow_pickle = True)\n\tsinr_eMBB = optim_data['arr_0']; # Load the SINR data to be used for the optimization\n\t#user_AP_assoc = optim_data['arr_1'].item()['user_app' + str(N)]; # Load the User-Applications Association data to be used for the optimization\n\t#sinr_eMBB = np.empty([np.sum(user_AP_assoc[:,1]),sinr_APs.shape[1]],dtype=float); # Array that holds the Application SINR values\n\tsinr_pad_val = optim_data['arr_3']; # In the small cell calculation we use an sinr pad value for ease of computation\n\tnum_scbs = optim_data['arr_4']; # Number of Small cells\n\tnum_mcbs = optim_data['arr_5']; # Number of Macro cells\n\tmat_wlbh_sc = optim_data['arr_6']; # Wireless backhaul matrix for Small Cells\n\tmat_wrdbh_sc = optim_data['arr_7']; # Wired backhaul matrix for Macro cells\n\tHops_MC = optim_data['arr_8']; # Number of hops to the IMS core from Macro cells\n\tHops_SC = optim_data['arr_9']; # Number of hops to the IMS core from Small cells\n\tBH_Capacity_SC = optim_data['arr_10']; # Backhaul capacity for Small cells\n\tBH_Capacity_MC = scn.fib_BH_MC_capacity + 10*scn.avg_fib_BH_capacity; # Backhaul capacity for Macro cells\n\tSNR_eMBB = optim_data['arr_11']; # Small Cell Received Power \n\tSCBS_per_MCBS = optim_data['arr_12']; # Number of small cells per macro cell\n\t\n\t#RX_power_mc = optim_data['arr_13']; # Macro Cell Received Power \n\t#RX_power = np.hstack((RX_power_mc,RX_power_sc)); # Stack all the received powers for the eMBB users\n\t#SNR_eMBB = np.empty([np.sum(user_AP_assoc[:,1]),sinr_APs.shape[1]],dtype=float); # Array that holds the Application SINR values\n\t# ==================================\n\t# Print to Understand Matrix Formats\n\t#print num_scbs\n\t#print num_mcbs\n\n\t#print Hops_MC.shape\n\t#print \"===========\"\n\t#print Hops_SC.shape\n\t#print \"===========\"\n\t#print Hops_MC\n\t#print \"===========\"\n\t#print Hops_SC\n\t#print BH_Capacity_SC.shape \n\t#print mat_wlbh_sc\n\t#print mat_wlbh_sc.shape \n\t#print \"=========\"\n\t#print mat_wrdbh_sc\n\t#print mat_wrdbh_sc.shape\n\t#print sinr_APs.shape\n\n\t# ================================================================\n\t# Update the Available Backhaul Capacity based on mMTC Consumption\n\t#print num_scbs\n\tBH_Capacity_SC = BH_Capacity_SC - data_rate_consum_BH_mMTC[0:num_scbs]; # We reduce the available backhaul capacity for small cells\n\tBH_Capacity_MC = BH_Capacity_MC - data_rate_consum_BH_mMTC[num_scbs:num_scbs+num_mcbs]; # We reduce the available backhaul capacity for macro cells\n\n\t# ===> Add the consumption from mMTC devices associated to small cell, which is in turn associated with a macro cell\n\n\tini_idx = 0; # Initial index\n\tnex_idx = SCBS_per_MCBS[0]; # The next index\n\tfor i in range(SCBS_per_MCBS.shape[0]):\n\t\tBH_Capacity_MC[i] = BH_Capacity_MC[i] - np.sum(data_rate_consum_BH_mMTC[ini_idx:nex_idx]) # Reduce the Available MC backhaul capacity further due the mMTC devices on the SCs associated to it\n\t\tif i < (SCBS_per_MCBS.shape[0] - 1):\n\t\t\tini_idx = nex_idx # Update the indices\n\t\t\tnex_idx = nex_idx + SCBS_per_MCBS[i+1] # Update the indices\n\t\telse:\n\t\t\tbreak\n\n\t#print \"Creating the Application to Access Point SINR association matrix\"\n\t#iter = 0; # Application number tracking\n\t# for i in range(0,sinr_APs.shape[0]):\n\t# \t#sinr_eMBB 
[iter:iter + np.sum(user_AP_assoc[i,1]), :] = np.delete(np.outer(user_AP_assoc[i,1],sinr_APs[i,:]), np.where(user_AP_assoc[i,1] == 0), 0);# Application to Base Station SINR matrix \n\t# \tSNR_eMBB[iter:iter + np.sum(user_AP_assoc[i,1]), :] = np.delete(np.outer(user_AP_assoc[i,1],SNR_iter[i,:]), np.where(user_AP_assoc[i,1] == 0), 0);# Application to Base Station SINR matrix \n\t# \titer = iter + np.sum(user_AP_assoc[i,1]); # Incrementing the iterator for the next user-application sets\n\t\n\t#sinr_eMBB = sinr_APs\n\t#SNR_eMBB = SNR_iter\n\t#np.savetxt('SINR.csv',sinr_eMBB, delimiter=\",\")\n\t#print sinr_eMBB\n\tcsvsaver.csvsaver(sinr_eMBB, [], \"SINR\"+str(N)+\".csv\")\n\t#print sinr_eMBB\n\t#print sinr_applications[0]\n\n\tprint \"Calculating the Rate Matrix\"\n\trate = np.empty((sinr_eMBB.shape[0], sinr_eMBB.shape[1])); # Initializing the received data rate matrix\n\n\tfor i in range(0, sinr_eMBB.shape[1]):\n\t\t#if i <= num_scbs:\n\t\t\t#rate[:,i] = np.where(sinr_eMBB[:,i] == sinr_pad_val, 0, scn.sc_bw*np.log2(1 + 10**(sinr_eMBB[:,i]/10))); # Rate calculation for SC\n\t\t\t#rate[:,i] = np.where(sinr_eMBB[:,i] == sinr_pad_val, 0, scn.usr_scbw*np.log2(1 + 10**(sinr_eMBB[:,i]/10))); # Rate calculation for SC\n\t\t\t#rate[:,i] = np.where(sinr_eMBB[:,i] == sinr_pad_val, 0, np.log2(1 + 10**(sinr_eMBB[:,i]/10))); # Rate calculation for SC\t\t\t\t\n\t\t#else:\n\t\t\t#rate[:,i] = np.where(sinr_eMBB[:,i] == sinr_pad_val, 0, scn.mc_bw*np.log2(1 + 10**(sinr_eMBB[:,i]/10))); # Rate calculation for MC \n\t\t\trate[:,i] = np.where(sinr_eMBB[:,i] == sinr_pad_val, 0, np.log2(1 + 10**(sinr_eMBB[:,i]/10))); # Rate calculation for MC \n\n\tvar_row_num = sinr_eMBB.shape[0];\n\tvar_col_num = sinr_eMBB.shape[1];\n\n\t#print var_row_num\n\n\tprint \"Calculating the AP path latencies\"\n\t#print Hops_SC\n\tbh_paths = np.empty((num_scbs+num_mcbs)); # We consider just a single path per AP\n\tHops_sc = (np.sum((Hops_SC - 1), axis = 1)).reshape(Hops_SC.shape[0],1); # Reshaping the Small cells hops count matrix\n\t#print mat_wlbh_sc\n\t#print mat_wrdbh_sc\n\t#print Hops_sc\n\tfor i in range(0,Hops_sc.shape[0]):\n\t\tif np.nonzero(mat_wlbh_sc[i,:]):\n\t\t\tbh_paths[i] = Hops_sc[i,:]*scn.wrd_link_delay + scn.wl_link_delay; # Small cell path latency with wireless backhaul\n\t\telif np.nonzero(mat_wrdbh_sc[i,:]):\n\t\t\tbh_paths[i] = (Hops_sc[i,:] + 1)*scn.wrd_link_delay; # Small cell path latency with wired backhaul\n\t#bh_paths[:Hops_sc.shape[0],:] = Hops_sc*scn.wrd_link_delay + scn.wl_link_delay; # Small cell path latency\n\tbh_paths[Hops_sc.shape[0]:bh_paths.shape[0]] = Hops_MC*scn.wrd_link_delay; # Macro cell path latency \n\t#print bh_paths\n\t#print var_col_num\n\n\t#print rate\n\n\t# =================================\n\t# Baseline Cell Selection Algorithm\n\t# =================================\n\n\t\n\t#DR_eMBB_scbw, DR_eMBB_fscbw, DR_mMTC, DR_eMBB_sinr_scbw, DR_eMBB_sinr_fscbw, DR_mMTC_sinr = baseline_assoc(RX_power_eMBB, RX_power_mMTC, sinr_eMBB, sinr_mMTC, np, scn); # Baseline association function \n\t#np.savez_compressed(os.getcwd()+'/Data/Temp/Baseline'+ str(vars(args)['iter']) + str(k), DR_eMBB_scbw, DR_eMBB_fscbw, DR_mMTC, DR_eMBB_sinr_scbw, DR_eMBB_sinr_fscbw, DR_mMTC_sinr, allow_pickle = True); # Save these variables to be utilized by the optimizer\n\n\t\n\tTot_Data_Rate, Associated_users = baseline_assoc(SNR_eMBB, 0, sinr_eMBB, 0, BH_Capacity_SC, BH_Capacity_MC, num_scbs, num_mcbs, np, scn, 0); # Baseline association function \n\tnp.savez_compressed(os.getcwd()+'/Data/Process/Baseline'+ 
str(vars(args)['iter']) + str(N), Tot_Data_Rate, Associated_users, allow_pickle = True); # Save these variables to be utilized by the optimizer\n\t\n\tTot_Data_Rate_min, Associated_users_min = baseline_assoc(SNR_eMBB, 0, sinr_eMBB, 0, BH_Capacity_SC, BH_Capacity_MC, num_scbs, num_mcbs, np, scn, 1); # Baseline association function with minimum rate\n\tnp.savez_compressed(os.getcwd()+'/Data/Process/Baseline_minrate'+str(vars(args)['iter'])+str(N), Tot_Data_Rate_min, Associated_users_min, allow_pickle = True); # Save these variables to plot the baseline with min rate also \n\t\n\t# =========\n\t# Optimizer\n\t# =========\n\t#print rate \n\tprint \"Entering the Optimizer\"\n\n\ttry:\n\t\tm = Model(\"mip1\") # Creates the MIP model \n\t\tX = m.addVars(var_row_num, var_col_num , vtype = GRB.BINARY, name = \"X\"); # We create the X matrix that has to be found \n\t\tBW_MC = m.addVars(var_row_num, 5, vtype = GRB.BINARY, name = \"bwmc\"); # We create the MC bandwidth matrix \n\t\tBW_SC = m.addVars(var_row_num, 3, vtype = GRB.BINARY, name = \"bwsc\"); # We create the SC bandwidth matrix\n\t\tG_SC = m.addVars(int(var_row_num), int(num_scbs), 3, vtype = GRB.BINARY, name = \"GSC\"); # Linearizing variable for small cells\n\t\tG_MC = m.addVars(int(var_row_num), int(num_mcbs), 5, vtype = GRB.BINARY, name = \"GMC\"); # Linearizing variable for macro cells\n\n\t\t# ===> Establish the Objective Function\n\n\t\tobj_func = quicksum(G_SC[i,j,k]*scn.BW_SC[k]*rate[i,j] for i in range(var_row_num) for j in range(num_scbs) for k in range(3)) + quicksum(G_MC[i,j - num_scbs,k]*scn.BW_MC[k]*rate[i,j] for i in range(var_row_num) for j in range(num_scbs, num_scbs + num_mcbs) for k in range(5)); \n\t\t#obj_func = LinExpr(); # Initialize the objective function\n\t\t#obj_func.addTerms(rate,X);\n\t\t# for i in range(0,var_row_num):\n\t\t# \tfor j in range(0, var_col_num):\n\t\t# \t\t\tif j < num_scbs:\n\t\t# \t\t\t\tfor k in range(3):\n\t\t# \t\t\t\t\tobj_func = obj_func + (G_SC[i,j,k]*scn.BW_SC[k]*rate[i,j]); # Small cell contribution \n\t\t# \t\t\telif j >= num_scbs:\n\t\t# \t\t\t\tfor f in range(5):\n\t\t# \t\t\t\t\tobj_func = obj_func + (G_MC[i,j - num_scbs,f]*scn.BW_MC[k]*rate[i,j]); # Macro cell contribution\n\t\t# \t\t\t#print obj_func\n\n\t\t#print obj_func\n\n\t\t# ===================================================\n\t\t# Set up the Dual and Single Connectivity Constraints\n\n\t\tDC = m.addVars(var_row_num,1, name = \"DC\"); # Initializing the Constraint Variables\n\t\tfor i in range(0,var_row_num):\n\t\t\t#DC[i,0] = X.sum(i,'*'); # Constraint Expression\n\t\t\tDC[i,0] = quicksum(X[i,j] for j in range(var_col_num))\n\t\t\n\t\tMC = m.addVars(var_row_num,1, name = \"K\"); # Initializing the Constraint Variables with MC \n\t\tfor i in range(0,var_row_num):\n\t\t\t#MC[i,0] = X.sum(i,np.arange(num_scbs,num_scbs+num_mcbs).tolist()); # Macro Cell Constraint Expression\n\t\t\tMC[i,0] = quicksum(X[i,j] for j in range(num_scbs, num_mcbs+num_scbs)) # Macro Cell Constraint Expression\n\n\t\tSC = m.addVars(var_row_num,1, name = \"J\"); # Initializing the Constraint Variable with SC\n\t\tfor i in range(0,var_row_num):\n\t\t\t#SC[i,0] = X.sum(i,np.arange(0,num_scbs).tolist()); # Small Cell Constraint Expression\n\t\t\tSC[i,0] = quicksum(X[i,j] for j in range(num_scbs)) # Small Cell Constraint Expression\n\t\t# ======================================================= \n\t\t# Set up the Minimum Rate Constraint for the Applications\n\n\n\t\tmin_RATE = m.addVars(var_row_num, 1, name = \"min_RATE\"); # Initializing the Constraint 
variable\n\t\tfor i in range(0,var_row_num):\n\t\t\tmin_RATE[i,0] = quicksum(rate[i,j]*scn.BW_SC[k]*G_SC[i,j,k] for j in range(num_scbs) for k in range(3)) + quicksum(rate[i,j]*scn.BW_MC[k]*G_MC[i,j - num_scbs,k] for j in range(num_scbs, num_scbs+num_mcbs) for k in range(5))\n\t\t\t# for j in range(0, var_col_num):\n\t\t\t# \tif j < num_scbs:\n\t\t\t# \t\tfor k in range(3):\n\t\t\t# \t\t\tmin_RATE[i,0] = min_RATE[i,0] + rate[i,j]*scn.BW_SC[k]*G_SC[i,j,k]; # Constraint expression\n\n\t\t\t# \telif j >= num_scbs:\n\t\t\t# \t\tfor k in range(5):\n\t\t\t# \t\t\tmin_RATE[i,0] = min_RATE[i,0] + rate[i,j]*scn.BW_MC[k]*G_MC[i,j - num_scbs,k]; # Constraint expression\n\n\n\t\t# ===> Set up the Resource Allocation Constraint for an AP\n\n\t\tRB = m.addVars(var_col_num, 1, name = \"Subcarriers\"); # Allocated Subcarriers\n\t\tfor j in range(0, var_col_num):\n\t\t\tif j < num_scbs:\n\t\t\t\tRB[j,0] = quicksum(G_SC[i,j,k]*scn.BW_SC[k] for i in range(var_row_num) for k in range(3))\n\t\t\telif j >= num_scbs:\n\t\t\t\tRB[j,0] = quicksum(G_MC[i,j - num_scbs,k]*scn.BW_MC[k] for i in range(var_row_num) for k in range(5))\n\t\t\t# for i in range(0, var_row_num):\n\t\t\t# \tif j < num_scbs:\n\t\t\t# \t\t#RB[i,0] = LinExpr([scn.usr_scbw]*var_row_num,X.select('*',i)); # Constraint Expression for SCBS\n\t\t\t# \t\t#print LinExpr(G_SC.select(i,j,'*'), scn.BW_SC)\n\t\t\t# \t\tfor k in range(3):\n\t\t\t# \t\t\t#RB[j,0] = RB[j,0] + LinExpr(G_SC.select(i,j,'*'), scn.BW_SC)\n\t\t\t# \t\t\tRB[j,0] = RB[j,0] + G_SC[i,j,k]*scn.BW_SC[k]\n\t\t\t# \telif j >= num_scbs:\n\t\t\t# \t\t#RB[j,0] = RB[j,0] + LinExpr(G_MC.select(i,j - num_scbs,'*'), scn.BW_MC)\n\t\t\t# \t\tfor k in range(5):\n\t\t\t# \t\t\tRB[j,0] = RB[j,0] + G_MC[i,j - num_scbs,k]*scn.BW_MC[k]\n\t\t\t# \t#RB[i,0] = LinExpr([scn.mc_bw]*var_row_num, X.select('*',i)); # Constraint Expression for MCBS\n\t\t#max_BW = m.addVars(var_col_num,1, name=\"Max_BW\"); # Initializing the Constraint Variable\n\t\t#for i in range(0,)\n\n\t\t# =======================================\n\t\t# Set up the Backhaul Capacity constraint \n\n\t\tBH_CAP_RES = m.addVars(var_col_num, 1, name = \"BH_CAP_RES\"); # Initializing the Constraint variable\n\t\tfor j in range(0, num_scbs):\n\t\t\t# for i in range(0, var_row_num):\n\t\t\t# \tfor k in range(3):\n\t\t\t# BH_CAP_RES[j,0] = BH_CAP_RES[j,0] + rate[i,j]*G_SC[i,j,k]*scn.BW_SC[k]; # Constraint Expression\n\t\t\tBH_CAP_RES[j,0] = quicksum(rate[i,j]*G_SC[i,j,k]*scn.BW_SC[k] for i in range(var_row_num) for k in range(3)); # Constraint Expression\n\n\t\tcount_scbs = 0; # Counter to keep track of the SCBS for a given MCBS\n\t\tfor j in range(num_scbs, num_scbs + num_mcbs):\n\t\t\tini_idx = count_scbs; # Initial index\n\t\t\tout_idx = count_scbs + SCBS_per_MCBS[j - num_scbs];\n\t\t\tBH_CAP_RES[j,0] = quicksum(rate[i,j]*G_MC[i,j - num_scbs,k]*scn.BW_MC[k] for i in range(var_row_num) for k in range(5)) + quicksum(rate[i,l]*G_SC[i,l,k]*scn.BW_SC[k] for i in range(var_row_num) for l in range(ini_idx,out_idx) for k in range(3))\n\t\t\t# for i in range(var_row_num):\n\t\t\t# \tfor k in range(5):\n\t\t\t# \t\tBH_CAP_RES[j,0] = BH_CAP_RES[j,0] + rate[i,j]*G_MC[i,j - num_scbs,k]*scn.BW_MC[k]; # Macro cell backhaul capacity computation for constraint expression\n\t\t\t# for l in range(ini_idx, out_idx):\n\t\t\t# \tBH_CAP_RES[j,0] = BH_CAP_RES[j,0] + BH_CAP_RES[l,0];\n\t\t\tcount_scbs = out_idx; # Updated the counter for the next round \n\n\t\t\t#print BH_CAP_RES[i,0]\n\n\t\t# ================================== \n\t\t# Set up the Path Latency 
constraint\n\n\t\tAP_latency = m.addVars(var_row_num, var_col_num, name=\"AP_latency\"); # Initializing the Path Latency constraint\n\t\tfor i in range(0, var_row_num):\n\t\t\tfor j in range(0, var_col_num):\n\t\t\t\tAP_latency[i,j] = bh_paths[j]*X[i,j]; # Constraint Expression\n\t\t\n\n\t\t# ============================\n\t\t# Unity Assignment Constraints\n\n\t\tU_SC = m.addVars(var_row_num, int(num_scbs), name=\"USC\");\n\t\t\n\t\tfor i in range(var_row_num):\n\t\t\t#U_SC[i,j] = LinExpr([1]*3,G_SC.select(i,j,'*'))\n\t\t\tfor j in range(num_scbs):\n\t\t\t\tU_SC[i,j] = quicksum(G_SC[i,j,k] for k in range(3))\n\n\t\tU_MC = m.addVars(var_row_num, int(num_mcbs), name=\"UMC\")\n\t\tfor i in range(var_row_num):\n\t\t\t#U_MC[i,j] = LinExpr([1]*5,G_MC.select(i,j,'*'))\n\t\t\tfor j in range(num_mcbs):\n\t\t\t\tU_MC[i,j] = quicksum(G_MC[i,j,k] for k in range(5))\n\n\n\t\t# ==========================\n\t\t# Set up the mMTC Constraint\n\n\t\t#mMTC_BW = m.addVars()\n\n\n\t\t# ======================\n\t\t# Solve the MILP problem \n\n\t\tm.setObjective(obj_func, GRB.MAXIMIZE); # This is the objective function that we aim to maximize\n\t\t\n\t\t# We add a Compulsory Resource allocation Constraint \n\n\t\tm.addConstrs((RB[i,0] <= scn.sc_bw for i in range(num_scbs)), name = 'c0'); # Small cells have their bandwidth distributed \n\t\tm.addConstrs((RB[i,0] <= scn.eNB_bw for i in range(num_scbs, num_scbs+num_mcbs)), name = 'c10')\n\t\tm.addConstrs((G_SC[i,j,k] <= BW_SC[i,k] for i in range(var_row_num) for j in range(num_scbs) for k in range(3)), name = 'l1'); # Linearization constraint 1\n\t\tm.addConstrs((G_SC[i,j,k] <= X[i,j] for i in range(var_row_num) for j in range(num_scbs) for k in range(3)), name = 'l2'); # Linearization constraint 2\n\t\tm.addConstrs((G_SC[i,j,k] >= (BW_SC[i,k] + X[i,j] -1) for i in range(var_row_num) for j in range(num_scbs) for k in range(3)), name = 'l3'); # Linearization constraint 3\n\t\tm.addConstrs((G_MC[i,j - num_scbs,k] <= BW_MC[i,k] for i in range(var_row_num) for j in range(num_scbs, num_scbs+num_mcbs) for k in range(5)), name = 'l4'); # Linearization constraint 4\n\t\tm.addConstrs((G_MC[i,j - num_scbs,k] <= X[i,j] for i in range(var_row_num) for j in range(num_scbs, num_scbs+num_mcbs) for k in range(5)), name = 'l5'); # Linearization constraint 5\n\t\tm.addConstrs((G_MC[i,j - num_scbs,k] >= (BW_MC[i,k] + X[i,j] -1) for i in range(var_row_num) for j in range(num_scbs, num_scbs+num_mcbs) for k in range(5)), name = 'l6'); # Linearization constraint 6\n\t\tm.addConstrs((U_SC[i,j] <= 1 for i in range(var_row_num) for j in range(num_scbs)),name = 'U1')\n\t\tm.addConstrs((U_MC[i,j] <= 1 for i in range(var_row_num) for j in range(num_mcbs)),name = 'U2')\n\n\t\tif vars(args)['dual'] == 0:\n\t\t\tprint \"===================\"\n\t\t\tprint \"Single Connectivity\"\n\t\t\tprint \"===================\"\n\t\t\t#m.addConstrs((U_SC[i,j] == 0 for i in range(var_row_num) for j in range(num_scbs)))\n\t\t\t#m.addConstrs((U_MC[i,j] == 1 for i in range(var_row_num) for j in range(num_mcbs)))\n\t\t\tm.addConstrs((DC[i,0] <= 1 for i in range(var_row_num)), name ='c'); # Adding the Single Connectivity constraint \n\t\t\t#m.addConstrs((MC[i,0] == 1 for i in range(var_row_num)), name ='c'); # Adding the Single Connectivity constraint \n\t\t\t#m.addConstrs((SC[i,0] == 0 for i in range(var_row_num)), name ='c14'); # Adding the Single Connectivity constraint \n\t\t\t\n\t\t\tif vars(args)['minRate'] == 1:\n\t\t\t\tm.addConstrs((min_RATE[i,0] >= scn.eMBB_minrate for i in range(var_row_num)), name ='c1'); 
# Adding the minimum rate constraint\n\t\t\telse:\n\t\t\t\tm.addConstrs((min_RATE[i,0] >= 1 for i in range(var_row_num)), name ='c15'); # Adding the minimum rate constraint for other strategies, which is to provide some data rate atleast (1 bps)\n\t\t\t\n\t\t\tif vars(args)['bhaul'] == 1:\n\t\t\t\tm.addConstrs((BH_CAP_RES[i,0] <= BH_Capacity_SC[i,0] for i in range(num_scbs)), name = 'c2'); # Adding the Backhaul capacity constraint\n\t\t\t\tm.addConstrs((BH_CAP_RES[i,0] <= BH_Capacity_MC[i-num_scbs] for i in range(num_scbs,num_scbs + num_mcbs)), name = 'c3'); # Adding the Backhaul capacity constraint\n\t\t\t\t\n\t\t\tif vars(args)['latency'] == 1:\n\t\t\t\tm.addConstrs((AP_latency[i,j] <= scn.eMBB_latency_req for i in range(0, var_row_num) for j in range(0, var_col_num)), name = 'c4'); # Path latency constraint \n\t\t\t\n\t\telif vars(args)['dual'] == 1:\t\n\t\t\tprint \"=================\"\t\n\t\t\tprint \"Dual Connectivity\"\n\t\t\tprint \"=================\"\n\t\t\tm.addConstrs((DC[i,0] == 2 for i in range(var_row_num)), name ='c'); # Adding the Dual Connectivity constraint \n\t\t\t#m.addConstrs((U_SC[i,j] <= 1 for i in range(var_row_num) for j in range(num_scbs)))\n\t\t\t#m.addConstrs((U_MC[i,j] == 1 for i in range(var_row_num) for j in range(num_mcbs)))\n\t\t\t#m.addConstrs((MC[i,0] == 1 for i in range(var_row_num)), name ='c'); # Adding the Dual Connectivity constraint \n\t\t\t#m.addConstrs((quicksum(X[i,j] for j in range(num_scbs, num_scbs+num_mcbs)) == 1 for i in range(var_row_num)), name ='c8'); # Adding the Dual Connectivity constraint \n\t\t\t#m.addConstrs((SC[i,0] <= 1 for i in range(var_row_num)), name ='c5'); # Adding the Dual Connectivity constraint \n\t\t\t\n\n\t\t\t#m.addConstrs((DC[i,0] >= 1 for i in range(var_row_num)), name ='c5'); # Adding the Dual Connectivity constraint \n\t\t\tif vars(args)['minRate'] == 1:\n\t\t\t\tm.addConstrs((min_RATE[i,0] >= scn.eMBB_minrate for i in range(var_row_num)), name ='c1'); # Adding the minimum rate constraint\n\t\t\telse:\n\t\t\t\tm.addConstrs((min_RATE[i,0] >= 1 for i in range(var_row_num)), name ='c15'); # Adding the minimum rate constraint for other strategies, which is to provide some data rate atleast (1 bps)\n\t\t\t\t\n\t\t\tif vars(args)['bhaul'] == 1:\n\t\t\t\t#print \"here\"\n\t\t\t\tm.addConstrs((BH_CAP_RES[i,0] <= BH_Capacity_SC[i,0] for i in range(num_scbs)), name = 'c2'); # Adding the Backhaul capacity constraint\n\t\t\t\tm.addConstrs((BH_CAP_RES[i,0] <= BH_Capacity_MC[i-num_scbs] for i in range(num_scbs,num_scbs + num_mcbs)), name = 'c3'); # Adding the Backhaul capacity constraint\n\t\t\t\t#print \"Applied Constraints\"\n\n\t\t\tif vars(args)['latency'] == 1:\n\t\t\t\t#print \"LATHere\"\n\t\t\t\tm.addConstrs((AP_latency[i,j] <= scn.eMBB_latency_req for i in range(0, var_row_num) for j in range(0, var_col_num)), name = 'c4'); # Path latency constraint \n\t\t\t\t#print \"Applied Constraints\"\n\t\t\t\t\t\n\t\t#m.addConstrs((min_RATE[i,0] >= scn.eMBB_minrate for i in range(var_row_num)), name ='c1'); # Adding the minimum rate constraint \n\t\t#m.addConstrs((BH_CAP_RES[i,0] <= BH_Capacity_SC[i,0] for i in range(num_scbs)), name = 'c2'); # Adding the Backhaul capacity constraint\n\t\t#m.addConstrs((BH_CAP_RES[i,0] <= BH_Capacity_MC for i in range(num_scbs,num_scbs + num_mcbs)), name = 'c3'); # Adding the Backhaul capacity constraint\n\t\t#m.addConstrs((AP_latency[i,0] <= scn.eMBB_latency_req for i in range(var_row_num)), name = 'c4'); # Path latency constraint \n\t\t\n\t\t#if vars(args)['mipGP'] == 1:\n\t\tm.Params.MIPGap = 
0.05; # Set the Upper and Lower Bound Gap to 5%\n\t\tm.Params.TIME_LIMIT = 600; # Set a timelimit of 10 seconds\n\t\tif vars(args)['dual'] == 1 and vars(args)['minRate'] == 1: \n\t\t\t\tm.Params.MIPFocus = 1; # To aid in convergence of the DC-MRT solution\n\t\t\t\tm.Params.Cuts = 3; # Aggressive Cuts for faster convergence\n\t\telse:\n\t\t\tpass\n\n\t\tm.update()\n\t\tm.write(\"debug.lp\");\n\n\t\tm.optimize()\n\t\tprint \"Optimized\"\n\t\tif m.status == 2:\n\t\t\t# ============================ \n\t\t\t# Print the Optimized Solution \n\n\t\t\tprint \"Plotting the Optimized Association\"\n\n\t\t\tX_optimal = []; # Initializing the Variable to store the optimal solution\n\n\t\t\tfor v in m.getVars():\n\t\t\t\t#print ('%s %g' % (v.varName, v.X))\n\t\t\t\tX_optimal.append(v.x); \n\t\t\t\t#print X_optimal\n\t\t\t\tif len(X_optimal) >= var_row_num*var_col_num:\n\t\t\t\t\tbreak\n\t\t\t#plotter.optimizer_plotter(np.asarray(X_optimal).reshape((var_row_num,var_col_num)));\n\t\t\tprint('Obj:', m.objVal)\n\t\t\t\n\t\t\tG_plt = []\n\t\t\tM_plt = []\n\t\t\tSCbw = []\n\t\t\tMCbw = []\n\t\t\tMC_sel = []\n\n\t\t\tfor v in m.getVars():\n\t\t\t\tif \"GSC\" in v.varName:\n\t\t\t\t\tG_plt.append(v.x)\n\t\t\t\tif \"GMC\" in v.varName:\n\t\t\t\t\tM_plt.append(v.x)\n\t\t\t\t#if \"\" in v.varName:\n\t\t\t\t#\tprint v.x\n\t\t\t#\tif \"bwmc\" in v.varName:\n\t\t\t#\t\tMCbw.append(v.x)\n\t\t\t#\tif \"bwsc\" in v.varName:\n\t\t\t#\t\tSCbw.append(v.x)\n\n\t\t\t# =============================\n\t\t\t# Visualization and Computation\t\t\t\n\n\t\t\t\n\t\t\t#fin_rate = np.zeros((var_row_num,1))\n\t\t\t#for i in range(var_row_num):\n\t\t\t#\tfor j in range(var_col_num):\n\t\t\t#\t\tif j < num_scbs:\n\t\t\t#\t\t\tfin_rate[i] = fin_rate[i] + X[i,j]*scn.BW_SC[np.where(G_plt[i,:] == 1)]*rate[i,j]\n\t\t\t#\t\telif j >= num_scbs:\n\t\t\t#\t\t\tfin_rate[i] = fin_rate[i] + X[i,j]*scn.BW_MC[np.where(M_plt[i,:] == 1)]*rate[i,j]\n\n\t\t\t#plotter.optimizer_plotter(np.asarray(G_plt).reshape((var_row_num,3)))\n\t\t\t#plotter.optimizer_plotter(np.asarray(M_plt).reshape((var_row_num,5)))\n\t\t\t#plotter.optimizer_plotter(fin_rate)\n\t\t\t#print np.sum(G_plt)\t\t\t\n\t\t\t#print np.sum(M_plt)\n\t\t\tG_plt_idx = np.asarray(G_plt).reshape((var_row_num,num_scbs,3))\n\t\t\tM_plt_idx = np.asarray(M_plt).reshape((var_row_num,num_mcbs,5))\n\t\t\tnew_rate = np.empty((rate.shape[0], rate.shape[1])); # The actual rate matrix\n\t\t\t\n\t\t\t#test = G_plt_idx[1,1,:]*scn.BW_SC\n\t\t\tGSC_compute = np.empty((var_row_num, num_scbs)); # This holds the bandwidth contribution from each small cell\n\t\t\tGMC_compute = np.empty((var_row_num, num_mcbs)); # This holds the bandwidth contribution from each macro cell\n\n\t\t\tfor i in range(var_row_num):\n\t\t\t\tfor j in range(num_scbs):\n\t\t\t\t\tGSC_compute[i,j] = sum(G_plt_idx[i,j,:]*np.asarray(scn.BW_SC[:]))\n\n\t\t\tfor i in range(var_row_num):\n\t\t\t\tfor j in range(num_mcbs):\n\t\t\t\t\tGMC_compute[i,j] = sum(M_plt_idx[i,j,:]*np.asarray(scn.BW_MC[:]))\n\n\t\t\tG_total_compute = np.concatenate((GSC_compute, GMC_compute), axis = 1) # Bandwidth Contribution matrix\n\t\t\tnew_rate = rate*G_total_compute; # New rate matrix\n\t\t\t#print new_rate\n\n\t\t\t# ==> Data rate each user gets from the SC and MC for MCSC - DC\n\n\t\t\tRate_data = np.empty((var_row_num,4))\n\t\t\tG_sum = np.empty((num_scbs,1))\n\t\t\tM_sum = np.empty((num_mcbs,1))\n\t\t\tfor i in range(var_row_num):\n\t\t\t\t# print \"==============\"\n\t\t\t\t# print (\"For user #\",i)\n\t\t\t\t# #print new_rate[i,num_scbs + 
np.nonzero(new_rate[i,num_scbs:num_scbs+num_mcbs])]\n\t\t\t\t# print(\"Small Cell Data Rate:\", new_rate[i, np.nonzero(new_rate[i,:num_scbs])])\n\t\t\t\t# print(\"Macro Cell Data Rate:\", new_rate[i, num_scbs + np.nonzero(new_rate[i,num_scbs:num_scbs+num_mcbs])])\n\t\t\t\tif new_rate[i, np.nonzero(new_rate[i,:num_scbs])].size == 0:\n\t\t\t\t\tRate_data[i,0] = 0\n\t\t\t\telse:\n\t\t\t\t\tRate_data[i,0] = np.sum(new_rate[i, np.nonzero(new_rate[i,:num_scbs])])\n\n\t\t\t\tif new_rate[i, num_scbs + np.nonzero(new_rate[i,num_scbs:num_scbs+num_mcbs])].size == 0:\n\t\t\t\t\tRate_data[i,1] = 0\n\t\t\t\telse:\n\t\t\t\t\tRate_data[i,1] = np.sum(new_rate[i, num_scbs + np.nonzero(new_rate[i,num_scbs:num_scbs+num_mcbs])])\n\t\t\t\t#print np.where(new_rate[i, num_scbs + np.nonzero(new_rate[i,num_scbs:num_scbs+num_mcbs])].size == 0 , \"Size is 0\", new_rate[i, num_scbs + np.nonzero(new_rate[i,num_scbs:num_scbs+num_mcbs])]) \n\n\t\t\t#csvsaver.csvsaver(Rate_data,[\"SC Rate\",\"MC Rate\"], \"OptimizedDataRate_user_MCSC.csv\")\n\t\t\t#print np.sum(new_rate, axis = 1)\n\t\t\tprint np.amin(np.sum(new_rate,axis = 1))\n\t\t\tprint np.amax(np.sum(new_rate,axis = 1))\n\t\t\tprint \"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\t\t\tprint (\"MINRATE Constraint satisfied?:\", np.all(np.sum(new_rate,axis = 1)>=scn.eMBB_minrate))\n\t\t\tprint \"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n\t\t\t# #print \"============================\"\n\t\t\t#print \"BHCAP check\"\n\t\t\t#bhval = 0; # Initialize the Utilized Bhaul capacity value\n\t\t\t\n\t\t\t#\t\tnew_rate[i,j] = (np.sum(G_plt_idx[i,j,:]*np.asarray(scn.BW_SC),axis = 0) + np.sum(M_plt_idx[i,j,:]*np.asarray(scn.BW_MC),axis = 0))*rate[i,j]\t\t\t\n\t\n\t\t\tG_sum[:,0] = np.sum(G_plt_idx[:,:,0] + G_plt_idx[:,:,1] + G_plt_idx[:,:,2], axis = 0)\n\t\t\tM_sum[:,0] = np.sum(M_plt_idx[:,:,0] + M_plt_idx[:,:,1] + M_plt_idx[:,:,2] + M_plt_idx[:,:,3] + M_plt_idx[:,:,4], axis = 0)\n\t\t\t#print G_sum.shape\n\t\t\t#print M_sum.shape\n\t\t\t#print (\"SC:\", G_sum)\n\t\t\t#print (\"MC:\", M_sum)\n\t\t\tcsvsaver.csvsaver(G_sum,[\"Accepted Users per SC\"], \"GS.csv\")\n\t\t\tcsvsaver.csvsaver(M_sum,[\"Accepted Users per MC\"], \"MC.csv\")\t\t\t\n\n\t\t\t# if N == (num_iter-1) and (vars(args)['dual'] == 1 or vars(args)['bhaul'] == 1 or vars(args)['minRate'] == 1 or vars(args)['latency'] == 1):\n\t\t\t# \t#plotter.optimizer_plotter(new_rate) # We get the plot for the rates with maximum number of users\n\t\t\t# \twith open(\"Rate\" + str(vars(args)['iter']) + str(vars(args)['dual']) + str(vars(args)['bhaul']) + str(vars(args)['minRate']) + str(vars(args)['latency']) + \".csv\", \"w+\") as my_csv:\n\t\t\t# \t\tcsvWriter = csv.writer(my_csv,delimiter=',')\n\t\t\t# \t\tcsvWriter.writerows(new_rate) # We write the rate matrix to the csv file for visualization\n\t\t\t# \twith open(\"OptAssignment\" + str(vars(args)['iter']) + str(vars(args)['dual']) + str(vars(args)['bhaul']) + str(vars(args)['minRate']) + str(vars(args)['latency']) + \".csv\", \"w+\") as my_csv2:\n\t\t\t# \t\tcsvWriter = csv.writer(my_csv2,delimiter=',')\n\t\t\t# \t\tcsvWriter.writerows(np.asarray(X_optimal).reshape((var_row_num,var_col_num))) # We write the optimal association matrix to csv files for analysis purposes\n\t\t\t#plotter.optimizer_plotter(M_plt_idx[:,:,0] + M_plt_idx[:,:,1] + M_plt_idx[:,:,2] + M_plt_idx[:,:,3] + M_plt_idx[:,:,4])0\n\t\t\t#plotter.optimizer_plotter(G_plt_idx[:,:,0] + G_plt_idx[:,:,1] + G_plt_idx[:,:,2])\t\n\n\t\t\t# =========================\n\t\t\t# Store Optimized Variables\n\n\t\t\tprint 
\"Saving Data\"\n\n\t\t\t#Data['X_optimal_data' + str(N)] = np.asarray(X_optimal).reshape((var_row_num,var_col_num)); # Optimal Association Matrix\n\t\t\tData['X_optimal_data' + str(N)] = (G_total_compute>0)*1; # Optimal Association Matrix\n\t\t\tData['Net_Throughput' + str(N)] = m.objVal; # Network wide throughput\n\t\t\tData['Optimal_BW' + str(N)] = G_total_compute; # Stores the optimal Bandwidth \n\t\t\tData['Rates' + str(N)] = new_rate; # Data rate matrix \n\t\t\tData['Status' + str(N)] = m.status; # Insert the status\n\t\t\tData['Apps'+str(N)] = var_row_num;\n\t\t\tData['APs'+str(N)] = var_col_num;\n\t\t\tData['Time'+str(N)] = m.Runtime;\n\t\t\tData['SINR'+str(N)] = sinr_eMBB;\n\n\t\t\t#print np.sum((G_total_compute>0)*1, axis = 1) \n\t\t\t#print np.sum((np.asarray(X_optimal).reshape((var_row_num,var_col_num))>0)*1, axis =1)\n\t\t\t#print np.nonzero((np.asarray(X_optimal).reshape((var_row_num,var_col_num))>0)*1)\n\t\t\t#print np.array_equal((G_total_compute>0)*1, (np.asarray(X_optimal).reshape((var_row_num,var_col_num))>0)*1)\n\t\t\t# ========================\n\t\t\t# Validity of the Solution\n\n\t\t\t# ===> Backhaul Capacity Utilization\n\n\t\t\tbhutil = np.empty((var_col_num,1)) # This variable will hold the utilized Backhaul capacity\n\t\t\tct = 0; # Counting the number of small cells within a macro cell\n\t\t\tfor j in range(var_col_num):\n\t\t\t\tif j < num_scbs:\n\t\t\t\t\tbhutil[j] = np.sum(new_rate[:,j]) # We sum the data rate of the users getting access from a small cell\n\t\t\t\t\t#print (\"Available Backhaul Capacity for SC#\"+str(j),BH_Capacity_SC[j,0])\n\t\t\t\telif j >= num_scbs:\n\t\t\t\t\tidx_in = ct\n\t\t\t\t\tidx_ot = ct + SCBS_per_MCBS[j - num_scbs]\n\t\t\t\t\tbhutil[j] = np.sum(new_rate[:,j]) + np.sum(bhutil[idx_in:idx_ot]) # Total BH utilization for the BH on macro cell\n\t\t\t\t\t#print (\"Available Backhaul Capacity for MC#\"+str(j-num_scbs),BH_Capacity_MC)\n\t\t\t\t\tct = idx_ot # Update the count variable for the next Macro BS\n\n\t\t\t\t#print (\"Backhaul Utilization for AP#\" + str(j), bhutil[j])\n\t\t\tData['BHUTIL'+str(N)] = bhutil; # Save the Backhaul utilization values for visualization\n\t\t\tData['AvailBHUtil_SC'+str(N)] = BH_Capacity_SC; # Save the Available Small Cell BH Capacity. For MC it is constant and can be extracted from the scenario_file \n\t\t\t# ===> Latency Provisioning \n\n\t\t\tlat_prov = np.matmul(np.asarray(X_optimal).reshape((var_row_num,var_col_num)),np.diag(bh_paths)); # This variable will hold the actual latency offered\n\t\t\t# print np.asarray(X_optimal).reshape((var_row_num,var_col_num))\n\t\t\t# print lat_prov\n\t\t\t# print np.diag(bh_paths)\n\t\t\t# #print var_row_num\n\t\t\t# for i in range(var_row_num):\n\t\t\t# \tprint (\"Latency offered for element #\"+str(i),lat_prov[i,np.nonzero(lat_prov[i,:])])\n\n\t\t\tData['LatenOff'+str(N)] = lat_prov; # Save the latency provisioned by the algorithm \n\n\t\telse:\n\t\t\tData['Status' + str(N)] = m.status; # Add the status for detecting infeasible solution\n\t\t\tprint (\"Status_Flags:\" + str(N), str(m.status))\n\t\t\tData['Apps' + str(N)] = var_row_num;\n\t\t\tData['APs' + str(N)] = var_col_num;\n\t\t\tcontinue\n\texcept GurobiError:\n\t\tprint('Error Reported')\nnp.savez_compressed(os.getcwd() +'/Data/Process/_' + str(vars(args)['iter']) + 'dat_' + str(vars(args)['dual']) + str(vars(args)['minRate']) + str(vars(args)['bhaul']) + str(vars(args)['latency']), Data, allow_pickle = True); # Saving the necessary data to generate plots later \n",
"id": "10158087",
"language": "Python",
"matching_score": 4.698001384735107,
"max_stars_count": 1,
"path": "optimizer_func.py"
},
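The MIP in optimizer_func.py above hinges on a standard linearization of a product of binaries: the helper variable G_SC[i,j,k] (respectively G_MC) stands in for X[i,j]*BW_SC[i,k] through the constraints G <= X, G <= BW and G >= X + BW - 1 (constraints l1 to l6 above), so the objective can stay linear while still selecting both an association and a bandwidth level. The block below is a toy-sized sketch of that pattern with gurobipy; the problem dimensions, bandwidth levels and rate values are invented for illustration and are not the scenario_var parameters.

# Toy sketch of the binary-product linearization used by optimizer_func.py above.
# All numbers are placeholders; a Gurobi installation/licence is assumed.
from gurobipy import Model, GRB, quicksum

n_users, n_aps, n_bw = 3, 2, 2               # toy problem size (assumed)
bw_levels = [50e6, 100e6]                    # candidate bandwidths in Hz (assumed)
rate = [[1.0, 0.5], [0.8, 0.9], [0.3, 1.2]]  # spectral efficiency per user/AP (toy)

m = Model("assoc_sketch")
X = m.addVars(n_users, n_aps, vtype=GRB.BINARY, name="X")    # user-AP association
BW = m.addVars(n_users, n_bw, vtype=GRB.BINARY, name="BW")   # bandwidth level choice
G = m.addVars(n_users, n_aps, n_bw, vtype=GRB.BINARY, name="G")  # linearized product

# G[i,j,k] = X[i,j] * BW[i,k] via the three linear constraints
m.addConstrs(G[i, j, k] <= X[i, j]
             for i in range(n_users) for j in range(n_aps) for k in range(n_bw))
m.addConstrs(G[i, j, k] <= BW[i, k]
             for i in range(n_users) for j in range(n_aps) for k in range(n_bw))
m.addConstrs(G[i, j, k] >= X[i, j] + BW[i, k] - 1
             for i in range(n_users) for j in range(n_aps) for k in range(n_bw))

# At most one bandwidth level per (user, AP) pair and single connectivity,
# mirroring the U1/U2 and '-dual 0' constraints above
m.addConstrs(quicksum(G[i, j, k] for k in range(n_bw)) <= 1
             for i in range(n_users) for j in range(n_aps))
m.addConstrs(quicksum(X[i, j] for j in range(n_aps)) <= 1 for i in range(n_users))

# Maximize the aggregate rate, i.e. sum of (chosen bandwidth) * (spectral efficiency)
m.setObjective(quicksum(G[i, j, k] * bw_levels[k] * rate[i][j]
                        for i in range(n_users) for j in range(n_aps) for k in range(n_bw)),
               GRB.MAXIMIZE)
m.optimize()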
{
"content": "#!/usr/bin/env python\n\n# ==============================\n# Import the Necessary Libraries\n# ==============================\n\nimport collections\n\n# =============================\n# Baseline Association Function\n# =============================\n\ndef baseline_assoc(SNR_eMBB, SNR_mMTC, sinr_eMBB, sinr_mMTC, BHCAP_SC, BHCAP_MC, num_SCBS, num_MCBS, np, scn, flag):\n\n\n\t# ==================================\n\t# Compute the Highest Received Power\n\n\t#print RX_eMBB.shape\n\t#print sinr_eMBB.shape\n\tSNR_eMBB = np.where(np.isnan(SNR_eMBB) == True, -300 ,SNR_eMBB); # Taking care of Nan values\n\tnp.random.shuffle(SNR_eMBB); # Shuffle the matrix on the vertical axis \n\tSNR_max_eMBB = np.flip(np.sort(SNR_eMBB, axis = 1),axis=1); # Sorted Matrix for eMBB \n\tidx_max_eMBB = np.flip(np.argsort(SNR_eMBB, axis = 1),axis=1); # Maximum received power for each eMBB application\n\t\n\tnp.savetxt(\"MAXSNR.csv\",SNR_max_eMBB,delimiter=\",\")\n\n\t#idx_max_mMTC = np.argsort(RX_mMTC, axis = 1); # Maximum received power for each mMTC application \n\t#rx_max_mMTC = np.sort(RX_mMTC, axis = 1); # Sorted Matrix for mMTC\n\t#sinr_max_eMBB = np.empty((sinr_eMBB.shape[0],1))\n\t#for i in range(0,sinr_eMBB.shape[0]):\n\t\t#if sinr_eMBB[i,idx_max_eMBB[i,0]] == pad_value:\n\t#\tsinr_max_eMBB[i] = sinr_eMBB[i,idx_max_eMBB[i,0]]; # Capture the SINR from the BS which is the closest for the eMBB\n\tsinr_max_eMBB = np.empty((sinr_eMBB.shape[0],sinr_eMBB.shape[1]))\n\tfor i in range(sinr_max_eMBB.shape[0]):\n\t\tfor j in range(sinr_max_eMBB.shape[1]):\n\t\t\tsinr_max_eMBB[i,j] = sinr_eMBB[i,idx_max_eMBB[i,j]]; # Developing the SINR matrix\n\t\t\tif sinr_max_eMBB[i,j] == 350:\n\t\t\t\tsinr_max_eMBB[i,j] = -300;\n\tnp.savetxt('SINRMAX.csv',sinr_max_eMBB,delimiter=\",\")\n\t# ===> Establish the Resource Pool Variables\n\n\tnum_BS = num_MCBS + num_SCBS; # We calculate the total number of Base stations\n\n\taccess_bw = np.empty((num_BS,1)); # Initializing the Access BW resource\n\tbcap_sc = np.ones((num_SCBS,1))*BHCAP_SC; # Small Cell Backhaul Capacity variable\n\tbcap_mc = np.ones((num_MCBS,1))*BHCAP_MC; # Macro Cell Backhaul Capacity variable\n\n\taccess_bw[0:num_SCBS,0] = scn.sc_bw; # Small Cell Bandwidth\n\taccess_bw[num_SCBS:num_BS,0] = scn.eNB_bw; # Macro Cell Bandwidth\n\n\n\t#print \"Received Power ==>\"\n\t#print RX_eMBB\n\t#print \"Max Received Power ====>\"\n\t#print rx_max_eMBB[:,0]\n\t#print \"SINR ====>\"\n\t#print sinr_max_eMBB[:,0]\n\t#for j in range(0,sinr_mMTC.shape[0]):\n\t#\tsinr_max_mMTC[i] = sinr_mMTC[i,idx_max_mMTC[i,0]]; # Capture the SINR from the BS which is the closest for the mMTC\n\t\n\tcounter_eMBB = collections.Counter(idx_max_eMBB[:,0]); # Counts the frequency of occurence of each BS as being the strongest BS for eMBB type\n\t#counter_mMTC = collections.Counter(idx_max_mMTC[:,0]); # Counts the frequency of occurence of each BS as being the strongest BS for mMTC type\n\n\t#Data_Rate_eMBB_scbw = scn.usr_scbw*np.log2(1+10**(SNR_max_eMBB[:,0]/10)); # Data rate at each eMBB application when attaching to the AP with best RSSI\n\t#Data_Rate_eMBB_fscbw = scn.sc_bw*np.log2(1+10**(SNR_max_eMBB[:,0]/10)); # Data rate with 1GHz BW for each eMBB application\n\t#Data_Rate_mMTC = scn.mMTC_bw*np.log2(1+10**(rx_max_eMBB[:,0]/10)); # mMTC Data rate with standard data rate \n\n\t#print Data_Rate_eMBB_scbw\n\n\t# Data_rate_sinr_eMBB_scbw= np.empty((sinr_max_eMBB.shape[0], sinr_max_eMBB.shape[1])); # Initialize the Data Rate matrix\n\n\t# for i in range(sinr_max_eMBB.shape[0]):\n\t#\tfor j in 
range(sinr_max_eMBB.shape[1]):\n\t#\t\tif j <= num_SCBS:\n\t#\t\t\tData_rate_sinr_eMBB_scbw[i,j] = scn.usr_scbw*np.log2(1+10**(sinr_max_eMBB[i,j]/10)); # SINR based Data Rate\n\t#\t\telse:\n\t#\t\t\tData_rate_sinr_eMBB_scbw[i,j] = scn.mc_bw*np.log2(1+10**(sinr_max_eMBB[i,j]/10)); # SINR based Data Rate\n\n\t#Data_rate_sinr_eMBB_fscbw = scn.sc_bw*np.log2(1+10**(sinr_max_eMBB/10)); # SINR based Data Rate for full bandwidth\n\t#Data_rate_sinr_mMTC = scn.mMTC_bw*np.log2(1+10**(sinr_max_mMTC/10)); # SINR based Data Rate for mMTC\n\n\t#print Data_rate_sinr_eMBB_scbw\n\t\n\t# ================================\n\t# Baseline Association Methodology\n\n\tTot_Datarate = 0; # Total Data Rate \n\tAccepted_Users = 0; # Number of Accepted Users\n\tbest_AP_col = 0; # Best Access Point column number\n\tnext_best_AP_counter = np.ones(sinr_eMBB.shape[0], dtype = int); # Increment counter for the next best AP for all the apps\n\n\tassoc_vec = sinr_max_eMBB[:,best_AP_col]; # SINR on a UE from the given cells to which we initially associate them \n\tassoc_vec_idx = np.bincount(idx_max_eMBB[:,best_AP_col]); # The number of UEs attached to each of the APs \n\t#print assoc_vec_idx\n\t#print assoc_vec_idx.shape\n\t#prohib_idx = []; # List to hold the prohibited indexes given their requirements are not satisfied\n\n\t# ===> Check the resources now \n\tif flag == 0:\n\t\tfor j in range(assoc_vec_idx.shape[0]):\n\t\t\t#print (\"j =\",j) \n\t\t\tif assoc_vec_idx[j] > 0:\t\t\t\n\t\t\t\tbw_per_user = access_bw[j]/assoc_vec_idx[j]; \n\t\t\t\tcounter = 0;\n\t\t\t\tfor i in range(sinr_eMBB.shape[0]):\n\t\t\t\t\t#print (\"i =\", i) \n\t\t\t\t\tif idx_max_eMBB[i,0] == j:\n\t\t\t\t\t\t#if bw_per_user*np.log2(1+10**(assoc_vec[i]/10)) >= scn.eMBB_minrate:\n\t\t\t\t\t\tif (j < num_SCBS):\n\t\t\t\t\t\t\tif (bcap_sc[j,0] > bw_per_user*np.log2(1+10**(assoc_vec[i]/10))) & (access_bw[j,0] >= 0):\n\t\t\t\t\t\t\t\tTot_Datarate = Tot_Datarate + bw_per_user*np.log2(1+10**(assoc_vec[i]/10)); # Update the Total network throughput\n\t\t\t\t\t\t\t\taccess_bw[j,0] = access_bw[j,0] - bw_per_user; # Update the available access bw \n\t\t\t\t\t\t\t\tbcap_sc[j,0] = bcap_sc[j,0] - bw_per_user*np.log2(1+10**(assoc_vec[i]/10)); # Update the available bhaul capacity\n\t\t\t\t\t\t\t\tAccepted_Users = Accepted_Users + 1; # Increment the number of users that have been accepted into the system\n\t\t\t\t\t\t\t\tcounter = counter + 1;\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tidx_max_eMBB[i,best_AP_col] = idx_max_eMBB[i, best_AP_col + next_best_AP_counter[i]]; # Shift to the next best AP\n\t\t\t\t\t\t\t\tnext_best_AP_counter[i] = next_best_AP_counter[i] + 1; # Point to the new next best AP\n\t\t\t\t\t\t\t\tAccepted_Users = Accepted_Users - counter; \n\t\t\t\t\t\t\t\tcounter = 0;\n\t\t\t\t\t\t\t\tj = j - 1;\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telif j >= num_SCBS:\n\t\t\t\t\t\t\tif (bcap_mc[j - num_SCBS,0] > bw_per_user*np.log2(1+10**(assoc_vec[i]/10))) & (access_bw[j,0] >= 0):\n\t\t\t\t\t\t\t\tTot_Datarate = Tot_Datarate + bw_per_user*np.log2(1+10**(assoc_vec[i]/10)); # Update the Total network throughput\n\t\t\t\t\t\t\t\taccess_bw[j,0] = access_bw[j,0] - bw_per_user; # Update the available access bw \n\t\t\t\t\t\t\t\t#print access_bw \n\t\t\t\t\t\t\t\tbcap_mc[j - num_SCBS,0] = bcap_mc[j - num_SCBS,0] - bw_per_user*np.log2(1+10**(assoc_vec[i]/10)); # Update the available bhaul capacity\n\t\t\t\t\t\t\t\tAccepted_Users = Accepted_Users + 1; # Increment the number of users that have been accepted into the system\n\t\t\t\t\t\t\t\tcounter = counter + 
1;\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tidx_max_eMBB[i,best_AP_col] = idx_max_eMBB[i, best_AP_col + next_best_AP_counter[i]]; # Shift to the next best AP\n\t\t\t\t\t\t\t\tnext_best_AP_counter[i] = next_best_AP_counter[i] + 1; # Point to the new next best AP\n\t\t\t\t\t\t\t\tAccepted_Users = Accepted_Users - counter; \n\t\t\t\t\t\t\t\tcounter = 0;\n\t\t\t\t\t\t\t\tj = j - 1;\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tcontinue\n\t\t\t\t\n\n\tif flag == 1:\n\t\tfor j in range(assoc_vec_idx.shape[0]):\n\t\t\t#print (\"j =\",j) \n\t\t\tif assoc_vec_idx[j] > 0:\t\t\t\n\t\t\t\tbw_per_user = access_bw[j]/assoc_vec_idx[j]; \n\t\t\t\tcounter = 0;\n\t\t\t\tfor i in range(sinr_eMBB.shape[0]):\n\t\t\t\t\t#print (\"i =\", i) \n\t\t\t\t\tif idx_max_eMBB[i,0] == j:\n\t\t\t\t\t\tif bw_per_user*np.log2(1+10**(assoc_vec[i]/10)) >= scn.eMBB_minrate:\n\t\t\t\t\t\t\tif (j < num_SCBS):\n\t\t\t\t\t\t\t\tif (bcap_sc[j,0] > bw_per_user*np.log2(1+10**(assoc_vec[i]/10))) & (access_bw[j,0] >= 0):\n\t\t\t\t\t\t\t\t\tTot_Datarate = Tot_Datarate + bw_per_user*np.log2(1+10**(assoc_vec[i]/10)); # Update the Total network throughput\n\t\t\t\t\t\t\t\t\taccess_bw[j,0] = access_bw[j,0] - bw_per_user; # Update the available access bw \n\t\t\t\t\t\t\t\t\tbcap_sc[j,0] = bcap_sc[j,0] - bw_per_user*np.log2(1+10**(assoc_vec[i]/10)); # Update the available bhaul capacity\n\t\t\t\t\t\t\t\t\tAccepted_Users = Accepted_Users + 1; # Increment the number of users that have been accepted into the system\n\t\t\t\t\t\t\t\t\tcounter = counter + 1;\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tidx_max_eMBB[i,best_AP_col] = idx_max_eMBB[i, best_AP_col + next_best_AP_counter[i]]; # Shift to the next best AP\n\t\t\t\t\t\t\t\t\tnext_best_AP_counter[i] = next_best_AP_counter[i] + 1; # Point to the new next best AP\n\t\t\t\t\t\t\t\t\tAccepted_Users = Accepted_Users - counter; \n\t\t\t\t\t\t\t\t\tcounter = 0;\n\t\t\t\t\t\t\t\t\tj = j - 1;\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\telif j >= num_SCBS:\n\t\t\t\t\t\t\t\tif (bcap_mc[j - num_SCBS,0] > bw_per_user*np.log2(1+10**(assoc_vec[i]/10))) & (access_bw[j,0] >= 0):\n\t\t\t\t\t\t\t\t\tTot_Datarate = Tot_Datarate + bw_per_user*np.log2(1+10**(assoc_vec[i]/10)); # Update the Total network throughput\n\t\t\t\t\t\t\t\t\taccess_bw[j,0] = access_bw[j,0] - bw_per_user; # Update the available access bw \n\t\t\t\t\t\t\t\t\t#print access_bw \n\t\t\t\t\t\t\t\t\tbcap_mc[j - num_SCBS,0] = bcap_mc[j - num_SCBS,0] - bw_per_user*np.log2(1+10**(assoc_vec[i]/10)); # Update the available bhaul capacity\n\t\t\t\t\t\t\t\t\tAccepted_Users = Accepted_Users + 1; # Increment the number of users that have been accepted into the system\n\t\t\t\t\t\t\t\t\tcounter = counter + 1;\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tidx_max_eMBB[i,best_AP_col] = idx_max_eMBB[i, best_AP_col + next_best_AP_counter[i]]; # Shift to the next best AP\n\t\t\t\t\t\t\t\t\tnext_best_AP_counter[i] = next_best_AP_counter[i] + 1; # Point to the new next best AP\n\t\t\t\t\t\t\t\t\tAccepted_Users = Accepted_Users - counter; \n\t\t\t\t\t\t\t\t\tcounter = 0;\n\t\t\t\t\t\t\t\t\tj = j - 1;\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\telse:\n\t\t\t\tcontinue\n\t\t\t\t\n\n\t#for i in range(sinr_eMBB.shape[0]):\n\t#\tfor j in range(sinr_eMBB.shape[1]):\n\t#\t\tif Data_rate_sinr_eMBB_scbw[i,j] >= scn.eMBB_minrate:\n\t#\t\t\tif idx_max_eMBB[i,j] <= num_SCBS: # If its a Small Cell\n\t#\t\t\t\tif access_bw[idx_max_eMBB[i,j],0] >= scn.usr_scbw and 
bcap_sc[idx_max_eMBB[i,j],0] >= scn.eMBB_minrate:\n\t#\t\t\t\t\tTot_Datarate = Tot_Datarate + Data_rate_sinr_eMBB_scbw[i,j]; # Update the Total network throughput\n\t#\t\t\t\t\taccess_bw[idx_max_eMBB[i,j],0] = access_bw[idx_max_eMBB[i,j],0] - scn.usr_scbw; # Update the available access bw \n\t#\t\t\t\t\tbcap_sc[idx_max_eMBB[i,j],0] = bcap_sc[idx_max_eMBB[i,j],0] - Data_rate_sinr_eMBB_scbw[i,j]; # Update the available bhaul capacity\n\t#\t\t\t\t\tAccepted_Users = Accepted_Users + 1; # Increment the number of users that have been accepted into the system\n\t#\t\t\t \telse:\n\t#\t\t\t \t\tpass\n\t#\t\t\telif idx_max_eMBB[i,j] > num_SCBS: # If its a macro cell\n\t#\t\t\t\tif access_bw[idx_max_eMBB[i,j],0] >= scn.mc_bw and bcap_mc[idx_max_eMBB[i,j] - num_SCBS,0] >= scn.eMBB_minrate:\n\t#\t\t\t\t\tTot_Datarate = Tot_Datarate + Data_rate_sinr_eMBB_scbw[i,j]; # Update the Total network throughput\n\t#\t\t\t\t\taccess_bw[idx_max_eMBB[i,j],0] = access_bw[idx_max_eMBB[i,j],0] - scn.mc_bw; # Update the Available access bw\n\t#\t\t\t\t\tbcap_mc[idx_max_eMBB[i,j] - num_SCBS,0] = bcap_mc[idx_max_eMBB[i,j] - num_SCBS,0] - Data_rate_sinr_eMBB_scbw[i,j]; \n\t#\t\t\t\t\tAccepted_Users = Accepted_Users + 1; # Increment the number of users that have been accepted into the system \n\t#\t\t\tbreak\n\t#\t\telse:\n\t#\t\t\tcontinue\n\t\t#print \"Total Datarate:\" \n\t\t#print Tot_Datarate\n\t\t#print \"================\"\n\t\t#print \"Accepted-Users:\"\n\t\t#print Accepted_Users\n\t# access_bw >= scn.usr_scbw and ((bcap_sc >= scn.eMBB_minrate and idx_max_eMBB[i,j]<=num_SCBS) or (bcap_mc >= scn.eMBB_minrate and idx_max_eMBB[i,j]>num_SCBS)):\n\t\t\t\t\n\tprint \"Generated the Baseline Data\"\n\n\t#np.savetxt('Avguser_DR.csv',Tot_Datarate/Accepted_Users, delimiter =\",\")\n\t\n\t#return Data_Rate_eMBB_scbw, Data_Rate_eMBB_fscbw, Data_Rate_mMTC, Data_rate_sinr_eMBB_scbw, Data_rate_sinr_eMBB_fscbw, Data_rate_sinr_mMTC\n\treturn Tot_Datarate, Accepted_Users\n\n\n\n",
"id": "578872",
"language": "Python",
"matching_score": 2.8881354331970215,
"max_stars_count": 1,
"path": "rssi_assoc.py"
},
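The baseline in rssi_assoc.py above attaches each eMBB application to the access point with the strongest SNR, splits that AP's access bandwidth equally among its attached users, and converts the SNR given in dB into a Shannon rate. The block below is a minimal numpy sketch of that rule with invented toy values (not the scenario_var parameters), leaving out the backhaul check and the fallback to the next-best AP.

# Minimal numpy sketch of the max-SNR baseline association above (toy values).
import numpy as np

snr_db = np.array([[12.0, 3.0],      # rows: users, columns: APs (assumed toy SNRs in dB)
                   [ 5.0, 9.0],
                   [ 7.5, 7.0]])
ap_bw_hz = np.array([20e6, 100e6])   # access bandwidth per AP (assumed)

best_ap = np.argmax(snr_db, axis=1)                        # strongest AP per user
users_per_ap = np.bincount(best_ap, minlength=ap_bw_hz.size)
bw_per_user = ap_bw_hz[best_ap] / users_per_ap[best_ap]    # equal split on each AP

snr_lin = 10 ** (snr_db[np.arange(snr_db.shape[0]), best_ap] / 10.0)  # dB -> linear
rate_bps = bw_per_user * np.log2(1.0 + snr_lin)            # Shannon rate per user

print(best_ap, rate_bps)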
{
"content": "#!/usr/bin/env python\n\n# =============================\n# Import the necessary binaries\n# =============================\n\nimport numpy as np\nimport scenario_gen\nimport dist_check as dsc \nimport plotter\nimport os, sys\nfrom scenario_var import scenario_var \nfrom argparse import ArgumentParser\nimport time\nimport csvsaver\n\n\n# ==================================\n# Capture the Command Line Arguments\n# ==================================\n\nparser = ArgumentParser(description = 'Scenario Generator Main Function'); # Initializing the class variable\nparser.add_argument('-iter', type = int, help = 'Iteration Number of the Simulation');\nparser.add_argument('-interf', type = int, help = 'Interference Limited Region Indicator')\nargs = parser.parse_args(); # Parse the Arguments\n\n# ==============================\n# Initialize the class variables\n# ==============================\nscn = scenario_var(); # Getting the class object\n\n# =====================================\n# Check Presence of Storage Directories\n# =====================================\n\npath = os.getcwd() + '/Data'; # This is the path we have to check for\nsubpath = os.getcwd() + '/Data/Temp'; # This is the subdirectory to store data \nlocpath = os.getcwd() + '/Data/loc'; # This is the subdirectory to store User, MC and SC location information. This should be used by any combination of Scenarios\nlcdata_flag = 1; # This flag indicates if location data exists or not\nif os.path.isdir(path):\n\tprint \"Directory to save data found\"\n\tprint \"----------------------------\"\n\tprint \"\"\n\tif os.path.isdir(locpath):\n\t\tif os.path.isfile(locpath+'/loc'+str(vars(args)['iter'])+'.npz'):\n\t\t\tprint \"Locations Directory and Data found\"\n\t\t\tprint \"-------------------------\"\n\t\t\tprint \"\"\n\t\t\tlcdata_flag = 1\n\t\telse:\n\t\t\tprint \"Locations directory found but Data missing\"\n\t\t\tprint \"------------------------------------------\"\n\t\t\tprint \"\"\n\t\t\tlcdata_flag = 0\n\telse:\n\t\tos.mkdir(locpath)\n\t\tprint \"Subdirectory Created\"\n\t\tprint \"--------------------\"\n\t\tprint \"\"\n\t\tlcdata_flag = 0\n\n\tif os.path.isdir(subpath):\n\t\tprint \"Subdirectory found\"\n\t\tprint \"------------------\"\n\t\tprint \"\"\n\telse: \n\t\tos.mkdir(subpath)\n\t\tprint \"Subdirectory Created\"\n\t\tprint \"--------------------\"\n\t\tprint \"\"\nelse:\n\tos.mkdir(path); # Create this directory \n\tos.mkdir(subpath); # Created the Subdirectory \n\tos.mkdir(locpath); #Created the User location Subdirectory\n\tprint \"Created the Directory to save data\"\n\tprint \"----------------------------------\"\n\tprint \"\"\n\tlcdata_flag = 0\t\n\n\n\n\n\n# ====================\n# Macro cell placement \n# ====================\ntry:\n\tif lcdata_flag == 0:\n\t\tmacro_cell_locations = scenario_gen.macro_cell(scn.simulation_area, scn.MCBS_intersite, np, dsc); # Get the macro cell locations\n\n\t\t#print macro_cell_locations\n\t\t#print \"===================\n\n\t\t# ===================================================================\n\t\t# Small cell placement and MCBS-SCBS Additional attributes generation\n\t\t# ===================================================================\n\n\t\tSCBS_per_MCBS = np.random.randint(3,10,size=macro_cell_locations.shape[0]); # Randomly choosing number of SCBS within an MCBS domain in the range 3 to 1\n\t\tSCBS_MCBS_assoc = np.zeros((sum(SCBS_per_MCBS),macro_cell_locations.shape[0]), dtype=int); # Create a MCBS and SCBS association matrix (distance based)\n\t\t#print 
sum(SCBS_per_MCBS)\n\t\tlocs_SCBS = np.empty([sum(SCBS_per_MCBS),2], dtype = int); # We create an empty list of numpy arrays\n\t\tl_idx = 0; # lower index for the association matrix \n\t\tu_idx = SCBS_per_MCBS[0]; # upper index for the association matrix\n\t\tfor i in range(0,macro_cell_locations.shape[0]):\n\t\t small_cell_locations = scenario_gen.small_cell(i, macro_cell_locations[i,:], scn.SCBS_intersite, SCBS_per_MCBS[i], scn.MCBS_intersite, np, dsc); #Get the small cell locations for each macro cell domain \n\t\t #print small_cell_locations\n\t\t locs_SCBS[l_idx:u_idx,:] = small_cell_locations; # Store the small cell locations in the list of numpy arrays\n\t\t SCBS_MCBS_assoc[l_idx:u_idx,i] = dsc.dist_calc(small_cell_locations,macro_cell_locations[i], 0, 0, '2d', np); # Insert ones in these indexes for the association matrix\n\t\t #print SCBS_MCBS_assoc[l_idx:u_idx,i]\n\t\t l_idx = l_idx + SCBS_per_MCBS[i]; # Update the lower index \n\t\t if i < (macro_cell_locations.shape[0]-1):\n\t\t u_idx = u_idx + SCBS_per_MCBS[i+1]; # Update the upper index\n\t\t #print locs_SCBS[:,:,i]\n\t\t #print \"===================\"\n\t\t#print SCBS_MCBS_assoc\n\n\t\t# ========================================================\n\t\t# Create the AP-Backhaul association for the scenario dump\n\t\t# ========================================================\n\n\t\tSC_wl_bh, SC_wrd_bh, MC_hops, SC_hops = scenario_gen.backhaul_dump(scn, SCBS_per_MCBS, macro_cell_locations, SCBS_MCBS_assoc, np); # We drop the backhaul into the scenario\n\t\tBH_capacity_SC = scenario_gen.backhaul_tput(SCBS_MCBS_assoc, SCBS_per_MCBS, SC_wl_bh, np, scn, dsc); # Also Calculate the# BH capacity vector\n\t\t#print BH_capacity_SC \n\n\t\t# ====================================\n\t\t# Dump the Users onto the scenario map \n\t\t# ====================================\n\n\t\tAP_locs = np.vstack((macro_cell_locations, locs_SCBS)); # All AP locations\n\t\tusr_locs,usr_apps_assoc = scenario_gen.user_dump(scn, SCBS_per_MCBS, macro_cell_locations.shape[0], AP_locs, np, dsc); # We also retrieve the user and applications association matrix\n\t\tgenerated_mMTC_locs = scenario_gen.mMTC_user_dump(scn,SCBS_per_MCBS,macro_cell_locations.shape[0],np); # Massive Machine Type User locations\n\t\tnum_mMTC_AP = scenario_gen.mMTC_user_selector(scn, np, generated_mMTC_locs, AP_locs, 0, dsc, 0); # We select the number of active mMTC devices in the scenario and cluster them with APs for BH consumption\n\t\t#mMTC_locs = scenario_gen.mMTC_user_dump(scn,SCBS_per_MCBS,macro_cell_locations.shape[0],np); # Massive Machine Type User locations\n\t\t#print usr_locs\n\t\tprint \"User and AP Dump completed\"\n\t\tnp.savez_compressed(os.getcwd()+'/Data/loc/loc'+str(vars(args)['iter'])+'.npz', SCBS_per_MCBS, macro_cell_locations, locs_SCBS, SC_wl_bh, SC_wrd_bh, MC_hops, SC_hops, BH_capacity_SC, SCBS_MCBS_assoc, num_mMTC_AP)\n\t\tnp.savez_compressed(os.getcwd()+'/Data/loc/loc_dct'+str(vars(args)['iter'])+'.npz', **usr_locs)\n\t\t#np.savez_compressed(os.getcwd()+'/Data/loc/usles_dct'+str(vars(args)['iter'])+'.npz', **usr_apps_assoc)\n\telse:\n\t\tpass\n\t# ======================================\n\t# Generate the SINR values for the users\n\t# ======================================\n\tLoc_Data = np.load(os.getcwd()+'/Data/loc/loc'+str(vars(args)['iter'])+'.npz') # Get the location data \n\tusr_locs = np.load(os.getcwd()+'/Data/loc/loc_dct'+str(vars(args)['iter'])+'.npz')\n\t#usr_apps_assoc = np.load(os.getcwd()+'/Data/loc/usles_dct'+str(vars(args)['iter'])+'.npz')\n\tSCBS_per_MCBS 
= Loc_Data['arr_0']\n\tmacro_cell_locations = Loc_Data['arr_1']\n\tlocs_SCBS = Loc_Data['arr_2']\n\tSC_wl_bh = Loc_Data['arr_3']\n\tSC_wrd_bh = Loc_Data['arr_4']\n\tMC_hops = Loc_Data['arr_5']\n\tSC_hops = Loc_Data['arr_6']\n\tBH_capacity_SC = Loc_Data['arr_7']\n\tSCBS_MCBS_assoc = Loc_Data['arr_8']\n\tnum_mMTC_AP = Loc_Data['arr_9']\n\t#sinr_sc_embb,locs_sc_ret, usr_lcs = scenario_gen.pathloss_tester(scn, np, dsc); # Testing the Pathloss function implementation\n\tfor i in range(0,len(usr_locs.keys())):\n\t\tprint \"Iteration #\" + str(i)\n\t\tprint \"=====================\"\n\t\tsinr_sorted, locs_sc_ret, usr_lcs, idx_sc, idx_mc, sinr_pad, num_SCBS, num_MCBS, num_MCBS_tot, RX_eMBB, l_nl = scenario_gen.sinr_gen (scn, sum(SCBS_per_MCBS), macro_cell_locations, np.asarray(locs_SCBS), usr_locs['user_locations'+str(i)], dsc, np, int(vars(args)['interf']) )\n\t\t#print sinr_sorted\n\t\tcsvsaver.csvsaver(sinr_sorted,[],\"SINR_rx1.csv\")\n\t\t\n\t\t#print sinr_sorted.shape \n\t\tsinr = dsc.reorganizer(sinr_sorted, idx_sc, idx_mc, num_SCBS, num_MCBS_tot, sinr_pad, np, scn); # We reorganize the SINR matrix for the optimization framework\n\t\t#print sinr\n\t\tcsvsaver.csvsaver(sinr,[],\"SINR_REOG.csv\")\n\t\t#print sinr.shape \n\t\tRX = dsc.reorganizer(RX_eMBB, idx_sc, idx_mc, num_SCBS, num_MCBS_tot, float('nan'), np, scn); # We reorganize the RX Power matrix for the Baseline framework\n\t\t# ================================\n\t\t# Create Compressed Variable Files\n\t\t\n\t\tnp.savez_compressed(os.getcwd()+'/Data/Temp/optim_var_'+ str(i) + str(vars(args)['iter']),sinr, usr_lcs, idx_sc, sinr_pad, num_SCBS, num_MCBS_tot, SC_wl_bh, SC_wrd_bh, MC_hops, SC_hops, BH_capacity_SC, RX, SCBS_per_MCBS, l_nl, allow_pickle = True); # Save these variables to be utilized by the optimizer\n\t\tnp.savez_compressed(os.getcwd()+'/Data/Temp/hmap_' + str(i) + str(vars(args)['iter']), usr_lcs, locs_SCBS, macro_cell_locations, SCBS_per_MCBS, SCBS_MCBS_assoc, l_nl, sinr) # Data necessary for heatmap is saved here\n\t\t#np.savez_compressed('/home/akshayjain/Desktop/Simulation/optim_var_1',sinr_sorted, usr_apps_assoc, usr_lcs, idx_sc, sinr_pad, num_SCBS, num_MCBS, SC_wl_bh, SC_wrd_bh, MC_hops, SC_hops, BH_capacity_SC); # Save these variables to be utilized by the optimizer\n\n\t\t# ===========================\n\t\t# Plotting and Proof Checking\n\n\t\t#plotter.plotter('dashline',locs_sc_ret,sinr_sc_embb,5,10,1,45,0,0,1,'major','both', 'yes', 'SNR profile of Small Cell', np)\n\t\t#plotter.plotter('heatmap',sinr,locs_sc_ret,5,10,1,45,0,0,1,'major','both', 'yes', 'SNR profile of Small Cell', np)\n\tnp.savez_compressed(os.getcwd()+'/Data/Temp/optim_var_mMTC'+ str(vars(args)['iter']), num_mMTC_AP, allow_pickle = True); # Save these variables to be utilized by the optimizer\n\t#sinr_sorted_mMTC, locs_sc_ret_mMTC, usr_lcs_mMTC, idx_sc_mMTC, idx_mc_mMTC, sinr_pad_mMTC, num_SCBS_mMTC, num_MCBS_mMTC, num_MCBS_tot_mMTC, RX_mMTC = scenario_gen.sinr_gen (scn, sum(SCBS_per_MCBS), macro_cell_locations, np.asarray(locs_SCBS), mMTC_locs['user_locations'], dsc, np)\n\t#sinr_mMTC = dsc.reorganizer(sinr_sorted_mMTC, idx_sc_mMTC, idx_mc_mMTC, num_SCBS_mMTC, num_MCBS_tot_mMTC, sinr_pad_mMTC, np, scn); # We reorganize the SINR matrix for the optimization framework\n\t#RX_mMTC_reorg = dsc.reorganizer(RX_mMTC, idx_sc_mMTC, idx_mc_mMTC, num_SCBS_mMTC, num_MCBS_tot_mMTC, float('nan'), np, scn); # We reorganize the RX Power matrix for the Baseline framework\n\t#np.savez_compressed(os.getcwd()+'/Data/Temp/optim_var_mMTC'+ 
str(vars(args)['iter']),sinr_mMTC, usr_lcs_mMTC, idx_sc_mMTC, sinr_pad_mMTC, num_SCBS_mMTC, num_MCBS_tot_mMTC, SC_wl_bh, SC_wrd_bh, MC_hops, SC_hops, BH_capacity_SC, RX_mMTC_reorg, allow_pickle = True); # Save these variables to be utilized by the optimizer\n\t\nexcept KeyboardInterrupt:\n\tsys.exit(\"Exiting this process with Iteration Number\" + str(vars(args)['iter']))\n",
"id": "10651401",
"language": "Python",
"matching_score": 5.504446983337402,
"max_stars_count": 1,
"path": "main.py"
},
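A note on the save/load pattern in the main.py record above: the scenario dump is written positionally with np.savez_compressed, so the arrays come back as 'arr_0', 'arr_1', ... in the order the arguments were passed. The minimal sketch below (hypothetical file and variable names, not part of the repository) shows that positional style next to the keyword style, which avoids hard-coding the arr_N indices:

import numpy as np

# Positional save: arrays are retrieved as 'arr_0', 'arr_1', ... (the style used in main.py).
scbs_per_mcbs = np.array([3, 5, 4])
macro_locs = np.random.uniform(0, 600, (3, 2))
np.savez_compressed('loc_example.npz', scbs_per_mcbs, macro_locs)

loc_data = np.load('loc_example.npz')
scbs_per_mcbs = loc_data['arr_0']   # order of the positional arguments
macro_locs = loc_data['arr_1']

# Keyword save: arrays keep meaningful names, which is less brittle when the file layout changes.
np.savez_compressed('loc_named_example.npz',
                    SCBS_per_MCBS=scbs_per_mcbs,
                    macro_cell_locations=macro_locs)
named = np.load('loc_named_example.npz')
print(named['SCBS_per_MCBS'], named['macro_cell_locations'].shape)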
{
"content": "# ==> This file enables the scenario generation process for the network to be analyzed.\n\n# ==============================\n# Import the necessary libraries\n# ==============================\n\n#import dist_check\nimport pathloss\nimport copy\nimport csv\nimport csvsaver\nimport sectbeam\nimport pdb\n#from multiprocessing import Process\n# ===================================================\n# Load/Generate the Macro Cell base station locations\n# ===================================================\n\ndef macro_cell(simulation_area,MCBS_intersite,np,dsc):\n \n # =====================================\n # We distribute the Macro BSs as a grid\n \n offset = MCBS_intersite/2; # Offset param\n locs_interim = np.arange(offset, np.sqrt(simulation_area).astype(int), MCBS_intersite); # Range of numbers from 0 to the end of the grid area, with intersite distance spacing \n #print locs_interim\n locs_MCBS = dsc.gridder(locs_interim,MCBS_intersite,np); # Calling a permutation function that generates the grid\n return locs_MCBS\n\n# ===================================================\n# Load/Generate the Small Cell base station locations\n# ===================================================\n\ndef small_cell(num, MCBS_locs, SCBS_intersite,SCBS_per_MCBS,MCBS_intersite,np,dsc):\n offset = MCBS_intersite/2; # Offset param\n while True:\t\n dist_from_MCBS = np.random.uniform(0,offset,(SCBS_per_MCBS,1))\n angular_disp = np.random.uniform(0,2*np.pi,(SCBS_per_MCBS,1))\n locs_SCBS_x = np.multiply(dist_from_MCBS,np.cos(angular_disp)) + MCBS_locs[0]\n locs_SCBS_y = np.multiply(dist_from_MCBS,np.sin(angular_disp)) + MCBS_locs[1]\n #print MCBS_locs\n #locs_SCBS_x = np.random.uniform(MCBS_locs[0] - offset,MCBS_locs[0] + offset,(SCBS_per_MCBS,1)); # Generating the X coordinate of the small cells for a given macro cell\n #locs_SCBS_y = np.random.uniform(MCBS_locs[1] - offset,MCBS_locs[1] + offset,(SCBS_per_MCBS,1)); # Generating the Y coordinate of the small cells for a given macro cell\n locs_SCBS = np.concatenate((locs_SCBS_x, locs_SCBS_y), axis=1); \n #print locs_SCBS\n if dsc.checker(locs_SCBS,SCBS_intersite,np)==1 and dsc.locs_checker(locs_SCBS, MCBS_locs,np, 'sc')==1:\n break\n return locs_SCBS\n\n# ================================\n# Load/Generate the User locations\n# ================================\n\ndef user_dump(scn, SCBS_per_MCBS, num_MCBS, AP_locs, np, dsc):\n\n # =============================================================\n # Compute total users and total applications in Simulation area\n while True:\n tot_users_scenario = np.arange(scn.num_users_min, scn.num_users_max, scn.user_steps_siml, dtype='int'); # Adding the users list for simulation \n #print tot_users_scenario\n #tot_dev_eMBB = (sum(SCBS_per_MCBS)+num_MCBS)*scn.UE_density_eMBB; # Total eMBB devices in the scenario\n #tot_dev_URLLC = scn.UE_density_URLLC*scn.simulation_area; # Total URLLC devices in the scenario\n #tot_dev_mMTC = scn.UE_density_mMTC*num_MCBS; # Total mMTC devices in the scenario\n \n # =======================================================\n # Generate User locations and User-App Association Matrix\n\n usr_locs = {}; # We establish an empty dictionary\n assoc_usapp = {}; # We establish an empty dictionary for USER and APPs association\n attr_name_usr = 'user_locations'; # Attribute name\n attr_name_assoc = 'user_app'; # Attribute name for the USER-APPs association matrix (eMBB)\n for i in range(0,tot_users_scenario.shape[0]):\n usr_locs[attr_name_usr + str(i)] = 
np.random.uniform(0,np.sqrt(scn.simulation_area),(tot_users_scenario[i],2)); # Generate User locations\n if dsc.locs_checker(usr_locs[attr_name_usr + str(i)], AP_locs,np,'user')==0:\n i = i - 1; # We go back and start the for loop from the current instance\n continue\n #assoc_usapp[attr_name_assoc + str(i)] = np.random.randint(2, size = (tot_users_scenario[i], scn.max_num_appl_UE)); # Generate User-App Association \n assoc_usapp[attr_name_assoc + str(i)] = np.ones((tot_users_scenario[i], scn.max_num_appl_UE), dtype = int); # Generate User-App Association \n with open(\"ActualUsers.csv\",'wb') as f:\n \t\tw = csv.DictWriter(f,assoc_usapp.keys())\n \t\tw.writeheader()\n \t\tw.writerow(assoc_usapp)\n return usr_locs, assoc_usapp\n \n\t#usr_locs_eMBB = np.random.uniform(0,np.sqrt(scn.simulation_area),(tot_dev_eMBB,2)); # We obtain a set of eMBB locations\n #usr_locs_URLLC = np.random.uniform(0,np.sqrt(scn.simulation_area),(int(tot_dev_URLLC),2)); # We obtain a set of URLLC locations\n #usr_locs_mMTC = np.random.uniform(0,np.sqrt(scn.simulation_area),(tot_dev_mMTC,2)); # We obtain a set of mMTC locations\n #return usr_locs_eMBB, usr_locs_URLLC, usr_locs_mMTC; # Return the locations of these applications/users with these applications\n\ndef mMTC_user_dump(scn, SCBS_per_MCBS, num_MCBS, np):\n\n # =============================================================\n # Compute total users and total applications in Simulation area\n #print tot_users_scenario\n #tot_dev_eMBB = (sum(SCBS_per_MCBS)+num_MCBS)*scn.UE_density_eMBB; # Total eMBB devices in the scenario\n #tot_dev_URLLC = scn.UE_density_URLLC*scn.simulation_area; # Total URLLC devices in the scenario\n tot_dev_mMTC = scn.UE_density_mMTC*num_MCBS; # Total mMTC devices in the scenario\n \n # =======================================================\n # Generate User locations and User-App Association Matrix\n\n usr_locs = {}; # We establish an empty dictionary\n #assoc_usapp = {}; # We establish an empty dictionary for USER and APPs association\n attr_name_usr = 'user_locations'; # Attribute name\n #attr_name_assoc = 'user_app'; # Attribute name for the USER-APPs association matrix (eMBB)\n usr_locs[attr_name_usr] = np.random.uniform(0,np.sqrt(scn.simulation_area),(tot_dev_mMTC,2)); # Generate User locations\n #assoc_usapp[attr_name_assoc + str(i)] = np.random.randint(2, size = (tot_dev_mMTC, scn.max_num_appl_UE)); # Generate User-App Association \n return usr_locs\n\n# ===========================\n# mMTC User Location Selector\n# ===========================\n\ndef mMTC_user_selector(scn, np, mmtc_usr_lcs, AP_locs, gencase, dsc, percentage):\n\n if gencase == 0: # Gencase = 0 indicates that we use an uniform distribution to select the user locations\n\n #print (\"Total mMTC devices:\", mmtc_usr_lcs['user_locations'].shape)\n num_mMTC = np.random.randint(0,mmtc_usr_lcs['user_locations'].shape[0]); # First we select the number of users from the total possible users using the Uniform distribution\n #num_mMTC = 20000\n #print (\"Number of mMTC:\", num_mMTC)\n mMTC_selected_idx = np.random.randint(0,mmtc_usr_lcs['user_locations'].shape[0],(num_mMTC,1)); # We select the indices of the mMTC devices\n #print (\"Selected Indices:\", mMTC_selected_idx)\n #selected_mmtc = np.take(mmtc_usr_lcs['user_locations'] ,mMTC_selected_idx, axis = 1); # THis is the variable that will store the chosen mMTC device locations\n selected_mmtc = np.empty((num_mMTC,2)); # THis is the variable that will store the chosen mMTC device locations\n for i in range(num_mMTC):\n 
selected_mmtc[i,:] = mmtc_usr_lcs['user_locations'][mMTC_selected_idx[i],:]; # Select Locations -- this is the inefficient way. Study np.take to understand the efficient way of getting user locations\n\n mMTC_AP_dist_mat = np.empty((num_mMTC, AP_locs.shape[0])) # Matrix that holds the distance between mMTC devices and APs\n for i in range(AP_locs.shape[0]):\n mMTC_AP_dist_mat[:,i] = dsc.dist_calc(selected_mmtc, AP_locs[i,:], 0, 0, '2d', np) # Calculate the distance of each mMTC device to each of the APs in the scenario\n\n mmtc_AP_asso = np.argmin(mMTC_AP_dist_mat, axis = 1); # We get the sorted matrix\n num_mMTC_AP = np.bincount(mmtc_AP_asso); # We get the number of users for a given AP \n print \"mMTC dump done\"\n\n elif gencase == 1: # Gencase = 1 indicates that we provision 5%-100% load per cell for mMTC devices\n\n mMTC_AP_dist_mat = np.empty((mmtc_usr_lcs.shape[0], AP_locs.shape[0])) # Matrix that holds the distance between mMTC devices and APs\n \n for i in range(AP_locs.shape[0]):\n mMTC_AP_dist_mat[:,i] = dsc.dist_calc(mmtc_usr_lcs, AP_locs[i,:], 0, 0, '2d', np) # Calculate the distance of each mMTC device to each of the APs in the scenario\n\n sorted_mMTC_arr = np.argmin(mMTC_AP_dist_mat, axis = 1)\n mMTC_assoc_dict = {}; # We have an empty dictionary to hold the AP and mMTC user association\n mMTC_assoc_dict['APasso'] = np.sort(sorted_mMTC_arr) # Store the sorted AP association array\n mMTC_assoc_dict['SortAsso'] = np.argsort(sorted_mMTC_arr) # Sort the association vector based on AP ID and store the indices of sorting \n mMTC_assoc_dict['Count'] = np.bincount(sorted_mMTC_arr) # Store the bincount for each AP\n\n num_users_perbin = np.floor(mMTC_assoc_dict['Count']*percentage/100) # We get the vector for the number of mMTC devices active per AP\n #for i in range() ==> Continue this when the basic case (gencase 0) is done\n \n #return selected_mmtc, mmtc_AP_asso, num_mMTC_AP\n return num_mMTC_AP \n\n# =============================\n# Generate the backhaul network\n# =============================\n\ndef backhaul_dump(scn, SCBS_per_MCBS, MCBS_locs, assoc_mat, np):\n\n # =====================================================================================================\n # We create the wired and wireless backhaul matrix (Restricting it to just one backhaul link currently)\n\n mat_wlbh_sc = np.where(assoc_mat != 0, (assoc_mat <= scn.wl_bh_bp)*1, 0); # Wireless backhaul enabled small cells\n #print mat_wlbh_sc\n mat_wrdbh_sc = (assoc_mat > scn.wl_bh_bp)*1; # Wired backhaul enabled small cells\n MC_hops = np.random.randint(scn.min_num_hops, scn.max_num_hops,size = MCBS_locs.shape[0]); # The macro cells always have wired backhaul (Local breakouts can be added later)\n SC_hops = ((assoc_mat > 0)*1)*np.transpose(MC_hops) + 1; # The number of hops for each small cells to the IMS core\n return mat_wlbh_sc, mat_wrdbh_sc, MC_hops, SC_hops # Return the hops and wired/wireless backhaul configuration \n\n# ===============================\n# SINR Calculator per Application\n# ===============================\n\ndef sinr_gen (scn, num_SCBS, mc_locs, sc_locs, usr_lcs, dsc, np, inter_limit_flag): # Generates the SINR per application \n #print tau_flag\n # ======================================================\n # First the distances to the base stations is calculated\n\n # ==> We declare a set of empty arrays\n dist_serv_cell = np.empty([usr_lcs.shape[0],mc_locs.shape[0]]);\n\n #dist_serv_cell_eMBB = np.empty([usr_locs_eMBB.shape[0],mc_locs.shape[0]]);\n #dist_serv_cell_URLLC = 
np.empty([usr_locs_URLLC.shape[0],mc_locs.shape[0]]);\n #dist_serv_cell_mMTC = np.empty([usr_locs_mMTC.shape[0],mc_locs.shape[0]]); \n\n dist_serv_cell_3d = np.empty([usr_lcs.shape[0],mc_locs.shape[0]]);\n \n #dist_serv_cell_eMBB_3d = np.empty([usr_locs_eMBB.shape[0],mc_locs.shape[0]]);\n #dist_serv_cell_URLLC_3d = np.empty([usr_locs_URLLC.shape[0],mc_locs.shape[0]]);\n #dist_serv_cell_mMTC_3d = np.empty([usr_locs_mMTC.shape[0],mc_locs.shape[0]]); \n\n for i in range(0,mc_locs.shape[0]): # Distance to all MC cells\n\n # ==> 2D distance calculation \n \n dist_serv_cell[:,i] = dsc.dist_calc(usr_lcs, mc_locs[i,:], 0, 0, '2d', np); # Calculate the distance of each eMBB application location with each MC and sort them\n \n #dist_serv_cell_eMBB[:,i] = dsc.dist_calc(usr_locs_eMBB, mc_locs[i,:], 0, 0, '2d', np); # Calculate the distance of each eMBB application location with each MC and sort them\n #dist_serv_cell_URLLC[:,i] = dsc.dist_calc(usr_locs_URLLC, mc_locs[i,:], 0, 0, '2d', np); # Calculate the distance of each URLLC application location with each MC and sort them\n #dist_serv_cell_mMTC[:,i] = dsc.dist_calc(usr_locs_mMTC, mc_locs[i,:], 0, 0,'2d', np); # Calculate the distance of each mMTC application location with each MC and sort them\n \n # ==> 3D distance calculation\n \n dist_serv_cell_3d[:,i] = dsc.dist_calc(usr_lcs, mc_locs[i,:], scn.bs_ht_mc, scn.usr_ht, '3d', np); # Calculate the distance of each eMBB application location with each MC and sort them\n \n\n #dist_serv_cell_eMBB_3d[:,i] = dsc.dist_calc(usr_locs_eMBB, mc_locs[i,:], scn.bs_ht_mc, scn.usr_ht, '3d', np); # Calculate the distance of each eMBB application location with each MC and sort them\n #dist_serv_cell_URLLC_3d[:,i] = dsc.dist_calc(usr_locs_URLLC, mc_locs[i,:], scn.bs_ht_mc, scn.usr_ht, '3d', np); # Calculate the distance of each URLLC application location with each MC and sort them\n #dist_serv_cell_mMTC_3d[:,i] = dsc.dist_calc(usr_locs_mMTC, mc_locs[i,:], scn.bs_ht_mc, scn.usr_ht,'3d', np); # Calculate the distance of each mMTC application location with each MC and sort them\n\n\n # ==> We declare empty arrays first\n #print sc_locs.shape\n \n dist_serv_sc = np.empty([usr_lcs.shape[0],num_SCBS]);\n \n #dist_serv_sc_eMBB = np.empty([usr_locs_eMBB.shape[0],num_SCBS]);\n #dist_serv_sc_URLLC = np.empty([usr_locs_URLLC.shape[0],num_SCBS]);\n #dist_serv_sc_mMTC = np.empty([usr_locs_mMTC.shape[0],num_SCBS]); \n\n dist_serv_sc_3d = np.empty([usr_lcs.shape[0],num_SCBS]);\n \n #dist_serv_sc_eMBB_3d = np.empty([usr_locs_eMBB.shape[0],num_SCBS]);\n #dist_serv_sc_URLLC_3d = np.empty([usr_locs_URLLC.shape[0],num_SCBS]);\n #dist_serv_sc_mMTC_3d = np.empty([usr_locs_mMTC.shape[0],num_SCBS]); \n\n for i in range(0,num_SCBS): # Distance to all small cells\n \n # ==> 2D distance calulation\n dist_serv_sc[:,i] = dsc.dist_calc(usr_lcs, sc_locs[i,:], 0, 0,'2d', np); # Distance of each eMBB application location with each SC\n #print dist_serv_sc[:,i]\n #dist_serv_sc_eMBB[:,i] = dsc.dist_calc(usr_locs_eMBB, sc_locs[i,:], 0, 0,'2d', np); # Distance of each eMBB application location with each SC\n #dist_serv_sc_URLLC[:,i] = dsc.dist_calc(usr_locs_URLLC, sc_locs[i,:], 0, 0,'2d', np); # Distance of each URLLC application location with each SC\n #dist_serv_sc_mMTC[:,i] = dsc.dist_calc(usr_locs_mMTC, sc_locs[i,:], 0, 0,'2d', np); # Distance of each mMTC application location with each SC\n\n # ==> 3D distance calculation\n dist_serv_sc_3d[:,i] = dsc.dist_calc(usr_lcs, sc_locs[i,:], scn.bs_ht_sc, scn.usr_ht, '3d', np); # Calculate the distance of 
each eMBB application location with each SC and sort them\n \n #dist_serv_sc_eMBB_3d[:,i] = dsc.dist_calc(usr_locs_eMBB, sc_locs[i,:], scn.bs_ht_sc, scn.usr_ht, '3d', np); # Calculate the distance of each eMBB application location with each MC and sort them\n #dist_serv_sc_URLLC_3d[:,i] = dsc.dist_calc(usr_locs_URLLC, sc_locs[i,:], scn.bs_ht_sc, scn.usr_ht, '3d', np); # Calculate the distance of each URLLC application location with each MC and sort them\n #dist_serv_sc_mMTC_3d[:,i] = dsc.dist_calc(usr_locs_mMTC, sc_locs[i,:], scn.bs_ht_sc, scn.usr_ht,'3d', np); # Calculate the distance of each mMTC application location with each MC and sort them\n\n print \"Finished Distance calculation\"\n # ======================================================\n # Limit the number of MC and SC for the SINR calculation\n\n #pdb.set_trace()\n \n # ==> eMBB users\n if inter_limit_flag == 1: # Interference limited scenario with no sectoring employed\n print \"=================================================\"\n print \"==========Interference Limited Regime============\"\n print \"=================================================\"\n\n num_MCBS_SINR = 4; # We choose the 4 closest MCs for the SINR calculation \n dist_SCBS_SINR = 100; # We choose the range of the farthest SC that will impact SINR calculation for a user to be 200 meters\n sorted_MCBS_mat, idx_MCBS_SINR = dsc.idx_mat(dist_serv_cell, num_MCBS_SINR,'minimum',np); # Distance based sorted matrix and index of the MCBS under consideration for the PL calculation\n sorted_SCBS_mat, idx_SCBS_SINR = dsc.idx_mat(dist_serv_sc, dist_SCBS_SINR, 'distance', np); # Distance based sorted matrix and index of the SCBS under consideration for the PL calculation\n #print sorted_MCBS_mat.shape\n #print sorted_SCBS_mat.shape\n #print idx_MCBS_SINR\n #print \"============\"\n #print idx_SCBS_SINR\n \n # ====================\n # Pathloss Calculation\n\n # Note: This part can be optimized even more -- Potential Compute time reduction\n\n # ==> For Small Cell \n\n print \"Initiating Pathloss Calculation for Small Cells\"\n\n PL_sc = np.empty((sorted_SCBS_mat.shape[0],sorted_SCBS_mat.shape[1])); # Initializing the Pathloss matrix \n l_nl = np.zeros((usr_lcs.shape[0], num_SCBS + mc_locs.shape[0])); # This variable will hold the LOS-NLOS values for the user. 
\n for i in range(0,sorted_SCBS_mat.shape[0]):\n for j in range(0,sorted_SCBS_mat.shape[1]):\n #print dist_serv_sc_3d[i][j]\n if sorted_SCBS_mat[i][j] != 0:\n PL_sc[i,j], l_nl[i,j] = pathloss.pathloss_CI(scn, sorted_SCBS_mat[i][j], np, dist_serv_sc_3d[i][int(idx_SCBS_SINR[i,j])], dsc, 1); # Calculating the pathloss for Small cells\n #snr_sc[i][j] = scn.transmit_power + scn.transmit_gain_sc + scn.receiver_gain - PL_sc - (scn.N + 10*np.log10(scn.sc_bw)); # This is the SNR from one Small cell \n else:\n PL_sc[i,j] = float('nan'); # Nan for no PL calc\n\n # ==> For Macro Cell\n\n print \"Initiating Pathloss Calculation for Macro Cells\"\n\n PL_mc = np.empty((sorted_MCBS_mat.shape[0], sorted_MCBS_mat.shape[1])); # Initializing the Pathloss matrix\n for i in range(0, sorted_MCBS_mat.shape[0]):\n for j in range(0, sorted_MCBS_mat.shape[1]):\n PL_mc[i,j], l_nl[i, j+ num_SCBS] = pathloss.pathloss_CI(scn, sorted_MCBS_mat[i][j], np, dist_serv_cell_3d[i][int(idx_MCBS_SINR[i,j])], dsc, 0); # Calculating the pathloss for Macro cells\n \n csvsaver.csvsaver(l_nl,[],\"LosNlos.csv\")\n\n #print l_nl.shape \n #print np.sum(l_nl)\n # ========================\n # Interference Calculation\n\n \n print \"Performing Interference Calculation\"\n interf_sc = dsc.interf(PL_sc, scn, np, scn.transmit_power, scn.transmit_gain_sc, scn.receiver_gain); # Calculate the interference matrix for small cells\n interf_mc = dsc.interf(PL_mc, scn, np, scn.max_tnsmtpow_MCBS, scn.ant_gain_MCBS, scn.rx_mc_gain); # Calculate the interference matrix for macro cells. MCs and SCs work on different frequency bands and hence do not interfere with each other\n csvsaver.csvsaver(interf_sc,[],\"InterferenceSC.csv\")\n #print interf_sc[1,:]\n\n # ====================\n # Rx Power Computation\n\n print \"Performing Received Power Calculation\"\n\n RX_sc = np.empty((PL_sc.shape[0], PL_sc.shape[1]));\n RX_mc = np.empty((PL_mc.shape[0], PL_mc.shape[1]));\n\n for i in range(0, RX_sc.shape[0]): # Small cell Received Power\n for j in range(0, RX_sc.shape[1]):\n RX_sc[i,j] = np.where(np.isnan(PL_sc[i,j]) != True, 10*np.log10((10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.receiver_gain/10)*10**(-3))/(10**(PL_sc[i,j]/10)))/(10**(scn.N/10)*scn.usr_scbw*10**(-3))), float('nan'));\n\n for i in range(0, RX_mc.shape[0]): # Macro cell Received Power\n for j in range(0, RX_mc.shape[1]):\n RX_mc[i,j] = 10*np.log10((10**(scn.max_tnsmtpow_MCBS/10)*(10**(scn.ant_gain_MCBS/10))*(10**(scn.rx_mc_gain/10)*10**(-3))/(10**(PL_mc[i,j]/10)))/(10**(scn.N/10)*scn.mc_bw*10**(-3)))\n\n # ================\n # SINR Calculation\n\n print \"Performing SINR Calculation for Small cells\"\n\n sinr_sc = np.empty((sorted_SCBS_mat.shape[0],sorted_SCBS_mat.shape[1])); # Initialize SINR array\n sinr_pad_value = 350; # This is a pad value to be padded at the end of the vectors \n #nz_idx = np.nonzero(PL_sc); # We store the non zero indices to extract the right SINR values for each user-AP pair\n \n for i in range(0,PL_sc.shape[0]):\n for j in range(0,PL_sc.shape[1]):\n sinr_sc[i,j] = np.where(np.isnan(PL_sc[i,j]) != True, 10*np.log10((10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.receiver_gain/10)*10**(-3))/(10**(PL_sc[i,j]/10)))/(interf_sc[i,j] + 10**(scn.N/10)*scn.usr_scbw*10**(-3))), float('nan')); # We subtract the received power from other small cells to obtain the sinr \n # print sinr_sc[i,:] 
10*np.log10((10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.receiver_gain/10)*10**(-3))/(10**(PL_sc[i,j]/10)))/(interf_sc[i,j] + 10**(scn.N/10)*scn.sc_bw*10**(-3)))\n # (scn.transmit_power - 30 + scn.transmit_gain_sc + scn.receiver_gain - PL_sc[i,j] - 10*np.log10(interf_sc[i,j] + 10**(scn.N/10)*scn.sc_bw*10**(-3)))\n sinr_sc[i,:] = np.where(np.isnan(sinr_sc[i,:]), sinr_pad_value, sinr_sc[i,:]);\n #sinr_sc[i, np.where(np.isnan(sinr_sc[i,:]) == True )] = np.amin(np.where(np.isnan(sinr_sc[i,:]) != True )); # Replace the None values with the minimum of that row \n #print sinr_sc[i,:]\n \n csvsaver.csvsaver(sinr_sc,[],\"SINR_SC.csv\")\n print \"Performing SINR Calculation for Macro cells\"\n\n sinr_mc = np.empty((sorted_MCBS_mat.shape[0], sorted_MCBS_mat.shape[1])); # Initialize SINR matrix\n for i in range(0,PL_mc.shape[0]):\n for j in range(0, PL_mc.shape[1]):\n sinr_mc[i,j] = 10*np.log10((10**(scn.max_tnsmtpow_MCBS/10)*(10**(scn.ant_gain_MCBS/10))*(10**(scn.rx_mc_gain/10)*10**(-3))/(10**(PL_mc[i,j]/10)))/(interf_mc[i,j] + 10**(scn.N/10)*scn.mc_bw*10**(-3)))\n #print sinr_mc[i,:]\n print \"Finished All Calculations and Returning to main Function\"\n return np.hstack((sinr_sc,sinr_mc)), sorted_SCBS_mat, usr_lcs, idx_SCBS_SINR, idx_MCBS_SINR, sinr_pad_value, PL_sc.shape[1], PL_mc.shape[1], mc_locs.shape[0], np.hstack((RX_sc, RX_mc)), l_nl\n # The above calculation has to be optimally calculated for N users and M small cells. \n\n \n \n else:\n print \"===================================================\"\n print \"=========Sectorized and Beamformed Regime==========\"\n print \"===================================================\"\n \n \n num_MCBS_SINR = 4; # We choose the 4 closest MCs for the SINR calculation \n dist_SCBS_SINR = 100; # We choose the range of the farthest SC that will impact SINR calculation for a user to be 200 meters\n sorted_MCBS_mat, idx_MCBS_SINR = dsc.idx_mat(dist_serv_cell, num_MCBS_SINR,'minimum',np); # Distance based sorted matrix and index of the MCBS under consideration for the PL calculation\n sorted_SCBS_mat, idx_SCBS_SINR = dsc.idx_mat(dist_serv_sc, dist_SCBS_SINR, 'distance', np); # Distance based sorted matrix and index of the SCBS under consideration for the PL calculation\n\n #print idx_SCBS_SINR\n #print sorted_MCBS_mat.shape\n # ==> For Macro Cell\n\n print \"\\n Sectorizing Macro Cells and Computing Interferers\"\n \n sector_UEMCBS = sectbeam.MCBS_sectorizer(np, scn, mc_locs.shape[0], mc_locs, usr_lcs) # Computing the Sectorization Matrix to compute the Interference\n l_nl = np.zeros((usr_lcs.shape[0], num_SCBS + mc_locs.shape[0])); # This variable will hold the LOS-NLOS values for the user. 
\n \n print \"Initiating Pathloss and Interference Calculation for Macro Cells\"\n \n PL_mc = np.empty((sorted_MCBS_mat.shape[0], sorted_MCBS_mat.shape[1])); # Initializing the Pathloss matrix\n interf_mc = np.zeros((sorted_MCBS_mat.shape[0], sorted_MCBS_mat.shape[1])); # This is the matrix that will hold the interference values for each UE and significant TXs \n #interf_sect_data = np.zeros((sorted_MCBS_mat.shape[0], sorted_MCBS_mat.shape[1]), dtype = object)\n for i in range(sorted_MCBS_mat.shape[0]):\n interf_sect = [] # An empty list of indices which will store the list of other interfering cells\n for j in range(sorted_MCBS_mat.shape[1]):\n interf_sect = np.where(sector_UEMCBS[i,:] == sector_UEMCBS[i,idx_MCBS_SINR[i,j]])[0] # The interfering cells\n #print (\"SOmething Else:\", np.where(sector_UEMCBS[i,:] == sector_UEMCBS[i,idx_MCBS_SINR[i,j]]))\n # print (\"MCBS of Interest:\", idx_MCBS_SINR[i,j])\n # print (\"Sector of MCBS of Interest:\", sector_UEMCBS[i,idx_MCBS_SINR[i,j]])\n # print (\"Interfering APs:\", interf_sect)\n #interf_sect_data[i,j] = interf_sect\n #PL_mc[i,j], l_nl[i, j+ num_SCBS] = pathloss.pathloss_CI(scn, sorted_MCBS_mat[i][j], np, dist_serv_cell_3d[i][j], dsc, 0); # Calculating the pathloss for Macro cells\n PL_mc[i,j], l_nl[i, j+ num_SCBS] = pathloss.pathloss_CI(scn, sorted_MCBS_mat[i][j], np, dist_serv_cell_3d[i][int(idx_MCBS_SINR[i,j])], dsc, 0); # Calculating the pathloss for Macro cells\n temp = np.empty((len(interf_sect)-1)); # An empty numpy array to hold the pathloss of interfereing cells\n #print len(interf_sect)\n #print interf_sect[0].shape[0]\n idx_temp = 0;\n for k in range(len(interf_sect)):\n #print interf_sect[k]\n if interf_sect[k] != idx_MCBS_SINR[i,j]:\n #print (\"Interference Calculation using indexes:\", interf_sect[k])\n temp[idx_temp], dummy = pathloss.pathloss_CI(scn, dist_serv_cell[i][interf_sect[k]], np, dist_serv_cell_3d[i][interf_sect[k]], dsc, 0); # Calculate the pathloss from the similar sector antennas to the UE\n idx_temp = idx_temp + 1; # Increment the temp vector index\n #print temp\n interf_mc[i,j] = np.sum((10**(scn.max_tnsmtpow_MCBS/10)*(10**(scn.ant_gain_MCBS/10))*(10**(scn.rx_mc_gain/10)*10**(-3)))/(10**(temp/10))); # Interference for User i and AP j\n \n print \"Performing SINR Calculation for Macro cells\"\n #csvsaver.csvsaver(interf_sect_data,[],'Interfering Sectors Data')\n sinr_mc = np.empty((sorted_MCBS_mat.shape[0], sorted_MCBS_mat.shape[1])); # Initialize SINR matrix\n for i in range(0,PL_mc.shape[0]):\n for j in range(0, PL_mc.shape[1]):\n sinr_mc[i,j] = 10*np.log10((10**(scn.max_tnsmtpow_MCBS/10)*(10**(scn.ant_gain_MCBS/10))*(10**(scn.rx_mc_gain/10)*10**(-3))/(10**(PL_mc[i,j]/10)))/(interf_mc[i,j] + 10**(scn.N/10)*scn.mc_bw*10**(-3)))\n \n #print interf_mc\n #print sinr_mc\n #print \"===============\"\n\n\n # ==> For Small Cell \n\n print \"Initiating Pathloss Calculation for Small Cells\"\n\n PL_sc = np.empty((sorted_SCBS_mat.shape[0],sorted_SCBS_mat.shape[1])); # Initializing the Pathloss matrix \n beam_sc = np.empty((sorted_SCBS_mat.shape[0], sorted_SCBS_mat.shape[1])); # Intializing the Beam ID matrix\n \n l_nl = np.zeros((usr_lcs.shape[0], num_SCBS + mc_locs.shape[0])); # This variable will hold the LOS-NLOS values for the user. 
\n for i in range(0,sorted_SCBS_mat.shape[0]):\n for j in range(0,sorted_SCBS_mat.shape[1]):\n #print dist_serv_sc_3d[i][j]\n if sorted_SCBS_mat[i][j] != 0:\n PL_sc[i,j], l_nl[i,j] = pathloss.pathloss_CI(scn, sorted_SCBS_mat[i][j], np, dist_serv_sc_3d[i][int(idx_SCBS_SINR[i,j])], dsc, 1); # Calculating the pathloss for Small cells\n #snr_sc[i][j] = scn.transmit_power + scn.transmit_gain_sc + scn.receiver_gain - PL_sc - (scn.N + 10*np.log10(scn.sc_bw)); # This is the SNR from one Small cell \n else:\n PL_sc[i,j] = float('nan'); # Nan for no PL calc\n\n \n # ===> Computing the Interference\n\n #print \"Computing Receive and Transmit beamforming based angles\"\n print \"Entering the Approximate Small Cell Interference Computation\"\n\n interf_sc = np.zeros((sorted_SCBS_mat.shape[0], sorted_SCBS_mat.shape[1])) # We consider 0 interference to analyze our results\n\n glob_angle_sc_rx = np.zeros((sorted_SCBS_mat.shape[0], sorted_SCBS_mat.shape[1])); # Initializing the matrix to hold UE to AP angles\n #glob_angle_sc_tx = np.empty((sorted_SCBS_mat.shape[1], sorted_SCBS_mat.shape[0])); # THis is the TX angle matrix\n \n #print usr_lcs.shape\n #print sc_locs.shape\n print \"Computing the UE based sector and APs located in it\"\n\n for i in range(sorted_SCBS_mat.shape[0]):\n for j in range(sorted_SCBS_mat.shape[1]):\n #print idx_SCBS_SINR[i,j]\n #rint usr_lcs[i,:]\n if idx_SCBS_SINR[i,j] != 'None':\n #print \"Here\"\n glob_angle_sc_rx[i,j] = dsc.angsc(usr_lcs[i,:],sc_locs[int(idx_SCBS_SINR[i,j]),:],np,scn) # Angle calculator to determine if \n else:\n glob_angle_sc_rx[i,j] = float('Nan') # Nan for the APs beyond 200m radius\n #print glob_angle_sc_rx\n csvsaver.csvsaver(usr_lcs,[],\"UELOCS.csv\")\n csvsaver.csvsaver(glob_angle_sc_rx,[],\"SCAngles.csv\")\n csvsaver.csvsaver(sc_locs,[],\"SCLocs.csv\")\n csvsaver.csvsaver(idx_SCBS_SINR,[],\"SelectSCIDX.csv\")\n csvsaver.csvsaver(PL_sc,[],\"PL_sc.csv\")\n print \"Common Sector and Average Interference Computation\"\n\n for i in range(sorted_SCBS_mat.shape[0]):\n for j in range(sorted_SCBS_mat.shape[1]):\n ap_int_idx = j; # This is our AP of interest\n interf_ap_idx = np.where(glob_angle_sc_rx[i,:] == glob_angle_sc_rx[i,ap_int_idx])[0] # These are the indexes of the APs that will be interfering with the AP of interest\n #print interf_ap_idx\n #interf_sc[i,ap_int_idx] = np.sum((scn.beam_hpbw_tx/(360))*PL_sc[i,interf_ap_idx]) \n # (10**(tx_power/10)*(10**(gain/10))*(10**(scn.receiver_gain/10)*10**(-3)))/(10**(PL_temp/10))\n #print PL_sc[i,interf_ap_idx]\n interf_sc[i,ap_int_idx] = np.sum((10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.receiver_gain/10)*10**(-3)))/(10**(PL_sc[i,interf_ap_idx]/10))) - (10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.receiver_gain/10)*10**(-3)))/(10**(PL_sc[i,ap_int_idx]/10)) # We just use the calculated PL\n csvsaver.csvsaver(interf_sc,[],\"InterferenceSC.csv\")\n\n # ===> We try the SNR regime (Best Case solution with extreme directivity)\n\n #interf_sc = np.zeros((sorted_SCBS_mat.shape[0], sorted_SCBS_mat.shape[1])) # We consider 0 interference to analyze our results\n\n sinr_sc = np.empty((sorted_SCBS_mat.shape[0],sorted_SCBS_mat.shape[1])); # Initialize SINR array\n sinr_pad_value = 350; # This is a pad value to be padded at the end of the vectors \n #nz_idx = np.nonzero(PL_sc); # We store the non zero indices to extract the right SINR values for each user-AP pair\n \n for i in range(0,PL_sc.shape[0]):\n for j in range(0,PL_sc.shape[1]):\n sinr_sc[i,j] = 
np.where(np.isnan(PL_sc[i,j]) != True, 10*np.log10((10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.receiver_gain/10)*10**(-3))/(10**(PL_sc[i,j]/10)))/(interf_sc[i,j] + 10**(scn.N/10)*scn.usr_scbw*10**(-3))), float('nan')); # We subtract the received power from other small cells to obtain the sinr \n # print sinr_sc[i,:] 10*np.log10((10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.receiver_gain/10)*10**(-3))/(10**(PL_sc[i,j]/10)))/(interf_sc[i,j] + 10**(scn.N/10)*scn.sc_bw*10**(-3)))\n # (scn.transmit_power - 30 + scn.transmit_gain_sc + scn.receiver_gain - PL_sc[i,j] - 10*np.log10(interf_sc[i,j] + 10**(scn.N/10)*scn.sc_bw*10**(-3)))\n sinr_sc[i,:] = np.where(np.isnan(sinr_sc[i,:]), sinr_pad_value, sinr_sc[i,:]);\n #sinr_sc[i, np.where(np.isnan(sinr_sc[i,:]) == True )] = np.amin(np.where(np.isnan(sinr_sc[i,:]) != True )); # Replace the None values with the minimum of that row \n #print sinr_sc[i,:] \n\n csvsaver.csvsaver(sinr_sc,[],\"SINR_SC.csv\")\n #print sinr_sc.shape \n #print sinr_mc.shape \n # ====================\n # Rx Power Computation\n\n print \"Performing Received Power Calculation\"\n\n RX_sc = np.empty((PL_sc.shape[0], PL_sc.shape[1]));\n RX_mc = np.empty((PL_mc.shape[0], PL_mc.shape[1]));\n\n for i in range(0, RX_sc.shape[0]): # Small cell Received Power\n for j in range(0, RX_sc.shape[1]):\n RX_sc[i,j] = np.where(np.isnan(PL_sc[i,j]) != True, 10*np.log10((10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.receiver_gain/10)*10**(-3))/(10**(PL_sc[i,j]/10)))/(10**(scn.N/10)*scn.usr_scbw*10**(-3))), float('nan'));\n\n for i in range(0, RX_mc.shape[0]): # Macro cell Received Power\n for j in range(0, RX_mc.shape[1]):\n RX_mc[i,j] = 10*np.log10((10**(scn.max_tnsmtpow_MCBS/10)*(10**(scn.ant_gain_MCBS/10))*(10**(scn.rx_mc_gain/10)*10**(-3))/(10**(PL_mc[i,j]/10)))/(10**(scn.N/10)*scn.mc_bw*10**(-3)))\n \n print \"Finished All Calculations and Returning to main Function\"\n return np.hstack((sinr_sc,sinr_mc)), sorted_SCBS_mat, usr_lcs, idx_SCBS_SINR, idx_MCBS_SINR, sinr_pad_value, PL_sc.shape[1], PL_mc.shape[1], mc_locs.shape[0], np.hstack((RX_sc, RX_mc)), l_nl\n\n\n\n\n# =========================\n# Pathloss function Checker\n# =========================\n\ndef pathloss_tester(scn,np,dsc): # This function helps to test the pathloss model implementation \n \n # ======================================\n # Generate the test UE and eNB locations\n\n ue_sim_x = np.arange(10,300,1).reshape(((300-10)/1,1)); # Generates the location of a single UE along the x axis\n ue_sim_y = np.zeros((1,ue_sim_x.shape[0]),dtype='int').reshape(((300-10)/1,1)); # The UE is moving along the x axis only\n eNB_loc = [min(ue_sim_x),min(ue_sim_y)]; # We place the eNB at the start point of the UE trajectory\n\n # ================================\n # Calculate the 2D and 3D distance\n\n test_dist_2d = dsc.dist_calc(np.concatenate((ue_sim_x,ue_sim_y),axis=1),eNB_loc, 0, 0,'2d',np); # Calculate the 2d distance\n test_dist_3d = dsc.dist_calc(np.concatenate((ue_sim_x,ue_sim_y),axis=1),eNB_loc, scn.usr_ht , scn.bs_ht_sc ,'3d',np); \n\n # ======================\n # Calculate the Pathloss\n\n PL_sc = np.empty((ue_sim_x.shape[0],1)); # Empty array\n for i in range(0,ue_sim_x.shape[0]):\n PL_sc[i] = pathloss.pathloss_CI(scn, test_dist_2d[i], np, test_dist_3d[i], dsc, 1); # Calculating the pathloss for Small cells\n \n # ================\n # SINR Calculation\n snr_sc = 
10*np.log10((10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.receiver_gain/10)*10**(-3))/(10**(PL_sc/10)))/(10**(scn.N/10)*scn.sc_bw*10**(-3))); # This is the SNR from one Small cell \n #snr_sc_1 = 10*np.log10((10**(55/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.receiver_gain/10)*10**(-3))/(10**(PL_sc/10)))/(10**(scn.N/10)*scn.sc_bw*10**(-3))); # This is the SNR from one Small cell \n prx_sc_others = 0; # This is the received power from other Small cells\n sinr_sc = snr_sc - prx_sc_others; # We subtract the received power from other small cells to obtain the sinr \n return sinr_sc, ue_sim_x, eNB_loc\n\n# =============================\n# Backhaul Reliability Estimate\n# =============================\n\ndef bh_reliability(scn, np, critical_time):\n\n # ==============================================\n # Establishing the Outage probability Parameters\n\n fade_margin = [-20,-15,-10,-5,0,5]; # We randomly choose a fade margin between -20 dB and 5dB for a BH link. It indicates the ratio of sensitivity to received power in dB. \n fmgn_selector = np.random.randint(0,6,1); # Fade Margin value selector\n K = 10; # We choose the rician shape parameter to be 10 based on Mona Jaber's paper \n f = (3e9/(38*1e9))*(73*1e9/(3*1e8)); # We compute the doppler shift at 73 GHz due to scattering objects between T-R pair using 10 Hz from Mona Jaber's paper as a baseline\n \n # ==================================\n # Compute the Expected Fade Duration\n\n rho = 10**(fade_margin[fmgn_selector]/10); # Convert from dB to real power\n exp_fade_dur_numr = np.array([5.0187*1e-8, 5.244*1e-7, 7.709*1e-6, 7.387*1e-4, 5.4309*1e-1, 1]); # Values from Octave for the Marcum Q Function\n fad_dur_bess_func = np.array([1.011,1.1131,2.4421, 1.2016*1e2, 1.1286*1e8, 3.1529*1e27]); # Values from Octave for the Bessel Function \n exp_fade_dur_deno = np.sqrt(2*np.pi*(K+1.0))*f*rho*np.exp(-K-(K+1.0)*np.power(rho,2))*fad_dur_bess_func[fmgn_selector]; \n exp_fade_dur = exp_fade_dur_numr[fmgn_selector]/exp_fade_dur_deno; # Expected value of the Fade duration \n outage_prob = exp((-1*critical_time)/exp_fade_dur); # Given the critical time for a given application we can compute the outage probability for the BS. 
\n\n# ===================\n# Backhaul Throughput\n# ===================\n\ndef backhaul_tput(assoc_mat, SCBS_per_MCBS, wl_mat, np, scn, dsc):\n\n # ==========================================================\n # We compute the throughput for the backhaul link of each SC\n\n #print wl_mat\n PL_SC_MC = np.empty((wl_mat.shape[0],1)); # Initialize the Pathloss matrix\n tput_SC = copy.copy(PL_SC_MC); # Initialize the Throughput matrix\n dist_SC_MC = copy.copy(PL_SC_MC); # Initialize the 3D distance matrix\n #print (\"Matrix Shape:\",dist_SC_MC.shape)\n #print (\"Association Matrix:\", assoc_mat.shape)\n #print assoc_mat\n\n # ===> Computing the 3D distance \n for k in range(0,assoc_mat.shape[0]):\n #print (\"K:\",k) \n #print (\"Wireless Matrix:\", wl_mat[k,:])\n #print (\"Association Matrix Values:\", assoc_mat[k,next((i for i, x in enumerate(wl_mat[k,:].tolist()) if x), None)])\n #print (\"Distance:\", np.sqrt(assoc_mat[k,next((i for i, x in enumerate(wl_mat[k,:].tolist()) if x), None)]**2 + (scn.bs_ht_mc-scn.bs_ht_sc)**2))\n if next((i for i, x in enumerate(wl_mat[k,:].tolist()) if x), None) != None:\n dist_SC_MC[k] = np.sqrt(assoc_mat[k,next((i for i, x in enumerate(wl_mat[k,:].tolist()) if x), None)]**2 + (scn.bs_ht_mc-scn.bs_ht_sc)**2); # The 3D distance from the MC for a given SC\n\n # ===> Computing the Pathloss for the Small Cells to the Macro cells\n\n for l in range(0, tput_SC.shape[0]):\n if next((i for i, x in enumerate(wl_mat[l,:].tolist()) if x), None) != None:\n #print assoc_mat[l,next((i for i, x in enumerate(assoc_mat[l,:].tolist()) if x), None)]\n PL_SC_MC[l], flg = pathloss.pathloss_CI(scn, assoc_mat[l,next((i for i, x in enumerate(wl_mat[l,:].tolist()) if x), None)], np, dist_SC_MC[l], dsc, 2); # Calculating the pathloss for Small cells to Macro Cells\n else:\n idx_BH_chx = np.random.randint(0,3); # BH Relaxation choice is added here\n PL_SC_MC[l] = 0; # This is the Fiber based backhaul\n tput_SC[l] = scn.fib_BH_capacity + scn.perct_incr[idx_BH_chx]*scn.avg_fib_BH_capacity; # Fiber backhaul capacity\n\n #print PL_SC_MC\n # ===> Computing the Throughput for the Small Cells to Macro Cells\n\n #interf_sc_mc = dsc.interf(PL_SC_MC, scn, np); # Calculate the interference matrix for small cells\n l_idx = 0; \n u_idx = SCBS_per_MCBS[0];\n #print SCBS_per_MCBS\n for j in range(0,tput_SC.shape[0]):\n if j < u_idx: \n tput_SC[j] = np.where(PL_SC_MC[j] != 0, (scn.sc_bw/SCBS_per_MCBS[l_idx])*np.log2(1+(10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.ant_gain_MCBS/10)*10**(-3))/(10**(PL_SC_MC[j]/10)))/(10**(scn.N/10)*(scn.sc_bw/SCBS_per_MCBS[l_idx])*10**(-3))), tput_SC[j]); # We subtract the received power from other small cells to obtain the sinr \n else:\n l_idx = l_idx + 1; # Increment the lower index\n u_idx = u_idx + SCBS_per_MCBS[l_idx]; # Increment the \n tput_SC[j] = np.where(PL_SC_MC[j] != 0, (scn.sc_bw/SCBS_per_MCBS[l_idx])*np.log2(1+(10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.ant_gain_MCBS/10)*10**(-3))/(10**(PL_SC_MC[j]/10)))/(10**(scn.N/10)*(scn.sc_bw/SCBS_per_MCBS[l_idx])*10**(-3))), tput_SC[j]); # We subtract the received power from other small cells to obtain the sinr \n return tput_SC\n\n",
"id": "3006658",
"language": "Python",
"matching_score": 7.445730686187744,
"max_stars_count": 1,
"path": "scenario_gen.py"
},
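The SINR expressions in sinr_gen() above repeat the same dBm-to-linear conversion inline: received power is the EIRP reduced by the pathloss, converted to watts, and divided by interference plus thermal noise over the allocated bandwidth. A minimal sketch of that arithmetic follows; the helper names and example figures are illustrative only, with the power and gain numbers taken from the scenario constants (transmit_power = 23 dBm, transmit_gain_sc = 30 dBi, receiver_gain = 14 dBi, N = -174 dBm/Hz):

import numpy as np

def dbm_to_watt(p_dbm):
    # Convert a power level from dBm to watts: 10**(P/10) mW -> W.
    return 10 ** (p_dbm / 10.0) * 1e-3

def sinr_db(tx_power_dbm, tx_gain_dbi, rx_gain_dbi, pathloss_db,
            interference_w, noise_dens_dbm_hz=-174.0, bandwidth_hz=100e6):
    # Received signal power in watts: EIRP minus pathloss, all in the dB domain first.
    rx_w = dbm_to_watt(tx_power_dbm + tx_gain_dbi + rx_gain_dbi - pathloss_db)
    # Thermal noise power over the allocated bandwidth, in watts.
    noise_w = dbm_to_watt(noise_dens_dbm_hz) * bandwidth_hz
    return 10 * np.log10(rx_w / (interference_w + noise_w))

# Example: small-cell style numbers (23 dBm TX power, 30 dBi TX gain, 14 dBi RX gain,
# 110 dB pathloss, 1 pW of aggregate interference).
print(sinr_db(23, 30, 14, 110, interference_w=1e-12))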
{
"content": "# ========================================\n# This file defines the Scenario Variables\n# ========================================\n\nclass scenario_var:\n\t\n\tc = 3e8; # Speed of light in m/s\n\tfc_mc = 3.55e9; # Carrier frequency of the MC (Qualcomm white paper)\n\tfc_sc = 27e9; # Carrier frequency of the SC\n\tfc_bh_sc = 73e9; # Carrier frequency for the wireless BH in SC\n\tusr_ht = 1.5; # User height\n\tbs_ht_sc = 10; # Small cell height\n\tbs_ht_mc = 25; # Macro cell height\n\tsimulation_area = 0.36*1e6; #The area is in square meters; The shape is also a square \n\tMCBS_intersite = 200; #Intersite distance for Macro BS\n\tSCBS_intersite = 20; #Intersite distance for Small cell BS\n\t#SCBS_per_MCBS = np.random.randint(3,10,size=1); # Create a set for number of small cells per macro BS\n\t#UE_density_eMBB = 10; #Number of eMBB devices per TRP (5GPPP white paper)\n\tUE_density_URLLC = (4000/1e6); #Number of URLLC devices per sq. m (5GPPP white paper)\n\tUE_density_mMTC = 24000; #Number of mMTC devices per Macro (5GPPP white paper)\n\t#UE_density_eMBB = 24000; #Number of URLLC UEs per Macro BS\n\tant_gain_MCBS = 17; # dBi gain [5G PPP doc]\n\tmax_tnsmtpow_MCBS = 49; # dBm gain per band (in 20 MHz) [5G PPP doc]\n\ttransmit_gain_sc = 30; # This value is in dBi [5G PPP doc]\n\treceiver_gain = 14; # This value is in dBi for UE-SC network [KTH Paper]\n\trx_mc_gain = 0; # This value is in dB for UE-MC network [5G PPP doc] \n\ttransmit_power = 23; # This value is in dBm [5G PPP doc]\n\t#mMTC_tnsmt_power = 20; # This value is in dBm \n\t#other_losses = 20; # This is in dB (due to cable losses, penetration, reflection, etc. )\n\tsc_bw = 1e9; # 1 GHz bandwidth for the small cells\n\tmc_bw = 20*1e6; # 20 MHz bandwidth for the UE on LTE macro cells\n\tN = -174; # This is the noise spectral density in dbm/Hz\n\tmin_num_hops = 1; # If a local breakout exists\n\tmax_num_hops = 4; # Maximum hops to the IMS core\n\twl_bh_bp = 0.25*MCBS_intersite; # This is the distance beyond which a wired backhaul should be used (Can be changed later to the specifications) \n\tnum_appl_types = 3; # We current have a broad category of 3 application types\n\tmax_num_appl_UE = 3; # Maximum number of applications on any given UE\n\tnum_users_min = 150; # Minimum number of users in the scenario\n\tnum_users_max = 300; # Maximum number of users in the scenario\n\tuser_steps_siml = 25; # For the simulation we increase users in steps of 50\n\teMBB_minrate = 1e8; # 100 mbps minimum required data rate for most eMBB applications\n\tfib_BH_capacity = 1e9; # 1Gbps of fibre backhaul capacity (Find a reference for this)\n\tfib_BH_MC_capacity = 10e9; # 10Gbps of fiber backhaul for MCs\n\twl_link_delay = 1*1e-3; # 1 ms link delay for the wireless link [Mona Jaber Paper] \n\twrd_link_delay = 1*1e-3; # 1-7 ms link delay for the wired link [Mona Jaber Paper]\n\teMBB_latency_req = 3*1e-3; # 3 ms link latency requirement for the eMBB applications\n\tavg_fib_BH_capacity = 1.379*1e9; # This is the average backhaul usage for SC\n\tavg_fib_BH_MC_capacity = 9.325*1e9; # This is the average backhaul usage for MC \n\tperct_incr = [0.1, 0.2, 0.5, 1.0]; # This is the various percentages of increment of BH \n\tMCMC_iter = 100; # Number of Monte Carlo Iterations\n\tnum_Subcarriers_MCBS = 1200; # LTE number of subcarriers\n\tnum_Subcarriers_SCBS = 3300; # 5G NR number of subcarriers \n\tusr_scbw = 2*1e8; # 100 MHz bandwidth per user \n\tmMTC_bw = 180*1e3; # Device Bandwidth (Guard Band Operations considered)\n\tmMTC_maxrate = 1e6; # Device 
data rate\n\teNB_bw = 80*1e6; # Bandwidth for the Macro Cells (Qualcomm Media Release)\n\tmMTC_maxrate = [1e3, 1e4]; # Device data rate options (overrides the scalar mMTC_maxrate defined above)\n\tBW_SC = [50*1e6, 100*1e6, 200*1e6]; # Small cell bandwidth values (values expressed in Hz)\n\tBW_MC = [1.5*1e6, 3*1e6, 5*1e6, 10*1e6, 20*1e6]; # Macro cell bandwidth values (values expressed in Hz)\n\tbeam_hpbw_rx = 45 # Assuming a HPBW of 45 degrees at the receiver (UE) [KTH Paper]\n\t#beam_hpbw_tx = 30 # Assuming a HPBW of 30 degrees at the transmitter (Small Cells)\n \t\n",
"id": "962853",
"language": "Python",
"matching_score": 0.5466541647911072,
"max_stars_count": 1,
"path": "scenario_var.py"
},
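macro_cell() in scenario_gen.py builds the macro-BS grid from these constants through dsc.gridder, whose implementation is not shown in this section. The sketch below is an assumption of what that permutation step presumably produces, rebuilt with np.meshgrid and the scenario_var values simulation_area = 0.36e6 m^2 and MCBS_intersite = 200 m:

import numpy as np

# Grid parameters taken from scenario_var: 0.36 km^2 square area, 200 m intersite distance.
simulation_area = 0.36e6   # m^2
MCBS_intersite = 200.0     # m

offset = MCBS_intersite / 2.0
axis = np.arange(offset, np.sqrt(simulation_area), MCBS_intersite)  # 1-D grid coordinates

# Cartesian product of the 1-D coordinates gives the macro BS grid
# (assumed equivalent to what dsc.gridder returns).
xx, yy = np.meshgrid(axis, axis)
macro_cell_locations = np.column_stack((xx.ravel(), yy.ravel()))
print(macro_cell_locations.shape)   # (9, 2) for a 600 m x 600 m area with 200 m spacing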
{
"content": "#!/usr/bin/env python \n\n# =============================\n# Import the Necessary Binaries\n# =============================\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nfrom scenario_var import scenario_var \nimport copy \nimport os, sys\nimport plotter\nimport zipfile\nimport csvsaver\n\n# =======================\n# Generate Class Variable\n# =======================\n\nscn = scenario_var(); # Getting the class object\n\n# ==================================\n# Initialize the Necessary Variables\n# ==================================\n\n\nMCMC_iter = scn.MCMC_iter; # Number of Iterations to be analyzed\nsimdata_path = os.path.dirname(os.getcwd()) + '/UserAssociation/Data/Process/'\nconstraint_fp = {'Baseline':'0000', 'DC':'1000', 'DC_MRT':'1100','DC_BHCAP':'1010', 'DC_BHCAP_LAT':'1011', 'DC_LAT':'1001', 'DC_MRT_LAT':'1101', 'SA_MRT':'0100','SA_BHCAP':'0010','SA_BHCAP_LAT':'0011','SA_LAT':'0001','SA_MRT_LAT':'0101', 'DC_MRT_BHCAP':'1110', 'DC_MRT_BHCAP_LAT':'1111', 'SA_MRT_BHCAP':'0110','SA_MRT_BHCAP_LAT':'0111'}\nnum_iter = ((scn.num_users_max - scn.num_users_min)/scn.user_steps_siml); \n\nrate_matrix_DC = []\nrate_matrix_DC_MRT = []\nrate_matrix_DC_MRT_LAT = []\nrate_matrix_DC_BHCAP_LAT = []\nrate_matrix_DC_BHCAP = []\nrate_matrix_DC_LAT = []\nrate_matrix_SA_MRT = []\nrate_matrix_SA_LAT = []\nrate_matrix_SA_BHCAP = []\nrate_matrix_SA_BHCAP_LAT = []\nrate_matrix_SA_MRT_LAT = []\nrate_matrix_SA = []\nrate_matrix_DC_MRT_BHCAP = []\nrate_matrix_DC_MRT_BHCAP_LAT = []\nrate_matrix_SA_MRT_BHCAP = []\nrate_matrix_SA_MRT_BHCAP_LAT = []\n\n\nbhutil_val_DC = []\nbhutil_val_DC_BHCAP = []\nbhutil_val_DC_BHCAP_LAT = []\nbhutil_val_DC_MRT_BHCAP = []\nbhutil_val_DC_MRT_BHCAP_LAT = []\n\nlatprov_DC_MRT_LAT = []\nlatprov_DC = []\nlatprov_DC_BHCAP_LAT = []\nlatprov_DC_MRT_BHCAP_LAT = []\nlatprov_DC_LAT = []\navail_bh = []\n\nNet_Throughput = np.empty((MCMC_iter, num_iter));\nNet_Throughput_DC = copy.deepcopy(Net_Throughput);\nNet_Throughput_DC_MRT = copy.deepcopy(Net_Throughput);\nNet_Throughput_DC_BHCAP = copy.deepcopy(Net_Throughput);\nNet_Throughput_DC_BHCAP_LAT = copy.deepcopy(Net_Throughput);\nNet_Throughput_DC_LAT = copy.deepcopy(Net_Throughput);\nNet_Throughput_SA_MRT = copy.deepcopy(Net_Throughput);\nNet_Throughput_SA_LAT = copy.deepcopy(Net_Throughput);\nNet_Throughput_SA_BHCAP = copy.deepcopy(Net_Throughput);\nNet_Throughput_SA_BHCAP_LAT = copy.deepcopy(Net_Throughput);\nNet_Throughput_SA_MRT_LAT = copy.deepcopy(Net_Throughput);\nNet_Throughput_DC_MRT_LAT = copy.deepcopy(Net_Throughput);\nNet_Throughput_DC_MRT_BHCAP = copy.deepcopy(Net_Throughput);\nNet_Throughput_DC_MRT_BHCAP_LAT = copy.deepcopy(Net_Throughput);\nNet_Throughput_SA_MRT_BHCAP = copy.deepcopy(Net_Throughput);\nNet_Throughput_SA_MRT_BHCAP_LAT = copy.deepcopy(Net_Throughput);\n\n\nB_Dat_DR = copy.deepcopy(Net_Throughput);\nB_Dat_DR_fs = copy.deepcopy(Net_Throughput);\nB_Dat_DR_sn = copy.deepcopy(Net_Throughput);\nB_Dat_DR_sn_fs = copy.deepcopy(Net_Throughput);\n\niters_infeas = [0]*num_iter; # Infeasible iteration numbers \niters_infeas_DC = [0]*num_iter; \niters_infeas_DC_MRT = [0]*num_iter;\niters_infeas_DC_BHCAP = [0]*num_iter;\niters_infeas_DC_BHCAP_LAT = [0]*num_iter;\niters_infeas_DC_LAT = [0]*num_iter;\niters_infeas_SA_MRT = [0]*num_iter;\niters_infeas_SA_LAT = [0]*num_iter;\niters_infeas_SA_BHCAP = [0]*num_iter;\niters_infeas_SA_BHCAP_LAT = [0]*num_iter;\niters_infeas_SA_MRT_LAT = [0]*num_iter;\niters_infeas_DC_MRT_LAT = [0]*num_iter;\niters_infeas_DC_MRT_BHCAP = 
[0]*num_iter;\niters_infeas_DC_MRT_BHCAP_LAT = [0]*num_iter;\niters_infeas_SA_MRT_BHCAP = [0]*num_iter;\niters_infeas_SA_MRT_BHCAP_LAT = [0]*num_iter;\n\n\n\niters_timeout = [0]*num_iter; # Infeasible iteration numbers \niters_timeout_DC = [0]*num_iter; \niters_timeout_DC_MRT = [0]*num_iter;\niters_timeout_DC_BHCAP = [0]*num_iter;\niters_timeout_DC_BHCAP_LAT = [0]*num_iter;\niters_timeout_DC_LAT = [0]*num_iter;\niters_timeout_SA_MRT = [0]*num_iter;\niters_timeout_SA_LAT = [0]*num_iter;\niters_timeout_SA_BHCAP = [0]*num_iter;\niters_timeout_SA_BHCAP_LAT = [0]*num_iter;\niters_timeout_SA_MRT_LAT = [0]*num_iter;\niters_timeout_DC_MRT_LAT = [0]*num_iter;\niters_timeout_DC_MRT_BHCAP = [0]*num_iter;\niters_timeout_DC_MRT_BHCAP_LAT = [0]*num_iter;\niters_timeout_SA_MRT_BHCAP = [0]*num_iter;\niters_timeout_SA_MRT_BHCAP_LAT = [0]*num_iter;\n\n\nBase_DR = [];\nBase_DR_fs = [];\napplication_DR = [];\napplication_DR_DC = [];\napplication_DR_DC_MRT = [];\napplication_DR_DC_BHCAP = [];\napplication_DR_DC_BHCAP_LAT = [];\napplication_DR_DC_LAT = [];\napplication_DR_SA_MRT = [];\napplication_DR_SA_LAT = [];\napplication_DR_SA_BHCAP = [];\napplication_DR_SA_BHCAP_LAT = [];\napplication_DR_SA_MRT_LAT = [];\napplication_DR_DC_MRT_LAT = [];\napplication_DR_DC_MRT_BHCAP = [];\napplication_DR_DC_MRT_BHCAP_LAT = [];\napplication_DR_SA_MRT_BHCAP = [];\napplication_DR_SA_MRT_BHCAP_LAT = [];\n\n\nAU_Base_DR = np.zeros((MCMC_iter, num_iter))\nAU_Base_DR_fs = np.zeros((MCMC_iter, num_iter))\nAU_DR = np.zeros((MCMC_iter, num_iter))\nAU_DR_DC = np.zeros((MCMC_iter, num_iter))\nAU_DR_DC_MRT = np.zeros((MCMC_iter, num_iter))\nAU_DR_DC_BHCAP = np.zeros((MCMC_iter, num_iter))\nAU_DR_DC_BHCAP_LAT = np.zeros((MCMC_iter, num_iter))\nAU_DR_DC_LAT = np.zeros((MCMC_iter, num_iter))\nAU_DR_SA_MRT = np.zeros((MCMC_iter, num_iter))\nAU_DR_SA_LAT = np.zeros((MCMC_iter, num_iter))\nAU_DR_SA_BHCAP = np.zeros((MCMC_iter, num_iter))\nAU_DR_SA_BHCAP_LAT = np.zeros((MCMC_iter, num_iter))\nAU_DR_SA_MRT_LAT = np.zeros((MCMC_iter, num_iter)) \nAU_DR_DC_MRT_LAT = np.zeros((MCMC_iter, num_iter))\nAU_DR_DC_MRT_BHCAP = np.zeros((MCMC_iter, num_iter))\nAU_DR_DC_MRT_BHCAP_LAT = np.zeros((MCMC_iter, num_iter))\nAU_DR_SA_MRT_BHCAP = np.zeros((MCMC_iter, num_iter))\nAU_DR_SA_MRT_BHCAP_LAT = np.zeros((MCMC_iter, num_iter))\n\ntime_DC = np.zeros((MCMC_iter, num_iter))\ntime_DC_MRT = np.zeros((MCMC_iter, num_iter))\ntime_SA_MRT = np.zeros((MCMC_iter, num_iter))\ntime_DC_MRT_BHCAP = np.zeros((MCMC_iter, num_iter))\ntime_DC_MRT_BHCAP_LAT = np.zeros((MCMC_iter, num_iter))\ntime_DC_MRT_LAT = np.zeros((MCMC_iter, num_iter))\ntime_SA_MRT_BHCAP = np.zeros((MCMC_iter, num_iter))\ntime_SA_MRT_BHCAP_LAT = np.zeros((MCMC_iter, num_iter))\ntime_SA_MRT_LAT = np.zeros((MCMC_iter, num_iter))\ntime_SA = np.zeros((MCMC_iter, num_iter))\n\nDC_avg_rt = []\nDC_MRT_avg_rt = []\nSA_avg_rt = []\n\navg_idx = []; # This is for calculating the average application throughput \n\n\nDC_BW = []\nDC_MRT_BW_MC = []\nDC_MRT_BW_SC = []\nDC_MRT_BW_TOT = []\nSINR_DC_BW = []\nSINR_DC_MRT_BW_MC = []\nSINR_DC_MRT_BW_SC = []\nSINR_SA_BW = []\nidx_MRT_BW_SC = []\nidx_MRT_BW_MC = []\n# ========================\n# Jain's Fairness Function\n# ========================\n\ndef jains_fairness(Data, idx_vec):\n\t#print idx_vec\n\t#mean_vec = []; # List to hold the mean values of all iterations\n\t#x2_mean_vec = []; # List to hold the variance of all iterations\n\tjfr_vec = []; # Jain's Fairness Index\n\tidx_begin = 0; # Starting index \n\t# print idx_vec\n\t# print len(idx_vec)\n\t# print 
len(Data)\n\tfor z in range(0,len(idx_vec)):\n\t\tif idx_vec[z] != 0:\n\t\t\t#print idx_vec[z]\n\t\t\tD_Base = Data[idx_begin:idx_begin+int(idx_vec[z])];\n\t\t\tdenom = np.mean(np.power(D_Base,2));\n\t\t\tnum = np.mean(D_Base);\n\t\t\tjfr_vec.append((num**2)/denom);\n\t\t\tidx_begin = idx_begin + int(idx_vec[z]); # Increasing the index\t\n\n\treturn jfr_vec\n\n\n# =====================\n# Zero Division Checker\n# =====================\n\ndef zero_div(num, denom):\n\toutput = np.empty((num.shape[0],1))\n\tfor i in range(0, denom.shape[0]):\n\t\tif denom[i] == 0:\n\t\t\toutput[i] = 0\n\t\telse:\n\t\t\toutput[i] = num[i]/denom[i]\n\treturn output \n\ndef baseline_cal(i,k,simdata_path):\n\tfilename = simdata_path + 'Baseline' + str(i) + str(k) + '.npz';\n\n\tif zipfile.ZipFile(filename, mode='r',).testzip() == None:\n\t\t#print simdata_path + 'Baseline' + str(i) + str(k) + '.npz'\n\t\tB_Dat = np.load(simdata_path + 'Baseline' + str(i) + str(k) + '.npz')\n\t\t#B_Dat_fs = np.load(simdata_path + 'Baseline_minrate' + str(i) + str(k) + '.npz')\n\t\tB_Dat_DR = B_Dat['arr_0']\n\t\tB_DAT_user = B_Dat['arr_1']\n\t\t#B_DAT_DR_fs = B_Dat_fs['arr_0']\n\t\t#B_DAT_DR_user = B_Dat_fs['arr_1']\n\t\treturn B_Dat_DR, B_DAT_user\n\telse:\n\t\tprint (\"Erroneous CRC-32 for file:\", 'Baseline' + str(i) + str(k) + '.npz')\n\t\treturn 0,0\n\ndef user_count(Optim_mat):\n\tAssoc_users = 0; # Initialize the number of associated users\n\tfor i in range(Optim_mat.shape[0]):\n\t\tfor j in range(Optim_mat.shape[1]):\n\t\t\tif Optim_mat[i,j] == 1:\n\t\t\t\tAssoc_users = Assoc_users + 1;\n\t\t\t\tbreak;\n\treturn Assoc_users\n\n# ==============\n# Data Extractor\n# ==============\n\nfor i in range(0,MCMC_iter):\n\n\t# ================================\n\t# Load the Data from the Optimizer\n\n\tBaseline_dat = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['Baseline'] + '.npz', allow_pickle='True') # Single Association\n\tDat_DC = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['DC'] + '.npz', allow_pickle='True')\n\tDat_DC_MRT = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['DC_MRT'] + '.npz', allow_pickle='True')\n\tDat_DC_BHCAP = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['DC_BHCAP'] + '.npz', allow_pickle='True')\n\tDat_DC_BHCAP_Lat = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['DC_BHCAP_LAT'] + '.npz', allow_pickle='True')\n\tDat_DC_Lat = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['DC_LAT'] + '.npz', allow_pickle='True')\n\tDat_SA_MRT = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['SA_MRT'] + '.npz', allow_pickle='True')\n\tDat_SA_LAT = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['SA_LAT'] + '.npz', allow_pickle='True')\n\tDat_SA_BHCAP = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['SA_BHCAP'] + '.npz', allow_pickle='True')\n\tDat_SA_BHCAP_LAT = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['SA_BHCAP_LAT'] + '.npz', allow_pickle='True')\n\tDat_SA_MRT_LAT = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['SA_MRT_LAT'] + '.npz', allow_pickle='True')\n\tDat_DC_MRT_LAT = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['DC_MRT_LAT'] + '.npz', allow_pickle='True')\n\tDat_DC_MRT_BHCAP = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['DC_MRT_BHCAP'] + '.npz', allow_pickle='True')\n\tDat_DC_MRT_BHCAP_LAT = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['DC_MRT_BHCAP_LAT'] + '.npz', allow_pickle='True')\n\tDat_SA_MRT_BHCAP = np.load(simdata_path +'_'+ str(i) 
+'dat_' + constraint_fp['SA_MRT_BHCAP'] + '.npz', allow_pickle='True')\n\tDat_SA_MRT_BHCAP_LAT = np.load(simdata_path +'_'+ str(i) +'dat_' + constraint_fp['SA_MRT_BHCAP_LAT'] + '.npz', allow_pickle='True')\n\n\n\tData = Baseline_dat['arr_0'];\n\tData_DC = Dat_DC['arr_0'];\n\tData_DC_MRT = Dat_DC_MRT['arr_0'];\n\tData_DC_BHCAP = Dat_DC_BHCAP['arr_0'];\n\tData_DC_BHCAP_LAT = Dat_DC_BHCAP_Lat['arr_0'];\n\tData_DC_LAT = Dat_DC_Lat['arr_0'];\n\tData_SA_MRT = Dat_SA_MRT['arr_0'];\n\tData_SA_LAT = Dat_SA_LAT['arr_0'];\n\tData_SA_BHCAP = Dat_SA_BHCAP['arr_0'];\n\tData_SA_BHCAP_LAT = Dat_SA_BHCAP_LAT['arr_0'];\n\tData_SA_MRT_LAT = Dat_SA_MRT_LAT['arr_0'];\n\tData_DC_MRT_LAT = Dat_DC_MRT_LAT['arr_0'];\n\tData_DC_MRT_BHCAP = Dat_DC_MRT_BHCAP['arr_0'];\n\tData_DC_MRT_BHCAP_LAT = Dat_DC_MRT_BHCAP_LAT['arr_0'];\n\tData_SA_MRT_BHCAP = Dat_SA_MRT_BHCAP['arr_0'];\n\tData_SA_MRT_BHCAP_LAT = Dat_SA_MRT_BHCAP_LAT['arr_0'];\n\n\tif i == 1:\n\t\tif Data_DC.item()['Status'+str(num_iter-1)] == 2:\n\t\t\tbhutil_val_DC = (Data_DC.item()['BHUTIL'+str(num_iter-1)].tolist())\n\t\t\tlatprov_DC = (Data_DC.item()['LatenOff'+str(num_iter-1)].tolist())\n\t\telse:\n\t\t\tbhutil_val_DC = (np.zeros(Data_DC.item()['APs'+str(num_iter-1)]).tolist())\n\t\t\tlatprov_DC = (np.zeros(Data_DC.item()['APs'+str(num_iter-1)]).tolist())\n\t\t\n\t\tif Data_DC_LAT.item()['Status'+str(num_iter-1)] == 2:\n\t\t\tlatprov_DC_LAT = (Data_DC_LAT.item()['LatenOff'+str(num_iter-1)])\n\t\t\t#print latprov_DC_LAT\n\t\telse:\n\t\t\tlatprov_DC_LAT = (np.zeros(Data_DC_LAT.item()['APs'+str(num_iter-1)]).tolist())\n\n\t\tif Data_DC_BHCAP_LAT.item()['Status'+str(num_iter-1)] == 2: \n\t\t\tbhutil_val_DC_BHCAP_LAT = (Data_DC_BHCAP_LAT.item()['BHUTIL'+str(num_iter-1)].tolist())\n\t\t\tlatprov_DC_BHCAP_LAT = (Data_DC_BHCAP_LAT.item()['LatenOff'+str(num_iter-1)].tolist())\n\t\telse: \n\t\t\tbhutil_val_DC_BHCAP_LAT = (np.zeros(Data_DC_BHCAP_LAT.item()['APs'+str(num_iter-1)]).tolist())\n\t\t\tlatprov_DC_BHCAP_LAT = (np.zeros(Data_DC_BHCAP_LAT.item()['APs'+str(num_iter-1)]).tolist())\t\n\t\t\n\t\tif Data_DC_BHCAP.item()['Status'+str(num_iter-1)] == 2: \n\t\t\tbhutil_val_DC_BHCAP = (Data_DC_BHCAP.item()['BHUTIL'+str(num_iter-1)].tolist())\n\t\telse:\n\t\t\tbhutil_val_DC_BHCAP = (np.zeros(Data_DC_BHCAP.item()['APs'+str(num_iter-1)]).tolist())\n\n\t\tif Data_DC_MRT_BHCAP.item()['Status'+str(num_iter-1)] == 2: \n\t\t\tbhutil_val_DC_MRT_BHCAP = (Data_DC_MRT_BHCAP.item()['BHUTIL'+str(num_iter-1)].tolist())\n\t\telse:\n\t\t\tbhutil_val_DC_MRT_BHCAP = (np.zeros(Data_DC_MRT_BHCAP.item()['APs'+str(num_iter-1)]).tolist())\n\n\t\tif Data_DC_MRT_BHCAP_LAT.item()['Status'+str(num_iter-1)] == 2: \n\t\t\tbhutil_val_DC_MRT_BHCAP_LAT = (Data_DC_MRT_BHCAP_LAT.item()['BHUTIL'+str(num_iter-1)].tolist())\n\t\t\tlatprov_DC_MRT_BHCAP_LAT = (Data_DC_MRT_BHCAP_LAT.item()['LatenOff'+str(num_iter-1)].tolist())\n\t\telse:\n\t\t\tbhutil_val_DC_MRT_BHCAP_LAT = (np.zeros(Data_DC_MRT_BHCAP_LAT.item()['APs'+str(num_iter-1)]).tolist())\n\t\t\tlatprov_DC_MRT_BHCAP_LAT = (np.zeros(Data_DC_MRT_BHCAP_LAT.item()['APs'+str(num_iter-1)]).tolist())\n\n\t\tif Data_DC_MRT_LAT.item()['Status'+str(num_iter-1)] == 2: \n\t\t\tlatprov_DC_MRT_LAT = (Data_DC_MRT_LAT.item()['LatenOff'+str(num_iter-1)].tolist())\n\t\telse:\n\t\t\tlatprov_DC_MRT_LAT = (np.zeros(Data_DC_MRT_LAT.item()['APs'+str(num_iter-1)]).tolist())\t\n\n\n\n\t\t#print Data_DC_LAT.item()['X_optimal_data'+str(1)]\n\t\tavail_bh = Data_DC.item()['AvailBHUtil_SC'+str(num_iter-1)]\n\t\t#print avail_bh\n\n\t#if i == 1:\n\tif Data_DC.item()['Status'+str(0)] 
== 2:\n\t\t#temp = Data_DC.item()['Rates'+str(1)]\n\t\tif i == 1:\n\t\t\trate_matrix_DC = np.sum(Data_DC.item()['Rates'+str(0)], axis = 1).tolist()\n\t\t\t#print len(rate_matrix_DC)\n\t\tDC_avg_rt = DC_avg_rt + (np.sum(Data_DC.item()['Rates'+str(0)], axis = 1).tolist())\n\t\t#print len(DC_avg_rt)\n\n\t\t# if i == 0:\n\t\t# \tDC_avg_rt = rate_matrix_DC\n\t\t# else:\n\t\t# \tDC_avg_rt = [x + y for x, y in zip(rate_matrix_DC, DC_avg_rt)]\n\tif Data.item()['Status'+str(0)] == 2:\n\t\t#temp1 = Data.item()['Rates'+str(1)]\n\t\tif i == 1:\n\t\t\trate_matrix_SA = np.sum(Data.item()['Rates'+str(0)], axis = 1).tolist()\n\t\tSA_avg_rt = SA_avg_rt + (np.sum(Data.item()['Rates'+str(0)], axis = 1).tolist())\n\t\t# if i == 0:\n\t\t# \tSA_avg_rt = rate_matrix_SA\n\t\t# else:\n\t\t# \tSA_avg_rt = [x + y for x, y in zip(rate_matrix_SA, SA_avg_rt)]\n\n\tif Data_DC_MRT.item()['Status'+str(num_iter-1)] == 2:\n\t\tglobal optim_val\n\t\tglobal iter_num\n\n\t\titer_num = str(num_iter-1) + str(i); \n\t\toptim_val = Data_DC_MRT.item()['X_optimal_data'+str(num_iter-1)]\n\t\n\tif Data_DC_MRT.item()['Status'+str(0)] == 2:\n\t\t\n\t\trate_matrix_DC_MRT = np.sum(Data_DC_MRT.item()['Rates'+str(0)], axis=1).tolist()\n\n\n\t\t#print Data_DC_MRT.item()['Optimal_BW'+str(0)].shape\n\t\t#print len(Data_DC.item()['AvailBHUtil_SC'+str(0)])\n\t\t# SINR_DC_MRT_BW_TOT = np.ones((Data_DC_MRT.item()['SINR'+str(num_iter-1)].shape[0],Data_DC_MRT.item()['SINR'+str(num_iter-1)].shape[1]))\n\t\t# SINR_DC_MRT_BW_MC = np.ones((Data_DC_MRT.item()['SINR'+str(num_iter-1)].shape[0],1))*120\n\t\t# SINR_DC_MRT_BW_SC = np.ones((Data_DC_MRT.item()['SINR'+str(num_iter-1)].shape[0],1))*120\n\t\t\n\t\t# DC_MRT_BW_SC = np.sum(Data_DC_MRT.item()['Optimal_BW'+str(num_iter-1)][:,:len(Data_DC.item()['AvailBHUtil_SC'+str(num_iter-1)])], axis = 1).tolist()\n\t\t# DC_MRT_BW_MC = np.sum(Data_DC_MRT.item()['Optimal_BW'+str(num_iter-1)][:,len(Data_DC.item()['AvailBHUtil_SC'+str(num_iter-1)]):], axis = 1).tolist()\n\t\t# DC_MRT_BW_TOT = np.sum(Data_DC_MRT.item()['Optimal_BW'+str(num_iter-1)], axis = 1).tolist()\n\n\t\t\n\t\t# for ii in range(Data_DC_MRT.item()['SINR'+str(num_iter-1)].shape[0]):\n\t\t# \t#count = 0;\n\t\t# \tfor jj in range(Data_DC_MRT.item()['SINR'+str(num_iter-1)].shape[1]):\n\t\t# \t\t# if (Data_DC_MRT.item()['X_optimal_data'+str(num_iter-1)])[ii,jj] == 1 and count < 2 :\n\t\t# \t\t#if Data_DC_MRT.item()['SINR'+str(num_iter-1)][ii,jj] != 350:\n\t\t# \t\tSINR_DC_MRT_BW_TOT[ii,jj] = Data_DC_MRT.item()['SINR'+str(num_iter-1)][ii,jj]\n\t\t# \t\t#else:\n\t\t# \t\t#\tSINR_DC_MRT_BW_TOT[ii,jj] = np.amin(SINR_DC_MRT_BW_TOT[ii,jj])-10\n\t\t# \t\t# \tcount = count + 1\n\t\t# \t\tif jj < len(Data_DC.item()['AvailBHUtil_SC'+str(0)]):\n\t\t# \t\t\tif (Data_DC_MRT.item()['X_optimal_data'+str(num_iter-1)])[ii,jj] == 1:\n\t\t# \t\t\t\tSINR_DC_MRT_BW_SC[ii,0] = Data_DC_MRT.item()['SINR'+str(num_iter-1)][ii,jj]\n\t\t\t\t\t\t\n\t\t# \t\telse:\n\t\t# \t\t\tif (Data_DC_MRT.item()['X_optimal_data'+str(num_iter-1)])[ii,jj] == 1:\n\t\t# \t\t\t\tSINR_DC_MRT_BW_MC[ii,0] = Data_DC_MRT.item()['SINR'+str(num_iter-1)][ii,jj]\n\t\t\t\t\t\t\n\t\t#plt.bar(np.arange(len(DC_MRT_BW_MC)), DC_MRT_BW_MC)\n\t\t#plt.show()\n\t\tDC_MRT_avg_rt = DC_MRT_avg_rt + (np.sum(Data_DC_MRT.item()['Rates'+str(0)], axis=1).tolist())\n\t\t# if i == 0:\n\t\t# \tDC_MRT_avg_rt = rate_matrix_DC_MRT\n\t\t# else:\n\t\t# \tDC_MRT_avg_rt = [x + y for x, y in zip(rate_matrix_DC_MRT, DC_MRT_avg_rt)]\n\n\tif Data_DC_BHCAP_LAT.item()['Status'+str(0)] == 2:\n\t\trate_matrix_DC_BHCAP_LAT = 
np.sum(Data_DC_BHCAP_LAT.item()['Rates'+str(0)], axis=1).tolist()\n\n\tif Data_DC_LAT.item()['Status'+str(0)] == 2:\n\t\trate_matrix_DC_LAT = np.sum(Data_DC_LAT.item()['Rates'+str(0)], axis=1).tolist()\n\t\n\tif Data_DC_MRT_LAT.item()['Status'+str(0)] == 2:\n\t\trate_matrix_DC_MRT_LAT = np.sum(Data_DC_MRT_LAT.item()['Rates'+str(0)], axis=1).tolist()\n\n\tif Data_DC_BHCAP.item()['Status'+str(0)] == 2:\n\t\trate_matrix_DC_BHCAP = np.sum(Data_DC_BHCAP.item()['Rates'+str(0)], axis=1).tolist()\n\n\tif Data_SA_MRT.item()['Status'+str(0)] == 2:\n\t\trate_matrix_SA_MRT = np.sum(Data_SA_MRT.item()['Rates'+str(0)], axis=1).tolist()\n\n\tif Data_SA_LAT.item()['Status'+str(0)] == 2:\n\t\trate_matrix_SA_LAT = np.sum(Data_SA_LAT.item()['Rates'+str(0)], axis=1).tolist()\n\n\tif Data_SA_BHCAP.item()['Status'+str(0)] == 2:\n\t\trate_matrix_SA_BHCAP = np.sum(Data_SA_BHCAP.item()['Rates'+str(0)], axis=1).tolist()\n\t\n\tif Data_SA_BHCAP_LAT.item()['Status'+str(0)] == 2:\t\t\t\n\t\trate_matrix_SA_BHCAP_LAT = np.sum(Data_SA_BHCAP_LAT.item()['Rates'+str(0)], axis=1).tolist()\n\t\n\tif Data_SA_MRT_LAT.item()['Status'+str(0)] == 2:\t\t\t\n\t\trate_matrix_SA_MRT_LAT = np.sum(Data_SA_MRT_LAT.item()['Rates'+str(0)], axis=1).tolist()\n\t\t\t\t\n\n\tfor k in range(0,num_iter):\n\t\tif Data.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput[i,k] = Data.item()['Net_Throughput'+str(k)];\n\t\t\ttime_SA[i,k] = Data.item()['Time'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput[i,k] = 0; # Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas[k] = iters_infeas[k] + 1; # Increment the number of Infeasible solution sets\n\t\t\t\ttime_SA[i,k] = 700;\n\t\t\telif Data.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout[k] = iters_timeout[k] + 1; # Increment the number of Timedout solution sets\n\t\t\t\ttime_SA[i,k] = 700\n\n\t\tif Data_DC.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_DC[i,k] = Data_DC.item()['Net_Throughput'+str(k)];\n\t\t\ttime_DC[i,k] = Data_DC.item()['Time'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_DC[i,k] = 0; # Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_DC.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_DC.item()['Status' + str(k)] == 3:\t\n\t\t\t\titers_infeas_DC[k] = iters_infeas_DC[k] + 1; # Increment the number of Infeasible solution sets\n\t\t\t\ttime_DC[i,k] = 700\n\t\t\telif Data_DC.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_DC[k] = iters_timeout_DC[k] + 1; # Increment the number of Timedout solution sets\n\t\t\t\ttime_DC[i,k] = 700\t\n\t\t\t\t\n\n\t\tif Data_DC_MRT.item()['Status' + str(k)] == 2:\n\t\t\t#print Data_DC_MRT.item()['Status'+str(k)]\n\t\t\tNet_Throughput_DC_MRT[i,k] = Data_DC_MRT.item()['Net_Throughput'+str(k)];\n\t\t\ttime_DC_MRT[i,k] = Data_DC_MRT.item()['Time'+str(k)];\n\t\telse:\n\t\t\t#print Data_DC_MRT.item()['Status'+str(k)]\n\t\t\tNet_Throughput_DC_MRT[i,k] = 0; # Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_DC_MRT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_DC_MRT.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_DC_MRT[k] = iters_infeas_DC_MRT[k] + 1; # Increment the number of infeasible solutions\n\t\t\t\ttime_DC_MRT[i,k] = 700\n\t\t\telif Data_DC_MRT.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_DC_MRT[k] = iters_timeout_DC_MRT[k] + 1; # 
Increment the number of Timedout solution sets\n\t\t\t\ttime_DC_MRT[i,k] = 700\n\n\t\tif Data_DC_BHCAP.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_DC_BHCAP[i,k] = Data_DC_BHCAP.item()['Net_Throughput'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_DC_BHCAP[i,k] = 0; # Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_DC_BHCAP.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_DC_BHCAP.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_DC_BHCAP[k] = iters_infeas_DC_BHCAP[k] + 1; # Increment the number of infeasible solutions\n\t\t\telif Data_DC_BHCAP.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_DC_BHCAP[k] = iters_timeout_DC_BHCAP[k] + 1; # Increment the number of Timedout solution sets\n\t\t\n\t\tif Data_DC_BHCAP_LAT.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_DC_BHCAP_LAT[i,k] = Data_DC_BHCAP_LAT.item()['Net_Throughput'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_DC_BHCAP_LAT[i,k] = 0; # Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_DC_BHCAP_LAT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_DC_BHCAP_LAT.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_DC_BHCAP_LAT[k] = iters_infeas_DC_BHCAP_LAT[k] + 1; # Increment the number of infeasible solution\n\t\t\telif Data_DC_BHCAP_LAT.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_DC_BHCAP_LAT[k] = iters_timeout_DC_BHCAP_LAT[k] + 1; # Increment the number of Timedout solution sets\n\n\t\tif Data_DC_LAT.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_DC_LAT[i,k] = Data_DC_LAT.item()['Net_Throughput'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_DC_LAT[i,k] = 0; # Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_DC_LAT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_DC_LAT.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_DC_LAT[k] = iters_infeas_DC_LAT[k] + 1; # Increment the number of infeasible solution\n\t\t\telif Data_DC_LAT.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_DC_LAT[k] = iters_timeout_DC_LAT[k] + 1; # Increment the number of Timedout solution sets\n\n\t\tif Data_SA_MRT.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_SA_MRT[i,k] = Data_SA_MRT.item()['Net_Throughput'+str(k)];\n\t\t\ttime_SA_MRT[i,k] = Data_SA_MRT.item()['Time'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_SA_MRT[i,k] = 0; # Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_SA_MRT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_SA_MRT.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_SA_MRT[k] = iters_infeas_SA_MRT[k] + 1; # Increment the number of infeasible solution\n\t\t\t\ttime_SA_MRT[i,k] = 700\n\t\t\telif Data_SA_MRT.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_SA_MRT[k] = iters_timeout_SA_MRT[k] + 1; # Increment the number of Timedout solution sets\n\t\t\t\ttime_SA_MRT[i,k] = 700\n\n\n\t\tif Data_SA_LAT.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_SA_LAT[i,k] = Data_SA_LAT.item()['Net_Throughput'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_SA_LAT[i,k] = 0; # Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_SA_LAT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_SA_LAT.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_SA_LAT[k] = iters_infeas_SA_LAT[k] + 1; # Increment the number of infeasible solution\n\t\t\telif Data_SA_LAT.item()['Status' + str(k)] == 
9:\n\t\t\t\titers_timeout_SA_LAT[k] = iters_timeout_SA_LAT[k] + 1; # Increment the number of Timedout solution sets\n\n\t\tif Data_SA_BHCAP.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_SA_BHCAP[i,k] = Data_SA_BHCAP.item()['Net_Throughput'+str(k)];\n\t\telse: \n\t\t\tNet_Throughput_SA_BHCAP[i,k] = 0; #Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_SA_BHCAP.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_SA_BHCAP.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_SA_BHCAP[k] = iters_infeas_SA_BHCAP[k] + 1; # Increment the number of infeasible solution\n\t\t\telif Data_SA_BHCAP.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_SA_BHCAP[k] = iters_timeout_SA_BHCAP[k] + 1; # Increment the number of Timedout solution sets\n\n\t\tif Data_SA_BHCAP_LAT.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_SA_BHCAP_LAT[i,k] = Data_SA_BHCAP_LAT.item()['Net_Throughput'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_SA_BHCAP_LAT[i,k] = 0; #Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_SA_BHCAP_LAT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_SA_BHCAP_LAT.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_SA_BHCAP_LAT[k] = iters_infeas_SA_BHCAP_LAT[k] + 1; # Increment the number of infeasible solution\n\t\t\telif Data_SA_BHCAP_LAT.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_SA_BHCAP_LAT[k] = iters_timeout_SA_BHCAP_LAT[k] + 1; # Increment the number of Timedout solution sets\n\n\t\tif Data_SA_MRT_LAT.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_SA_MRT_LAT[i,k] = Data_SA_MRT_LAT.item()['Net_Throughput'+str(k)];\n\t\t\ttime_SA_MRT_LAT[i,k] = Data_SA_MRT_LAT.item()['Time'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_SA_MRT_LAT[i,k] = 0; #Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_SA_BHCAP_LAT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_SA_MRT_LAT.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_SA_MRT_LAT[k] = iters_infeas_SA_MRT_LAT[k] + 1; # Increment the number of infeasible solution\n\t\t\t\ttime_SA_MRT_LAT[i,k] = 700\n\t\t\tif Data_SA_MRT_LAT.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_SA_MRT_LAT[k] = iters_timeout_SA_MRT_LAT[k] + 1; # Increment the number of Timedout solution sets\n\t\t\t\ttime_SA_MRT_LAT[i,k] = 700\n\n\t\tif Data_DC_MRT_LAT.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_DC_MRT_LAT[i,k] = Data_DC_MRT_LAT.item()['Net_Throughput'+str(k)];\n\t\t\ttime_DC_MRT_LAT[i,k] = Data_DC_MRT_LAT.item()['Time'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_DC_MRT_LAT[i,k] = 0; #Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_SA_BHCAP_LAT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_DC_MRT_LAT.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_DC_MRT_LAT[k] = iters_infeas_DC_MRT_LAT[k] + 1; # Increment the number of infeasible solution\n\t\t\t\ttime_DC_MRT_LAT[i,k] = 700\n\t\t\telif Data_DC_MRT_LAT.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_DC_MRT_LAT[k] = iters_timeout_DC_MRT_LAT[k] + 1; # Increment the number of Timedout solution sets\n\t\t\t\ttime_DC_MRT_LAT[i,k] = 700\n\n\n\t\tif Data_DC_MRT_BHCAP.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_DC_MRT_BHCAP[i,k] = Data_DC_MRT_BHCAP.item()['Net_Throughput'+str(k)];\n\t\t\ttime_DC_MRT_BHCAP[i,k] = Data_DC_MRT_BHCAP.item()['Time'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_DC_MRT_BHCAP[i,k] = 0; 
#Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_SA_BHCAP_LAT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_DC_MRT_BHCAP.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_DC_MRT_BHCAP[k] = iters_infeas_DC_MRT_BHCAP[k] + 1; # Increment the number of infeasible solution\n\t\t\t\ttime_DC_MRT_BHCAP[i,k] = 700\n\t\t\telif Data_DC_MRT_BHCAP.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_DC_MRT_BHCAP[k] = iters_timeout_DC_MRT_BHCAP[k] + 1; # Increment the number of Timedout solution sets\n\t\t\t\ttime_DC_MRT_BHCAP[i,k] = 700\n\n\t\tif Data_DC_MRT_BHCAP_LAT.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_DC_MRT_BHCAP_LAT[i,k] = Data_DC_MRT_BHCAP_LAT.item()['Net_Throughput'+str(k)];\n\t\t\ttime_DC_MRT_BHCAP_LAT[i,k] = Data_DC_MRT_BHCAP_LAT.item()['Time'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_DC_MRT_BHCAP_LAT[i,k] = 0; #Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_SA_BHCAP_LAT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_DC_MRT_BHCAP_LAT.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_DC_MRT_BHCAP_LAT[k] = iters_infeas_DC_MRT_BHCAP_LAT[k] + 1; # Increment the number of infeasible solution\n\t\t\t\ttime_DC_MRT_BHCAP_LAT[i,k] = 700\n\t\t\telif Data_DC_MRT_BHCAP_LAT.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_DC_MRT_BHCAP_LAT[k] = iters_timeout_DC_MRT_BHCAP_LAT[k] + 1; # Increment the number of Timedout solution sets\n\t\t\t\ttime_DC_MRT_BHCAP_LAT[i,k] = 700\n\t\t\n\t\tif Data_SA_MRT_BHCAP.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_SA_MRT_BHCAP[i,k] = Data_SA_MRT_BHCAP.item()['Net_Throughput'+str(k)];\n\t\t\ttime_SA_MRT_BHCAP[i,k] = Data_SA_MRT_BHCAP.item()['Time'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_SA_MRT_BHCAP[i,k] = 0; #Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_SA_BHCAP_LAT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_SA_MRT_BHCAP.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_SA_MRT_BHCAP[k] = iters_infeas_SA_MRT_BHCAP[k] + 1; # Increment the number of infeasible solution\n\t\t\t\ttime_SA_MRT_BHCAP[i,k] = 700\n\t\t\telif Data_SA_MRT_BHCAP.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_SA_MRT_BHCAP[k] = iters_timeout_SA_MRT_BHCAP[k] + 1; # Increment the number of Timedout solution sets\n\t\t\t\ttime_SA_MRT_BHCAP[i,k] = 700\n\n\t\tif Data_SA_MRT_BHCAP_LAT.item()['Status' + str(k)] == 2:\n\t\t\tNet_Throughput_SA_MRT_BHCAP_LAT[i,k] = Data_SA_MRT_BHCAP_LAT.item()['Net_Throughput'+str(k)];\n\t\t\ttime_SA_MRT_BHCAP_LAT[i,k] = Data_SA_MRT_BHCAP_LAT.item()['Time'+str(k)];\n\t\telse:\n\t\t\tNet_Throughput_SA_MRT_BHCAP_LAT[i,k] = 0; #Zero if its an infeasible or timed out solution\n\t\t\t#iters_infeas_SA_BHCAP_LAT.append(str(i)+str(k)); # Inserting the iteration number for infeasible solution\n\t\t\tif Data_SA_MRT_BHCAP_LAT.item()['Status' + str(k)] == 3:\n\t\t\t\titers_infeas_SA_MRT_BHCAP_LAT[k] = iters_infeas_SA_MRT_BHCAP_LAT[k] + 1; # Increment the number of infeasible solution\n\t\t\t\ttime_SA_MRT_BHCAP_LAT[i,k] = 700\n\t\t\telif Data_SA_MRT_BHCAP_LAT.item()['Status' + str(k)] == 9:\n\t\t\t\titers_timeout_SA_MRT_BHCAP_LAT[k] = iters_timeout_SA_MRT_BHCAP_LAT[k] + 1; # Increment the number of Timedout solution sets\n\t\t\t\ttime_SA_MRT_BHCAP_LAT[i,k] = 700\n\n\t\tB_Dat_DR[i,k], AU_Base_DR[i,k] = baseline_cal(i,k,simdata_path)\n\t\n\t#print \"==================\"\n\t#print Net_Throughput\n\t#print 
\"==================\"\n\t#print Net_Throughput_DC_MRT\n\t# ================\n\t# User Throughputs\n\n\tX_Optimal_jfr = np.zeros((Data.item()['Apps'+str(k)], Data.item()['APs'+str(k)]));\n\tX_Optimal_DC_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_DC_MRT_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_DC_BHCAP_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_DC_BHCAP_LAT_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_DC_LAT_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_SA_MRT_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_SA_LAT_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_SA_BHCAP_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_SA_BHCAP_LAT_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_SA_MRT_LAT_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_DC_MRT_LAT_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_DC_MRT_BHCAP_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_DC_MRT_BHCAP_LAT_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_SA_MRT_BHCAP_jfr = copy.deepcopy(X_Optimal_jfr);\n\tX_Optimal_SA_MRT_BHCAP_LAT_jfr = copy.deepcopy(X_Optimal_jfr);\n\n\tRate = np.zeros((Data.item()['Apps'+str(k)], Data.item()['APs'+str(k)]));\n\tRate_DC = copy.deepcopy(Rate);\n\tRate_DC_MRT = copy.deepcopy(Rate);\n\tRate_DC_BHCAP = copy.deepcopy(Rate);\n\tRate_DC_BHCAP_LAT = copy.deepcopy(Rate);\n\tRate_DC_LAT = copy.deepcopy(Rate);\n\tRate_SA_MRT = copy.deepcopy(Rate);\n\tRate_SA_LAT = copy.deepcopy(Rate);\n\tRate_SA_BHCAP = copy.deepcopy(Rate);\n\tRate_SA_BHCAP_LAT = copy.deepcopy(Rate);\n\tRate_SA_MRT_LAT = copy.deepcopy(Rate);\n\tRate_DC_MRT_LAT = copy.deepcopy(Rate);\n\tRate_DC_MRT_BHCAP = copy.deepcopy(Rate);\n\tRate_DC_MRT_BHCAP_LAT = copy.deepcopy(Rate);\n\tRate_SA_MRT_BHCAP = copy.deepcopy(Rate);\n\tRate_SA_MRT_BHCAP_LAT = copy.deepcopy(Rate);\n\n\n\tfor k in range(0,num_iter):\n\t\tX_Optimal = np.empty((Data.item()['Apps'+str(k)], Data.item()['APs'+str(k)]));\n\t\tX_Optimal_DC = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_DC_MRT = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_DC_BHCAP = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_DC_BHCAP_LAT = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_DC_LAT = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_SA_MRT = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_SA_LAT = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_SA_BHCAP = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_SA_BHCAP_LAT = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_SA_MRT_LAT = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_DC_MRT_LAT = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_DC_MRT_BHCAP = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_DC_MRT_BHCAP_LAT = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_SA_MRT_BHCAP = copy.deepcopy(X_Optimal);\n\t\tX_Optimal_SA_MRT_BHCAP_LAT = copy.deepcopy(X_Optimal);\n\n\n\t\tRate_jfr = np.zeros((Data.item()['Apps'+str(k)], Data.item()['APs'+str(k)]));\n\t\tRate_DC_jfr = copy.deepcopy(Rate);\n\t\tRate_DC_MRT_jfr = copy.deepcopy(Rate);\n\t\tRate_DC_BHCAP_jfr = copy.deepcopy(Rate);\n\t\tRate_DC_BHCAP_LAT_jfr = copy.deepcopy(Rate);\n\t\tRate_DC_LAT_jfr = copy.deepcopy(Rate);\n\t\tRate_SA_MRT_jfr = copy.deepcopy(Rate);\n\t\tRate_SA_LAT_jfr = copy.deepcopy(Rate);\n\t\tRate_SA_BHCAP_jfr = copy.deepcopy(Rate);\n\t\tRate_SA_BHCAP_LAT_jfr = copy.deepcopy(Rate);\n\t\tRate_SA_MRT_LAT = copy.deepcopy(Rate);\n\t\tRate_DC_MRT_LAT = copy.deepcopy(Rate);\n\t\tRate_DC_MRT_BHCAP = copy.deepcopy(Rate);\n\t\tRate_DC_MRT_BHCAP_LAT = copy.deepcopy(Rate);\n\t\tRate_SA_MRT_BHCAP = copy.deepcopy(Rate);\n\t\tRate_SA_MRT_BHCAP_LAT = copy.deepcopy(Rate);\n\t\t\n\t\t\n\t\tif Data.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal = 
Data.item()['X_optimal_data'+str(k)];\n\t\t\tRate = Data.item()['Rates'+str(k)];\n\t\t\tAU_DR[i,k] = user_count(X_Optimal)\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_DC = Data_DC.item()['X_optimal_data'+str(k)];\n\t\t\tRate_DC = Data_DC.item()['Rates'+str(k)];\n\t\t\tAU_DR_DC[i,k] = user_count(X_Optimal_DC)\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_MRT.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_DC_MRT = Data_DC_MRT.item()['X_optimal_data'+str(k)];\n\t\t\tRate_DC_MRT = Data_DC_MRT.item()['Rates'+str(k)];\n\t\t\tAU_DR_DC_MRT[i,k] = user_count(X_Optimal_DC_MRT)\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_BHCAP.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_DC_BHCAP = Data_DC_BHCAP.item()['X_optimal_data'+str(k)];\n\t\t\tRate_DC_BHCAP = Data_DC_BHCAP.item()['Rates'+str(k)];\n\t\t\tAU_DR_DC_BHCAP[i,k] = user_count(X_Optimal_DC_BHCAP)\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_BHCAP_LAT.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_DC_BHCAP_LAT = Data_DC_BHCAP_LAT.item()['X_optimal_data'+str(k)];\n\t\t\tRate_DC_BHCAP_LAT = Data_DC_BHCAP_LAT.item()['Rates'+str(k)];\n\t\t\tAU_DR_DC_BHCAP_LAT[i,k] = user_count(X_Optimal_DC_BHCAP_LAT)\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_LAT.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_DC_LAT = Data_DC_LAT.item()['X_optimal_data'+str(k)];\n\t\t\tRate_DC_LAT = Data_DC_LAT.item()['Rates'+str(k)];\n\t\t\tAU_DR_DC_LAT[i,k] = user_count(X_Optimal_DC_LAT)\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_MRT.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_SA_MRT = Data_SA_MRT.item()['X_optimal_data'+str(k)];\n\t\t\tRate_SA_MRT = Data_SA_MRT.item()['Rates'+str(k)];\n\t\t\tAU_DR_SA_MRT[i,k] = user_count(X_Optimal_SA_MRT)\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_LAT.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_SA_LAT = Data_SA_LAT.item()['X_optimal_data'+str(k)];\n\t\t\tRate_SA_LAT = Data_SA_LAT.item()['Rates'+str(k)];\n\t\t\tAU_DR_SA_LAT[i,k] = user_count(X_Optimal_SA_LAT)\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_BHCAP.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_SA_BHCAP = Data_SA_BHCAP.item()['X_optimal_data'+str(k)];\n\t\t\tRate_SA_BHCAP = Data_SA_BHCAP.item()['Rates'+str(k)];\n\t\t\tAU_DR_SA_BHCAP[i,k] = user_count(X_Optimal_SA_BHCAP)\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_BHCAP_LAT.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_SA_BHCAP_LAT = Data_SA_BHCAP_LAT.item()['X_optimal_data'+str(k)];\n\t\t\tRate_SA_BHCAP_LAT = Data_SA_BHCAP_LAT.item()['Rates'+str(k)];\n\t\t\tAU_DR_SA_BHCAP_LAT[i,k] = user_count(X_Optimal_SA_BHCAP_LAT)\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_MRT_LAT.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_SA_MRT_LAT = Data_SA_MRT_LAT.item()['X_optimal_data'+str(k)];\n\t\t\tRate_SA_MRT_LAT = Data_SA_MRT_LAT.item()['Rates'+str(k)];\n\t\t\tAU_DR_SA_MRT_LAT[i,k] = user_count(X_Optimal_SA_MRT_LAT)\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_MRT_LAT.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_DC_MRT_LAT = Data_DC_MRT_LAT.item()['X_optimal_data'+str(k)];\n\t\t\tRate_DC_MRT_LAT = Data_DC_MRT_LAT.item()['Rates'+str(k)];\n\t\t\tAU_DR_DC_MRT_LAT[i,k] = user_count(X_Optimal_DC_MRT_LAT)\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_MRT_BHCAP.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_DC_MRT_BHCAP = Data_DC_MRT_BHCAP.item()['X_optimal_data'+str(k)];\n\t\t\tRate_DC_MRT_BHCAP = Data_DC_MRT_BHCAP.item()['Rates'+str(k)];\n\t\t\tAU_DR_DC_MRT_BHCAP[i,k] = user_count(X_Optimal_DC_MRT_BHCAP)\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_MRT_BHCAP_LAT.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_DC_MRT_BHCAP_LAT = 
Data_DC_MRT_BHCAP_LAT.item()['X_optimal_data'+str(k)];\n\t\t\tRate_DC_MRT_BHCAP_LAT = Data_DC_MRT_BHCAP_LAT.item()['Rates'+str(k)];\n\t\t\tAU_DR_DC_MRT_BHCAP_LAT[i,k] = user_count(X_Optimal_DC_MRT_BHCAP_LAT)\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_MRT_BHCAP.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_SA_MRT_BHCAP = Data_SA_MRT_BHCAP.item()['X_optimal_data'+str(k)];\n\t\t\tRate_SA_MRT_BHCAP = Data_SA_MRT_BHCAP.item()['Rates'+str(k)];\n\t\t\tAU_DR_SA_MRT_BHCAP[i,k] = user_count(X_Optimal_SA_MRT_BHCAP)\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_MRT_BHCAP_LAT.item()['Status'+str(k)] == 2:\n\t\t\tX_Optimal_SA_MRT_BHCAP_LAT = Data_SA_MRT_BHCAP_LAT.item()['X_optimal_data'+str(k)];\n\t\t\tRate_SA_MRT_BHCAP_LAT = Data_SA_MRT_BHCAP_LAT.item()['Rates'+str(k)];\n\t\t\tAU_DR_SA_MRT_BHCAP_LAT[i,k] = user_count(X_Optimal_SA_MRT_BHCAP_LAT)\n\t\telse:\n\t\t\tpass\n\t\n\tavg_idx.append(X_Optimal.shape[0])\n\t#print avg_idx\t\n\n\tfor j in range(0,X_Optimal.shape[0]):\n\t\tBase_DR.append(scn.eMBB_minrate); \n\t\tif Data.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t# X_Optimal_jfr = Data.item()['Optimal_BW'+str(num_iter-1)];\n\t\t\t#Rate_jfr = Data.item()['Rates'+str(0)]; \n\t\t\tapplication_DR.append(sum(Rate[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t# X_Optimal_DC_jfr = Data_DC.item()['Optimal_BW'+str(num_iter-1)];\n\t\t\t#Rate_DC_jfr = Data_DC.item()['Rates'+str(1)];\n\t\t\tapplication_DR_DC.append(sum(Rate_DC[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_MRT.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t# X_Optimal_DC_MRT_jfr = Data_DC_MRT.item()['Optimal_BW'+str(num_iter-1)];\n\t\t\t# print X_Optimal_DC_MRT_jfr[j,:]\n\t\t\t# print Rate_DC_MRT[j,:]\n\t\t\t# #Rate_DC_MRT_jfr = Data_DC_MRT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_DC_MRT.append(sum(Rate_DC_MRT[j,:]));\n\t\t\t#print application_DR_DC_MRT\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_BHCAP.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_DC_BHCAP_jfr = Data_DC_BHCAP.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_DC_BHCAP_jfr = Data_DC_BHCAP.item()['Rates'+str(1)];\n\t\t\tapplication_DR_DC_BHCAP.append(sum(Rate_DC_BHCAP[j,:]*X_Optimal_DC_BHCAP[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_BHCAP_LAT.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_DC_BHCAP_LAT_jfr = Data_DC_BHCAP_LAT.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_DC_BHCAP_LAT_jfr = Data_DC_BHCAP_LAT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_DC_BHCAP_LAT.append(sum(Rate_DC_BHCAP_LAT[j,:]*X_Optimal_DC_BHCAP_LAT[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_LAT.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_DC_LAT_jfr = Data_DC_LAT.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_DC_LAT_jfr = Data_DC_LAT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_DC_LAT.append(sum(Rate_DC_LAT[j,:]*X_Optimal_DC_LAT[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_MRT.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_SA_MRT_jfr = Data_SA_MRT.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_SA_MRT_jfr = Data_SA_MRT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_SA_MRT.append(sum(Rate_SA_MRT[j,:]*X_Optimal_SA_MRT[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_LAT.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_SA_LAT_jfr = Data_SA_LAT.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_SA_LAT_jfr = Data_SA_LAT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_SA_LAT.append(sum(Rate_SA_LAT[j,:]*X_Optimal_SA_LAT[j,:]));\n\t\telse: \n\t\t\tpass\n\t\tif Data_SA_BHCAP.item()['Status'+str(num_iter-1)] == 
2:\n\t\t\t#X_Optimal_SA_BHCAP_jfr = Data_SA_BHCAP.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_SA_BHCAP_jfr = Data_SA_BHCAP.item()['Rates'+str(1)];\n\t\t\tapplication_DR_SA_BHCAP.append(sum(Rate_SA_BHCAP[j,:]*X_Optimal_SA_BHCAP[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_BHCAP_LAT.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_SA_BHCAP_LAT_jfr = Data_SA_BHCAP_LAT.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_SA_BHCAP_LAT_jfr = Data_SA_BHCAP_LAT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_SA_BHCAP_LAT.append(sum(Rate_SA_BHCAP_LAT[j,:]*X_Optimal_SA_BHCAP_LAT[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_MRT_LAT.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_SA_MRT_MAT_jfr = Data_SA_MRT_LAT.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_SA_MRT_LAT_jfr = Data_SA_MRT_LAT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_SA_MRT_LAT.append(sum(Rate_SA_MRT_LAT[j,:]*X_Optimal_SA_MRT_LAT[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_MRT_LAT.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_DC_MRT_LAT_jfr = Data_DC_MRT_LAT_.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_DC_MRT_LAT_jfr = Data_DC_MRT_LAT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_DC_MRT_LAT.append(sum(Rate_DC_MRT_LAT[j,:]*X_Optimal_DC_MRT_LAT[j,:]));\n\t\telse:\n\t\t\tpass\n\n\t\tif Data_DC_MRT_BHCAP.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_DC_MRT_LAT_jfr = Data_DC_MRT_LAT_.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_DC_MRT_LAT_jfr = Data_DC_MRT_LAT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_DC_MRT_BHCAP.append(sum(Rate_DC_MRT_BHCAP[j,:]*X_Optimal_DC_MRT_BHCAP[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_DC_MRT_BHCAP_LAT.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_DC_MRT_LAT_jfr = Data_DC_MRT_LAT_.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_DC_MRT_LAT_jfr = Data_DC_MRT_LAT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_DC_MRT_BHCAP_LAT.append(sum(Rate_DC_MRT_BHCAP_LAT[j,:]*X_Optimal_DC_MRT_BHCAP_LAT[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_MRT_BHCAP.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_DC_MRT_LAT_jfr = Data_DC_MRT_LAT_.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_DC_MRT_LAT_jfr = Data_DC_MRT_LAT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_SA_MRT_BHCAP.append(sum(Rate_SA_MRT_BHCAP[j,:]*X_Optimal_SA_MRT_BHCAP[j,:]));\n\t\telse:\n\t\t\tpass\n\t\tif Data_SA_MRT_BHCAP_LAT.item()['Status'+str(num_iter-1)] == 2:\n\t\t\t#X_Optimal_DC_MRT_LAT_jfr = Data_DC_MRT_LAT_.item()['X_optimal_data'+str(1)];\n\t\t\t#Rate_DC_MRT_LAT_jfr = Data_DC_MRT_LAT.item()['Rates'+str(1)];\n\t\t\tapplication_DR_SA_MRT_BHCAP_LAT.append(sum(Rate_SA_MRT_BHCAP_LAT[j,:]*X_Optimal_SA_MRT_BHCAP_LAT[j,:]));\n\t\telse:\n\t\t\tpass\n\n\n# ===============\n# Analysis Values\n# ===============\n\n# ==============\n# Net Throughput\n\n#print iters_infeas \n#print iters_timeout\nNet_Throughput_avg = zero_div(np.sum(Net_Throughput, axis = 0),(MCMC_iter - np.array(iters_infeas) - np.array(iters_timeout))) ; # We get the average throughput over MCMC Iteratios\nNet_Throughput_DC_avg = zero_div(np.sum(Net_Throughput_DC, axis = 0),(MCMC_iter - np.array(iters_infeas_DC) - np.array(iters_timeout_DC))); # Average throughput\nNet_Throughput_DC_MRT_avg = zero_div(np.sum(Net_Throughput_DC_MRT, axis = 0),(MCMC_iter - np.array(iters_infeas_DC_MRT) - np.array(iters_timeout_DC_MRT))); # DC + MRT Average throughput\nNet_Throughput_DC_BHCAP_avg = zero_div(np.sum(Net_Throughput_DC_BHCAP, axis = 0),(MCMC_iter - np.array(iters_infeas_DC_BHCAP) - np.array(iters_timeout_DC_BHCAP))); # DC + BHCAP Average 
throughput\nNet_Throughput_DC_LAT_avg = zero_div(np.sum(Net_Throughput_DC_LAT, axis = 0),(MCMC_iter - np.array(iters_infeas_DC_LAT) - np.array(iters_timeout_DC_LAT))); # DC + LAT Average throughput\nNet_Throughput_DC_BHCAP_LAT_avg = zero_div(np.sum(Net_Throughput_DC_BHCAP_LAT, axis = 0),(MCMC_iter - np.array(iters_infeas_DC_BHCAP_LAT) - np.array(iters_timeout_DC_BHCAP_LAT))); # DC + BHCAP + LAT Average throughput\nNet_Throughput_SA_MRT_avg = zero_div(np.sum(Net_Throughput_SA_MRT, axis = 0),(MCMC_iter - np.array(iters_infeas_SA_MRT) - np.array(iters_timeout_SA_MRT))); # SA + MRT average \nNet_Throughput_SA_LAT_avg = zero_div(np.sum(Net_Throughput_SA_LAT, axis = 0),(MCMC_iter - np.array(iters_infeas_SA_LAT) - np.array(iters_timeout_SA_LAT))); # SA + LAT average\nNet_Throughput_SA_BHCAP_avg = zero_div(np.sum(Net_Throughput_SA_BHCAP, axis = 0),(MCMC_iter - np.array(iters_infeas_SA_BHCAP) - np.array(iters_timeout_SA_BHCAP))); # SA + BHCAP average\nNet_Throughput_SA_BHCAP_LAT_avg = zero_div(np.sum(Net_Throughput_SA_BHCAP_LAT, axis = 0),(MCMC_iter - np.array(iters_infeas_SA_BHCAP_LAT) - np.array(iters_timeout_SA_BHCAP_LAT))); # SA + BHCAP + LAT average\nNet_Throughput_SA_MRT_LAT_avg = zero_div(np.sum(Net_Throughput_SA_MRT_LAT, axis = 0),(MCMC_iter - np.array(iters_infeas_SA_MRT_LAT) - np.array(iters_timeout_SA_MRT_LAT))); # SA + LAT average\nNet_Throughput_DC_MRT_LAT_avg = zero_div(np.sum(Net_Throughput_DC_MRT_LAT, axis = 0),(MCMC_iter - np.array(iters_infeas_DC_MRT_LAT) - np.array(iters_timeout_DC_MRT_LAT))); # SA + LAT average\nNet_Throughput_DC_MRT_BHCAP_avg = zero_div(np.sum(Net_Throughput_DC_MRT_BHCAP, axis = 0),(MCMC_iter - np.array(iters_infeas_DC_MRT_BHCAP) - np.array(iters_timeout_DC_MRT_BHCAP))); # SA + LAT average\nNet_Throughput_DC_MRT_BHCAP_LAT_avg = zero_div(np.sum(Net_Throughput_DC_MRT_BHCAP_LAT, axis = 0),(MCMC_iter - np.array(iters_infeas_DC_MRT_BHCAP_LAT) - np.array(iters_timeout_DC_MRT_BHCAP_LAT))); # SA + LAT average\nNet_Throughput_SA_MRT_BHCAP_avg = zero_div(np.sum(Net_Throughput_SA_MRT_BHCAP, axis = 0),(MCMC_iter - np.array(iters_infeas_SA_MRT_BHCAP) - np.array(iters_timeout_SA_MRT_BHCAP))); # SA + LAT average\nNet_Throughput_SA_MRT_BHCAP_LAT_avg = zero_div(np.sum(Net_Throughput_SA_MRT_BHCAP_LAT, axis = 0),(MCMC_iter - np.array(iters_infeas_SA_MRT_BHCAP_LAT) - np.array(iters_timeout_SA_MRT_BHCAP_LAT))); # SA + LAT average\n\n# Net_Throughput_avg = np.sum(Net_Throughput, axis = 0)/MCMC_iter ; # We get the average throughput over MCMC Iteratios\n# Net_Throughput_DC_avg = np.sum(Net_Throughput_DC, axis = 0)/MCMC_iter; # Average throughput\n# Net_Throughput_DC_MRT_avg = np.sum(Net_Throughput_DC_MRT, axis = 0)/MCMC_iter; # DC + MRT Average throughput\n# Net_Throughput_DC_BHCAP_avg = np.sum(Net_Throughput_DC_BHCAP, axis = 0)/MCMC_iter; # DC + BHCAP Average throughput\n# Net_Throughput_DC_LAT_avg = np.sum(Net_Throughput_DC_LAT, axis = 0)/MCMC_iter; # DC + LAT Average throughput\n# Net_Throughput_DC_BHCAP_LAT_avg = np.sum(Net_Throughput_DC_BHCAP_LAT, axis = 0)/MCMC_iter; # DC + BHCAP + LAT Average throughput\n# Net_Throughput_SA_MRT_avg = np.sum(Net_Throughput_SA_MRT, axis = 0)/MCMC_iter; # SA + MRT average \n# Net_Throughput_SA_LAT_avg = np.sum(Net_Throughput_SA_LAT, axis = 0)/MCMC_iter; # SA + LAT average\n# Net_Throughput_SA_BHCAP_avg = np.sum(Net_Throughput_SA_BHCAP, axis = 0)/MCMC_iter; # SA + BHCAP average\n# Net_Throughput_SA_BHCAP_LAT_avg = np.sum(Net_Throughput_SA_BHCAP_LAT, axis = 0)/MCMC_iter; # SA + BHCAP + LAT average\n# Net_Throughput_SA_MRT_LAT_avg = 
np.sum(Net_Throughput_SA_MRT_LAT, axis = 0)/MCMC_iter; # SA + LAT average\n# Net_Throughput_DC_MRT_LAT_avg = np.sum(Net_Throughput_DC_MRT_LAT, axis = 0)/MCMC_iter; # SA + LAT average\n# Net_Throughput_DC_MRT_BHCAP_avg = np.sum(Net_Throughput_DC_MRT_BHCAP, axis = 0)/MCMC_iter; # SA + LAT average\n# Net_Throughput_DC_MRT_BHCAP_LAT_avg = np.sum(Net_Throughput_DC_MRT_BHCAP_LAT, axis = 0)/MCMC_iter; # SA + LAT average\n# Net_Throughput_SA_MRT_BHCAP_avg = np.sum(Net_Throughput_SA_MRT_BHCAP, axis = 0)/MCMC_iter; # SA + LAT average\n# Net_Throughput_SA_MRT_BHCAP_LAT_avg = np.sum(Net_Throughput_SA_MRT_BHCAP_LAT, axis = 0)/MCMC_iter; # SA + LAT average\n\nB_Dat_DR_avg = np.sum(B_Dat_DR, axis =0)/MCMC_iter; # Baseline with BW restriction\nB_Dat_DR_fs_avg = np.sum(B_Dat_DR_fs, axis = 0)/MCMC_iter;\n#B_Dat_DR_fs_avg = np.sum(B_Dat_DR_fs, axis = 0)/MCMC_iter; # Baseline with 1GHz BW\n#B_Dat_DR_sn_avg = np.sum(B_Dat_DR_sn, axis = 0)/MCMC_iter; # Baseline with SINR and BW restriction\n#B_Dat_DR_sn_fs_avg = np.sum(B_Dat_DR_sn_fs, axis = 0)/MCMC_iter; #Baseline with SINR and 1GHz BW\n#print iters_infeas_DC_MRT\n\n\n# ====================\n# Satisfied User Count\n\n#AU_Base_DR_fs_avg = np.floor(np.sum(AU_Base_DR_fs, axis = 0)/MCMC_iter);\nAU_Base_DR_avg = np.floor(np.sum(AU_Base_DR, axis = 0)/(MCMC_iter)); \nAU_DR_avg = np.floor(zero_div(np.sum(AU_DR, axis = 0),(MCMC_iter- np.array(iters_infeas) - np.array(iters_timeout))));\nAU_DR_DC_avg = np.floor(zero_div(np.sum(AU_DR_DC, axis = 0),(MCMC_iter- np.array(iters_infeas_DC) - np.array(iters_timeout_DC))));\nAU_DR_DC_MRT_avg = np.floor(zero_div(np.sum(AU_DR_DC_MRT, axis = 0),(MCMC_iter- np.array(iters_infeas_DC_MRT) - np.array(iters_timeout_DC_MRT))));\nAU_DR_DC_BHCAP_avg = np.floor(zero_div(np.sum(AU_DR_DC_BHCAP, axis = 0),(MCMC_iter - np.array(iters_infeas_DC_BHCAP) - np.array(iters_timeout_DC_BHCAP))));\nAU_DR_DC_LAT_avg = np.floor(zero_div(np.sum(AU_DR_DC_LAT, axis = 0),(MCMC_iter- np.array(iters_infeas_DC_LAT) - np.array(iters_timeout_DC_LAT))));\nAU_DR_DC_BHCAP_LAT_avg = np.floor(zero_div(np.sum(AU_DR_DC_BHCAP_LAT, axis = 0),(MCMC_iter- np.array(iters_infeas_DC_BHCAP_LAT) - np.array(iters_timeout_DC_BHCAP_LAT))));\nAU_DR_SA_MRT_avg = np.floor(zero_div(np.sum(AU_DR_SA_MRT, axis = 0),(MCMC_iter- np.array(iters_infeas_SA_MRT) - np.array(iters_timeout_SA_MRT))));\nAU_DR_SA_LAT_avg = np.floor(zero_div(np.sum(AU_DR_SA_LAT, axis = 0),(MCMC_iter- np.array(iters_infeas_SA_LAT) - np.array(iters_timeout_SA_LAT))));\nAU_DR_SA_BHCAP_avg = np.floor(zero_div(np.sum(AU_DR_SA_BHCAP, axis = 0),(MCMC_iter- np.array(iters_infeas_SA_BHCAP) - np.array(iters_timeout_SA_BHCAP))));\nAU_DR_SA_BHCAP_LAT_avg = np.floor(zero_div(np.sum(AU_DR_SA_BHCAP_LAT, axis = 0),(MCMC_iter- np.array(iters_infeas_SA_BHCAP_LAT) - np.array(iters_timeout_SA_BHCAP_LAT))));\nAU_DR_SA_MRT_LAT_avg = np.floor(zero_div(np.sum(AU_DR_SA_MRT, axis = 0),(MCMC_iter- np.array(iters_infeas_SA_MRT) - np.array(iters_timeout_SA_MRT))));\nAU_DR_DC_MRT_LAT_avg = np.floor(zero_div(np.sum(AU_DR_DC_MRT_LAT, axis = 0),(MCMC_iter- np.array(iters_infeas_DC_MRT_LAT) - np.array(iters_timeout_DC_MRT_LAT))));\nAU_DR_DC_MRT_BHCAP_avg = np.floor(zero_div(np.sum(AU_DR_DC_MRT_BHCAP, axis = 0),(MCMC_iter- np.array(iters_infeas_DC_MRT_BHCAP) - np.array(iters_timeout_DC_MRT_BHCAP))));\nAU_DR_DC_MRT_BHCAP_LAT_avg = np.floor(zero_div(np.sum(AU_DR_DC_MRT_BHCAP_LAT, axis = 0),(MCMC_iter- np.array(iters_infeas_DC_MRT_BHCAP_LAT) - np.array(iters_timeout_DC_MRT_BHCAP_LAT))));\nAU_DR_SA_MRT_BHCAP_avg = 
np.floor(zero_div(np.sum(AU_DR_SA_MRT_BHCAP, axis = 0),(MCMC_iter- np.array(iters_infeas_SA_MRT_BHCAP) - np.array(iters_timeout_SA_MRT_BHCAP))));\nAU_DR_SA_MRT_BHCAP_LAT_avg = np.floor(zero_div(np.sum(AU_DR_SA_MRT_BHCAP_LAT, axis = 0),(MCMC_iter- np.array(iters_infeas_SA_MRT_BHCAP_LAT) - np.array(iters_timeout_SA_MRT_BHCAP_LAT))));\n# AU_Base_DR_avg = np.floor(np.sum(AU_Base_DR, axis = 0)/(MCMC_iter)); \n# AU_DR_avg = np.floor(np.sum(AU_DR, axis = 0)/(MCMC_iter));\n# AU_DR_DC_avg = np.floor(np.sum(AU_DR_DC, axis = 0)/(MCMC_iter));\n# AU_DR_DC_MRT_avg = np.floor(np.sum(AU_DR_DC_MRT, axis = 0)/(MCMC_iter));\n# AU_DR_DC_BHCAP_avg = np.floor(np.sum(AU_DR_DC_BHCAP, axis = 0)/(MCMC_iter));\n# AU_DR_DC_LAT_avg = np.floor(np.sum(AU_DR_DC_LAT, axis = 0)/(MCMC_iter));\n# AU_DR_DC_BHCAP_LAT_avg = np.floor(np.sum(AU_DR_DC_BHCAP_LAT, axis = 0)/(MCMC_iter));\n# AU_DR_SA_MRT_avg = np.floor(np.sum(AU_DR_SA_MRT, axis = 0)/(MCMC_iter));\n# AU_DR_SA_LAT_avg = np.floor(np.sum(AU_DR_SA_LAT, axis = 0)/(MCMC_iter));\n# AU_DR_SA_BHCAP_avg = np.floor(np.sum(AU_DR_SA_BHCAP, axis = 0)/(MCMC_iter));\n# AU_DR_SA_BHCAP_LAT_avg = np.floor(np.sum(AU_DR_SA_BHCAP_LAT, axis = 0)/(MCMC_iter));\n# AU_DR_SA_MRT_LAT_avg = np.floor(np.sum(AU_DR_SA_MRT, axis = 0)/(MCMC_iter));\n# AU_DR_DC_MRT_LAT_avg = np.floor(np.sum(AU_DR_DC_MRT_LAT, axis = 0)/(MCMC_iter));\n# AU_DR_DC_MRT_BHCAP_avg = np.floor(np.sum(AU_DR_DC_MRT_BHCAP, axis = 0)/(MCMC_iter));\n# AU_DR_DC_MRT_BHCAP_LAT_avg = np.floor(np.sum(AU_DR_DC_MRT_BHCAP_LAT, axis = 0)/(MCMC_iter));\n# AU_DR_SA_MRT_BHCAP_avg = np.floor(np.sum(AU_DR_SA_MRT_BHCAP, axis = 0)/(MCMC_iter));\n# AU_DR_SA_MRT_BHCAP_LAT_avg = np.floor(np.sum(AU_DR_SA_MRT_BHCAP_LAT, axis = 0)/(MCMC_iter));\n\n#np.savetxt(\"Accepted_USER.csv\",AU_Base_DR_fs_avg, AU_Base_DR_avg, AU_DR_avg, AU_DR_DC_avg, AU_DR_DC_MRT_avg, AU_DR_DC_BHCAP_avg, AU_DR_DC_LAT_avg, AU_DR_DC_BHCAP_LAT_avg, AU_DR_SA_MRT_avg, AU_DR_SA_LAT_avg, AU_DR_SA_BHCAP_avg, AU_DR_SA_BHCAP_LAT_avg, AU_DR_SA_MRT_LAT_avg, AU_DR_DC_MRT_LAT_avg, delimiter=\",\")\n\n# =================================\n# Save multiple Numpy arrays to CSV\n\n# df = pd.DataFrame({\"Baseline\": AU_Base_DR_avg, \"Single Association\": AU_DR_avg, \"Dual Association\": AU_DR_DC_avg, \"Dual Association MinRate\": AU_DR_DC_MRT_avg, \"Dual Association BHaul\": AU_DR_DC_BHCAP_avg, \"Dual Association LAT\": AU_DR_DC_LAT_avg, \"Dual Association Bhaul LAT\": AU_DR_DC_BHCAP_LAT_avg, \"Single Association MRT\": AU_DR_SA_MRT_avg, \"Single Association LAT\": AU_DR_SA_LAT_avg, \"Single Association Bhaul\": AU_DR_SA_BHCAP_avg, \"Single Association BHCAP+LAT\": AU_DR_SA_BHCAP_LAT_avg, \"Single Association MRT+LAT\": AU_DR_SA_MRT_LAT_avg, \"Dual Association MRT+LAT\": AU_DR_DC_MRT_LAT_avg, \"Dual Association MRT+BHCAP\": AU_DR_DC_MRT_BHCAP_avg, \"Dual Association MRT+BHCAP+LAT\": AU_DR_DC_MRT_BHCAP_LAT_avg, \"Single Association MRT+BHCAP\": AU_DR_SA_MRT_BHCAP_avg, \"Single Association MRT+BHCAP+LAT\": AU_DR_SA_MRT_BHCAP_LAT_avg})\n# df.to_csv(\"AcceptedUsers.csv\", index=False)\n\n# ========================================\n# Jain's Fairness Index and t-student test\n#print application_DR_DC_MRT\n#print X_Optimal_DC_MRT_jfr\njfr_SA = jains_fairness(application_DR, AU_DR[:, num_iter-1]);\njfr_DC = jains_fairness(application_DR_DC, AU_DR_DC[:, num_iter-1]);\njfr_DC_MRT = jains_fairness(application_DR_DC_MRT, AU_DR_DC_MRT[:, num_iter-1]); \njfr_DC_BHCAP = jains_fairness(application_DR_DC_BHCAP, AU_DR_DC_BHCAP[:, num_iter-1]);\njfr_DC_BHCAP_LAT = jains_fairness(application_DR_DC_BHCAP_LAT, 
AU_DR_DC_BHCAP_LAT[:, num_iter-1]);\njfr_DC_LAT = jains_fairness(application_DR_DC_LAT, AU_DR_DC_LAT[:, num_iter-1]);\njfr_SA_MRT = jains_fairness(application_DR_SA_MRT, AU_DR_SA_MRT[:, num_iter-1]);\njfr_SA_LAT = jains_fairness(application_DR_SA_LAT, AU_DR_SA_LAT[:, num_iter-1]);\njfr_SA_BHCAP = jains_fairness(application_DR_SA_BHCAP, AU_DR_SA_BHCAP[:, num_iter-1]);\njfr_SA_BHCAP_LAT = jains_fairness(application_DR_SA_BHCAP_LAT, AU_DR_SA_BHCAP_LAT[:, num_iter-1]);\njfr_SA_MRT_LAT = jains_fairness(application_DR_SA_MRT_LAT, AU_DR_SA_MRT_LAT[:, num_iter-1]);\njfr_DC_MRT_LAT = jains_fairness(application_DR_DC_MRT_LAT, AU_DR_DC_MRT_LAT[:, num_iter-1]);\njfr_DC_MRT_BHCAP = jains_fairness(application_DR_DC_MRT_BHCAP, AU_DR_DC_MRT_BHCAP[:, num_iter-1]);\njfr_DC_MRT_BHCAP_LAT = jains_fairness(application_DR_DC_MRT_BHCAP_LAT, AU_DR_DC_MRT_BHCAP_LAT[:, num_iter-1]);\njfr_SA_MRT_BHCAP = jains_fairness(application_DR_SA_MRT_BHCAP, AU_DR_SA_MRT_BHCAP[:, num_iter-1]);\njfr_SA_MRT_BHCAP_LAT = jains_fairness(application_DR_SA_MRT_BHCAP_LAT, AU_DR_SA_MRT_BHCAP_LAT[:, num_iter-1]);\n\n\n#jfr_Baseline = jains_fairness(B_Dat_DR_avg, avg_idx);\n\n#print jfr_SA\n#print jfr_DC\n#print jfr_DC_BHCAP\n#print jfr_DC_BHCAP_LAT\n\n\n\n# ===============\n# Throughput Plot\n\nx_axis = np.arange(scn.num_users_min, scn.num_users_max, scn.user_steps_siml);\n# y_min_1 = np.amin([np.amin(Net_Throughput_avg),np.amin(Net_Throughput_DC_avg), np.amin(Net_Throughput_DC_MRT_avg), np.amin(Net_Throughput_DC_BHCAP_avg), np.amin(Net_Throughput_DC_BHCAP_LAT_avg), np.amin(Net_Throughput_DC_LAT_avg), np.amin(Net_Throughput_SA_MRT_avg), np.amin(Net_Throughput_SA_LAT_avg), np.amin(Net_Throughput_SA_BHCAP_avg), np.amin(Net_Throughput_SA_BHCAP_LAT_avg), np.amin(Net_Throughput_SA_MRT_LAT_avg), np.amin(B_Dat_DR_avg), np.amin(Net_Throughput_DC_MRT_LAT_avg), np.amin(Net_Throughput_DC_MRT_BHCAP_avg), np.amin(Net_Throughput_DC_MRT_BHCAP_LAT_avg), np.amin(Net_Throughput_SA_MRT_BHCAP_avg), np.amin(Net_Throughput_SA_MRT_BHCAP_LAT_avg)]); #np.amin(B_Dat_DR_avg), , np.amin(B_Dat_DR_sn_avg)\n# y_max_1 = np.max([np.amax(Net_Throughput_avg), np.amax(Net_Throughput_DC_avg), np.amax(Net_Throughput_DC_MRT_avg), np.amax(Net_Throughput_DC_BHCAP_avg), np.amax(Net_Throughput_DC_BHCAP_LAT_avg), np.amax(Net_Throughput_DC_LAT_avg), np.amax(Net_Throughput_SA_MRT_avg), np.amax(Net_Throughput_SA_LAT_avg), np.amax(Net_Throughput_SA_BHCAP_avg), np.amax(Net_Throughput_SA_BHCAP_LAT_avg), np.amax(Net_Throughput_SA_MRT_LAT_avg), np.amax(B_Dat_DR_avg), np.amax(Net_Throughput_DC_MRT_LAT_avg), np.amax(Net_Throughput_DC_MRT_BHCAP_avg), np.amax(Net_Throughput_DC_MRT_BHCAP_LAT_avg), np.amax(Net_Throughput_SA_MRT_BHCAP_avg), np.amax(Net_Throughput_SA_MRT_BHCAP_LAT_avg)]); #np.amax(B_Dat_DR_avg),, np.amax(B_Dat_DR_sn_avg)\ny_min_1 = np.amin([np.amin(Net_Throughput_avg),np.amin(Net_Throughput_DC_avg), np.amin(Net_Throughput_DC_MRT_avg), np.amin(Net_Throughput_DC_BHCAP_avg), np.amin(Net_Throughput_DC_BHCAP_LAT_avg), np.amin(Net_Throughput_DC_LAT_avg), np.amin(Net_Throughput_SA_LAT_avg), np.amin(Net_Throughput_SA_BHCAP_avg), np.amin(Net_Throughput_SA_BHCAP_LAT_avg), np.amin(B_Dat_DR_avg), np.amin(Net_Throughput_DC_MRT_LAT_avg), np.amin(Net_Throughput_DC_MRT_BHCAP_avg), np.amin(Net_Throughput_DC_MRT_BHCAP_LAT_avg)]); #np.amin(B_Dat_DR_avg), , np.amin(B_Dat_DR_sn_avg)\ny_max_1 = np.max([np.amax(Net_Throughput_avg), np.amax(Net_Throughput_DC_avg), np.amax(Net_Throughput_DC_MRT_avg), np.amax(Net_Throughput_DC_BHCAP_avg), np.amax(Net_Throughput_DC_BHCAP_LAT_avg), 
np.amax(Net_Throughput_DC_LAT_avg), np.amax(Net_Throughput_SA_LAT_avg), np.amax(Net_Throughput_SA_BHCAP_avg), np.amax(Net_Throughput_SA_BHCAP_LAT_avg), np.amax(B_Dat_DR_avg), np.amax(Net_Throughput_DC_MRT_LAT_avg), np.amax(Net_Throughput_DC_MRT_BHCAP_avg), np.amax(Net_Throughput_DC_MRT_BHCAP_LAT_avg)]); #np.amax(B_Dat_DR_avg),, np.amax(B_Dat_DR_sn_avg)\n\ny_min_2 = np.amin([np.amin(B_Dat_DR_avg)]);\ny_max_2 = np.max([np.amax(B_Dat_DR_avg)]);\n#plotter.plotter('dashline',np.arange(scn.num_users_min, scn.num_users_max, scn.user_steps_siml),Net_Throughput_avg,5,10,1,45,0,0,1,'major','both', 'yes', 'Total Network Throughput', np)\n#print Net_Throughput_SA_LAT_avg\n# plt.plot(x_axis, Net_Throughput_avg, 'r-o', x_axis, Net_Throughput_DC_avg, 'b-o' , x_axis, Net_Throughput_DC_MRT_avg, 'g-.', x_axis, Net_Throughput_DC_BHCAP_avg, 'k--s', x_axis, Net_Throughput_DC_BHCAP_LAT_avg, 'm--d', x_axis , Net_Throughput_DC_LAT_avg, 'c--p', x_axis, Net_Throughput_SA_MRT_avg, 'k-.', x_axis, Net_Throughput_SA_LAT_avg, 'b:', x_axis, Net_Throughput_SA_BHCAP_avg, 'g--D', x_axis, Net_Throughput_SA_BHCAP_LAT_avg, 'r:', x_axis, Net_Throughput_SA_MRT_LAT_avg, 'b--*', x_axis, Net_Throughput_DC_MRT_LAT_avg, 'k--*', x_axis, Net_Throughput_DC_MRT_BHCAP_avg, 'c--o', x_axis, Net_Throughput_DC_MRT_BHCAP_LAT_avg, 'm-^', x_axis, Net_Throughput_SA_MRT_BHCAP_avg, 'g-^', x_axis, Net_Throughput_SA_MRT_BHCAP_LAT_avg, 'b-s'); \nplt.plot(x_axis, Net_Throughput_avg/1e9, 'r-x', x_axis, Net_Throughput_DC_avg/1e9, 'b-o' , x_axis, Net_Throughput_DC_MRT_avg/1e9, 'g-.', x_axis, Net_Throughput_SA_MRT_avg/1e9, 'm-.', x_axis, Net_Throughput_DC_BHCAP_avg/1e9, 'k--s', x_axis, Net_Throughput_DC_BHCAP_LAT_avg/1e9, 'm--d', x_axis , Net_Throughput_DC_LAT_avg/1e9, 'c--p', x_axis, Net_Throughput_SA_LAT_avg/1e9, 'b-x', x_axis, Net_Throughput_SA_BHCAP_avg/1e9, 'g--D', x_axis, Net_Throughput_SA_BHCAP_LAT_avg/1e9, 'r:', fillstyle = 'none'); \nplt.xticks(np.arange(scn.num_users_min, scn.num_users_max, scn.user_steps_siml));\n#plt.yticks(np.arange(y_min_1,y_max_1,5e10));\nplt.legend(['SA','DC', 'DC + MRT', 'SA + MRT', 'DC + CB', 'DC + CB + CPL', 'DC + CPL', 'SA + CPL', 'SA + CB', 'SA + CB + CPL'], loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol = 4) #'Baseline (RSSI)',\nplt.grid(which= 'major',axis= 'both');\nplt.title('Network Wide Throughput')\nplt.xlabel('Number of eMBB users')\nplt.ylabel('Throughput (Gbps)')\nplt.savefig('NetThrough', dpi=1200, facecolor='w', edgecolor='w',\n orientation='landscape', papertype='letter', format='png',\n transparent=False, bbox_inches='tight', pad_inches=0.1,\n frameon=None, metadata=None)\n\n\n# ===================\n# Accepted Users Plot \n\n#ind = np.arange(1,14); # The x locations for the grouped plots\n#width = 0.20; # Width of the bars\n\n#fig, ax = plt.subplots()\n#rects1 = ax.bar(ind - 13*width/13, AU_Base_DR, width, label='Baseline')\n# label='Women')\n\n# Add some text for labels, title and custom x-axis tick labels, etc.\n#ax.set_ylabel('Scores')\n#ax.set_title('Scores by group and gender')\n#ax.set_xticks(ind)\n#ax.set_xticklabels(('G1', 'G2', 'G3', 'G4', 'G5'))\n#ax.legend()\n\nprint ('Baseline Accepted Users:', AU_Base_DR_avg)\n#print ('Baseline with Minimum rate Accepted Users:', AU_Base_DR_fs_avg)\nprint ('SA Accepted Users:', AU_DR_avg)\nprint ('DC Accepted Users:', AU_DR_DC_avg)\nprint ('DC+MRT Accepted Users:', AU_DR_DC_MRT_avg)\nprint ('DC+BHCAP Accepted Users:', AU_DR_DC_BHCAP_avg)\nprint ('DC+LAT Accepted Users:', AU_DR_DC_LAT_avg)\nprint ('DC+BHCAP+LAT Accepted 
Users:',AU_DR_DC_BHCAP_LAT_avg)\nprint ('SA+MRT Accepted Users:',AU_DR_SA_MRT_avg)\nprint ('SA+LAT Accepted Users:',AU_DR_SA_LAT_avg)\nprint ('SA+BHCAP Accepted Users:',AU_DR_SA_BHCAP_avg)\nprint ('SA+BHCAP+LAT Accepted Users:',AU_DR_SA_BHCAP_LAT_avg)\n#print ('SA+MRT+LAT Accepted Users:',AU_DR_SA_MRT_LAT_avg)\nprint ('DC+MRT+LAT Accepted Users:',AU_DR_DC_MRT_LAT_avg)\nprint ('DC+MRT+BHCAP Accepted Users:',AU_DR_DC_MRT_BHCAP_avg)\nprint ('DC+MRT+BHCAP+LAT Accepted Users:',AU_DR_DC_MRT_BHCAP_LAT_avg)\n#print ('SA+MRT+BHCAP Accepted Users:',AU_DR_SA_MRT_BHCAP_avg)\n#print ('SA+MRT+BHCAP+LAT Accepted Users:',AU_DR_SA_MRT_BHCAP_LAT_avg)\n\n# ===> Setting up a Broken plot to depict Values with distinct ranges\n\nf, (ax, ax2) = plt.subplots(2, 1, sharex = True, gridspec_kw={'height_ratios': [3, 1]})\n\n# plot the same data on both axes\n#ax.plot(x_axis, Net_Throughput_avg, 'r-o', x_axis, Net_Throughput_DC_avg, 'b-o' , x_axis, Net_Throughput_DC_MRT_avg, 'g-.', x_axis, Net_Throughput_DC_BHCAP_avg, 'm--', x_axis, Net_Throughput_DC_BHCAP_LAT_avg, 'm-.', x_axis , Net_Throughput_DC_LAT_avg, 'c--x',x_axis, Net_Throughput_SA_MRT_avg, 'k-.', x_axis, Net_Throughput_SA_LAT_avg, 'b:', x_axis, Net_Throughput_SA_BHCAP_avg, 'g--x', x_axis, Net_Throughput_SA_BHCAP_LAT_avg, 'r:', x_axis, Net_Throughput_SA_MRT_LAT_avg, 'g:', x_axis, Net_Throughput_DC_MRT_LAT_avg, 'k:')\n#ax.plot(x_axis, Net_Throughput_avg, 'r-o', x_axis, Net_Throughput_DC_avg, 'b-o' , x_axis, Net_Throughput_DC_MRT_avg, 'g-.', x_axis, Net_Throughput_DC_BHCAP_avg, 'k--s', x_axis, Net_Throughput_DC_BHCAP_LAT_avg, 'm--d', x_axis , Net_Throughput_DC_LAT_avg, 'c--p', x_axis, Net_Throughput_SA_MRT_avg, 'k-.', x_axis, Net_Throughput_SA_LAT_avg, 'b:', x_axis, Net_Throughput_SA_BHCAP_avg, 'g--D', x_axis, Net_Throughput_SA_BHCAP_LAT_avg, 'r:', x_axis, Net_Throughput_SA_MRT_LAT_avg, 'b--*', x_axis, Net_Throughput_DC_MRT_LAT_avg, 'k--*', x_axis, Net_Throughput_DC_MRT_BHCAP_avg, 'c--o', x_axis, Net_Throughput_DC_MRT_BHCAP_LAT_avg, 'm-^', x_axis, Net_Throughput_SA_MRT_BHCAP_avg, 'g-^', x_axis, Net_Throughput_SA_MRT_BHCAP_LAT_avg, 'b-s'); \nax.plot(x_axis, Net_Throughput_avg/1e9, 'r-o', x_axis, Net_Throughput_DC_avg/1e9, 'b-o' , x_axis, Net_Throughput_DC_MRT_avg/1e9, 'g-.', x_axis, Net_Throughput_SA_MRT_avg/1e9, 'm-.', x_axis, Net_Throughput_DC_BHCAP_avg/1e9, 'k--s', x_axis, Net_Throughput_DC_BHCAP_LAT_avg/1e9, 'm--d', x_axis , Net_Throughput_DC_LAT_avg/1e9, 'c--p', x_axis, Net_Throughput_SA_LAT_avg/1e9, 'b:', x_axis, Net_Throughput_SA_BHCAP_avg/1e9, 'g--D', x_axis, Net_Throughput_SA_BHCAP_LAT_avg/1e9, 'r:'); \nax2.plot(x_axis, B_Dat_DR_avg/1e9, 'k--x') #x_axis, B_Dat_DR_sn_avg, 'b--x', \n\nax2.set_ylim(0.8*(y_min_2/1e9),1.3*(y_max_2/1e9))\n#ax2.set_yticks((0,1.5*y_max_2,0.5*1e8))\n#ax.set_ylim(0.9*(y_min_1/1e9),1.1*(y_max_1/1e9))\n#ax.set_yticks((0.9*y_min_1,1.1*y_max_1,2*1e10))\n\nax.spines['bottom'].set_visible(False)\nax2.spines['top'].set_visible(False)\nax.xaxis.tick_top()\nax.tick_params(labeltop='off')\nax2.xaxis.tick_bottom()\n\nf.legend(['SA','DC', 'DC + MRT','SA + MRT', 'DC + CB', 'DC + CB + CPL', 'DC + CPL', 'SA + CPL', 'SA + CB', 'SA + CB + CPL', 'Baseline'], bbox_to_anchor=(0.5, 0.25), loc='lower left', ncol=2, prop={'size': 6.5})#, prop={'size': 7.5}) #'Baseline (RSSI)',\n\nd1 = 0.025\nd = .015 # how big to make the diagonal lines in axes coordinates\n# arguments to pass to plot, just so we don't keep repeating them\nkwargs = dict(transform=ax.transAxes, color='k', clip_on=False)\nax.plot((-d, +d), (-d, +d), **kwargs) # top-left 
diagonal\nax.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal\n\nkwargs.update(transform=ax2.transAxes) # switch to the bottom axes\nax2.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal\nax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal\n\n#plt.plot(x_axis, B_Dat_DR_avg, 'b-', x_axis, B_Dat_DR_sn_avg, 'r-')\nax.grid(which= 'both',axis= 'both')\nax2.grid(which= 'both',axis= 'both')\n\nf.suptitle('Total Network Throughput -- Baseline Comparison ')\nf.text(0.04, 0.5, 'Throughput (Gbps)', va='center', rotation='vertical')\nf.text(0.5, 0.04, 'Number of eMBB users', ha='center')\n#ax2.xlabel('Number of eMBB users')\n\nplt.savefig('NetThrough_Split', dpi=1200, facecolor='w', edgecolor='w',\n orientation='landscape', papertype='letter', format='png',\n transparent=False, bbox_inches='tight', pad_inches=0.1,\n frameon=None, metadata=None)\n\n# ================\n# Fairness BoxPlot\n\n#box_data = [jfr_DC, jfr_SA, jfr_DC_MRT, jfr_SA_MRT, jfr_DC_LAT, jfr_SA_LAT, jfr_DC_BHCAP, jfr_SA_BHCAP, jfr_DC_BHCAP_LAT, jfr_SA_BHCAP_LAT, jfr_DC_MRT_LAT, jfr_SA_MRT_LAT, jfr_DC_MRT_BHCAP, jfr_SA_MRT_BHCAP, jfr_DC_MRT_BHCAP_LAT, jfr_SA_MRT_BHCAP_LAT] \nbox_data = [jfr_DC, jfr_SA, jfr_DC_MRT, jfr_SA_MRT, jfr_DC_LAT, jfr_SA_LAT, jfr_DC_BHCAP, jfr_SA_BHCAP, jfr_DC_BHCAP_LAT, jfr_SA_BHCAP_LAT] \n\nfig, ax = plt.subplots()\nplt.title('Jain\\'s Fairness Index Deviation')\nplt.boxplot(box_data)\nax.set_ylim(0,1,0.1)\n#plt.xticks(range(1,17), ['DC', 'SA', 'DC+MRT', 'SA+MRT', 'DC+LAT', 'SA+LAT', 'DC+BHCAP', 'SA+BHCAP', 'DC+BHCAP+LAT', 'SA+BHCAP+LAT', 'DC+MRT+LAT', 'SA+MRT+LAT', 'DC+MRT+BHCAP', 'SA+MRT+BHCAP', 'DC+MRT+BHCAP+LAT', 'SA+MRT+BHCAP+LAT'], fontsize = 8, rotation = '90')\nplt.xticks(range(1,11), ['DC', 'SA', 'DC+MRT', 'SA+MRT', 'DC+CPL', 'SA+CPL', 'DC+CB', 'SA+CB', 'DC+CB+CPL', 'SA+CB+CPL'], fontsize = 8, rotation = '90')\n\nplt.savefig('Boxplot', dpi=1200, facecolor='w', edgecolor='w',\n orientation='landscape', papertype='letter', format='png',\n transparent=False, bbox_inches='tight', pad_inches=0.1,\n frameon=None, metadata=None)\n\n#print application_DR.tolist()\n#plt.bar(np.arange(1,Rate.shape[0]+1),application_DR_SA_BHCAP_LAT)\n#plt.plot(np.arange(1,Rate.shape[0]+1), Base_DR, 'r--')\n#plt.xticks(np.arange(1, Rate.shape[0] + 1, 25), rotation=45);\n#plt.yticks(np.arange(min(application_DR),max(application_DR),1e9));\n#plt.legend(['Single Association (SA)','Dual Association (DA)', 'DA + Minimum Rate', 'DA + Constrained Backhaul (CB) [1% Bound Gap]', 'DA + CB + Constrained Path Latency (CPL) [1% Bound Gap]', 'DA + Minimum Rate + CPL', 'SA + Minimum Rate', 'SA + Minimum Rate + CPL', 'SA + CB [1% Bound Gap]', 'SA + CB + CPL [1% Bound Gap]'])\n#plt.grid(which= 'major',axis= 'both');\n#plt.title('Per Application Data Rate (SA + CB + CPL)')\n#plt.show()\n\n\n# ===============\n# Heatmap Plotter\n\nplt.close(\"all\")\nhmap_data = np.load(os.path.dirname(os.getcwd()) +'/UserAssociation/Data/Temp/hmap_'+iter_num+'.npz', allow_pickle='True')\nusr_locs = hmap_data['arr_0']\nmc_locs = hmap_data['arr_2']\nsc_locs = hmap_data['arr_1']\n#print num_iter\n# plt.close('all')\n# f, ax1 = plt.subplots()\n# ax1.bar(np.arange(len(DC_MRT_BW_SC)), DC_MRT_BW_SC)\n# ax1.bar(np.arange(len(DC_MRT_BW_MC)), DC_MRT_BW_MC)\n# #ax1.bar(np.arange(len(DC_MRT_BW_TOT)), DC_MRT_BW_TOT)\n# ax2 = ax1.twinx()\n# ax2.plot(np.arange(len(DC_MRT_BW_SC)), SINR_DC_MRT_BW_SC, 'wo', markersize = 12)\n# ax2.plot(np.arange(len(DC_MRT_BW_MC)), SINR_DC_MRT_BW_MC, 'k^', markersize = 12)\n\n# f.tight_layout()\n# 
plt.show()\nplotter.hmap_creator(usr_locs, mc_locs, sc_locs, rate_matrix_DC, optim_val, np, scn)\n\n# SINR File\n#SINR_DC_MRT_BW_TOT[np.where(SINR_DC_MRT_BW_TOT==350)] = float('Nan')\n#csvsaver.csvsaver(SINR_DC_MRT_BW_TOT,[], \"SINRIFMCSC9users.csv\")\n#csvsaver.csvsaver(Data_DC_MRT.item()['X_optimal_data'+str(num_iter-1)], [], \"OptBFMCSC9users.csv\")\n\nplotter.hist_plotter(DC_avg_rt, SA_avg_rt, rate_matrix_DC_BHCAP, rate_matrix_SA_BHCAP, rate_matrix_SA_LAT, rate_matrix_SA_MRT, DC_MRT_avg_rt, rate_matrix_DC_LAT, rate_matrix_SA_MRT_LAT, rate_matrix_DC_MRT_LAT, rate_matrix_SA_BHCAP_LAT, rate_matrix_DC_BHCAP_LAT, np, scn)\n# #plotter.scatter_plotter(rate_matrix_DC, rate_matrix_DC_MRT,np,scn)\n# #plotter.accepted_user_plotter(AU_Base_DR_avg,AU_DR_avg,AU_DR_DC_avg,AU_DR_DC_MRT_avg,AU_DR_DC_BHCAP_avg,AU_DR_DC_LAT_avg,AU_DR_DC_BHCAP_LAT_avg,AU_DR_SA_MRT_avg,AU_DR_SA_LAT_avg,AU_DR_SA_BHCAP_avg,AU_DR_SA_BHCAP_LAT_avg,AU_DR_SA_MRT_LAT_avg,AU_DR_DC_MRT_LAT_avg,np,scn)\nplotter.bhutil_latprov_plotter(bhutil_val_DC, bhutil_val_DC_BHCAP, bhutil_val_DC_BHCAP_LAT, avail_bh, latprov_DC, latprov_DC_LAT, latprov_DC_MRT_LAT, latprov_DC_BHCAP_LAT, np, scn)\nplt.close('all') # Close all existing figures\nfor idx in range(1,num_iter+1):\n\tplotter.infeasible_iter_counter(iters_infeas[num_iter-idx], iters_infeas_DC[num_iter-idx], iters_infeas_DC_MRT[num_iter-idx], iters_infeas_SA_MRT_LAT[num_iter-idx], \n\t\titers_infeas_SA_MRT_BHCAP[num_iter-idx], iters_infeas_DC_MRT_BHCAP[num_iter-idx], iters_infeas_DC_MRT_BHCAP_LAT[num_iter-idx], iters_infeas_SA_MRT[num_iter-idx] , \n\t\titers_timeout[num_iter-idx], iters_timeout_DC[num_iter-idx], iters_timeout_DC_MRT[num_iter-idx], iters_timeout_SA_MRT_LAT[num_iter-idx], iters_timeout_SA_MRT_BHCAP[num_iter-idx], \n\t\titers_timeout_DC_MRT_BHCAP[num_iter-idx], iters_timeout_DC_MRT_BHCAP_LAT[num_iter-idx], iters_timeout_SA_MRT[num_iter-idx], iters_infeas_SA_MRT_BHCAP_LAT[num_iter-idx], iters_timeout_SA_MRT_BHCAP_LAT[num_iter-idx] , \n\t\titers_infeas_DC_MRT_LAT[num_iter-idx], iters_timeout_DC_MRT_LAT[num_iter-idx], iters_infeas_SA_BHCAP[num_iter-idx], iters_timeout_SA_BHCAP[num_iter-idx], iters_infeas_SA_LAT[num_iter-idx], iters_timeout_SA_LAT[num_iter-idx],\n\t\titers_infeas_SA_BHCAP_LAT[num_iter-idx], iters_timeout_SA_BHCAP_LAT[num_iter-idx], iters_infeas_DC_BHCAP[num_iter-idx], iters_timeout_DC_BHCAP[num_iter-idx], iters_infeas_DC_LAT[num_iter-idx], iters_timeout_DC_LAT[num_iter-idx],\n\t\titers_infeas_DC_BHCAP_LAT[num_iter-idx], iters_timeout_DC_BHCAP_LAT[num_iter-idx], np,scn)\nplotter.timecdf(time_DC, time_DC_MRT , time_SA_MRT , time_DC_MRT_BHCAP , time_DC_MRT_BHCAP_LAT , time_DC_MRT_LAT , time_SA_MRT_BHCAP , time_SA_MRT_BHCAP_LAT , time_SA_MRT_LAT ,time_SA, np, scn )",
"id": "11988162",
"language": "Python",
"matching_score": 6.478579521179199,
"max_stars_count": 1,
"path": "plotgen.py"
},
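A minimal, self-contained sketch of the broken-axis technique that `plotgen.py` uses to show the multi-Gbps optimizer throughput curves and the much smaller baseline on one figure. The user counts and throughput values below are placeholders, not the simulation output loaded from the `.npz` files.

```python
# Broken-axis plot sketch with synthetic data (assumed values, for illustration only).
import numpy as np
import matplotlib.pyplot as plt

x = np.array([500, 600, 700, 800, 900, 1000])   # eMBB user counts (assumed)
top_curve = np.linspace(40, 90, x.size)          # Gbps, placeholder optimizer result
baseline = np.linspace(1.0, 2.5, x.size)         # Gbps, placeholder baseline

f, (ax, ax2) = plt.subplots(2, 1, sharex=True,
                            gridspec_kw={'height_ratios': [3, 1]})
ax.plot(x, top_curve, 'r-o')                     # high-range data on the top axis
ax2.plot(x, baseline, 'k--x')                    # low-range baseline on the bottom axis

ax.spines['bottom'].set_visible(False)           # hide the joint between the two axes
ax2.spines['top'].set_visible(False)
ax.xaxis.tick_top()
ax.tick_params(labeltop=False)
ax2.xaxis.tick_bottom()

d = .015                                         # size of the diagonal cut marks
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((-d, +d), (-d, +d), **kwargs)            # top-left diagonal
ax.plot((1 - d, 1 + d), (-d, +d), **kwargs)      # top-right diagonal
kwargs.update(transform=ax2.transAxes)           # switch to the bottom axes
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs)     # bottom-left diagonal
ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs)  # bottom-right diagonal

f.suptitle('Total Network Throughput -- Baseline Comparison')
f.text(0.04, 0.5, 'Throughput (Gbps)', va='center', rotation='vertical')
f.text(0.5, 0.04, 'Number of eMBB users', ha='center')
plt.savefig('NetThrough_Split.png', dpi=300, bbox_inches='tight')
```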
{
"content": "# ============================== #\n# Plotting and Display Functions #\n# ============================== #\n\n# This file includes functions for plotting. It reduces the clutter in the main function file. \n\n# =============================\n# Import the necessary binaries\n# =============================\n\nimport matplotlib.pyplot as plt\nimport scipy\nfrom scipy.stats import norm\nimport seaborn as sns\nimport os\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes\nfrom mpl_toolkits.axes_grid1.inset_locator import mark_inset\nimport matplotlib.ticker as ticker\n\n# =======\n# Plotter\n# =======\n\ndef plotter(typ_plt, x_val, y_val, tick_space_x, tick_space_y, rtn_flag_x, rtn_angle_x, rtn_flag_y, rtn_angle_y, grid_flag, grid_type, grid_ax_type, title_flag, title_name, np):\n\tif typ_plt == 'dashline':\n\t\tplt.plot(x_val, y_val, 'r--');\n\t\tif rtn_flag_x:\n\t\t\tplt.xticks(np.arange(min(x_val),max(x_val),tick_space_x),rotation=rtn_angle_x);\n\t\telse:\n\t\t\tplt.xticks(np.arange(min(x_val),max(x_val),tick_space_x));\n\t\tif rtn_flag_y:\n\t\t\tplt.yticks(np.arange(min(y_val),max(y_val),tick_space_y),rotation=rtn_angle_y);\n\t\telse:\n\t\t\tplt.yticks(np.arange(min(y_val),max(y_val),tick_space_y));\n\t\tif grid_flag:\n\t\t\tplt.grid(which=grid_type,axis=grid_ax_type);\n\t\tif title_flag:\n\t\t\tplt.title(title_name);\n\n\tif typ_plt == 'heatmap':\n\t\tax = sns.heatmap(x_val); \n\t\tax.set_title(\"SINR heatmap\")\n\t\tax.set_xlabel(\"Small Cell Access Points in consideration\")\n\t\tax.set_ylabel(\"Number of eMBB users\")\n\t\t\n\t\t\n\tplt.show() \n\n# ====================================\n# Plotting Function for Optimized data\n# ====================================\n\ndef optimizer_plotter(data_plt):\n\t#sns.set_style(\"whitegrid\", {'axes.grid' : False})\n\tf = plt.figure(figsize=(12,10))\n\tax = sns.heatmap(data_plt); \n\t#x = Axes3D(f)\n\t#ax.scatter(data_plt.shape[0], data_plt.shape[1], data_plt)\n\t#g = plt.figure(2)\n\t#ax1 = sns.heatmap(data_plt[:,:,1]);\n\n\t#h = plt.figure(3)\n\t#ax2 = sns.heatmap(data_plt[:,:,2]); \n\tax.set_title(\"User Data Rate Heatmap\")\n\tax.set_xlabel(\"Access Points\")\n\tax.set_ylabel(\"eMBB Users\")\n\t#ax.set_zlabel(\"Data Rate\")\n\tplt.savefig(os.getcwd()+\"/CircularDeploy_MCSC.png\")\n\tplt.show()\t\n\n# =====================================\n# Geographical Heatmap creator Function\n# =====================================\n\ndef hmap_creator(usr_lcs, mc_lcs, sc_lcs, rate_mat, connect_info_mat, np, scn):\n\tf,ax = plt.subplots()\n\tax = plt.gca()\n\t#print usr_lcs.shape[0]\n\t#print rate_mat \n\n\ts1, = ax.plot(usr_lcs[:,0], usr_lcs[:,1], \"r*\", markersize=12) # Plot the User locations\n\ts2, = ax.plot(mc_lcs[:,0],mc_lcs[:,1],\"k^\", markersize=12) # Plot the macro cell locations\n\ts3, = ax.plot(sc_lcs[:,0],sc_lcs[:,1],\"g^\", markersize=8) # Plot the small cell locations\n\t\n\tfor i in range(connect_info_mat.shape[0]):\n\t\tfor j in range(connect_info_mat.shape[1]):\n\t\t\tif connect_info_mat[i,j] == 1 and j<sc_lcs.shape[0]:\n\t\t\t\tax.plot([usr_lcs[i,0],sc_lcs[j,0]],[usr_lcs[i,1],sc_lcs[j,1]],'c-')\n\t\t\telif connect_info_mat[i,j] == 1 and j>=sc_lcs.shape[0]:\n\t\t\t\tax.plot([usr_lcs[i,0],mc_lcs[j-sc_lcs.shape[0],0]],[usr_lcs[i,1],mc_lcs[j-sc_lcs.shape[0],1]],'m-')\n\t\n\t#Create the color range\n\t# range_rate = np.arange(np.amin(rate_mat),np.amax(rate_mat),(np.amax(rate_mat)-np.amin(rate_mat))/7) # These are rate bands for the circular 
colors\n\t# color_range = ['#ffa07a','m','b','r','#daa520','#b22222','#8b0000'] # These are the color codes\n\t# circle_size = np.arange(6,13,1) # Circle size range\n\t# # color_sel = [] # Empty list to hold the color code and circle sizes\n\t# # # # Establish rate based circles on the plot \n\t# for i in range(usr_lcs.shape[0]):\n\t# \tif rate_mat[i] >= range_rate[0] and rate_mat[i] < range_rate[1]:\n\t# \t\t#ax.plot(usr_lcs[i,0],usr_lcs[i,1], color_range[0], markersize=circle_size[0], fillstyle='none')\n\t# \t\ts4, = ax.plot(usr_lcs[i,0],usr_lcs[i,1], marker = 'o', markeredgecolor = color_range[0], markerfacecolor = color_range[0], markersize=circle_size[0])\n\t\t\t\t\t\t\n\t# \telif rate_mat[i] >= range_rate[1] and rate_mat[i] < range_rate[2]:\n\t# \t\t#ax.plot(usr_lcs[i,0],usr_lcs[i,1], color_range[1], markersize=circle_size[1], fillstyle='none')\n\t# \t\ts5, = ax.plot(usr_lcs[i,0],usr_lcs[i,1], marker = 'o', markeredgecolor = color_range[1], markerfacecolor = color_range[1], markersize=circle_size[1])\n\t\t\t\n\t# \telif rate_mat[i] >= range_rate[2] and rate_mat[i] < range_rate[3]:\n\t# \t\t#ax.plot(usr_lcs[i,0],usr_lcs[i,1], color_range[2], markersize=circle_size[2], fillstyle='none')\n\t# \t\ts6, = ax.plot(usr_lcs[i,0],usr_lcs[i,1], marker = 'o', markeredgecolor = color_range[2], markerfacecolor = color_range[2], markersize=circle_size[2])\n\t\t\t\n\t# \telif rate_mat[i] >= range_rate[3] and rate_mat[i] < range_rate[4]:\n\t# \t\t#ax.plot(usr_lcs[i,0],usr_lcs[i,1], color_range[3], markersize=circle_size[3], fillstyle='none')\n\t# \t\ts7, = ax.plot(usr_lcs[i,0],usr_lcs[i,1], marker = 'o',markeredgecolor = color_range[3], markerfacecolor = color_range[3], markersize=circle_size[3])\n\t\t\t\n\t# \telif rate_mat[i] >= range_rate[4] and rate_mat[i] < range_rate[5]:\n\t# \t\t#ax.plot(usr_lcs[i,0],usr_lcs[i,1], color_range[4], markersize=circle_size[4], fillstyle='none')\n\t# \t\ts8, = ax.plot(usr_lcs[i,0],usr_lcs[i,1], marker = 'o', markeredgecolor = color_range[4], markerfacecolor = color_range[4], markersize=circle_size[4])\n\t\t\t\n\t# \telif rate_mat[i] >= range_rate[5] and rate_mat[i] < range_rate[6]:\n\t# \t\t#ax.plot(usr_lcs[i,0],usr_lcs[i,1], color_range[5], markersize=circle_size[5], fillstyle='none')\n\t# \t\ts9, = ax.plot(usr_lcs[i,0],usr_lcs[i,1], marker = 'o', markeredgecolor = color_range[5], markerfacecolor = color_range[5], markersize=circle_size[5])\n\t\t\t\n\t# \telse:\n\t# \t\t#ax.plot(usr_lcs[i,0],usr_lcs[i,1], color_range[6], markersize=circle_size[6], fillstyle='none')\n\t# \t\ts10, = ax.plot(usr_lcs[i,0],usr_lcs[i,1], marker = 'o', markeredgecolor = color_range[6], markerfacecolor = color_range[6], markersize=circle_size[6])\n\n\t# #legend_cols = [ s2, s3, s4, s5, s6, s7, s8, s9, s10]\n\t# legend_cols = [ s2, s3, s4, s6, s7, s8, s10]\n\t# plt.legend(legend_cols,[\"Macro Cells\", \"Small Cells\", str(format(range_rate[0],'0.6e'))+'--'+str(format(range_rate[1],'0.6e')),str(format(range_rate[1],'.6e'))+'--'+str(format(range_rate[2],'.6e')),str(format(range_rate[2],'0.6e'))+'--'+str(format(range_rate[3],'0.6e')),str(format(range_rate[3],'0.6e'))+'--'+str(format(range_rate[4],'0.6e')),str(format(range_rate[4],'0.6e'))+'--'+str(format(range_rate[5],'0.6e')),str(format(range_rate[5],'0.6e'))+'--'+str(format(range_rate[6],'0.6e'))],loc='upper center', bbox_to_anchor=(0.5, -0.05), ncol = 4)\t\n\tplt.title(\"Heatmap of Individual User Data Rates (DC+MRT Scenario)\")\n\tplt.show()\n\n# =============================\n# Histogram and Scatter Plotter\n# 
=============================\n\ndef hist_plotter(rate_matrix_DC, rate_matrix_SA, rate_matrix_DC_BHCAP, rate_matrix_SA_BHCAP, rate_matrix_SA_LAT, rate_matrix_SA_MRT, rate_matrix_DC_MRT, rate_matrix_DC_LAT, rate_matrix_SA_MRT_LAT, rate_matrix_DC_MRT_LAT, rate_matrix_SA_BHCAP_LAT, rate_matrix_DC_BHCAP_LAT, np,scn):\n\t#rc_mat_DC = np.array(rate_matrix_DC).reshape(())\n\n\n\t# y1,binEdges1 = np.histogram(rate_matrix_DC,bins=200)\n\t# y2,binEdges2 = np.histogram(rate_matrix_SA,bins=200)\n\t# y3,binEdges3 = np.histogram(rate_matrix_DC_BHCAP,bins=200)\n\t# y4,binEdges4 = np.histogram(rate_matrix_SA_BHCAP,bins=200)\n\t# y5,binEdges5 = np.histogram(rate_matrix_DC_MRT,bins=200)\n\t# y6,binEdges6 = np.histogram(rate_matrix_DC_LAT,bins=200)\n\t# y7,binEdges7 = np.histogram(rate_matrix_DC_BHCAP_LAT,bins=200)\n\t# y8,binEdges8 = np.histogram(rate_matrix_DC_MRT_LAT,bins=200)\n\t# y9,binEdges9 = np.histogram(rate_matrix_SA_MRT,bins=200)\n\t# y10,binEdges10 = np.histogram(rate_matrix_SA_LAT,bins=200)\n\t# y11,binEdges11 = np.histogram(rate_matrix_SA_BHCAP_LAT,bins=200)\n\t# y12,binEdges12 = np.histogram(rate_matrix_SA_MRT_LAT,bins=200)\n\t\n\t# bincenters1 = 0.5*(binEdges1[1:]+binEdges1[:-1])\n\t# bincenters2 = 0.5*(binEdges2[1:]+binEdges2[:-1])\n\t# bincenters3 = 0.5*(binEdges3[1:]+binEdges3[:-1])\n\t# bincenters4 = 0.5*(binEdges4[1:]+binEdges4[:-1])\n\t# bincenters5 = 0.5*(binEdges5[1:]+binEdges5[:-1])\n\t# bincenters6 = 0.5*(binEdges6[1:]+binEdges6[:-1])\n\t# bincenters7 = 0.5*(binEdges7[1:]+binEdges7[:-1])\n\t# bincenters8 = 0.5*(binEdges8[1:]+binEdges8[:-1])\n\t# bincenters9 = 0.5*(binEdges9[1:]+binEdges9[:-1])\n\t# bincenters10 = 0.5*(binEdges10[1:]+binEdges10[:-1])\n\t# bincenters11 = 0.5*(binEdges11[1:]+binEdges11[:-1])\n\t# bincenters12 = 0.5*(binEdges12[1:]+binEdges12[:-1])\n\t\n\t# #print np.sum(y5)\n\n\t# plt.plot(bincenters1[np.where(y1!=0)],y1[np.where(y1!=0)],'rx',fillstyle='none', markersize=8)\n\t# plt.plot(bincenters2[np.where(y2!=0)],y2[np.where(y2!=0)],'b^',fillstyle='none', markersize=8)\n\t# #plt.plot(bincenters3,y3,'b-o',fillstyle='none')\n\t# plt.plot(bincenters5[np.where(y5!=0)],y5[np.where(y5!=0)],'ko',fillstyle='none', markersize=8)\n\n\tn_bins = 300; # Number of bins for the histogram\n\tfig, ax = plt.subplots()\n\t#ax.set_title(\"User Distribution CDF\")\n\t#print len(rate_matrix_DC)\n\t#print time_DC[:,5]\n\tn1, bins1, patches1 = ax.hist(rate_matrix_DC, n_bins,density=True, histtype='step',\n cumulative=True, label='DC')\n\tn2, bins2, patches2 = ax.hist(rate_matrix_SA, n_bins,density=True, histtype='step',\n cumulative=True, label='SA')\n\tn3, bins3, patches3 = ax.hist(rate_matrix_DC_MRT, n_bins,density=True, histtype='step',\n cumulative=True, 
label='DC+MRT')\n\n\t#ax.set_xlabel('Throughput(bps)')\n\t\n\t#plt.plot(bincenters1[y1.tolist().index(np.amax(y1))],np.amax(y1),'rs')\n\t#plt.plot(np.ones((np.arange(0,np.amax(y1)).shape[0]+1,1))*bincenters1[y1.tolist().index(np.amax(y1))],np.arange(0,np.amax(y1)+1),'k--')\n\t#plt.plot(bincenters2[y2.tolist().index(np.amax(y2))],np.amax(y2),'bs')\n\t#plt.plot(np.ones((np.arange(0,np.amax(y2)).shape[0]+1,1))*bincenters2[y2.tolist().index(np.amax(y2))],np.arange(0,np.amax(y2)+1),'g--')\n\t#plt.plot(bincenters3[y3.tolist().index(np.amax(y3))],np.amax(y3),'go')\n\t#plt.plot(np.ones((np.arange(0,np.amax(y3)).shape[0],1))*bincenters3[y3.tolist().index(np.amax(y3))],np.arange(0,np.amax(y3)),'g--')\n\t#plt.plot(bincenters7[y7.tolist().index(np.amax(y7))],np.amax(y7),'go')\n\t#plt.plot(np.ones((np.arange(0,np.amax(y7)).shape[0],1))*bincenters7[y7.tolist().index(np.amax(y7))],np.arange(0,np.amax(y7)),'g--')\n\t#plt.plot(bincenters5[y5.tolist().index(np.amax(y5))],np.amax(y5),'ks')\n\t#plt.plot(np.ones((np.arange(0,np.amax(y5)).shape[0],1))*bincenters5[y5.tolist().index(np.amax(y5))],np.arange(0,np.amax(y5)),'g--')\n\t\t\n\n\t#plt.plot(bincenters3,y3,'k--')\n\t#lt.plot(bincenters4,y4,'g-o')\n\t#plt.plot(bincenters5,y5,'m--')\n\t#plt.plot(bincenters6,y6,'c--')\n\t#plt.plot(bincenters7,y7,'r-*')\n\t#plt.plot(bincenters8,y8,'b:')\n\t#plt.plot(bincenters9,y9,'k-*')\n\t#plt.plot(bincenters10,y10,'g:')\n\t#plt.plot(bincenters11,y11,'m-*')\n\t#plt.plot(bincenters12,y12,'c:')\n\t\n\t# plt.legend([\"DC\",\"SA\",\"DC+MRT\"])\n\t# plt.xlabel('Throughput(bps)')\n\t# plt.ylabel('Number of Users')\n\t# plt.title('User Distribution')\n\t#plt.grid()\n\tplt.close()\n\n\tf, ax1 = plt.subplots(figsize=(8,4))\n\tax1.plot(bins1[:-1]/1e9,n1,'r-', label=\"DC\")\n\tax1.plot(bins2[:-1]/1e9,n2,'b-', label=\"SA\")\n\tax1.plot(bins3[:-1]/1e9,n3,'k-', label=\"DC+MRT\")\n\tax1.plot([0.1]*len(np.arange(0,1.1,0.1)), np.arange(0,1.1,0.1), 'g--', label=\"Minimum Rate = 100 Mbps\")\n\tax1.plot(bins3[0]/1e9, n3[0], 'ko', markersize=12, label=\"Minimum Rate with DC+MRT: \"+str(format(bins3[0]/1e6,'0.2f'))+\" Mbps\")\n\tax1.set_title(\"User Distribution CDF\", fontsize = 14)\n\tax1.set_xlabel('Throughput(Gbps)', fontsize = 12)\n\tax1.set_ylim(0,1,0.1)\n\tax1.set_xlim(min(min(bins1[:-1]/1e9), min(bins2[:-1]/1e9), min(bins3[:-1]/1e9)),max(max(bins1[:-1]/1e9), max(bins2[:-1]/1e9), max(bins3[:-1]/1e9)) )\n\tax1.legend(prop={'size': 12})\n\tax1.yaxis.set_ticks_position('none')\n\tax1.xaxis.set_ticks_position('none')\n\taxins = zoomed_inset_axes(ax1, 15.5, loc=10, bbox_to_anchor=(1101.,405.))\n\taxins.plot(bins1[:-1]/1e9,n1,'r-')\n\taxins.plot(bins2[:-1]/1e9,n2,'b-')\n\taxins.plot(bins3[:-1]/1e9,n3,'k-')\n\taxins.plot([0.1]*len(np.arange(0,1.1,0.1)), np.arange(0,1.1,0.1), 'g--')\n\taxins.plot(bins3[0]/1e9, n3[0], 'ko', markersize=8)\n\taxins.set_ylim(0.01,0.03)\n\taxins.set_xlim(0.06,0.12)\n\taxins.yaxis.set_ticks_position('none')\n\taxins.xaxis.set_ticks_position('none')\n\taxins.yaxis.set_ticks(np.arange(0.01,0.035,0.01))\n\taxins.xaxis.set_ticks(np.arange(0.07,0.14,0.02))\n\taxins.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'))\n\t#plt.yticks(visible = False)\n\t#plt.xticks(visible = False)\n\tmark_inset(ax1, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\")\n\t#ax1.grid()\n\t\n\tplt.show()\n\ndef scatter_plotter(rate_matrix_DC,rate_matrix_DC_MRT,np,scn):\n\t\n\tf,ax = plt.subplots(2)\n\tf.suptitle('Data Rate Scatter Plot of Users -- DC Scenario (top); DC+MRT Scenario (bottom)')\n\tax[0].scatter(np.arange(1,len(rate_matrix_DC) 
+1),rate_matrix_DC,alpha=0.5)\n\tax[0].plot(np.arange(1,len(rate_matrix_DC) +1),np.ones((np.arange(1,len(rate_matrix_DC) +1).shape[0],1))*1e8, 'r--')\n\t#ax[0].xlabel('Users')\n\t#ax[0].ylabel('Throughput (in bps)')\n\t#ax[0].title('a')\n\tax[0].legend([\"100 Mbps\"])\n\tax[1].scatter(np.arange(1,len(rate_matrix_DC_MRT) +1),rate_matrix_DC_MRT,alpha=0.5)\n\tax[1].plot(np.arange(1,len(rate_matrix_DC_MRT) +1),np.ones((np.arange(1,len(rate_matrix_DC_MRT) +1).shape[0],1))*1e8, 'r--')\n\t#ax[1].xlabel('Users')\n\t#ax[1].ylabel('Throughput (in bps)')\n\t#ax[1].title('b')\n\tax[1].legend([\"100 Mbps\"])\n\tf.text(0.5, 0.04, 'Users', ha='center')\n\tf.text(0.04, 0.5, 'Throughput (in bps)', va='center', rotation='vertical')\n\n\tplt.show()\n\n# ========================================\n# Accepted and Rejected User Visualization\n# ========================================\n\ndef accepted_user_plotter(accepted_usr_list_baseline, accepted_usr_list_SA, accepted_usr_list_DC, accepted_usr_list_DC_MRT,accepted_usr_list_DC_BHCAP,accepted_usr_list_DC_LAT,accepted_usr_list_DC_BHCAP_LAT,accepted_usr_list_SA_MRT,accepted_usr_list_SA_LAT,accepted_usr_list_SA_BHCAP,accepted_usr_list_SA_BHCAP_LAT,accepted_usr_list_SA_MRT_LAT,accepted_usr_list_DC_MRT_LAT,np,scn):\n\tactual_user_list = [500,600,700,800,900,1000]\n\tlabels = ['500','600','700','800','900','1000']\n\tbaseline_ar = -1*accepted_usr_list_baseline + actual_user_list;\n\tSA_ar = -1*accepted_usr_list_SA + actual_user_list\n\tDC_ar = -1*accepted_usr_list_DC + actual_user_list\n\tDC_MRT_ar = -1*accepted_usr_list_DC_MRT + actual_user_list\n\tDC_BHCAP_ar = -1*accepted_usr_list_DC_BHCAP + actual_user_list\n\tDC_LAT_ar = -1*accepted_usr_list_DC_LAT + actual_user_list\n\tSA_MRT_ar = -1*accepted_usr_list_SA_MRT + actual_user_list\n\tSA_LAT_ar = -1*accepted_usr_list_SA_LAT + actual_user_list\n\tSA_BHCAP_ar = -1*accepted_usr_list_SA_BHCAP + actual_user_list\n\tSA_BHCAP_LAT_ar = -1*accepted_usr_list_SA_BHCAP_LAT + actual_user_list\n\tSA_MRT_LAT_ar = -1*accepted_usr_list_SA_MRT_LAT + actual_user_list\n\tDC_MRT_LAT_ar = -1*accepted_usr_list_DC_MRT_LAT + actual_user_list\n\t\n\tx = np.arange(len(labels)) # The label locations\n\twidth = 0.15 # Width of the bars\n\tf,ax = plt.subplots()\n\tr1 = ax.bar(x - width/2, baseline_ar, width, label='Baseline')\n\tr2 = ax.bar(x - 5*width/12, SA_ar, width, label='SA')\n\tr3 = ax.bar(x - width/3, DC_ar, width, label='DC')\n\tr4 = ax.bar(x - width/4, DC_MRT_ar, width, label='DC+MRT')\n\tr5 = ax.bar(x - width/6, DC_BHCAP_ar, width, label='DC+BHCAP')\n\tr6 = ax.bar(x - width/12, DC_LAT_ar, width, label='DC+LAT')\n\tr7 = ax.bar(x + width/12, SA_MRT_ar, width, label='SA+MRT')\n\tr8 = ax.bar(x + width/6, SA_LAT_ar, width, label='SA+LAT')\n\tr9 = ax.bar(x + width/4, SA_BHCAP_ar, width, label='SA+BHCAP')\n\tr1 = ax.bar(x + width/3, SA_BHCAP_LAT_ar, width, label='SA+BHCAP+LAT')\n\tr1 = ax.bar(x + 5*width/12, SA_MRT_LAT_ar, width, label='SA+MRT+LAT')\n\tr1 = ax.bar(x + width/2, DC_MRT_LAT_ar, width, label='DC+MRT+LAT')\n\n\tax.set_ylabel('Number of Unaccepted users')\n\tax.set_title(\"User Acceptance\")\n\tax.set_xticks(x)\n\tax.set_xticklabels(labels)\n\tax.legend()\n\tf.tight_layout()\n\tplt.show()\n\n# ============================================\n# BH Utilization and Latency Provision Plotter\n# ============================================\n\ndef bhutil_latprov_plotter(bhutil_val_DC, bhutil_val_DC_BHCAP, bhutil_val_DC_BHCAP_LAT, avail_bh, latprov_DC, latprov_DC_LAT, latprov_DC_MRT_LAT, latprov_DC_BHCAP_LAT, np, scn):\n\n\t# ====> BH 
Utilization Plots\n\n\tbhvbh = [item for sublist in bhutil_val_DC_BHCAP for item in sublist]\n\tbhvbh_DC = [item for sublist in bhutil_val_DC for item in sublist]\n\tavbh = [item for sublist in avail_bh for item in sublist]\n\t#print avbh\n\tprint np.amax(np.array(avbh))\n\ttot_avail_bh = avbh + [scn.fib_BH_MC_capacity]*(len(bhvbh)-len(avail_bh))\n\t#print tot_avail_bh\n\t#f,axs = plt.subplots(2)\n\t#f.suptitle('Backhaul Resource Utilization -- Constrained (top) and Unconstrained (bottom) BH')\n\t# axs[0].bar(np.arange(len(bhvbh)), [x1 - x2 for (x1, x2) in zip(bhvbh, tot_avail_bh)])\n\t# #ax.bar(np.arange(len(bhvbh)), bhvbh)\n\t# #ax.plot(np.arange(len(bhvbh)), tot_avail_bh, 'r--')\n\t# axs[0].set_ylim(-1*(max(tot_avail_bh)+1e9), 1e9)\n\t# axs[0].grid()\n\t# #axs[0].set_title('Backhaul Resource Utilization -- Constrained BH')\n\t# #axs[0].set_xlabel('(a)')\n\t# #axs[0].set_ylabel('Demand to Available BW Difference (bps)')\n\n\tx = np.arange(len(bhvbh))\n\twidth = 0.35 # Width of the bars\n\n\n\tfig, ax = plt.subplots()\n\n\tl1 = [x1 - x2 for (x1, x2) in zip(bhvbh, tot_avail_bh)]\n\tl2 = [x1 - x2 for (x1, x2) in zip(bhvbh_DC, tot_avail_bh)]\n\tax.bar(x - width/2, [a2/1e9 for a2 in l1] , width, label='Constrained Backhaul')\n\tax.bar(x + width/2, [a1/1e9 for a1 in l2], width, label='Unconstrained Backhaul')\n\n\t\t#ax.set_xticklabels(labels, rotation = 90)\n\t\n\tax.plot(np.arange(len(bhvbh)), [-1*np.amax(np.array(avbh))/1e9]*len(bhvbh),'b-', label='Maximum Available SC capacity')\n\tax.plot(np.arange(len(bhvbh)), [-1*scn.fib_BH_MC_capacity/1e9]*len(bhvbh), 'k-', label='Maximum Available MC capacity')\n\t\n\thandles,labels = ax.get_legend_handles_labels()\n\n\thandles = [handles[2], handles[3], handles[0], handles[1]]\n\tlabels = [labels[2], labels[3], labels[0], labels[1]]\n\n\tax.grid()\n\tax.set_ylabel('Demand to Available BW Difference (Gbps)')\n\tax.set_xlabel('Access Points')\n\tax.set_title('Backhaul Resource Utilization')\n\tax.set_xticks(x)\n\tplt.xticks(rotation=90)\n\n\t#ax.set_xticklabels(ax.get_xticklabels(), rotation=90)\n\n\tax.legend(handles, labels, loc=\"best\")\n\tfor i in range(len(bhvbh)-len(avail_bh)):\n\t\tax.get_xticklabels()[len(bhvbh)-1 -i].set_color(\"red\") \n\n\t# axs[1].bar(np.arange(len(bhvbh_DC)), [x1 - x2 for (x1, x2) in zip(bhvbh_DC, tot_avail_bh)])\n\t# #ax.bar(np.arange(len(bhvbh)), bhvbh)\n\t# #ax.plot(np.arange(len(bhvbh)), tot_avail_bh, 'r--')\n\t# axs[1].set_ylim(-1*(max(tot_avail_bh)+1e9), max(tot_avail_bh)+1e9)\n\t# axs[1].grid()\n\t# #axs[1].set_title('Backhaul Resource Utilization -- Unconstrained BH')\n\t# #axs[1].set_xlabel('(b)')\n\t# #axs[1].set_ylabel('Demand to Available BW Difference (bps)')\n\n\n\t# f.text(0.5, 0.04, 'Access Points', ha='center')\n\t#f.text(0.04, 0.5, 'Demand to Available BW Difference (bps)', va='center', rotation='vertical')\n\tfig.tight_layout()\n\tplt.show()\n\tplt.close(\"all\")\n\t# ====> Latency Plot\n\t#print latprov_DC_LAT\n\tlprov_DC_LAT = np.empty((latprov_DC_LAT.shape[0],2))\n\tfor i in range(latprov_DC_LAT.shape[0]):\n\t\ttemp = latprov_DC_LAT[i,np.nonzero(latprov_DC_LAT[i,:])]\n\t\tif temp.shape[1] == 2:\n\t\t\tlprov_DC_LAT[i,:] = temp\n\t\telse:\n\t\t\tlprov_DC_LAT[i,0] = temp\n\t\t\tlprov_DC_LAT[i,1] = temp\n\t#print lprov_DC_LAT[:,0].shape\n\tplt.scatter(np.arange(1,lprov_DC_LAT.shape[0]+1), lprov_DC_LAT[:,0], c = 'b', marker = 'o', alpha = 0.5)\n\tplt.scatter(np.arange(1,lprov_DC_LAT.shape[0]+1), lprov_DC_LAT[:,1], c = 'b', marker = 'o', alpha = 0.5)\n\tplt.plot(np.arange(latprov_DC_LAT.shape[0]), 
[scn.eMBB_latency_req]*latprov_DC_LAT.shape[0],'r-')\n\tplt.xlabel('Users')\n\tplt.ylabel('Latency (in seconds)')\n\tplt.title(\"Latency Scatter Plot of Users (DC LAT Scenario)\")\n\tplt.legend([\"3 ms\"])\n\tplt.show()\ndef infeasible_iter_counter(iters_infeas, iters_infeas_DC, iters_infeas_DC_MRT, iters_infeas_SA_MRT_LAT, iters_infeas_SA_BHCAP_MRT, iters_infeas_DC_BHCAP_MRT, iters_infeas_DC_BHCAP_MRT_LAT, iters_infeas_SA_MRT , iters_timeout, iters_timeout_DC, iters_timeout_DC_MRT, iters_timeout_SA_MRT_LAT, iters_timeout_SA_BHCAP_MRT, iters_timeout_DC_BHCAP_MRT, iters_timeout_DC_BHCAP_MRT_LAT, iters_timeout_SA_MRT , iters_infeas_SA_MRT_BHCAP_LAT, iters_timeout_SA_MRT_BHCAP_LAT, iters_infeas_DC_MRT_LAT,iters_timeout_DC_MRT_LAT, iters_infeas_SA_BHCAP, iters_timeout_SA_BHCAP, iters_infeas_SA_LAT, iters_timeout_SA_LAT,\n\t\titers_infeas_SA_BHCAP_LAT, iters_timeout_SA_BHCAP_LAT, iters_infeas_DC_BHCAP, iters_timeout_DC_BHCAP, iters_infeas_DC_LAT, iters_timeout_DC_LAT,\n\t\titers_infeas_DC_BHCAP_LAT, iters_timeout_DC_BHCAP_LAT, np,scn):\n\n\tlabels = ['SA', 'DC', 'SA + CB', 'DC + CB', 'SA + CPL', 'DC + CPL', 'SA + CPL + CB', 'DC + CPL + CB', 'SA + MRT', 'DC + MRT', 'SA + MRT + CPL', 'DC + MRT + CPL', 'SA + CB + MRT', 'DC + CB + MRT', 'SA + CB + MRT + CPL', 'DC + CB + MRT + CPL']\n\n\tx = np.arange(len(labels))\n\twidth = 0.25 # Width of the bars\n\n\tfig, ax = plt.subplots()\n\n\trects1 = ax.bar(x - width/2, [iters_infeas, iters_infeas_DC, iters_infeas_SA_BHCAP, iters_infeas_DC_BHCAP, iters_infeas_SA_LAT, iters_infeas_DC_LAT, iters_infeas_SA_BHCAP_LAT, iters_infeas_DC_BHCAP_LAT, iters_infeas_SA_MRT, iters_infeas_DC_MRT, iters_infeas_SA_MRT_LAT, iters_infeas_DC_MRT_LAT, iters_infeas_SA_BHCAP_MRT, iters_infeas_DC_BHCAP_MRT, iters_infeas_SA_MRT_BHCAP_LAT, iters_infeas_DC_BHCAP_MRT_LAT], width, label='Infeasible Iterations')\n\trects2 = ax.bar(x + width/2, [iters_timeout, iters_timeout_DC, iters_timeout_SA_BHCAP, iters_timeout_DC_BHCAP, iters_timeout_SA_LAT, iters_timeout_DC_LAT, iters_timeout_SA_BHCAP_LAT, iters_timeout_DC_BHCAP_LAT, iters_timeout_SA_MRT, iters_timeout_DC_MRT, iters_timeout_SA_MRT_LAT, iters_timeout_DC_MRT_LAT, iters_timeout_SA_BHCAP_MRT, iters_timeout_DC_BHCAP_MRT, iters_timeout_SA_MRT_BHCAP_LAT, iters_timeout_DC_BHCAP_MRT_LAT], width, label='Timed out iterations')\n\n\tax.set_ylabel('Number of Iterations')\n\tax.set_title('Infeasible and Timed out Iterations')\n\tax.set_xticks(x)\n\tax.set_xticklabels(labels, rotation = 90)\n\tax.legend()\n\n\tdef autolabel(rects):\n\t\t\"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n\t\tfor rect in rects:\n\t\t\theight = rect.get_height()\n\t\t\tax.annotate('{}'.format(height),\n\t\t\t\txy=(rect.get_x() + rect.get_width() / 2, height),\n\t\t\t\txytext=(0, 2), # 3 points vertical offset\n\t\t\t\ttextcoords=\"offset points\",\n\t\t\t\tha='center', va='bottom')\n\n\n\tautolabel(rects1)\n\tautolabel(rects2)\n\n\tfig.tight_layout()\n\tplt.show()\n\n# ===========\n# CDF Builder\n# ===========\n\n\ndef timecdf(time_DC, time_DC_MRT , time_SA_MRT , time_DC_MRT_BHCAP , time_DC_MRT_BHCAP_LAT , time_DC_MRT_LAT , time_SA_MRT_BHCAP , time_SA_MRT_BHCAP_LAT , time_SA_MRT_LAT ,time_SA, np, scn ):\n\t#plt.close('all')\n\t#print time_SA_MRT[:,5]\n\tn_bins = 300; # Number of bins for the histogram\n\t#fig, axs = plt.subplots()\n\tfig, axs = plt.subplots()\n\t#fig.suptitle(\"CDF for Optimizer Processing Times at Maximum User Density\")\n\t#print time_DC[:,5]\n\tn1, bins1, patches1 = axs.hist(time_DC[:,4], n_bins,density=True, 
histtype='step',\n cumulative=True, label='Empirical', color='r')\n\tseq = np.array([0])\n\tseq_bins1 = np.array([bins1[0]])\n\tn1 = np.concatenate((seq,n1), axis =0)\n\tbins1 = np.concatenate((seq_bins1,bins1), axis =0)\n\n\tn5, bins5, patches5 = axs.hist(time_SA[:,4], n_bins,density=True, histtype='step',\n cumulative=True, label='Empirical', color='c')\n\t\n\tseq_bins5 = np.array([bins5[0]])\n\tn5 = np.concatenate((seq,n5), axis =0)\n\tbins5 = np.concatenate((seq_bins5,bins5), axis =0)\n\t#axs[0,0].legend()\n\t#axs[0,0].set_xlim(min(min(bins1), min(bins5)), max(max(bins1), max(bins5)))\n\t#axs[0,0].grid()\n\t#ax1.plot(len(bins1),np.ones((1,len(bins1))),'g-')\n\n\n\tn2, bins2, patches2 = axs.hist(time_DC_MRT_BHCAP[:,4], n_bins,density=True, histtype='step',\n cumulative=True, label='Empirical', color='k')\n\t\n\tseq_bins2 = np.array([bins2[0]])\n\tn2 = np.concatenate((seq,n2), axis =0)\n\tbins2 = np.concatenate((seq_bins2,bins2), axis =0)\n\n\t#ax2.plot(len(bins2),np.ones((1,len(bins2))),'g-')\t\n\t#len_plt = max(len(bins1), len(bins2))\n\t#print n1\n\t#axs[1,0].set_title(\"DC+MRT+BHCAP\")\n\t#axs[1,0].set_xlim(min(bins2), max(bins2))\n\t#axs[1,0].grid()\n\t\n\tn3, bins3, patches3 = axs.hist(time_DC_MRT[:,4], n_bins,density=True, histtype='step',\n cumulative=True, label='Empirical', color='b')\n\t\n\tseq_bins3 = np.array([bins3[0]])\n\tn3 = np.concatenate((seq,n3), axis =0)\n\tbins3 = np.concatenate((seq_bins3,bins3), axis =0)\n\t#xs[0,1].set_title(\"DC+MRT\")\n\t#axs[0,1].set_xlim(min(bins3), max(bins3))\n\t#axs[0,1].grid()\n\n\tn4, bins4, patches4 = axs.hist(time_DC_MRT_BHCAP_LAT[:,4], n_bins,density=True, histtype='step',\n cumulative=True, label='Empirical', color='g')\n\t\n\tseq_bins4 = np.array([bins4[0]])\n\tn4 = np.concatenate((seq,n4), axis =0)\n\tbins4 = np.concatenate((seq_bins4,bins4), axis =0)\n\n\tn6, bins6, patches6 = axs.hist(time_DC_MRT_LAT[:,4], n_bins,density=True, histtype='step',\n cumulative=True, label='Empirical', color='g')\n\t\n\tseq_bins6 = np.array([bins6[0]])\n\tn6 = np.concatenate((seq,n6), axis =0)\n\tbins6 = np.concatenate((seq_bins6,bins6), axis =0)\n\n\tn7, bins7, patches7 = axs.hist(time_SA_MRT[:,4], n_bins,density=True, histtype='step',\n cumulative=True, label='Empirical', color='g')\n\t\n\tseq_bins7 = np.array([bins7[0]])\n\tn7 = np.concatenate((seq,n7), axis =0)\n\tbins7 = np.concatenate((seq_bins7,bins7), axis =0)\n\t#axs[1,1].set_title(\"DC+MRT+BHCAP+LAT\")\n\t#axs[1,1].set_xlim(min(bins4), max(bins4))\n\t#axs[1,1].grid()\n\n\tplt.close()\n\tf, ax1 = plt.subplots(2,1)\n\tax1[0].plot(bins1[:-1],n1,'r-', label=\"DC\")\n\tax1[0].plot(bins5[:-1],n5,'c-', label=\"SA\")\n\tax1[1].plot(bins7[:-1],n7,'c-', label=\"SA+MRT\")\n\tax1[1].plot(bins2[:-1],n2,'b-', label=\"DC+MRT+BHCAP\")\n\tax1[1].plot(bins3[:-1],n3,'k-', label=\"DC+MRT\")\n\tax1[1].plot(bins6[:-1],n6,'m-', label=\"DC+MRT+LAT\")\n\tax1[1].plot(bins4[bins4<=600],n4[bins4[:-1]<=600],'g-', label=\"DC+MRT+BHCAP+LAT\")\n\t#labels_y_1 = [str(min(min(bins1), min(bins5))),'0.2','0.4','0.6','0.8','1.0']\n\t# labels = [item.get_text() for item in ax1[0].get_yticklabels()]\n\t# labels = [str(format(min(min(n1),min(n5)),'0.3e')),'0.2','0.4','0.6','0.8','1.0']\n\t# #print labels\n\t# ax1[0].set_yticklabels(labels)\n\t#print n1\n\t#print bins1\n\t# extraticks = [min(min(n1), min(n5))]\n\t# ax1[0].set_yticks(list(ax1[0].get_yticks()) + extraticks)\n\t# #[0].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.3f'))\n\tax1[0].set_xlim(min(min(bins1), min(bins5))-0.1, max(max(bins1), 
max(bins5))+0.1)\n\tax1[0].set_ylim(0,1)\n\tax1[0].grid(alpha = 0.2, linestyle = '--')\n\tax1[0].set_xlabel(\"Processing time (in seconds)\")\n\n\t# extraticks_y = [min(min(n2),min(n3),min(n4))]\n\t#extraticks_x = [min(min(bins2),min(bins3),min(bins4),min(bins6))-3]\n\t# ax1[1].set_yticks(list(ax1[1].get_yticks()) + extraticks_y)\n\t#ax1[1].set_xticks(list(ax1[1].get_xticks()) + extraticks_x)\n\tax1[1].set_xlim(0, 600)\n\tax1[1].set_ylim(0,1)\n\tax1[1].grid(alpha = 0.2, linestyle = '--')\n\tax1[1].set_xlabel(\"Processing time (in seconds)\")\n\t#ax1[1,].set_xlim(min(bins3), max(bins3))\n\t#ax1[1,1].set_xlim(min(bins4), max(bins4))\n\t#ax1[0].xaxis.set_ticks(np.arange(min(min(bins1), min(bins5))-0.1,max(max(bins1), max(bins5))+0.1,0.5))\n\t#ax1[0].yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'))\n\tax1[0].xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'))\n\t#ax1[0].xaxis.set_ticks(np.arange(min(min(bins2),min(bins3),min(bins4))-3,600,100))\n\n\t# #ax1.plot([0.1]*len(np.arange(0,1.1,0.1)), np.arange(0,1.1,0.1), 'g--', label=\"Minimum Rate = 100 Mbps\")\n\t# #ax1.plot(bins3[0]/1e9, n3[0], 'ko', markersize=12, label=\"Minimum Rate with DC+MRT: \"+str(format(bins3[0]/1e6,'0.2f'))+\" Mbps\")\n\tf.suptitle(\"CDF for Optimizer Processing Times at Maximum User Density\", fontsize = 14)\n\t# ax1.set_xlabel('Processing Time (in seconds)', fontsize = 12)\n\t# ax1.set_ylim(0,1,0.1)\n\t# ax1.set_xlim(min(min(bins1[:-1]), min(bins2[:-1]), min(bins3[:-1]), min(bins4[:-1])),max(max(bins1[:-1]), max(bins2[:-1]), max(bins3[:-1]), max(bins4[:-1]) ) )\n\tax1[0].legend(loc=4, prop={'size': 12})\n\t#ax1[0,1].legend(prop={'size': 12})\n\tax1[1].legend(loc=1, prop={'size': 12})\n\t#ax1[1,1].legend(prop={'size': 12})\n\t\n\t# ax1.yaxis.set_ticks_position('none')\n\t# ax1.xaxis.set_ticks_position('none')\n\t# axins = zoomed_inset_axes(ax1, 0.5, loc=10, bbox_to_anchor=(1201.,405.))\n\t# axins.plot(bins1[:-1],n1,'r-')\n\t# #axins.plot(bins2[:-1],n2,'b-')\n\t# #axins.plot(bins3[:-1],n3,'k-')\n\t# #axins.plot(bins4[:-1],n4,'g-')\n\t# #axins.plot([0.1]*len(np.arange(0,1.1,0.1)), np.arange(0,1.1,0.1), 'g--')\n\t# #axins.plot(bins3[0]/1e9, n3[0], 'ko', markersize=8)\n\t# axins.set_ylim(0,1)\n\t# axins.set_xlim(0,5.5)\n\t# axins.yaxis.set_ticks_position('none')\n\t# axins.xaxis.set_ticks_position('none')\n\t# #axins.yaxis.set_ticks(np.arange(0,0.03,0.01))\n\t# #axins.xaxis.set_ticks(np.arange(0.09,0.12,0.01))\n\t# #axins.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.2f'))\n\t# #plt.yticks(visible = False)\n\t# #plt.xticks(visible = False)\n\t# mark_inset(ax1, axins, loc1=2, loc2=4, fc=\"none\", ec=\"0.5\")\n\t# #ax1.grid()\n\t\n\t#bincenters1 = 0.5*(bins1[1:]+bins1[:-1])\n\t#bincenters2 = 0.5*(bins2[1:]+bins2[:-1])\n\n\t#plt.plot(bincenters1,n1,'r-', bincenters2, n2, 'k-', fillstyle='none')\n\n\tplt.show()\n# \n#plt.plot(usr_lcs[0], usr_lcs[1],'k+');\n#plt.plot(macro_cell_locations[:,0], macro_cell_locations[:,1],'rs'); # Plot the macro cells\n#for j in range(0,macro_cell_locations.shape[0]):\n# print_element = locs_SCBS[j]; #Accessing the numpy array of SC locations corresponding to the Macro Cell \n# plt.plot(print_element[:,0], print_element[:,1], 'b*'); # Plot the small cells\n# plt.plot(usr_loc_eMBB[:,0],usr_loc_eMBB[:,1],'k+')\n# plt.plot(usr_loc_URLLC[:,0],usr_loc_URLLC[:,1],'cs')\n# #plt.plot(usr_loc_mMTC[:,0],usr_loc_mMTC[:,1],'go')\n\n\t#dist_names = ['rayleigh', 'rice','norm','expon']\n\t# val_dist =[]\n\t# # ===> Perform the KS Test for distribution fit\n\n\t# dist_results = 
[]\n\t# for i in range(len(dist_names)):\n\t# \tdist = getattr(scipy.stats, dist_names[i])\n\t# \tparam = dist.fit(y)\n\t# \tD,p = scipy.stats.kstest(y,dist_names[i],args=param)\n\t# \tdist_results.append((dist_names[i],p))\n\n\t# dist_selected, p = (max(dist_results,key=lambda item:item[1]))\n\t# dist_fin = getattr(scipy.stats, dist_selected)\n\t# val_dist = dist_fin.rvs(*(dist_fin.fit(y))[:-2], loc = param[-2], scale=param[-1], size=len(y))\n\t# plt.hist(val_dist, alpha=0.5)\n\t# plt.hist(y, alpha=0.5)\n\t# plt.legend(loc='upper right')",
"id": "5818699",
"language": "Python",
"matching_score": 1.9140633344650269,
"max_stars_count": 1,
"path": "plotter.py"
},
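The CDF figures in `plotter.py` (`hist_plotter`, `timecdf`) are built by first taking a cumulative, density-normalised histogram and then re-plotting its bin edges against the accumulated counts as a line. A short sketch of that pattern with synthetic rate samples (the data and labels are made up):

```python
# CDF-from-cumulative-histogram sketch; the rate samples are placeholders.
import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
rates = rng.lognormal(mean=20, sigma=0.5, size=1000)   # synthetic "throughput" samples in bps

n_bins = 300
fig, ax = plt.subplots()
n, bins, _ = ax.hist(rates, n_bins, density=True, histtype='step', cumulative=True)
plt.close(fig)                                         # the histogram is only a helper step

f, ax1 = plt.subplots(figsize=(8, 4))
ax1.plot(bins[:-1] / 1e9, n, 'r-', label='DC (example)')  # re-plot as a CDF line, x in Gbps
ax1.set_xlabel('Throughput (Gbps)')
ax1.set_ylabel('CDF')
ax1.set_ylim(0, 1)
ax1.legend()
plt.show()
```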
{
"content": "# ==================================================================================================================================\n# This is a function file enables the program to check if the ISD rule, according to the considered specifications, is satisfied\n# This function file also consists of a grid creator function to place the Macro BS\n# This function file also consists of other miscellaneous functions\n# ==================================================================================================================================\n\n# ======================\n# Import Necessary Files\n# ======================\n\nfrom bp_assister import bp_assist\nimport copy\nimport csvsaver\n\n# ===========================\n# Inter-site distance checker\n# ===========================\n\ndef checker(locs,isd,np): # Check the ISD for the selected \n flag = [None]*locs.shape[0]; #Empty array for the flag list\n i = 0; # Initialize the iterator variable\n for i in range(0,(locs.shape[0])):\n \tdist_X = locs[i,0]*np.ones((locs.shape[0],1), dtype=int) - np.reshape(locs[:,0],(locs[:,0].shape[0],1)); #X coordinate diff\t\n \t#print dist_X\n \tdist_Y = locs[i,1]*np.ones((locs.shape[0],1), dtype=int) - np.reshape(locs[:,1],(locs[:,1].shape[0],1)); #Y coordinate diff\n \t#print dist_Y\n \ttot_dist = np.sqrt(dist_X**2 + dist_Y**2); # Distance from other base stations\n \t#print tot_dist\n \ttot_dist[i,:] = tot_dist[i,:] + isd; # Set the self distance to be the minimum to avoid the base stations own location from affecting the decision\n \t#print tot_dist\n \tflag[i] = (tot_dist>=isd).all(); # Generate the flag (False if ISD violated; True if not)\n #print flag\n if all(flag) == False:\n \t#print all(flag)\n\t return 0\n else:\t\n\t return 1 # Return 1 if none of the above checks return 0\t\n\n# =====================\n# Same Location Checker\n# =====================\n\ndef locs_checker(locs, other_locs, np, indix):\n if indix == 'sc':\n flag = [None]*locs.shape[0]; #Empty array for the flag list\n for i in range(locs.shape[0]):\n for j in range(other_locs.shape[0]):\n if locs[i,0] == other_locs[0] and locs[i,1] == other_locs[1]:\n flag[i] = False # If the location matches any of the previous generated locations then regenerate the location\n else:\n flag[i] = True # If not, then pass\n if all(flag) == False:\n return 0 \n else:\n return 1\n elif indix == 'user':\n flag = [None]*locs.shape[0]; #Empty array for the flag list\n for i in range(locs.shape[0]):\n if i != locs.shape[0]:\n comp_locs = np.vstack((other_locs,locs[i+1:,:]))\n else:\n comp_locs = other_locs\n for j in range(comp_locs.shape[0]):\n if locs[i,0] == comp_locs[j,0] and locs[i,1] == comp_locs[j,1]:\n flag[i] = False # If the location matches any of the previous generated locations then regenerate the location\n else:\n flag[i] = True # If not, then pass\n if all(flag) == False:\n return 0 \n else:\n return 1\n# =======================\n# Macro Cell Grid Creator\n# =======================\n\ndef gridder(locs_interim,MCBS_intersite,np): # Create the grid through random permutation \n #print locs_interim.shape[0]\n locs_MCBS = np.empty([locs_interim.shape[0]**2,2]); # Create the 2-D vector for holding the X-Y coordinates of the MCBS \n idx = 0; # Iterator variable\n for i in range(0,locs_MCBS.shape[0]):\n # print i\n if i%(locs_interim.shape[0]) == 0:\n locs_MCBS[i,0]=locs_interim[idx];\n locs_MCBS[i,1]=locs_interim[0];\n idx = idx + 1; #Iterator to go through the grid\n else: \n locs_MCBS[i,0]=locs_interim[idx-1];\n 
locs_MCBS[i,1]=locs_MCBS[i-1,1]+MCBS_intersite; \n \n return locs_MCBS\n\n# =========================\n# LOS Probability Generator\n# =========================\n\ndef los_prob_var_gen(h): # Generates the variables needed for the los probability calculation\n if h <= 13:\n C = 0; \n elif h > 13 and h <= 23:\n C = ((h-13)/10)**(3/2);\n return C\n\n# ==============================\n# Breakpoint distance calculator\n# ==============================\n\ndef breakpt_dist (scn, dist, flag_sc, np): # Generates the breakpoint distance parameter\n # We first initiate some local parameters to calculate the breakpoint distance\n # Calculating the effective environmental height\n if flag_sc: # This is for small cells\n eff_ht = 1; # Effective environment height \n bs_ht = scn.bs_ht_sc; # Small cell height\n fc = scn.fc_sc; #Small cell frequency\n else: # This is when we have an urban macro\n # We have to calculate the probability for the effective environmental height\n bs_ht = scn.bs_ht_mc; # Macro cell height\n fc = scn.fc_mc; # Macro cell frequency\n bp = bp_assist(); # breakpoint distance\n prob_1m = 1/(1+bp.bp_assister(dist,scn.usr_ht)); # Probability of effective environmental height being 1m\n if prob_1m > 0.5:\n eff_ht = 1; # Effective environment height\n else: \n eff_ht = 12 + ((scn.usr_ht-1.5)-12)*np.random_integers(np.floor((scn.usr_ht-13.5)/3.)-1)/(np.floor((scn.usr_ht-13.5)/3.));\n \n # Final Breakpoint distance calculation\n bs_eff = bs_ht - eff_ht; \n usr_eff = scn.usr_ht - eff_ht; \n bp_dist = 4*bs_eff*usr_eff*fc/scn.c; # Breakpoint dist \n return bp_dist\n\n# ===========================\n# Generic Distance Calculator\n# ===========================\n\ndef dist_calc(locs_src, locs_tgt, usr_ht, bs_ht, dist_type, np):\n #print locs_src\n #print locs_tgt\n if dist_type == '2d':\n x_diff = locs_src[:,0] - locs_tgt[0]; # X coordinate difference\n y_diff = locs_src[:,1] - locs_tgt[1]; # Y coordinate difference\n return np.sqrt(np.power(x_diff,2) + np.power(y_diff,2)) # Returning the 2-D distance between two points\n\n elif dist_type == '3d':\n x_diff = locs_src[:,0] - locs_tgt[0]; # X coordinate difference\n y_diff = locs_src[:,1] - locs_tgt[1]; # Y coordinate difference\n z_diff = bs_ht - usr_ht; # Z coordinate difference\n return np.sqrt(np.power(x_diff,2) + np.power(y_diff,2) + np.power(z_diff,2)) # Returning the 3-D distance between two points\n\n# =======================================\n# Matrix Array Element Locator and Sorter\n# =======================================\n\ndef idx_mat(src_mat, param_val, srch_type, np): # This function works as an element locator and distance based Sorter\n \n if srch_type == 'minimum':\n #print src_mat\n sorted_mat = np.sort(src_mat,kind='mergesort')[:,:param_val]; # Sort the matrix first\n #print sorted_mat\n sorted_idx = np.argsort(src_mat,kind='mergesort')[:,:param_val]; #Indices of the sorted matrix\n #print sorted_idx\n return sorted_mat,sorted_idx # Returning the sorted matrix and the index of the requested elements in the original matrix\n \n # This function can be extended further for maximum or non-maximal/minimal scenarios\n \n elif srch_type == 'distance':\n #print src_mat[1,:]\n sorted_mat = np.sort(src_mat,kind='mergesort'); # Sort the SC distance matrix\n #print sorted_mat[1,:]\n sorted_idx = np.where(sorted_mat<=100,np.argsort(src_mat,kind='mergesort'),'None'); # Indices of the SCs that are within 200m and can impact the UE through interference\n #print sorted_idx[1]\n return np.where(sorted_mat>100,0,sorted_mat),sorted_idx # Return 
Sorted Matrix and the indices \n \n# ============================================================\n# Interference Limited Scenario Interference Matrix Calculator\n# ============================================================\n\ndef interf(PL, scn, np, tx_power, gain, rx_gain): # This function returns the overall interference matrix given a Pathloss matrix\n interf = np.empty((PL.shape[0],PL.shape[1])); # Initialize the interference matrix\n PR_interf = interf; # This is a temporary matrix to hold Rx Power values due to all other APs other than AP of interest\n #print PL.shape \n #print PL[1,:]\n #print \"Next\"\n for i in range(0, PL.shape[1]):\n PL_temp = copy.copy(PL); # This is a temporary array store\n PL_temp[:,i] = float('nan'); # So the array now has Nan where we have our AP of interest\n #print (\"PL matrix is:\", PL_temp)\n #csvsaver.csvsaver(PL_temp,[],\"PL_temp\"+str(i)+\".csv\")\n PR_interf = (10**(tx_power/10)*(10**(gain/10))*(10**(rx_gain/10)*10**(-3)))/(10**(PL_temp/10)); # Compute the received power on each UE-AP pair\n #print (\"Received Interference is:\", PR_interf)\n #csvsaver.csvsaver(PR_interf,[],\"PR_interf\"+str(i)+\".csv\")\n interf[:,i] = np.sum(np.where(np.isnan(PR_interf), 0, PR_interf), axis=1); #Interference from other APs for a given UE-AP pair\n return interf\n\n# ===========================================================\n# Small Cell Beamforming Based Interference Matrix Calculator\n# ===========================================================\n \ndef angsc(usr_loc, sc_loc, np, scn): # This function will find the beam that points in the direction of the user\n \n beam_angles = np.arange(scn.beam_hpbw_rx, 360 + scn.beam_hpbw_rx , scn.beam_hpbw_rx) # UE RX beam angles\n #print beam_angles\n\n #csvsaver.csvsaver(usr_loc,[],\"USERlocsFORSC.csv\")\n coord_diff = usr_loc - sc_loc # Compute the Perpendicular\n theta_diff = np.degrees(np.arctan2(coord_diff[1],coord_diff[0])) # Computes the vector of angles each UE makes with a MCBS\n #print (\"Calculated SC angles:\", theta_diff)\n #csvsaver.csvsaver(theta_diff,[],\"ComputedAngleForSC.csv\")\n\n theta_diff = np.where(theta_diff >= 0 , theta_diff, 360 + theta_diff)\n #print theta_diff\n\n\n\n angle_diff = beam_angles - theta_diff; # Subtract the angle of the current AP of interest with the beam sectors\n #print angle_diff\n\n sect_ang = np.where(angle_diff>=0)[0][0]; # Stores the index where a given AP is located within the UE beam\n return sect_ang\n #csvsaver.csvsaver(sect_ang,[],\"SectorsSC.csv\")\n #print (\"Determined Sectors are:\", sect_ang)\n \n# def interf_sect(interf, idx_curr_MCBS, scn, np): # This function returns the overall interference matrix given a Pathloss matrix and the sectorization matrix\n# interf = np.empty((PL.shape[0],PL.shape[1])); # Initialize the interference matrix\n# PR_interf = interf; # This is a temporary matrix to hold Rx Power values due to all other APs other than AP of interest\n# #print PL[1,:]\n# #print \"Next\"\n# for i in range(0, PL.shape[1]):\n# PL_temp = copy.copy(PL); # This is a temporary array store\n# PL_temp[:,i] = float('nan'); # So the array now has Nan where we have our AP of interest\n# PR_interf = (10**(scn.transmit_power/10)*(10**(scn.transmit_gain_sc/10))*(10**(scn.receiver_gain/10)*10**(-3)))/(10**(PL_temp/10)); # Compute the received power on each UE-AP pair\n# interf[:,i] = np.sum(np.where(np.isnan(PR_interf), 0, PR_interf), axis=1); #Interference from other APs for a given UE-AP pair\n# return interf\n\n# ==================\n# Matrix Reorganizer\n# 
==================\n\ndef reorganizer(SRC_mat, IDX_mat_SC, IDX_mat_MC, num_scbs, num_mcbs, sinr_pad, np, scn):\n \n reorg_mat = np.zeros((IDX_mat_SC.shape[0], num_scbs+num_mcbs)); # The total matrix to hold the SINR values\n \n # ===================================\n # First we reorganize the Small Cells\n\n for i in range(0,IDX_mat_SC.shape[0]):\n for j in range(0, IDX_mat_SC.shape[1]):\n if IDX_mat_SC[i,j] != 'None':\n reorg_mat[i,int(IDX_mat_SC[i,j])] = SRC_mat[i,j]; # Reorganizing the Small cells\n\n # ==================================\n # We reorganize the Macro Cells\n #print reorg_mat \n #print \"=======\"\n for i in range(0,IDX_mat_MC.shape[0]):\n for j in range(0, IDX_mat_MC.shape[1]):\n reorg_mat[i, num_scbs + IDX_mat_MC[i,j]] = SRC_mat[i, num_scbs+j]; # Reorganizing the Macro Cells \n\n #print reorg_mat\n reorg_mat = np.where(reorg_mat == 0, sinr_pad, reorg_mat)\n return reorg_mat",
"id": "983918",
"language": "Python",
"matching_score": 2.724095582962036,
"max_stars_count": 1,
"path": "dist_check.py"
},
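For reference, a vectorised re-statement of the inter-site-distance rule that `dist_check.checker` enforces row by row. The helper name and the example coordinates are illustrative, not part of the original module.

```python
# Hypothetical vectorised ISD check: every pairwise distance must be >= isd.
import numpy as np

def isd_ok(locs, isd):
    """Return True if all pairwise distances in locs (N x 2) are at least isd metres."""
    diff = locs[:, None, :] - locs[None, :, :]     # (N, N, 2) pairwise coordinate differences
    dist = np.sqrt((diff ** 2).sum(axis=-1))       # (N, N) distance matrix
    np.fill_diagonal(dist, isd)                    # ignore each station's distance to itself
    return bool((dist >= isd).all())

if __name__ == '__main__':
    locs = np.array([[0.0, 0.0], [250.0, 0.0], [0.0, 250.0]])
    print(isd_ok(locs, isd=200.0))                 # True: all sites are >= 200 m apart
```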
{
"content": "#!/usr/bin/env python\n\nimport csvsaver\n# This file contains the functions where the beamforming and Macro Cell sectorization has been performed for \n# interference calculation purposes\n\n# ====================================================\n# MCBS Sectorization and Interference Compute Function\n# ====================================================\n\ndef MCBS_sectorizer(np,scn,num_mcbs,mcbs_locs,usr_locs):\n\n\t#===> Compute the angular displacement for determining the influence of an AP\n\ttheta_diff = np.zeros((usr_locs.shape[0],1)) # This matrix holds the angular displacement of each user with a given AP\n\tsector_mat = np.zeros((usr_locs.shape[0], num_mcbs), dtype=int) # This matrix holds the sector number to which a given user belongs to for a given MC AP\n\ttheta_mat = np.zeros((usr_locs.shape[0], num_mcbs))\n\tfor i in range(num_mcbs):\n\t\tx_diff = usr_locs[:,0] - mcbs_locs[i,0] # Compute the Perpendicular\n\t\ty_diff = usr_locs[:,1] - mcbs_locs[i,1] # Compute the base\n\t\ttheta_diff[:,0] = np.degrees(np.arctan2(y_diff,x_diff)) # Computes the vector of angles each UE makes with a MCBS\n\t\ttheta_mat[:,i] = theta_diff[:,0]\n\t\t# sector_mat[np.nonzero(theta_diff<=60 or theta_diff>300),i] = 1 # Sector 1 for MCBS i is between +60 and -60\n\t\t# sector_mat[np.nonzero(theta_diff>60 or theta_diff<=180),i] = 2 # Sector 1 for MCBS i is between +60 and 180\n\t\t# sector_mat[np.nonzero(theta_diff>180 or theta_diff<=300),i] = 3 # Sector 1 for MCBS i is between +180 and +300\n\t\tsector_mat[np.nonzero(np.all(np.hstack(((theta_diff<=60)*1,(theta_diff>-60)*1)), axis = 1)),i] = 1 # Sector 1 for MCBS i is between +60 and -60\n\t\tsector_mat[np.nonzero(np.all(np.hstack(((theta_diff>60)*1,(theta_diff<=180)*1)), axis = 1)),i] = 2 # Sector 2 for MCBS i is between +60 and 180\n\t\tsector_mat[np.nonzero(np.all(np.hstack(((theta_diff>-180)*1,(theta_diff<=-60)*1)), axis = 1)),i] = 3 # Sector 3 for MCBS i is between +180 and 300\n\tcsvsaver.csvsaver(sector_mat,[], 'sector_mat.csv')\n\tcsvsaver.csvsaver(usr_locs,[],'USERlocs.csv')\n\tcsvsaver.csvsaver(mcbs_locs, [], 'MCBSlocs.csv')\n\tcsvsaver.csvsaver(theta_mat, [], 'Theta.csv')\n\treturn sector_mat\n\t",
"id": "4708716",
"language": "Python",
"matching_score": 0.1684580296278,
"max_stars_count": 1,
"path": "sectbeam.py"
},
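The sectorisation rule in `sectbeam.MCBS_sectorizer` boils down to mapping each user's angle relative to a macro cell onto three 120-degree sectors. A compact sketch of just that mapping, with made-up coordinates:

```python
# Sector assignment sketch for a single macro BS; locations are illustrative.
import numpy as np

def sector_of(usr_locs, mcbs_loc):
    """Return sector index 1/2/3 for each user with respect to one macro BS."""
    d = usr_locs - mcbs_loc                           # (N, 2) displacement vectors
    theta = np.degrees(np.arctan2(d[:, 1], d[:, 0]))  # angles in (-180, 180] degrees
    sect = np.zeros(usr_locs.shape[0], dtype=int)
    sect[(theta > -60) & (theta <= 60)] = 1           # sector 1: -60 to +60 degrees
    sect[(theta > 60) & (theta <= 180)] = 2           # sector 2: +60 to +180 degrees
    sect[(theta > -180) & (theta <= -60)] = 3         # sector 3: -180 to -60 degrees
    return sect

if __name__ == '__main__':
    users = np.array([[10.0, 1.0], [-5.0, 8.0], [-2.0, -9.0]])
    print(sector_of(users, np.array([0.0, 0.0])))     # expected: [1 2 3]
```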
{
"content": "#!/usr/bin/env python\n\n# This script allows the user to utilize the functions stated here in and save any intended Numpy matrix data as a CSV file \n\n# ==================\n# Import the Modules\n# ==================\n\nimport csv\nimport pandas as pd\nimport numpy as np\n\n\ndef csvsaver(data, col_names, filname):\n\tdframe = {} # Empty dictionary for the pandas Dataframe\n\tif len(col_names)!=0:\n\t\tfor i in range(len(col_names)):\n\t\t\tdframe[col_names[i]] = data[:,i]\n\n\t\tdf = pd.DataFrame(dframe)\n\t\tdf.to_csv(filname, index=False)\n\n\telse:\n\t\tfor i in range(data.shape[1]):\n\t\t\tdframe[str(i)] = data[:,i]\n\t\tdf = pd.DataFrame(dframe)\n\t\tdf.to_csv(filname, index=False)",
"id": "8345983",
"language": "Python",
"matching_score": 0.02273130789399147,
"max_stars_count": 1,
"path": "csvsaver.py"
},
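Usage of `csvsaver.csvsaver` as defined above is straightforward; this assumes the module is importable from the working directory and that pandas is installed.

```python
# Example call of csvsaver.csvsaver with and without explicit column names.
import numpy as np
import csvsaver

data = np.array([[1.0, 2.0], [3.0, 4.0]])
csvsaver.csvsaver(data, ['x', 'y'], 'named_columns.csv')   # columns "x" and "y"
csvsaver.csvsaver(data, [], 'numbered_columns.csv')        # auto-numbered columns "0" and "1"
```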
{
"content": "# ====================================================================================================\n# This file provides class with the necessary sub functions for the calculation of breakpoint distance\n# ====================================================================================================\n\nclass bp_assist:\n \n def bp_assister(self, dist, usr_ht):\n if usr_ht < 13: \n C = 0;\n elif usr_ht >= 13 and usr_ht <= 23: \n C = ((usr_ht-13)/10)**1.5*self.g(dist)\n return C\n\n def g(self, dist):\n if dist <18:\n G = 0; \n return G\n else: \n G = 0\n for i in range(0,1000):\n intr_G = ((5/4)*(dist/100)**3)*np.exp(-1*(dist/150));\n G = G + intr_G\n return G/i # Returning the expected value\n\n",
"id": "12733350",
"language": "Python",
"matching_score": 0.5231795310974121,
"max_stars_count": 1,
"path": "bp_assister.py"
},
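`bp_assist` feeds into the breakpoint-distance computation in `dist_check.breakpt_dist`, which evaluates d_BP = 4 * h'_BS * h'_UT * f_c / c with the effective environment height subtracted from both antenna heights. A standalone sketch of that formula; the numeric inputs are illustrative only.

```python
# Breakpoint distance sketch for the dual-slope pathloss model (assumed inputs).
SPEED_OF_LIGHT = 3.0e8   # m/s

def breakpoint_distance(bs_height_m, ue_height_m, fc_hz, eff_env_height_m=1.0):
    """Breakpoint distance in metres: 4 * (h_bs - h_e) * (h_ue - h_e) * fc / c."""
    h_bs_eff = bs_height_m - eff_env_height_m
    h_ue_eff = ue_height_m - eff_env_height_m
    return 4.0 * h_bs_eff * h_ue_eff * fc_hz / SPEED_OF_LIGHT

if __name__ == '__main__':
    # 10 m small cell, 1.5 m user, 28 GHz carrier (illustrative values): about 1680 m
    print(breakpoint_distance(10.0, 1.5, 28e9))
```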
{
"content": "#!/usr/bin/env\n\n# ====> This file consists of the pathloss generator function for all the considered scenarios\n\n#from dsc import breakpt_dist\n#from dsc import los_prob_var_gen\nimport los_probability\n\n# ============================================\n# CI Model Pathloss \n# ============================================\n\ndef pathloss_CI(scn, dist, np, d3d, dsc, sc_flag):\n \n # =====================================================\n # We implement the NYU CI model for full distance range\n \n los_flag = 0 # Line of Sight and Non Line of Sight flag \n if sc_flag == 0 or sc_flag == 1:\n FSPL = 20*np.log10((4*np.pi*np.where(sc_flag,scn.fc_sc,scn.fc_mc))/scn.c); # Calculate the Free Space Path loss\n\n # =====================================================================\n # We consider a LOS scenario if the los probability is greater than 50%\n \n #print (\"The Threshold for SC LOS-NLOS is:\", scn.tau_sc[tau_flag])\n #print (\"The Threshold for MC LOS-NLOS is:\", scn.tau_mc[tau_flag])\n\n if (np.random.rand(1) <= los_probability.los_prob(np,dist,sc_flag) and sc_flag == 1) or (np.random.rand(1) <= los_probability.los_prob(np,dist,sc_flag) and sc_flag == 0):\n los_flag = 1 # Return the los flag for the plots later\n #print (\"LOS with flag:\",sc_flag)\n n = np.where(sc_flag, 2.1, 2.0); # For LOS scenarios UMa has PLE = 2.0 and UMi at 28 and 73GHz has PLE = 2.1\n SF_dev = np.where(sc_flag, 4.4, 2.4); # For LOS Scenarios UMa has a SF dev of 2.4 and UMi at 28 and 73 GHz has SF dev of 4.4 \n shadowing = np.random.normal(0,SF_dev);\n if dist < 1:\n return (FSPL + shadowing), los_flag # Below 1m it is PL is just FSPL in CI model\n else: \n PL_CI = FSPL + 10*n*np.log10(d3d) + shadowing; # CI model Pathloss \n return PL_CI, los_flag\n\n else:\n los_flag = 0 # Return the los flag for the plots later (NLOS)\n #print (\"NLOS with flag:\",sc_flag)\n n = np.where(sc_flag, 3.2, 2.9); # For NLOS scenarios UMa has PLE = 2.0 and UMi at 28 and 73GHz has PLE = 2.1\n SF_dev = np.where(sc_flag, 8.0, 5.7); # For NLOS Scenarios UMa has a SF dev of 2.4 and UMi at 28 and 73 GHz has SF dev of 4.4 \n shadowing = np.random.normal(0,SF_dev);\n if dist < 1:\n return (FSPL + shadowing), los_flag # Below 1m it is PL is just FSPL in CI model\n else: \n PL_CI = FSPL + 10*n*np.log10(d3d) + shadowing; # CI model Pathloss \n return PL_CI, los_flag\n \n elif sc_flag == 2:\n \n #if los_probability.los_prob_sc(np,dist) >= 0.5: # Small Cells will always be in LOS for a MC\n los_flag = 1\n FSPL = 20*np.log10((4*np.pi*scn.fc_bh_sc)/scn.c); # Calculate the Free Space Path loss for Backhaul\n n = 2.0; # We consider BH to be in LOS scenario with a pathloss exponent of 2.1\n SF_dev = 4.2; # Standard deviation for Shadow Fading\n shadowing = np.random.normal(0, SF_dev);\n if dist < 1:\n return (FSPL+shadowing), los_flag; # Unusual between SC and MC but can happen\n else: \n PL_SC_MC_CI = FSPL + 10*n*np.log10(d3d) + shadowing; # CI model Pathloss between SC and MC\n return PL_SC_MC_CI, los_flag\n # else:\n # los_flag = 1\n # FSPL = 20*np.log10((4*np.pi*scn.fc_bh_sc)/scn.c); # Calculate the Free Space Path loss for Backhaul\n # n = 3.5; # We consider BH to be in LOS scenario with a pathloss exponent of 2.1\n # SF_dev = 7.9; # Standard deviation for Shadow Fading\n # shadowing = np.random.normal(0, SF_dev);\n # if dist < 1:\n # return (FSPL+shadowing); # Unusual between SC and MC but can happen\n # else: \n # PL_SC_MC_CI = FSPL + 10*n*np.log10(d3d) + shadowing; # CI model Pathloss between SC and MC\n # return PL_SC_MC_CI, 
los_flag\n\n\n\n\n# ============================================\n# 3GPP Small Cell Pathloss \n# ============================================\n\n# def pathloss_SC_3GPP(scn, dist, np, d3d, dsc):\n \n# # =====================================================================\n# # We consider a LOS scenario if the los probability is greater than 50%\n \n# pathloss_sc = 0; # Declaring the pathloss variable\n# #bs_ht = 10; # Small cell base station height is 10m\n# if los_probability.los_prob_sc(np,dist) >= 0.5:\n# bp_dst = dsc.breakpt_dist(scn, dist, 1, np); # Breakpoint distance \n# if dist>=10 and dist<bp_dst:\n# pathloss_sc = 32.4 + 21*np.log10(d3d)+20*np.log10(scn.fc_sc); \n# elif dist >= bp_dst and dist <= 5000:\n# pathloss_sc = 32.4 + 40*np.log10(d3d) + 20*np.log10(scn.fc_sc) -9.5*np.log10((bp_dst)**2 + (scn.bs_ht_sc - scn.usr_ht)**2)\n# shadowing = np.random.normal(0,4); # We introduce shadowing\n# return (pathloss_sc+shadowing) # We return the total large scale fading\n\n# # ===================================================\n# # We now consider the N-LOS scenario\n \n# else:\n# bp_dst = dsc.breakpt_dist(scn, dist, 1, np); # Breakpoint distance \n# if dist>=10 and dist<=5000:\n# if dist>=10 and dist<bp_dst:\n# los_sc = 32.4 + 21*np.log10(d3d)+20*np.log10(scn.fc_sc); \n# elif dist >= bp_dst and dist <= 5000:\n# los_sc = 32.4 + 40*np.log10(d3d) + 20*np.log10(scn.fc_sc) -9.5*np.log10((bp_dst)**2 + (scn.bs_ht_sc - scn.usr_ht)**2);\n# nlos_sc = 35.3*np.log10(d3d)+22.4+21.3*np.log10(scn.fc_sc)-0.3*(scn.usr_ht-1.5);\n# pathloss_sc = np.maximum(los_sc,nlos_sc) \n# shadowing = np.random.normal(0,7.82); #Shadowing in NLOS scenarios\n# return (pathloss_sc+shadowing)\n\n\n# =====================================================\n# 3GPP Macro Cell Pathloss\n# =====================================================\n\n\n# def pathloss_MC_3GPP(scn, dist, np, d3d, dsc):\n\n# # ==================================================\n# # We consider a LOS scenario if the los probability is greater than 50%\n \n# #bs_ht = 25; # Macro cell base station height is 25m\n# if los_probability.los_prob_mc(np,dist,dsc.los_prob_var_gen(scn.usr_ht)) >= 0.5:\n# bp_dst = dsc.breakpt_dist(scn, dist, 0, np); # Breakpoint distance \n# if dist>=10 and dist<bp_dst:\n# pathloss_mc = 32.4 + 20*np.log10(d3d)+20*np.log10(scn.fc); \n# elif dist >= bp_dst and dist <= 5000:\n# pathloss_mc = 32.4 + 40*np.log10(d3d) + 20*np.log10(scn.fc) -10*np.log10((bp_dst)**2 + (scn.bs_ht - scn.usr_ht)**2)\n# shadowing = np.random.normal(0,4); # We introduce shadowing\n# return (pathoss_mc+shadowing) # We return the total large scale fading\n\n# # ===================================================\n# # We now consider the N-LOS scenario\n \n# else:\n# bp_dst = dsc.breakpt_dist(scn, dist, 0, np); # Breakpoint distance \n# if dist>=10 and dist<=5000:\n# if dist>=10 and dist<bp_dst:\n# los_mc = 32.4 + 20*np.log10(d3d)+20*np.log10(scn.fc); \n# elif dist >= bp_dst and dist <= 5000:\n# los_mc = 32.4 + 40*np.log10(d3d) + 20*np.log10(scn.fc) -10*np.log10((bp_dst)**2 + (scn.bs_ht - scn.usr_ht)**2);\n# nlos_mc = 39.08*np.log10(d3d)+13.54+20*np.log10(scn.fc)-0.6*(scn.usr_ht-1.5);\n# pathloss_mc = np.maximum(los_sc,nlos_sc) \n# shadowing = np.random.normal(0,7.8); #Shadowing in NLOS scenarios\n# return (pathloss_mc+shadowing)\n \n",
"id": "9622724",
"language": "Python",
"matching_score": 2.531787872314453,
"max_stars_count": 1,
"path": "pathloss.py"
},
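The CI path-loss branch in pathloss.py above boils down to a single formula, PL(d) = FSPL(1 m) + 10·n·log10(d3d) + X_sigma, with FSPL(1 m) = 20·log10(4·pi·fc/c). A minimal self-contained sketch of that computation follows; the function and variable names are illustrative rather than the repo's own, and the 28 GHz / PLE 2.1 / 4.4 dB example values simply mirror the UMi LOS branch shown above.

```python
import numpy as np

C = 3.0e8  # speed of light, m/s


def ci_pathloss_db(fc_hz, d3d_m, ple, sf_dev_db, rng=None):
    """Close-in (CI) reference-distance path loss in dB at 3D distance d3d_m."""
    rng = rng or np.random.default_rng()
    fspl_1m = 20 * np.log10(4 * np.pi * fc_hz / C)   # free-space path loss at the 1 m reference
    shadowing = rng.normal(0.0, sf_dev_db)           # log-normal shadow-fading term in dB
    if d3d_m < 1.0:
        return fspl_1m + shadowing                   # inside the reference distance: FSPL only
    return fspl_1m + 10 * ple * np.log10(d3d_m) + shadowing


# e.g. a 100 m UMi LOS link at 28 GHz with PLE 2.1 and 4.4 dB shadow-fading deviation
print(ci_pathloss_db(28e9, 100.0, ple=2.1, sf_dev_db=4.4))
```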
{
"content": "# =========================\n# LOS Probability Generator\n# =========================\n\nimport dist_check as dsc\nfrom scenario_var import scenario_var \n\n# ==================================================\n# Los Probability for UMi Street Canyon (Small Cell)\n\n\nscn = scenario_var();\ndef los_prob(np, dist,sc_flag):\n if sc_flag == 1: # Small Cell\n los_prob = 0; # Probability holder variable\n if dist<=18:\n los_prob = 1; # The UE is in a LOS zone surely given that the distance is less than 18m\n #print (\"SC within dist:\", los_prob)\n return los_prob # Return the los probability\n else:\n for i in range(0,1000):\n intr_prob = (18/dist)*(np.exp(-1*(dist/36)))*(1-18/dist); # Probability of a UE being in LOS zone given that the distance is beyond 18m\n los_prob = los_prob + intr_prob; # We sum and average to obtain the average probability value \n #print (\"Small Cell LOS Probability\", los_prob/i)\n return los_prob/i # We return the expected value of LOS Probability\n\n elif sc_flag == 0: # Macro Cell\n los_prob = 0; # Los probability holder variable\n C = dsc.los_prob_var_gen(scn.usr_ht); # Compute the Necessary Variables\n if dist<=18:\n los_prob = 1; # The UE is in LOS always given that the distance is less than 18m\n #print (\"MC within dist:\", los_prob)\n return los_prob\n else: \n for i in range(0,1000):\n intr_prob = (18/dist + np.exp(-1*dist/63)*(1-18/dist))*(1 + C*(5/4)*((dist/100)**3)*np.exp(-1*dist/150)); # Probability of a UE being in LOS given that the distance between UE and MC is more than 18m\n los_prob = los_prob + intr_prob\n #print (\"Macro Cell LOS Probability\",los_prob/i )\n return los_prob/i # Return the average which will be close to the expected value \n\n\n\n# def los_prob_sc (np,dist):\n# # We take the expected value, i.e. a value over a 1000 iterations\n# los_prob = 0; # Probability holder variable\n# if dist<=18:\n# los_prob = 1; # The UE is in a LOS zone surely given that the distance is less than 18m\n# print (\"SC within dist:\", los_prob)\n# return los_prob # Return the los probability\n# else:\n# for i in range(0,1000):\n# intr_prob = (18/dist)*(np.exp(-1*(dist/36)))*(1-18/dist); # Probability of a UE being in LOS zone given that the distance is beyond 18m\n# los_prob = los_prob + intr_prob; # We sum and average to obtain the average probability value \n# print (\"Small Cell LOS Probability\", los_prob/i)\n# return los_prob/i # We return the expected value of LOS Probability\n\n\n# # ==============================================================\n# # Los Probability for the UMa (Macro Cell in Urban Environments)\n\n# def los_prob_mc(np,dist,C):\n# # We take the expected value, i.e. an average over 1000 iterations\n# los_prob = 0; # Los probability holder variable\n# if dist<=18:\n# los_prob = 1; # The UE is in LOS always given that the distance is less than 18m\n# #print (\"MC within dist:\", los_prob)\n# return los_prob\n# else: \n# for i in range(0,1000):\n# intr_prob = (18/dist + np.exp(-1*dist/63)*(1-18/dist))*(1 + C*(5/4)*((dist/100)**3)*np.exp(-1*dist/150)); # Probability of a UE being in LOS given that the distance between UE and MC is more than 18m\n# los_prob = los_prob + intr_prob\n# #print (\"Macro Cell LOS Probability\",los_prob/i )\n# return los_prob/i # Return the average which will be close to the expected value \n\n\n\n",
"id": "10887870",
"language": "Python",
"matching_score": 2.3072311878204346,
"max_stars_count": 1,
"path": "los_probability.py"
}
] | 2.41951 |
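The 1000-iteration loops in los_probability.py above average a deterministic expression, so what they return is effectively the closed-form LOS probability evaluated once. A compact equivalent of the macro-cell branch is sketched below; the function name and the caller-supplied height factor c_ht are assumptions, not identifiers from the repo.

```python
import numpy as np


def los_prob_uma(d2d_m, c_ht=0.0):
    """Closed-form UMa-style LOS probability for a 2D UE-BS distance in metres."""
    if d2d_m <= 18.0:
        return 1.0
    base = 18.0 / d2d_m + np.exp(-d2d_m / 63.0) * (1.0 - 18.0 / d2d_m)
    height_boost = 1.0 + c_ht * (5.0 / 4.0) * (d2d_m / 100.0) ** 3 * np.exp(-d2d_m / 150.0)
    return base * height_boost


# one Bernoulli draw against this probability decides LOS vs. NLOS for a single link
is_los = np.random.rand() <= los_prob_uma(120.0)
```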
jhinAza | [
{
"content": "{% if cookiecutter.test_suite == \"unittest\" %}\nimport unittest\n\nclass TestTrue(unittest.TestCase):\n\n def test_true(self):\n self.assertTrue(True)\n{% elif cookiecutter.test_suite == \"pytest\" %}\nimport pytest\n\nclass TestTrue():\n\n def test_true(self):\n assert True\n{% endif %}\n\n",
"id": "2941408",
"language": "Python",
"matching_score": 0,
"max_stars_count": 2,
"path": "{{cookiecutter.project}}/tests/{{cookiecutter.project_slug}}_test.py"
},
{
"content": "from flask import Blueprint, current_app, render_template\n\n{{cookiecutter.project_slug}} = Blueprint(\"{{cookiecutter.project_slug}}\", __name__)\n\n@{{cookiecutter.project_slug}}.route('/')\ndef root():\n return \"Hello world\"\n",
"id": "6117056",
"language": "Python",
"matching_score": 2.0377073287963867,
"max_stars_count": 2,
"path": "{{cookiecutter.project}}/{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}_root.py"
},
{
"content": "from flask import Flask\nfrom {{cookiecutter.project_slug}}.{{cookiecutter.project_slug}}_root import {{cookiecutter.project_slug}}\n\n__author__ = \"\"\"{{cookiecutter.mantainer_name}}\"\"\"\n__email__ = '{{cookiecutter.mantainer_email}}'\n__version__ = '0.1.0'\n\n\ndef create_app():\n app = Flask(__name__)\n app.register_blueprint({{cookiecutter.project_slug}})\n return app\n",
"id": "43008",
"language": "Python",
"matching_score": 2.290661096572876,
"max_stars_count": 2,
"path": "{{cookiecutter.project}}/{{cookiecutter.project_slug}}/__init__.py"
},
{
"content": "import {{cookiecutter.project_slug}}\n\napp = {{cookiecutter.project_slug}}.create_app()\n\ndef main():\n app.run(host=\"127.0.0.1\", port={{cookiecutter.port}}, debug=True)\n\nif __name__ == '__main__':\n main()\n",
"id": "5030151",
"language": "Python",
"matching_score": 1.7969861030578613,
"max_stars_count": 2,
"path": "{{cookiecutter.project}}/main.py"
}
] | 1.917347 |
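To make the {{cookiecutter.*}} placeholders in the templates above concrete, this is roughly what the generated entry point would look like for a hypothetical project_slug of "myapp" and port 5000 (both values chosen purely for illustration).

```python
# main.py as rendered by cookiecutter for project_slug = "myapp", port = 5000
import myapp

app = myapp.create_app()  # create_app() registers the "myapp" Blueprint defined in myapp_root.py


def main():
    app.run(host="127.0.0.1", port=5000, debug=True)


if __name__ == '__main__':
    main()
```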
ephraim-kamau54 | [
{
"content": "from flask import Blueprint\nemail= Blueprint('email', __name__)\nfrom . import views,forms",
"id": "6307382",
"language": "Python",
"matching_score": 1.085750699043274,
"max_stars_count": 0,
"path": "app/emails/__init__.py"
},
{
"content": "from flask_uploads import UploadSet, configure_uploads, IMAGES\nfrom config import config_options\nfrom flask_mail import Mail\nfrom dotenv import load_dotenv\nimport os\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_login import LoginManager\nfrom flask_bcrypt import Bcrypt\nfrom flask_simplemde import SimpleMDE\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bootstrap import Bootstrap\nfrom flask import Flask, Blueprint\nmain = Blueprint('main', __name__)\n\n\nload_dotenv()\nMAIL_USERNAME = os.getenv(\"MAIL_USERNAME\")\nMAIL_PASSWORD = os.getenv(\"MAIL_PASSWORD\")\n\nbootstrap = Bootstrap()\nsimple = SimpleMDE()\ndb = SQLAlchemy()\nbcrypt = Bcrypt()\nmail = Mail()\nphotos = UploadSet('photos', IMAGES)\n\n\nlogin_manager = LoginManager()\nlogin_manager.login_view = 'auth.login'\nlogin_manager.login_message_category = 'info'\nlogin_manager.session_protection = 'strong'\n\n\ndef create_app(config_name):\n\n # Initializing application\n app = Flask(__name__)\n\n # Creating the app configurations\n app.config.from_object(config_options[config_name])\n\n # Initializing flask extensions\n bootstrap.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n mail.init_app(app)\n bcrypt.init_app(app)\n simple.init_app(app)\n\n # Registration blueprint\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n\n #authentication blueprint\n from .auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint, url_prefix='/authenticate')\n\n # configure UploadSet\n configure_uploads(app, photos)\n\n return app\n",
"id": "12458713",
"language": "Python",
"matching_score": 2.0248501300811768,
"max_stars_count": 0,
"path": "app/__init__.py"
},
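The create_app(config_name) factory above only does something once an entry point instantiates it with one of the keys defined in config_options; a typical launcher (hypothetical file and config key, not part of this listing) would look like the sketch below.

```python
# run-style entry point (illustrative; the config key must exist in config_options)
from app import create_app

app = create_app('development')

if __name__ == '__main__':
    app.run()
```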
{
"content": "from . import auth\nfrom flask import render_template, url_for, flash, redirect, request, abort\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom ..models import User\nfrom .forms import LoginForm, RegistrationForm\nfrom ..email import mail_message\nfrom ..import db, bcrypt\n\n\n@auth.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('main.home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(\n form.password.data).decode('utf-8')\n user = User(username=form.username.data,\n email=form.email.data, password=<PASSWORD>)\n db.session.add(user)\n db.session.commit()\n\n mail_message(\"Welcome to postPip\", \"email/welcome_user\",\n user.email, user=user)\n\n flash('Your account has been created! You are now able to log in', 'success')\n return redirect(url_for('auth.login'))\n return render_template('register.html', title='Register', form=form)\n\n\n@auth.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('main.home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n return redirect(request.args.get('next') or url_for('main.home'))\n else:\n flash('Login Unsuccessful. Please check email and password', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n\n@auth.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('main.home'))\n",
"id": "4518782",
"language": "Python",
"matching_score": 4.506946563720703,
"max_stars_count": 0,
"path": "app/auth/views.py"
},
{
"content": "from flask import render_template, url_for, flash, redirect, request, abort\nfrom . import main\nfrom ..models import User, Post, Comment\nfrom .forms import UpdateProfile, PostForm, CommentForm\nfrom .. import db, photos\nfrom flask_login import login_user, current_user, logout_user, login_required\n\n\n@main.route(\"/\")\n@main.route(\"/home\")\ndef home():\n posts = Post.query.order_by(Post.date_posted.desc())\n return render_template('home.html', posts=posts, title='Home')\n\n\n@main.route(\"/about\")\ndef about():\n return render_template('about.html', title='About')\n\n\n@main.route(\"/account/<uname>\")\n@login_required\ndef account(uname):\n user = User.query.filter_by(username=uname).first()\n if user is None:\n abort(404)\n posts = Post.query.filter_by(user_id=current_user.id).all()\n return render_template('account.html', title='Account', user=user, posts=posts)\n\n\n@main.route('/account/<uname>/update', methods=['GET', 'POST'])\n@login_required\ndef update_profile(uname):\n user = User.query.filter_by(username=uname).first()\n if user is None:\n abort(404)\n\n form = UpdateProfile()\n\n if form.validate_on_submit():\n user.bio = form.bio.data\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(url_for('main.account', uname=user.username, title='Update Profile'))\n\n return render_template('update.html', form=form)\n\n\n@main.route('/user/<uname>/update/pic', methods=['POST'])\n@login_required\ndef update_pic(uname):\n user = User.query.filter_by(username=uname).first()\n if 'photo' in request.files:\n filename = photos.save(request.files['photo'])\n path = f'photos/{filename}'\n user.image_file = path\n db.session.commit()\n return redirect(url_for('main.account', uname=uname))\n\n\n@main.route(\"/post/new\", methods=['GET', 'POST'])\n@login_required\ndef new_post():\n form = PostForm()\n if form.validate_on_submit():\n post = Post(title=form.title.data, content=form.content.data,\n category=form.category.data, author=current_user, upvotes=0, downvotes=0)\n db.session.add(post)\n db.session.commit()\n flash('Your post has been created', 'success')\n return redirect(url_for('main.home'))\n return render_template('create_post.html', title='New Post', form=form, legend='New Post')\n\n\n@main.route(\"/post/<int:post_id>\", methods=['GET', 'POST'])\ndef post(post_id):\n post = Post.query.get_or_404(post_id)\n\n if request.args.get(\"upvote\"):\n post.upvotes += 1\n db.session.add(post)\n db.session.commit()\n return redirect(\"/post/{post_id}\".format(post_id=post.id))\n\n elif request.args.get(\"downvote\"):\n post.downvotes += 1\n db.session.add(post)\n db.session.commit()\n return redirect(\"/post/{post_id}\".format(post_id=post.id))\n\n form = CommentForm()\n if form.validate_on_submit():\n comment = form.text.data\n\n new_comment = Comment(content=comment, post_id=post.id)\n\n new_comment.save_comment()\n comments = Post.get_comments(post)\n\n return render_template('post.html', title=post.title, post=post, comments=comments, form=form)\n\n\n@main.route(\"/post/<int:post_id>/update\", methods=['GET', 'POST'])\n@login_required\ndef update_post(post_id):\n post = Post.query.get_or_404(post_id)\n if post.author != current_user:\n abort(403)\n form = PostForm()\n if form.validate_on_submit():\n post.title = form.title.data\n post.content = form.content.data\n db.session.commit()\n flash('Your post has been updated', 'success')\n return redirect(url_for('main.post', post_id=post.id))\n elif request.method == 'GET':\n form.title.data = post.title\n 
form.content.data = post.content\n return render_template('create_post.html', title='Update Post', form=form, legend='Update Post')\n\n\n@main.route(\"/post/<int:post_id>/delete\", methods=['POST'])\n@login_required\ndef delete_post(post_id):\n post = Post.query.get_or_404(post_id)\n if post.author != current_user:\n abort(403)\n db.session.delete(post)\n db.session.commit()\n flash('Your post has been deleted!', 'success')\n return redirect(url_for('main.home'))\n\n\n@main.route(\"/posts_by_category/<string:postname>\")\ndef posts_by_category(postname):\n posts = Post.query.filter_by(category=postname).all()\n return render_template('posts_by_category', title='Posts By Category', posts=posts)\n",
"id": "6656445",
"language": "Python",
"matching_score": 3.5610525608062744,
"max_stars_count": 0,
"path": "app/main/views.py"
},
{
"content": "from datetime import datetime\nfrom app import db, login_manager\nfrom flask_login import UserMixin\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))\n\n\nclass User(db.Model, UserMixin):\n __tablename__ = 'user'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.String(20), unique=True, nullable=False)\n email = db.Column(db.String(120), unique=True, nullable=False)\n bio = db.Column(db.String(255))\n image_file = db.Column(db.String())\n password = db.Column(db.String(60), nullable=False)\n posts = db.relationship('Post', backref='author', lazy=True)\n\n def __repr__(self):\n return f\"User('{self.username}', '{self.email}', '{self.image_file}')\"\n\n\nclass Post(db.Model):\n __tablename__ = 'post'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(100), nullable=False)\n date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n content = db.Column(db.Text, nullable=False)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n upvotes = db.Column(db.Integer)\n downvotes = db.Column(db.Integer)\n comments = db.relationship('Comment', backref='post', lazy='dynamic')\n\n def get_comments(self):\n post = Post.query.filter_by(id=self.id).first()\n comments = Comment.query.filter_by(\n post_id=post.id).order_by(Comment.timestamp.desc())\n return comments\n\n #def get_comments(self):\n #post = Post.query.filter_by(id=self.id).first()\n #comments = Comment.query.filter_by(id = post.id).all()\n #return comments\n\n def __repr__(self):\n return f\"Post('{self.title}', '{self.date_posted}','{self.category})\"\n\n\nclass Comment(db.Model):\n __tablename__ = 'comments'\n id = db.Column(db.Integer, primary_key=True)\n post_id = db.Column(db.Integer, db.ForeignKey('post.id'))\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n timestamp = db.Column(db.DateTime)\n content = db.Column(db.String)\n\n def save_comment(self):\n db.session.add(self)\n db.session.commit()\n\n @classmethod\n def get_comments(cls, id):\n comments = Comment.query.filter_by(post_id=id).all()\n return comments\n\n def __repr__(self):\n return f'User {self.content}'\n",
"id": "6381059",
"language": "Python",
"matching_score": 1.352102279663086,
"max_stars_count": 0,
"path": "app/models.py"
},
{
"content": "# Generated by Django 3.2.5 on 2021-07-17 19:41\n\nfrom django.conf import settings\nimport django.core.validators\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('project_title', models.CharField(max_length=100)),\n ('project_image', models.ImageField(upload_to='post_images')),\n ('project_description', models.TextField(max_length=300)),\n ('project_url', models.CharField(max_length=500)),\n ('created_on', models.DateTimeField(auto_now_add=True)),\n ('last_modified', models.DateTimeField(auto_now=True)),\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='Ratings',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('design_rating', models.PositiveIntegerField(default=0, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])),\n ('usability_rating', models.PositiveIntegerField(default=0, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])),\n ('content_rating', models.PositiveIntegerField(default=0, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)])),\n ('comment', models.TextField()),\n ('created_on', models.DateTimeField(auto_now_add=True)),\n ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='awwardposts.post')),\n ],\n ),\n ]\n",
"id": "11992823",
"language": "Python",
"matching_score": 5.954510688781738,
"max_stars_count": 0,
"path": "awardsposts/migrations/0001_initial.py"
},
{
"content": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MaxValueValidator, MinValueValidator \n\n# Create your models here.\nclass Post(models.Model):\n project_title = models.CharField(max_length=100)\n project_image = models.ImageField(upload_to='post_images')\n project_description = models.TextField(max_length=300)\n project_url = models.CharField(max_length=500)\n created_on = models.DateTimeField(auto_now_add=True)\n last_modified = models.DateTimeField(auto_now=True)\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n \n def __str__(self):\n return self.project_title\n\n @classmethod\n def no_of_ratings(self):\n ratings = Ratings.objects.filter(post=self)\n return len(ratings)\n\n @classmethod\n def avg_rating(self):\n sum = 0\n ratings = Ratings.objects.filter(post=self)\n for rating in ratings:\n sum += rating.stars\n\n if len(ratings) > 0:\n return sum/len(ratings)\n else:\n return 0\n\nclass Ratings(models.Model):\n post = models.ForeignKey(Post, on_delete=models.CASCADE)\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n design_rating = models.PositiveIntegerField(default=0, validators=[MinValueValidator(1), MaxValueValidator(10)])\n usability_rating = models.PositiveIntegerField(default=0, validators=[MinValueValidator(1), MaxValueValidator(10)])\n content_rating = models.PositiveIntegerField(default=0, validators=[MinValueValidator(1), MaxValueValidator(10)])\n comment = models.TextField()\n created_on = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.author\n\n",
"id": "4521482",
"language": "Python",
"matching_score": 3.7746195793151855,
"max_stars_count": 1,
"path": "awardsposts/models.py"
},
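Note that avg_rating in the Post model above reads rating.stars, a field Ratings does not define (it stores design_rating, usability_rating and content_rating), and both helpers filter on post=self from inside a classmethod, where self is bound to the class rather than an instance. A per-post average over the three stored scores could instead be computed as in the sketch below, which assumes only the default ratings_set reverse relation.

```python
def post_average_rating(post):
    """Mean of the three per-review scores across all Ratings rows of one Post (sketch)."""
    ratings = list(post.ratings_set.all())  # default reverse accessor for the Ratings -> Post FK
    if not ratings:
        return 0
    per_review = [
        (r.design_rating + r.usability_rating + r.content_rating) / 3.0
        for r in ratings
    ]
    return sum(per_review) / len(per_review)
```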
{
"content": "from django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\nfrom django.urls import reverse\nfrom cloudinary.models import CloudinaryField\n\n\nclass Post(models.Model):\n title = models.CharField(max_length=100)\n content = models.TextField()\n date_posted = models.DateTimeField(default=timezone.now)\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n image = CloudinaryField('image', null=True)\n\n def __str__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('post-detail', kwargs={'pk': self.pk})\n\nclass Neighborhood(models.Model):\n neighborhood_name = models.CharField(max_length=100)\n neighborhood_location = models.CharField(max_length=100)\n occupants_count = models.IntegerField()\n admin = models.ForeignKey(User, on_delete=models.CASCADE)\n neighborhood_image = CloudinaryField('image')\n \n\n def __str__(self):\n return self.neighborhood_name\n\n def get_absolute_url(self):\n return reverse('neighborhood-detail', kwargs={'pk': self.pk})\n\nclass Business(models.Model):\n business_name = models.CharField(max_length=100)\n business_location = models.CharField(max_length=100)\n business_email = models.EmailField()\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n neighborhood = models.ForeignKey(Neighborhood, on_delete=models.CASCADE, null=True, blank=True)\n business_description=models.TextField()\n business_image = CloudinaryField('image')\n\n def __str__(self):\n return self.business_name\n\n def get_absolute_url(self):\n return reverse('business-detail', kwargs={'pk': self.pk})\n\nclass Contact(models.Model):\n contact_name = models.CharField(max_length=100)\n contact_email = models.EmailField()\n contact_number=models.TextField()\n contact_address = models.TextField()\n user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)\n contact_logo = CloudinaryField('image')\n \n\n def __str__(self):\n return self.contact_name\n\n def get_absolute_url(self):\n return reverse('contact-detail', kwargs={'pk': self.pk})\n",
"id": "10010837",
"language": "Python",
"matching_score": 3.851217269897461,
"max_stars_count": 0,
"path": "mtaa_watch/models.py"
},
{
"content": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom PIL import Image\nfrom cloudinary.models import CloudinaryField\n\n\nneighborhood = (\n ('1', 'Vaal'),\n ('2', 'Red Cross'),\n ('3', 'The Park'),\n ('4', 'BEACH'),\n ('5', 'BMW'),\n ('6', 'Deer'),\n ('7', 'Kenya Police'),\n ('8', 'She Wolf'),\n )\n\n\nclass Profile(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n image = CloudinaryField('image', null=True)\n neighborhood = models.CharField(max_length=25,choices=neighborhood, default='Red Ville')\n\n def __str__(self):\n return f'{self.user.username} Profile'\n\n # def save(self, *args, **kwargs):\n # super().save(*args, **kwargs)\n\n # img = Image.open(self.image.path)\n\n # if img.height > 300 or img.width > 300:\n # output_size = (300, 300)\n # img.thumbnail(output_size)\n # img.save(self.image.path)\n",
"id": "1522471",
"language": "Python",
"matching_score": 1.9077714681625366,
"max_stars_count": 0,
"path": "users/models.py"
},
{
"content": "# Generated by Django 3.2.5 on 2021-07-26 12:05\n\nimport cloudinary.models\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mtaa_watch', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='post',\n name='post_image',\n ),\n migrations.AddField(\n model_name='post',\n name='image',\n field=cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name='image'),\n ),\n migrations.AlterField(\n model_name='business',\n name='business_image',\n field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='image'),\n ),\n migrations.AlterField(\n model_name='contact',\n name='contact_logo',\n field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='image'),\n ),\n migrations.AlterField(\n model_name='neighborhood',\n name='neighborhood_image',\n field=cloudinary.models.CloudinaryField(max_length=255, verbose_name='image'),\n ),\n ]\n",
"id": "9530104",
"language": "Python",
"matching_score": 3.950993299484253,
"max_stars_count": 0,
"path": "mtaa_watch/migrations/0002_auto_20210726_1505.py"
},
{
"content": "# Generated by Django 3.2.5 on 2021-07-26 12:05\n\nimport cloudinary.models\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='profile',\n name='image',\n field=cloudinary.models.CloudinaryField(max_length=255, null=True, verbose_name='image'),\n ),\n migrations.AlterField(\n model_name='profile',\n name='neighborhood',\n field=models.CharField(choices=[('1', 'Red Ville'), ('2', 'Green View'), ('3', 'The Park'), ('4', 'BEACH'), ('5', 'BMW'), ('6', 'Deer')], default='Red Ville', max_length=25),\n ),\n ]\n",
"id": "2927003",
"language": "Python",
"matching_score": 4.523736953735352,
"max_stars_count": 0,
"path": "users/migrations/0002_auto_20210726_1505.py"
},
{
"content": "# Generated by Django 3.2.5 on 2021-07-27 13:23\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('users', '0002_auto_20210726_1505'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='profile',\n name='neighborhood',\n field=models.CharField(choices=[('1', 'Red Ville'), ('2', 'Red Cross'), ('3', 'The Park'), ('4', 'BEACH'), ('5', 'BMW'), ('6', 'Deer'), ('7', 'Kenya Police')], default='Red Ville', max_length=25),\n ),\n ]\n",
"id": "9384069",
"language": "Python",
"matching_score": 0.47958630323410034,
"max_stars_count": 0,
"path": "users/migrations/0003_alter_profile_neighborhood.py"
},
{
"content": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, SubmitField, BooleanField, TextAreaField, SelectField\nfrom wtforms.validators import DataRequired, Length, Email, EqualTo, ValidationError\n\nclass UpdateProfile(FlaskForm):\n bio = StringField('Tell us about you.', validators=[DataRequired()])\n submit = SubmitField('Submit')\n\nclass PostForm(FlaskForm):\n title = StringField('Title', validators=[DataRequired()])\n content = TextAreaField('Content', validators=[DataRequired()])\n category = SelectField(\"Category\",\n choices=[(\"Interview\", \"Interview\"), (\"Motivation\", \"Motivation\"), (\"Product\", \"Product\"), (\"Promotion\", \"Promotion\")], validators=[DataRequired()])\n submit = SubmitField('Post')\n\nclass CommentForm(FlaskForm):\n text = TextAreaField('Leave a comment:', validators=[DataRequired()])\n submit = SubmitField('Submit')\n",
"id": "6817242",
"language": "Python",
"matching_score": 4.782727241516113,
"max_stars_count": 0,
"path": "app/main/forms.py"
},
{
"content": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import DataRequired, Email\n\n\nclass subscription_form(FlaskForm):\n email = StringField('Email', validators=[DataRequired(), Email()])\n submit = SubmitField('Subscribe')\n",
"id": "1097453",
"language": "Python",
"matching_score": 1.433752417564392,
"max_stars_count": 0,
"path": "app/emails/forms.py"
},
{
"content": "from django import forms\n\nclass PostForm(forms.Form):\n project_title = forms.CharField(widget=forms.TextInput(attrs={\"class\": \"form-control\",\"placeholder\": \"Project Title\"}))\n project_image = forms.ImageField()\n project_description = forms.CharField(widget=forms.Textarea(attrs={\"class\": \"form-control\",\"placeholder\": \"Project Description\"}))\n project_url = forms.CharField(widget=forms.TextInput(attrs={\"class\": \"form-control\",\"placeholder\": \"Link to live Site\"}))\n\nclass RatingForm(forms.Form):\n design_rating = forms.IntegerField()\n usability_rating = forms.IntegerField()\n content_rating = forms.IntegerField()\n comment = forms.CharField(widget=forms.Textarea(attrs={\"class\": \"form-control\",\"placeholder\": \"Leave a comment\"}))\n\n ",
"id": "3646673",
"language": "Python",
"matching_score": 2.17594313621521,
"max_stars_count": 1,
"path": "awardsposts/forms.py"
},
{
"content": "from django.shortcuts import render, redirect, HttpResponse\nfrom django.contrib import messages\nfrom .forms import PostForm\nfrom .models import Post\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n\n@login_required\n@csrf_protect\ndef new_post(request):\n form = PostForm(request.POST,request.FILES)\n if request.method == 'POST':\n form = PostForm(request.POST,request.FILES)\n if form.is_valid():\n post = Post(\n project_title = form.cleaned_data[\"project_title\"],\n project_image = form.cleaned_data[\"project_image\"],\n project_description = form.cleaned_data[\"project_description\"],\n project_url = form.cleaned_data[\"project_url\"],\n author = request.user\n )\n \n post.save()\n\n post_name = form.cleaned_data.get('project_title')\n messages.success(request, f'Your post has been created for {post_name} !')\n return redirect('awards-home')\n else:\n form = PostForm()\n\n return render(request, 'awardsposts/newPost.html',{'form': form})\n\n\n",
"id": "8966618",
"language": "Python",
"matching_score": 1.2355339527130127,
"max_stars_count": 1,
"path": "awardsposts/views.py"
},
{
"content": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='awards-home'),\n path('about', views.about, name='awards-about'),\n path('<int:pk>/', views.rating, name='rating'),\n]",
"id": "4984637",
"language": "Python",
"matching_score": 1.4801459312438965,
"max_stars_count": 1,
"path": "awwardshome/urls.py"
},
{
"content": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('Phome', views.home, name='instagramHome-home'),\n path('Pabout/', views.about, name='instagramHome-about'),\n path('new_post/', views.add_post, name='add_post'),\n path(\"<int:pk>/\", views.post_detail, name=\"post_detail\"),\n path('<int:pk>',views.like, name='likes')\n]",
"id": "4818041",
"language": "Python",
"matching_score": 2.0406017303466797,
"max_stars_count": 1,
"path": "instagramHome/urls.py"
},
{
"content": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('newpost', views.new_post, name='new_post'), \n]",
"id": "5718671",
"language": "Python",
"matching_score": 0.23459532856941223,
"max_stars_count": 1,
"path": "awardsposts/urls.py"
},
{
"content": "#!/usr/bin/env python3.8\nfrom account import Account\nfrom credentials import Credentials\n\n\ndef create_account(account_name, user_name, password, email):\n '''\n Function to create a new account\n '''\n new_account = Account(account_name, user_name, password, email)\n return new_account\n\n\ndef save_accounts(account):\n '''\n Function to save account\n '''\n account.save_account()\n\n\ndef del_account(account):\n '''\n Function to delete_account\n '''\n account.delete_account()\n\n\ndef find_account(name):\n '''\n Function that finds a account by name and returns the account\n '''\n return Account.find_by_name(name)\n\n\ndef check_existing_accounts(name):\n '''\n Function that check if an account exists with that name and return a Boolean\n '''\n return Account.account_exist(name)\n\n\ndef display_accounts():\n '''\n Funcntion that returns all the saved accounts\n '''\n return Account.display_accounts()\n\n# _________________________________________CREDENTIALS____________________________________________________________________\n\n\ndef create_credentials(credentials_name, usr_name, password, email):\n '''\n Function to create a new account\n '''\n new_credentials = Credentials(credentials_name, usr_name, password, email)\n return new_credentials\n\n\ndef save_credentials(credentials):\n '''\n Function to save account\n '''\n credentials.save_credentials()\n\n\ndef del_credentials(credentials):\n '''\n Function to delete a account\n '''\n credentials.delete_credentials()\n\n\ndef find_credentials(name):\n '''\n Function that finds a account by nane and returns the account\n '''\n return Credentials.find_by_name(name)\n\n\ndef check_existing_credentials(name):\n '''\n Function that check if an account exists with that name and return a Boolean\n '''\n return Credentials.credentials_exist(name)\n\n\ndef display_credentials():\n '''\n Function that returns all the saved accounts\n '''\n return Credentials.display_credentials()\n\n\ndef main():\n print(\"Hello Welcome to your Pass Word Locker. What is your name?\")\n user_name = input()\n print(\n f\"Hello {user_name}, sign up to Pass Word Locker to create an account.\")\n print('\\n')\n while True:\n print(\"Use these known short codes to operate :\\n SU -> SIGN UP.\\n DA -> Display your account.\\n LN ->LOGIN.\\n ex ->exit Pass Word Locker. 
\")\n short_code = input().lower()\n if short_code == 'su':\n print(\"Create a Pass Word Locker Account\")\n print(\"_\"*100)\n account_name = input('Account name:')\n print('\\n')\n u_name = input('User name:')\n print('\\n')\n pwd = input('Password : ')\n print('\\n')\n e_address = input('Email address:')\n save_accounts(create_account(account_name, u_name, pwd, e_address))\n print('\\n')\n print(\n f\"A New {account_name} Account with the user name {u_name} has been created.\")\n print(\n f\"You can now login to your {account_name} account using your password.\")\n print('\\n')\n elif short_code == 'da':\n if display_accounts():\n print(\"Here is your account and your details\")\n print('\\n')\n for account in display_accounts():\n print(\n f\"Account name:{account.account_name} User name: {account.user_name} Password:{<PASSWORD>}\")\n print('\\n')\n else:\n print('\\n')\n print(\n \"You dont seem to have created an account.Sign up to create a new account.\")\n print('\\n')\n elif short_code == 'ln':\n print(\"Enter your password to login.\")\n search_account = input()\n if check_existing_accounts(search_account):\n search_cred = find_account(search_account)\n print(\"\\033[1;32;1m \\n\")\n print(f\"You are now logged in to your {account_name} account\")\n print(\"\\033[1;37;1m \\n\")\n # ========================================CREDENTIALS AREA=======================================================================\n while True:\n print('''\n Use these short codes:\n CA -> Create new credential.\n DC -> Display your credentials list\n ex ->Log out your credentials account.''')\n short_code = input().lower()\n if short_code == \"ca\":\n print(\"Create new credential\")\n print('_' * 20)\n credentials_name = input('Credential name:')\n print('\\n')\n usr_name = input(f\"{credentials_name} user name:\")\n print('\\n')\n print('*' * 20)\n pwd = input(f\"{credentials_name} password:\")\n save_credentials(create_credentials(credentials_name, u_name,pwd,e_address))\n print('\\n')\n print(\n f\"A New {credentials_name} Account with the user name {usr_name} has been created.\")\n print('\\n')\n elif short_code == 'dc':\n if display_credentials():\n print(\"Here is your credentials\")\n print('\\n')\n for credentials in display_credentials():\n print(\n f\"Credential name:{credentials.credentials_name} User name: {credentials.usr_name} Password:{<PASSWORD>}\")\n print('\\n')\n else:\n print('\\n')\n print(\n \"You don't seem to have created any account yet\")\n print('\\n')\n elif short_code == \"ex\":\n print('\\n')\n print(\n f\"You have logged out your {account_name} account\")\n print('\\n')\n break\n\n else:\n print('\\n')\n print(\"WRONG PASSWORD!! PLEASE ENTER CORRECT PASSWORD TO LOGIN\")\n print('\\n')\n print('\\n')\n\n elif short_code == \"ex\":\n print(f\"Thanks {user_name} for your time.I hope you enjoyed my service.Bye...\")\n break\n else:\n print(\"I really didn't get that. Please use the short codes\")\n\n\nif __name__ == '__main__':\n main()\n",
"id": "7335814",
"language": "Python",
"matching_score": 2.8659119606018066,
"max_stars_count": 0,
"path": "run.py"
},
{
"content": "import unittest \nfrom credentials import Credentials \n\n\nclass TestCredentials(unittest.TestCase):\n def setUp(self):\n\n self.new_credentials = Credentials(\n \"junior\", \"kamau\", \"123456\", \"<EMAIL>\") \n\n def test_init(self):\n '''\n test_init test case to test if the object is initialized properly\n '''\n\n self.assertEqual(self.new_credentials.credentials_name, \"junior\")\n self.assertEqual(self.new_credentials.usr_name, \"kamau\")\n self.assertEqual(self.new_credentials.password, \"<PASSWORD>\")\n self.assertEqual(self.new_credentials.email, \"<EMAIL>\")\n\n def test_save_credentials(self):\n '''\n test_save_account test case to test if the account object is saved into\n the account list\n '''\n self.new_credentials.save_credentials() \n self.assertEqual(len(Credentials.credentials_list), 1)\n\n def tearDown(self):\n '''\n tearDown method that does clean up after each test case has run.\n '''\n Credentials.credentials_list = []\n\n def test_save_multiple_credentials(self):\n '''\n test_save_multiple_account to check if we can save multiple account\n objects to our account_list\n '''\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\n \"Test\", \"user\", \"0745734706\", \"<EMAIL>\") \n test_credentials.save_credentials()\n self.assertEqual(len(Credentials.credentials_list), 2)\n\n def test_delete_credentials(self):\n '''\n test_delete_account to test if we can remove an account from our account list\n '''\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\n \"Test\", \"user\", \"0745734706\", \"<EMAIL>\") \n test_credentials.save_credentials()\n\n self.new_credentials.delete_credentials() \n self.assertEqual(len(Credentials.credentials_list), 1)\n\n def test_find_credentials_by_credentials_name(self):\n '''\n test to check if we can find an account by account_name and display information\n '''\n\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\n \"Test\", \"user\", \"0711223344\", \"<EMAIL>\") \n test_credentials.save_credentials()\n\n found_credentials = Credentials.find_by_name(\"Test\")\n\n self.assertEqual(found_credentials.email, test_credentials.email)\n\n def test_credentials_exists(self):\n '''\n test to check if we can return a Boolean if we cannot find the account.\n '''\n\n self.new_credentials.save_credentials()\n test_credentials = Credentials(\n \"Test\", \"user\", \"0711223344\", \"<EMAIL>\") \n test_credentials.save_credentials()\n\n credentials_exists = Credentials.credentials_exist(\"0711223344\")\n self.assertTrue(credentials_exists)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "12511797",
"language": "Python",
"matching_score": 0.5685482621192932,
"max_stars_count": 0,
"path": "credentials_test.py"
},
{
"content": "import urllib.request,json\nfrom .models import Sources,Articles\nfrom datetime import datetime\n\napi_key=None\nbase_url=None\narticles_url=None\n\ndef configure_request(app):\n global api_key,base_url,articles_url\n api_key=app.config['NEWS_API_KEY']\n base_url=app.config['NEWS_SOURCES_BASE_URL']\n articles_url=app.config['ARTICLES_BASE_URL']\n\ndef get_sources(category):\n '''\n function that gets the jsn response\n '''\n get_sources_url=base_url.format(category,api_key)\n \n with urllib.request.urlopen(get_sources_url) as url:\n get_sources_data=url.read()\n get_sources_response=json.loads(get_sources_data)\n sources_results=None\n if get_sources_response['sources']:\n sources_results_list=get_sources_response['sources']\n sources_results=process_sources(sources_results_list)\n return sources_results\n\ndef process_sources(sources_list):\n '''\n function that process the news sources results\n '''\n\n sources_results=[]\n\n for source_item in sources_list:\n id=source_item.get('id')\n name=source_item.get('name')\n description = source_item.get('description')\n url=source_item.get('url')\n category=source_item.get('category')\n language=source_item.get('language')\n country=source_item.get('country')\n \n sources_object=Sources(id,name,description,url,category,country,language)\n sources_results.append(sources_object)\n return sources_results\n\n\n\ndef get_articles(id):\n '''\n function to return a list \n '''\n get_articles_url=articles_url.format(id,api_key)\n\n with urllib.request.urlopen(get_articles_url)as url:\n articles_results=json.loads(url.read())\n articles_object=None\n if articles_results['articles']:\n articles_object=process_articles(articles_results['articles'])\n return articles_object\n\ndef process_articles(articles_list):\n '''\n function to list all articles\n '''\n articles_object=[]\n for articles_item in articles_list:\n\n id=articles_item.get('id')\n \n author=articles_item.get('author ')\n title= articles_item.get('title')\n\n description = articles_item.get('description ')\n url=articles_item.get('url')\n\n image=articles_item.get('urlToImage')\n date=articles_item.get('publishedAt')\n\n if image:\n articles_result=Articles(id,author,title,description,url,image,date)\n articles_object.append(articles_result)\n return articles_object\n ",
"id": "10364785",
"language": "Python",
"matching_score": 3.1384196281433105,
"max_stars_count": 0,
"path": "app/requests.py"
},
{
"content": "NEWS_API_KEY='fc005049647046a5a0538fead2b71a42'\n",
"id": "5870468",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "instance/config.py"
},
{
"content": "from django.apps import AppConfig\n\n\nclass InstagramhomeConfig(AppConfig):\n name = 'instagramHome'\n",
"id": "5326289",
"language": "Python",
"matching_score": 2.3046302795410156,
"max_stars_count": 1,
"path": "instagramHome/apps.py"
},
{
"content": "from django.apps import AppConfig\n\n\nclass InstagramusersConfig(AppConfig):\n name = 'instagramUsers'\n\n def ready(self):\n import instagramUsers.signals\n",
"id": "8379795",
"language": "Python",
"matching_score": 2.118300437927246,
"max_stars_count": 0,
"path": "instagramUsers/apps.py"
},
{
"content": "from django.apps import AppConfig\n\n\nclass AwardspostsConfig(AppConfig):\n name = 'awardsposts'\n",
"id": "11536809",
"language": "Python",
"matching_score": 2.282942295074463,
"max_stars_count": 1,
"path": "awardsposts/apps.py"
},
{
"content": "from django.apps import AppConfig\n\n\nclass AwwardusersConfig(AppConfig):\n name = 'awwardusers'\n",
"id": "1825795",
"language": "Python",
"matching_score": 2.118300437927246,
"max_stars_count": 0,
"path": "awwardusers/apps.py"
}
] | 2.1183 |
ZhenyueChin | [
{
"content": "import numpy as np\nimport argparse\nfrom baseline import *\nfrom os import sys, path, makedirs\n\nparser = argparse.ArgumentParser(description='Train a text classifier')\nparser.add_argument('--visdom', help='Turn on visdom reporting', type=str2bool, default=False)\nparser.add_argument('--tensorboard', help='Turn on tensorboard reporting', type=str2bool, default=False)\nparser.add_argument('--eta', help='Initial learning rate', default=0.01, type=float)\nparser.add_argument('--mom', help='SGD Momentum', default=0.9, type=float)\nparser.add_argument('--start_decay_epoch', type=int, help='At what epoch should we start decaying')\nparser.add_argument('--decay_rate', default=0.0, type=float, help='Learning rate decay')\nparser.add_argument('--decay_type', help='What learning rate decay schedule')\nparser.add_argument('--embed', help='Word2Vec embeddings file', required=True)\nparser.add_argument('--train', help='Training file', required=True)\nparser.add_argument('--valid', help='Validation file')\nparser.add_argument('--test', help='Test file', required=True)\nparser.add_argument('--save', help='Save basename', default='classify_sentence_pytorch')\nparser.add_argument('--nogpu', help='Do not use GPU', default=False)\nparser.add_argument('--optim', help='Optim method', default='adam', choices=['adam', 'adagrad', 'adadelta', 'sgd', 'asgd'])\nparser.add_argument('--dropout', help='Dropout probability', default=0.5, type=float)\nparser.add_argument('--unif', help='Initializer bounds for embeddings', default=0.25)\nparser.add_argument('--epochs', help='Number of epochs', default=25, type=int)\nparser.add_argument('--batchsz', help='Batch size', default=50, type=int)\nparser.add_argument('--mxlen', help='Max length', default=100, type=int)\nparser.add_argument('--patience', help='Patience', default=10, type=int)\nparser.add_argument('--cmotsz', help='Hidden layer size', default=100, type=int)\nparser.add_argument('--filtsz', help='Filter sizes', nargs='+', default=[3, 4, 5], type=int)\nparser.add_argument('--clean', help='Do cleaning', action='store_true', default=True)\nparser.add_argument('--static', help='Fix pre-trained embeddings weights', action='store_true')\nparser.add_argument('--valsplit', help='Validation split if no valid set', default=0.15, type=float)\nparser.add_argument('--outfile', help='Output file base', default='./classify-model')\nparser.add_argument('--backend', help='Which deep learning framework to use', default='tf')\nparser.add_argument('--keep_unused', help='Keep unused vocabulary terms as word vectors', default=False, type=str2bool)\nparser.add_argument('--do_early_stopping', help='Should we do early stopping?', default=True, type=str2bool)\nparser.add_argument('--early_stopping_metric', help='What metric should we use if stopping early', default='acc')\nparser.add_argument('--model_type', help='Name of model to load and train', default='default')\nparser.add_argument('--trainer_type', help='Name of trainer to load and train', default='default')\nparser.add_argument('--rev', help='Time reverse input text', default=False, type=str2bool)\nparser.add_argument('--bounds', type=int, default=16000, help='Tell optim decay functionality how many steps before applying decay')\nparser.add_argument('--verbose', type=str2bool, default=False, help='print confusion matrix for the test data')\nparser.add_argument('--gpus', help='GPUs', type=int)\n\n\nargs = parser.parse_args()\n\n\nif args.backend == 'pytorch':\n from baseline.pytorch import long_0_tensor_alloc as 
vec_alloc\n from baseline.pytorch import tensor_reverse_2nd as rev2nd\n import baseline.pytorch.classify as classify\n zeropadding = np.max(args.filtsz)\nelse:\n # Everything else uses numpy\n from numpy import zeros as vec_alloc\n from baseline.data import reverse_2nd as rev2nd\n if args.backend == 'keras':\n zeropadding = np.max(args.filtsz)\n\n import baseline.keras.classify as classify\n else:\n import baseline.tf.classify as classify\n # For tensorflow, use tf.pad internally in the model\n zeropadding = 0\n\nargs.reporting = setup_reporting(**vars(args))\n\nclean_fn = TSVSeqLabelReader.do_clean if args.clean else None\nsrc_vec_trans = rev2nd if args.rev else None\n\nprint(clean_fn, src_vec_trans)\nreader = create_pred_reader(args.mxlen, zeropadding, clean_fn, vec_alloc, src_vec_trans)\nvocab, labels = reader.build_vocab([args.train, args.test, args.valid])\nunif = 0 if args.static else args.unif\n\nEmbeddingsModelType = GloVeModel if args.embed.endswith(\".txt\") else Word2VecModel\nembeddings = {}\nembeddings['word'] = EmbeddingsModelType(args.embed, vocab['word'], unif_weight=args.unif, keep_unused=args.keep_unused)\nfeature2index = {}\nfeature2index['word'] = embeddings['word'].vocab\n\nts = reader.load(args.train, feature2index, args.batchsz, shuffle=True)\nprint('Loaded training data')\n\nvs = reader.load(args.valid, feature2index, args.batchsz)\nprint('Loaded valid data')\n\nes = reader.load(args.test, feature2index, 2)\nprint('Loaded test data')\nprint('Number of labels found: [%d]' % len(labels))\n\nmodel = classify.create_model(embeddings, labels,\n model_type=args.model_type,\n mxlen=args.mxlen,\n unif=args.unif,\n filtsz=args.filtsz,\n cmotsz=args.cmotsz,\n dropout=args.dropout,\n finetune=not args.static,\n gpus=args.gpus)\n\nclassify.fit(model, ts, vs, es, **vars(args))\n",
"id": "4900380",
"language": "Python",
"matching_score": 2.531586170196533,
"max_stars_count": 2,
"path": "python/classify_sentence.py"
},
{
"content": "import argparse\nimport codecs\nimport re\nfrom baseline.progress import create_progress_bar\nfrom baseline.utils import load_user_model, lowercase\nfrom baseline.featurizers import create_featurizer \nimport json\nimport numpy as np\n\n\ndef read_lines(tsfile):\n txts = []\n labels = []\n txt = []\n label = []\n with codecs.open(tsfile, encoding='utf-8', mode='r') as f:\n for line in f:\n states = re.split(\"\\s\", line.strip())\n if len(states) > 1:\n txt.append(states[:-1])\n label.append(states[-1])\n else:\n txts.append(txt)\n labels.append(label)\n txt = []\n label = []\n return txts, labels\n\n\nparser = argparse.ArgumentParser(description='Loads an RNNTaggerModel, predicts the labels for an input conll file and '\n 'produces the output in the same format. The second column is the '\n 'predicted label')\nparser.add_argument('--input', help='input conll', required=True)\nparser.add_argument('--output', help='output conll', required=True)\nparser.add_argument('--model', help='model file: tagger-model-tf-*', required=True)\nparser.add_argument('--mxlen', help='max. length of the sentence (provided during training)', type=int, required=True)\nparser.add_argument('--mxwlen', help='max. length of a word (provided during training)', type=int, required=True)\nparser.add_argument('--backend', choices=['tf', 'pytorch'], default='tf', help='Deep Learning Framework backend')\nparser.add_argument('--features', default=None, help='JSON file with the feature name (must match with training config)'\n 'and the feature index in the CONLL file example: {\"gaz\":1}, when '\n 'the conll file has gazetteer feature in column 2')\nparser.add_argument('--model_type', default='default', help='tagger model type')\nparser.add_argument('--featurizer_type', default='default', help='featurizer type')\n\nargs = parser.parse_args()\n\nif args.backend == 'tf':\n import baseline.tf.tagger.model as tagger\nelse:\n import baseline.pytorch.tagger.model as tagger\nmodel = tagger.load_model(args.model, model_type=args.model_type)\n\npredicted_labels = []\ninput_texts, gold_labels = read_lines(args.input)\nvocab_keys = {'word': 0, 'char': None}\n\nif args.features is not None:\n features = json.load(open(args.features))\n vocab_keys.update(features)\n\n\npg = create_progress_bar(len(input_texts))\nfeaturizer = create_featurizer(model, vocab_keys=vocab_keys, featurizer_type=args.featurizer_type)\nwith codecs.open(args.output, encoding=\"utf-8\", mode=\"w\") as f:\n for index, sen in enumerate(input_texts):\n predicted_label_sen = [x[1] for x in model.predict_text(sen, featurizer=featurizer, word_trans_fn=lowercase)]\n gold_label_sen = gold_labels[index]\n for word_feature, predicted_label, gold_label in zip(sen, predicted_label_sen, gold_label_sen):\n f.write(\"{} {} {}\\n\".format(\" \".join(word_feature), gold_label, predicted_label))\n f.write(\"\\n\")\n pg.update()\n\n",
"id": "7106897",
"language": "Python",
"matching_score": 0.8062412142753601,
"max_stars_count": 2,
"path": "python/tag.py"
},
{
"content": "from baseline.pytorch.torchy import *\nfrom baseline.utils import listify, to_spans, f_score, revlut, get_model_file\nfrom baseline.reporting import basic_reporting\nfrom baseline.progress import create_progress_bar\nfrom baseline.train import EpochReportingTrainer, create_trainer\n\n\nclass TaggerTrainerPyTorch(EpochReportingTrainer):\n\n def __init__(self, model, **kwargs):\n super(TaggerTrainerPyTorch, self).__init__()\n self.gpu = not bool(kwargs.get('nogpu', False))\n # By default support IOB1/IOB2\n self.span_type = kwargs.get('span_type', 'iob')\n print('Setting span type {}'.format(self.span_type))\n self.model = model\n self.idx2label = revlut(self.model.labels)\n self.clip = float(kwargs.get('clip', 5))\n self.optimizer, self.scheduler = pytorch_prepare_optimizer(self.model, **kwargs)\n if self.gpu:\n self.model = model.to_gpu()\n\n def process_output(self, guess, truth, sentence_lengths, ids, handle=None, txts=None):\n\n correct_labels = 0\n total_labels = 0\n truth_n = truth.cpu().numpy()\n # For fscore\n gold_count = 0\n guess_count = 0\n overlap_count = 0\n\n # For each sentence\n for b in range(len(guess)):\n\n sentence = guess[b].cpu().numpy()\n\n sentence_length = sentence_lengths[b]\n gold = truth_n[b, :sentence_length]\n correct_labels += np.sum(np.equal(sentence, gold))\n total_labels += sentence_length\n gold_chunks = to_spans(gold, self.idx2label, self.span_type)\n gold_count += len(gold_chunks)\n guess_chunks = to_spans(sentence, self.idx2label, self.span_type)\n guess_count += len(guess_chunks)\n\n overlap_chunks = gold_chunks & guess_chunks\n overlap_count += len(overlap_chunks)\n\n # Should we write a file out? If so, we have to have txts\n if handle is not None:\n id = ids[b]\n txt = txts[id]\n self._write_sentence_conll(handle, sentence, gold, txt)\n\n return correct_labels, total_labels, overlap_count, gold_count, guess_count\n\n def _write_sentence_conll(self, handle, sentence, gold, txt):\n\n if len(txt) != len(sentence):\n txt = txt[:len(sentence)]\n\n try:\n for word, truth, guess in zip(txt, gold, sentence):\n handle.write('%s %s %s\\n' % (word, self.idx2label[truth], self.idx2label[guess]))\n handle.write('\\n')\n except:\n print('ERROR: Failed to write lines... 
closing file')\n handle.close()\n\n def _test(self, ts, **kwargs):\n\n self.model.eval()\n total_correct = 0\n total_sum = 0\n total_gold_count = 0\n total_guess_count = 0\n total_overlap_count = 0\n metrics = {}\n steps = len(ts)\n conll_output = kwargs.get('conll_output', None)\n txts = kwargs.get('txts', None)\n handle = None\n if conll_output is not None and txts is not None:\n handle = open(conll_output, \"w\")\n pg = create_progress_bar(steps)\n for batch_dict in ts:\n\n x, xch, lengths, y, ids = self.model.make_input(batch_dict)\n inputs = (x, xch, lengths)\n pred = self.model(inputs)\n correct, count, overlaps, golds, guesses = self.process_output(pred, y.data, lengths, ids, handle, txts)\n total_correct += correct\n total_sum += count\n total_gold_count += golds\n total_guess_count += guesses\n total_overlap_count += overlaps\n pg.update()\n\n pg.done()\n total_acc = total_correct / float(total_sum)\n # Only show the fscore if requested\n metrics['f1'] = f_score(total_overlap_count, total_gold_count, total_guess_count)\n metrics['acc'] = total_acc\n return metrics\n\n def _train(self, ts):\n self.model.train()\n total_loss = 0\n metrics = {}\n steps = len(ts)\n if self.scheduler is not None:\n self.scheduler.step()\n #print(self.optimizer.param_groups[0]['lr'])\n pg = create_progress_bar(steps)\n for batch_dict in ts:\n\n inputs = self.model.make_input(batch_dict)\n self.optimizer.zero_grad()\n loss = self.model.compute_loss(inputs)\n total_loss += loss.item()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)\n self.optimizer.step()\n pg.update()\n\n pg.done()\n metrics['avg_loss'] = float(total_loss)/steps\n return metrics\n\n\ndef fit(model, ts, vs, es, **kwargs):\n\n do_early_stopping = bool(kwargs.get('do_early_stopping', True))\n epochs = int(kwargs.get('epochs', 20))\n model_file = get_model_file(kwargs, 'tagger', 'pytorch')\n conll_output = kwargs.get('conll_output', None)\n txts = kwargs.get('txts', None)\n\n if do_early_stopping:\n early_stopping_metric = kwargs.get('early_stopping_metric', 'acc')\n patience = kwargs.get('patience', epochs)\n print('Doing early stopping on [%s] with patience [%d]' % (early_stopping_metric, patience))\n\n reporting_fns = listify(kwargs.get('reporting', basic_reporting))\n print('reporting', reporting_fns)\n\n #validation_improvement_fn = kwargs.get('validation_improvement', None)\n\n after_train_fn = kwargs.get('after_train_fn', None)\n trainer = create_trainer(TaggerTrainerPyTorch, model, **kwargs)\n\n last_improved = 0\n max_metric = 0\n for epoch in range(epochs):\n\n trainer.train(ts, reporting_fns)\n if after_train_fn is not None:\n after_train_fn(model)\n test_metrics = trainer.test(vs, reporting_fns, phase='Valid')\n\n if do_early_stopping is False:\n model.save(model_file)\n\n elif test_metrics[early_stopping_metric] > max_metric:\n #if validation_improvement_fn is not None:\n # validation_improvement_fn(early_stopping_metric, test_metrics, epoch, max_metric, last_improved)\n last_improved = epoch\n max_metric = test_metrics[early_stopping_metric]\n print('New max %.3f' % max_metric)\n model.save(model_file)\n\n\n elif (epoch - last_improved) > patience:\n print('Stopping due to persistent failures to improve')\n break\n\n if do_early_stopping is True:\n print('Best performance on max_metric %.3f at epoch %d' % (max_metric, last_improved))\n\n if es is not None:\n print('Reloading best checkpoint')\n model = torch.load(model_file)\n trainer = create_trainer(TaggerTrainerPyTorch, model, 
**kwargs)\n trainer.test(es, reporting_fns, conll_output=conll_output, txts=txts, phase='Test')\n",
"id": "6045809",
"language": "Python",
"matching_score": 5.123575687408447,
"max_stars_count": 2,
"path": "python/baseline/pytorch/tagger/train.py"
},
{
"content": "from baseline.utils import listify, get_model_file\nfrom baseline.progress import create_progress_bar\nfrom baseline.confusion import ConfusionMatrix\nfrom baseline.reporting import basic_reporting\nfrom baseline.train import EpochReportingTrainer, create_trainer\nimport torch\nimport torch.autograd\n\n\ndef _add_to_cm(cm, y, pred):\n _, best = pred.max(1)\n yt = y.cpu().int()\n yp = best.cpu().int()\n cm.add_batch(yt.data.numpy(), yp.data.numpy())\n\n\nclass ClassifyTrainerPyTorch(EpochReportingTrainer):\n\n def __init__(self, model, **kwargs):\n super(ClassifyTrainerPyTorch, self).__init__()\n eta = kwargs.get('eta', kwargs.get('lr', 0.01))\n print('using eta [%.3f]' % eta)\n optim = kwargs.get('optim', 'sgd')\n weight_decay = float(kwargs.get('weight_decay', 0))\n print('using optim [%s]' % optim)\n self.clip = float(kwargs.get('clip', 5))\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n self.labels = model.labels\n if optim == 'adadelta':\n print('Using adadelta, ignoring learning rate')\n self.optimizer = torch.optim.Adadelta(parameters, weight_decay=weight_decay)\n elif optim == 'adam':\n self.optimizer = torch.optim.Adam(parameters, weight_decay=weight_decay)\n elif optim == 'adagrad':\n self.optimizer = torch.optim.Adagrad(parameters, weight_decay=weight_decay)\n elif optim == 'rmsprop':\n self.optimizer = torch.optim.RMSprop(model.parameters(), lr=eta, weight_decay=weight_decay)\n elif optim == 'asgd':\n self.optimizer = torch.optim.ASGD(model.parameters(), lr=eta)\n else:\n mom = kwargs.get('mom', 0.9)\n print('using mom [%.3f]' % mom)\n self.optimizer = torch.optim.SGD(parameters, lr=eta, momentum=mom, weight_decay=weight_decay)\n\n self.crit = model.create_loss().cuda()\n self.model = torch.nn.DataParallel(model).cuda()\n\n def _make_input(self, batch_dict):\n return self.model.module.make_input(batch_dict)\n\n def _test(self, loader, **kwargs):\n self.model.eval()\n total_loss = 0\n steps = len(loader)\n pg = create_progress_bar(steps)\n cm = ConfusionMatrix(self.labels)\n verbose = kwargs.get(\"verbose\", False)\n\n for batch_dict in loader:\n vec = self._make_input(batch_dict)\n y = vec[-1]\n pred = self.model(vec[:-1])\n loss = self.crit(pred, y)\n total_loss += loss.item()\n _add_to_cm(cm, y, pred)\n pg.update()\n pg.done()\n\n metrics = cm.get_all_metrics()\n metrics['avg_loss'] = total_loss/float(steps)\n if verbose:\n print(cm)\n\n return metrics\n\n def _train(self, loader):\n self.model.train()\n steps = len(loader)\n pg = create_progress_bar(steps)\n cm = ConfusionMatrix(self.labels)\n total_loss = 0\n for batch_dict in loader:\n self.optimizer.zero_grad()\n vec = self._make_input(batch_dict)\n y = vec[-1]\n pred = self.model(vec[:-1])\n loss = self.crit(pred, y)\n total_loss += loss.item()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)\n _add_to_cm(cm, y, pred)\n self.optimizer.step()\n pg.update()\n pg.done()\n\n metrics = cm.get_all_metrics()\n metrics['avg_loss'] = total_loss/float(steps)\n return metrics\n\n\ndef fit(model, ts, vs, es, **kwargs):\n \"\"\"\n Train a classifier using PyTorch\n :param model: The model to train\n :param ts: A training data set\n :param vs: A validation data set\n :param es: A test data set, can be None\n :param kwargs: See below\n \n :Keyword Arguments:\n * *do_early_stopping* (``bool``) -- Stop after eval data is not improving. Default to True\n * *epochs* (``int``) -- how many epochs. 
Default to 20\n * *outfile* -- Model output file, defaults to classifier-model.pyth\n * *patience* -- \n How many epochs where evaluation is no longer improving before we give up\n * *reporting* --\n Callbacks which may be used on reporting updates\n * *optim* --\n Optimizer to use, defaults to `sgd`\n * *eta, lr* (``float``) --\n Learning rate, defaults to 0.01\n * *mom* (``float``) --\n Momentum (SGD only), defaults to 0.9 if optim is `sgd`\n :return: \n \"\"\"\n do_early_stopping = bool(kwargs.get('do_early_stopping', True))\n verbose = bool(kwargs.get('verbose', False))\n epochs = int(kwargs.get('epochs', 20))\n model_file = get_model_file(kwargs, 'classify', 'pytorch')\n if do_early_stopping:\n early_stopping_metric = kwargs.get('early_stopping_metric', 'acc')\n patience = kwargs.get('patience', epochs)\n print('Doing early stopping on [%s] with patience [%d]' % (early_stopping_metric, patience)) \n\n reporting_fns = listify(kwargs.get('reporting', basic_reporting))\n print('reporting', reporting_fns)\n\n\n trainer = create_trainer(ClassifyTrainerPyTorch, model, **kwargs)\n\n max_metric = 0\n last_improved = 0\n\n for epoch in range(epochs):\n trainer.train(ts, reporting_fns)\n test_metrics = trainer.test(vs, reporting_fns)\n \n if do_early_stopping is False:\n model.save(model_file)\n\n elif test_metrics[early_stopping_metric] > max_metric:\n last_improved = epoch\n max_metric = test_metrics[early_stopping_metric]\n print('New max %.3f' % max_metric)\n model.save(model_file)\n\n elif (epoch - last_improved) > patience:\n print('Stopping due to persistent failures to improve')\n break\n \n if do_early_stopping is True:\n print('Best performance on max_metric %.3f at epoch %d' % (max_metric, last_improved))\n\n if es is not None:\n print('Reloading best checkpoint')\n model = torch.load(model_file)\n trainer = create_trainer(ClassifyTrainerPyTorch, model, **kwargs)\n trainer.test(es, reporting_fns, phase='Test', verbose=verbose)\n",
"id": "3539771",
"language": "Python",
"matching_score": 2.006821393966675,
"max_stars_count": 2,
"path": "python/baseline/pytorch/classify/train.py"
},
{
"content": "import torch\nimport numpy as np\nfrom baseline.utils import lookup_sentence, get_version\nfrom baseline.utils import crf_mask as crf_m\nfrom torch.autograd import Variable\nimport torch.autograd\nimport torch.nn as nn\nimport torch.nn.functional\nimport math\nimport copy\n\nPYT_MAJOR_VERSION = get_version(torch)\n\n\ndef sequence_mask(lengths):\n lens = lengths.cpu()\n max_len = torch.max(lens)\n # 1 x T\n row = torch.arange(0, max_len.item()).type_as(lens).view(1, -1)\n # B x 1\n col = lens.view(-1, 1)\n # Broadcast to B x T, compares increasing number to max\n mask = row < col\n return mask\n\n\ndef classify_bt(model, batch_time):\n tensor = torch.from_numpy(batch_time) if type(batch_time) == np.ndarray else batch_time\n probs = model(torch.autograd.Variable(tensor, requires_grad=False).cuda()).exp().data\n probs.div_(torch.sum(probs))\n results = []\n batchsz = probs.size(0)\n for b in range(batchsz):\n outcomes = [(model.labels[id_i], prob_i) for id_i, prob_i in enumerate(probs[b])]\n results.append(outcomes)\n return results\n\n\ndef predict_seq_bt(model, x, xch, lengths):\n x_t = torch.from_numpy(x) if type(x) == np.ndarray else x\n xch_t = torch.from_numpy(xch) if type(xch) == np.ndarray else xch\n len_v = torch.from_numpy(lengths) if type(lengths) == np.ndarray else lengths\n x_v = torch.autograd.Variable(x_t, requires_grad=False).cuda()\n xch_v = torch.autograd.Variable(xch_t, requires_grad=False).cuda()\n #len_v = torch.autograd.Variable(len_t, requires_grad=False)\n results = model((x_v, xch_v, len_v))\n #print(results)\n #if type(x) == np.ndarray:\n # # results = results.cpu().numpy()\n # # Fix this to not be greedy\n # results = np.argmax(results, -1)\n\n return results\n\n\ndef to_scalar(var):\n # returns a python float\n return var.view(-1).data.tolist()[0]\n\n\ndef argmax(vec):\n # return the argmax as a python int\n _, idx = torch.max(vec, 1)\n return to_scalar(idx)\n\n\ndef log_sum_exp(vec):\n max_score = vec[0, argmax(vec)]\n max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\n\n\nclass SequenceCriterion(nn.Module):\n\n def __init__(self, LossFn=nn.NLLLoss):\n super(SequenceCriterion, self).__init__()\n self.crit = LossFn(ignore_index=0, size_average=False)\n\n def forward(self, inputs, targets):\n # This is BxT, which is what we want!\n total_sz = targets.nelement()\n loss = self.crit(inputs.view(total_sz, -1), targets.view(total_sz))\n return loss\n\n\nclass StackedLSTMCell(nn.Module):\n def __init__(self, num_layers, input_size, rnn_size, dropout):\n super(StackedLSTMCell, self).__init__()\n self.dropout = nn.Dropout(dropout)\n self.num_layers = num_layers\n self.layers = nn.ModuleList()\n\n for i in range(num_layers):\n self.layers.append(nn.LSTMCell(input_size=input_size, hidden_size=rnn_size, bias=False))\n input_size = rnn_size\n\n def forward(self, input, hidden):\n h_0, c_0 = hidden\n hs, cs = [], []\n for i, layer in enumerate(self.layers):\n h_i, c_i = layer(input, (h_0[i], c_0[i]))\n input = h_i\n if i != self.num_layers - 1:\n input = self.dropout(input)\n hs += [h_i]\n cs += [c_i]\n\n hs = torch.stack(hs)\n cs = torch.stack(cs)\n\n return input, (hs, cs)\n\n\nclass StackedGRUCell(nn.Module):\n def __init__(self, num_layers, input_size, rnn_size, dropout):\n super(StackedGRUCell, self).__init__()\n self.dropout = nn.Dropout(dropout)\n self.num_layers = num_layers\n self.layers = nn.ModuleList()\n\n for i in range(num_layers):\n 
self.layers.append(nn.GRUCell(input_size=input_size, hidden_size=rnn_size))\n input_size = rnn_size\n\n def forward(self, input, hidden):\n h_0 = hidden\n hs = []\n for i, layer in enumerate(self.layers):\n h_i = layer(input, (h_0[i]))\n input = h_i\n if i != self.num_layers:\n input = self.dropout(input)\n hs += [h_i]\n\n hs = torch.stack(hs)\n\n return input, hs\n\n\ndef pytorch_rnn_cell(insz, hsz, rnntype, nlayers, dropout):\n\n if rnntype == 'gru':\n rnn = StackedGRUCell(nlayers, insz, hsz, dropout)\n else:\n rnn = StackedLSTMCell(nlayers, insz, hsz, dropout)\n return rnn\n\n\ndef pytorch_embedding(x2vec, finetune=True):\n dsz = x2vec.dsz\n lut = nn.Embedding(x2vec.vsz + 1, dsz, padding_idx=0)\n del lut.weight\n lut.weight = nn.Parameter(torch.FloatTensor(x2vec.weights),\n requires_grad=finetune)\n return lut\n\n\ndef pytorch_activation(name=\"relu\"):\n if name == \"tanh\":\n return nn.Tanh()\n if name == \"hardtanh\":\n return nn.Hardtanh()\n if name == \"prelu\":\n return nn.PReLU()\n if name == \"sigmoid\":\n return nn.Sigmoid()\n if name == \"log_sigmoid\":\n return nn.LogSigmoid()\n return nn.ReLU()\n\n\ndef pytorch_conv1d(in_channels, out_channels, fsz, unif=0, padding=0, initializer=None):\n c = nn.Conv1d(in_channels, out_channels, fsz, padding=padding)\n if unif > 0:\n c.weight.data.uniform_(-unif, unif)\n elif initializer == \"ortho\":\n nn.init.orthogonal(c.weight)\n elif initializer == \"he\" or initializer == \"kaiming\":\n nn.init.kaiming_uniform(c.weight)\n else:\n nn.init.xavier_uniform_(c.weight)\n return c\n\n\ndef pytorch_linear(in_sz, out_sz, unif=0, initializer=None):\n l = nn.Linear(in_sz, out_sz)\n if unif > 0:\n l.weight.data.uniform_(-unif, unif)\n elif initializer == \"ortho\":\n nn.init.orthogonal(l.weight)\n elif initializer == \"he\" or initializer == \"kaiming\":\n nn.init.kaiming_uniform(l.weight)\n else:\n nn.init.xavier_uniform_(l.weight)\n\n l.bias.data.zero_()\n return l\n\n\ndef pytorch_clone_module(module_, N):\n return nn.ModuleList([copy.deepcopy(module_) for _ in range(N)])\n\n\ndef _cat_dir(h):\n return torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], dim=-1)\n\n\nclass BiRNNWrapper(nn.Module):\n\n def __init__(self, rnn, nlayers):\n super(BiRNNWrapper, self).__init__()\n self.rnn = rnn\n self.nlayers = nlayers\n\n def forward(self, seq):\n output, hidden = self.rnn(seq)\n if isinstance(hidden, tuple):\n hidden = tuple(_cat_dir(h) for h in hidden)\n else:\n hidden = _cat_dir(hidden)\n return output, hidden\n\n\ndef pytorch_rnn(insz, hsz, rnntype, nlayers, dropout):\n if nlayers == 1:\n dropout = 0.0\n\n if rnntype == 'gru':\n rnn = torch.nn.GRU(insz, hsz, nlayers, dropout=dropout)\n elif rnntype == 'blstm':\n rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout, bidirectional=True)\n rnn = BiRNNWrapper(rnn, nlayers)\n else:\n rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout)\n return rnn\n\n\nclass ParallelConv(nn.Module):\n\n def __init__(self, insz, outsz, filtsz, activation_type, pdrop):\n super(ParallelConv, self).__init__()\n convs = []\n outsz_filts = outsz\n\n if type(outsz) == int:\n outsz_filts = len(filtsz) * [outsz]\n\n self.outsz = sum(outsz_filts)\n for i, fsz in enumerate(filtsz):\n pad = fsz//2\n conv = nn.Sequential(\n nn.Conv1d(insz, outsz_filts[i], fsz, padding=pad),\n pytorch_activation(activation_type)\n )\n convs.append(conv)\n # Add the module so its managed correctly\n self.convs = nn.ModuleList(convs)\n self.conv_drop = nn.Dropout(pdrop)\n\n def forward(self, input_bct):\n mots = []\n for conv in self.convs:\n # 
In Conv1d, data BxCxT, max over time\n conv_out = conv(input_bct)\n mot, _ = conv_out.max(2)\n mots.append(mot)\n mots = torch.cat(mots, 1)\n return self.conv_drop(mots)\n\n\nclass Highway(nn.Module):\n\n def __init__(self,\n input_size):\n super(Highway, self).__init__()\n self.proj = nn.Linear(input_size, input_size)\n self.transform = nn.Linear(input_size, input_size)\n self.transform.bias.data.fill_(-2.0)\n\n def forward(self, input):\n proj_result = nn.functional.relu(self.proj(input))\n proj_gate = nn.functional.sigmoid(self.transform(input))\n gated = (proj_gate * proj_result) + ((1 - proj_gate) * input)\n return gated\n\n\nclass LayerNorm(nn.Module):\n \"\"\"\n Applies Layer Normalization over a mini-batch of inputs as described in\n the paper `Layer Normalization`_ .\n\n .. math::\n y = \\frac{x - \\mathrm{E}[x]}{ \\sqrt{\\mathrm{Var}[x]} + \\epsilon} * \\gamma + \\beta\n\n This is provided in pytorch's master, and can be replaced in the near future.\n For the time, being, this code is adapted from:\n http://nlp.seas.harvard.edu/2018/04/03/attention.html\n https://github.com/pytorch/pytorch/pull/2019\n \"\"\"\n def __init__(self, num_features, eps=1e-6):\n super(LayerNorm, self).__init__()\n self.a = nn.Parameter(torch.ones(num_features))\n self.b = nn.Parameter(torch.zeros(num_features))\n self.eps = eps\n\n def forward(self, x):\n mean = x.mean(-1, keepdim=True)\n std = ((x - mean).pow(2).sum(-1, keepdim=True).div(x.size(-1) - 1) + self.eps).sqrt()\n d = (std + self.eps) + self.b\n return self.a * (x - mean) / d\n\n\ndef pytorch_lstm(insz, hsz, rnntype, nlayers, dropout, unif=0, batch_first=False, initializer=None):\n if nlayers == 1:\n dropout = 0.0\n ndir = 2 if rnntype.startswith('b') else 1\n #print('ndir: %d, rnntype: %s, nlayers: %d, dropout: %.2f, unif: %.2f' % (ndir, rnntype, nlayers, dropout, unif))\n rnn = torch.nn.LSTM(insz, hsz, nlayers, dropout=dropout, bidirectional=True if ndir > 1 else False, batch_first=batch_first)#, bias=False)\n if unif > 0:\n for weight in rnn.parameters():\n weight.data.uniform_(-unif, unif)\n elif initializer == \"ortho\":\n nn.init.orthogonal(rnn.weight_hh_l0)\n nn.init.orthogonal(rnn.weight_ih_l0)\n elif initializer == \"he\" or initializer == \"kaiming\":\n nn.init.kaiming_uniform(rnn.weight_hh_l0)\n nn.init.kaiming_uniform(rnn.weight_ih_l0)\n else:\n nn.init.xavier_uniform_(rnn.weight_hh_l0)\n nn.init.xavier_uniform_(rnn.weight_ih_l0)\n\n return rnn, ndir*hsz\n\n\ndef pytorch_prepare_optimizer(model, **kwargs):\n\n mom = kwargs.get('mom', 0.9)\n optim = kwargs.get('optim', 'sgd')\n eta = kwargs.get('eta', kwargs.get('lr', 0.01))\n decay_rate = float(kwargs.get('decay_rate', 0.0))\n decay_type = kwargs.get('decay_type', None)\n\n if optim == 'adadelta':\n optimizer = torch.optim.Adadelta(model.parameters(), lr=eta)\n elif optim == 'adam':\n optimizer = torch.optim.Adam(model.parameters(), lr=eta)\n elif optim == 'rmsprop':\n optimizer = torch.optim.RMSprop(model.parameters(), lr=eta)\n elif optim == 'asgd':\n optimizer = torch.optim.ASGD(model.parameters(), lr=eta)\n else:\n optimizer = torch.optim.SGD(model.parameters(), lr=eta, momentum=mom)\n\n scheduler = None\n if decay_rate > 0.0 and decay_type is not None:\n if decay_type == 'invtime':\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=decay_rate)\n\n return optimizer, scheduler\n\n\ndef append2seq(seq, modules):\n\n for i, module in enumerate(modules):\n seq.add_module('%s-%d' % (str(module).replace('.', 'dot'), i), module)\n\n\ndef tensor_max(tensor):\n return 
tensor.max()\n\n\ndef tensor_shape(tensor):\n return tensor.size()\n\n\ndef tensor_reverse_2nd(tensor):\n idx = torch.LongTensor([i for i in range(tensor.size(1)-1, -1, -1)])\n return tensor.index_select(1, idx)\n\n\ndef long_0_tensor_alloc(dims, dtype=None):\n lt = long_tensor_alloc(dims)\n lt.zero_()\n return lt\n\n\ndef long_tensor_alloc(dims, dtype=None):\n if type(dims) == int or len(dims) == 1:\n return torch.LongTensor(dims)\n return torch.LongTensor(*dims)\n\n\ndef prepare_src(model, tokens, mxlen=100):\n src_vocab = model.get_src_vocab()\n length = min(len(tokens), mxlen)\n x = torch.LongTensor(length).zero_()\n\n for j in range(length):\n word = tokens[j]\n if word not in src_vocab:\n if word != '':\n print(word)\n idx = 0\n else:\n idx = src_vocab[word]\n x[j] = idx\n return torch.autograd.Variable(x.view(-1, 1))\n\n\n#def beam_decode_tokens(model, src_tokens, K, idx2word, mxlen=50):\n# src = prepare_src(model, src_tokens, mxlen)\n# paths, scores = beam_decode(model, src, K)\n# path_str = []\n# for j, path in enumerate(paths):\n# path_str.append([idx2word[i] for i in path])\n# return path_str, scores\n #return beam_decode(model, src, K)\n\n\ndef show_examples_pytorch(model, es, rlut1, rlut2, embed2, mxlen, sample, prob_clip, max_examples, reverse):\n si = np.random.randint(0, len(es))\n\n batch_dict = es[si]\n\n src_array = batch_dict['src']\n tgt_array = batch_dict['dst']\n src_len = batch_dict['src_len']\n\n if max_examples > 0:\n max_examples = min(max_examples, src_array.size(0))\n src_array = src_array[0:max_examples]\n tgt_array = tgt_array[0:max_examples]\n src_len = src_len[0:max_examples]\n\n # TODO: fix this, check for GPU first\n src_array = src_array.cuda()\n \n for src_len_i, src_i, tgt_i in zip(src_len, src_array, tgt_array):\n\n print('========================================================================')\n src_len_i = torch.ones(1).fill_(src_len_i).type_as(src_len)\n\n sent = lookup_sentence(rlut1, src_i.cpu().numpy(), reverse=reverse)\n print('[OP] %s' % sent)\n sent = lookup_sentence(rlut2, tgt_i.cpu().numpy())\n print('[Actual] %s' % sent)\n src_dict = {'src': torch.autograd.Variable(src_i.view(1, -1), requires_grad=False),\n 'src_len': torch.autograd.Variable(src_len_i, requires_grad=False)}\n dst_i = model.run(src_dict)[0][0]\n dst_i = [idx.item() for idx in dst_i]\n sent = lookup_sentence(rlut2, dst_i)\n print('Guess: %s' % sent)\n print('------------------------------------------------------------------------')\n\n\n# Some of this code is borrowed from here:\n# https://github.com/rguthrie3/DeepLearningForNLPInPytorch\ndef argmax(vec):\n # return the argmax as a python int\n _, idx = torch.max(vec, 1)\n return idx.data[0]\n\n\n# Compute log sum exp in a numerically stable way for the forward algorithm\ndef log_sum_exp(vec):\n max_score = vec[0, argmax(vec)]\n max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\n return max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\n\n\ndef vec_log_sum_exp(vec, dim):\n \"\"\"Vectorized version of log-sum-exp\n\n :param vec: Vector\n :param dim: What dimension to operate on\n :return:\n \"\"\"\n max_scores, idx = torch.max(vec, dim, keepdim=True)\n max_scores_broadcast = max_scores.expand_as(vec)\n return max_scores + torch.log(torch.sum(torch.exp(vec - max_scores_broadcast), dim, keepdim=True))\n\ndef crf_mask(vocab, span_type, s_idx, e_idx, pad_idx=None):\n \"\"\"Create a CRF mask.\n\n Returns a Tensor with valid transitions as a 0 and invalid as a 1 for easy use with 
`masked_fill`\n \"\"\"\n np_mask = crf_m(vocab, span_type, s_idx, e_idx, pad_idx=pad_idx)\n return (torch.from_numpy(np_mask) == 0)\n\nclass CRF(nn.Module):\n\n def __init__(self, n_tags, idxs=None, vocab=None, span_type=None, pad_idx=None):\n \"\"\"Initialize the object.\n\n :param n_tags: int The number of tags in your output (emission size)\n :param idxs: Tuple(int. int) The index of the start and stop symbol\n in emissions.\n :param vocab: The label vocab of the form vocab[string]: int\n :param span_type: The tagging span_type used. `IOB`, `IOB2`, or `IOBES`\n :param pds_idx: The index of the pad symbol in the vocab\n\n Note:\n if idxs is none then the CRF adds these symbols to the emission\n vectors and n_tags is assumed to be the number of output tags.\n\n if idxs is not none then the first element is assumed to be the\n start index and the second idx is assumed to be the end index. In\n this case n_tags is assumed to include the start and end symbols.\n\n if vocab is not None then a transition mask will be created that\n limits illegal transitions.\n \"\"\"\n super(CRF, self).__init__()\n\n if idxs is None:\n self.start_idx = n_tags\n self.end_idx = n_tags + 1\n self.n_tags = n_tags + 2\n self.add_ends = True\n else:\n self.start_idx, self.end_idx = idxs\n self.n_tags = n_tags\n self.add_ends = False\n self.span_type = None\n if vocab is not None:\n assert span_type is not None, \"To mask transitions you need to provide a tagging span_type, choices are `IOB`, `BIO` (or `IOB2`), and `IOBES`\"\n # If there weren't start and end idx provided we need to add them.\n if idxs is None:\n vocab = vocab.copy()\n vocab['<GO>'] = self.start_idx\n vocab['<EOS>'] = self.end_idx\n self.span_type = span_type\n self.register_buffer('mask', crf_mask(vocab, span_type, self.start_idx, self.end_idx, pad_idx))\n else:\n self.mask = None\n\n self.transitions_p = nn.Parameter(torch.Tensor(self.n_tags, self.n_tags).zero_())\n\n @property\n def transitions(self):\n if self.mask is not None:\n return self.transitions_p.masked_fill(self.mask, -1e4)\n return self.transitions_p\n\n def extra_repr(self):\n str_ = \"n_tags=%d\" % self.n_tags\n if self.mask is not None:\n str_ += \", masked=True, span_type=%s\" % self.span_type\n return str_\n\n @staticmethod\n def _prep_input(input_):\n ends = torch.Tensor(input_.size()[0], 2).fill_(-1000.).to(input_.device)\n return torch.cat([input_, ends], dim=1)\n\n def neg_log_loss(self, unary, tags):\n if self.add_ends:\n unary = CRF._prep_input(unary)\n viterbi_score = self.forward(unary)\n gold_score = self.score_sentence(unary, tags)\n return viterbi_score - gold_score\n\n def score_sentence(self, unary, tags):\n \"\"\"\"Get the score of a provided tag sequence.\"\"\"\n # Don't apply the mask each time use use self.transitions, save compute\n transitions = self.transitions\n score = torch.autograd.Variable(torch.Tensor([0]).cuda())\n tags = torch.cat([torch.LongTensor([self.start_idx]).cuda(), tags])\n for i, unary_t in enumerate(unary):\n score = score + transitions[tags[i + 1], tags[i]] + unary_t[tags[i + 1]]\n score = score + transitions[self.end_idx, tags[-1]]\n return score\n\n def forward(self, unary):\n \"\"\"Vectorized forward algorithm for CRF layer\n\n :param unary: The observations\n :param transitions: The transitions\n :param start_idx: The index of the start position\n :param end_idx: The index of the end position\n :return: Alphas\n \"\"\"\n # Do the forward algorithm to compute the partition function\n init_alphas = torch.Tensor(1, 
self.n_tags).fill_(-1000.).to(unary.device)\n # START_TAG has all of the score.\n init_alphas[0][self.start_idx] = 0.\n\n # Wrap in a variable so that we will get automatic backprop\n alphas = torch.autograd.Variable(init_alphas)\n\n # Don't apply the mask each time use use self.transitions, save compute\n transitions = self.transitions\n\n # Iterate through the sentence\n for t, unary_t in enumerate(unary):\n emit_scores_transpose = unary_t.view(-1, 1)\n next_tag_var = alphas + emit_scores_transpose + transitions\n scores = vec_log_sum_exp(next_tag_var, 1).transpose(0, 1)\n alphas = scores\n\n terminal_var = alphas + transitions[self.end_idx]\n alpha = log_sum_exp(terminal_var)\n return alpha\n\n def decode(self, unary):\n if self.add_ends:\n unary = CRF._prep_input(unary)\n backpointers = []\n # Don't apply the mask each time use use self.transitions, save compute\n transitions = self.transitions\n\n inits = torch.Tensor(1, self.n_tags).fill_(-10000.).cuda()\n inits[0][self.start_idx] = 0\n\n # alphas at step i holds the viterbi variables for step i-1\n alphas = torch.autograd.Variable(inits)\n\n for unary_t in unary:\n next_tag_var = alphas + transitions\n viterbi, best_tag_ids = torch.max(next_tag_var, 1)\n backpointers.append(best_tag_ids.data)\n alphas = (viterbi + unary_t).view(1, -1)\n\n # Transition to STOP_TAG\n terminal_var = alphas + transitions[self.end_idx]\n best_tag_id = argmax(terminal_var)\n path_score = terminal_var[0][best_tag_id]\n\n # Follow the back pointers to decode the best path.\n best_path = [best_tag_id]\n for backpointers_t in reversed(backpointers):\n best_tag_id = backpointers_t[best_tag_id]\n best_path.append(best_tag_id)\n # Pop off the start tag (we dont want to return that to the caller)\n start = best_path.pop()\n assert start == self.start_idx\n best_path.reverse()\n return torch.LongTensor(best_path), path_score\n",
"id": "9467373",
"language": "Python",
"matching_score": 2.5248336791992188,
"max_stars_count": 2,
"path": "python/baseline/pytorch/torchy.py"
},
{
"content": "import os\nimport json\nimport pytest\nimport numpy as np\ntorch = pytest.importorskip('torch')\nfrom torch.optim import SGD\nfrom baseline.w2v import RandomInitVecModel\nfrom baseline.pytorch.tagger.model import create_model\nfrom baseline.pytorch.torchy import CRF, crf_mask\nfrom baseline.utils import crf_mask as np_crf\n\nHSZ = 100\nWSZ = 30\nS = '<GO>'\nE = '<EOS>'\nP = '<PAD>'\nSPAN_TYPE=\"IOB2\"\nLOC = os.path.dirname(os.path.realpath(__file__))\n\n@pytest.fixture\ndef label_vocab():\n LOC = os.path.dirname(os.path.realpath(__file__))\n vocab_loc = os.path.join(LOC, \"test_data\", \"crf_vocab\")\n return json.load(open(vocab_loc))\n\n@pytest.fixture\ndef crf(label_vocab):\n return CRF(\n len(label_vocab),\n (label_vocab[S], label_vocab[E]),\n label_vocab, SPAN_TYPE, label_vocab[P]\n )\n\n@pytest.fixture\ndef embeds():\n embeds = {}\n embeds['word'] = RandomInitVecModel(HSZ, {chr(i): i for i in range(100)})\n embeds['char'] = RandomInitVecModel(WSZ, {chr(i): i for i in range(100)})\n return embeds\n\n@pytest.fixture\ndef model(label_vocab, embeds):\n return create_model(\n label_vocab, embeds,\n crf=True, crf_mask=True, span_type=SPAN_TYPE,\n hsz=HSZ, cfiltsz=[3], wsz=WSZ,\n layers=2, rnntype=\"blstm\"\n )\n\ndef test_mask_is_applied(label_vocab, crf):\n t = crf.transitions.detach().numpy()\n assert t[label_vocab['<GO>'], label_vocab['O']] == -1e4\n\ndef test_mask_skipped(label_vocab):\n crf = CRF(\n len(label_vocab),\n (label_vocab[S], label_vocab[E]),\n )\n t = crf.transitions.detach().numpy()\n assert t[label_vocab['<GO>'], label_vocab['O']] != -1e4\n\ndef test_error_without_type(label_vocab):\n with pytest.raises(AssertionError):\n _ = CRF(\n len(label_vocab),\n (label_vocab[S], label_vocab[E]),\n label_vocab\n )\n\n# Using .cuda() in pytest call is having problems\n# From turning CUDA_VISIBLE_DEVICES off for tensorflow?\n\n# def test_mask_follows_crf_device(crf):\n# assert crf.mask.device == crf.transitions_p.device\n# crf = crf.cuda()\n# assert crf.mask.device == crf.transitions_p.device\n\n# def test_mask_same_after_update(label_vocab, crf):\n# crf = crf.cuda()\n# opt = SGD(crf.parameters(), lr=0.01)\n# m1 = crf.mask.cpu().numpy()\n# t1 = crf.transitions_p.cpu().detach().numpy()\n# gold = torch.LongTensor([3, 9, 9, 4, 6, 7, 5]).cuda()\n# emissions = torch.rand(len(gold), len(label_vocab)).cuda()\n# l = crf.neg_log_loss(emissions, gold)\n# l.backward()\n# opt.step()\n# m2 = crf.mask.cpu().numpy()\n# t2 = crf.transitions_p.cpu().detach().numpy()\n# np.testing.assert_allclose(m1, m2)\n# with pytest.raises(AssertionError):\n# np.testing.assert_allclose(t1, t2)\n\ndef test_mask_used_in_model(label_vocab, model):\n t = model.crf.transitions.detach().numpy()\n assert t[label_vocab['<GO>'], label_vocab['O']] == -1e4\n\ndef test_mask_not_used_in_model(label_vocab, embeds):\n model = create_model(\n label_vocab, embeds,\n crf=True,\n hsz=HSZ, cfiltsz=[3], wsz=WSZ,\n layers=2, rnntype=\"blstm\"\n )\n t = model.crf.transitions.detach().numpy()\n assert t[label_vocab['<GO>'], label_vocab['O']] != -1e4\n\ndef test_error_when_mask_and_no_span(label_vocab, embeds):\n with pytest.raises(AssertionError):\n model = create_model(\n label_vocab, embeds,\n crf=True, crf_mask=True,\n hsz=HSZ, cfiltsz=[3], wsz=WSZ,\n layers=2, rnntype=\"blstm\"\n )\n",
"id": "9716863",
"language": "Python",
"matching_score": 1.2192057371139526,
"max_stars_count": 2,
"path": "python/tests/test_crf_pytorch.py"
},
{
"content": "import tensorflow as tf\nimport numpy as np\nfrom google.protobuf import text_format\nfrom tensorflow.python.platform import gfile\nimport json\nfrom tensorflow.contrib.layers import fully_connected, xavier_initializer\nfrom baseline.utils import fill_y, listify\nfrom baseline.model import Classifier, load_classifier_model, create_classifier_model\nfrom baseline.tf.tfy import lstm_cell_w_dropout, parallel_conv, get_vocab_file_suffixes, pool_chars\nfrom baseline.version import __version__\nimport os\n\n\nclass ClassifyParallelModel(Classifier):\n\n def __init__(self, create_fn, embeddings, labels, **kwargs):\n super(ClassifyParallelModel, self).__init__()\n # We need to remove these because we may be calling back to our caller, and we need\n # the condition of calling to be non-parallel\n gpus = kwargs.pop('gpus', -1)\n # If the gpu ID is set to -1, use CUDA_VISIBLE_DEVICES to figure it out\n if gpus == -1:\n gpus = len(os.getenv('CUDA_VISIBLE_DEVICES', os.getenv('NV_GPU', '0')).split(','))\n print('Num GPUs', gpus)\n\n self.labels = labels\n nc = len(labels)\n\n self.saver = None\n self.replicas = []\n\n self.mxlen = int(kwargs.get('mxlen', 100))\n self.mxwlen = int(kwargs.get('mxwlen', 40))\n\n # This only exists to make exporting easier\n self.pdrop_value = kwargs.get('dropout', 0.5)\n # This only exists to make exporting easier\n self.x = kwargs.get('x', tf.placeholder(tf.int32, [None, self.mxlen], name=\"x_parallel\"))\n self.y = kwargs.get('y', tf.placeholder(tf.int32, [None, nc], name=\"y_parallel\"))\n self.lengths = kwargs.get('lengths', tf.placeholder(tf.int32, [None], name=\"lengths_parallel\"))\n self.pkeep = kwargs.get('pkeep', tf.placeholder_with_default(1.0, shape=(), name=\"pkeep\"))\n self.pdrop_value = kwargs.get('dropout', 0.5)\n\n x_splits = tf.split(self.x, gpus)\n y_splits = tf.split(self.y, gpus)\n lengths_splits = tf.split(self.lengths, gpus)\n xch_splits = None\n c2v = embeddings.get('char')\n if c2v is not None:\n self.xch = kwargs.get('xch', tf.placeholder(tf.int32, [None, self.mxlen, self.mxwlen], name='xch_parallel'))\n xch_splits = tf.split(self.xch, gpus)\n\n losses = []\n self.labels = labels\n\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:\n with tf.device(tf.DeviceSpec(device_type=\"CPU\")):\n self.inference = create_fn(embeddings, labels, sess=sess, **kwargs)\n for i in range(gpus):\n with tf.device(tf.DeviceSpec(device_type='GPU', device_index=i)):\n replica = create_fn(embeddings, labels, sess=sess, x=x_splits[i], y=y_splits[i],\n xch=xch_splits[i] if xch_splits is not None else None,\n lengths=lengths_splits[i],\n pkeep=self.pkeep, **kwargs)\n self.replicas.append(replica)\n loss_op = replica.create_loss()\n losses.append(loss_op)\n\n self.loss = tf.reduce_mean(tf.stack(losses))\n\n self.sess = sess\n self.best = self.inference.best\n\n def create_loss(self):\n return self.loss\n\n def create_test_loss(self):\n return self.inference.create_test_loss()\n\n def save(self, model_base):\n return self.inference.save(model_base)\n\n def set_saver(self, saver):\n self.inference.saver = saver\n self.saver = saver\n\n def make_input(self, batch_dict, do_dropout=False):\n if do_dropout is False:\n return self.inference.make_input(batch_dict)\n x = batch_dict['x']\n y = batch_dict.get('y', None)\n xch = batch_dict.get('xch')\n lengths = batch_dict.get('lengths')\n pkeep = 1.0 - self.pdrop_value\n feed_dict = {self.x: x, self.pkeep: pkeep}\n\n if hasattr(self, 'lengths') and self.lengths is not 
None:\n feed_dict[self.lengths] = lengths\n if hasattr(self, 'xch') and xch is not None and self.xch is not None:\n feed_dict[self.xch] = xch\n\n if y is not None:\n feed_dict[self.y] = fill_y(len(self.labels), y)\n return feed_dict\n\n\nclass WordClassifierBase(Classifier):\n \"\"\"Base for all baseline implementations of word-based classifiers\n \n This class provides a loose skeleton around which the baseline models (currently all word-based)\n are built. This essentially consists of dividing up the network into a logical separation between \"pooling\",\n or the conversion of temporal data to a fixed representation, and \"stacking\" layers, which are (optional)\n fully-connected layers below, finally followed with a penultimate layer that is projected to the output space.\n \n For instance, the baseline convolutional and LSTM models implement pooling as CMOT, and LSTM last time\n respectively, whereas, neural bag-of-words (NBoW) do simple max or mean pooling followed by multiple fully-\n connected layers.\n \n \"\"\"\n def __init__(self):\n \"\"\"Base\n \"\"\"\n super(WordClassifierBase, self).__init__()\n\n def set_saver(self, saver):\n self.saver = saver\n\n def save_values(self, basename):\n self.saver.save(self.sess, basename)\n\n def save_md(self, basename):\n\n path = basename.split('/')\n base = path[-1]\n outdir = '/'.join(path[:-1])\n\n state = {\"mxlen\": self.mxlen, \"version\": __version__, 'use_chars': False}\n if self.mxwlen is not None:\n state[\"mxwlen\"] = self.mxwlen\n if self.xch is not None:\n state['use_chars'] = True\n with open(basename + '.state', 'w') as f:\n json.dump(state, f)\n\n #tf.train.export_meta_graph(filename=os.path.join(outdir, base + '.meta'),\n # as_text=True)\n #sub_graph = remove_parallel_nodes(self.sess.graph_def)\n tf.train.write_graph(self.sess.graph_def, outdir, base + '.graph', as_text=False)\n #tf.train.write_graph(sub_graph, outdir, base + '.graph', as_text=False)\n with open(basename + '.saver', 'w') as f:\n f.write(str(self.saver.as_saver_def()))\n\n with open(basename + '.labels', 'w') as f:\n json.dump(self.labels, f)\n\n for key in self.vocab.keys():\n with open(basename + '-{}.vocab'.format(key), 'w') as f:\n json.dump(self.vocab[key], f)\n\n def load_md(self, basename):\n\n state_file = basename + '.state'\n # Backwards compat for now\n if not os.path.exists(state_file):\n return\n\n with open(state_file, 'r') as f:\n state = json.load(f)\n self.mxlen = state.get('mxlen')\n self.mxwlen = state.get('mxwlen')\n\n def save(self, basename):\n self.save_md(basename)\n self.save_values(basename)\n\n def create_test_loss(self):\n with tf.name_scope(\"test_loss\"):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=tf.cast(self.y, \"float\"))\n all_loss = tf.reduce_mean(loss)\n return all_loss\n\n def create_loss(self):\n \"\"\"The loss function is currently provided here, although this is not a great place for it\n as it provides a coupling between the model and its loss function. 
Just here for convenience at the moment.\n \n :return: \n \"\"\"\n with tf.name_scope(\"loss\"):\n loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=tf.cast(self.y, \"float\"))\n all_loss = tf.reduce_mean(loss)\n return all_loss\n\n def classify(self, batch_dict):\n \"\"\"This method provides a basic routine to run \"inference\" or predict outputs based on data.\n It runs the `x` tensor in (`BxT`), and turns dropout off, running the network all the way to a softmax\n output\n \n :param batch_dict: (``dict``) contains `x` tensor of input (`BxT`)\n :return: Each outcome as a ``list`` of tuples `(label, probability)`\n \"\"\"\n feed_dict = self.make_input(batch_dict)\n probs = self.sess.run(tf.nn.softmax(self.logits), feed_dict=feed_dict)\n results = []\n batchsz = probs.shape[0]\n for b in range(batchsz):\n outcomes = [(self.labels[id_i], prob_i) for id_i, prob_i in enumerate(probs[b])]\n results.append(outcomes)\n return results\n\n def make_input(self, batch_dict, do_dropout=False):\n x = batch_dict['x']\n y = batch_dict.get('y', None)\n xch = batch_dict.get('xch')\n lengths = batch_dict.get('lengths')\n pkeep = 1.0 - self.pdrop_value if do_dropout else 1.0\n feed_dict = {self.x: x, self.pkeep: pkeep}\n\n if hasattr(self, 'lengths') and self.lengths is not None:\n feed_dict[self.lengths] = lengths\n if hasattr(self, 'xch') and xch is not None and self.xch is not None:\n feed_dict[self.xch] = xch\n\n if y is not None:\n feed_dict[self.y] = fill_y(len(self.labels), y)\n return feed_dict\n\n def get_labels(self):\n \"\"\"Get the string labels back\n \n :return: labels\n \"\"\"\n return self.labels\n\n def get_vocab(self, name='word'):\n \"\"\"Get the vocab back, as a ``dict`` of ``str`` keys mapped to ``int`` values\n \n :return: A ``dict`` of words mapped to indices\n \"\"\"\n return self.vocab.get(name)\n\n @classmethod\n def load(cls, basename, **kwargs):\n \"\"\"Reload the model from a graph file and a checkpoint\n \n The model that is loaded is independent of the pooling and stacking layers, making this class reusable\n by sub-classes.\n \n :param basename: The base directory to load from\n :param kwargs: See below\n \n :Keyword Arguments:\n * *session* -- An optional tensorflow session. 
If not passed, a new session is\n created\n \n :return: A restored model\n \"\"\"\n sess = kwargs.get('session', kwargs.get('sess', tf.Session()))\n model = cls()\n with open(basename + '.saver') as fsv:\n saver_def = tf.train.SaverDef()\n text_format.Merge(fsv.read(), saver_def)\n\n checkpoint_name = kwargs.get('checkpoint_name', basename)\n checkpoint_name = checkpoint_name or basename\n\n with gfile.FastGFile(basename + '.graph', 'rb') as f:\n gd = tf.GraphDef()\n gd.ParseFromString(f.read())\n sess.graph.as_default()\n tf.import_graph_def(gd, name='')\n try:\n sess.run(saver_def.restore_op_name, {saver_def.filename_tensor_name: checkpoint_name})\n except:\n # Backwards compat\n sess.run(saver_def.restore_op_name, {saver_def.filename_tensor_name: checkpoint_name + \".model\"})\n\n model.x = tf.get_default_graph().get_tensor_by_name('x:0')\n model.y = tf.get_default_graph().get_tensor_by_name('y:0')\n try:\n model.xch = tf.get_default_graph().get_tensor_by_name('xch:0')\n except:\n model.xch = None\n try:\n model.lengths = tf.get_default_graph().get_tensor_by_name('lengths:0')\n except:\n model.lengths = None\n model.pkeep = tf.get_default_graph().get_tensor_by_name('pkeep:0')\n model.best = tf.get_default_graph().get_tensor_by_name('output/best:0')\n model.logits = tf.get_default_graph().get_tensor_by_name('output/logits:0')\n with open(basename + '.labels', 'r') as f:\n model.labels = json.load(f)\n\n model.vocab = {}\n\n # Backwards compat\n if os.path.exists(basename + '.vocab'):\n with open(basename + '.vocab', 'r') as f:\n model.vocab['word'] = json.load(f)\n # Grep for all features\n else:\n vocab_suffixes = get_vocab_file_suffixes(basename)\n for ty in vocab_suffixes:\n vocab_file = '{}-{}.vocab'.format(basename, ty)\n print('Reading {}'.format(vocab_file))\n with open(vocab_file, 'r') as f:\n model.vocab[ty] = json.load(f)\n\n model.sess = sess\n model.load_md(basename)\n return model\n\n @classmethod\n def create(cls, embeddings, labels, **kwargs):\n \"\"\"The main method for creating all :class:`WordBasedModel` types.\n \n This method instantiates a model with pooling and optional stacking layers.\n Many of the arguments provided are reused by each implementation, but some sub-classes need more\n information in order to properly initialize. For this reason, the full list of keyword args are passed\n to the :method:`pool` and :method:`stacked` methods.\n \n :param embeddings: This is a dictionary of embeddings, mapped to their numerical indices in the lookup table\n :param labels: This is a list of the `str` labels\n :param kwargs: See below\n \n :Keyword Arguments:\n * *model_type* -- The string name for the model (defaults to `default`)\n * *session* -- An optional tensorflow session. 
If not passed, a new session is\n created\n * *finetune* -- Are we doing fine-tuning of word embeddings (defaults to `True`)\n * *mxlen* -- The maximum signal (`x` tensor temporal) length (defaults to `100`)\n * *dropout* -- This indicates how much dropout should be applied to the model when training.\n * *pkeep* -- By default, this is a `tf.placeholder`, but it can be passed in as part of a sub-graph.\n This is useful for exporting tensorflow models or potentially for using input tf queues\n * *x* -- By default, this is a `tf.placeholder`, but it can be optionally passed as part of a sub-graph.\n * *y* -- By default, this is a `tf.placeholder`, but it can be optionally passed as part of a sub-graph.\n * *filtsz* -- This is actually a top-level param due to an unfortunate coupling between the pooling layer\n and the input, which, for convolution, requires input padding.\n \n :return: A fully-initialized tensorflow classifier \n \"\"\"\n\n gpus = kwargs.get('gpus')\n # If we are parallelized, we will use the wrapper object ClassifyParallelModel and this creation function\n if gpus is not None:\n return ClassifyParallelModel(cls.create, embeddings, labels, **kwargs)\n sess = kwargs.get('sess', tf.Session())\n finetune = bool(kwargs.get('finetune', True))\n w2v = embeddings['word']\n c2v = embeddings.get('char')\n\n model = cls()\n word_dsz = w2v.dsz\n wchsz = 0\n model.labels = labels\n nc = len(labels)\n\n model.vocab = {}\n for k in embeddings.keys():\n model.vocab[k] = embeddings[k].vocab\n\n model.mxlen = int(kwargs.get('mxlen', 100))\n model.mxwlen = None\n # This only exists to make exporting easier\n model.pkeep = kwargs.get('pkeep', tf.placeholder_with_default(1.0, shape=(), name=\"pkeep\"))\n model.pdrop_value = kwargs.get('dropout', 0.5)\n # This only exists to make exporting easier\n model.x = kwargs.get('x', tf.placeholder(tf.int32, [None, model.mxlen], name=\"x\"))\n model.y = kwargs.get('y', tf.placeholder(tf.int32, [None, nc], name=\"y\"))\n model.lengths = kwargs.get('lengths', tf.placeholder(tf.int32, [None], name=\"lengths\"))\n model.xch = None\n\n with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):\n\n seed = np.random.randint(10e8)\n init = tf.random_uniform_initializer(-0.05, 0.05, dtype=tf.float32, seed=seed)\n xavier_init = xavier_initializer(True, seed)\n\n # Use pre-trained embeddings from word2vec\n with tf.name_scope(\"LUT\"):\n W = tf.get_variable(\"W\",\n initializer=tf.constant_initializer(w2v.weights, dtype=tf.float32,\n verify_shape=True),\n shape=[len(w2v.vocab), w2v.dsz], trainable=finetune)\n e0 = tf.scatter_update(W, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, word_dsz]))\n with tf.control_dependencies([e0]):\n word_embeddings = tf.nn.embedding_lookup(W, model.x)\n\n if c2v is not None:\n model.mxwlen = int(kwargs.get('mxwlen', 40))\n model.xch = kwargs.get('xch', tf.placeholder(tf.int32, [None, model.mxlen, model.mxwlen], name='xch'))\n char_dsz = c2v.dsz\n with tf.name_scope(\"CharLUT\"):\n Wch = tf.get_variable(\"Wch\",\n initializer=tf.constant_initializer(c2v.weights, dtype=tf.float32,\n verify_shape=True),\n shape=[len(c2v.vocab), c2v.dsz], trainable=True)\n ech0 = tf.scatter_update(Wch, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, char_dsz]))\n char_comp, wchsz = pool_chars(model.xch, Wch, ech0, char_dsz, **kwargs)\n word_embeddings = tf.concat(values=[word_embeddings, char_comp], axis=2)\n\n input_sz = word_dsz + wchsz\n pooled = model.pool(word_embeddings, input_sz, init, **kwargs)\n stacked = 
model.stacked(pooled, init, **kwargs)\n\n # For fully connected layers, use xavier (glorot) transform\n with tf.contrib.slim.arg_scope(\n [fully_connected],\n weights_initializer=xavier_init):\n with tf.name_scope(\"output\"):\n model.logits = tf.identity(fully_connected(stacked, nc, activation_fn=None), name=\"logits\")\n model.best = tf.argmax(model.logits, 1, name=\"best\")\n model.sess = sess\n # writer = tf.summary.FileWriter('blah', sess.graph)\n return model\n\n def pool(self, word_embeddings, dsz, init, **kwargs):\n \"\"\"This method performs a transformation between a temporal signal and a fixed representation\n \n :param word_embeddings: The output of the embedded lookup, which is the starting point for this operation\n :param dsz: The depth of the embeddings\n :param init: The tensorflow initializer to use for these methods\n :param kwargs: Model-specific arguments\n :return: A fixed representation of the data\n \"\"\"\n pass\n\n def stacked(self, pooled, init, **kwargs):\n \"\"\"Stack 1 or more hidden layers, optionally (forming an MLP)\n\n :param pooled: The fixed representation of the model\n :param init: The tensorflow initializer\n :param kwargs: See below\n\n :Keyword Arguments:\n * *hsz* -- (``int``) The number of hidden units (defaults to `100`)\n\n :return: The final layer\n \"\"\"\n\n hszs = listify(kwargs.get('hsz', []))\n if len(hszs) == 0:\n return pooled\n\n in_layer = pooled\n for i, hsz in enumerate(hszs):\n with tf.variable_scope('fc-{}'.format(i)):\n with tf.contrib.slim.arg_scope(\n [fully_connected],\n weights_initializer=init):\n fc = fully_connected(in_layer, hsz, activation_fn=tf.nn.relu)\n in_layer = tf.nn.dropout(fc, self.pkeep)\n return in_layer\n\n\nclass ConvModel(WordClassifierBase):\n \"\"\"Current default model for `baseline` classification. Parallel convolutions of varying receptive field width\n \n \"\"\"\n def __init__(self):\n \"\"\"Constructor \n \"\"\"\n super(ConvModel, self).__init__()\n\n def pool(self, word_embeddings, dsz, init, **kwargs):\n \"\"\"Do parallel convolutional filtering with varied receptive field widths, followed by max-over-time pooling\n \n :param word_embeddings: The word embeddings, which are inputs here\n :param dsz: The depth of the word embeddings\n :param init: The tensorflow initializer\n :param kwargs: See below\n \n :Keyword Arguments:\n * *cmotsz* -- (``int``) The number of convolutional feature maps for each filter\n These are MOT-filtered, leaving this # of units per parallel filter\n * *filtsz* -- (``list``) This is a list of filter widths to use\n \n \n :return: \n \"\"\"\n cmotsz = kwargs['cmotsz']\n filtsz = kwargs['filtsz']\n\n combine, _ = parallel_conv(word_embeddings, filtsz, dsz, cmotsz)\n # Definitely drop out\n with tf.name_scope(\"dropout\"):\n combine = tf.nn.dropout(combine, self.pkeep)\n return combine\n\n\nclass LSTMModel(WordClassifierBase):\n \"\"\"A simple single-directional single-layer LSTM. 
No layer-stacking.\n \n \"\"\"\n def __init__(self):\n super(LSTMModel, self).__init__()\n\n def pool(self, word_embeddings, dsz, init, **kwargs):\n \"\"\"LSTM with dropout yielding a final-state as output\n \n :param word_embeddings: The input word embeddings\n :param dsz: The input word embedding depth\n :param init: The tensorflow initializer to use (currently ignored)\n :param kwargs: See below\n \n :Keyword Arguments:\n * *hsz* -- (``int``) The number of hidden units (defaults to `100`)\n * *cmotsz* -- (``int``) An alias for `hsz`\n \n :return: \n \"\"\"\n hsz = kwargs.get('rnnsz', kwargs.get('hsz', 100))\n if type(hsz) is list:\n hsz = hsz[0]\n char_rnnfwd = lstm_cell_w_dropout(hsz, self.pkeep)\n rnnout, final_state = tf.nn.dynamic_rnn(char_rnnfwd, word_embeddings, dtype=tf.float32, sequence_length=self.lengths)\n\n output_state = final_state.h\n combine = tf.reshape(output_state, [-1, hsz])\n return combine\n\n\nclass NBowBase(WordClassifierBase):\n \"\"\"Neural Bag-of-Words Model base class. Defines stacking of fully-connected layers, but leaves pooling to derived\n \"\"\"\n def __init__(self):\n super(NBowBase, self).__init__()\n\n def stacked(self, pooled, init, **kwargs):\n \"\"\"Force at least one hidden layer here\n\n :param pooled:\n :param init:\n :param kwargs:\n :return:\n \"\"\"\n kwargs['hsz'] = kwargs.get('hsz', [100])\n return super(NBowBase, self).stacked()\n\n\nclass NBowModel(NBowBase):\n \"\"\"Neural Bag-of-Words average pooling (standard) model\"\"\"\n def __init__(self):\n super(NBowModel, self).__init__()\n\n def pool(self, word_embeddings, dsz, init, **kwargs):\n \"\"\"Do average pooling on input embeddings, yielding a `dsz` output layer\n \n :param word_embeddings: The word embedding input\n :param dsz: The word embedding depth\n :param init: The tensorflow initializer\n :param kwargs: None\n :return: The average pooling representation\n \"\"\"\n return tf.reduce_mean(word_embeddings, 1, keep_dims=False)\n\n\nclass NBowMaxModel(NBowBase):\n \"\"\"Max-pooling model for Neural Bag-of-Words. Sometimes does better than avg pooling\n \"\"\"\n def __init__(self):\n super(NBowMaxModel, self).__init__()\n\n def pool(self, word_embeddings, dsz, init, **kwargs):\n \"\"\"Do max pooling on input embeddings, yielding a `dsz` output layer\n \n :param word_embeddings: The word embedding input\n :param dsz: The word embedding depth\n :param init: The tensorflow initializer\n :param kwargs: None\n :return: The max pooling representation\n \"\"\"\n return tf.reduce_max(word_embeddings, 1, keep_dims=False)\n\nBASELINE_CLASSIFICATION_MODELS = {\n 'default': ConvModel.create,\n 'lstm': LSTMModel.create,\n 'nbow': NBowModel.create,\n 'nbowmax': NBowMaxModel.create\n}\nBASELINE_CLASSIFICATION_LOADERS = {\n 'default': ConvModel.load,\n 'lstm': LSTMModel.load,\n 'nbow': NBowModel.create,\n 'nbowmax': NBowMaxModel.create\n}\n\n\ndef create_model(embeddings, labels, **kwargs):\n return create_classifier_model(BASELINE_CLASSIFICATION_MODELS, embeddings, labels, **kwargs)\n\n\ndef load_model(outname, **kwargs):\n return load_classifier_model(BASELINE_CLASSIFICATION_LOADERS, outname, **kwargs)\n",
"id": "7273794",
"language": "Python",
"matching_score": 6.175027370452881,
"max_stars_count": 2,
"path": "python/baseline/tf/classify/model.py"
},
{
"content": "from baseline.tf.tfy import *\nfrom baseline.model import create_lang_model\nimport json\n\n\nclass AbstractLanguageModel(object):\n\n def __init__(self):\n self.layers = None\n self.hsz = None\n self.rnntype = 'lstm'\n self.pkeep = None\n self.saver = None\n\n def save_using(self, saver):\n self.saver = saver\n\n def _rnnlm(self, inputs, vsz):\n\n #rnnfwd = stacked_lstm(self.hsz, self.pkeep, self.layers)\n def cell():\n return lstm_cell_w_dropout(self.hsz, self.pkeep)\n rnnfwd = tf.contrib.rnn.MultiRNNCell([cell() for _ in range(self.layers)], state_is_tuple=True)\n\n self.initial_state = rnnfwd.zero_state(self.batchsz, tf.float32)\n rnnout, state = tf.nn.dynamic_rnn(rnnfwd, inputs, initial_state=self.initial_state, dtype=tf.float32)\n output = tf.reshape(tf.concat(rnnout, 1), [-1, self.hsz])\n\n softmax_w = tf.get_variable(\n \"softmax_w\", [self.hsz, vsz], dtype=tf.float32)\n softmax_b = tf.get_variable(\"softmax_b\", [vsz], dtype=tf.float32)\n\n self.logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b, name=\"logits\")\n self.final_state = state\n\n\n def save_values(self, basename):\n self.saver.save(self.sess, basename)\n\n def save(self, basename):\n self.save_md(basename)\n self.save_values(basename)\n\n def create_loss(self):\n with tf.variable_scope(\"Loss\"):\n targets = tf.reshape(self.y, [-1])\n loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n [self.logits],\n [targets],\n [tf.ones([tf.size(targets)], dtype=tf.float32)])\n loss = tf.reduce_sum(loss) / self.batchsz\n return loss\n\n def get_vocab(self, vocab_type='word'):\n pass\n\n\nclass WordLanguageModel(AbstractLanguageModel):\n\n def __init__(self):\n AbstractLanguageModel.__init__(self)\n\n def make_input(self, batch_dict, do_dropout=False):\n x = batch_dict['x']\n y = batch_dict['y']\n pkeep = 1.0 - self.pdrop_value if do_dropout else 1.0\n feed_dict = {self.x: x, self.y: y, self.pkeep: pkeep}\n return feed_dict\n\n @classmethod\n def create(cls, embeddings, **kwargs):\n\n lm = cls()\n word_vec = embeddings['word']\n\n lm.batchsz = kwargs['batchsz']\n lm.mxlen = kwargs.get('mxlen', kwargs['nbptt'])\n lm.maxw = kwargs['maxw']\n lm.sess = kwargs.get('sess', tf.Session())\n lm.x = kwargs.get('x', tf.placeholder(tf.int32, [None, lm.mxlen], name=\"x\"))\n lm.y = kwargs.get('y', tf.placeholder(tf.int32, [None, lm.mxlen], name=\"y\"))\n lm.rnntype = kwargs.get('rnntype', 'lstm')\n lm.pkeep = kwargs.get('pkeep', tf.placeholder(tf.float32, name=\"pkeep\"))\n pdrop = kwargs.get('pdrop', 0.5)\n lm.pdrop_value = pdrop\n lm.hsz = kwargs['hsz']\n lm.word_vocab = word_vec.vocab\n vsz = word_vec.vsz + 1\n\n with tf.name_scope(\"WordLUT\"):\n Ww = tf.Variable(tf.constant(word_vec.weights, dtype=tf.float32), name=\"W\")\n we0 = tf.scatter_update(Ww, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, word_vec.dsz]))\n with tf.control_dependencies([we0]):\n wembed = tf.nn.embedding_lookup(Ww, lm.x, name=\"embeddings\")\n\n inputs = tf.nn.dropout(wembed, lm.pkeep)\n ##inputs = tf.unstack(inputs, num=lm.mxlen, axis=1)\n lm.layers = kwargs.get('layers', kwargs.get('nlayers', 1))\n lm._rnnlm(inputs, vsz)\n return lm\n\n def get_vocab(self, vocab_type='word'):\n if vocab_type == 'word':\n return self.word_vocab\n return None\n\n def save_md(self, basename):\n\n path = basename.split('/')\n base = path[-1]\n outdir = '/'.join(path[:-1])\n\n state = {\"mxlen\": self.mxlen, \"maxw\": self.maxw,\n 'hsz': self.hsz, 'batchsz': self.batchsz,\n 'layers': self.layers}\n with open(basename + '.state', 'w') as f:\n 
json.dump(state, f)\n\n tf.train.write_graph(self.sess.graph_def, outdir, base + '.graph', as_text=False)\n with open(basename + '.saver', 'w') as f:\n f.write(str(self.saver.as_saver_def()))\n\n if len(self.word_vocab) > 0:\n with open(basename + '-word.vocab', 'w') as f:\n json.dump(self.word_vocab, f)\n\n\nclass CharCompLanguageModel(AbstractLanguageModel):\n\n def __init__(self):\n AbstractLanguageModel.__init__(self)\n\n def make_input(self, batch_dict, do_dropout=False):\n x = batch_dict['x']\n xch = batch_dict['xch']\n y = batch_dict['y']\n\n pkeep = 1.0 - self.pdrop_value if do_dropout else 1.0\n feed_dict = {self.x: x, self.xch: xch, self.y: y, self.pkeep: pkeep}\n return feed_dict\n\n @classmethod\n def create(cls, embeddings, **kwargs):\n\n lm = cls()\n word_vec = embeddings['word']\n char_vec = embeddings['char']\n lm.batchsz = kwargs['batchsz']\n kwargs['mxlen'] = kwargs.get('mxlen', kwargs['nbptt'])\n lm.mxlen = kwargs['mxlen']\n lm.maxw = kwargs['maxw']\n lm.sess = kwargs.get('sess', tf.Session())\n lm.x = kwargs.get('x', tf.placeholder(tf.int32, [None, lm.mxlen], name=\"x\"))\n lm.xch = kwargs.get('xch', tf.placeholder(tf.int32, [None, lm.mxlen, lm.maxw], name=\"xch\"))\n lm.y = kwargs.get('y', tf.placeholder(tf.int32, [None, lm.mxlen], name=\"y\"))\n lm.pkeep = kwargs.get('pkeep', tf.placeholder(tf.float32, name=\"pkeep\"))\n lm.rnntype = kwargs.get('rnntype', 'lstm')\n vsz = word_vec.vsz + 1\n lm.char_vocab = char_vec.vocab\n lm.word_vocab = word_vec.vocab\n lm.pdrop_value = kwargs.get('pdrop', 0.5)\n lm.layers = kwargs.get('layers', kwargs.get('nlayers', 1))\n char_dsz = char_vec.dsz\n with tf.name_scope(\"CharLUT\"):\n Wch = tf.Variable(tf.constant(char_vec.weights, dtype=tf.float32), name=\"Wch\", trainable=True)\n ech0 = tf.scatter_update(Wch, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, char_dsz]))\n word_char, wchsz = pool_chars(lm.xch, Wch, ech0, char_dsz, **kwargs)\n\n lm.use_words = kwargs.get('use_words', False)\n if lm.use_words:\n with tf.name_scope(\"WordLUT\"):\n Ww = tf.Variable(tf.constant(word_vec.weights, dtype=tf.float32), name=\"W\")\n we0 = tf.scatter_update(Ww, tf.constant(0, dtype=tf.int32, shape=[1]), tf.zeros(shape=[1, word_vec.dsz]))\n with tf.control_dependencies([we0]):\n wembed = tf.nn.embedding_lookup(Ww, lm.x, name=\"embeddings\")\n word_char = tf.concat(values=[wembed, word_char], axis=2)\n\n inputs = tf.nn.dropout(word_char, lm.pkeep)\n #inputs = tf.unstack(inputs, num=lm.mxlen, axis=1)\n lm.hsz = kwargs['hsz']\n lm._rnnlm(inputs, vsz)\n return lm\n\n def get_vocab(self, vocab_type='word'):\n if vocab_type == 'char':\n return self.char_vocab\n return None\n\n def save_values(self, basename):\n self.saver.save(self.sess, basename)\n\n def save_md(self, basename):\n\n path = basename.split('/')\n base = path[-1]\n outdir = '/'.join(path[:-1])\n\n state = {\"mxlen\": self.mxlen, \"maxw\": self.maxw, 'use_words': self.use_words,\n 'layers': self.layers}\n with open(basename + '.state', 'w') as f:\n json.dump(state, f)\n\n tf.train.write_graph(self.sess.graph_def, outdir, base + '.graph', as_text=False)\n with open(basename + '.saver', 'w') as f:\n f.write(str(self.saver.as_saver_def()))\n\n if len(self.word_vocab) > 0:\n with open(basename + '-word.vocab', 'w') as f:\n json.dump(self.word_vocab, f)\n\n with open(basename + '-char.vocab', 'w') as f:\n json.dump(self.char_vocab, f)\n\n\nBASELINE_LM_MODELS = {\n 'default': WordLanguageModel.create,\n 'convchar': CharCompLanguageModel.create\n}\n\n# TODO:\n# BASELINE_LM_LOADERS = 
{\n# 'default': WordLanguageModel.load,\n# 'convchar': CharCompLanguageModel.load\n# }\n\n\n# TODO: move the scoping and weight initialization into the model itself\ndef create_model(embeddings, **kwargs):\n unif = kwargs['unif']\n\n if 'sess' not in kwargs:\n kwargs['sess'] = tf.Session()\n\n weight_initializer = tf.random_uniform_initializer(-unif, unif)\n with tf.variable_scope('Model', initializer=weight_initializer):\n lm = create_lang_model(BASELINE_LM_MODELS, embeddings, **kwargs)\n return lm\n",
"id": "2961597",
"language": "Python",
"matching_score": 0.27401003241539,
"max_stars_count": 2,
"path": "python/baseline/tf/lm/model.py"
},
{
"content": "import baseline\nimport json\nimport numpy as np\nimport logging\nimport logging.config\nimport mead.utils\nimport os\nfrom mead.downloader import EmbeddingDownloader, DataDownloader\nfrom mead.mime_type import mime_type\nfrom baseline.utils import export, read_config_file, read_json, write_json\n\n__all__ = []\nexporter = export(__all__)\n\n@exporter\nclass Task(object):\n TASK_REGISTRY = {}\n\n def __init__(self, logger_file, mead_config):\n super(Task, self).__init__()\n self.config_params = None\n self.ExporterType = None\n self.mead_config = mead_config\n if os.path.exists(mead_config):\n mead_settings = read_json(mead_config)\n else:\n mead_settings = {}\n if 'datacache' not in mead_settings:\n self.data_download_cache = os.path.expanduser(\"~/.bl-data\")\n mead_settings['datacache'] = self.data_download_cache\n write_json(mead_settings, mead_config)\n else:\n self.data_download_cache = os.path.expanduser(mead_settings['datacache'])\n print(\"using {} as data/embeddings cache\".format(self.data_download_cache))\n self._configure_logger(logger_file)\n\n def _configure_logger(self, logger_file):\n \"\"\"Use the logger file (logging.json) to configure the log, but overwrite the filename to include the PID\n\n :param logger_file: The logging configuration JSON file\n :return: A dictionary config derived from the logger_file, with the reporting handler suffixed with PID\n \"\"\"\n with open(logger_file) as f:\n config = json.load(f)\n config['handlers']['reporting_file_handler']['filename'] = 'reporting-{}.log'.format(os.getpid())\n logging.config.dictConfig(config)\n\n @staticmethod\n def get_task_specific(task, logging_config, mead_config):\n \"\"\"Get the task from the task registry associated with the name\n\n :param task: The task name\n :param logging_config: The configuration to read from\n :return:\n \"\"\"\n config = Task.TASK_REGISTRY[task](logging_config, mead_config)\n return config\n\n def read_config(self, config_params, datasets_index):\n \"\"\"\n Read the config file and the datasets index\n\n Between the config file and the dataset index, we have enough information\n to configure the backend and the models. 
We can also initialize the data readers\n\n :param config_file: The config file\n :param datasets_index: The index of datasets\n :return:\n \"\"\"\n datasets_set = mead.utils.index_by_label(datasets_index)\n self.config_params = config_params\n self._setup_task()\n self._configure_reporting()\n self.dataset = datasets_set[self.config_params['dataset']]\n self.reader = self._create_task_specific_reader()\n\n def initialize(self, embeddings_index):\n \"\"\"\n Load the vocabulary using the readers and then load any embeddings required\n\n :param embeddings_index: The index of embeddings\n :return:\n \"\"\"\n pass\n\n def _create_task_specific_reader(self):\n \"\"\"\n Create a task specific reader, based on the config\n :return:\n \"\"\"\n pass\n\n def _setup_task(self):\n \"\"\"\n This (pure) method provides the task-specific setup\n :return:\n \"\"\"\n pass\n\n def _load_dataset(self):\n pass\n\n def _create_model(self):\n pass\n\n def train(self):\n \"\"\"\n Do training\n :return:\n \"\"\"\n self._load_dataset()\n model = self._create_model()\n self.task.fit(model, self.train_data, self.valid_data, self.test_data, **self.config_params['train'])\n return model\n\n def _configure_reporting(self):\n reporting = {\n \"logging\": True,\n \"visdom\": self.config_params.get('visdom', False),\n \"tensorboard\": self.config_params.get('tensorboard', False)\n }\n reporting = baseline.setup_reporting(**reporting)\n self.config_params['train']['reporting'] = reporting\n logging.basicConfig(level=logging.DEBUG)\n\n @staticmethod\n def _create_embeddings_from_file(embed_file, embed_dsz, embed_sha1, data_download_cache, vocab, unif, keep_unused):\n embed_file = EmbeddingDownloader(embed_file, embed_dsz, embed_sha1, data_download_cache).download()\n EmbeddingT = baseline.GloVeModel if mime_type(embed_file) == 'text/plain' else baseline.Word2VecModel\n return EmbeddingT(embed_file, vocab, unif_weight=unif, keep_unused=keep_unused)\n\n def _create_embeddings(self, embeddings_set, vocabs):\n\n unif = self.config_params['unif']\n keep_unused = self.config_params.get('keep_unused', False)\n\n if 'word' in vocabs:\n embeddings_section = self.config_params['word_embeddings']\n embed_label = embeddings_section.get('label', None)\n\n embeddings = dict()\n if embed_label is not None:\n embed_file = embeddings_set[embed_label]['file']\n embed_dsz = embeddings_set[embed_label]['dsz']\n embed_sha1 = embeddings_set[embed_label].get('sha1',None)\n embeddings['word'] = Task._create_embeddings_from_file(embed_file, embed_dsz, embed_sha1,\n self.data_download_cache, vocabs['word'],\n unif=unif, keep_unused=keep_unused)\n else:\n dsz = embeddings_section['dsz']\n embeddings['word'] = baseline.RandomInitVecModel(dsz, vocabs['word'], unif_weight=unif)\n\n if 'char' in vocabs:\n if self.config_params.get('charsz', -1) > 0:\n embeddings['char'] = baseline.RandomInitVecModel(self.config_params['charsz'], vocabs['char'], unif_weight=unif)\n\n extended_embed_info = self.config_params.get('extended_embed_info', {})\n for key, vocab in vocabs.items():\n if key in extended_embed_info:\n print('Adding extended feature embeddings {}'.format(key))\n ext_embed = None if extended_embed_info[key].get(\"embedding\", None) is None \\\n else extended_embed_info[key][\"embedding\"]\n ext_emb_dsz = extended_embed_info[key].get(\"dsz\", None)\n if ext_embed is not None:\n EmbeddingT = baseline.GloVeModel if ext_embed.endswith('.txt') else baseline.Word2VecModel\n print(\"using {} to read external embedding file {}\".format(EmbeddingT, 
ext_embed))\n embeddings[key] = EmbeddingT(ext_embed, known_vocab=vocab, unif_weight=unif, keep_unused=False)\n else:\n print(\"randomly initializing external feature with dimension {}\".format(ext_emb_dsz))\n embeddings[key] = baseline.RandomInitVecModel(ext_emb_dsz, vocab, unif_weight=unif)\n elif key not in ['word', 'char']:\n raise Exception(\"Error: must specify a field '{}' in 'extended_embed_sz' dictionary for embedding dim size\".format(key))\n\n out_vocabs = {}\n for key, value in embeddings.items():\n out_vocabs[key] = value.vocab\n return embeddings, out_vocabs\n\n @staticmethod\n def _log2json(log):\n s = []\n with open(log) as f:\n for line in f:\n x = line.replace(\"'\", '\"')\n s.append(json.loads(x))\n return s\n\n def create_exporter(self):\n return self.ExporterType(self)\n\n\n@exporter\nclass ClassifierTask(Task):\n\n def __init__(self, logging_file, mead_config, **kwargs):\n super(ClassifierTask, self).__init__(logging_file, mead_config, **kwargs)\n self.task = None\n\n def _create_task_specific_reader(self):\n return baseline.create_pred_reader(self.config_params['preproc']['mxlen'],\n zeropadding=0,\n clean_fn=self.config_params['preproc']['clean_fn'],\n vec_alloc=self.config_params['preproc']['vec_alloc'],\n src_vec_trans=self.config_params['preproc']['src_vec_trans'],\n mxwlen=self.config_params['preproc'].get('mxwlen', -1),\n trim=self.config_params['preproc'].get('trim', False),\n **self.config_params['loader'])\n\n def _setup_task(self):\n backend = self.config_params.get('backend', 'tensorflow')\n if backend == 'pytorch':\n print('PyTorch backend')\n from baseline.pytorch import long_0_tensor_alloc\n from baseline.pytorch import tensor_reverse_2nd as rev2nd\n import baseline.pytorch.classify as classify\n self.config_params['preproc']['vec_alloc'] = long_0_tensor_alloc\n\n else:\n self.config_params['preproc']['vec_alloc'] = np.zeros\n\n if backend == 'keras':\n print('Keras backend')\n import baseline.keras.classify as classify\n if backend == 'dynet':\n print('Dynet backend')\n import _dynet\n dy_params = _dynet.DynetParams()\n dy_params.from_args()\n dy_params.set_requested_gpus(1)\n if 'autobatchsz' in self.config_params['train']:\n self.config_params['model']['batched'] = False\n dy_params.set_autobatch(True)\n dy_params.init()\n import baseline.dy.classify as classify\n from baseline.data import reverse_2nd as rev2nd\n else:\n print('TensorFlow backend')\n import baseline.tf.classify as classify\n from baseline.data import reverse_2nd as rev2nd\n import mead.tf\n self.ExporterType = mead.tf.ClassifyTensorFlowExporter\n\n self.task = classify\n\n if self.config_params['preproc'].get('clean', False) is True:\n self.config_params['preproc']['clean_fn'] = baseline.TSVSeqLabelReader.do_clean\n print('Clean')\n elif self.config_params['preproc'].get('lower', False) is True:\n self.config_params['preproc']['clean_fn'] = baseline.lowercase\n print('Lower')\n else:\n self.config_params['preproc']['clean_fn'] = None\n\n self.config_params['preproc']['src_vec_trans'] = rev2nd if self.config_params['preproc'].get('rev', False) else None\n\n def initialize(self, embeddings):\n embeddings_set = mead.utils.index_by_label(embeddings)\n self.dataset = DataDownloader(self.dataset, self.data_download_cache).download()\n print(\"[train file]: {}\\n[valid file]: {}\\n[test file]: {}\".format(self.dataset['train_file'], self.dataset['valid_file'], self.dataset['test_file']))\n vocab, self.labels = self.reader.build_vocab([self.dataset['train_file'], self.dataset['valid_file'], 
self.dataset['test_file']])\n self.embeddings, self.feat2index = self._create_embeddings(embeddings_set, vocab)\n\n\n def _create_model(self):\n model = self.config_params['model']\n model['mxlen'] = self.reader.max_sentence_length\n model['mxwlen'] = self.reader.max_word_length\n return self.task.create_model(self.embeddings, self.labels, **model)\n\n def _load_dataset(self):\n self.train_data = self.reader.load(self.dataset['train_file'], self.feat2index, self.config_params['batchsz'], shuffle=True)\n self.valid_data = self.reader.load(self.dataset['valid_file'], self.feat2index, self.config_params['batchsz'])\n self.test_data = self.reader.load(self.dataset['test_file'], self.feat2index, self.config_params.get('test_batchsz', 1))\n\nTask.TASK_REGISTRY['classify'] = ClassifierTask\n\n\n@exporter\nclass TaggerTask(Task):\n\n def __init__(self, logging_file, mead_config, **kwargs):\n super(TaggerTask, self).__init__(logging_file, mead_config, **kwargs)\n self.task = None\n\n def _create_task_specific_reader(self):\n preproc = self.config_params['preproc']\n reader = baseline.create_seq_pred_reader(preproc['mxlen'],\n preproc['mxwlen'],\n preproc['word_trans_fn'],\n preproc['vec_alloc'],\n preproc['vec_shape'],\n preproc['trim'],\n **self.config_params['loader'])\n return reader\n\n def _setup_task(self):\n backend = self.config_params.get('backend', 'tensorflow')\n if backend == 'pytorch':\n print('PyTorch backend')\n from baseline.pytorch import long_0_tensor_alloc as vec_alloc\n from baseline.pytorch import tensor_shape as vec_shape\n import baseline.pytorch.tagger as tagger\n self.config_params['preproc']['vec_alloc'] = vec_alloc\n self.config_params['preproc']['vec_shape'] = vec_shape\n self.config_params['preproc']['trim'] = True\n else:\n self.config_params['preproc']['vec_alloc'] = np.zeros\n self.config_params['preproc']['vec_shape'] = np.shape\n print('TensorFlow backend')\n self.config_params['preproc']['trim'] = False\n import baseline.tf.tagger as tagger\n import mead.tf\n self.ExporterType = mead.tf.TaggerTensorFlowExporter\n\n self.task = tagger\n if self.config_params['preproc'].get('web-cleanup', False) is True:\n self.config_params['preproc']['word_trans_fn'] = baseline.CONLLSeqReader.web_cleanup\n print('Web-ish data cleanup')\n elif self.config_params['preproc'].get('lower', False) is True:\n self.config_params['preproc']['word_trans_fn'] = baseline.lowercase\n print('Lower')\n else:\n self.config_params['preproc']['word_trans_fn'] = None\n\n def initialize(self, embeddings):\n self.dataset = DataDownloader(self.dataset, self.data_download_cache).download()\n print(\"[train file]: {}\\n[valid file]: {}\\n[test file]: {}\".format(self.dataset['train_file'], self.dataset['valid_file'], self.dataset['test_file']))\n embeddings_set = mead.utils.index_by_label(embeddings)\n vocabs = self.reader.build_vocab([self.dataset['train_file'], self.dataset['valid_file'], self.dataset['test_file']])\n self.embeddings, self.feat2index = self._create_embeddings(embeddings_set, vocabs)\n\n def _create_model(self):\n labels = self.reader.label2index\n self.config_params['model']['span_type'] = self.config_params['train'].get('span_type')\n self.config_params['model'][\"unif\"] = self.config_params[\"unif\"]\n self.config_params['model']['maxs'] = self.reader.max_sentence_length\n self.config_params['model']['maxw'] = self.reader.max_word_length\n return self.task.create_model(labels, self.embeddings, **self.config_params['model'])\n\n def _load_dataset(self):\n self.train_data, _ = 
self.reader.load(self.dataset['train_file'], self.feat2index, self.config_params['batchsz'], shuffle=True)\n self.valid_data, _ = self.reader.load(self.dataset['valid_file'], self.feat2index, self.config_params['batchsz'])\n self.test_data, self.txts = self.reader.load(self.dataset['test_file'], self.feat2index, self.config_params.get('test_batchsz', 1), shuffle=False, do_sort=False)\n\n def train(self):\n self._load_dataset()\n model = self._create_model()\n conll_output = self.config_params.get(\"conll_output\", None)\n self.task.fit(model, self.train_data, self.valid_data, self.test_data, conll_output=conll_output, txts=self.txts, **self.config_params['train'])\n return model\n\nTask.TASK_REGISTRY['tagger'] = TaggerTask\n\n\n@exporter\nclass EncoderDecoderTask(Task):\n\n def __init__(self, logging_file, mead_config, **kwargs):\n super(EncoderDecoderTask, self).__init__(logging_file, mead_config, **kwargs)\n self.task = None\n\n def _create_task_specific_reader(self):\n preproc = self.config_params['preproc']\n reader = baseline.create_parallel_corpus_reader(preproc['mxlen'],\n preproc['vec_alloc'],\n preproc['trim'],\n preproc['word_trans_fn'],\n **self.config_params['loader'])\n return reader\n\n def _setup_task(self):\n\n # If its not vanilla seq2seq, dont bother reversing\n do_reverse = self.config_params['model']['model_type'] == 'default'\n backend = self.config_params.get('backend', 'tensorflow')\n if backend == 'pytorch':\n print('PyTorch backend')\n from baseline.pytorch import long_0_tensor_alloc as vec_alloc\n from baseline.pytorch import tensor_shape as vec_shape\n from baseline.pytorch import tensor_reverse_2nd as rev2nd\n import baseline.pytorch.seq2seq as seq2seq\n self.config_params['preproc']['vec_alloc'] = vec_alloc\n self.config_params['preproc']['vec_shape'] = vec_shape\n src_vec_trans = rev2nd if do_reverse else None\n self.config_params['preproc']['word_trans_fn'] = src_vec_trans\n self.config_params['preproc']['show_ex'] = baseline.pytorch.show_examples_pytorch\n self.config_params['preproc']['trim'] = True\n else:\n import baseline.tf.seq2seq as seq2seq\n import mead.tf\n self.ExporterType = mead.tf.Seq2SeqTensorFlowExporter\n self.config_params['preproc']['vec_alloc'] = np.zeros\n self.config_params['preproc']['vec_shape'] = np.shape\n self.config_params['preproc']['trim'] = False\n src_vec_trans = baseline.reverse_2nd if do_reverse else None\n self.config_params['preproc']['word_trans_fn'] = src_vec_trans\n self.config_params['preproc']['show_ex'] = baseline.tf.show_examples_tf\n\n self.task = seq2seq\n\n def initialize(self, embeddings):\n embeddings_set = mead.utils.index_by_label(embeddings)\n self.dataset = DataDownloader(self.dataset, self.data_download_cache, True).download()\n print(\"[train file]: {}\\n[valid file]: {}\\n[test file]: {}\\n[vocab file]: {}\".format(self.dataset['train_file'], self.dataset['valid_file'], self.dataset['test_file'], self.dataset.get('vocab_file',\"None\")))\n vocab_file = self.dataset.get('vocab_file',None)\n if vocab_file is not None:\n vocab1, vocab2 = self.reader.build_vocabs([vocab_file])\n else:\n vocab1, vocab2 = self.reader.build_vocabs([self.dataset['train_file'], self.dataset['valid_file'], self.dataset['test_file']])\n self.embeddings1, self.feat2index1 = self._create_embeddings(embeddings_set, {'word': vocab1})\n self.embeddings2, self.feat2index2 = self._create_embeddings(embeddings_set, {'word': vocab2})\n\n def _load_dataset(self):\n self.train_data = self.reader.load(self.dataset['train_file'], 
self.feat2index1['word'], self.feat2index2['word'], self.config_params['batchsz'], shuffle=True)\n self.valid_data = self.reader.load(self.dataset['valid_file'], self.feat2index1['word'], self.feat2index2['word'], self.config_params['batchsz'], shuffle=True)\n self.test_data = self.reader.load(self.dataset['test_file'], self.feat2index1['word'], self.feat2index2['word'], self.config_params.get('test_batchsz', 1))\n\n def _create_model(self):\n return self.task.create_model(self.embeddings1['word'], self.embeddings2['word'], **self.config_params['model'])\n\n def train(self):\n\n num_ex = self.config_params['num_valid_to_show']\n\n if num_ex > 0:\n print('Showing examples')\n preproc = self.config_params['preproc']\n show_ex_fn = preproc['show_ex']\n rlut1 = baseline.revlut(self.feat2index1['word'])\n rlut2 = baseline.revlut(self.feat2index2['word'])\n self.config_params['train']['after_train_fn'] = lambda model: show_ex_fn(model,\n self.valid_data, rlut1, rlut2,\n self.embeddings2['word'],\n preproc['mxlen'], False, 0,\n num_ex, reverse=False)\n super(EncoderDecoderTask, self).train()\n\nTask.TASK_REGISTRY['seq2seq'] = EncoderDecoderTask\n\n\n@exporter\nclass LanguageModelingTask(Task):\n\n def __init__(self, logging_file, mead_config, **kwargs):\n super(LanguageModelingTask, self).__init__(logging_file, mead_config, **kwargs)\n self.task = None\n\n def _create_task_specific_reader(self):\n mxwlen = self.config_params['preproc']['mxwlen']\n nbptt = self.config_params['nbptt']\n reader = baseline.create_lm_reader(mxwlen,\n nbptt,\n self.config_params['preproc']['word_trans_fn'],\n reader_type=self.config_params['loader']['reader_type'])\n return reader\n\n def _setup_task(self):\n\n backend = self.config_params.get('backend', 'tensorflow')\n if backend == 'pytorch':\n print('PyTorch backend')\n from baseline.pytorch import long_0_tensor_alloc as vec_alloc\n from baseline.pytorch import tensor_shape as vec_shape\n import baseline.pytorch.lm as lm\n self.config_params['preproc']['vec_alloc'] = vec_alloc\n self.config_params['preproc']['vec_shape'] = vec_shape\n self.config_params['preproc']['trim'] = True\n else:\n self.config_params['preproc']['vec_alloc'] = np.zeros\n self.config_params['preproc']['vec_shape'] = np.shape\n print('TensorFlow backend')\n self.config_params['preproc']['trim'] = False\n import baseline.tf.lm as lm\n self.task = lm\n\n if self.config_params.get('web-cleanup', False) is True:\n self.config_params['preproc']['word_trans_fn'] = baseline.CONLLSeqReader.web_cleanup\n print('Web-ish data cleanup')\n elif self.config_params.get('lower', False) is True:\n self.config_params['preproc']['word_trans_fn'] = baseline.lowercase\n print('Lower')\n else:\n self.config_params['preproc']['word_trans_fn'] = None\n\n def initialize(self, embeddings):\n embeddings_set = mead.utils.index_by_label(embeddings)\n self.dataset = DataDownloader(self.dataset, self.data_download_cache).download()\n print(\"[train file]: {}\\n[valid file]: {}\\n[test file]: {}\".format(self.dataset['train_file'], self.dataset['valid_file'], self.dataset['test_file']))\n vocab, self.num_words = self.reader.build_vocab([self.dataset['train_file'], self.dataset['valid_file'], self.dataset['test_file']])\n self.embeddings, self.feat2index = self._create_embeddings(embeddings_set, vocab)\n\n def _load_dataset(self):\n mxwlen = self.config_params['preproc']['mxwlen']\n if mxwlen > 0:\n self.reader.max_word_length = max(mxwlen, self.reader.max_word_length)\n self.train_data = 
self.reader.load(self.dataset['train_file'], self.feat2index, self.num_words[0], self.config_params['batchsz'])\n self.valid_data = self.reader.load(self.dataset['valid_file'], self.feat2index, self.num_words[1], self.config_params['batchsz'])\n self.test_data = self.reader.load(self.dataset['test_file'], self.feat2index, self.num_words[2], self.config_params['batchsz'])\n\n def _create_model(self):\n\n model = self.config_params['model']\n model['unif'] = self.config_params['unif']\n model['batchsz'] = self.config_params['batchsz']\n model['nbptt'] = self.config_params['nbptt']\n model['maxw'] = self.reader.max_word_length\n return self.task.create_model(self.embeddings, **model)\n\n @staticmethod\n def _num_steps_per_epoch(num_examples, nbptt, batchsz):\n rest = num_examples // batchsz\n return rest // nbptt\n\n def train(self):\n # TODO: This should probably get generalized and pulled up\n if self.config_params['train'].get('decay_type', None) == 'zaremba':\n batchsz = self.config_params['batchsz']\n nbptt = self.config_params['nbptt']\n steps_per_epoch = LanguageModelingTask._num_steps_per_epoch(self.num_words[0], nbptt, batchsz)\n first_range = int(self.config_params['train']['start_decay_epoch'] * steps_per_epoch)\n\n self.config_params['train']['bounds'] = [first_range] + list(np.arange(self.config_params['train']['start_decay_epoch'] + 1,\n self.config_params['train']['epochs'] + 1,\n dtype=np.int32) * steps_per_epoch)\n\n super(LanguageModelingTask, self).train()\n\nTask.TASK_REGISTRY['lm'] = LanguageModelingTask\n",
"id": "8627844",
"language": "Python",
"matching_score": 4.225329399108887,
"max_stars_count": 2,
"path": "python/mead/tasks.py"
},
{
"content": "import baseline.data\nimport numpy as np\nfrom collections import Counter\nimport re\nimport codecs\nfrom baseline.utils import import_user_module, revlut, export\nimport os\n\n__all__ = []\nexporter = export(__all__)\n\n\n@exporter\ndef num_lines(filename):\n lines = 0\n with codecs.open(filename, encoding='utf-8', mode='r') as f:\n for _ in f:\n lines = lines + 1\n return lines\n\n\ndef _build_vocab_for_col(col, files):\n vocab = Counter()\n vocab['<GO>'] = 1\n vocab['<EOS>'] = 1\n vocab['<UNK>'] = 1\n for file in files:\n if file is None:\n continue\n with codecs.open(file, encoding='utf-8', mode='r') as f:\n for line in f:\n cols = re.split(\"\\t\", line)\n text = re.split(\"\\s\", cols[col])\n\n for w in text:\n w = w.strip()\n if w:\n vocab[w] += 1\n return vocab\n\n\n@exporter\nclass ParallelCorpusReader(object):\n\n def __init__(self,\n max_sentence_length=1000,\n vec_alloc=np.zeros,\n src_vec_trans=None,\n trim=False):\n self.vec_alloc = vec_alloc\n self.src_vec_trans = src_vec_trans\n self.max_sentence_length = max_sentence_length\n self.trim = trim\n\n def build_vocabs(self, files):\n pass\n\n def load_examples(self, tsfile, vocab1, vocab2):\n pass\n\n def load(self, tsfile, vocab1, vocab2, batchsz, shuffle=False):\n examples = self.load_examples(tsfile, vocab1, vocab2)\n return baseline.data.Seq2SeqDataFeed(examples, batchsz,\n shuffle=shuffle, src_vec_trans=self.src_vec_trans,\n vec_alloc=self.vec_alloc, trim=self.trim)\n\n\n@exporter\nclass TSVParallelCorpusReader(ParallelCorpusReader):\n\n def __init__(self,\n max_sentence_length=1000,\n vec_alloc=np.zeros,\n src_vec_trans=None,\n trim=False, src_col_num=0, dst_col_num=1):\n super(TSVParallelCorpusReader, self).__init__(max_sentence_length, vec_alloc, src_vec_trans, trim)\n self.src_col_num = src_col_num\n self.dst_col_num = dst_col_num\n\n def build_vocabs(self, files):\n src_vocab = _build_vocab_for_col(self.src_col_num, files)\n dst_vocab = _build_vocab_for_col(self.dst_col_num, files)\n return src_vocab, dst_vocab\n\n def load_examples(self, tsfile, vocab1, vocab2):\n GO = vocab2['<GO>']\n EOS = vocab2['<EOS>']\n mxlen = self.max_sentence_length\n ts = []\n with codecs.open(tsfile, encoding='utf-8', mode='r') as f:\n for line in f:\n splits = re.split(\"\\t\", line.strip())\n src = list(filter(lambda x: len(x) != 0, re.split(\"\\s+\", splits[0])))\n dst = list(filter(lambda x: len(x) != 0, re.split(\"\\s+\", splits[1])))\n srcl = self.vec_alloc(mxlen, dtype=np.int)\n tgtl = self.vec_alloc(mxlen, dtype=np.int)\n src_len = len(src)\n tgt_len = len(dst) + 2 # <GO>,...,<EOS>\n end1 = min(src_len, mxlen)\n end2 = min(tgt_len, mxlen)-1\n tgtl[0] = GO\n src_len = end1\n tgt_len = end2+1\n\n for j in range(end1):\n srcl[j] = vocab1[src[j]]\n for j in range(end2-1):\n tgtl[j + 1] = vocab2[dst[j]]\n\n tgtl[end2] = EOS\n\n ts.append((srcl, tgtl, src_len, tgt_len))\n\n return baseline.data.Seq2SeqExamples(ts)\n\n\n@exporter\nclass MultiFileParallelCorpusReader(ParallelCorpusReader):\n\n def __init__(self, src_suffix, dst_suffix,\n max_sentence_length=1000,\n vec_alloc=np.zeros,\n src_vec_trans=None,\n trim=False):\n super(MultiFileParallelCorpusReader, self).__init__(max_sentence_length, vec_alloc, src_vec_trans, trim)\n self.src_suffix = src_suffix\n self.dst_suffix = dst_suffix\n if not src_suffix.startswith('.'):\n self.src_suffix = '.' + self.src_suffix\n if not dst_suffix.startswith('.'):\n self.dst_suffix = '.' + self.dst_suffix\n\n # 2 possibilities here, either we have a vocab file, e.g. 
vocab.bpe.32000, or we are going to generate\n # from each column\n def build_vocabs(self, files):\n if len(files) == 1 and os.path.exists(files[0]):\n src_vocab = _build_vocab_for_col(0, files)\n dst_vocab = src_vocab\n else:\n src_vocab = _build_vocab_for_col(0, [f + self.src_suffix for f in files])\n dst_vocab = _build_vocab_for_col(0, [f + self.dst_suffix for f in files])\n return src_vocab, dst_vocab\n\n def load_examples(self, tsfile, vocab1, vocab2):\n PAD = vocab1['<PAD>']\n GO = vocab2['<GO>']\n EOS = vocab2['<EOS>']\n UNK1 = vocab1['<UNK>']\n UNK2 = vocab2['<UNK>']\n mxlen = self.max_sentence_length\n ts = []\n\n with codecs.open(tsfile + self.src_suffix, encoding='utf-8', mode='r') as fsrc:\n with codecs.open(tsfile + self.dst_suffix, encoding='utf-8', mode='r') as fdst:\n for src, dst in zip(fsrc, fdst):\n\n src = re.split(\"\\s+\", src.strip())\n dst = re.split(\"\\s+\", dst.strip())\n srcl = self.vec_alloc(mxlen, dtype=np.int)\n tgtl = self.vec_alloc(mxlen, dtype=np.int)\n src_len = len(src)\n tgt_len = len(dst) + 2\n end1 = min(src_len, mxlen)\n end2 = min(tgt_len, mxlen)-1\n tgtl[0] = GO\n src_len = end1\n tgt_len = end2+1\n\n for j in range(end1):\n srcl[j] = vocab1.get(src[j], UNK1)\n for j in range(end2-1):\n tgtl[j + 1] = vocab2.get(dst[j], UNK2)\n\n tgtl[end2] = EOS\n ts.append((srcl, tgtl, src_len, tgt_len))\n\n return baseline.data.Seq2SeqExamples(ts)\n\n\n@exporter\ndef create_parallel_corpus_reader(mxlen, alloc_fn, trim, src_vec_trans, **kwargs):\n\n reader_type = kwargs.get('reader_type', 'default')\n\n if reader_type == 'default':\n print('Reading parallel file corpus')\n pair_suffix = kwargs.get('pair_suffix')\n reader = MultiFileParallelCorpusReader(pair_suffix[0], pair_suffix[1],\n mxlen, alloc_fn,\n src_vec_trans, trim)\n elif reader_type == 'tsv':\n print('Reading tab-separated corpus')\n reader = TSVParallelCorpusReader(mxlen, alloc_fn, src_vec_trans, trim)\n else:\n mod = import_user_module(\"reader\", reader_type)\n return mod.create_parallel_corpus_reader(mxlen, alloc_fn,\n src_vec_trans, trim, **kwargs)\n return reader\n\n\n@exporter\ndef identity_trans_fn(x):\n return x\n\n\n@exporter\nclass SeqPredictReader(object):\n\n def __init__(self, max_sentence_length=-1, max_word_length=-1, word_trans_fn=None,\n vec_alloc=np.zeros, vec_shape=np.shape, trim=False, extended_features=dict()):\n self.cleanup_fn = identity_trans_fn if word_trans_fn is None else word_trans_fn\n self.max_sentence_length = max_sentence_length\n self.max_word_length = max_word_length\n self.vec_alloc = vec_alloc\n self.vec_shape = vec_shape\n self.trim = trim\n self.extended_features = extended_features\n self.label2index = {\"<PAD>\": 0, \"<GO>\": 1, \"<EOS>\": 2}\n self.idx = 2 # GO=0, START=1, EOS=2\n\n def build_vocab(self, files):\n pass\n\n def read_lines(self):\n pass\n\n def load(self, filename, vocabs, batchsz, shuffle=False, do_sort=True):\n\n ts = []\n words_vocab = vocabs['word']\n chars_vocab = vocabs['char']\n\n mxlen = self.max_sentence_length\n maxw = self.max_word_length\n extracted = self.read_lines(filename)\n texts = extracted['texts']\n labels = extracted['labels']\n\n for i in range(len(texts)):\n\n xs_ch = self.vec_alloc((mxlen, maxw), dtype=np.int)\n xs = self.vec_alloc((mxlen), dtype=np.int)\n ys = self.vec_alloc((mxlen), dtype=np.int)\n\n keys = self.extended_features.keys()\n\n item = {}\n for key in keys:\n item[key] = self.vec_alloc((mxlen), dtype=np.int)\n\n text = texts[i]\n lv = labels[i]\n\n length = mxlen\n for j in range(mxlen):\n\n if j == 
len(text):\n length = j\n break\n\n w = text[j]\n nch = min(len(w), maxw)\n label = lv[j]\n\n if label not in self.label2index:\n self.idx += 1\n self.label2index[label] = self.idx\n\n ys[j] = self.label2index[label]\n xs[j] = words_vocab.get(self.cleanup_fn(w), 0)\n # Extended features\n for key in keys:\n item[key][j] = vocabs[key].get(extracted[key][i][j])\n for k in range(nch):\n xs_ch[j, k] = chars_vocab.get(w[k], 0)\n item.update({'x': xs, 'xch': xs_ch, 'y': ys, 'lengths': length, 'ids': i})\n ts.append(item)\n examples = baseline.data.SeqWordCharTagExamples(ts, do_shuffle=shuffle, do_sort=do_sort)\n return baseline.data.SeqWordCharLabelDataFeed(examples, batchsz=batchsz, shuffle=shuffle,\n vec_alloc=self.vec_alloc, vec_shape=self.vec_shape), texts\n\n\n@exporter\nclass CONLLSeqReader(SeqPredictReader):\n\n UNREP_EMOTICONS = (\n ':)',\n ':(((',\n ':D',\n '=)',\n ':-)',\n '=(',\n '(=',\n '=[[',\n )\n\n def __init__(self, max_sentence_length=-1, max_word_length=-1, word_trans_fn=None,\n vec_alloc=np.zeros, vec_shape=np.shape, trim=False, extended_features=dict()):\n super(CONLLSeqReader, self).__init__(max_sentence_length, max_word_length, word_trans_fn, vec_alloc, vec_shape, trim, extended_features)\n\n @staticmethod\n def web_cleanup(word):\n if word.startswith('http'): return 'URL'\n if word.startswith('@'): return '@@@@'\n if word.startswith('#'): return '####'\n if word == '\"': return ','\n if word in CONLLSeqReader.UNREP_EMOTICONS: return ';)'\n if word == '<3': return '<3'\n return word\n\n def build_vocab(self, files):\n vocab_word = Counter()\n vocab_ch = Counter()\n vocab_word['<UNK>'] = 1\n vocabs = {}\n keys = self.extended_features.keys()\n for key in keys:\n vocabs[key] = Counter()\n\n maxw = 0\n maxs = 0\n for file in files:\n if file is None:\n continue\n\n sl = 0\n with codecs.open(file, encoding='utf-8', mode='r') as f:\n for line in f:\n\n line = line.strip()\n if line == '':\n maxs = max(maxs, sl)\n sl = 0\n\n else:\n states = re.split(\"\\s\", line)\n sl += 1\n w = states[0]\n vocab_word[self.cleanup_fn(w)] += 1\n maxw = max(maxw, len(w))\n for k in w:\n vocab_ch[k] += 1\n for key, index in self.extended_features.items():\n vocabs[key][states[index]] += 1\n\n self.max_word_length = min(maxw, self.max_word_length) if self.max_word_length > 0 else maxw\n self.max_sentence_length = min(maxs, self.max_sentence_length) if self.max_sentence_length > 0 else maxs\n print('Max sentence length %d' % self.max_sentence_length)\n print('Max word length %d' % self.max_word_length)\n\n vocabs.update({'char': vocab_ch, 'word': vocab_word})\n return vocabs\n\n def read_lines(self, tsfile):\n\n txts = []\n lbls = []\n txt = []\n lbl = []\n features = {}\n # Extended feature values\n xfv = {}\n\n for key in self.extended_features.keys():\n features[key] = []\n xfv[key] = []\n with codecs.open(tsfile, encoding='utf-8', mode='r') as f:\n for line in f:\n states = re.split(\"\\s\", line.strip())\n\n if len(states) > 1:\n txt.append(states[0])\n lbl.append(states[-1])\n for key, value in self.extended_features.items():\n xfv[key].append(states[value])\n else:\n txts.append(txt)\n lbls.append(lbl)\n for key in self.extended_features.keys():\n features[key].append(xfv[key])\n xfv[key] = []\n txt = []\n lbl = []\n\n features.update({'texts': txts, 'labels': lbls})\n return features\n\n\n@exporter\ndef create_seq_pred_reader(mxlen, mxwlen, word_trans_fn, vec_alloc, vec_shape, trim, **kwargs):\n\n reader_type = kwargs.get('reader_type', 'default')\n\n if reader_type == 'default':\n 
print('Reading CONLL sequence file corpus')\n reader = CONLLSeqReader(mxlen, mxwlen, word_trans_fn,\n vec_alloc, vec_shape, trim, extended_features=kwargs.get('extended_features', {}))\n else:\n mod = import_user_module(\"reader\", reader_type)\n reader = mod.create_seq_pred_reader(mxlen, mxwlen, word_trans_fn,\n vec_alloc, vec_shape, trim, **kwargs)\n return reader\n\n\n@exporter\nclass SeqLabelReader(object):\n\n def __init__(self):\n pass\n\n def build_vocab(self, files, **kwargs):\n pass\n\n def load(self, filename, index, batchsz, **kwargs):\n pass\n\n\n@exporter\nclass TSVSeqLabelReader(SeqLabelReader):\n\n REPLACE = { \"'s\": \" 's \",\n \"'ve\": \" 've \",\n \"n't\": \" n't \",\n \"'re\": \" 're \",\n \"'d\": \" 'd \",\n \"'ll\": \" 'll \",\n \",\": \" , \",\n \"!\": \" ! \",\n }\n\n def __init__(\n self,\n max_sentence_length=-1, max_word_length=-1, mxfiltsz=0,\n clean_fn=None, vec_alloc=np.zeros, src_vec_trans=None,\n do_chars=False, data_format='objs', trim=False\n ):\n super(TSVSeqLabelReader, self).__init__()\n\n self.vocab = None\n self.label2index = {}\n self.clean_fn = clean_fn\n self.data_format = data_format\n self.max_sentence_length = max_sentence_length\n self.max_word_length = max_word_length\n self.mxfiltsz = mxfiltsz\n self.vec_alloc = vec_alloc\n if self.clean_fn is None:\n self.clean_fn = lambda x: x\n self.src_vec_trans = src_vec_trans\n self.do_chars = do_chars\n self.trim = trim\n\n SPLIT_ON = '[\\t\\s]+'\n\n @staticmethod\n def splits(text):\n return list(filter(lambda s: len(s) != 0, re.split('\\s+', text)))\n\n @staticmethod\n def do_clean(l):\n l = l.lower()\n l = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", l)\n for k, v in TSVSeqLabelReader.REPLACE.items():\n l = l.replace(k, v)\n return l.strip()\n\n @staticmethod\n def label_and_sentence(line, clean_fn):\n label_text = re.split(TSVSeqLabelReader.SPLIT_ON, line)\n label = label_text[0]\n text = label_text[1:]\n text = ' '.join(list(filter(lambda s: len(s) != 0, [clean_fn(w) for w in text])))\n text = list(filter(lambda s: len(s) != 0, re.split('\\s+', text)))\n return label, text\n\n def build_vocab(self, files, **kwargs):\n \"\"\"Take a directory (as a string), or an array of files and build a vocabulary\n \n Take in a directory or an array of individual files (as a list). 
If the argument is\n a string, it may be a directory, in which case, all files in the directory will be loaded\n to form a vocabulary.\n \n :param files: Either a directory (str), or an array of individual files\n :return: \n \"\"\"\n label_idx = len(self.label2index)\n if type(files) == str:\n if os.path.isdir(files):\n base = files\n files = filter(os.path.isfile, [os.path.join(base, x) for x in os.listdir(base)])\n else:\n files = [files]\n maxs = 0\n maxw = 0\n vocab_words = Counter()\n if self.do_chars:\n vocab_chars = Counter()\n for file in files:\n if file is None:\n continue\n with codecs.open(file, encoding='utf-8', mode='r') as f:\n for il, line in enumerate(f):\n label, text = TSVSeqLabelReader.label_and_sentence(line, self.clean_fn)\n if len(text) == 0:\n continue\n maxs = max(maxs, len(text))\n for w in text:\n maxw = max(maxw, len(w))\n if self.do_chars:\n for ch in w:\n vocab_chars[ch] += 1\n\n vocab_words[w] += 1\n if label not in self.label2index:\n self.label2index[label] = label_idx\n label_idx += 1\n\n vocab = {'word': vocab_words}\n if self.do_chars:\n vocab['char'] = vocab_chars\n\n if self.max_sentence_length < 0:\n self.max_sentence_length = maxs\n if self.max_word_length < 0:\n self.max_word_length = maxw\n print('Max sentence length {}, requested length {}'.format(maxs, self.max_sentence_length))\n\n if self.do_chars:\n print('Max word length {}, requested length {}'.format(maxw, self.max_word_length))\n return vocab, self.get_labels()\n\n def get_labels(self):\n labels = [''] * len(self.label2index)\n for label, index in self.label2index.items():\n labels[index] = label\n return labels\n\n def load(self, filename, vocabs, batchsz, **kwargs):\n\n words_vocab = vocabs['word']\n mxlen = self.max_sentence_length\n maxw = self.max_word_length\n PAD = words_vocab['<PAD>']\n shuffle = kwargs.get('shuffle', False)\n do_sort = kwargs.get('do_sort', False)\n halffiltsz = self.mxfiltsz // 2\n nozplen = mxlen - 2*halffiltsz\n\n if self.data_format == 'objs':\n examples = []\n with codecs.open(filename, encoding='utf-8', mode='r') as f:\n for il, line in enumerate(f):\n label, text = TSVSeqLabelReader.label_and_sentence(line, self.clean_fn)\n if len(text) == 0:\n continue\n y = self.label2index[label]\n mx = min(len(text), nozplen)\n text = text[:mx]\n length = mx\n x = self.vec_alloc(mxlen, dtype=int)\n xch = None\n if self.do_chars:\n xch = self.vec_alloc((mxlen, maxw), dtype=int)\n for j in range(len(text)):\n w = text[j]\n offset = j + halffiltsz\n if self.do_chars:\n nch = min(maxw, len(w))\n for k in range(nch):\n key = vocabs['char'].get(w[k], PAD)\n xch[offset, k] = key\n key = words_vocab.get(w, PAD)\n x[offset] = key\n example = {'x': x, 'y': y, 'lengths': length}\n if self.do_chars:\n example['xch'] = xch\n examples.append(example)\n else:\n num_samples = num_lines(filename)\n x = self.vec_alloc((num_samples, mxlen), dtype=int)\n y = self.vec_alloc(num_samples, dtype=int)\n lengths = self.vec_alloc(num_samples, dtype=int)\n if self.do_chars:\n xch = self.vec_alloc((num_samples, mxlen, maxw), dtype=int)\n\n with codecs.open(filename, encoding='utf-8', mode='r') as f:\n for i, line in enumerate(f):\n label, text = TSVSeqLabelReader.label_and_sentence(line, self.clean_fn)\n y[i] = self.label2index[label]\n mx = min(len(text), nozplen)\n text = text[:mx]\n lengths[i] = mx\n for j in range(len(text)):\n w = text[j]\n offset = j + halffiltsz\n if self.do_chars:\n nch = min(maxw, len(w))\n for k in range(nch):\n key = vocabs['char'].get(w[k], PAD)\n xch[i, offset, k] = 
key\n key = words_vocab.get(w, PAD)\n x[i, offset] = key\n\n examples = {'x': x, 'y': y, 'lengths': lengths}\n if self.do_chars:\n examples['xch'] = xch\n return baseline.data.SeqLabelDataFeed(baseline.data.SeqLabelExamples(examples, do_shuffle=shuffle, do_sort=do_sort),\n batchsz=batchsz, shuffle=shuffle, trim=self.trim,\n vec_alloc=self.vec_alloc, src_vec_trans=self.src_vec_trans)\n\n@exporter\ndef create_pred_reader(mxlen, zeropadding, clean_fn, vec_alloc, src_vec_trans, **kwargs):\n reader_type = kwargs.get('reader_type', 'default')\n\n if reader_type == 'default':\n do_chars = kwargs.get('do_chars', False)\n data_format = kwargs.get('data_format', 'objs')\n trim = kwargs.get('trim', False)\n #splitter = kwargs.get('splitter', '[\\t\\s]+')\n reader = TSVSeqLabelReader(mxlen, kwargs.get('mxwlen', -1), zeropadding, clean_fn, vec_alloc, src_vec_trans,\n do_chars=do_chars, data_format=data_format, trim=trim)\n else:\n mod = import_user_module(\"reader\", reader_type)\n reader = mod.create_pred_reader(mxlen, zeropadding, clean_fn, vec_alloc, src_vec_trans, **kwargs)\n return reader\n\n\n@exporter\nclass PTBSeqReader(object):\n\n def __init__(self, max_word_length, nbptt, word_trans_fn):\n self.max_word_length = max_word_length\n self.nbptt = nbptt\n self.cleanup_fn = identity_trans_fn if word_trans_fn is None else word_trans_fn\n\n def build_vocab(self, files):\n vocab_word = Counter()\n vocab_ch = Counter()\n maxw = 0\n num_words_in_files = []\n for file in files:\n if file is None:\n continue\n\n with codecs.open(file, encoding='utf-8', mode='r') as f:\n num_words = 0\n for line in f:\n sentence = line.split()\n sentence = [w for w in sentence] + ['<EOS>']\n num_words += len(sentence)\n for w in sentence:\n vocab_word[self.cleanup_fn(w)] += 1\n maxw = max(maxw, len(w))\n for k in w:\n vocab_ch[k] += 1\n num_words_in_files.append(num_words)\n\n self.max_word_length = min(maxw, self.max_word_length) if self.max_word_length > 0 else maxw\n\n print('Max word length %d' % self.max_word_length)\n\n vocab = {'char': vocab_ch, 'word': vocab_word }\n return vocab, num_words_in_files\n\n def load(self, filename, word2index, num_words, batchsz):\n\n words_vocab = word2index['word']\n chars_vocab = word2index['char']\n xch = np.zeros((num_words, self.max_word_length), np.int)\n x = np.zeros(num_words, np.int)\n i = 0\n with codecs.open(filename, encoding='utf-8', mode='r') as f:\n for line in f:\n sentence = line.split() + ['<EOS>']\n num_words += len(sentence)\n for w in sentence:\n x[i] = words_vocab.get(self.cleanup_fn(w), 0)\n nch = min(len(w), self.max_word_length)\n for k in range(nch):\n xch[i, k] = chars_vocab.get(w[k], 0)\n i += 1\n\n return baseline.data.SeqWordCharDataFeed(x, xch, self.nbptt, batchsz, self.max_word_length)\n\n\n@exporter\ndef create_lm_reader(max_word_length, nbptt, word_trans_fn, **kwargs):\n reader_type = kwargs.get('reader_type', 'default')\n\n if reader_type == 'default':\n reader = PTBSeqReader(max_word_length, nbptt, word_trans_fn)\n else:\n mod = import_user_module(\"reader\", reader_type)\n reader = mod.create_lm_reader(max_word_length, nbptt, word_trans_fn, **kwargs)\n return reader\n",
"id": "12298591",
"language": "Python",
"matching_score": 2.0993804931640625,
"max_stars_count": 2,
"path": "python/baseline/reader.py"
},
{
"content": "import os\nimport sys\nimport importlib\nfrom functools import partial, update_wrapper, wraps\nimport numpy as np\nimport addons\nimport json\n\n__all__ = []\n\ndef parameterize(func):\n @wraps(func)\n def decorator(*args, **kwargs):\n return lambda x: func(x, *args, **kwargs)\n return decorator\n\n@parameterize\ndef export(obj, all_list=None):\n \"\"\"Add a function or class to the __all__.\n\n When exporting something with out using as a decorator do it like so:\n `func = exporter(func)`\n \"\"\"\n all_list.append(obj.__name__)\n return obj\n\nexporter = export(__all__)\n\n\n@exporter\ndef crf_mask(vocab, span_type, s_idx, e_idx, pad_idx=None):\n \"\"\"Create a CRF mask.\n\n Returns a mask with invalid moves as 0 and valid as 1.\n \"\"\"\n rev_lut = {v: k for k, v in vocab.items()}\n start = rev_lut[s_idx]\n end = rev_lut[e_idx]\n pad = None if pad_idx is None else rev_lut[pad_idx]\n if span_type.upper() == \"IOB\":\n mask = iob_mask(vocab, start, end, pad)\n if span_type.upper() == \"IOB2\" or span_type.upper() == \"BIO\":\n mask = iob2_mask(vocab, start, end, pad)\n if span_type.upper() == \"IOBES\":\n mask = iobes_mask(vocab, start, end, pad)\n return mask\n\ndef iob_mask(vocab, start, end, pad=None):\n small = 0\n mask = np.ones((len(vocab), len(vocab)), dtype=np.float32)\n for from_ in vocab:\n for to in vocab:\n # Can't move to start\n if to is start:\n mask[vocab[to], vocab[from_]] = small\n # Can't move from end\n if from_ is end:\n mask[vocab[to], vocab[from_]] = small\n # Can only move from pad to pad or end\n if from_ is pad:\n if not(to is pad or to is end):\n mask[vocab[to], vocab[from_]] = small\n elif from_ is start:\n # Can't move from start to a B\n if to.startswith(\"B-\"):\n mask[vocab[to], vocab[from_]] = small\n else:\n if from_.startswith(\"B-\"):\n # Can't move from a B to a B of another type\n if to.startswith(\"B-\"):\n from_type = from_.split(\"-\")[1]\n to_type = to.split(\"-\")[1]\n if from_type != to_type:\n mask[vocab[to], vocab[from_]] = small\n elif from_.startswith(\"I-\"):\n # Can't move from an I to a B of another type\n if to.startswith(\"B-\"):\n from_type = from_.split(\"-\")[1]\n to_type = to.split(\"-\")[1]\n if from_type != to_type:\n mask[vocab[to], vocab[from_]] = small\n elif from_.startswith(\"O\"):\n # Can't move from an O to a B\n if to.startswith(\"B-\"):\n mask[vocab[to], vocab[from_]] = small\n return mask\n\ndef iob2_mask(vocab, start, end, pad=None):\n small = 0\n mask = np.ones((len(vocab), len(vocab)), dtype=np.float32)\n for from_ in vocab:\n for to in vocab:\n # Can't move to start\n if to is start:\n mask[vocab[to], vocab[from_]] = small\n # Can't move from end\n if from_ is end:\n mask[vocab[to], vocab[from_]] = small\n # Can only move from pad to pad or end\n if from_ is pad:\n if not(to is pad or to is end):\n mask[vocab[to], vocab[from_]] = small\n elif from_ is start:\n # Can't move from start to a I\n if to.startswith(\"I-\"):\n mask[vocab[to], vocab[from_]] = small\n else:\n if from_.startswith(\"B-\"):\n # Can't move from a B to an I of a different type\n if to.startswith(\"I-\"):\n from_type = from_.split(\"-\")[1]\n to_type = to.split(\"-\")[1]\n if from_type != to_type:\n mask[vocab[to], vocab[from_]] = small\n elif from_.startswith(\"I-\"):\n # Can't move from an I to an I of another type\n if to.startswith(\"I-\"):\n from_type = from_.split(\"-\")[1]\n to_type = to.split(\"-\")[1]\n if from_type != to_type:\n mask[vocab[to], vocab[from_]] = small\n elif from_.startswith(\"O\"):\n # Can't move from an O to 
an I\n if to.startswith(\"I-\"):\n mask[vocab[to], vocab[from_]] = small\n return(mask)\n\n\ndef iobes_mask(vocab, start, end, pad=None):\n small = 0\n mask = np.ones((len(vocab), len(vocab)), dtype=np.float32)\n for from_ in vocab:\n for to in vocab:\n # Can't move to start\n if to is start:\n mask[vocab[to], vocab[from_]] = small\n # Can't move from end\n if from_ is end:\n mask[vocab[to], vocab[from_]] = small\n # Can only move from pad to pad or to end\n if from_ is pad:\n if not(to is pad or to is end):\n mask[vocab[to], vocab[from_]] = small\n elif from_ is start:\n # Can't move from start to I or E\n if to.startswith(\"I-\") or to.startswith(\"E-\"):\n mask[vocab[to], vocab[from_]] = small\n else:\n if from_.startswith(\"B-\"):\n # Can't move from B to B, S, O, End, or Pad\n if (\n to.startswith(\"B-\") or\n to.startswith(\"S-\") or\n to.startswith(\"O\") or\n to is end or\n to is pad\n ):\n mask[vocab[to], vocab[from_]] = small\n # Can only move to matching I or E\n elif to.startswith(\"I-\") or to.startswith(\"E-\"):\n from_type = from_.split(\"-\")[1]\n to_type = to.split(\"-\")[1]\n if from_type != to_type:\n mask[vocab[to], vocab[from_]] = small\n elif from_.startswith(\"I-\"):\n # Can't move from I to B, S, O, End or Pad\n if (\n to.startswith(\"B-\") or\n to.startswith(\"S-\") or\n to.startswith(\"O\") or\n to is end or\n to is pad\n ):\n mask[vocab[to], vocab[from_]] = small\n # Can only move to matching I or E\n elif to.startswith(\"I-\") or to.startswith(\"E-\"):\n from_type = from_.split(\"-\")[1]\n to_type = to.split(\"-\")[1]\n if from_type != to_type:\n mask[vocab[to], vocab[from_]] = small\n elif (\n from_.startswith(\"E-\") or\n from_.startswith(\"I-\") or\n from_.startswith(\"S-\") or\n from_.startswith(\"O\")\n ):\n # Can't move from E to I or E\n # Can't move from I to I or E\n # Can't move from S to I or E\n # Can't move from O to I or E\n if to.startswith(\"I-\") or to.startswith(\"E-\"):\n mask[vocab[to], vocab[from_]] = small\n return mask\n\n@exporter\ndef listify(x):\n \"\"\"Take a scalar or list and make it a list\n\n :param x: The input to convert\n :return: A list\n \"\"\"\n if isinstance(x, (list, tuple, np.ndarray)):\n return x\n if x is None:\n return []\n return [x]\n\n@exporter\ndef get_version(pkg):\n s = '.'.join(pkg.__version__.split('.')[:2])\n return float(s)\n\n@exporter\ndef revlut(lut):\n return {v: k for k, v in lut.items()}\n\n\n@exporter\ndef str2bool(v):\n if v.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise Exception('Boolean value expected.')\n\n\n@exporter\ndef lowercase(x):\n return x.lower()\n\n\n@exporter\ndef read_json(filepath, default_value={}):\n \"\"\"Read a JSON file in. If no file is found and default value is set, return that instead. Otherwise error\n\n :param filepath: A file to load\n :param default_value: If the file doesnt exist, an alternate object to return, or if None, throw FileNotFoundError\n :return: A JSON object\n \"\"\"\n if not os.path.exists(filepath):\n if default_value is None:\n raise FileNotFoundError('No file [] found'.format(filepath))\n return default_value\n with open(filepath) as f:\n return json.load(f)\n\n\n@exporter\ndef read_config_file(config_file):\n \"\"\"Read a config file. This method optionally supports YAML, if the dependency was already installed. O.W. 
JSON plz\n\n :param config_file: (``str``) A path to a config file which should be a JSON file, or YAML if pyyaml is installed\n :return: (``dict``) An object\n \"\"\"\n with open(config_file) as f:\n if config_file.endswith('.yml'):\n import yaml\n return yaml.load(f)\n return json.load(f)\n\n\n@exporter\ndef write_json(content, filepath):\n with open(filepath, \"w\") as f:\n json.dump(content, f, indent=True)\n\n@exporter\ndef import_user_module(module_type, model_type):\n \"\"\"Load a module that is in the python path with a canonical name\n\n This method loads a user-defined model, which must exist in the `PYTHONPATH` and must also\n follow a fixed naming convention of `{module_type}_{model_type}.py`. The module is dynamically\n loaded, at which point its creator or loader function should be called to instantiate the model.\n This is essentially a plugin, but its implementation is trivial.\n\n :param module_type: one of `classifier`, `tagger`, `seq2seq`, `lang`\n :param model_type: A name for the model, which is the suffix\n :return:\n \"\"\"\n sys.path.append(os.path.dirname(os.path.realpath(addons.__file__)))\n module_name = \"%s_%s\" % (module_type, model_type)\n print('Loading user model %s' % module_name)\n mod = importlib.import_module(module_name)\n return mod\n\n@exporter\ndef create_user_model(input_, output_, **kwargs):\n \"\"\"Create a user-defined model\n\n This creates a model defined by the user.\n It first imports a module that must exist in the `PYTHONPATH`, with a named defined as\n `{task_type}_{model_type}.py`. Once created, this user-defined model can be trained within\n the existing training programs\n\n :param input_: Some type of word vectors for the input\n :param output_: Things passed dealing with the output\n :param kwargs:\n :return: A user-defined model\n \"\"\"\n model_type = kwargs['model_type']\n mod = import_user_module(kwargs['task_type'], model_type)\n return mod.create_model(input_, output_, **kwargs)\n\ndef wrapped_partial(func, name=None, *args, **kwargs):\n \"\"\"\n When we use `functools.partial` the `__name__` is not defined which breaks\n our export function so we use update wrapper to give it a `__name__`.\n\n :param name: A new name that is assigned to `__name__` so that the name\n of the partial can be different than the wrapped function.\n \"\"\"\n partial_func = partial(func, *args, **kwargs)\n update_wrapper(partial_func, func)\n if name is not None:\n partial_func.__name__ = name\n return partial_func\n\ncreate_user_classifier_model = exporter(\n wrapped_partial(\n create_user_model,\n task_type='classify',\n name='create_user_classifier_model'\n )\n)\ncreate_user_tagger_model = exporter(\n wrapped_partial(\n create_user_model,\n task_type='tagger',\n name='create_user_tagger_model'\n )\n)\ncreate_user_seq2seq_model = exporter(\n wrapped_partial(\n create_user_model,\n task_type='seq2seq',\n name='create_user_seq2seq_model'\n )\n)\n\n\n@exporter\ndef create_user_lang_model(embeddings, **kwargs):\n model_type = kwargs['model_type']\n mod = import_user_module('lang', model_type)\n return mod.create_model(embeddings, **kwargs)\n\n\n@exporter\ndef create_user_trainer(model, **kwargs):\n \"\"\"Create a user-defined trainer\n\n Given a model, create a custom trainer that will train the model. This requires that the trainer\n module lives in the `PYTHONPATH`, and is named `trainer_{trainer_type}`. 
Once instantiated, this trainer\n can be used by the `fit()` function within each task type\n\n :param model: The model to train\n :param kwargs:\n :return: A user-defined trainer\n \"\"\"\n model_type = kwargs['trainer_type']\n mod = import_user_module(\"trainer\", model_type)\n return mod.create_trainer(model, **kwargs)\n\n\n@exporter\ndef load_user_model(outname, **kwargs):\n \"\"\"Loads a user-defined model\n\n This loads a previously serialized model defined by the user.\n It first imports a module that must exist in the `PYTHONPATH`, with a named defined as\n `{task_type}_{model_type}.py`. Once loaded, this user-defined model can be used within the driver programs\n\n :param outname: The name of the file where the model is serialized\n :param kwargs:\n :return: A user-defined model\n \"\"\"\n model_type = kwargs['model_type']\n mod = import_user_module(kwargs['task_type'], model_type)\n return mod.load_model(outname, **kwargs)\n\n\nload_user_classifier_model = exporter(\n wrapped_partial(\n load_user_model,\n task_type='classify',\n name='load_user_classifier_model'\n )\n)\nload_user_tagger_model = exporter(\n wrapped_partial(\n load_user_model,\n task_type='tagger',\n name='load_user_tagger_model'\n )\n)\nload_user_seq2seq_model = exporter(\n wrapped_partial(\n load_user_model,\n task_type='seq2seq',\n name='load_user_seq2seq_model'\n )\n)\nload_user_lang_model = exporter(\n wrapped_partial(\n load_user_model,\n task_type='lm',\n name='load_user_lang_model'\n )\n)\n\n\n@exporter\ndef get_model_file(dictionary, task, platform):\n \"\"\"Model name file helper to abstract different DL platforms (FWs)\n\n :param dictionary:\n :param task:\n :param platform:\n :return:\n \"\"\"\n base = dictionary.get('outfile', './%s-model' % task)\n rid = os.getpid()\n if platform.startswith('pyt'):\n name = '%s-%d.pyt' % (base, rid)\n else:\n name = '%s-%s-%d' % (base, platform, rid)\n print('model file [%s]' % name)\n return name\n\n\n@exporter\ndef lookup_sentence(rlut, seq, reverse=False, padchar=''):\n \"\"\"Lookup a sentence by id and return words\n\n :param rlut: an index -> word lookup table\n :param seq: A temporal sequence\n :param reverse: (``bool``) Should reverse?\n :param padchar: What padding character to use when replacing with words\n :return:\n \"\"\"\n s = seq[::-1] if reverse else seq\n return (' '.join([rlut[idx] if rlut[idx] != '<PAD>' else padchar for idx in s])).strip()\n\n\n@exporter\ndef topk(k, probs):\n \"\"\"Get a sparse index (dictionary of top values).\n\n Note:\n mutates input for efficiency\n \"\"\"\n\n lut = {}\n i = 0\n\n while i < k:\n idx = np.argmax(probs)\n lut[idx] = probs[idx]\n probs[idx] = 0\n i += 1\n return lut\n\n@exporter\ndef beam_multinomial(k, probs):\n \"\"\"Prune all elements in a large probability distribution below the top K.\n \n Renormalize the distribution with only top K, and then sample n times out of that.\n \"\"\"\n\n tops = topk(k, probs)\n i = 0\n n = len(tops.keys())\n ary = np.zeros((n))\n idx = []\n for abs_idx, v in tops.items():\n ary[i] = v\n idx.append(abs_idx)\n i += 1\n\n ary /= np.sum(ary)\n sample_idx = np.argmax(np.random.multinomial(1, ary))\n return idx[sample_idx]\n\n\n@exporter\ndef fill_y(nc, yidx):\n \"\"\"Convert a `B` sparse array to a dense one, to expand labels \n \n :param nc: (``int``) The number of labels\n :param yidx: The sparse array of the labels\n :return: A dense array\n \"\"\"\n xidx = np.arange(0, yidx.shape[0], 1)\n dense = np.zeros((yidx.shape[0], nc), dtype=int)\n dense[xidx, yidx] = 1\n return 
dense\n\n\n@exporter\ndef seq_fill_y(nc, yidx):\n \"\"\"Convert a `BxT` sparse array to a dense one, to expand labels \n \n :param nc: (``int``) The number of labels\n :param yidx: The sparse array of the labels\n :return: A dense array\n \"\"\"\n batchsz = yidx.shape[0]\n siglen = yidx.shape[1]\n dense = np.zeros((batchsz, siglen, nc), dtype=np.int)\n for i in range(batchsz):\n for j in range(siglen):\n idx = int(yidx[i, j])\n if idx > 0:\n dense[i, j, idx] = 1\n\n return dense\n\n\n@exporter\ndef convert_iob_to_bio(ifile, ofile):\n \"\"\"Convert from IOB to BIO (IOB2)\n\n This code is copied from <NAME> (though I added comments)\n https://github.com/XuezheMax/NeuroNLP2/issues/9\n\n :param ifile: Original IOB format CONLL file\n :param ofile: BIO/IOB2 format\n \"\"\"\n with open(ifile, 'r') as reader, open(ofile, 'w') as writer:\n prev = 'O'\n for line in reader:\n line = line.strip()\n if len(line) == 0:\n prev = 'O'\n writer.write('\\n')\n continue\n\n tokens = line.split()\n # print tokens\n label = tokens[-1]\n # If this label is B or I and not equal to the previous\n if label != 'O' and label != prev:\n # If the last was O, it has to be a B\n if prev == 'O':\n label = 'B-' + label[2:]\n # Otherwise if the tags are different, it also has to be a B\n elif label[2:] != prev[2:]:\n label = 'B-' + label[2:]\n\n writer.write(' '.join(tokens[:-1]) + ' ' + label + '\\n')\n prev = tokens[-1]\n\n\n@exporter\ndef convert_bio_to_iobes(ifile, ofile):\n\n with open(ifile, 'r') as reader, open(ofile, 'w') as writer:\n lines = [line.strip() for line in reader]\n for i, line in enumerate(lines):\n\n tokens = line.split()\n if len(tokens) == 0:\n writer.write('\\n')\n continue\n\n label = tokens[-1]\n\n if i + 1 != len(lines):\n next_tokens = lines[i+1].split()\n if len(next_tokens) > 1:\n next_tag = next_tokens[-1]\n else:\n next_tag = None\n\n # Nothing to do for label == 'O'\n if label == 'O':\n updated_label = label\n\n # It could be S\n elif label[0] == 'B':\n if next_tag and next_tag[0] == 'I' and next_tag[2:] == label[2:]:\n updated_label = label\n else:\n updated_label = label.replace('B-', 'S-')\n\n elif label[0] == 'I':\n if next_tag and next_tag[0] == 'I':\n updated_label = label\n else:\n updated_label = label.replace('I-', 'E-')\n else:\n raise Exception('Invalid IOBES format!')\n\n writer.write(' '.join(tokens[:-1]) + ' ' + updated_label + '\\n')\n prev = tokens[-1]\n\n@exporter\ndef to_spans(sequence, lut, span_type):\n \"\"\"Turn a sequence of IOB chunks into single tokens.\"\"\"\n\n if span_type == 'iobes':\n return to_spans_iobes(sequence, lut)\n\n strict_iob2 = (span_type == 'iob2') or (span_type == 'bio')\n iobtype = 2 if strict_iob2 else 1\n chunks = []\n current = None\n\n for i, y in enumerate(sequence):\n label = lut[y]\n\n #if label.startswith('B-'):\n if not label.startswith('I-') and not label == 'O':\n if current is not None:\n chunks.append('@'.join(current))\n current = [label.replace('B-', ''), '%d' % i ]\n\n elif label.startswith('I-'):\n\n if current is not None:\n base = label.replace('I-', '')\n if base == current[0]:\n current.append('%d' % i)\n else:\n chunks.append('@'.join(current))\n if iobtype == 2:\n print('Warning, type=IOB2, unexpected format ([%s] follows other tag type [%s] @ %d)' % (label, current[0], i))\n\n current = [base, '%d' % i]\n\n else:\n current = [label.replace('I-', ''), '%d' % i]\n if iobtype == 2:\n print('Warning, unexpected format (I before B @ %d) %s' % (i, label))\n else:\n if current is not None:\n chunks.append('@'.join(current))\n 
current = None\n\n if current is not None:\n chunks.append('@'.join(current))\n\n return set(chunks)\n\n\ndef to_spans_iobes(sequence, lut):\n chunks = []\n current = None\n\n for i, y in enumerate(sequence):\n label = lut[y]\n\n # This indicates a multi-word chunk start\n if label.startswith('B-'):\n\n # Flush existing chunk\n if current is not None:\n chunks.append('@'.join(current))\n # Create a new chunk\n current = [label.replace('B-', ''), '%d' % i]\n\n # This indicates a single word chunk\n elif label.startswith('S-'):\n\n # Flush existing chunk, and since this is self-contained, we will clear current\n if current is not None:\n chunks.append('@'.join(current))\n current = None\n\n base = label.replace('S-', '')\n # Write this right into the chunks since self-contained\n chunks.append('@'.join([base, '%d' % i]))\n\n # Indicates we are inside of a chunk already\n elif label.startswith('I-'):\n\n # This should always be the case!\n if current is not None:\n base = label.replace('I-', '')\n if base == current[0]:\n current.append('%d' % i)\n else:\n chunks.append('@'.join(current))\n print('Warning: I without matching previous B/I @ %d' % i)\n current = [base, '%d' % i]\n\n else:\n print('Warning: I without a previous chunk @ %d' % i)\n current = [label.replace('I-', ''), '%d' % i]\n\n # We are at the end of a chunk, so flush current\n elif label.startswith('E-'):\n\n # Flush current chunk\n if current is not None:\n base = label.replace('E-', '')\n if base == current[0]:\n current.append('%d' % i)\n chunks.append('@'.join(current))\n current = None\n else:\n chunks.append('@'.join(current))\n print('Warning: E doesnt agree with previous B/I type!')\n current = [base, '%d' % i]\n chunks.append('@'.join(current))\n current = None\n\n # This should never happen\n else:\n current = [label.replace('E-', ''), '%d' % i]\n print('Warning, E without previous chunk! @ %d' % i)\n chunks.append('@'.join(current))\n current = None\n # Outside\n else:\n if current is not None:\n chunks.append('@'.join(current))\n current = None\n\n # If something is left, flush\n if current is not None:\n chunks.append('@'.join(current))\n\n return set(chunks)\n\n\n@exporter\ndef f_score(overlap_count, gold_count, guess_count, f=1):\n beta_sq = f*f\n if guess_count == 0: return 0.0\n precision = overlap_count / float(guess_count)\n recall = overlap_count / float(gold_count)\n if precision == 0.0 or recall == 0.0:\n return 0.0\n f = (1. + beta_sq) * (precision * recall) / (beta_sq * precision + recall)\n return f\n\n",
"id": "6380677",
"language": "Python",
"matching_score": 1.2635650634765625,
"max_stars_count": 2,
"path": "python/baseline/utils.py"
},
{
"content": "__version__ = \"0.2.4dev\"\n",
"id": "1440826",
"language": "Python",
"matching_score": 0.02020411007106304,
"max_stars_count": 2,
"path": "python/baseline/version.py"
}
] | 2.053101 |
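The `f_score` helper at the end of the `baseline/utils.py` content above turns overlap/gold/guess chunk counts into an F-measure (precision = overlap/guess, recall = overlap/gold). A minimal standalone sketch of the same arithmetic, with made-up counts purely for illustration:

    def f_beta(overlap_count, gold_count, guess_count, beta=1.0):
        # Precision: fraction of guessed chunks that are correct.
        if guess_count == 0:
            return 0.0
        precision = overlap_count / float(guess_count)
        # Recall: fraction of gold chunks that were recovered.
        recall = overlap_count / float(gold_count)
        if precision == 0.0 or recall == 0.0:
            return 0.0
        beta_sq = beta * beta
        return (1.0 + beta_sq) * precision * recall / (beta_sq * precision + recall)

    # Hypothetical counts: 8 correct chunks out of 10 guessed, 12 gold chunks.
    print(round(f_beta(8, 12, 10), 3))  # 0.727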
Gladtbam | [
{
"content": "import os\nimport json\nimport xml.etree.ElementTree as ET\nimport requests\nfrom zipfile import ZipFile\nfrom io import BytesIO\nimport time\nfrom traceback import print_exc\nimport subprocess\n\n\nHEADERS = {\"Referer\": \"https://github.com/MCOfficer/scoop-nirsoft\"}\n\n\ndef probe_for_exe(url):\n print(\"Downloading \" + url + \"...\")\n r = requests.get(\n url, headers=HEADERS)\n r.raise_for_status()\n with ZipFile(BytesIO(r.content)) as z:\n for name in z.namelist():\n if name.endswith(\".exe\"):\n return name\n\n\nif __name__ == '__main__':\n print(\"Fetching Padfile links\")\n pads = requests.get(\"https://www.nirsoft.net/pad/pad-links.txt\").text\n\n i = 0\n for line in pads.splitlines():\n i += 1\n print(\"\")\n print(\"Generating from \" + line + \" (\" + str(i) +\n \"/\" + str(len(pads.splitlines())) + \")\")\n\n try:\n padfile = requests.get(line).text\n root = ET.fromstring(padfile)\n\n info = root.find(\"Program_Info\")\n version = info.find(\"Program_Version\").text\n full_name = info.find(\"Program_Name\").text\n\n web_info = root.find(\"Web_Info\")\n website = web_info.find(\"Application_URLs\").find(\n \"Application_Info_URL\").text.replace(\"http:\", \"https:\")\n download = web_info.find(\"Download_URLs\").find(\n \"Primary_Download_URL\").text.replace(\"http:\", \"https:\")\n download64 = download.replace(\".zip\", \"-x64.zip\")\n name = os.path.splitext(os.path.basename(line))[0]\n\n bin = probe_for_exe(download)\n if not bin:\n print(\"No executable found! Skipping\")\n\n description = \"\"\n shortcut = \"NirSoft\\\\\" + full_name\n try:\n descriptions = root.find(\n \"Program_Descriptions\").find(\"English\")\n description = descriptions.find(\"Char_Desc_80\").text\n except AttributeError:\n pass\n\n print(\"Checking 64-bit download url\")\n r = requests.head(download64, headers=HEADERS)\n x64 = bool(r.ok)\n if not x64:\n print(\"64-bit download unavailable\")\n\n manifest = {\n \"version\": version,\n \"homepage\": website,\n \"url\": download,\n \"bin\": bin,\n \"shortcuts\": [\n [bin, shortcut]\n ],\n \"persist\": [\n name + \"_lng.ini\",\n name + \".cfg\"\n ],\n \"hash\": \"tbd\",\n \"architecture\": \"\",\n \"description\": description,\n \"license\": \"Freeware\",\n \"notes\": \"If this application is useful to you, please consider donating to nirsoft.\",\n \"checkver\": {\n \"url\": \"https://www.nirsoft.net/pad/\" + name + \".xml\",\n \"xpath\": \"/XML_DIZ_INFO/Program_Info/Program_Version\"\n },\n \"autoupdate\": {\n \"url\": download\n }\n }\n\n if x64:\n manifest.pop(\"url\")\n manifest.pop(\"hash\")\n manifest[\"autoupdate\"] = {\n \"architecture\": {\n \"64bit\": {\n \"url\": download64\n },\n \"32bit\": {\n \"url\": download\n }\n },\n }\n manifest[\"architecture\"] = {\n \"64bit\": {\n \"url\": download64,\n \"hash\": \"tbd\"\n },\n \"32bit\": {\n \"url\": download,\n \"hash\": \"tbd\"\n }\n }\n else:\n manifest.pop(\"architecture\")\n manifest[\"url\"] = download\n manifest[\"hash\"] = \"tbd\"\n\n with open(\"bucket/\" + name + \".json\", \"w\") as j:\n json.dump(manifest, j, indent=1)\n\n except Exception as e:\n print_exc()\n\n print(\"\")\n print(\"Running checkver -f\")\n subprocess.run([\"powershell\", \"-Command\", r\".\\bin\\checkver.ps1\", \"-f\"])\n",
"id": "9348502",
"language": "Python",
"matching_score": 0,
"max_stars_count": 3,
"path": "nirsoft/update.py"
}
] | 0 |
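The `nirsoft/update.py` content above walks NirSoft PAD files, reading the program version from `Program_Info` and the primary download URL from `Web_Info`, then rewriting it to https and guessing an `-x64.zip` variant. A small offline sketch of just that extraction step, run against a made-up inline PAD snippet instead of a live request to nirsoft.net:

    import xml.etree.ElementTree as ET

    # Made-up PAD-style XML, for illustration only.
    pad_xml = """<XML_DIZ_INFO>
      <Program_Info>
        <Program_Name>ExampleTool</Program_Name>
        <Program_Version>1.23</Program_Version>
      </Program_Info>
      <Web_Info>
        <Download_URLs>
          <Primary_Download_URL>http://example.com/exampletool.zip</Primary_Download_URL>
        </Download_URLs>
      </Web_Info>
    </XML_DIZ_INFO>"""

    root = ET.fromstring(pad_xml)
    version = root.find("Program_Info").find("Program_Version").text
    download = root.find("Web_Info").find("Download_URLs").find("Primary_Download_URL").text
    download = download.replace("http:", "https:")        # same http -> https rewrite as the script
    download64 = download.replace(".zip", "-x64.zip")     # same 64-bit URL guess as the script
    print(version, download, download64)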
anthemwingate | [
{
"content": "import pytest\nimport traceback\nimport logging\nfrom collections import defaultdict\nfrom contextlib import contextmanager\n\nimport tensorflow as tf\ntf.compat.v1.enable_eager_execution()\nimport mesh_tensorflow as mtf\nfrom mesh_tensorflow import placement_mesh_impl\n\nfrom inputs import mlm_sample_text\nfrom models.gpt2 import gpt2\nfrom models.utils import biasmask_attn_weights, entmax, sample_categorical\n\nfrom sample import sample_autoregressive\n\n# helper functions\n\n@contextmanager\ndef not_raises(exception):\n try:\n yield\n except exception:\n logging.error(traceback.format_exc())\n raise pytest.fail(\"DID RAISE {0}\".format(exception))\n\n# fixtures\n\nparams = defaultdict(lambda: None, {\n \"n_head\": 1,\n \"n_ctx\": 4,\n \"n_embd\": 1,\n \"n_vocab\": 256,\n \"embed_dropout\": 0.,\n \"n_layer\": 2,\n \"num_microbatches\": 1,\n \"train_batch_size\": 1,\n \"causal\": True,\n \"attention_types\": ['global', 'local'],\n \"res_dropout\": 0.1,\n \"axial_pos_emb\": (32, 32),\n \"activation_function\": \"gelu\",\n \"moe_layers\": (1,),\n \"num_mem_kv\": 16,\n \"no_weight_tie\": True,\n \"moe_params\": {\n 'moe_dropout_rate': 0.0\n },\n \"mesh_shape\": [],\n \"layout\": {},\n \"local_attention_radius\": 128,\n \"share_parameters\": True,\n \"rezero\": True\n})\n\n# tests\n\ndef test_model():\n graph = mtf.Graph()\n mesh = mtf.Mesh(graph, \"my_mesh\")\n\n seq_len = params[\"n_ctx\"]\n\n batch_dim = mtf.Dimension(\"batch\", 1)\n sequence_dim = mtf.Dimension(\"sequence\", seq_len)\n\n features = {\n 'inputs': mtf.ones(mesh, mtf.Shape((batch_dim, sequence_dim)), tf.int32),\n 'labels': mtf.ones(mesh, mtf.Shape((batch_dim, sequence_dim)), tf.int32)\n } \n\n # create mask\n\n num_mem_kv = params.get('num_mem_kv', 0)\n length_dim = mtf.Dimension('sequence', seq_len)\n memory_length_dim = mtf.Dimension('memory_length', seq_len + num_mem_kv)\n embed_sequence_dim = mtf.Dimension('embed_sequence', seq_len)\n embd_dim = mtf.Dimension(\"embd\", params[\"n_embd\"])\n vocab_dim = mtf.Dimension(\"vocab\", params[\"n_vocab\"])\n\n other_features = {}\n variable_dtype = mtf.VariableDType(tf.float32, tf.float32, tf.float32)\n\n other_features[\"attn_bias\"] = biasmask_attn_weights(mesh, length_dim, memory_length_dim, variable_dtype)\n other_features[\"embd_dim\"] = embd_dim\n other_features[\"vocab_dim\"] = vocab_dim\n other_features[\"embed_sequence_dim\"] = embed_sequence_dim\n other_features[\"memory_length_dim\"] = memory_length_dim\n\n with not_raises(Exception):\n logits, _, _ = gpt2.model(features, other_features, params, mesh, variable_dtype=variable_dtype)\n\n mesh_impl = placement_mesh_impl.PlacementMeshImpl(shape=[], layout={}, devices=[\"\"])\n lowering = mtf.Lowering(graph, {mesh: mesh_impl})\n logits = lowering.export_to_tf_tensor(logits)\n\n\ndef test_sampling():\n graph = mtf.Graph()\n mesh = mtf.Mesh(graph, \"my_mesh\")\n\n batch_dim = mtf.Dimension(\"batch\", 1)\n sequence_dim = mtf.Dimension(\"sequence\", 1)\n\n inputs = mtf.ones(mesh, mtf.Shape((batch_dim, sequence_dim)), tf.int32)\n inputs = mtf.pad(inputs, [0, 3], sequence_dim.name)\n\n # create mask\n\n seq_len = params[\"n_ctx\"]\n num_mem_kv = params.get('num_mem_kv', 0)\n length_dim = mtf.Dimension('sequence', seq_len)\n memory_length_dim = mtf.Dimension('memory_length', seq_len + num_mem_kv)\n embed_sequence_dim = mtf.Dimension('embed_sequence', seq_len)\n embd_dim = mtf.Dimension(\"embd\", params[\"n_embd\"])\n vocab_dim = mtf.Dimension(\"vocab\", params[\"n_vocab\"])\n\n other_features = {}\n\n 
other_features[\"attn_bias\"] = biasmask_attn_weights(mesh, length_dim, memory_length_dim, mtf.VariableDType(tf.float32))\n other_features[\"embd_dim\"] = embd_dim\n other_features[\"vocab_dim\"] = vocab_dim\n other_features[\"embed_sequence_dim\"] = embed_sequence_dim\n other_features[\"memory_length_dim\"] = memory_length_dim\n\n params[\"mode\"] = \"predict\"\n\n with not_raises(Exception):\n samples = sample_autoregressive(\n inputs, other_features=other_features, params=params, variable_dtype=mtf.VariableDType(),\n remove_partial_sequences=params[\"remove_partial_sequences\"], stop_at_token=params[\"eos_id\"], sampling_use_entmax=True)\n\n mesh_impl = placement_mesh_impl.PlacementMeshImpl(shape=[], layout={}, devices=[\"\"])\n lowering = mtf.Lowering(graph, {mesh: mesh_impl})\n samples = lowering.export_to_tf_tensor(samples)\n\n# mlm\n\nmlm_params = defaultdict(lambda: None, {\n \"n_head\": 1,\n \"n_ctx\": 4,\n \"n_embd\": 1,\n \"n_vocab\": 256,\n \"embed_dropout\": 0.,\n \"n_layer\": 2,\n \"num_microbatches\": 1,\n \"train_batch_size\": 1,\n \"attention_types\": ['global', 'local'],\n \"res_dropout\": 0.1,\n \"mesh_shape\": [],\n \"layout\": {},\n \"share_parameters\": True,\n \"mlm_training\": True,\n \"mlm_mask_id\": 3,\n \"mlm_cls_token_id\": 4,\n \"mlm_random_token_prob\": 0.1\n})\n\ndef test_mlm_sample_text():\n document = tf.random.normal((16,))\n with not_raises(Exception):\n features, labels = mlm_sample_text(mlm_params, document, random_documents = True)\n assert features.shape == (mlm_params['n_ctx'],)\n\n# entmax\n\ndef test_entmax():\n graph = mtf.Graph()\n mesh = mtf.Mesh(graph, \"my_mesh\")\n length = mtf.Dimension(\"tensor_length\", 8)\n tensor = mtf.range(mesh, length, tf.float32)\n output = entmax(tensor)\n grad = mtf.gradients([output], [tensor])[0]\n sample = sample_categorical(output, length)\n\n mesh_impl = placement_mesh_impl.PlacementMeshImpl(shape=[], layout={}, devices=[\"\"])\n lowering = mtf.Lowering(graph, {mesh: mesh_impl})\n sample = lowering.export_to_tf_tensor(sample)\n grad = lowering.export_to_tf_tensor(grad)\n",
"id": "10301246",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "test_models.py"
}
] | 0 |
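The `test_models.py` content above builds its hyperparameter dictionaries as `defaultdict(lambda: None, {...})`, so keys the tests never set (e.g. `remove_partial_sequences`, `eos_id`) read back as `None` rather than raising `KeyError`. A tiny sketch of that pattern in isolation:

    from collections import defaultdict

    # Unset hyperparameters come back as None instead of raising KeyError.
    params = defaultdict(lambda: None, {"n_ctx": 4, "n_layer": 2})
    print(params["n_ctx"])                     # 4
    print(params["remove_partial_sequences"])  # None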
mmorandi | [
{
"content": "from dearpygui.core import *\nfrom dearpygui.simple import *\n\nset_main_window_size(500, 500)\n\n# callback\ndef retrieve_callback(sender, callback):\n\n show_logger()\n log_info(get_value(\"Regular##inputtext\"))\n log_info(get_value(\"With hint##inputtext\"))\n log_info(get_value(\"No spaces##inputtext\"))\n log_info(get_value(\"Uppercase##inputtext\"))\n log_info(get_value(\"Decimal##inputtext\"))\n log_info(get_value(\"Hexadecimal##inputtext\"))\n log_info(get_value(\"Read Only##inputtext\"))\n log_info(get_value(\"Password##inputtext\"))\n log_info(get_value(\"Multiline##inputtext\"))\n\nadd_text(\"This example demonstrates the input text widget.\", bullet=True)\nadd_text(\"Press the 'Retrieve' button to display the inputed values in the logger\", wrap = 500, bullet=True)\n\nadd_input_text(\"Regular##inputtext\")\nadd_input_text(\"With hint##inputtext\", hint=\"A hint\")\nadd_input_text(\"No spaces##inputtext\", no_spaces=True)\nadd_input_text(\"Uppercase##inputtext\", uppercase=True)\nadd_input_text(\"Decimal##inputtext\", decimal=True)\nadd_input_text(\"Hexadecimal##inputtext\", hexadecimal=True)\nadd_input_text(\"Read Only##inputtext\", readonly=True, default_value=\"read only\")\nadd_input_text(\"Password##inputtext\", password=True)\nadd_input_text(\"Multiline##inputtext\", multiline=True)\n\nadd_button(\"Retrieve\", callback=retrieve_callback)\n\nstart_dearpygui()\n",
"id": "6632247",
"language": "Python",
"matching_score": 1.5967433452606201,
"max_stars_count": 0,
"path": "Examples/input_text_example.py"
},
{
"content": "from dearpygui.core import *\nfrom dearpygui.simple import *\nshow_logger()\ndef window_creator(sender, data):\n\n title = get_value(\"Window Title##input\")\n width = get_value(\"Width\")\n height = get_value(\"Height\")\n start_x = get_value(\"Start x\")\n start_y = get_value(\"Start y\")\n autosize = get_value(\"No Autosize\")\n resizable = get_value(\"No Resizable\")\n movable = get_value(\"No Movable\")\n title_bar = get_value(\"No Title bar\")\n\n if title_bar:\n show_item(\"Close Window\")\n\n with window(title, width, height, x_pos=start_x, y_pos=start_y,\n autosize=autosize, no_resize=resizable, no_title_bar=title_bar,\n no_move=movable, on_close=on_window_close):\n for i in range(0, 10):\n add_button(\"button\" + str(i))\n\n hide_item(\"Create Window\")\n\n\ndef on_window_close(sender, data):\n show_item(\"Create Window\")\n delete_item(sender)\n\n\ndef close_window(sender, data):\n show_item(\"Create Window\")\n hide_item(\"Close Window\")\n delete_item(get_value(\"Window Title##input\"))\n\nadd_input_text(\"Window Title##input\", default_value=\"Window Title\")\nadd_input_int(\"Width\", default_value=400)\nadd_input_int(\"Height\", default_value=500)\nadd_input_int(\"Start x\", default_value=50)\nadd_input_int(\"Start y\", default_value=50)\nadd_checkbox(\"No Autosize\")\nadd_checkbox(\"No Resizable\", default_value=True)\nadd_checkbox(\"No Movable\", default_value=True)\nadd_checkbox(\"No Title bar\")\nadd_button(\"Create Window\", callback=window_creator)\nadd_button(\"Close Window\", callback=close_window)\nhide_item(\"Close Window\")\n\nstart_dearpygui()\n",
"id": "7783241",
"language": "Python",
"matching_score": 1.0459134578704834,
"max_stars_count": 0,
"path": "Examples/window_example.py"
},
{
"content": "from dearpygui.core import *\nfrom dearpygui.simple import *\n\n# callbacks\ndef hideMenu(sender, data):\n hide_item(\"Tools\")\n\ndef showMenu(sender, data):\n show_item(\"Tools\")\n\ndef changeCallback(sender, data):\n callbackName=get_item_callback(\"Show Docs\")\n print(callbackName)\n if callbackName == showDocs:\n set_item_callback(\"Show Docs\", showMetrics)\n else:\n set_item_callback(\"Show Docs\", showDocs)\n\ndef showDocs(sender, data):\n show_documentation()\n\ndef showMetrics(sender, data):\n show_metrics()\n\ndef addThemes(sender, data):\n with menu(\"Themes\", parent=\"MenuBar\"):\n pass\n add_color_picker4(\"Color Selector\", source=\"color1\", parent=\"Themes\")\n add_color_edit4(\"Color Item\", source=\"color1\")\n show_item(\"Delete Themes\")\n hide_item(\"Add Themes\")\n\ndef deleteThemes(sender, data):\n delete_item(\"Themes\")\n delete_item(\"Color Item\")\n show_item(\"Add Themes\")\n hide_item(\"Delete Themes\")\n\nwith menu_bar(\"MenuBar\"):\n with menu(\"Show/Hide\"):\n add_menu_item(\"Show Tools\", callback=showMenu)\n add_menu_item(\"Hide Tools\", callback=hideMenu)\n add_menu_item('Change \"Show Docs\" Callback', callback=changeCallback)\n with tooltip('Change \"Show Docs\" Callback', \"tooltip1\"):\n add_text('this will change the \"show Docs\" callback to a show metrics callback')\n with menu(\"Empty Menu\"):\n add_menu_item(\"Nothing\")\n with menu(\"Tools\"):\n add_menu_item(\"Show Docs\", callback=showDocs)\n with menu(\"Add/Remove\"):\n add_menu_item(\"Add Themes\", callback=addThemes)\n add_menu_item(\"Delete Themes\", callback=deleteThemes)\n hide_item(\"Delete Themes\")\n\n\n\nadd_text(\"This menu bar demonstrates:\")\nadd_text('standard menu bar, menus, and menu items', bullet=True)\nadd_text('adding menus to menus', bullet=True)\nadd_text('showing and hiding the \"Tools menu\"', bullet=True)\nadd_text(\"changing the callback of an already existing menu item\", bullet=True)\nadd_text(\"adding and deleting menus, menu items, app widgets from a menu item\", bullet=True)\nadd_text(\"placing a widget into the menu that controlling another widget on the body of the app\", bullet=True)\nadd_spacing(count = 50)\n\nstart_dearpygui()",
"id": "8226519",
"language": "Python",
"matching_score": 1.717048168182373,
"max_stars_count": 0,
"path": "Examples/menus_advanced_example.py"
},
{
"content": "from dearpygui.core import *\nfrom dearpygui.simple import *\n\nadd_text(\"Tips\")\nadd_text(\"The data storage system allows you to save python objects and \"\n \"retrieve them later. Usually between different callbacks\", bullet=True)\nadd_text(\"It can also be used to have widgets with similar underlying \"\n \"data to share their data live.\", bullet=True)\nadd_text(\"Move one of the widgets around\", bullet=True)\n\nadd_spacing()\nadd_separator()\nadd_spacing()\n\nadd_slider_float(\"Slider 1\", default_value=0.5, vertical=True, source=\"float1\", max_value=1)\nadd_progress_bar(\"Progress 1\", 0.5, source=\"float1\")\n\nadd_spacing(count=5)\nadd_separator()\nadd_spacing(count=5)\n\nadd_input_float3(\"Input Float3\", default_value=[23, 42, 53, 255], source=\"color1\")\nadd_drag_float3(\"Drag Float3\", min_value=0, max_value=255, speed=1, source=\"color1\")\nadd_slider_float3(\"Slider Float3\", min_value=0, max_value=255, source=\"color1\")\nadd_color_edit3(\"Color Edit3\",source=\"color1\")\nadd_color_picker3(\"Color Picker3\", width=300,source=\"color1\")\n\nstart_dearpygui()",
"id": "10366352",
"language": "Python",
"matching_score": 1.1541436910629272,
"max_stars_count": 0,
"path": "Examples/data_storage_example.py"
},
{
"content": "from dearpygui.core import *\nfrom dearpygui.simple import *\n\n# callback\ndef Update(sender, data):\n uvmin = get_value(\"uv_min\")\n uvmax = get_value(\"uv_max\")\n uvminx = uvmin[0]\n uvminy = uvmin[1]\n uvmaxx = uvmax[0]\n uvmaxy = uvmax[1]\n\n add_data(\"TextureCoordinates\", [uvminx, uvminy, uvmaxx, uvmaxy])\n\n print(get_data(\"TextureCoordinates\"))\n\nadd_input_float2(\"uv_min\", default_value=(0, 0), callback=Update)\nadd_input_float2(\"uv_max\", default_value=(1, 1), callback=Update)\n\nadd_data(\"TextureCoordinates\", [0, 0, 1, 1])\n\nadd_image(\"Partial\", \"../../Examples/SpriteMapExample.png\")\nadd_image(\"Partial\", \"../../Examples/SpriteMapExample.png\", uv_min=[get_data(\"TextureCoordinates\")[0],get_data(\"TextureCoordinates\")[1]], uv_max=[get_data(\"TextureCoordinates\")[2],get_data(\"TextureCoordinates\")[3]])\nshow_logger()\nstart_dearpygui()",
"id": "11494878",
"language": "Python",
"matching_score": 1.0423871278762817,
"max_stars_count": 0,
"path": "Examples/image_example.py"
},
{
"content": "from dearpygui.core import *\nfrom dearpygui.simple import *\nfrom math import sin, cos\nimport random\n\n########################################################################################################################\n# Helpers\n########################################################################################################################\n\ndef helpmarker(message):\n add_same_line()\n add_text(\"(?)\", color=[150, 150, 150], tip=message)\n\ndef hsv_to_rgb(h, s, v):\n if s == 0.0: return (v, v, v)\n i = int(h*6.) # XXX assume int() truncates!\n f = (h*6.)-i; p,q,t = v*(1.-s), v*(1.-s*f), v*(1.-s*(1.-f)); i%=6\n if i == 0: return (255*v, 255*t, 255*p)\n if i == 1: return (255*q, 255*v, 255*p)\n if i == 2: return (255*p, 255*v, 255*t)\n if i == 3: return (255*p, 255*q, 255*v)\n if i == 4: return (255*t, 255*p, 255*v)\n if i == 5: return (255*v, 255*p, 255*q)\n\ndef demo_main_callback(sender, data):\n\n set_value(\"Mouse Position##demo\", str(get_mouse_pos()))\n\n # keys\n if is_key_down(mvKey_A):\n set_value(\"A key Down##demo\", \"True\")\n else:\n set_value(\"A key Down##demo\", \"False\")\n\n if is_key_pressed(mvKey_W):\n set_value(\"W key Pressed##demo\", \"True\")\n else:\n set_value(\"W key Pressed##demo\", \"False\")\n\n if is_key_released(mvKey_Q):\n set_value(\"Q key Released##demo\", \"True\")\n else:\n set_value(\"Q key Released##demo\", \"False\")\n\n # mouse dragging\n if is_mouse_button_dragging(mvMouseButton_Left, 10):\n set_value(\"Left Mouse Dragging##demo\", \"True\")\n else:\n set_value(\"Left Mouse Dragging##demo\", \"False\")\n\n if is_mouse_button_dragging(mvMouseButton_Right, 10):\n set_value(\"Right Mouse Dragging##demo\", \"True\")\n else:\n set_value(\"Right Mouse Dragging##demo\", \"False\")\n\n if is_mouse_button_dragging(mvMouseButton_Middle, 10):\n set_value(\"Middle Mouse Dragging##demo\", \"True\")\n else:\n set_value(\"Middle Mouse Dragging##demo\", \"False\")\n\n # mouse down\n if is_mouse_button_down(mvMouseButton_Left):\n set_value(\"Left Mouse Down##demo\", \"True\")\n else:\n set_value(\"Left Mouse Down##demo\", \"False\")\n\n if is_mouse_button_down(mvMouseButton_Right):\n set_value(\"Right Mouse Down##demo\", \"True\")\n else:\n set_value(\"Right Mouse Down##demo\", \"False\")\n\n if is_mouse_button_down(mvMouseButton_Middle):\n set_value(\"Middle Mouse Down##demo\", \"True\")\n else:\n set_value(\"Middle Mouse Down##demo\", \"False\")\n\n # mouse clicked\n if is_mouse_button_clicked(mvMouseButton_Left):\n set_value(\"Left Mouse Clicked##demo\", \"True\")\n else:\n set_value(\"Left Mouse Clicked\", \"False\")\n\n if is_mouse_button_clicked(mvMouseButton_Right):\n set_value(\"Right Mouse Clicked##demo\", \"True\")\n else:\n set_value(\"Right Mouse Clicked##demo\", \"False\")\n\n if is_mouse_button_clicked(mvMouseButton_Middle):\n set_value(\"Middle Mouse Clicked##demo\", \"True\")\n else:\n set_value(\"Middle Mouse Clicked##demo\", \"False\")\n\n # mouse double clicked\n if is_mouse_button_double_clicked(mvMouseButton_Left):\n set_value(\"Left Mouse Double Clicked##demo\", \"True\")\n else:\n set_value(\"Left Mouse Double Clicked##demo\", \"False\")\n\n if is_mouse_button_double_clicked(mvMouseButton_Right):\n set_value(\"Right Mouse Double Clicked##demo\", \"True\")\n else:\n set_value(\"Right Mouse Double Clicked##demo\", \"False\")\n\n if is_mouse_button_double_clicked(mvMouseButton_Middle):\n set_value(\"Middle Mouse Double Clicked##demo\", \"True\")\n else:\n set_value(\"Middle Mouse Double Clicked##demo\", 
\"False\")\n\n # mouse released\n if is_mouse_button_released(mvMouseButton_Left):\n set_value(\"Left Mouse Released##demo\", \"True\")\n else:\n set_value(\"Left Mouse Released##demo\", \"False\")\n\n if is_mouse_button_released(mvMouseButton_Right):\n set_value(\"Right Mouse Released##demo\", \"True\")\n else:\n set_value(\"Right Mouse Released##demo\", \"False\")\n\n if is_mouse_button_released(mvMouseButton_Middle):\n set_value(\"Middle Mouse Released##demo\", \"True\")\n else:\n set_value(\"Middle Mouse Released##demo\", \"False\")\n\ndef show_demo():\n\n set_render_callback(demo_main_callback)\n\n def on_demo_close(sender, data):\n delete_item(\"Dear PyGui Demo\")\n set_render_callback(None)\n set_mouse_down_callback(None)\n set_mouse_drag_callback(None, 10)\n set_mouse_move_callback(None)\n set_mouse_double_click_callback(None)\n set_mouse_click_callback(None)\n set_mouse_release_callback(None)\n set_mouse_wheel_callback(None)\n set_key_down_callback(None)\n set_key_press_callback(None)\n set_key_release_callback(None)\n\n with window(\"Dear PyGui Demo\", x_pos=100, y_pos=100, width=800, height=800, on_close=on_demo_close):\n\n with menu_bar(\"MenuBar##demo\"):\n\n with menu(\"Menu##demo\"):\n add_menu_item(\"New##demomenu\")\n add_menu_item(\"Open##demomenu\", shortcut=\"Ctrl+O\")\n with menu(\"Open Recent##demo\"):\n add_menu_item(\"harrell.c##demo\")\n add_menu_item(\"patty.h##demo\")\n add_menu_item(\"nick.py##demo\")\n add_menu_item(\"Save##demo\", shortcut=\"Ctrl+S\")\n add_menu_item(\"Save As..#demo\")\n add_separator()\n with menu(\"Options##demomenu\"):\n add_menu_item(\"Enabled##demo\", check=True)\n with child(\"childmenu##demo\", height=60, autosize_x=True):\n for i in range(0, 10):\n add_text(f\"Scrolling Text {i}\")\n add_slider_float(\"Value##demomenu\")\n add_input_float(\"Input##demomenu\")\n add_combo(\"Combo##demomenu\", items=[\"Yes\", \"No\", \"Maybe\"])\n add_checkbox(\"Some Option##demomenu\")\n\n\n with menu(\"Themes##demo\"):\n add_menu_item(\"Dark\", callback = lambda sender, data: set_theme(sender), check=True)\n add_menu_item(\"Light\", callback = lambda sender, data: set_theme(sender), check=True)\n add_menu_item(\"Classic\", callback = lambda sender, data: set_theme(sender), check=True)\n add_menu_item(\"Dark 2\", callback = lambda sender, data: set_theme(sender), check=True)\n add_menu_item(\"Grey\", callback = lambda sender, data: set_theme(sender), check=True)\n add_menu_item(\"Dark Grey\", callback = lambda sender, data: set_theme(sender), check=True)\n add_menu_item(\"Cherry\", callback = lambda sender, data: set_theme(sender), check=True)\n add_menu_item(\"Purple\", callback = lambda sender, data: set_theme(sender), check=True)\n add_menu_item(\"Gold\", callback = lambda sender, data: set_theme(sender), check=True)\n add_menu_item(\"Red\", callback = lambda sender, data: set_theme(sender), check=True)\n\n with menu(\"Tools##demo\"):\n add_menu_item(\"Show Logger##demo\", callback=show_logger)\n add_menu_item(\"Show About##demo\", callback=show_about)\n add_menu_item(\"Show Metrics##demo\", callback=show_metrics)\n add_menu_item(\"Show Documentation##demo\", callback=show_documentation)\n add_menu_item(\"Show Debug##demo\", callback=show_debug)\n add_menu_item(\"Show Style Editor##demo\", callback=show_style_editor)\n\n add_text(f'Dear PyGui says hello. 
({get_dearpygui_version()})')\n add_text(\"This demo is not complete but will continue to be added to throughout the 0.4.x releases!\")\n\n with collapsing_header(\"Window options##demo\", default_open=True):\n with managed_columns(\"Window Options Col##demo\", 3, border=False):\n add_checkbox(\"No titlebar##demo\", callback=lambda sender, data: configure_item(\"Dear PyGui Demo\", no_title_bar=get_value(sender)))\n add_checkbox(\"No scrollbar##demo\", callback=lambda sender, data: configure_item(\"Dear PyGui Demo\", no_scrollbar=get_value(sender)))\n add_checkbox(\"No menu##demo\", callback=lambda sender, data: configure_item(\"Dear PyGui Demo\", menubar=not get_value(sender)))\n\n add_checkbox(\"No move##demo\", callback=lambda sender, data: configure_item(\"Dear PyGui Demo\", no_move=get_value(sender)))\n add_checkbox(\"No resize##demo\", callback=lambda sender, data: configure_item(\"Dear PyGui Demo\", no_resize=get_value(sender)))\n add_checkbox(\"No collapse##demo\", callback=lambda sender, data: configure_item(\"Dear PyGui Demo\", no_collapse=get_value(sender)))\n \n add_checkbox(\"No close##demo\", callback=lambda sender, data: configure_item(\"Dear PyGui Demo\", no_close=get_value(sender)))\n add_checkbox(\"No background##demo\", callback=lambda sender, data: configure_item(\"Dear PyGui Demo\", no_background=get_value(sender)))\n add_checkbox(\"No bring to front##demo\", callback=lambda sender, data: configure_item(\"Dear PyGui Demo\", no_bring_to_front_on_focus=get_value(sender)))\n\n with collapsing_header(\"Widgets##demo\"):\n\n with tree_node(\"Basic##demo\"):\n\n add_button(\"Button##demo\")\n add_checkbox(\"checkbox##demo\")\n add_radio_button(\"radiobutton##demo\", items=[\"radio a\", \"radio b\", \"radio c\"], horizontal=True)\n \n for i in range(0, 7):\n if i > 0:\n add_same_line()\n add_button(f\"Click##{i}\")\n set_item_color(f\"Click##{i}\", mvGuiCol_Button, hsv_to_rgb(i/7.0, 0.6, 0.6))\n set_item_color(f\"Click##{i}\", mvGuiCol_ButtonHovered, hsv_to_rgb(i/7.0, 0.7, 0.7))\n set_item_color(f\"Click##{i}\", mvGuiCol_ButtonActive, hsv_to_rgb(i/7.0, 0.8, 0.8))\n\n add_text(\"Press a button: \")\n add_same_line()\n add_button(\"Left##demo\", arrow=True, direction=mvDir_Left, \n callback=lambda sender, data: set_value(\"value\", int(get_value(\"value\"))-1))\n add_same_line()\n add_button(\"Right##demo\", arrow=True, direction=mvDir_Right,\n callback=lambda sender, data: set_value(\"value\", int(get_value(\"value\"))+1))\n add_same_line()\n add_text(\"value\", default_value=\"0\")\n\n add_separator()\n\n add_label_text(\"label##demo\", value=\"Value\")\n add_combo(\"combo##demo\", items=[\"AAAA\", \"BBBB\", \"CCCC\", \"DDDD\", \"EEEE\", \"FFFF\", \"GGGG\", \"HHHH\", \"IIII\", \"JJJJ\", \"KKKK\"], \n default_value=\"AAAA\")\n add_input_text(\"input text##demo\", default_value=\"Hello, world!\")\n helpmarker(\n \"USER:\\n\"\n \"Hold SHIFT or use mouse to select text.\\n\"\n \"CTRL+Left/Right to word jump.\\n\"\n \"CTRL+A or double-click to select all.\\n\"\n \"CTRL+X,CTRL+C,CTRL+V clipboard.\\n\"\n \"CTRL+Z,CTRL+Y undo/redo.\\n\"\n \"ESCAPE to revert.\\n\\n\")\n add_input_text(\"input text (w/ hint)##demo\", hint=\"enter text here\")\n add_input_int(\"input int##demo\")\n add_input_float(\"input float##demo\")\n add_input_float(\"input scientific##demo\", format=\"%e\")\n add_input_float3(\"input float3##example##demo\")\n add_drag_int(\"drag int\")\n helpmarker(\n \"Click and drag to edit value.\\n\"\n \"Hold SHIFT/ALT for faster/slower edit.\\n\"\n \"Double-click or 
CTRL+click to input value.\")\n add_drag_int(\"drag int 0..100##demo\", format=\"%d%%\")\n add_drag_float(\"drag float##demo\")\n add_drag_float(\"drag small float##demo\", default_value=0.0067, format=\"%.06f ns\")\n add_slider_int(\"slider int##demo\", max_value=3)\n helpmarker(\"CTRL+click to enter value.\")\n add_slider_float(\"slider float##demo\", max_value=1.0, format=\"ratio = %.3f\")\n add_slider_int(\"slider angle##demo\", min_value=-360, max_value=360, format=\"%d deg\")\n add_color_edit3(\"color 1##demo\", default_value=[255, 0, 51])\n helpmarker(\n \"Click on the colored square to open a color picker.\\n\"\n \"Click and hold to use drag and drop.\\n\"\n \"Right-click on the colored square to show options.\\n\"\n \"CTRL+click on individual component to input value.\\n\")\n add_color_edit4(\"color 2##demo\", default_value=[102, 179, 0, 128])\n add_listbox(\"listbox##demo\", items=[\"Apple\", \"Banana\", \"Cherry\", \"Kiwi\", \"Mango\", \"Orange\", \"Pineapple\", \"Strawberry\", \"Watermelon\"], num_items=4)\n\n with tree_node(\"Bullets##demo\"):\n add_text(\"Bullet point 1\", bullet=True)\n add_text(\"Bullet point 2\\nOn multiple lines\", bullet=True)\n with tree_node(\"Tree node\"):\n add_text(\"Another bullet point\", bullet=True)\n add_text(\"\", bullet=True)\n add_same_line()\n add_button(\"Button##bullets##demo\", small=True)\n\n with tree_node(\"Text##demo\"):\n\n with tree_node(\"Colored Text\"):\n add_text(\"Pink\", color=[255, 0, 255])\n add_text(\"Yellow\", color=[255, 255, 0])\n \n with tree_node(\"Images##demo\"):\n add_text(\"Below we are displaying the font texture (which is the only texture we have access to in this demo).\")\n add_image(\"image##demo\", \"INTERNAL_DPG_FONT_ATLAS\", uv_max=[1, 1])\n add_text(\"Here is an image button using a portion of the font atlas\")\n add_image_button(\"#image##button1\", \"INTERNAL_DPG_FONT_ATLAS\", uv_max=[0.1, 0.1])\n add_same_line()\n add_image_button(\"#image##button2\", \"INTERNAL_DPG_FONT_ATLAS\", uv_min=[0.1, 0.1], uv_max=[0.2, 0.2])\n\n with tree_node(\"Text Input##demo\"):\n \n with tree_node(\"Multi-line Text Input##demo\"):\n add_input_text(\"##multiline##demo\", multiline=True, default_value=\"/*\\n\"\n \" The Pentium F00F bug, shorthand for F0 0F C7 C8,\\n\"\n \" the hexadecimal encoding of one offending instruction,\\n\"\n \" more formally, the invalid operand with locked CMPXCHG8B\\n\"\n \" instruction bug, is a design flaw in the majority of\\n\"\n \" Intel Pentium, Pentium MMX, and Pentium OverDrive\\n\"\n \" processors (all in the P5 microarchitecture).\\n\"\n \"*/\\n\\n\"\n \"label:\\n\"\n \"\\tlock cmpxchg8b eax\\n\", height=300)\n\n with tree_node(\"Filtered Text Input##demo\"):\n add_input_text(\"default##demo\")\n add_input_text(\"decimal##demo\", decimal=True)\n add_input_text(\"hexdecimal##demo\", hexadecimal=True)\n add_input_text(\"uppercase##demo\", uppercase=True)\n add_input_text(\"no blank##demo\", no_spaces=True)\n add_input_text(\"scientific##demo\", scientific=True)\n \n with tree_node(\"Password Input##demo\"):\n add_input_text(\"password##demo\", password=True, source=\"password\")\n add_input_text(\"password (w/ hint)##demo\", password=True, hint=\"<password>\", source=\"password\")\n add_input_text(\"password (clear)##demo\", source=\"password\")\n\n with tree_node(\"Simple Plot Widgets##demo\"):\n add_simple_plot(\"Frame Times##demo\", value=[0.6, 0.1, 1.0, 0.5, 0.92, 0.1, 0.2])\n add_simple_plot(\"Histogram##demo\", value=(0.6, 0.1, 1.0, 0.5, 0.92, 0.1, 0.2), height=80, histogram=True, 
minscale=0.0)\n\n data1 = []\n for i in range(0, 70):\n data1.append(cos(3.14*6*i/180))\n add_simple_plot(\"Lines##sin##demo\", value=data1, height=80)\n add_simple_plot(\"Histogram##sin##demo\", value=data1, height=80, histogram=True)\n add_progress_bar(\"Progress Bar##demo\", value=0.78, overlay=\"78%\")\n add_same_line()\n add_text(\"Progress Bar##text##demo\")\n set_value(\"Progress Bar##text##demo\", \"Progress Bar\")\n add_progress_bar(\"##Progress Bar##demo\", value=0.78, overlay=\"1367/1753\")\n\n with tree_node(\"Color/Picker Widgets##demo\"):\n # wrapper to apply configuration to all items passed in as a list\n def configure_items(names, **kwargs):\n for name in names:\n configure_item(name, **kwargs)\n color_edit_names = [\"MyColor##1\", \"MyColor##2\"]\n\n with managed_columns(\"##demowidgetscolor\", 3, border=False):\n add_checkbox(\"With Alpha Preview\", callback=lambda sender, data: configure_items(color_edit_names, alpha_preview = get_value(sender)))\n add_checkbox(\"With Half Alpha Preview\", callback=lambda sender, data: configure_items(color_edit_names, alpha_preview_half = get_value(sender)))\n add_checkbox(\"With No Small Preview\", callback=lambda sender, data: configure_items(color_edit_names, no_small_preview = get_value(sender)))\n add_checkbox(\"With No Inputs\", callback=lambda sender, data: configure_items(color_edit_names, no_inputs = get_value(sender)))\n add_checkbox(\"With No Tooltip\", callback=lambda sender, data: configure_items(color_edit_names, no_tooltip = get_value(sender)))\n add_checkbox(\"With RGB\", callback=lambda sender, data: configure_items(color_edit_names, display_rgb = get_value(sender)))\n add_checkbox(\"With HSV\", callback=lambda sender, data: configure_items(color_edit_names, display_hsv = get_value(sender)))\n add_checkbox(\"With HEX\", callback=lambda sender, data: configure_items(color_edit_names, display_hex = get_value(sender)))\n add_checkbox(\"With Ints\", callback=lambda sender, data: configure_items(color_edit_names, uint8 = get_value(sender)))\n add_checkbox(\"With Floats\", callback=lambda sender, data: configure_items(color_edit_names, floats = get_value(sender)))\n helpmarker(\"Right-click on the individual color widget to show options.\")\n add_checkbox(\"With No Drag and Drop\", callback=lambda sender, data: configure_items(color_edit_names, no_drag_drop = get_value(sender)))\n helpmarker(\"Click and drag a preview square, drop on another color widget to apply the color\")\n add_checkbox(\"With No Options Menu\", callback=lambda sender, data: configure_items(color_edit_names, no_options = get_value(sender)))\n helpmarker(\"Right clicking a color widget brings up an options context menu\")\n \n add_value(\"colorvalue\", [0.0, 0.0, 0.0, 0.0])\n add_text(\"Color Widget:\")\n add_color_edit3(color_edit_names[0], source=\"colorvalue\")\n \n add_text(\"Color Widget HSV with Alpha:\")\n add_color_edit4(color_edit_names[1], source=\"colorvalue\", display_hsv=True, alpha_preview=True)\n add_text(\"Color button with Picker:\")\n helpmarker(\"using no inputs and no label leaves only the preview\\n\"\n \"click the color edit preview will reveal the color picker.\")\n add_color_edit4(\"Color Edit 4##2\", source=\"colorvalue\", no_inputs=True, no_label=True)\n \n add_text(\"Color button with Custom Picker Popup:\")\n add_color_edit4(\"Color Edit 4 (with custom popup)\", source=\"colorvalue\", no_inputs=True, no_picker=True, popup=\"custom picker popup\")\n helpmarker(\"we can override the popup with our own custom popup that includes a 
color pallet\")\n with popup(\"Color Edit 4 (with custom popup)\", \"custom picker popup\", mousebutton=0):\n add_color_picker4(\"custom picker\", no_tooltip=True, picker_hue_wheel=True)\n add_text(\"Color Pallet\")\n for i in range(30):\n add_color_button(f\"color button {i}\", hsv_to_rgb(i/30,1,1))\n if i<9: add_same_line()\n if i>9 and i<19: add_same_line()\n if i>19 and i<29: add_same_line()\n \n add_text(\"Color button only:\")\n add_checkbox(\"no_border\", callback=lambda sender, data: configure_item(\"Color Button\", no_border=get_value(sender)))\n add_color_button(\"Color Button\", (255, 50, 255, 0), width=50, height=50)\n with managed_columns(\"##demowidgetscolor2\", 2, border=False):\n add_checkbox(\"With Alpha\", default_value=True, callback=lambda sender, data: configure_item(\"Color Picker 4\", alpha_preview = get_value(sender)))\n add_checkbox(\"With Alpha Bar\", default_value=True, callback=lambda sender, data: configure_item(\"Color Picker 4\", alpha_bar = get_value(sender)))\n add_checkbox(\"With Side Preview\", callback=lambda sender, data: configure_item(\"Color Picker 4\", no_side_preview = get_value(sender)))\n add_checkbox(\"Display RGB\", callback=lambda sender, data: configure_item(\"Color Picker 4\", display_rgb = get_value(sender)))\n add_checkbox(\"Display HSV\", callback=lambda sender, data: configure_item(\"Color Picker 4\", display_hsv = get_value(sender)))\n add_checkbox(\"Display HEX\", callback=lambda sender, data: configure_item(\"Color Picker 4\", display_hex = get_value(sender)))\n def apply_hue(sender, data):\n log_debug(get_value(sender))\n if(get_value(sender) == 0): \n configure_item(\"Color Picker 4\", picker_hue_bar = True)\n elif(get_value(sender) == 1): \n configure_item(\"Color Picker 4\", picker_hue_wheel = True)\n add_radio_button(\"Display Type\", items=[\"Hue Bar\", \"Hue Wheel\"], callback=apply_hue)\n add_color_picker4(\"Color Picker 4\", source=\"colorvalue\", alpha_preview= True, alpha_bar=True)\n add_value(\"list_color_value\", [0.5,0.5,0.5,1.0])\n add_color_edit4(\"Color Edit 4 (float values)\", source=\"list_color_value\", alpha_preview= True, floats=True, callback=lambda sender, data: configure_item(\"float_values\", label=f\"{get_value('list_color_value')}\", color=hsv_to_rgb(get_value('list_color_value')[0],get_value('list_color_value')[1],get_value('list_color_value')[2])))\n helpmarker(\"Color item values given to the widget as a list will cause the \\n\"\n \"color item to store and return colors as scalar floats from 0.0-1.0.\\n\"\n \"setting floats=True will turn the inputs also to a float (although this is not necessary)\")\n add_label_text(\"float_values\", value=\"Color List: \",label=f\"{get_value('list_color_value')}\",color=hsv_to_rgb(get_value('list_color_value')[0],get_value('list_color_value')[1],get_value('list_color_value')[2]))\n add_color_edit4(\"Color Edit 4 (ints value)\", default_value=(125,125,125,255), alpha_preview= True, callback=lambda sender, data: configure_item(\"ints_values\", label=f\"({get_value(sender)[0]}, {get_value(sender)[1]}, {get_value(sender)[2]}, {get_value(sender)[3]})\",color=get_value(sender)))\n helpmarker(\"Color item values given to the widget as a tuple will cause the \\n\"\n \"color item to store and return colors as ints from 0-255.\")\n add_label_text(\"ints_values\", value=\"Color Tuple: \", label=f\"{get_value('Color Edit 4 (ints value)')}\", color=get_value('Color Edit 4 (ints value)'))\n\n with tree_node(\"Multi-component Widgets##demo\"):\n \n add_input_float2(\"input 
float2##demo\", source=\"float2\")\n add_drag_float2(\"drag float2##demo\", source=\"float2\")\n add_slider_float2(\"slider float2##demo\", source=\"float2\")\n add_input_int2(\"input int2##demo\", source=\"int2\")\n add_drag_int2(\"drag int2##demo\", source=\"int2\")\n add_slider_int2(\"slider int2##demo\", source=\"int2\")\n add_spacing()\n add_input_float3(\"input float3##demo\", source=\"float3\")\n add_drag_float3(\"drag float3##demo\", source=\"float3\")\n add_slider_float3(\"slider float3##demo\", source=\"float3\")\n add_input_int3(\"input int3##demo\", source=\"int3\")\n add_drag_int3(\"drag int3##demo\", source=\"int3\")\n add_slider_int3(\"slider int3##demo\", source=\"int3\")\n add_spacing()\n add_input_float4(\"input float4##demo\", source=\"float4\")\n add_drag_float4(\"drag float4##demo\", source=\"float4\")\n add_slider_float4(\"slider float4##demo\", source=\"float4\")\n add_input_int4(\"input int4##demo\", source=\"int4\")\n add_drag_int4(\"drag int4##demo\", source=\"int4\")\n add_slider_int4(\"slider int4##demo\", source=\"int4\")\n\n with tree_node(\"Vertical Sliders##demo\"):\n\n add_slider_int(f\"##vi\", default_value=1, vertical=True, max_value=5, height=160)\n add_same_line()\n with group(\"v group 1##demo\"):\n values = [ 0.0, 0.60, 0.35, 0.9, 0.70, 0.20, 0.0 ]\n for i in range(0, 7):\n if i > 0:\n add_same_line()\n add_slider_float(f\"##v{i}##demo\", default_value=values[i], vertical=True, max_value=1.0, height=160)\n set_item_color(f\"##v{i}##demo\", mvGuiCol_FrameBg, hsv_to_rgb(i/7.0, 0.5, 0.5))\n set_item_color(f\"##v{i}##demo\", mvGuiCol_FrameBgHovered, hsv_to_rgb(i/7.0, 0.6, 0.5))\n set_item_color(f\"##v{i}##demo\", mvGuiCol_FrameBgActive, hsv_to_rgb(i/7.0, 0.7, 0.5))\n set_item_color(f\"##v{i}##demo\", mvGuiCol_SliderGrab, hsv_to_rgb(i/7.0, 0.9, 0.9))\n\n add_same_line()\n with group(\"v group 2##demo\"):\n for i in range(0, 3):\n with group(f\"v group 2{i}##demo\"):\n values = [ 0.20, 0.80, 0.40, 0.25 ]\n for j in range(0, 4):\n add_slider_float(f\"##v{j}{i}##demo\", default_value=values[j], vertical=True, max_value=1.0, height=50, source=f\"v{j}\")\n if i != 3:\n add_same_line() \n \n add_same_line()\n with group(\"v group 3##demo\"):\n add_slider_float(\"##vs1##demo\", vertical=True, max_value=1.0, height=160, source=\"##v1\", width=40)\n add_same_line()\n add_slider_float(\"##vs2##demo\", vertical=True, max_value=1.0, height=160, source=\"##v2\", width=40)\n add_same_line()\n add_slider_float(\"##vs3##demo\", vertical=True, max_value=1.0, height=160, source=\"##v3\", width=40)\n add_same_line()\n add_slider_float(\"##vs4##demo\", vertical=True, max_value=1.0, height=160, source=\"##v4\", width=40)\n\n with tree_node(\"Time/Date Widgets##demo\"):\n add_time_picker(\"Time Picker##demo\", default_value={'hour': 14, 'min': 32, 'sec': 23})\n add_separator()\n with managed_columns(\"Date Columns##demo\", 3):\n add_date_picker(\"Date Picker1##demo\", level=0, default_value={'month_day': 8, 'year':93, 'month':5})\n add_date_picker(\"Date Picker2##demo\", level=1, default_value={'month_day': 8, 'year':93, 'month':5})\n add_date_picker(\"Date Picker3##demo\", level=2, default_value={'month_day': 8, 'year':93, 'month':5})\n add_separator()\n\n with collapsing_header(\"Layout & Scolling##demo\"):\n\n add_text(\"This section is not ready! 
But will completed sometime during the 0.4.x releases!\")\n\n with tree_node(\"Child windows##demo\"):\n\n with child(\"child##demo\", border=False, width=400):\n for i in range(0, 100):\n add_text(f\"{i}: scrollable region\")\n\n add_same_line()\n with child(\"decorated child##demo\", autosize_x=True):\n with menu_bar(\"MenuBarChild##demo\"):\n with menu(\"Menu##child##demo\"):\n pass\n with group(\"decorated child group##demo\", width=-20):\n for i in range(0, 100):\n add_button(f\"{i}##childbutton##demo\")\n\n with tree_node(\"Child Window Flags##demo\"):\n\n with managed_columns(\"##childwindowcol\", 3, border=False):\n add_checkbox(\"autosize_x##demo\", callback=lambda sender, data: configure_item(\"testchild##demo\", autosize_x=get_value(sender)))\n add_checkbox(\"autosize_y##demo\", callback=lambda sender, data: configure_item(\"testchild##demo\", autosize_y=get_value(sender)))\n add_checkbox(\"menubar##childdemo\", default_value=True, callback=lambda sender, data: configure_item(\"testchild##demo\", menubar=get_value(sender)))\n add_checkbox(\"no_scrollbar##childdemo\", callback=lambda sender, data: configure_item(\"testchild##demo\", no_scrollbar=get_value(sender)))\n add_checkbox(\"horizontal_scrollbar##childdemo\", callback=lambda sender, data: configure_item(\"testchild##demo\", horizontal_scrollbar=get_value(sender)))\n add_checkbox(\"border##childdemo\", default_value=True, callback=lambda sender, data: configure_item(\"testchild##demo\", border=get_value(sender)))\n with child(\"testchild##demo\", width=500, height=500):\n set_item_color(\"testchild##demo\", mvGuiCol_ChildBg, [255, 0, 0, 100])\n with menu_bar(\"MenuBartestChild##demo\"):\n with menu(\"Menu##testchild##demo\"):\n pass\n for i in range(0, 100):\n add_text(\"A pretty long sentence if you really think about it. 
It's also pointless.\")\n\n with tree_node(\"Widgets Width##demo\"):\n \n add_text(\"Width=100\")\n add_drag_float(\"float##demowidths1\", width=100)\n\n add_text(\"Width=-100\")\n add_drag_float(\"float##demowidths2\", width=-100)\n\n add_text(\"Width=-1\")\n add_drag_float(\"float##demowidths3\", width=-1)\n\n add_text(\"group with width=75\")\n with group(\"##demowidgetWidthgroup\", width=75):\n add_drag_float(\"float##demowidths4\")\n add_drag_float(\"float##demowidths5\")\n add_drag_float(\"float##demowidths6\")\n\n with tree_node(\"Basic Horizontal Layout##demo\"):\n add_text(\"(Use add_same_line(), to keep adding items to the right of the preceding item)\")\n add_text(\"Normal buttons\")\n add_same_line()\n add_button(\"Banana##demo\")\n add_same_line()\n add_button(\"Apple##demo\")\n add_same_line()\n add_button(\"Corniflower##demo\")\n\n add_text(\"Small buttons\")\n add_same_line()\n add_button(\"Like this one##demo\", small=True)\n add_same_line()\n add_text(\"can fit within a text block\")\n\n add_text(\"Aligned\")\n add_same_line(xoffset=150)\n add_text(\"x=150\")\n add_same_line(xoffset=300)\n add_text(\"x=300\")\n\n add_text(\"Aligned\")\n add_same_line(xoffset=150)\n add_button(\"x=150##demo1\", small=True)\n add_same_line(xoffset=300)\n add_button(\"x=300##demo1\", small=True)\n\n add_checkbox(\"My##demo\")\n add_same_line()\n add_checkbox(\"Tailor##demo\")\n add_same_line()\n add_checkbox(\"is##demo\")\n add_same_line()\n add_checkbox(\"rich##demo\")\n\n add_text(\"Lists:\")\n add_listbox(\"##demolistbox1\", items=[\"AAAA\", \"BBBB\", \"CCCC\", \"DDDD\"], default_value=0, width=100)\n add_same_line()\n add_listbox(\"##demolistbox2\", items=[\"AAAA\", \"BBBB\", \"CCCC\", \"DDDD\"], default_value=1, width=100)\n add_same_line()\n add_listbox(\"##demolistbox3\", items=[\"AAAA\", \"BBBB\", \"CCCC\", \"DDDD\"], default_value=2, width=100)\n add_same_line()\n add_listbox(\"##demolistbox4\", items=[\"AAAA\", \"BBBB\", \"CCCC\", \"DDDD\"], default_value=3, width=100)\n \n\n add_text(\"Spacing(100):\")\n add_button(\"A##demospacing\", width=50, height=50)\n add_same_line(spacing=100)\n add_button(\"B##demospacing\", width=50, height=50)\n\n with tree_node(\"Tabs##demo\"):\n with tree_node(\"Basic##tabs##demo\"):\n with tab_bar(\"Basic Tabbar##demo\"):\n with tab(\"Avocado##demo\"):\n add_text(\"This is the avocado tab!\")\n with tab(\"Broccoli##demo\"):\n add_text(\"This is the broccoli tab!\")\n with tab(\"Cucumber##demo\"):\n add_text(\"This is the cucumber tab!\")\n\n with tree_node(\"Groups##demo123\"):\n add_text(\"Groups can be used to bundle widths together so that you can use functions such as is_item_hovered or add_same_line on the whole group.\")\n with group(\"group##demotabexamples\", tip=\"The group is hovered\"):\n add_button(\"AAA##demogroup\")\n add_button(\"BBB##demogroup\")\n add_button(\"CCC##demogroup\")\n\n add_text(\"Horizontal group:\")\n with group(\"group##demotabexamples1\", horizontal=True):\n add_button(\"AAA##demogroup1\")\n add_button(\"BBB##demogroup1\")\n add_button(\"CCC##demogroup1\")\n\n with collapsing_header(\"Tooltips##demo\"):\n add_text(\"Hover me for a fancy tooltip\")\n with tooltip(\"Hover me for a fancy tooltip\", \"tool_tip##tooltips\"):\n add_simple_plot(\"Simpleplot##tooltips##demo\", value=(0.3, 0.9, 2.5, 8.9), height = 80)\n\n with collapsing_header(\"Popups, Modal windows, & Dialogs##demo\"):\n\n with tree_node(\"Popups##demo\"):\n add_text(\"When a popup is active, it inhibits interacting with windows that are behind the popup. 
Clicking outside the popup closes it.\")\n add_button(\"Select..##popups##demo\")\n add_same_line()\n add_text(\"<None>\")\n with popup(\"Select..##popups##demo\", \"popup1\"):\n add_text(\"Aquariam\")\n add_separator()\n add_selectable(\"Bream##demo\", callback=lambda sender, data: set_value(\"<None>\", sender))\n add_selectable(\"Haddock##demo\", callback=lambda sender, data: set_value(\"<None>\", sender))\n add_selectable(\"Mackerel##demo\", callback=lambda sender, data: set_value(\"<None>\", sender))\n add_selectable(\"Pollock##demo\", callback=lambda sender, data: set_value(\"<None>\", sender))\n add_selectable(\"Tilefish##demo\", callback=lambda sender, data: set_value(\"<None>\", sender))\n\n with tree_node(\"Modals##demo\"):\n add_text(\"Modal windows are like popups but the user cannot close them by clicking outside.\")\n add_button(\"Delete..##modals##demo\")\n with popup(\"Delete..##modals##demo\", \"Delete?\", modal=True):\n add_text(\"All those beautiful files will be deleted.\\nThis operation cannot be undone!\")\n add_separator()\n add_checkbox(\"Don't ask me next time##demo\")\n add_button(\"OK##modal##demo\", width=75, callback=lambda sender, data: close_popup())\n add_same_line()\n add_button(\"Cancel##modal##demo\", width=75, callback=lambda sender, data: close_popup())\n\n with tree_node(\"File Selector##demo\"):\n def file_selected(sender, data):\n log_info(data)\n add_button(\"Select Python File##demo\", callback = lambda sender, data: open_file_dialog(file_selected, \".*,.py\"))\n add_button(\"Select C++ File##demo\", callback = lambda sender, data: open_file_dialog(file_selected, \".*,.cpp\"))\n\n with tree_node(\"Directory Selector##demo\"):\n def directory_selected(sender, data):\n log_info(data)\n add_button(\"Select Directory##demo\", callback = lambda sender, data: select_directory_dialog(directory_selected))\n\n with tree_node(\"Menus inside a regular window##demo\"):\n add_text(\"Below we are testing adding menu items to a regular window. 
It's rather unusual but should work\")\n add_separator()\n add_menu_item(\"Menu item##demotestingmenus\", shortcut=\"CTRL+M\")\n with menu(\"Menu inside a regular window##demo\"):\n add_menu_item(\"Disabled item##demotestingmenus\", enabled=False)\n add_menu_item(\"New##demotestingmenus\")\n\n with collapsing_header(\"Columns##demo\"):\n\n with tree_node(\"Basic##columns##demo\"):\n add_text(\"This uses managed columns (add_managed_columns)\")\n add_text(\"Without border:\")\n add_separator()\n with managed_columns(\"columns1##demo\", 3, border=False):\n for i in range(0, 14):\n add_selectable(f\"Item {i}##columns1##demo\")\n add_separator()\n\n add_text(\"With border:\")\n add_separator()\n with managed_columns(\"columns2##demo\", 4):\n add_text(\"ID\")\n add_text(\"Name\")\n add_text(\"Path\")\n with group(\"Just to get separator in the same cell##demo\"):\n add_text(\"Hovered\")\n add_separator()\n\n add_selectable(\"0000##demo\", span_columns=True)\n add_text(\"One\")\n add_text(\"/path/one\")\n add_text(\"0\")\n\n add_selectable(\"0001##demo\", span_columns=True)\n add_text(\"Two\")\n add_text(\"/path/two\")\n add_text(\"0\")\n\n add_selectable(\"0003##demo\", span_columns=True)\n add_text(\"Three\")\n add_text(\"/path/three\")\n add_text(\"0\")\n add_separator()\n\n with tree_node(\"Borders##columns##demo\"):\n\n add_text(\"This uses managed columns (add_managed_columns)\")\n with managed_columns(\"Columns3##demo\", 4):\n\n def replicated_cell(i):\n with group(f\"replicated_group##{i}##demo\"):\n if i % 4 == 0:\n add_separator()\n add_text(f\"aaa##{i}\")\n add_input_text(f\"##inputcolumns{i}\")\n add_button(f\"Button##repl{i}##demo\")\n\n for i in range(0, 12):\n replicated_cell(i)\n add_separator()\n\n with tree_node(\"Mixed items##columns##demo\"):\n add_text(\"This uses raw columns (add_columns/add_next_column)\")\n add_separator()\n add_columns(\"demo##columns\", 3)\n add_text(\"Hello\")\n add_next_column()\n add_text(\"PyGui\")\n add_next_column()\n add_text(\"Sailer\")\n add_next_column()\n add_button(\"Banana##democolumns\")\n add_next_column()\n add_button(\"Apple##democolumns\")\n add_next_column()\n add_button(\"Corniflower##democolumns\")\n add_next_column()\n add_next_column()\n add_input_float(\"red##democoluns\")\n add_next_column()\n add_input_float(\"blue##democoluns\")\n add_next_column()\n add_next_column()\n add_text(\"An extra line here\")\n add_next_column()\n add_next_column()\n with collapsing_header(\"Category A##democolumns\"):\n add_text(\"blah blah\")\n add_separator()\n add_next_column()\n with collapsing_header(\"Category B##democolumns\"):\n add_text(\"blah blah\")\n add_next_column()\n with collapsing_header(\"Category C##democolumns\"):\n add_text(\"blah blah\")\n add_columns(\"demo##columnsend\", 1)\n add_separator()\n\n with collapsing_header(\"Drawings##demo\"):\n add_text(\"This section is not ready! 
But will completed sometime during the 0.4.x releases!\")\n def UpdateDrawing(sender, data):\n set_drawing_origin(\"drawing##widget##demo\", get_value(\"X Origin##demo\"), get_value(\"Y Origin##demo\"))\n set_drawing_scale(\"drawing##widget##demo\", get_value(\"X Scale##demo\"), get_value(\"Y Scale##demo\"))\n\n with group(\"Drawing Controls Group##demo\"):\n add_slider_float(\"X Origin##demo\", vertical=True, min_value = -100, max_value=100, default_value=0, callback=UpdateDrawing)\n add_same_line(spacing=20)\n add_slider_float(\"Y Origin##demo\", vertical=True, min_value = -100, max_value=100, default_value=0, callback=UpdateDrawing)\n add_slider_float(\"X Scale##demo\", vertical=True, max_value=10, default_value=1, callback=UpdateDrawing)\n add_same_line(spacing=20)\n add_slider_float(\"Y Scale##demo\", vertical=True, max_value=10, default_value=1, callback=UpdateDrawing)\n add_same_line(spacing=20)\n add_drawing(\"drawing##widget##demo\", width=800, height=500)\n draw_rectangle(\"drawing##widget##demo\", (0, 500), (800, 0), (255, 0, 0, 255), fill=(0, 0, 25, 255), rounding=12, thickness = 1.0)\n draw_line(\"drawing##widget##demo\", (10, 10), (100, 100), (255, 0, 0, 255), 1)\n draw_triangle(\"drawing##widget##demo\", (300, 500), (200, 200), (500, 200), (255, 255, 0, 255), thickness = 3.0)\n draw_quad(\"drawing##widget##demo\", (50, 50), (150, 50), (150, 150), (50, 150), (255, 255, 0, 255), thickness = 3.0)\n draw_text(\"drawing##widget##demo\", (50, 300), \"Some Text\", color=(255, 255, 0, 255), size=15)\n draw_text(\"drawing##widget##demo\", (0, 0), \"Origin\", color=(255, 255, 0, 255), size=15)\n draw_circle(\"drawing##widget##demo\", (400, 250), 50, (255, 255, 0, 255))\n draw_polyline(\"drawing##widget##demo\", ((320, 490), (185, 200), (500, 710)), (255, 255, 0, 255), thickness=1.0)\n draw_polygon(\"drawing##widget##demo\", ((363, 471), (153, 498), (59, 220), (363, 471)), (255, 125, 0, 255), thickness=1.0, fill=(255, 125, 0, 50))\n draw_bezier_curve(\"drawing##widget##demo\", (50, 200), (150, 250), (300, 150), (600, 250), (255, 255, 0, 255), thickness = 2.0)\n draw_arrow(\"drawing##widget##demo\", (50, 70), (100, 65), (0, 200, 255), 1, 10)\n draw_image(\"drawing##widget##demo\", \"INTERNAL_DPG_FONT_ATLAS\", pmin=[0,400], uv_max=[0.1, 0.1])\n\n with collapsing_header(\"Plots##demo\"):\n\n add_text(\"This section is not ready! 
But will completed sometime during the 0.4.x releases!\")\n\n with tree_node(\"Help##plots##demo\"):\n add_text(\"Plotting User Guide\")\n add_text(\"Left click and drag within the plot area to pan X and Y axes.\", bullet=True)\n add_indent()\n add_text(\"Left click and drag on an axis to pan an individual axis.\", bullet=True)\n unindent()\n\n add_text(\"Scoll in the plot area to zoom both X and Y axes.\", bullet=True)\n add_indent()\n add_text(\"Scroll on an axis to zoom an individual axis.\", bullet=True)\n unindent()\n\n add_text(\"Right click and drag to box select data.\", bullet=True)\n add_indent()\n add_text(\"Hold Alt to expand box selection horizontally.\", bullet=True)\n add_text(\"Hold Shift to expand box selection vertically.\", bullet=True)\n add_text(\"Left click while box selecting to cance the selection.\", bullet=True)\n unindent()\n\n add_text(\"Double left click to fit all visible data.\", bullet=True)\n add_indent()\n add_text(\"Double left click on an axis to fit the individual axis\", bullet=True)\n unindent()\n\n add_text(\"Double right click to open the plot context menu.\", bullet=True)\n add_text(\"Click legend label icons to show/hide plot items.\", bullet=True)\n add_text(\"IMPORTANT: By default, anti-aliased lines are turned OFF.\", bullet=True)\n\n with tree_node(\"Line Plots##demo\"):\n\n sindata = []\n for i in range(0, 1000):\n #sindata.append([3.14*i/180, 0.5+ 0.5*cos(50*3.14*i/180)])\n sindata.append([i/1000, 0.5 + 0.5*sin(50*i/1000)])\n\n x2data = []\n for i in range(0, 100):\n x2data.append([1/(i+1), (1/(i+1))**2])\n\n add_text(\"Anti-aliasing can be enabled from the plot's context menu (see Help).\", bullet=True)\n add_plot(\"Line Plot##demo\", x_axis_name=\"x\", y_axis_name=\"y\", height=400)\n add_line_series(\"Line Plot##demo\", \"0.5 + 0.5 * sin(x)\", sindata)\n add_line_series(\"Line Plot##demo\", \"x^2\", x2data)\n\n with tree_node(\"Time Plots##demo\"):\n\n timedata = []\n \n time_index = 0\n while time_index < 739497600:\n timedata.append([time_index, time_index/(60*60*24)])\n time_index+=60*60*24*7\n \n add_text(\"When time is enabled, x-axis values are interpreted as UNIX timestamps in seconds (e.g. 
1599243545).\", bullet=True)\n add_text(\"UNIX timestamps are seconds since 00:00:00 UTC on 1 January 1970\", bullet=True)\n add_plot(\"Time Plot##demo\", y_axis_name=\"Days since 1970\", height=400, xaxis_time=True)\n add_line_series(\"Time Plot##demo\", \"Days\", timedata)\n \n\n with tree_node(\"Shade Plots##demo\"):\n\n stock_data1 = []\n stock_data2 = []\n stock_data3 = []\n for i in range(0, 100):\n stock_data1.append([i, 400 + 50*abs(random.random())])\n stock_data2.append([i, 275 + 75*abs(random.random())])\n stock_data3.append([i, 150 + 75*abs(random.random())])\n\n add_plot(\"Stock Prices##demo\", x_axis_name=\"Days\", y_axis_name=\"Price\", height=400)\n add_line_series(\"Stock Prices##demo\", \"Stock 1\", stock_data1, color=[0, 0, 255, 255])\n add_line_series(\"Stock Prices##demo\", \"Stock 2\", stock_data2, color=[255, 0, 0, 255])\n add_line_series(\"Stock Prices##demo\", \"Stock 3\", stock_data3, color=[0, 255, 0, 255])\n add_shade_series(\"Stock Prices##demo\", \"Stock 1\", stock_data1, fill=[0, 0, 255, 64])\n add_shade_series(\"Stock Prices##demo\", \"Stock 2\", stock_data2, fill=[255, 0, 0, 64])\n add_shade_series(\"Stock Prices##demo\", \"Stock 3\", stock_data3, fill=[0, 255, 0, 64])\n\n with tree_node(\"Scatter Plots##demo\"):\n\n scatter_data1 = []\n scatter_data2 = []\n for i in range(0, 100):\n scatter_data1.append([i/100, (i + random.random())/100])\n\n for i in range(0, 100):\n scatter_data2.append([0.25 + 0.25*random.random(), 0.65 + 0.25*random.random()])\n\n add_plot(\"Scatter Plot##demo\", height=400)\n add_scatter_series(\"Scatter Plot##demo\", \"Data 1\", scatter_data1)\n add_scatter_series(\"Scatter Plot##demo\", \"Data 2\", scatter_data2, size=7, marker=mvPlotMarker_Square, fill=[255, 0, 0, 100])\n\n with tree_node(\"Bar Plots##demo\"):\n\n add_plot(\"Bar Plot##demo\", x_axis_name=\"Student\", y_axis_name=\"Score\", height=400)\n set_plot_xlimits(\"Bar Plot##demo\", 9, 33)\n set_plot_ylimits(\"Bar Plot##demo\", 0, 110)\n set_xticks(\"Bar Plot##demo\", [[\"S1\", 11], [\"S2\", 21], [\"S3\", 31]])\n add_bar_series(\"Bar Plot##demo\", \"Final Exam\", [[10, 100], [20, 75], [30,90]], weight=1)\n add_bar_series(\"Bar Plot##demo\", \"Midterm Exam\", [[11, 83], [21, 75], [31,72]], weight=1)\n add_bar_series(\"Bar Plot##demo\", \"Course Grade\", [[12, 42], [22, 68], [32,23]], weight=1)\n\n with tree_node(\"Error Bars##demo\"):\n\n add_plot(\"##errorbars##demo\", height=400)\n\n error_data1 = [[1, 1, 0.2, 0.4], [2, 2, 0.4, 0.2], [3, 5, 0.2, 0.4], [4, 3, 0.6, 0.8], [5, 4, 0.4, 0.6]] # bars\n error_data2 = [[1, 8, 0.2, 0.4], [2, 8, 0.4, 0.2], [3, 9, 0.2, 0.4], [4, 7, 0.6, 0.8], [5, 8, 0.4, 0.6]] # lines\n\n add_bar_series(\"##errorbars##demo\", \"Bar\", error_data1, weight=0.25)\n add_error_series(\"##errorbars##demo\", \"Bar\", error_data1)\n\n add_line_series(\"##errorbars##demo\", \"Line\", error_data2)\n add_error_series(\"##errorbars##demo\", \"Line\", error_data2, color=[0, 255, 0])\n\n with tree_node(\"Stem Plots##demo\"):\n add_plot(\"Stem Plots##plot##demo\", height=400)\n\n stem_data1 = []\n stem_data2 = []\n for i in range(0, 51):\n stem_data1.append([i * 0.02, 1.0 + 0.5*sin(25*i*0.02)*cos(2*i*0.02)])\n stem_data2.append([i * 0.02, 0.5 + 0.25*sin(25*i*0.02)*cos(2*i*0.02)])\n\n add_stem_series(\"Stem Plots##plot##demo\", \"Data 1\", stem_data1)\n add_stem_series(\"Stem Plots##plot##demo\", \"Data 2\", stem_data2, marker=mvPlotMarker_Diamond)\n\n with tree_node(\"Pie Charts##demo\"):\n add_plot(\"##PieChart1##demo\", no_mouse_pos=True, \n 
xaxis_no_gridlines=True, xaxis_no_tick_marks=True, xaxis_no_tick_labels=True,\n yaxis_no_gridlines=True, yaxis_no_tick_marks=True, yaxis_no_tick_labels=True, width=250, height=250)\n add_same_line()\n add_plot(\"##PieChart2##demo\", no_mouse_pos=True, \n xaxis_no_gridlines=True, xaxis_no_tick_marks=True, xaxis_no_tick_labels=True,\n yaxis_no_gridlines=True, yaxis_no_tick_marks=True, yaxis_no_tick_labels=True, \n width=250, height=250)\n set_plot_xlimits(\"##PieChart1##demo\", 0, 1)\n set_plot_xlimits(\"##PieChart2##demo\", 0, 1)\n set_plot_ylimits(\"##PieChart1##demo\", 0, 1)\n set_plot_ylimits(\"##PieChart2##demo\", 0, 1)\n add_pie_series(\"##PieChart1##demo\", \"PieChart1\", [[\"fish\", 0.25], [\"Cow\", 0.30], [\"Chicken\", 0.30]], 0.5, 0.5, 0.5)\n add_pie_series(\"##PieChart2##demo\", \"PieChart2\", [[\"A\", 1], [\"B\", 1], [\"C\", 2], [\"D\", 3], [\"E\", 5]], 0.5, 0.5, 0.5, \n normalize=True, format=\"%.0f\")\n set_color_map(\"##PieChart2##demo\", mvPlotColormap_Deep)\n\n with tree_node(\"Heatmaps##demo\"):\n add_plot(\"Heat Plot##demo\", show_color_scale=True, scale_min=0.0, scale_max=6.0, \n scale_height=400, no_legend=True, \n no_mouse_pos=True, xaxis_lock_min=True, xaxis_lock_max=True, xaxis_no_gridlines=True, xaxis_no_tick_marks=True,\n yaxis_no_gridlines=True, yaxis_no_tick_marks=True, yaxis_lock_min=True, yaxis_lock_max=True, height=400)\n values = [[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],\n [2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],\n [1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],\n [0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],\n [0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],\n [1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],\n [0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]]\n add_heat_series(\"Heat Plot##demo\", \"heat data\", values, 7, 7, 0, 6)\n\n with tree_node(\"Annotations##demo\"):\n \n add_plot(\"Annotations##plotsdemo\", height=400)\n add_annotation(\"Annotations##plotsdemo\", \"BL\", 0.25, 0.25, -15, 15, color=[255, 255, 0, 255])\n add_annotation(\"Annotations##plotsdemo\", \"BR\", 0.75, 0.25, 15, 15, color=[255, 255, 0, 255])\n add_annotation(\"Annotations##plotsdemo\", \"TR not clampled\", 0.75, 0.75, -15, -15, color=[255, 255, 0, 255], clamped=False)\n add_annotation(\"Annotations##plotsdemo\", \"TL\", 0.25, 0.75, -15, -15, color=[255, 255, 0, 255])\n add_annotation(\"Annotations##plotsdemo\", \"Center\", 0.5, 0.5, 0, 0, color=[255, 255, 0, 255])\n\n with tree_node(\"Drag Lines and Points##demo\"):\n \n add_plot(\"##dragplotsdemo\", height=400)\n add_drag_line(\"##dragplotsdemo\", \"dline1\", source=\"floatpp\", color=[255, 0, 0, 255])\n add_drag_line(\"##dragplotsdemo\", \"dline2\", source=\"float4pp\", color=[255, 255, 0, 255], y_line=True)\n add_drag_point(\"##dragplotsdemo\", \"dpoint1\", source=\"float2pp\", color=[255, 0, 255, 255])\n\n with tree_node(\"Querying##demo\"):\n add_text(\"Click and drag the middle mouse button!\")\n def query(sender, data):\n set_plot_xlimits(\"Plot2##demoquery\", data[0], data[1])\n set_plot_ylimits(\"Plot2##demoquery\", data[2], data[3])\n\n def plot_callback(sender, data):\n clear_plot(\"Plot1##demoquery\")\n clear_plot(\"Plot2##demoquery\")\n\n sindata = []\n for i in range(0, 1000):\n sindata.append([i/1000, 0.5 + 0.5*sin(50*i/1000)])\n\n sindata = []\n for i in range(0, 1000):\n sindata.append([i/1000, 0.5 + 0.5*sin(50*i/1000)])\n \n add_plot(\"Plot1##demoquery\", height=400, query_callback=query, query=True, no_menus=True)\n add_plot(\"Plot2##demoquery\", height=400, query_callback=query, no_menus=True, no_legend=True)\n add_line_series(\"Plot1##demoquery\", \"0.5 + 0.5 * sin(x)\", 
sindata)\n add_line_series(\"Plot2##demoquery\", \"0.5 + 0.5 * sin(x)\", sindata)\n\n with collapsing_header(\"Simple Tables##demo\"):\n\n add_button(\"Delete row 6##demo\", callback=lambda sender, data: delete_row(\"Table##widget##demo\", 6))\n add_same_line()\n add_button(\"Delete col 1##demo\", callback=lambda sender, data: delete_column(\"Table##widget##demo\", 1)) \n add_same_line()\n add_button(\"Add row##demo\", callback=lambda sender, data: add_row(\"Table##widget##demo\", [\"new1\", \"new2\", \"new3\", 53]))\n add_same_line()\n add_button(\"Add col##demo\", callback=lambda sender, data: add_column(\"Table##widget##demo\", \"New Column\", [\"new1\", \"new2\", \"new3\", \"new4\"]))\n add_same_line()\n add_button(\"Insert row 5##demo\", callback=lambda sender, data: insert_row(\"Table##widget##demo\", 5, [\"inew1\", \"inew2\", \"inew3\", \"inew4\"]))\n add_same_line()\n add_button(\"Insert col 1##demo\", callback=lambda sender, data:insert_column(\"Table##widget##demo\", 1, \"Inserted Column\", [\"inew1\", \"inew2\", \"inew3\", \"inew4\"]))\n add_same_line()\n add_button(\"Clear Table##demo\", callback=lambda sender, data: clear_table(\"Table##widget##demo\"))\n add_table(\"Table##widget##demo\", [\"Column 1\", \"Column 2\", \"Column 3\", \"Column 4\"], height=400)\n\n tabledata = []\n for i in range(0, 10):\n row = []\n for j in range(0, 4):\n row.append(\"Item\"+str(i)+\"-\"+str(j))\n tabledata.append(row)\n\n set_table_data(\"Table##widget##demo\", tabledata)\n\n with collapsing_header(\"Logging##demo\"):\n\n def LogCallback(sender, data):\n show_logger()\n clear_log()\n loglevel = get_value(\"Log Level##logging##demo\")\n set_log_level(loglevel)\n log(\"Trace Message\")\n log_debug(\"Debug Message\")\n log_info(\"Info Message\")\n log_warning(\"Warning Message\")\n log_error(\"Error Message\")\n\n add_button(\"Test Logger##demo\", callback=LogCallback)\n add_same_line(spacing=10)\n with group(\"LoggingGroup##demo\"):\n add_text(\"Log Level##demo\")\n add_radio_button(\"Log Level##logging##demo\", items=(\"Trace\", \"Debug\", \"Info\", \"Warning\", \"Error\", \"Off\"))\n\n with collapsing_header(\"Filtering##demo\"):\n add_text(\"This section is not ready! But will completed sometime during the 0.4.x releases!\")\n\n with collapsing_header(\"Inputs, Navigation, & Focus##demo\"):\n add_text(\"This section is not ready! 
But will completed sometime during the 0.4.x releases!\")\n\n with tree_node(\"Polling##demoinputs\"):\n add_text(\"Key Polling:\")\n add_label_text(\"A key Down##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"W key Pressed##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Q key Released##demo\", value=\"False\", color=(0,200,255))\n add_spacing()\n add_text(\"Mouse Polling:\")\n add_label_text(\"Mouse Position##demo\", value=\"(0,0)\", color=(0,200,255))\n add_label_text(\"Left Mouse Dragging##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Middle Mouse Dragging##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Right Mouse Dragging##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Left Mouse Clicked##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Middle Mouse Clicked##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Right Mouse Clicked##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Left Mouse Double Clicked##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Middle Mouse Double Clicked##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Right Mouse Double Clicked##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Left Mouse Down##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Middle Mouse Down##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Right Mouse Down##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Left Mouse Released##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Middle Mouse Released##demo\", value=\"False\", color=(0,200,255))\n add_label_text(\"Right Mouse Released##demo\", value=\"False\", color=(0,200,255))\n\n\n\n with tree_node(\"Event Callbacks##demoinputs\"):\n add_text(\"Note: these only show the last event!\")\n set_mouse_down_callback(lambda sender, data: set_value(\"Mouse down##demoevents\", data))\n set_mouse_drag_callback(lambda sender, data: set_value(\"Mouse drag##demoevents\", data), 10)\n set_mouse_move_callback(lambda sender, data: set_value(\"Mouse pos##demoevents\", data))\n set_mouse_double_click_callback(lambda sender, data: set_value(\"Mouse dblclick##demoevents\", data))\n set_mouse_click_callback(lambda sender, data: set_value(\"Mouse clicked##demoevents\", data))\n set_mouse_release_callback(lambda sender, data: set_value(\"Mouse released##demoevents\", data))\n set_mouse_wheel_callback(lambda sender, data: set_value(\"Mouse wheel##demoevents\", data))\n set_key_down_callback(lambda sender, data: set_value(\"Keys down##demoevents\", data))\n set_key_press_callback(lambda sender, data: set_value(\"Keys pressed##demoevents\", data))\n set_key_release_callback(lambda sender, data: set_value(\"Keys released##demoevents\", data))\n\n add_label_text(\"Mouse pos##demoevents\")\n add_label_text(\"Mouse drag##demoevents\")\n add_label_text(\"Mouse down##demoevents\")\n add_label_text(\"Mouse clicked##demoevents\")\n add_label_text(\"Mouse dblclick##demoevents\")\n add_label_text(\"Mouse released##demoevents\")\n add_label_text(\"Mouse wheel##demoevents\")\n \n add_label_text(\"Keys down##demoevents\")\n add_label_text(\"Keys pressed##demoevents\")\n add_label_text(\"Keys released##demoevents\")\n",
"id": "5539032",
"language": "Python",
"matching_score": 3.531674385070801,
"max_stars_count": 0,
"path": "DearPyGui/dearpygui/demo.py"
},
{
"content": "from dearpygui.core import *\nfrom dearpygui.simple import *\n\ndef update_drawing(sender, data):\n set_drawing_origin(\"drawing##widget\", get_value(\"X Origin\"), get_value(\"Y Origin\"))\n set_drawing_scale(\"drawing##widget\", get_value(\"X Scale \"), get_value(\"Y Scale\"))\n\nwith group(\"Drawing Controls Group\"):\n add_slider_float(\"X Origin\", vertical=True, min_value=-100, max_value=100, default_value=0, callback=update_drawing)\n add_same_line(spacing=20)\n add_slider_float(\"Y Origin\", vertical=True, min_value=-100, max_value=100, default_value=0, callback=update_drawing)\n add_slider_float(\"X Scale \", vertical=True, max_value=10, default_value=1, callback=update_drawing)\n add_same_line(spacing=20)\n add_slider_float(\"Y Scale\", vertical=True, max_value=10, default_value=1, callback=update_drawing)\n\nadd_same_line(spacing=20)\nadd_drawing(\"drawing##widget\", width=800, height=500)\ndraw_rectangle(\"drawing##widget\", [0, 500], [800, 0], [255, 0, 0, 255], fill=[0, 0, 25, 255], rounding=12,\n thickness=1.0)\ndraw_line(\"drawing##widget\", [10, 10], [100, 100], [255, 0, 0, 255], 1)\ndraw_triangle(\"drawing##widget\", [300, 500], [200, 200], [500, 200], [255, 255, 0, 255], thickness=3.0)\ndraw_quad(\"drawing##widget\", [50, 50], [150, 50], [150, 150], [50, 150], [255, 255, 0, 255], thickness=3.0)\ndraw_text(\"drawing##widget\", [50, 300], \"Some Text\", color=[255, 255, 0, 255], size=15)\ndraw_text(\"drawing##widget\", [0, 0], \"Origin\", color=[255, 255, 0, 255], size=15)\ndraw_circle(\"drawing##widget\", [400, 250], 50, [255, 255, 0, 255])\ndraw_polyline(\"drawing##widget\", [[300, 500], [200, 200], [500, 700]], [255, 255, 0, 255])\ndraw_polygon(\"drawing##widget\", [[363, 471], [100, 498], [50, 220]], [255, 125, 0, 255])\ndraw_bezier_curve(\"drawing##widget\", [50, 200], [150, 250], [300, 150], [600, 250], [255, 255, 0, 255], thickness=2.0)\ndraw_arrow(\"drawing##widget\", [50, 70], [100, 65], [0, 200, 255], 1, 10)\n\nstart_dearpygui()\n",
"id": "4569996",
"language": "Python",
"matching_score": 3.3035264015197754,
"max_stars_count": 0,
"path": "Examples/drawing_example.py"
}
] | 1.596743 |
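The drawing_example.py record above wires slider callbacks to set_drawing_origin/set_drawing_scale and then issues draw_* calls against a named drawing. A minimal sketch of that callback pattern, assuming the same legacy 0.6-era dearpygui.core/dearpygui.simple API used in the files above (the widget names "Zoom" and "canvas" are illustrative, not taken from the file):

from dearpygui.core import *
from dearpygui.simple import *

def rescale(sender, data):
    # re-read the slider and apply it uniformly as the drawing's x/y scale
    s = get_value("Zoom")
    set_drawing_scale("canvas", s, s)

add_slider_float("Zoom", default_value=1.0, min_value=0.1, max_value=5.0, callback=rescale)
add_drawing("canvas", width=300, height=300)
draw_circle("canvas", [150, 150], 50, [255, 255, 0, 255])

start_dearpygui()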
yemingrujing | [
{
"content": "# -*- coding: utf-8 -*-\n\nimport requests\nimport re\n\n'''\n登录CSDN帐号后爬取我的博客评论管理列表\n\nCustomer opener Cookie\n'''\n\n\nclass CsdnSpider(object):\n def __init__(self):\n self.url_login = \"https://passport.csdn.net/v1/register/pc/login/doLogin\"\n self.url_feedback = \"http://write.blog.csdn.net/feedback/in/\"\n self.header = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n 'referer': 'https://passport.csdn.net/login',\n 'origin': 'https://passport.csdn.net',\n 'content-Type': 'application/json;charset=UTF-8',\n 'x-requested-with': 'XMLHttpRequest',\n 'accept': 'application/json, text/plain, */*',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'zh-CN,zh;q=0.9',\n 'connection': 'keep-alive',\n 'Host': 'passport.csdn.net'\n }\n self.proxies = {\n 'https': 'https://172.16.31.10:8888'\n }\n\n def get_random_webflow_form(self):\n '''\n uaToken 网页js生成\n webUmidToken 网页js生成\n :return: 返回FORM表单流水字典\n '''\n return {'loginType': '1', 'uaToken': '', 'webUmidToken': ''}\n\n def login(self, user_name=None, password=None):\n '''\n 登录CSDN账号\n :param user_name: 用户名\n :param password: 密码\n :return: 返回登陆后的cookie\n '''\n if user_name is None or password is None:\n print('You need use a valied user name and password to login!')\n return None\n post_form = self.get_random_webflow_form()\n post_form['userIdentification'] = user_name\n post_form['pwdOrVerifyCode'] = password\n print(str(post_form))\n try:\n response = requests.post(self.url_login, data=str(post_form), headers=self.header, proxies=self.proxies, verify=False)\n cookies = requests.utils.dict_from_cookiejar(response.cookies)\n print(response.text)\n return cookies\n except Exception as e:\n print(\"login Exception.\" + str(e))\n return None\n\n def get_page_feedback_dict(self, cookies=None, page_index=1):\n '''\n 获取CSDN我的博客页面的评论管理页面我文章的评论列表(按照评论页数获取)\n :return: {'maxPage'100:, 'dict':[{'article':'xxx', 'url':'xxx', 'commentator':'xxx', 'time':'xxx', 'content':'xxx'}]}\n '''\n content = requests.get(self.url_feedback + str(page_index), proxies=self.proxies, verify=False, cookies=cookies).text\n page_content_search = re.search(re.compile(r'<div class=\"page_nav\"><span>.*?共(\\d+)页</span>'), content)\n if page_content_search is not None:\n max_page = re.search(re.compile(r'<div class=\"page_nav\"><span>.*?共(\\d+)页</span>'), content).group(1)\n reg_main = re.compile(\n r\"<tr class='altitem'>.*?<a href='(.*?)'.*?>(.*?)</a></td><td><a.*?class='user_name' target=_blank>(.*?)</a></td><td>(.*?)</td>.*?<div class='recon'>(.*?)</div></td></tr>\",\n re.S)\n main_items = re.findall(reg_main, content)\n dict_list = list()\n for item in main_items:\n dict_list.append({\n 'url': item[0],\n 'article': item[1],\n 'commentator': item[2],\n 'time': item[3],\n 'content': item[4]\n })\n return {'maxPage': max_page, 'dict': dict_list}\n return None\n\n def run(self, name=None, pwd=None):\n cookies = self.login(name, pwd)\n\n total_feedback = 0;\n cur_page = 1\n max_page = 1\n while cur_page <= max_page:\n print(\"start get \" + str(cur_page) + \" page feedback.\")\n page_dict = self.get_page_feedback_dict(cookies=cookies, page_index=cur_page)\n if page_dict is None:\n break\n total_feedback = total_feedback + len(page_dict['dict'])\n max_page = int(page_dict['maxPage'])\n cur_page = cur_page + 1\n print(\"Finish! Toal valid feedback is:\" + str(total_feedback))\n\n\nif __name__ == \"__main__\":\n CsdnSpider().run(\"Your UserName\", \"Your PassWord\")\n",
"id": "7136167",
"language": "Python",
"matching_score": 4.383264064788818,
"max_stars_count": 0,
"path": "CsdnDiscussSpider/spider_main.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\nimport requests\nfrom urllib import parse\nimport xlwt, random, os\nfrom lxml import html\n\netree = html.etree\n\n\nclass DouBanSpider(object):\n def __init__(self):\n self.login_url = 'https://accounts.douban.com/j/mobile/login/basic'\n self.comment_url = 'https://movie.douban.com/subject/26794435/comments?start=start&limit=20&sort=new_score&status=P&comments_only=1'\n self.login_header = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36',\n 'Referer': 'https://accounts.douban.com/passport/login_popup?login_source=anony',\n 'Origin': 'https://accounts.douban.com',\n 'content-Type': 'application/x-www-form-urlencoded',\n 'x-requested-with': 'XMLHttpRequest',\n 'accept': 'application/json',\n 'accept-encoding': 'gzip, deflate, br',\n 'accept-language': 'zh-CN,zh;q=0.9',\n 'connection': 'keep-alive',\n 'Host': 'accounts.douban.com'\n }\n self.proxies = {\n 'https': 'https://172.16.31.10:8888'\n }\n self.agent_list = [\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv2.0.1) Gecko/20100101 Firefox/4.0.1\",\n \"Mozilla/5.0 (Windows NT 6.1; rv2.0.1) Gecko/20100101 Firefox/4.0.1\",\n \"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11\",\n \"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11\",\n \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36\"\n ]\n\n def login(self, username, password):\n data = {'ck': '', 'name': username, 'password': password, 'remember': '', 'ticket': ''}\n data = parse.urlencode(data)\n response = requests.post(self.url, headers=self.login_header, data=data, proxies=self.proxies, verify=False, )\n cooikes = requests.utils.dict_from_cookiejar(response.cookies)\n return cooikes\n\n def getComment(self, cooikes=None):\n start = 0\n w = xlwt.Workbook(encoding='ascii')\n ws = w.add_sheet('短评')\n index = 1\n headers = {\n 'User-Agent': random.choice(self.agent_list)\n }\n while True:\n try:\n url = self.comment_url.replace('start', str(start))\n start += 20\n if cooikes is None:\n req = requests.get(url, headers=headers, proxies=self.proxies)\n else:\n req = requests.get(url, headers=headers, cooikes=cooikes, proxies=self.proxies)\n respose = req.json()\n sourPre = etree.HTML(respose['html'])\n nodes = sourPre.xpath('//div[contains(@class, \"comment-item\")]')\n for node in nodes:\n name = node.xpath('.//span[@class=\"comment-info\"]/a/text()')[0]\n star = node.xpath('.//span[@class=\"comment-info\"]/span[2]/@class')[0][7]\n comment = node.xpath('.//div[@class=\"comment\"]/p/span/text()')[0]\n ws.write(index, 0, index)\n ws.write(index, 1, name)\n ws.write(index, 2, star)\n ws.write(index, 3, comment)\n index += 1\n if index > 60:\n break\n except Exception as e:\n print(e)\n break\n dirName = 'F:/py/xls'\n if not os.path.exists(dirName):\n os.makedirs(dirName)\n w.save('%s/%s' % (dirName, 'nezha.xls'))\n\n\nif __name__ == \"__main__\":\n # 获取cookie,以获取更多的短评数据\n # cookies = DouBanSpider.login('YOUR USERNAME', 'YOUR PASSWORD')\n DouBanSpider().getComment()\n",
"id": "10916919",
"language": "Python",
"matching_score": 3.6620559692382812,
"max_stars_count": 0,
"path": "MySpiderDemo/dou_ban_spider.py"
},
{
"content": "# -*-coding:utf-8-*-\n\n# 天堂图片网爬取高质量图片\n\nimport urllib.request as urllib2\nimport random, re, os\nfrom lxml import html\nfrom urllib.parse import quote\nfrom bs4 import BeautifulSoup\nfrom http import cookiejar\nimport string\nimport ssl\n\n'''\n# userAgent是爬虫与反爬虫斗争的第一步\nua_headers = {\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',\n}'''\n# 用于模拟http头的User-agent\nua_list = [\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv2.0.1) Gecko/20100101 Firefox/4.0.1\",\n \"Mozilla/5.0 (Windows NT 6.1; rv2.0.1) Gecko/20100101 Firefox/4.0.1\",\n \"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11\",\n \"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11\",\n \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11\"\n]\n\netree = html.etree\nuserAgent = random.choice(ua_list)\n\n# 要爬取的关键词,中文编码出错,待解决\nImg_Name = '美女'\nurlPre = \"http://www.ivsky.com/search.php?q=\" + Img_Name + \"&PageNo=\"\n# 声明一个CookieJar对象实例来保存cookie\ncookie = cookiejar.CookieJar()\n# 利用urllib.request库的HTTPCookieProcessor对象来创建cookie处理器,也就CookieHandler\ncookieHandler = urllib2.HTTPCookieProcessor(cookie)\n# 设置代理,创建ProxyHandler\nhttpProxyHandler = urllib2.ProxyHandler({\"https\": \"192.168.127.12:8888\"})\n# 创建opener\nopener = urllib2.build_opener(httpProxyHandler, cookieHandler)\n# 安装opener\nurllib2.install_opener(opener)\n# 构造图片页数\npageCount = 1\nflag = True\nwhile flag:\n urlPre = quote(urlPre, safe=string.printable)\n requestPre = urllib2.Request(url=urlPre + str(pageCount))\n requestPre.add_header('User-Agent', userAgent)\n # 使用自己安装好的opener\n responsePre = urllib2.urlopen(requestPre)\n sourPre = etree.HTML(responsePre.read())\n aaa = sourPre.xpath('//*[@class=\"pagelist\"]/a/text()')\n pageCount = int(aaa[-2])\n if aaa[-1] != '下一页' or int(aaa[-2]) == 7:\n flag = False\n\npageNumberS = 0\n# 图片总页数,待更新自动获取总页数。\npageCount=1\nprint('计算完成,关键词为%s的图片总计有%s页' % (Img_Name, pageCount))\n\nprint('现在开始下载...')\nfor p in range(pageCount):\n pageNumberS = pageNumberS + 1\n pageNumber = str(pageNumberS)\n\n # 构建URL\n url = urlPre + pageNumber\n\n # 通过Request()方法构造一个请求对象\n\n request1 = urllib2.Request(url=url)\n # 把头添加进去\n request1.add_header('User-Agent', userAgent)\n # 向指定的url地址发送请求,并返回服务器响应的类文件对象\n response1 = urllib2.urlopen(request1)\n # 服务器返回的类文件对象支持python文件对象的操作方法\n soup1 = etree.HTML(response1.read())\n imageUrls = soup1.xpath('//*[contains(@class, \"il_img\")]/a/@href')\n dirName = 'F:/py/img'\n if not os.path.exists(dirName):\n os.makedirs(dirName)\n\n for imageSkipUrl in imageUrls:\n # 可以直接取属性获得href内容 https://bbs.csdn.net/topics/392161042?list=lz\n url2 = 'https://www.ivsky.com' + imageSkipUrl\n context = ssl._create_unverified_context()\n request2 = urllib2.Request(url=url2)\n request2.add_header('User_Agent', userAgent)\n response2 = urllib2.urlopen(request2, context=context)\n soup2 = BeautifulSoup(response2, \"html.parser\")\n pattern = re.compile(r\"var imgURL='(.*?)';\", re.MULTILINE | re.DOTALL)\n # //img.ivsky.com'+imgURL+'?download\n script = soup2.find(\"script\", text=pattern)\n imageUrlFoot = pattern.search(script.text).group(1)\n if len(imageUrlFoot.strip()) != 0:\n # 匹配网址URL中最后一个反斜杠/后面的内容\n imageName = re.search(r\"[^/]+(?!.*/)\", imageUrlFoot, re.MULTILINE | re.DOTALL).group()\n\n imageUrl = 'https://img.ivsky.com' + imageUrlFoot\n # 请求头加Cache-Control: no-cahce,网站会将图片缓存在本地memory cache中,实际下载不需要缓存\n headers = {\n 'Cache-Control': 'no-cahce',\n 
'Referer': ' https://www.ivsky.com/download_pic.html?picurl=' + imageUrlFoot,\n 'Sec-Fetch-Mode': ' no-cors',\n 'User-Agent': userAgent\n }\n request = urllib2.Request(url=imageUrl, data=None, headers=headers)\n response = urllib2.urlopen(request, context=context)\n data = response.read()\n with open('%s/%s_%s.jpg' % (dirName, pageNumberS, imageName), 'wb') as f:\n f.write(data)\n print('正在下载第%s页第%s张图片,总计%s页' % (pageNumberS, imageName, pageCount))\n print('存储为%s/%s_%s.jpg' % (dirName, pageNumberS, imageName))\nprint(\"已经全部下载完毕!\")\n",
"id": "4416249",
"language": "Python",
"matching_score": 2.9269204139709473,
"max_stars_count": 0,
"path": "AndroidSpider/bs_test.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\n# Define here the models for your spider middleware\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\nimport random\nfrom scrapy import signals\n\n\nclass CartoonSpiderMiddleware(object):\n # Not all methods need to be defined. If a method is not defined,\n # scrapy acts as if the spider middleware does not modify the\n # passed objects.\n\n @classmethod\n def from_crawler(cls, crawler):\n # This method is used by Scrapy to create your spiders.\n s = cls()\n crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)\n return s\n\n def process_spider_input(self, response, spider):\n # Called for each response that goes through the spider\n # middleware and into the spider.\n\n # Should return None or raise an exception.\n return None\n\n def process_spider_output(self, response, result, spider):\n # Called with the results returned from the Spider, after\n # it has processed the response.\n\n # Must return an iterable of Request, dict or Item objects.\n for i in result:\n yield i\n\n def process_spider_exception(self, response, exception, spider):\n # Called when a spider or process_spider_input() method\n # (from other spider middleware) raises an exception.\n\n # Should return either None or an iterable of Response, dict\n # or Item objects.\n pass\n\n def process_start_requests(self, start_requests, spider):\n # Called with the start requests of the spider, and works\n # similarly to the process_spider_output() method, except\n # that it doesn’t have a response associated.\n\n # Must return only requests (not items).\n for r in start_requests:\n yield r\n\n def spider_opened(self, spider):\n spider.logger.info('Spider opened: %s' % spider.name)\n\n\nclass UserAgentMiddleware(object):\n def __init__(self):\n self.user_agent_list = [\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',\n 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',\n 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv,2.0.1) Gecko/20100101 Firefox/4.0.1',\n 'Mozilla/5.0 (Windows NT 6.1; rv,2.0.1) Gecko/20100101 Firefox/4.0.1',\n 'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 UBrowser/6.2.4094.1 Safari/537.36',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',\n ]\n\n self.proxies = [\n 'https://172.16.58.3:1081',\n 'https://172.16.58.3:8080',\n 'https://192.168.3.11:8080',\n 'https://172.16.58.3:1080',\n ]\n\n def process_request(self, request, spider):\n # 使用HTTPS代理\n if request.url.startswith(\"https://\"):\n request.meta[\"proxy\"] = random.choice(self.proxies)\n 
# 使用随机User-Agent\n request.headers['User-Agent'] = random.choice(self.user_agent_list)\n",
"id": "11819301",
"language": "Python",
"matching_score": 1.8153355121612549,
"max_stars_count": 0,
"path": "cartoon/cartoon/middlewares.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport json\nimport codecs\nimport pymongo\n\n\nclass W3schoolPipeline(object):\n def __init__(self):\n self.file = codecs.open('w3school_data_utf8.json', 'wb', encoding='utf-8')\n\n def process_item(self, item, spider):\n if spider.name == \"w3school\":\n line = json.dumps(dict(item), cls=MyEncoder, indent=4, ensure_ascii=False) + '\\n'\n self.file.write(line)\n return item\n else:\n pass\n\n\nclass DBNovelPipeline(object):\n def __init__(self, mongo_uri, mongo_db):\n self.mongo_uri = mongo_uri\n self.mongo_db = mongo_db\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(\n mongo_uri=crawler.settings.get('MONGO_URI'),\n mongo_db=crawler.settings.get('MONGO_DB')\n )\n\n def open_spider(self, spider):\n self.client = pymongo.MongoClient(self.mongo_uri)\n self.db = self.client[self.mongo_db]\n self.db.authenticate('test', '123456789')\n\n def process_item(self, item, spider):\n if spider.name == \"dbnovel\":\n name = item.collection\n self.db[name].insert(dict(item))\n return item\n else:\n pass\n\n def close_spider(self, spider):\n self.client.close()\n\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, bytes):\n return str(obj, encoding='utf-8')\n return json.JSONEncoder.default(self, obj)\n",
"id": "10130056",
"language": "Python",
"matching_score": 1.0847501754760742,
"max_stars_count": 0,
"path": "cartoon/cartoon/pipelines.py"
},
{
"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy.spiders import Spider\nfrom scrapy.selector import Selector\nfrom ..items import Noveltem\n\n\n# https://www.jianshu.com/p/7c1a084853d8\nclass DbnovelSpider(Spider):\n name = 'dbnovel'\n custom_settings = {\n 'ITEM_PIPELINES': {\n 'cartoon.pipelines.DBNovelPipeline': 1,\n },\n 'DEFAULT_REQUEST_HEADERS': {\n 'Accept': \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3\",\n 'Accept-Language': \"zh-CN,zh;q=0.9\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Connection\": \"keep-alive\",\n \"Host\": \"www.qb5200.tw\",\n \"Referer\": \"https://www.qb5200.tw/\",\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36\",\n 'Upgrade-Insecure-Requests': '1',\n 'Content-Type': 'application/x-www-form-urlencoded'\n },\n }\n allowed_domains = ['www.qb5200.tw']\n start_urls = ['https://www.qb5200.tw/']\n\n def parse(self, response):\n sel = Selector(response)\n details = sel.xpath('//*[@class=\"item\"]/div/dl')\n for detail in details:\n item = Noveltem()\n item['title'] = detail.xpath('dt/a/text()').extract_first()\n item['url'] = ''.join([\"https://www.qb5200.tw\", detail.xpath('dt/a/@href').extract_first().strip()])\n yield scrapy.Request(item['url'], meta={'item': item}, callback=self.parse_profile)\n # item['profile'] = detail.xpath('dd/text()').extract_first().strip()\\\n # .replace(u'\\u3000', u' ').replace(u'\\xa0', u' ')\n\n def parse_profile(self, response):\n item = response.meta['item']\n sel = Selector(response)\n info = sel.xpath('//*[@class=\"info\"]')\n item['profile'] = info.xpath('div[@class=\"intro\"]/text()').extract_first().strip().replace(u'\\u3000', u' ').replace(u'\\xa0', u' ')\n item['author'] = info.xpath('div[@class=\"small\"]/span[1]/text()').extract_first().split(\":\")[-1]\n item['category'] = info.xpath('div[@class=\"small\"]/span[2]/text()').extract_first().split(\":\")[-1]\n item['status'] = info.xpath('div[@class=\"small\"]/span[3]/text()').extract_first().split(\":\")[-1]\n item['wordNum'] = info.xpath('div[@class=\"small\"]/span[4]/text()').extract_first().split(\":\")[-1]\n item['updateTime'] = info.xpath('div[@class=\"small\"]/span[5]/text()').extract_first().split(\":\")[-1]\n yield item\n",
"id": "10070036",
"language": "Python",
"matching_score": 3.07163143157959,
"max_stars_count": 0,
"path": "cartoon/cartoon/spiders/dbnovel.py"
},
{
"content": "# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy import Item, Field\n\n\nclass W3schoolItem(Item):\n title = Field()\n link = Field()\n desc = Field()\n\n\nclass Noveltem(Item):\n collection = table = 'recommend'\n title = Field()\n url = Field()\n profile = Field()\n author = Field()\n category = Field()\n status = Field()\n wordNum = Field()\n updateTime = Field()\n",
"id": "6791377",
"language": "Python",
"matching_score": 1.2442541122436523,
"max_stars_count": 0,
"path": "cartoon/cartoon/items.py"
}
] | 2.92692 |
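The spiders and the UserAgentMiddleware above all rotate User-Agent strings (and optionally proxies) per request. A simplified sketch of that idea with plain requests — not the Scrapy middleware itself; the UA strings are copied from the lists above and the URL is a placeholder:

import random
import requests

USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
]

def fetch(url, proxies=None):
    # pick a fresh User-Agent for every request, as the middleware above does
    headers = {"User-Agent": random.choice(USER_AGENTS)}
    resp = requests.get(url, headers=headers, proxies=proxies, timeout=10)
    resp.raise_for_status()
    return resp.text

# html = fetch("https://example.com")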
georgschweflinghaus | [
{
"content": "# -*- coding: iso-8859-1 -*-\n\"\"\"\n MoinMoin - Navitree Macro\n\n Originally authored by <NAME> (<EMAIL>)\n Based on Navigation.py\n\n Copyright(c) 2021 <NAME>\n Usage:\n 1. copy to wiki/data/plugin/macro/\n 2. in wiki if editing page content add e.g.:\n <<Navitree(childtree,2)>> for a childtree depths 2 (default unordered list)\n <<Navitree(childtree,2,ol)>> for a childtree depth 2 (ordered list style)\n\"\"\"\n\nimport re\nfrom MoinMoin import config\nfrom MoinMoin.Page import Page\n\nDependencies = [\"namespace\"]\n\ndef _getParent(pagename):\n \"\"\" Return parent of pagename.\n \"\"\"\n pos = pagename.rfind('/')\n if pos >= 0:\n return pagename[:pos]\n return None\n\n\ndef _getPages(request, filter_regex=None):\n \"\"\" Return a (filtered) list of pages names.\n \"\"\"\n filter = None\n if filter_regex:\n filter = re.compile(filter_regex).match\n pages = request.rootpage.getPageList(filter=filter)\n pages.sort()\n return pages\n\n\ndef _getLinks(request, pagename, filter_regex=None):\n \"\"\" Return pagename for up, first, prev, next, last; each can be None.\n \"\"\"\n pos, size, first, prev, next, last = 0, 0, None, None, None, None\n\n all_pages = _getPages(request, filter_regex)\n size = len(all_pages)\n\n if all_pages:\n try:\n pos = all_pages.index(pagename)\n except ValueError:\n # this should never happen in theory, but let's be sure\n pass\n else:\n if pos > 0:\n first = all_pages[0]\n prev = all_pages[pos-1]\n if pos+1 < len(all_pages):\n next = all_pages[pos+1]\n last = all_pages[-1]\n\n return pos, size, (first, prev, next, last)\n\n\nclass Navitree:\n \"\"\" Dispatcher class implementing the navitree schemes.\n \"\"\"\n\n # querystring for slideshow links\n PROJECTION = 'action=print&media=projection'\n\n def __init__(self, macro, args):\n \"\"\" Prepare common values used during processing.\n \"\"\"\n self.macro = macro\n self.args = args.split(',')\n self._ = self.macro.request.getText\n\n self.pagename = self.macro.formatter.page.page_name\n self.print_mode = self.macro.request.form.has_key('action') \\\n and self.macro.request.form['action'][0] == 'print'\n self.media = self.macro.request.form.get('media', [None])[0]\n self.querystr = self.print_mode and self.PROJECTION or ''\n\n\n def dispatch(self):\n \"\"\" Return None if in plain print mode (no navigational\n elements in printouts), else the proper HTML code.\n \"\"\"\n if self.print_mode and self.media != 'projection':\n return None\n\n scheme = self.args[0] or '<default>'\n return getattr(self, 'do_'+scheme, self.badscheme)()\n\n\n def badscheme(self):\n \"\"\" Bad scheme argument.\n \"\"\"\n _ = self._\n scheme = self.args[0]\n return (self.macro.formatter.sysmsg(1) +\n self.macro.formatter.text(\n _(\"Unsupported navitree scheme '%(scheme)s'!\") %\n {'scheme': scheme}) +\n self.macro.formatter.sysmsg(0))\n\n\n def do_childtree(self):\n return self.do_tree(root=self.pagename, currentdepth=0, parents={})\n\n def do_tree(self, root=None, currentdepth=0, parents={}):\n \"\"\" Navigate to subpages from a parent page.\n \"\"\"\n _ = self._\n try:\n depth = int(self.args[1])\n except (IndexError, TypeError, ValueError):\n depth = 0\n\n try:\n liststyle = self.args[2]\n except (IndexError, TypeError, ValueError):\n liststyle = \"ul\"\n\n # get parent page name if no root was specified\n parent = root or _getParent(self.pagename)\n if not parent:\n return (self.macro.formatter.sysmsg(1) +\n self.macro.formatter.text(_('No parent page found!'))+\n self.macro.formatter.sysmsg(0))\n else:\n parents[parent] = 
1\n\n # limit depth if a depth was specified\n if depth and currentdepth >= depth:\n return ''\n\n # iterate over children, adding links to all of them\n result = []\n result.append('<%s>' % liststyle)\n children = _getPages(self.macro.request, '^%s/' % parent)\n for child in children:\n # display short page name, leaving out the parent path\n # (and make sure the name doesn't get wrapped)\n shortname = child[len(parent)+1:]\n\n if shortname.count('/') > 0:\n shortname = re.sub('/.*$', '', shortname)\n child = parent + '/' + shortname\n\n if parents.has_key(child):\n continue\n\n result.append('<li>')\n\n if child == self.pagename:\n # do not link to focus\n result.append(self.macro.formatter.text(shortname))\n else:\n # link to child\n result.append(Page(self.macro.request, child).link_to(self.macro.request, text=shortname, querystr=self.querystr))\n\n result.append('</li>')\n\n result.append(self.do_tree(root=child, currentdepth=currentdepth+1, parents=parents))\n\n result.append('</%s>' % liststyle)\n return ''.join(result)\n\n\ndef execute(macro, args):\n # get HTML code with the links\n navi = Navitree(macro, args or '').dispatch()\n\n if navi:\n return '%s' % navi\n\n # navigation disabled in plain print mode\n return ''\n",
"id": "2710104",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "macro/Navitree.py"
}
] | 0 |
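Navitree.py above derives a page's parent by splitting on the last "/" and renders children as nested lists. A self-contained sketch of the same child-tree logic on a plain list of page names (no MoinMoin objects; the page names are made up):

def children_of(parent, pages):
    # direct children share the "parent/" prefix and add exactly one path segment
    prefix = parent + "/"
    seen = set()
    for name in pages:
        if name.startswith(prefix):
            seen.add(name[len(prefix):].split("/", 1)[0])
    return sorted(seen)

def render_tree(parent, pages, depth, max_depth=2):
    if depth >= max_depth:
        return ""
    items = "".join(
        "<li>%s%s</li>" % (c, render_tree(parent + "/" + c, pages, depth + 1, max_depth))
        for c in children_of(parent, pages)
    )
    return "<ul>%s</ul>" % items if items else ""

pages = ["Wiki/Setup", "Wiki/Setup/Linux", "Wiki/Setup/Windows", "Wiki/Usage"]
print(render_tree("Wiki", pages, 0))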
Joytora126 | [
{
"content": "# Copyright 2021 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Dict, Iterable, Tuple, Union\n\nimport ml_collections\n\n\ndef get_config():\n \"\"\"Returns config values other than model parameters.\"\"\"\n\n config = ml_collections.ConfigDict()\n\n # Where to search for pretrained ViT models.\n # Can be downloaded from gs://vit_models/imagenet21k\n config.pretrained_dir = '.'\n # Which dataset to finetune on. This can be the name of a tfds dataset\n # (see https://www.tensorflow.org/datasets/catalog/overview), or the path to\n # a directory with the following structure ($filename can be arbitrary):\n # \"{train,test}/$class_name/$filename.jpg\"\n config.dataset = ''\n # Path to manually downloaded dataset\n config.tfds_manual_dir = None\n # Path to tensorflow_datasets directory\n config.tfds_data_dir = None\n # Number of steps; determined by hyper module if not specified.\n config.total_steps = None\n\n # Resizes global gradients.\n config.grad_norm_clip = 1.0\n # Datatype to use for momentum state (\"bfloat16\" or \"float32\").\n config.optim_dtype = 'bfloat16'\n # Accumulate gradients over multiple steps to save on memory.\n config.accum_steps = 8\n\n # Batch size for training.\n config.batch = 512\n # Batch size for evaluation.\n config.batch_eval = 512\n # Shuffle buffer size.\n config.shuffle_buffer = 50_000\n # Run prediction on validation set every so many steps\n config.eval_every = 100\n # Log progress every so many steps.\n config.progress_every = 10\n # How often to write checkpoints. 
Specifying 0 disables checkpointing.\n config.checkpoint_every = 1_000\n\n # Number of batches to prefetch to device.\n config.prefetch = 2\n\n # Base learning-rate for fine-tuning.\n config.base_lr = 0.03\n # How to decay the learning rate (\"cosine\" or \"linear\").\n config.decay_type = 'cosine'\n # How to decay the learning rate.\n config.warmup_steps = 500\n\n # Alternatives : inference_time.\n config.trainer = 'train'\n\n # Will be set from ./models.py\n config.model = None\n # Only used in ./augreg.py configs\n config.model_or_filename = None\n # Must be set via `with_dataset()`\n config.dataset = None\n config.pp = None\n\n return config.lock()\n\n\n# We leave out a subset of training for validation purposes (if needed).\nDATASET_PRESETS = {\n 'cifar10': ml_collections.ConfigDict(\n {'total_steps': 10_000,\n 'pp': ml_collections.ConfigDict(\n {'train': 'train[:98%]',\n 'test': 'test',\n 'crop': 384})\n }),\n 'cifar100': ml_collections.ConfigDict(\n {'total_steps': 10_000,\n 'pp': ml_collections.ConfigDict(\n {'train': 'train[:98%]',\n 'test': 'test',\n 'crop': 384})\n }),\n 'imagenet2012': ml_collections.ConfigDict(\n {'total_steps': 20_000,\n 'pp': ml_collections.ConfigDict(\n {'train': 'train[:99%]',\n 'test': 'validation',\n 'crop': 384})\n }),\n \n 'mnist': ml_collections.ConfigDict(\n {'total_steps': 20_000,\n 'pp': ml_collections.ConfigDict(\n {'train': 'train[:99%]',\n 'test': 'validation',\n 'crop': 384})\n }),\n}\n\n\ndef with_dataset(config: ml_collections.ConfigDict,\n dataset: str) -> ml_collections.ConfigDict:\n config = ml_collections.ConfigDict(config.to_dict())\n print(\"1\")\n config.dataset = dataset\n config.update(DATASET_PRESETS[dataset])\n \n return config\n\n\ndef flatten(\n config: Union[ml_collections.ConfigDict, Dict[str, Any]],\n prefix: Tuple[str, ...] = ('config',)\n) -> Iterable[Tuple[str, Any]]:\n \"\"\"Returns a flat representation of `config`, e.g. for use in sweeps.\"\"\"\n for k, v in config.items():\n if isinstance(v, (dict, ml_collections.ConfigDict)):\n yield from flatten(v, prefix + (k,))\n else:\n yield ('.'.join(prefix + (k,)), v)\n",
"id": "2448223",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "vit_jax/configs/common.py"
}
] | 0 |
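A rough usage sketch for the config module above, assuming it is importable as vit_jax.configs.common (as its path field suggests): with_dataset() merges a dataset preset into a copy of the locked base config, and flatten() yields dotted keys suitable for sweeps:

from vit_jax.configs import common

config = common.with_dataset(common.get_config(), 'cifar10')
print(config.total_steps)      # 10_000, filled in from DATASET_PRESETS
print(config.pp.train)         # 'train[:98%]'

for key, value in common.flatten(config):
    print(key, value)          # e.g. config.base_lr 0.03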
lukefredrickson | [
{
"content": "from utils.globals import *\nimport os\nimport pandas as pd\n\n\ndef main():\n # get all housing files\n pums_files = [file for file in os.listdir(PUMS_CSV_FILE_DIRECTORY)\n if 'h' in file]\n combined_df = None\n for pums_csv in pums_files:\n print(f\"Analyzing {pums_csv}\", end='')\n # open csv\n with open(os.path.join(PUMS_CSV_FILE_DIRECTORY, pums_csv)) as df_file:\n df = pd.read_csv(df_file, low_memory=False)\n # create a geoid from state code and puma code (leading zeros are critical here)\n geoid = df['ST'].map('{:02d}'.format) + df['PUMA'].map('{:05d}'.format)\n # move geoid column to the front\n df.insert(0, 'GEOID', geoid)\n # group by geoid and count rows\n df_count = df.groupby('GEOID').size().to_frame('size').reset_index()\n # combine dataframe with all previous dfs\n if combined_df is None:\n combined_df = df_count\n else:\n combined_df = pd.concat([combined_df, df_count], axis=0)\n print(\" ... done!\")\n print(\"Exporting to 'pums.csv'\")\n # export to csv\n combined_df.to_csv(os.path.join(PUMS_CSV_FILE_DIRECTORY, 'pums.csv'), index=False)\n\n\nif __name__ == '__main__':\n main()\n",
"id": "1716396",
"language": "Python",
"matching_score": 2.7392385005950928,
"max_stars_count": 0,
"path": "analyze_pums.py"
},
{
"content": "import plotly.express as px\nimport dash\nfrom dash import dcc\nfrom dash import html\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output, State\nimport os\n\nimport pandas as pd\nimport json\n\nfrom utils.globals import *\n\n\n# Initialize app\n\napp = dash.Dash(\n __name__,\n external_stylesheets=[dbc.themes.BOOTSTRAP],\n meta_tags=[\n {\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}\n ]\n)\napp.title = \"ACS Dashboard\"\nserver = app.server\n\n\n# Load data\nwith open(os.path.join(PUMAS_GEOJSON_DIRECTORY, PUMAS_GEOJSON_FILE)) as geojson_file:\n pumas = json.load(geojson_file)\n\nwith open(os.path.join(PUMS_CSV_FILE_DIRECTORY, 'pums.csv')) as df_file:\n df = pd.read_csv(df_file, dtype={'GEOID': str})\n\n# Map figure\n\nmap_fig = px.choropleth_mapbox(df, geojson=pumas, locations='GEOID', featureidkey=\"properties.GEOID10\", color='size',\n color_continuous_scale=px.colors.sequential.Viridis,\n mapbox_style=\"carto-positron\",\n zoom=7, center={\"lat\": 43.9, \"lon\": -72.75},\n opacity=0.7,\n )\nmap_fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n\n\n# App layout\n\nheader = dbc.Row(\n id=\"header\",\n justify=\"between\",\n align=\"start\",\n children=[\n dbc.Col(\n width=8,\n children=[\n html.H1(children=\"American Community Survey Analysis\"),\n ]\n ),\n ],\n)\n\nmain_map = html.Div(\n id=\"map-container\",\n children=[\n dbc.Row(\n dbc.Col(\n width=12,\n children=[\n html.H2(\n \"PUMAs\",\n id=\"map-title\",\n ),\n ]\n )\n ),\n dbc.Row(\n dbc.Col(\n width=12,\n children=[\n dcc.Graph(\n id=\"map-figure\",\n figure=map_fig\n ),\n ]\n )\n )\n ]\n)\n\nstyles = {\n 'pre': {\n 'border': 'thin lightgrey solid',\n 'overflowX': 'scroll'\n }\n}\n\ngraphs = html.Div(\n id=\"graph-container\",\n children=[\n dbc.Row(\n children=[\n dbc.Col(\n children=[\n ]\n ),\n dbc.Col(\n html.Div(\n children=[\n dcc.Markdown(\"\"\"\n **Click Data**\n\n Click on PUMAs on the map.\n \"\"\"),\n html.Pre(id='click-data', style=styles['pre']),\n ],\n className='three columns'),\n ),\n ]\n ),\n dbc.Row(\n children=[\n dbc.Col(\n\n ),\n dbc.Col(\n\n ),\n ]\n )\n ]\n)\n\napp.layout = dbc.Container(\n id=\"root\",\n fluid=True,\n className=\"p-5\",\n children=[\n header,\n main_map,\n graphs,\n ],\n)\n\n\n@app.callback(\n Output('click-data', 'children'),\n Input('map-figure', 'clickData')\n)\ndef display_click_data(clickData):\n try:\n return clickData[\"points\"][0][\"location\"]\n except TypeError:\n return\n\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)",
"id": "7715375",
"language": "Python",
"matching_score": 2.157193899154663,
"max_stars_count": 0,
"path": "dashboard.py"
},
{
"content": "import os\nimport json\nfrom utils.globals import *\nimport subprocess\n\n\ndef main():\n convert_pumas()\n consolidate_pumas()\n simplify_pumas()\n print(\"Done!\")\n\n\ndef consolidate_pumas():\n # setup the skeleton for the GEOJSON file\n # we'll read all the pumas into the 'features' list\n pumas_dict = {\n 'type': 'FeatureCollection',\n 'name': 'pumas',\n 'crs': {\n 'type': 'name',\n 'properties': {\n 'name': 'urn:ogc:def:crs:EPSG::4269'\n }\n },\n 'features': []\n }\n # get list of json files in the pumas directory\n puma_files = [file for file in os.listdir(PUMAS_GEOJSON_DIRECTORY)\n if file.endswith('.json') and file != PUMAS_GEOJSON_FILE]\n # loop through list and open all json files as python dicts\n # then add the puma data to the pumas_dict 'features' list\n print(f'Consolidating all PUMA GEOJSON files into \\'{PUMAS_GEOJSON_FILE}\\'')\n for puma_filename in puma_files:\n print(f'Incorporating \\'{puma_filename}\\'', end='')\n with open(os.path.join(PUMAS_GEOJSON_DIRECTORY, puma_filename)) as puma:\n puma_data = json.load(puma)\n for feature in puma_data['features']:\n pumas_dict['features'].append(feature)\n print(' ... done!')\n # export the pumas_dict to a json file\n print(\"Exporting consolidated geojson ...\")\n with open(os.path.join(PUMAS_GEOJSON_DIRECTORY, PUMAS_GEOJSON_FILE), 'w') as outfile:\n json.dump(pumas_dict, outfile)\n\n\ndef convert_pumas():\n print(\"Converting PUMA shapefiles to geojson ...\")\n completed_process = subprocess.run([\"node\", \"convert_pumas.js\"], text=True, capture_output=True)\n print(completed_process.stdout)\n\n\ndef simplify_pumas():\n print(\"Simplifying consolidated PUMA geojson ...\")\n completed_process = subprocess.run([\"node\", \"simplify_pumas.js\"], text=True, capture_output=True)\n print(completed_process.stdout)\n\n\nif __name__ == '__main__':\n main()\n",
"id": "3123494",
"language": "Python",
"matching_score": 2.3513145446777344,
"max_stars_count": 0,
"path": "consolidate_pumas.py"
},
{
"content": "DATA_DIRECTORY = './data/'\n\nPUMS_DOWNLOAD_URL = 'https://www2.census.gov/programs-surveys/acs/data/pums/2019/5-Year/'\nPUMS_FILE_NAMES = './pums_file_names.txt'\nPUMS_CSV_FILE_DIRECTORY = './data/pums/'\nPUMS_ZIP_FILE_DIRECTORY = './data/pums/zip'\n\nPUMAS_DOWNLOAD_URL = 'https://www2.census.gov/geo/tiger/TIGER2019/PUMA/'\nPUMAS_DIRECTORY = './data/pumas/'\nPUMAS_FILE_NAMES = './puma_file_names.txt'\nPUMAS_GEOJSON_DIRECTORY = './data/pumas/geojson/'\nPUMAS_SHAPE_FILE_DIRECTORY = './data/pumas/shp/'\nPUMAS_ZIP_FILE_DIRECTORY = './data/pumas/zip/'\nPUMAS_GEOJSON_FILE = 'pumas.json'\n\n\nSHAPEFILE_TO_GEOJSON_CONVERTER = 'http://ogre.adc4gis.com/convert'",
"id": "2808365",
"language": "Python",
"matching_score": 1.5160356760025024,
"max_stars_count": 0,
"path": "utils/globals.py"
},
{
"content": "import urllib.request\nimport zipfile\nimport os\nimport sys\nfrom utils.download_utils import ReportHook, fix_bad_zip_file\nfrom utils.globals import *\n\n# script globals\ncurrent_file = \"\" # keep track of the current file being worked on\n# read in filenames to download from text file\nwith open(PUMS_FILE_NAMES) as file_names:\n files = file_names.read().splitlines()\n\n\ndef main():\n # create data path folders if they don't already exist\n for directory in [DATA_DIRECTORY, PUMS_CSV_FILE_DIRECTORY, PUMS_ZIP_FILE_DIRECTORY]:\n try:\n os.mkdir(directory)\n except FileExistsError:\n pass\n\n for file_name in files:\n global current_file\n current_file = file_name\n \n # download file if it doesn't already exist\n # check in csv directory\n if os.path.exists(os.path.join(PUMS_CSV_FILE_DIRECTORY, file_name)):\n file_path = os.path.join(PUMS_CSV_FILE_DIRECTORY, file_name)\n print(\"Downloading file '%s' ... done!\" % file_name, end=\"\")\n # check in zip file directory\n elif os.path.exists(os.path.join(PUMS_ZIP_FILE_DIRECTORY, file_name)):\n file_path = os.path.join(PUMS_ZIP_FILE_DIRECTORY, file_name)\n print(\"Downloading file '%s' ... done!\" % file_name, end=\"\")\n # download file\n else:\n try:\n file_path = os.path.join(PUMS_CSV_FILE_DIRECTORY, file_name)\n report_hook = ReportHook() # hook to report download progress\n report_hook.current_file = current_file\n urllib.request.urlretrieve((PUMS_DOWNLOAD_URL + file_name), file_path, reporthook=report_hook.reporthook)\n print(\" ... done!\", end=\"\")\n sys.stdout.flush()\n except urllib.error.HTTPError:\n print(\"Downloading file '%s' ... failed, invalid file!\" % file_name)\n continue\n \n # unzip file\n try:\n with zipfile.ZipFile(file_path, 'r') as zip_ref:\n zip_ref.extractall(PUMS_CSV_FILE_DIRECTORY)\n print(\" ... zip file extracted!\")\n # deal with bad zip files\n except zipfile.BadZipFile:\n fix_bad_zip_file(file_path)\n try:\n with zipfile.ZipFile(file_path, 'r') as zip_ref:\n zip_ref.extractall(PUMS_CSV_FILE_DIRECTORY)\n print(\" ... zip file extracted!\")\n except zipfile.BadZipFile:\n continue\n \n # move zip file to ./data/pums/zip if it's in ./data/pums\n new_file_path = os.path.join(PUMS_ZIP_FILE_DIRECTORY, file_name)\n if file_path != new_file_path:\n os.rename(file_path, new_file_path)\n\n print(\"All files downloaded and extracted.\")\n\n # prompt user to delete leftover zip files to free up space\n del_zips = \"\"\n while del_zips != \"y\" and del_zips != \"n\":\n del_zips = input(\"Do you wish to delete leftover zip files? (y/n): \")\n\n files_in_directory = os.listdir(PUMS_ZIP_FILE_DIRECTORY)\n zip_files = [f for f in files_in_directory if f.endswith(\".zip\")]\n if del_zips == \"y\":\n print(\"Removing leftover zip files.\")\n for f in zip_files:\n path_to_file = os.path.join(PUMS_ZIP_FILE_DIRECTORY, f)\n os.remove(path_to_file)\n else:\n print(\"Leftover zip files transfered to '%s'.\" % PUMS_ZIP_FILE_DIRECTORY)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print('\\nInterrupted, deleting incomplete downloads...\\nExiting...')\n try:\n os.remove(PUMS_CSV_FILE_DIRECTORY + current_file)\n except FileNotFoundError:\n pass\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)",
"id": "948588",
"language": "Python",
"matching_score": 6.023849964141846,
"max_stars_count": 0,
"path": "download_pums.py"
},
{
"content": "import urllib.request\nimport os\nimport sys\nimport zipfile\nfrom utils.download_utils import ReportHook\nfrom utils.globals import *\nfrom urllib import error, request\n\n# script globals\ncurrent_file = \"\" # keep track of the current file being worked on\n\n\ndef main():\n # create data path folders if they don't already exist\n for directory in [DATA_DIRECTORY, PUMAS_DIRECTORY, PUMAS_GEOJSON_DIRECTORY,\n PUMAS_SHAPE_FILE_DIRECTORY, PUMAS_ZIP_FILE_DIRECTORY]:\n try:\n os.mkdir(directory)\n except FileExistsError:\n pass\n\n # read in filenames to download from text file\n with open(PUMAS_FILE_NAMES) as file_names:\n files = file_names.read().splitlines()\n\n # download and extract shapefile from zips\n for file_name in files:\n global current_file\n current_file = file_name\n # download zip file if it doesn't already exist\n if os.path.exists(os.path.join(PUMAS_ZIP_FILE_DIRECTORY, file_name)):\n file_path = os.path.join(PUMAS_ZIP_FILE_DIRECTORY, file_name)\n print(\"Downloading file '%s' ... done!\" % file_name)\n else:\n # attempt download\n try:\n file_path = os.path.join(PUMAS_ZIP_FILE_DIRECTORY, file_name)\n report_hook = ReportHook()\n report_hook.current_file = current_file\n request.urlretrieve((PUMAS_DOWNLOAD_URL + file_name), file_path, reporthook=report_hook.reporthook)\n print(\" ... done!\")\n sys.stdout.flush()\n except urllib.error.HTTPError:\n print(\"Downloading file '%s' ... failed, invalid file!\" % file_name)\n continue\n\n # extract shapefile from zip and move it to the shp/ directory\n with zipfile.ZipFile(file_path, 'r') as zip_ref:\n base_file_name = os.path.splitext(file_name)[0]\n for ext in [\".shp\", \".dbf\", \".prj\"]:\n extract = base_file_name + ext\n zip_ref.extract(extract, PUMAS_SHAPE_FILE_DIRECTORY)\n print(\" ... zip file extracted!\")\n\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print('\\nInterrupted, deleting incomplete downloads...\\nExiting...')\n try:\n os.remove(PUMAS_ZIP_FILE_DIRECTORY + current_file)\n except FileNotFoundError:\n pass\n try:\n sys.exit(0)\n except SystemExit:\n os._exit(0)",
"id": "10683001",
"language": "Python",
"matching_score": 2.2261149883270264,
"max_stars_count": 0,
"path": "download_pumas.py"
},
{
"content": "import time\nimport sys\n\n\ndef fix_bad_zip_file(zip_file):\n f = open(zip_file, 'r+b')\n data = f.read()\n pos = data.find(b'\\x50\\x4b\\x05\\x06') # End of central directory signature\n if (pos > 0):\n print(\" ... Truncating file at location \" + str(pos + 22) + \".\", end=\"\")\n f.seek(pos + 22) # size of 'ZIP end of central directory record'\n f.truncate()\n f.close()\n else:\n # raise error, file is truncated\n print(\" ... Error: File is truncated\")\n\n\nclass ReportHook:\n def __init__(self):\n self.current_file = \"\"\n self.start_time = None\n\n def reporthook(self, count, block_size, total_size):\n if count == 0:\n self.start_time = time.time()\n return\n duration = time.time() - self.start_time\n progress_size = int(count * block_size)\n speed = int(progress_size / ((1024 * duration) + 1))\n sys.stdout.write(\"\\rDownloading file '%s' ... %d KB, %d KB/s, %0.2f sec elapsed\" %\n (self.current_file, progress_size, speed, duration))\n sys.stdout.flush()\n",
"id": "7975748",
"language": "Python",
"matching_score": 1.2662495374679565,
"max_stars_count": 0,
"path": "utils/download_utils.py"
}
] | 2.226115 |
livcms | [
{
"content": "#!/usr/bin/env python3\n\"\"\"This is an example to train a task with DDPG algorithm.\n\nHere it creates a gym environment InvertedDoublePendulum. And uses a DDPG with\n1M steps.\n\nResults:\n AverageReturn: 250\n RiseTime: epoch 499\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\n\nfrom garage import wrap_experiment\nfrom garage.envs import GymEnv\nfrom garage.experiment.deterministic import set_seed\nfrom garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise, EpsilonGreedyPolicy, AddGaussianNoise\nfrom garage.replay_buffer import PathBuffer\nfrom garage.sampler import FragmentWorker, LocalSampler\nfrom garage.tf.algos import DDPG\nfrom garage.tf.policies import ContinuousMLPPolicy\nfrom garage.tf.q_functions import ContinuousMLPQFunction\nfrom garage.trainer import TFTrainer\nfrom garage.envs import PointEnv \n#from garage.envs import ParticleEnv, ParticleEnvPrev, ParticleEnvPrevManyFiles, ParticleEnvKalman\nfrom garage.envs import ParticleEnvKalman, ParticleEnvSimple, ParticleEnvGnnLike, OneParticleEnv, ParticlePointEnv\nfrom garage.sampler import LocalSampler, DefaultWorker\n\nfrom dowel import logger, tabular \n\n@wrap_experiment(archive_launch_repo=False, snapshot_mode=\"last\")\ndef ddpg_partcile(ctxt=None, seed=1):\n \"\"\"Train DDPG with InvertedDoublePendulum-v2 environment.\n\n Args:\n ctxt (garage.experiment.ExperimentContext): The experiment\n configuration used by Trainer to create the snapshotter.\n seed (int): Used to seed the random number generator to produce\n determinism.\n\n \"\"\"\n set_seed(seed)\n with TFTrainer(snapshot_config=ctxt) as trainer:\n env = ParticleEnvGnnLike() \n\n policy = ContinuousMLPPolicy(env_spec=env.spec,\n hidden_sizes=[64, 64],\n hidden_nonlinearity=tf.nn.relu,\n output_nonlinearity=tf.nn.tanh)\n\n exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,\n policy,\n sigma=np.array([0.2, 0.2]))\n\n #exploration_policy = EpsilonGreedyPolicy(\n # env_spec=env.spec,\n # policy=policy,\n # total_timesteps=10000,\n # max_epsilon=1.0,\n # min_epsilon=0.02,\n # decay_ratio=0.1)\n\n\n # exploration_policy = AddGaussianNoise(env.spec,\n # policy,\n # total_timesteps=1000,\n # max_sigma=100,\n # min_sigma=10)\n\n\n\n qf = ContinuousMLPQFunction(env_spec=env.spec,\n hidden_sizes=[64, 64],\n hidden_nonlinearity=tf.nn.relu)\n\n replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))\n\n sampler = LocalSampler(agents=exploration_policy,\n envs=env,\n max_episode_length=env.spec.max_episode_length,\n is_tf_worker=True,\n #worker_class=DefaultWorker, \n worker_class = FragmentWorker\n #n_workers=1\n )\n\n ddpg = DDPG(env_spec=env.spec,\n policy=policy,\n policy_lr=1e-4,\n qf_lr=1e-3,\n qf=qf,\n replay_buffer=replay_buffer,\n sampler=sampler,\n steps_per_epoch=10,\n target_update_tau=1e-2,\n n_train_steps=50,\n discount=0.9,\n min_buffer_size=int(1e4),\n exploration_policy=exploration_policy,\n policy_optimizer=tf.compat.v1.train.AdamOptimizer,\n qf_optimizer=tf.compat.v1.train.AdamOptimizer)\n\n trainer.setup(algo=ddpg, env=env)\n\n trainer.train(n_epochs=100, batch_size=4000)\n\n\n\n\n#@wrap_experiment\n#def log_experiment(snapshot_mode):\n# for i in range(100):\n # # Log str directly\n # logger.log('Logging messages:')\n # Log scalar values with the key 'AverageReturn'\n# tabular.record('AverageReturn', i)\n\n # The Trainer will do these steps for you, if you log things in\n # the algorithms.\n# logger.log(tabular)\n# logger.dump_all()\n\n#log_experiment()\n\nddpg_partcile(seed=1)\n",
"id": "12335186",
"language": "Python",
"matching_score": 11.077467918395996,
"max_stars_count": 1,
"path": "src/garage/examples/tf/particle_DDPG.py"
},
{
"content": "#!/usr/bin/env python3\n\"\"\"This is an example to train a task with DDPG algorithm.\n\nHere it creates a gym environment InvertedDoublePendulum. And uses a DDPG with\n1M steps.\n\nResults:\n AverageReturn: 250\n RiseTime: epoch 499\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\n\nfrom garage import wrap_experiment\nfrom garage.envs import GymEnv\nfrom garage.experiment.deterministic import set_seed\nfrom garage.np.exploration_policies import AddOrnsteinUhlenbeckNoise, EpsilonGreedyPolicy, AddGaussianNoise\nfrom garage.replay_buffer import PathBuffer\nfrom garage.sampler import FragmentWorker, LocalSampler\nfrom garage.tf.algos import DDPG, SAC\nfrom garage.tf.policies import ContinuousMLPPolicy\nfrom garage.tf.q_functions import ContinuousMLPQFunction\nfrom garage.trainer import TFTrainer\nfrom garage.envs import PointEnv \n#from garage.envs import ParticleEnv, ParticleEnvPrev, ParticleEnvPrevManyFiles, ParticleEnvKalman\nfrom garage.envs import ParticleEnvKalman, ParticleEnvSimple, ParticleEnvGnnLike, OneParticleEnv, ParticlePointEnv\nfrom garage.sampler import LocalSampler, DefaultWorker\n\n@wrap_experiment(archive_launch_repo=False)\ndef ddpg_partcile(ctxt=None, seed=1):\n \"\"\"Train DDPG with InvertedDoublePendulum-v2 environment.\n\n Args:\n ctxt (garage.experiment.ExperimentContext): The experiment\n configuration used by Trainer to create the snapshotter.\n seed (int): Used to seed the random number generator to produce\n determinism.\n\n \"\"\"\n set_seed(seed)\n with TFTrainer(snapshot_config=ctxt) as trainer:\n env = ParticlePointEnv() \n\n policy = ContinuousMLPPolicy(env_spec=env.spec,\n hidden_sizes=[64, 64],\n hidden_nonlinearity=tf.nn.relu,\n output_nonlinearity=tf.nn.tanh)\n\n exploration_policy = AddOrnsteinUhlenbeckNoise(env.spec,\n policy,\n sigma=np.array([2, 2]))\n\n #exploration_policy = EpsilonGreedyPolicy(\n # env_spec=env.spec,\n # policy=policy,\n # total_timesteps=10000,\n # max_epsilon=1.0,\n # min_epsilon=0.02,\n # decay_ratio=0.1)\n\n\n #exploration_policy = AddGaussianNoise(env.spec,\n # policy,\n # total_timesteps=1000,\n # max_sigma=1,\n # min_sigma=1)\n\n\n\n qf = ContinuousMLPQFunction(env_spec=env.spec,\n hidden_sizes=[64, 64],\n hidden_nonlinearity=tf.nn.relu)\n\n replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))\n\n sampler = LocalSampler(agents=exploration_policy,\n envs=env,\n max_episode_length=env.spec.max_episode_length,\n is_tf_worker=True,\n #worker_class=DefaultWorker, \n worker_class = FragmentWorker\n #n_workers=1\n )\n\n ddpg = SAC(env_spec=env.spec,\n policy=policy,\n policy_lr=1e-4,\n qf_lr=1e-3,\n qf=qf,\n replay_buffer=replay_buffer,\n sampler=sampler,\n steps_per_epoch=10,\n target_update_tau=1e-2,\n n_train_steps=10,\n discount=0.9,\n min_buffer_size=int(1e4),\n exploration_policy=exploration_policy,\n policy_optimizer=tf.compat.v1.train.AdamOptimizer,\n qf_optimizer=tf.compat.v1.train.AdamOptimizer)\n\n trainer.setup(algo=ddpg, env=env)\n\n trainer.train(n_epochs=10, batch_size=100)\n\n\nddpg_partcile(seed=1)\n",
"id": "814388",
"language": "Python",
"matching_score": 4.5404438972473145,
"max_stars_count": 1,
"path": "src/garage/examples/tf/particle_SAC.py"
},
{
"content": "#!/usr/bin/env python3\n\"\"\"This is an example to add a simple VPG algorithm.\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\nfrom garage import EpisodeBatch, log_performance, wrap_experiment\nfrom garage.envs import PointEnv, TestPointEnv, ParticlePointEnv, ParticleEnvGnnLike, ParticleEnvKalman\nfrom garage.experiment.deterministic import set_seed\nfrom garage.np import discount_cumsum\nfrom garage.sampler import LocalSampler\nfrom garage.tf.policies import GaussianMLPPolicy\nfrom garage.trainer import TFTrainer\nfrom garage import wrap_experiment\nfrom dowel import logger, tabular\nfrom dowel import CsvOutput\n\n\nclass SimpleVPG:\n \"\"\"Simple Vanilla Policy Gradient.\n\n Args:\n env_spec (EnvSpec): Environment specification.\n policy (garage.tf.policies.StochasticPolicy): Policy.\n sampler (garage.sampler.Sampler): Sampler.\n\n \"\"\"\n\n def __init__(self, env_spec, policy, sampler):\n self.env_spec = env_spec\n self.policy = policy\n self._sampler = sampler\n self.max_episode_length = env_spec.max_episode_length\n self._discount = 0.99\n self.init_opt()\n\n def init_opt(self):\n \"\"\"Initialize optimizer and build computation graph.\"\"\"\n observation_dim = self.policy.observation_space.flat_dim\n action_dim = self.policy.action_space.flat_dim\n with tf.name_scope('inputs'):\n self._observation = tf.compat.v1.placeholder(\n tf.float32, shape=[None, observation_dim], name='observation')\n self._action = tf.compat.v1.placeholder(tf.float32,\n shape=[None, action_dim],\n name='action')\n self._returns = tf.compat.v1.placeholder(tf.float32,\n shape=[None],\n name='return')\n policy_dist = self.policy.build(self._observation, name='policy').dist\n with tf.name_scope('loss'):\n ll = policy_dist.log_prob(self._action, name='log_likelihood')\n loss = -tf.reduce_mean(ll * self._returns)\n with tf.name_scope('train'):\n self._train_op = tf.compat.v1.train.AdamOptimizer(1e-3).minimize(\n loss)\n\n def train(self, trainer):\n \"\"\"Obtain samplers and start actual training for each epoch.\n\n Args:\n trainer (Trainer): Experiment trainer.\n\n \"\"\"\n for epoch in trainer.step_epochs():\n samples = trainer.obtain_samples(epoch)\n log_performance(epoch,\n EpisodeBatch.from_list(self.env_spec, samples),\n self._discount)\n self._train_once(samples)\n\n def _train_once(self, samples):\n \"\"\"Perform one step of policy optimization given one batch of samples.\n\n Args:\n samples (list[dict]): A list of collected samples.\n\n Returns:\n numpy.float64: Average return.\n\n \"\"\"\n obs = np.concatenate([path['observations'] for path in samples])\n actions = np.concatenate([path['actions'] for path in samples])\n returns = []\n for path in samples:\n returns.append(discount_cumsum(path['rewards'], self._discount))\n returns = np.concatenate(returns)\n sess = tf.compat.v1.get_default_session()\n sess.run(self._train_op,\n feed_dict={\n self._observation: obs,\n self._action: actions,\n self._returns: returns,\n })\n return np.mean(returns)\n\n def __getstate__(self):\n \"\"\"Parameters to save in snapshot.\n\n Returns:\n dict: Parameters to save.\n\n \"\"\"\n data = self.__dict__.copy()\n del data['_observation']\n del data['_action']\n del data['_returns']\n del data['_train_op']\n return data\n\n def __setstate__(self, state):\n \"\"\"Parameters to restore from snapshot.\n\n Args:\n state (dict): Parameters to restore from.\n\n \"\"\"\n self.__dict__ = state\n self.init_opt()\n\n\n@wrap_experiment(archive_launch_repo=False, snapshot_mode=\"last\")\ndef tutorial_vpg(ctxt=None):\n 
\"\"\"Train VPG with PointEnv environment.\n\n Args:\n ctxt (ExperimentContext): The experiment configuration used by\n :class:`~Trainer` to create the :class:`~Snapshotter`.\n\n \"\"\"\n set_seed(100)\n with TFTrainer(ctxt) as trainer:\n env = ParticleEnvGnnLike(max_episode_length=200)\n policy = GaussianMLPPolicy(env.spec)\n sampler = LocalSampler(agents=policy,\n envs=env,\n max_episode_length=env.spec.max_episode_length,\n is_tf_worker=True)\n algo = SimpleVPG(env.spec, policy, sampler)\n trainer.setup(algo, env)\n trainer.train(n_epochs=100, batch_size=4000)\n\n\n# @wrap_experiment\n# def log_experiment(ctxt=None):\n# for i in range(100):\n# # Log str directly\n# logger.add_output(CsvOutput('log_folder/table.csv'))\n# #logger.log('Logging messages:')\n# # Log scalar values with the key 'AverageReturn'\n# #tabular.record('AverageReturn', i)\n# #logger.log('test'+ str(i))\n# # The Trainer will do these steps for you, if you log things in\n# # the algorithms.\n# env.get_log_vals()\n \n# logger.log(tabular)\n# logger.dump_all()\n \n\n#log_experiment()\n\n\ntutorial_vpg()\n\n\n",
"id": "11470807",
"language": "Python",
"matching_score": 3.63615083694458,
"max_stars_count": 1,
"path": "src/garage/examples/tf/old_tutorial_vpg.py"
},
{
"content": "#!/usr/bin/env python3\n\"\"\"An example to train a task with DQN algorithm.\n\nHere it creates a gym environment CartPole, and trains a DQN with 50k steps.\n\"\"\"\nfrom garage import wrap_experiment\nfrom garage.envs import GymEnv\nfrom garage.experiment.deterministic import set_seed\nfrom garage.np.exploration_policies import EpsilonGreedyPolicy\nfrom garage.replay_buffer import PathBuffer\nfrom garage.sampler import FragmentWorker, LocalSampler\nfrom garage.tf.algos import DQN\nfrom garage.tf.policies import DiscreteQFArgmaxPolicy\nfrom garage.tf.q_functions import DiscreteMLPQFunction\nfrom garage.trainer import TFTrainer\nfrom garage.envs import ModuleTrackMLEnv\n\n@wrap_experiment\ndef dqn_cartpole(ctxt=None, seed=1):\n \"\"\"Train TRPO with CubeCrash-v0 environment.\n\n Args:\n ctxt (garage.experiment.ExperimentContext): The experiment\n configuration used by Trainer to create the snapshotter.\n seed (int): Used to seed the random number generator to produce\n determinism.\n\n \"\"\"\n set_seed(seed)\n with TFTrainer(ctxt) as trainer:\n n_epochs = 100\n steps_per_epoch = 10\n sampler_batch_size = 10\n num_timesteps = n_epochs * steps_per_epoch * sampler_batch_size\n env = ModuleTrackMLEnv()\n replay_buffer = PathBuffer(capacity_in_transitions=int(1e2))\n qf = DiscreteMLPQFunction(env_spec=env.spec, hidden_sizes=(64, 64))\n policy = DiscreteQFArgmaxPolicy(env_spec=env.spec, qf=qf)\n exploration_policy = EpsilonGreedyPolicy(env_spec=env.spec,\n policy=policy,\n total_timesteps=num_timesteps,\n max_epsilon=1.0,\n min_epsilon=0.02,\n decay_ratio=0.1)\n\n sampler = LocalSampler(agents=exploration_policy,\n envs=env,\n max_episode_length=env.spec.max_episode_length,\n is_tf_worker=True,\n worker_class=FragmentWorker)\n\n algo = DQN(env_spec=env.spec,\n policy=policy,\n qf=qf,\n exploration_policy=exploration_policy,\n replay_buffer=replay_buffer,\n sampler=sampler,\n steps_per_epoch=steps_per_epoch,\n qf_lr=1e-3,\n discount=0.99,\n min_buffer_size=int(1e1),\n double_q=False,\n n_train_steps=30,\n target_network_update_freq=1,\n buffer_batch_size=32)\n\n trainer.setup(algo, env)\n trainer.train(n_epochs=n_epochs, batch_size=sampler_batch_size)\n\n\ndqn_cartpole()\n",
"id": "3472899",
"language": "Python",
"matching_score": 4.750948429107666,
"max_stars_count": 1,
"path": "src/garage/examples/tf/dqn_cartpole.py"
},
{
"content": "from garage import wrap_experiment\nfrom garage.envs import TestPointEnv\nfrom garage.envs import normalize\nfrom garage.experiment.deterministic import set_seed\nfrom garage.np.baselines import LinearFeatureBaseline\nfrom garage.sampler import LocalSampler\nfrom garage.tf.algos import TRPO\nfrom garage.tf.policies import ContinuousMLPPolicy\nfrom garage.trainer import TFTrainer\nfrom garage.tf.algos import TD3\nfrom garage.tf.q_functions import ContinuousMLPQFunction\nfrom garage.replay_buffer import PathBuffer\nfrom garage.envs import ParticleEnvGnnLike\nimport tensorflow as tf \n\n@wrap_experiment\ndef trpo_point(ctxt=None, seed=1):\n set_seed(seed)\n with TFTrainer(ctxt) as trainer:\n env = ParticleEnvGnnLike()\n\n policy = ContinuousMLPPolicy(name='policy',\n env_spec=env.spec,\n hidden_sizes=(32, 32))\n\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n\n sampler = LocalSampler(\n agents=policy,\n envs=env,\n max_episode_length=env.spec.max_episode_length,\n is_tf_worker=True)\n\n\n\n qf = ContinuousMLPQFunction(name='ContinuousMLPQFunction',\n env_spec=env.spec,\n hidden_sizes=[400, 300],\n action_merge_layer=0,\n hidden_nonlinearity=tf.nn.relu)\n\n qf2 = ContinuousMLPQFunction(name='ContinuousMLPQFunction2',\n env_spec=env.spec,\n hidden_sizes=[400, 300],\n action_merge_layer=0,\n hidden_nonlinearity=tf.nn.relu)\n\n replay_buffer = PathBuffer(capacity_in_transitions=int(1e6))\n\n algo = TD3(env_spec=env.spec,\n policy=policy,\n sampler=sampler,\n discount=0.99,\n qf=qf,\n qf2=qf2,\n replay_buffer=replay_buffer,)\n\n trainer.setup(algo, env)\n trainer.train(n_epochs=100, batch_size=4000)\n\n\ntrpo_point()\n",
"id": "9194816",
"language": "Python",
"matching_score": 6.285952568054199,
"max_stars_count": 1,
"path": "src/garage/examples/tf/test_trpo_point.py"
},
{
"content": "from garage import wrap_experiment\nfrom garage.envs import PointEnv\nfrom garage.envs import normalize\nfrom garage.experiment.deterministic import set_seed\nfrom garage.np.baselines import LinearFeatureBaseline\nfrom garage.sampler import LocalSampler\nfrom garage.tf.algos import TRPO\nfrom garage.tf.policies import CategoricalMLPPolicy\nfrom garage.trainer import TFTrainer\n\nfrom garage.tf.policies import GaussianMLPPolicy\n\n\n@wrap_experiment\ndef trpo_point(ctxt=None, seed=1):\n set_seed(seed)\n with TFTrainer(ctxt) as trainer:\n env = normalize(PointEnv())\n\n policy = GaussianMLPPolicy(name='policy',\n env_spec=env.spec,\n hidden_sizes=(32, 32))\n\n baseline = LinearFeatureBaseline(env_spec=env.spec)\n\n sampler = LocalSampler(\n agents=policy,\n envs=env,\n max_episode_length=env.spec.max_episode_length,\n is_tf_worker=True)\n\n algo = TRPO(env_spec=env.spec,\n policy=policy,\n baseline=baseline,\n sampler=sampler,\n discount=0.99,\n max_kl_step=0.01)\n\n trainer.setup(algo, env)\n trainer.train(n_epochs=100, batch_size=4000)\n\n\ntrpo_point()\n",
"id": "2268277",
"language": "Python",
"matching_score": 0.2660321295261383,
"max_stars_count": 1,
"path": "src/garage/examples/tf/trpo_point.py"
},
{
"content": "\"\"\"Simple 2D environment containing a point and a goal location.\"\"\"\nfrom concurrent.futures import process\nimport math\n\nimport akro\nimport numpy as np\nimport circle_fit as cf\nimport pandas as pd \nfrom garage import Environment, EnvSpec, EnvStep, StepType\nimport random \nfrom gym.spaces import Box, Discrete\nimport csv \nimport trackml.dataset\nimport json\n\n\nwith open(\"/home/lhv14/module_map.json\", 'r') as j:\n module_map = json.loads(j.read())\n\nremap_modules_dic = ''\nwith open(r'/home/lhv14/remap_modules.txt','r') as f:\n for i in f.readlines():\n remap_modules_dic=i #string\nremap_modules_dic = eval(remap_modules_dic) # this is orignal dict with instace dict\n\n\n\ndetector = pd.read_csv('/home/lhv14/exatrkx/Tracking-ML-Exa.TrkX/data/detectors.csv')\ndetector['new_module_id'] = detector['volume_id']*100000000+ detector['layer_id']*100000 + detector['module_id']\ndetector['cr'] = np.sqrt(detector.cx**2 + detector.cy**2)/10\ndetector['cz'] = detector['cz']/10\ndetector['cr'] = detector['cr']/10\nzlist = detector['cz'].tolist()\nrlist = detector['cr'].tolist() \nmods = detector.set_index(['volume_id', 'layer_id', 'module_id']).index\n\ndetector['discrete_module_id'] = [remap_modules_dic[val] for val in mods]\n\nprefix = '/home/lhv14/exatrkx/Tracking-ML-Exa.TrkX/alldata/train_1/event000001000'\nf = open(\"garage_outputs.csv\", \"w\")\nwriter = csv.writer(f)\nwriter.writerow([\"filenumber\", \"particle_id\", \"mc_z\", \"mc_r\", \"pred_z\", \"pred_r\", \"action_z\", \"action_r\"])\n\n\n\n\nclass ModuleTrackMLEnv(Environment):\n \"\"\"A simple 2D point environment.\n\n Args:\n goal (np.ndarray): A 2D array representing the goal position\n arena_size (float): The size of arena where the point is constrained\n within (-arena_size, arena_size) in each dimension\n done_bonus (float): A numerical bonus added to the reward\n once the point as reached the goal\n never_done (bool): Never send a `done` signal, even if the\n agent achieves the goal\n max_episode_length (int): The maximum steps allowed for an episode.\n\n \"\"\"\n\n def __init__(self,\n # goal=np.array((1., 1.), dtype=np.float32),\n arena_size=5.,\n done_bonus=0.,\n never_done=False,\n max_episode_length=500):\n #goal = np.array(goal, dtype=np.float32)\n #self._goal = goal\n self._done_bonus = done_bonus\n self._never_done = never_done\n self._arena_size = arena_size\n self._total_step_cnt = 0 \n self.new_count = 0 \n self.done_visual = False \n self.file_counter = 0 \n #self.event = pd.read_hdf('~/gnnfiles/data/ntuple_PU200_numEvent1000/ntuple_PU200_event0.h5')\n #self.event = pd.read_hdf('/media/lucas/MicroSD/ntuple_PU200_numEvent1000/ntuple_PU200_event0.h5')\n #self.event['z'] = np.abs(self.event['z'])\n self.average_reward = 0 \n self.hit_buffer = []\n self.dz_buffer = []\n self.dr_buffer = []\n\n\n # assert ((goal >= -arena_size) & (goal <= arena_size)).all()\n\n self._step_cnt = None\n self._max_episode_length = max_episode_length\n self._visualize = False\n\n #self._point = np.zeros_like(self._goal)\n #self._task = {'goal': self._goal}\n self._observation_space = akro.Box(low=np.array([-300, 0, 0]), high=np.array([300, 30, 18728]), dtype=np.float32)\n\n self._action_space = akro.Discrete(18728)\n\n self._spec = EnvSpec(action_space=self.action_space,\n observation_space=self.observation_space,\n max_episode_length=max_episode_length)\n\n self.record_z = [] \n self.record_r = []\n self.record_pid = []\n self.record_event_counter = [] \n self.record_reward = [] \n self.record_a0 = [] \n 
self.record_a1 = [] \n self.record_filenumber = [] \n\n @property\n def action_space(self):\n \"\"\"akro.Space: The action space specification.\"\"\"\n return self._action_space\n\n @property\n def observation_space(self):\n \"\"\"akro.Space: The observation space specification.\"\"\"\n return self._observation_space\n\n @property\n def spec(self):\n \"\"\"EnvSpec: The environment specification.\"\"\"\n return self._spec\n\n @property\n def render_modes(self):\n \"\"\"list: A list of string representing the supported render modes.\"\"\"\n return [\n 'ascii',\n ]\n\n def reset(self):\n \"\"\"Reset the environment.\n\n Returns:\n numpy.ndarray: The first observation conforming to\n `observation_space`.\n dict: The episode-level information.\n Note that this is not part of `env_info` provided in `step()`.\n It contains information of he entire episode, which could be\n needed to determine the first action (e.g. in the case of\n goal-conditisoned or MTRL.)\n\n \"\"\"\n \n if self._total_step_cnt%100 ==0: \n self.file_counter += 1 \n self.event = process_trackml(self.file_counter, pt_min=2)\n #self.event['z'] = np.abs(self.event['z'])\n print(\"jumping file\")\n \n\n random_particle_id = random.choice(self.event.particle_id.values)\n self.particle = self.event[self.event['particle_id']==random_particle_id]\n\n self.original_pid = random_particle_id\n start_hit = self.particle.iloc[0,:]\n #next_hit = self.particle.iloc[1,:]\n self._point = start_hit[['z', 'r']].values \n self.dr_buffer = []\n self.dz_buffer = []\n\n self.num_track_hits = 1\n self.state = start_hit.squeeze(axis=0) \n\n self.dr_buffer.append(0)\n self.dz_buffer.append(0)\n \n row = pd.DataFrame({'filenumber': [self.file_counter], \n 'particle_id': [self.original_pid], \n 'mc_z': [start_hit.z],\n 'mc_r' : [start_hit.r],\n 'pred_z': [start_hit.z],\n 'pred_r': [start_hit.r],\n 'action_z': [np.nan],\n 'action_r': [np.nan]})\n row.to_csv(f, mode='a', header=None, index=None)\n\n observation=self._point\n\n \n self.state = [start_hit.z, start_hit.r, start_hit.discrete_module_id]\n self.module_id = start_hit.new_module_id\n self._step_cnt = 0\n self.original_particle = self.event[self.event['particle_id']==self.original_pid].reset_index()\n\n return self.state, dict(something=[1,1])\n\n def step(self, action):\n \"\"\"Step the environment.\n\n Args:\n action (np.ndarray): An action provided by the agent.\n\n Returns:\n EnvStep: The environment step resulting from the action.\n\n Raises:\n RuntimeError: if `step()` is called after the environment\n has been\n constructed and `reset()` has not been called.\n\n \"\"\"\n if self._step_cnt is None:\n raise RuntimeError('reset() must be called before step()!')\n \n self.new_count += 1 \n\n # enforce action space\n a = action.copy() # NOTE: we MUST copy the action before modifying it\n\n module = detector[detector['discrete_module_id']==a]\n\n\n #a_clipped = np.clip(a, self.action_space.low, self.action_space.high)\n #naive_estimate = self._point + np.array([max(0, self.dz_buffer[-1]), max(0, self.dr_buffer[-1])])\n #nearest_module, module_id = find_nearest_module(naive_estimate[0], naive_estimate[1], self.module_id)\n\n predicted_point_z = module.cz.values[0]\n predicted_point_r = module.cr.values[0]\n\n\n\n\n #naive_estimate = self._point + np.array([max(0, self.dz_buffer[-1]), max(0, self.dr_buffer[-1])])\n #nearest_module, module_id = find_nearest_module(naive_estimate[0], naive_estimate[1], self.module_id)\n #nearest_module_point = detector[detector['new_module_id'] == 
nearest_module][['cz', 'cr']].values.flatten()\n #self.module_id = module_id \n\n #predicted_point_z = np.clip(naive_estimate[0] + a_clipped[0] , -300, 300)\n #predicted_point_r = np.clip(naive_estimate[1] + a_clipped[1], 0, 30)\n\n\n predicted_point = [predicted_point_z, predicted_point_r]\n\n #new_module = find_nearest_module(predicted_point_z, predicted_point_r, self.module_id)\n #adjusted_predicted_point = detector[detector['new_module_id'] == new_module][['cz', 'cr']].values.flatten()\n #print(predicted_point)\n #self.module_id = new_module \n\n if self._visualize:\n print(self.render('ascii'))\n\n #mag_dist_prev_hit = np.sqrt(self.state.z-new_hit.z)**2 + (self.state.r-new_hit.r)**2\n self.previous_state = self.state\n #self.state = np.concatenate(predicted_point, [a])\n self.state = np.array([predicted_point_z, predicted_point_r, a])\n\n # print(predicted_point, a, self.state)\n\n next_index = self.num_track_hits + 1 \n if next_index > len(self.original_particle) -1: \n next_index = len(self.original_particle) - 1\n next_hit = self.original_particle.loc[next_index,: ]\n #self.hit_buffer.append([predicted_point_z, predicted_point_r])\n\n #reward given based on how close this new hit was to the next hit in the df \n #distance = np.sqrt((new_hit.z - next_hit.z)**2 + (new_hit.r - next_hit.r)**2)\n distance = np.sqrt((predicted_point_z-next_hit.z)**2 + (predicted_point_r-next_hit.r)**2)\n\n #print(predicted_point_z, distance)\n #print(distance)\n ##reward = -distance\n correct_module = next_hit['discrete_module_id']\n allowed_modules = module_map[str(int(self.previous_state[2]))]\n\n #reward = - np.abs(correct_module - a)\n reward = distance \n if a not in allowed_modules: \n reward = -100\n\n self.num_track_hits += 1 \n \n\n dr = self.state[1] - self.previous_state[1]\n dz = self.state[0] - self.previous_state[0]\n \n self.dr_buffer.append(dr)\n self.dz_buffer.append(dz)\n \n self._step_cnt += 1\n self._total_step_cnt += 1\n \n\n row = pd.DataFrame({'filenumber': [self.file_counter], \n 'particle_id': [self.original_pid], \n 'mc_z': [next_hit.z], \n 'mc_r' : [next_hit.r], \n 'pred_z': [predicted_point_z], \n 'pred_r': [predicted_point_r]\n })\n #'action_z': [a[0]], \n #'action_r': [a[1]] })\n row.to_csv(f, mode='a', header=None, index=None)\n\n\n #stopping = np.mean(np.abs(np.diff(self.dz_buffer[-4:]))) + np.mean(np.abs(np.diff(self.dr_buffer[-4:])))\n \n if self.num_track_hits > 6: \n #if a[2] > 0.5:\n done = True \n else: \n done = False \n #self.episode_counter +=1 \n\n self._point = predicted_point\n #print(self._point, \"\\n\")\n observation = self.state\n step_type = StepType.get_step_type(\n step_cnt=self._step_cnt,\n max_episode_length=self._max_episode_length,\n done=done)\n \n if step_type in (StepType.TERMINAL, StepType.TIMEOUT):\n self._step_cnt = None\n\n\n self.average_reward = (self.average_reward + reward)/2\n #if self._total_step_cnt%100==0: \n # print(self.average_reward)\n\n return EnvStep(env_spec=self.spec,\n action=action,\n reward=reward,\n observation=observation,\n env_info={\n #'task': self._task,\n 'filenumber': self.file_counter, \n 'particle_id': self.original_pid, \n 'mc_z': next_hit.z, \n 'mc_r' : next_hit.r, \n 'pred_z': predicted_point_z, \n 'pred_r': predicted_point_r, \n # 'action_z': a[0], \n # 'action_r': a[1]\n #'success': succ\n },\n step_type=step_type)\n\n def render(self, mode):\n \"\"\"Renders the environment.\n\n Args:\n mode (str): the mode to render with. 
The string must be present in\n `self.render_modes`.\n\n Returns:\n str: the point and goal of environment.\n\n \"\"\"\n #return f'Point: {self._point}, Goal: {self._goal}'\n return self._point \n\n def visualize(self):\n \"\"\"Creates a visualization of the environment.\"\"\"\n #self._visualize = True\n #print(self.render('ascii'))\n #visualise(self.state.r, )\n #visualise() \n #wrap(self.event, r, z, pids, self.original_pid)\n print(self.original_pid)\n #print(\"i now visualise\")\n\n def my_visualise(self): \n print(\"now calling visualise\")\n #wrap(self.event)\n #visualise(self.event, self.record_pid, self.record_r, self.record_z)\n \n def close(self):\n \"\"\"Close the env.\"\"\"\n\n # pylint: disable=no-self-use\n def sample_tasks(self, num_tasks):\n \"\"\"Sample a list of `num_tasks` tasks.\n\n Args:\n num_tasks (int): Number of tasks to sample.\n\n Returns:\n list[dict[str, np.ndarray]]: A list of \"tasks\", where each task is\n a dictionary containing a single key, \"goal\", mapping to a\n point in 2D space.\n\n \"\"\"\n #goals = np.random.uniform(-2, 2, size=(num_tasks, 2))\n #tasks = [{'goal': goal} for goal in goals]\n #return tasks\n return 0 \n\n\n def set_task(self, task):\n \"\"\"Reset with a task.\n\n Args:\n task (dict[str, np.ndarray]): A task (a dictionary containing a\n single key, \"goal\", which should be a point in 2D space).\n\n \"\"\"\n #self._task = task\n #self._goal = task['goal']\n x = 10 \n\n def dump_summary(self):\n print(\"dr: \", \"\\n dz: \" ) \n\ndef process_trackml(filenumber, pt_min): \n prefix = '/home/lhv14/exatrkx/Tracking-ML-Exa.TrkX/alldata/train_1/event00000'+str(1000+filenumber)\n\n hits, particles, truth = trackml.dataset.load_event(\n prefix, parts=['hits', 'particles', 'truth'])\n \n hits['r'] = np.sqrt(hits.x**2 + hits.y**2)/10\n\n pt = np.sqrt(particles.px**2 + particles.py**2)\n particles['pt'] = pt\n\n # Applies pt cut, removes all noise hits.\n #particles = particles[pt > pt_min]\n truth = (truth[['hit_id', 'particle_id']]\n .merge(particles[['particle_id', 'pt', 'nhits']], on='particle_id'))\n # Calculate derived hits variables\n #r = np.sqrt(hits.x**2 + hits.y**2)\n phi = np.arctan2(hits.y, hits.x)\n # Select the data columns we need\n hits = (hits[['hit_id', 'z', 'r', 'layer_id', 'volume_id', 'module_id']]\n .assign(phi=phi)\n .merge(truth[['hit_id', 'particle_id', 'pt', 'nhits']], on='hit_id'))\n # Remove duplicate hits\n #hits = hits.loc[hits.groupby(['particle_id', 'layer_id']).r.idxmin().values]\n #hits['r'] = r/10\n hits['z'] = hits['z']/10\n hits = hits.sort_values('r')\n hits['new_module_id'] = hits['volume_id']*100000000+ hits['layer_id']*100000 + hits['module_id']\n\n #hits.to_csv('test_these_hits.csv')\n\n #hits = hits[hits['volume_id'] < 10] \n #nh = hits.groupby('particle_id').agg('count').iloc[:,0]\n # only pick the pids that has a certain number of hits \n #hits = hits[hits['particle_id'].isin(np.array(nh[nh > 7].index))]\n hits = hits[hits['nhits'] > 7]\n mods = hits.set_index(['volume_id', 'layer_id', 'module_id']).index\n\n hits['discrete_module_id'] = [remap_modules_dic[val] for val in mods]\n \n return hits\n\ndef find_nearest_module(pred_z, pred_r, prev_module):\n\n #only select from modules that are connected to the previous module \n #t = module_map[prev_module]\n\n #json only supports strings as keys \n #prev_module_pos = \n compatible_modules = detector[detector['new_module_id'].isin(module_map[str(int(prev_module))])]\n # only radially outwards\n #print(compatible_modules)\n #compatible_modules = 
compatible_modules[(np.abs(compatible_modules['cz']) > np.abs((pred_z-10))) & (compatible_modules['cr'] > (pred_r-1))]\n \n\n #print(compatible_modules[['cz', 'cr']])\n zlist = compatible_modules.cz.values\n rlist = compatible_modules.cr.values\n\n distances = np.sqrt((zlist-pred_z)**2+(rlist - pred_r)**2) \n index = np.argmin(distances)\n #print(compatible_modules['new_module_id'].values[index])\n module = detector.iloc[index, ]\n module_position = module[['cz', 'cr']].values\n module_id = module.new_module_id\n \n #return compatible_modules['new_module_id'].values[index]\n\n return module_position, module_id\n\ndef find_nearest_module_by_position(z, r): \n distances = np.sqrt((zlist-z)**2+(rlist - r)**2) \n #print(distances)\n index = np.argmin(distances)\n module_position = detector.iloc[index, ][['cz', 'cr']].values\n return module_position\n",
"id": "9024789",
"language": "Python",
"matching_score": 8.423077583312988,
"max_stars_count": 1,
"path": "src/garage/envs/module_trackml_env.py"
},
{
"content": "\"\"\"Simple 2D environment containing a point and a goal location.\"\"\"\nimport math\n\nimport akro\nimport numpy as np\nimport pandas as pd \nfrom garage import Environment, EnvSpec, EnvStep, StepType\nimport random \nfrom gym.spaces import Box\n#from visualise_track import visualise \nfrom animate_particle import wrap \n\nr = pd.read_csv('~/garage/src/garage/examples/tf/g_r.csv', header=None)\nz = pd.read_csv('~/garage/src/garage/examples/tf/g_z.csv', header=None)\npids = pd.read_csv('~/garage/src/garage/examples/tf/g_pids.csv', header=None)\n\n#i = np.where(pids.values.flatten()==-17737)\n\n#my_r = r.values[i]\n#my_z = z.values[i]\ndone_ani = False \n\nevent = pd.read_hdf('~/gnnfiles/data/ntuple_PU200_numEvent1000/ntuple_PU200_event0.h5')\n\n\nclass ParticleEnv(Environment):\n \"\"\"A simple 2D point environment.\n\n Args:\n goal (np.ndarray): A 2D array representing the goal position\n arena_size (float): The size of arena where the point is constrained\n within (-arena_size, arena_size) in each dimension\n done_bonus (float): A numerical bonus added to the reward\n once the point as reached the goal\n never_done (bool): Never send a `done` signal, even if the\n agent achieves the goal\n max_episode_length (int): The maximum steps allowed for an episode.\n\n \"\"\"\n\n def __init__(self,\n goal=np.array((1., 1.), dtype=np.float32),\n arena_size=5.,\n done_bonus=0.,\n never_done=False,\n max_episode_length=math.inf):\n goal = np.array(goal, dtype=np.float32)\n self._goal = goal\n self._done_bonus = done_bonus\n self._never_done = never_done\n self._arena_size = arena_size\n self._total_step_cnt = 0 \n self.new_count = 0 \n self.done_visual = False \n\n\n\n assert ((goal >= -arena_size) & (goal <= arena_size)).all()\n\n self._step_cnt = None\n self._max_episode_length = max_episode_length\n self._visualize = False\n\n self._point = np.zeros_like(self._goal)\n self._task = {'goal': self._goal}\n self._observation_space = akro.Box(low=np.array([-266, 0]), high=np.array([266, 26]), dtype=np.float64)\n self._action_space = akro.Box(low=-4,\n high=20,\n shape=(2, ),\n dtype=np.float32)\n self._spec = EnvSpec(action_space=self.action_space,\n observation_space=self.observation_space,\n max_episode_length=max_episode_length)\n\n self.record_z = [] \n self.record_r = []\n self.record_pid = []\n print(\"INIIIITIALLIIISED\")\n\n @property\n def action_space(self):\n \"\"\"akro.Space: The action space specification.\"\"\"\n return self._action_space\n\n @property\n def observation_space(self):\n \"\"\"akro.Space: The observation space specification.\"\"\"\n return self._observation_space\n\n @property\n def spec(self):\n \"\"\"EnvSpec: The environment specification.\"\"\"\n return self._spec\n\n @property\n def render_modes(self):\n \"\"\"list: A list of string representing the supported render modes.\"\"\"\n return [\n 'ascii',\n ]\n\n def reset(self):\n \"\"\"Reset the environment.\n\n Returns:\n numpy.ndarray: The first observation conforming to\n `observation_space`.\n dict: The episode-level information.\n Note that this is not part of `env_info` provided in `step()`.\n It contains information of he entire episode, which could be\n needed to determine the first action (e.g. 
in the case of\n goal-conditioned or MTRL.)\n\n \"\"\"\n\n #self.event = pd.read_hdf('~/gnnfiles/data/ntuple_PU200_numEvent1000/ntuple_PU200_event0.h5')\n self.event = event[event['sim_pt'] > 2]\n #subset by the number of hits \n nh = self.event.groupby('particle_id').agg('count').iloc[:,0]\n # only pick the pids that has a certain number of hits \n self.event = self.event[self.event['particle_id'].isin(np.array(nh[nh > 7].index))]\n\n random_particle_id = random.choice(self.event.particle_id.values)\n self.particle = self.event[self.event['particle_id']==random_particle_id]\n self.original_pid = random_particle_id\n # This relies on an ordered df! \n start_hit = self.particle.iloc[0,:]\n self._point = start_hit[['z', 'r']].values \n next_hit = self.particle.iloc[1,:]\n self.num_track_hits = 0 \n dist = np.linalg.norm(start_hit[['z', 'r']].values - next_hit[['z', 'r']].values) \n #print(self._point, dist)\n self.state = start_hit.squeeze(axis=0) \n\n first_obs = np.concatenate([self._point, (dist, )])\n\n self._step_cnt = 0\n self.original_particle = self.event[self.event['particle_id']==self.original_pid].reset_index()\n\n return self._point, dict(goal=self._goal)\n\n def step(self, action):\n \"\"\"Step the environment.\n\n Args:\n action (np.ndarray): An action provided by the agent.\n\n Returns:\n EnvStep: The environment step resulting from the action.\n\n Raises:\n RuntimeError: if `step()` is called after the environment\n has been\n constructed and `reset()` has not been called.\n\n \"\"\"\n if self._step_cnt is None:\n raise RuntimeError('reset() must be called before step()!')\n \n self.new_count += 1 \n\n #print(\"i am stepping so new count is \", self.new_count)\n # enforce action space\n a = action.copy() # NOTE: we MUST copy the action before modifying it\n a = np.clip(a, self.action_space.low, self.action_space.high)\n\n #self._point = np.clip(self._point + a, -266)\n # 266)\n \n \n if self._visualize:\n print(self.render('ascii'))\n\n other_hits = self.event[self.event['hit_id']!=self.state.hit_id]\n # it's a big search, converting to list from pandas save an order of magnitude in time,a also just search a small part of the df \n zlist = other_hits.z.tolist()\n rlist = other_hits.r.tolist() \n\n distances = np.sqrt((zlist-self._point[0])**2+(rlist - self._point[1])**2) \n index = np.argmin(distances)\n \n new_hit = other_hits.iloc[index, ] \n distance_prev_hit = np.sqrt((self.state.r - new_hit.r)**2 + (self.state.z - new_hit.z)**2)\n \n self.state = new_hit \n\n # this is dangerous - relies on ordered df! 
\n next_hit = self.original_particle.loc[self.num_track_hits +1,: ]\n #reward given based on how close this new hit was to the next hit in the df \n distance = np.sqrt((new_hit.z - next_hit.z)**2 + (new_hit.r - next_hit.r)**2)\n reward = -distance\n #if (distance_prev_hit < 1): \n # reward -=100\n\n self.num_track_hits += 1 \n \n self.record_pid.append(self.original_pid)\n self.record_z.append(new_hit.z)\n self.record_r.append(new_hit.r)\n\n self._step_cnt += 1\n self._total_step_cnt += 1\n #print(self._step_cnt)\n\n if (self._total_step_cnt > 2000) & (self._total_step_cnt < 2010): \n print(\"I will now save the files \")\n np.savetxt('g_pids.csv', self.record_pid, delimiter=',')\n np.savetxt('g_z.csv', self.record_z, delimiter=',')\n np.savetxt('g_r.csv', self.record_r, delimiter=',')\n # pass \n\n if (self._total_step_cnt ==2011) & (self.done_visual == False) : \n print(self.done_visual, self._total_step_cnt)\n self.my_visualise()\n self.done_visual =True \n print(\"it shouldnt happen again\")\n #x = 2\n \n if self.num_track_hits > 6:\n done = True \n else: \n done = False \n #self.episode_counter +=1 \n\n self._point = [new_hit.z, new_hit.r]\n\n step_type = StepType.get_step_type(\n step_cnt=self._step_cnt,\n max_episode_length=self._max_episode_length,\n done=done)\n\n if step_type in (StepType.TERMINAL, StepType.TIMEOUT):\n self._step_cnt = None\n\n return EnvStep(env_spec=self.spec,\n action=action,\n reward=reward,\n observation=self._point,\n env_info={\n 'task': self._task,\n #'success': succ\n },\n step_type=step_type)\n\n def render(self, mode):\n \"\"\"Renders the environment.\n\n Args:\n mode (str): the mode to render with. The string must be present in\n `self.render_modes`.\n\n Returns:\n str: the point and goal of environment.\n\n \"\"\"\n return f'Point: {self._point}, Goal: {self._goal}'\n\n def visualize(self):\n \"\"\"Creates a visualization of the environment.\"\"\"\n #self._visualize = True\n #print(self.render('ascii'))\n #visualise(self.state.r, )\n #visualise() \n #wrap(self.event, r, z, pids, self.original_pid)\n print(self.original_pid)\n #print(\"i now visualise\")\n\n def my_visualise(self): \n print(\"now calling wrap\")\n wrap(self.event)\n \n def close(self):\n \"\"\"Close the env.\"\"\"\n\n # pylint: disable=no-self-use\n def sample_tasks(self, num_tasks):\n \"\"\"Sample a list of `num_tasks` tasks.\n\n Args:\n num_tasks (int): Number of tasks to sample.\n\n Returns:\n list[dict[str, np.ndarray]]: A list of \"tasks\", where each task is\n a dictionary containing a single key, \"goal\", mapping to a\n point in 2D space.\n\n \"\"\"\n goals = np.random.uniform(-2, 2, size=(num_tasks, 2))\n tasks = [{'goal': goal} for goal in goals]\n return tasks\n\n def set_task(self, task):\n \"\"\"Reset with a task.\n\n Args:\n task (dict[str, np.ndarray]): A task (a dictionary containing a\n single key, \"goal\", which should be a point in 2D space).\n\n \"\"\"\n self._task = task\n self._goal = task['goal']\n",
"id": "2248509",
"language": "Python",
"matching_score": 3.714590311050415,
"max_stars_count": 1,
"path": "src/garage/envs/test_particle_env.py"
},
{
"content": "import akro\nimport gym\nimport numpy as np\nimport math \n\nfrom garage import Environment, EnvSpec, EnvStep, StepType\n\nclass TestPointEnv(Environment):\n\n # ...\n def __init__(self, max_episode_length=math.inf): \n \n self._spec = EnvSpec(action_space=self.action_space,\n observation_space=self.observation_space,\n max_episode_length=max_episode_length)\n\n\n def step(self, action):\n self._state = self._state + action\n x, y = self._state\n reward = - (x**2 + y**2) ** 0.5\n done = abs(x) < 0.01 and abs(y) < 0.01\n next_observation = np.copy(self._state)\n return next_observation, reward, done, None\n\n\n\n \n @property\n def observation_space(self):\n return akro.Box(low=-np.inf, high=np.inf, shape=(2,))\n\n @property\n def action_space(self):\n return akro.Box(low=-0.1, high=0.1, shape=(2,))\n\n @property\n def render_modes(self):\n \"\"\"list: A list of string representing the supported render modes.\"\"\"\n return [\n 'ascii',\n ]\n @property\n def spec(self):\n \"\"\"EnvSpec: The environment specification.\"\"\"\n return self._spec\n\n \n\n def reset(self):\n self._state = np.random.uniform(-1, 1, size=(2,))\n observation = np.copy(self._state)\n return observation\n def render(self):\n print ('current state:', self._state)\n\n def close(self):\n \"\"\"Close the env.\"\"\"\n\n def visualize(self):\n \"\"\"Creates a visualization of the environment.\"\"\"\n self._visualize = True\n print(self.render('ascii'))\n \n def sample_tasks(self, num_tasks):\n \"\"\"Sample a list of `num_tasks` tasks.\n\n Args:\n num_tasks (int): Number of tasks to sample.\n\n Returns:\n list[dict[str, np.ndarray]]: A list of \"tasks\", where each task is\n a dictionary containing a single key, \"goal\", mapping to a\n point in 2D space.\n\n \"\"\"\n goals = np.random.uniform(-2, 2, size=(num_tasks, 2))\n tasks = [{'goal': goal} for goal in goals]\n return tasks",
"id": "3113797",
"language": "Python",
"matching_score": 3.2563626766204834,
"max_stars_count": 1,
"path": "src/garage/envs/testpoint_env.py"
},
{
"content": "import akro\nimport gym\nimport numpy as np\n\nfrom garage import Environment, EnvSpec, EnvStep, StepType\n\nclass PointEnv(Environment):\n\n # ...\n\n def step(self, action):\n self._state = self._state + action\n x, y = self._state\n reward = - (x**2 + y**2) ** 0.5\n done = abs(x) < 0.01 and abs(y) < 0.01\n next_observation = np.copy(self._state)\n return next_observation, reward, done, None\n @property\n def observation_space(self):\n return akro.Box(low=-np.inf, high=np.inf, shape=(2,))\n\n @property\n def action_space(self):\n return akro.Box(low=-0.1, high=0.1, shape=(2,))\n def reset(self):\n self._state = np.random.uniform(-1, 1, size=(2,))\n observation = np.copy(self._state)\n return observation\n def render(self):\n print ('current state:', self._state)\n\n",
"id": "1446446",
"language": "Python",
"matching_score": 0.5246303081512451,
"max_stars_count": 1,
"path": "src/garage/scripts/sim_env.py"
},
{
"content": "import numpy as np\nimport pandas as pd\n\n\n# Load the policy and the env in which it was trained\nfrom garage.experiment import Snapshotter\nfrom garage import rollout, obtain_evaluation_episodes\nimport tensorflow as tf # optional, only for TensorFlow as we need a tf.Session\n#from garage.envs import InferenceParticleEnvGnnLike, InferenceGnnLike \nfrom garage.envs import ParticleEnvGnnLike\nsnapshotter = Snapshotter()\nwith tf.compat.v1.Session(): # optional, only for TensorFlow\n data = snapshotter.load('/home/lhv14/garage/src/garage/examples/tf/data/local/experiment/tutorial_vpg_13')\n # data = snapshotter.load('/home/lhv14/garage/src/garage/examples/tf/data/local/experiment/td3_garage_tf_4')\n policy = data['algo'].policy\n env = data['env']\n #env = InferenceParticleEnvGnnLike() \n #print(\"THIS IS EVN\", env)\n #env = InferenceGnnLike()\n\n # See what the trained policy can accomplish\n \n pids = np.array([])\n rewards = np.array([]) \n actions_z = np.array([])\n actions_r = np.array([])\n observations_z = np.array([]) \n observations_r = np.array([]) \n actual_actions_z = np.array([])\n actual_actions_r = np.array([])\n predicted_point_z = np.array([])\n predicted_point_r = np.array([])\n\n\n for i in range(1000): \n path = rollout(env, policy, animated=False)\n #print(path['env_infos'])\n #print(path['env_infos'])\n pids = np.append(pids, path['env_infos']['particle_id'].flatten())\n rewards = np.append(rewards, path['rewards'])\n actions_z = np.append(actions_z, path['actions'][:,0])\n #if path['actions'][:,0] > 1: \n actions_r = np.append(actions_r, path['actions'][:,1]) \n actual_actions_z = np.append(actual_actions_z, path['env_infos']['actual_actions_z'].flatten())\n actual_actions_r = np.append(actual_actions_r, path['env_infos']['acutal_actions_r'].flatten())\n observations_z = np.append(observations_z, path['observations'][:,0]) \n observations_r = np.append(observations_r, path['observations'][:,1]) \n \n predicted_point_z = np.append(predicted_point_z, path['env_infos']['predicted_point_z'].flatten())\n predicted_point_r = np.append(predicted_point_r, path['env_infos']['predicted_point_r'].flatten())\n\n #print(rewards)\n #np.savetxt('test_rewards.csv', rewards, delimiter=',')\n #np.savetxt(\"test_actions.csv\", actions, delimiter=',')\n #np.savetxt(\"test_observations.csv\", observations, delimiter=',')\n #np.savetxt(\"test_particle_ids.csv\", pids, delimiter=',')\n df = pd.DataFrame({'particle_id':pids.flatten(), \n 'action_z':actions_z, \n 'action_r':actions_r, \n 'actual_action_z': actual_actions_z, \n 'actual_action_r': actual_actions_r,\n 'rewards':rewards, \n 'observation_z':observations_z, \n 'observation_r':observations_r, \n 'predicted_point_z':predicted_point_z, \n 'predicted_point_r':predicted_point_r\n })\n\n print(df) \n df.to_csv('test_inference_results.csv') \n print(\"it is above\", np.where(np.abs(actions_z > 1)))\n print(\"it is above\", np.where(np.abs(actions_r > 1)))\n #obtain_evaluation_episodes(policy, env)\n",
"id": "5420736",
"language": "Python",
"matching_score": 6.133256435394287,
"max_stars_count": 1,
"path": "src/garage/examples/tf/test_inference.py"
},
{
"content": "import numpy as np\nimport pandas as pd\n\n\n# Load the policy and the env in which it was trained\nfrom garage.experiment import Snapshotter\nfrom garage import rollout, obtain_evaluation_episodes\nimport tensorflow as tf # optional, only for TensorFlow as we need a tf.Session\n#from garage.envs import InferenceParticleEnvGnnLike, InferenceGnnLike \nfrom garage.envs import ParticleEnvGnnLike\nsnapshotter = Snapshotter()\nwith tf.compat.v1.Session(): # optional, only for TensorFlow\n data = snapshotter.load('/home/lhv14/garage/src/garage/examples/tf/data/local/experiment/tutorial_vpg_64', itr='last')\n # data = snapshotter.load('/home/lhv14/garage/src/garage/examples/tf/data/local/experiment/td3_garage_tf_4')\n policy = data['algo'].policy\n env = data['env']\n #env = InferenceParticleEnvGnnLike() \n #print(\"THIS IS EVN\", env)\n #env = InferenceGnnLike()\n\n # See what the trained policy can accomplish\n\n \n keys = ['filenumber', 'particle_id', 'mc_z', 'mc_r', 'pred_z', 'pred_r', 'action_z', 'action_r']\n #res = {key: [] for key in keys} \n res = dict([(key, []) for key in keys]) \n for i in range(10): \n path = rollout(env, policy, animated=False)\n for key in path['env_infos'].keys(): \n res[key].extend(path['env_infos'][key].flatten())\n # res[key] = 0\n #pids = np.append(pids, path['env_infos']['particle_id'].flatten())\n #rewards = np.append(rewards, path['rewards'])\n #actions_z = np.append(actions_z, path['actions'][:,0])\n #if path['actions'][:,0] > 1: \n #actions_r = np.append(actions_r, path['actions'][:,1]) \n #actual_actions_z = np.append(actual_actions_z, path['env_infos']['actual_actions_z'].flatten())\ndf = pd.DataFrame(res)\ndf.to_csv('inference_resuts.csv')\n",
"id": "3644292",
"language": "Python",
"matching_score": 2.453230619430542,
"max_stars_count": 1,
"path": "src/garage/examples/tf/evaluate_trained_model.py"
},
{
"content": "\"\"\"Garage wrappers for gym environments.\"\"\"\n\nfrom garage.envs.grid_world_env import GridWorldEnv\nfrom garage.envs.gym_env import GymEnv\nfrom garage.envs.metaworld_set_task_env import MetaWorldSetTaskEnv\nfrom garage.envs.multi_env_wrapper import MultiEnvWrapper\nfrom garage.envs.normalized_env import normalize\nfrom garage.envs.point_env import PointEnv\nfrom garage.envs.testpoint_env import TestPointEnv\nfrom garage.envs.task_name_wrapper import TaskNameWrapper\nfrom garage.envs.task_onehot_wrapper import TaskOnehotWrapper\n#from garage.envs.test_particle_env import ParticleEnv\n#from garage.envs.particle_env_prev import ParticleEnvPrev\n#from garage.envs.particle_env_prev_manyfiles import ParticleEnvPrevManyFiles\nfrom garage.envs.particle_env_kalman import ParticleEnvKalman\nfrom garage.envs.particle_env_simple import ParticleEnvSimple\nfrom garage.envs.particle_env_gnnlike import ParticleEnvGnnLike\nfrom garage.envs.inference_particle_env_gnnlike import InferenceParticleEnvGnnLike \nfrom garage.envs.inference_gnnlike import InferenceGnnLike \nfrom garage.envs.one_particle_env import OneParticleEnv\nfrom garage.envs.particle_point_env import ParticlePointEnv\nfrom garage.envs.particle_env_gnnlike_position_only import ParticleEnvGnnLikePositionOnly \nfrom garage.envs.trackml_env import TrackMLEnv\nfrom garage.envs.trackml_xyz_env import TrackMLxyzEnv\nfrom garage.envs.module_trackml_env import ModuleTrackMLEnv\nfrom garage.envs.seed_env import SeedEnv\nfrom garage.envs.trackml_env_benchmark import TrackMLEnvBenchmark\n__all__ = [\n 'GymEnv',\n 'GridWorldEnv',\n 'MetaWorldSetTaskEnv',\n 'MultiEnvWrapper',\n 'normalize',\n 'PointEnv',\n 'TestPointEnv',\n 'TaskOnehotWrapper',\n 'TaskNameWrapper',\n# 'ParticleEnv', \n# 'ParticleEnvPrev',\n# 'ParticleEnvPrevManyFiles',\n 'ParticleEnvKalman',\n 'ParticleEnvSimple',\n 'ParticleEnvGnnLike',\n 'InferenceParticleEnvGnnLike',\n 'InferenceGnnLike',\n 'OneParticleEnv',\n 'ParticlePointEnv',\n 'ParticleEnvGnnLikePositionOnly',\n 'TrackMLEnv',\n 'TrackMLxyzEnv',\n 'ModuleTrackMLEnv',\n 'SeedEnv',\n 'TrackMLEnvBenchmark',\n]\n",
"id": "1284719",
"language": "Python",
"matching_score": 0.7505789995193481,
"max_stars_count": 1,
"path": "src/garage/envs/__init__.py"
},
{
"content": "#from del_test import draw_label \n#from second import visualise, add_point, visualise_particle \n\nimport third \n\n\n\n\nx = 10\ny = 10 \n\n#draw_label(x,y)\n##visualise(0)\n#visualise_particle(-17737) \n#add_point(60, 60, 3) \nthird.visualise() \n",
"id": "9682482",
"language": "Python",
"matching_score": 0.457307904958725,
"max_stars_count": 1,
"path": "src/garage/pyglet/running.py"
},
{
"content": "import pyglet \nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt \nfrom pyglet import shapes\nfrom pyglet.gl import glClearColor\nimport time \n\n\n#event = pd.read_hdf('~/gnnfiles/data/ntuple_PU200_numEvent1000/ntuple_PU200_event0.h5')\n#r = pd.read_csv('~/garage/src/garage/examples/tf/g_r.csv', header=None)\n#z = pd.read_csv('~/garage/src/garage/examples/tf/g_z.csv', header=None)\n#pids = pd.read_csv('~/garage/src/garage/examples/tf/g_pids.csv', header=None)\n\nwindow_length = 800\nwindow_height = 800 \n\n\nscale_z = window_length/(267+267)\nscale_r = window_height/27\n\nbatch = pyglet.graphics.Batch()\n\nwindow = pyglet.window.Window(window_length+100, window_height+100)\n#pyglet.sprite.Sprite(a['head'][0], x=self.cor_x, y=self.cor_y, batch=batch, group=foreground)\n\nglClearColor(255, 255, 255, 1.0) # red, green, blue, and alpha(transparency)\n\n\n\nclass Tracks: \n def __init__(self, event, pids, rlist, zlist): \n self.pid = pids\n \n event['z'] = event['z'].values * scale_z + 266 \n event['r'] = event['r'].values * scale_r\n\n self.data = event[event['particle_id'] == self.pid] \n self.i = 0 \n self.last_add = 0 \n self.circles = [] \n self.rlist = rlist.values* scale_r \n self.zlist = zlist.values * scale_z + 266 \n\n def add_point(self, dt, pid): \n for i in len(self.data):\n if time.time() - self.last_add > dt: \n hit = self.data.iloc[self.i, :]\n self.circles.append(shapes.Circle(hit.z, hit.r, 5, color=(60,60,60), batch=batch))\n self.last_add = time.time() \n \n\n def track_length(self):\n return len(self.data) \n\n def is_finished(self):\n if self.i > self.track_length()-1: \n finished = True\n else: \n finished = False\n return finished \n\n def add_point_rf(self, dt, pid): \n \n indices = np.where(self.pids.values.flatten() == pid)\n for i in indices: \n if time.time() - self.last_add > dt: \n self.circles.append(shapes.Circle(self.zlist[self.i], self.rlist[self.i], 5, color=(250,0,0), batch=batch))\n self.last_add = time.time() \n\n def track_length_rf(self):\n return len(self.rlist) \n\n def is_finished_rf(self):\n if self.i > self.track_length() - 1: \n finished = True\n# window.close() \n else: \n finished = False\n return finished \n\n def plot_tracks(self): \n for particle in np.unique(self.pids): \n self.add_point(0.5, particle)\n self.add_point_rf(0.5, particle)\n \n label = pyglet.text.Label(\"Particle id: \"+str(particle), font_size=20, x=0, y=0, color=(255, 0, 0, 0), bacth=batch) \n label.color = (0, 0, 100, 255)\n\n\n\n\n@window.event\ndef on_draw():\n window.clear()\n\n\ndef dummy(dt): \n #print(\"dummy called\") \n dummy = 0 \n\n\ndef visualise(event, pids, rlist, zlist): \n tracks = Tracks(event, pids, rlist, zlist)\n tracks.plot_tracks() \n pyglet.clock.schedule_interval(dummy, 0.1)\n pyglet.app.run()\n",
"id": "346653",
"language": "Python",
"matching_score": 6.452510356903076,
"max_stars_count": 1,
"path": "src/garage/examples/tf/new_animate_particle.py"
},
{
"content": "import pyglet \nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt \nfrom pyglet import shapes\nfrom pyglet.gl import glClearColor\nimport time \n\n\ndef wrap(event): \n event = pd.read_hdf('~/gnnfiles/data/ntuple_PU200_numEvent1000/ntuple_PU200_event0.h5')\n r = pd.read_csv('~/garage/src/garage/examples/tf/g_r.csv', header=None)\n z = pd.read_csv('~/garage/src/garage/examples/tf/g_z.csv', header=None)\n pids = pd.read_csv('~/garage/src/garage/examples/tf/g_pids.csv', header=None)\n unique_pids = np.unique(pids)\n\n #todo: \n # Draw detector lines \n\n\n circles = [] \n batch = pyglet.graphics.Batch()\n\n i = 0 \n\n\n\n window_length = 800\n window_height = 800 \n\n\n scale_z = window_length/(267+267)\n scale_r = window_height/27\n\n event['z'] = event['z'].values * scale_z + 266 \n event['r'] = event['r'].values * scale_r\n\n\n window = pyglet.window.Window(window_length+100, window_height+100)\n #pyglet.sprite.Sprite(a['head'][0], x=self.cor_x, y=self.cor_y, batch=batch, group=foreground)\n\n class Track:\n def __init__(self, pid): \n self.pid = pid\n self.data = event[event['particle_id'] == self.pid] \n self.i = 0 \n self.last_add = 0 \n\n def add_point(self, dt): \n if self.i > (self.track_length() -1): \n #time_ended = time.time() \n pass\n else: \n if time.time() - self.last_add > dt: \n hit = self.data.iloc[self.i, :]\n circles.append(shapes.Circle(hit.z, hit.r, 5, color=(60,60,60), batch=batch))\n self.last_add = time.time() \n self.i +=1 \n\n def track_length(self):\n return len(self.data) \n\n def is_finished(self):\n if self.i > self.track_length()-1: \n finished = True\n else: \n finished = False\n return finished \n\n class rfTrack:\n def __init__(self, rlist, zlist, pid): \n self.i = 0 \n self.last_add = 0 \n indices = np.where(pids.values.flatten() == pid)\n self.rlist = rlist.values[indices] * scale_r \n self.zlist = zlist.values[indices] * scale_z + 266 \n\n def add_point(self, dt): \n if self.i > self.track_length() -1: \n pass\n else:\n if time.time() - self.last_add > dt: \n circles.append(shapes.Circle(self.zlist[self.i], self.rlist[self.i], 5, color=(250,0,0), batch=batch))\n self.last_add = time.time() \n self.i +=1 \n\n def track_length(self):\n return len(self.rlist) \n\n def is_finished(self):\n if self.i > self.track_length() - 1: \n finished = True\n # window.close() \n else: \n finished = False\n return finished \n\n glClearColor(255, 255, 255, 1.0) # red, green, blue, and alpha(transparency)\n\n pid = pids.values[0][0]\n\n track1 = Track(pid)\n track2 = rfTrack(r, z, pid)\n\n label = pyglet.text.Label(\"Particle id: \"+str(pid), font_size=20, x=0, y=0, color=(255, 0, 0, 0)) \n label.color = (0, 0, 100, 255)\n\n \n\n\n def dummy(dt): \n #print(\"dummy called\") \n dummy = 0 \n\n @window.event\n def on_draw():\n window.clear()\n label.draw()\n track1.add_point(0.1)\n if track1.is_finished():\n track2.add_point(0.1)\n \n closing_time()\n\n batch.draw() \n\n @window.event\n def closing_time(): \n if track2.is_finished() ==True: \n pyglet.app.exit()\n\n def visualise(): \n \n #breaks without the dummy call, no idea why \n pyglet.clock.schedule_interval(dummy, 0.1)\n\n # if __name__ == '__main__':\n\n pyglet.app.run()\n\n visualise() \n\n",
"id": "799156",
"language": "Python",
"matching_score": 2.625746965408325,
"max_stars_count": 1,
"path": "src/garage/examples/tf/animate_particle.py"
},
{
"content": "from tkinter import N\nfrom turtle import window_width\nimport pyglet \nimport pandas as pd \nimport numpy as np \nfrom pyglet import shapes\nfrom pyglet.gl import glClearColor\nfrom pyglet import clock\n\nrf_file = pd.read_csv('/home/lhv14/garage/src/garage/examples/tf/inference_resuts.csv')\n\nbatch = pyglet.graphics.Batch()\nwindow_length = 1000\nwindow_height = 1000 \n\nscale_z = window_length/(2*267) \nscale_r = window_height/27 \n\nn_track_hits = 6\n\nrf_file['mc_z'] = np.abs(rf_file['mc_z'].values)*scale_z + 266\nrf_file['mc_r'] = rf_file['mc_r'].values*scale_r \nrf_file['pred_z'] = np.abs(rf_file['pred_z'].values)*scale_z + 266\nrf_file['pred_r'] = rf_file['pred_r'].values*scale_r\n\n\np1 = rf_file[rf_file['particle_id']==-18951]\n\n# sample every 100th particle id \npids = rf_file.particle_id.values\nfiles = rf_file.filenumber.values\n\nwindow = pyglet.window.Window(window_length, window_height)\n\n\nlabel = pyglet.text.Label('Hello, world',\n font_name='Times New Roman',\n font_size=36,\n x=window.width//2, y=window.height//2,\n anchor_x='center', anchor_y='center')\n\n\n\nclass Point:\n def __init__(self): \n self.circles = [] \n self.i = 0 \n self.pid_counter = 0 \n self.pid = pids[0]\n self.filenumber = files[0]\n \n def plot_point(self, dt): \n self.particle = rf_file[(rf_file['particle_id']==self.pid) & (rf_file['filenumber']==self.filenumber)]\n\n \n color1 = 160\n #print(\"i is now \", self.i)\n print(self.particle)\n if self.i < (n_track_hits-1): \n hit = self.particle.iloc[self.i, ] \n self.circles.append(shapes.Circle(hit.mc_z, hit.mc_r, 5, color=(color1,60,60), batch=batch)) \n self.circles.append(pyglet.text.Label(\"Particle id: \" + str(self.pid) + \" After training on \" + str(self.pid_counter*10) +\"tracks\", font_size=12, batch=batch))\n\n self.i += 1 \n\n elif (self.i > (n_track_hits-2)) & (self.i < (n_track_hits*2-2)): \n hit = self.particle.iloc[self.i-(n_track_hits-1), ]\n color3 = 2014\n self.circles.append(shapes.Circle(hit.pred_z, hit.pred_r, 5, color=(0,60,color3), batch=batch)) \n self.i+=1 \n\n else: \n self.i = 0 \n self.pid_counter += 1 \n self.pid = pids[self.pid_counter]\n self.filenumber = files[self.pid_counter]\n #del(self.circles)\n self.circles = []\n #self.particle = rf_file[rf_file['particle_id']==self.pid]\n\n\n\np = Point() \n\nclock.schedule_interval(p.plot_point, 0.1)\n\n\n@window.event\ndef on_draw():\n window.clear()\n #label.draw()\n batch.draw() \n\npyglet.app.run()\n",
"id": "8750008",
"language": "Python",
"matching_score": 4.98964262008667,
"max_stars_count": 1,
"path": "src/garage/examples/tf/inference_track_vis.py"
},
{
"content": "import pyglet \nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt \nfrom pyglet import shapes\nfrom pyglet.gl import glClearColor\n\n\n\n\n#todo: \n# Make sure it's animated\n# Draw detector lines \n# Integrate with rf\n# Commadn line animation \n\n#line1 = shapes.Line(co_x1, co_y1, co_x2, co_y2, width, color = (50, 225, 30), batch = batch)\n\n\n\nevent = pd.read_hdf('~/gnnfiles/data/ntuple_PU200_numEvent1000/ntuple_PU200_event0.h5')\nr = pd.read_csv('~/garage/src/garage/examples/tf/g_r.csv', header=None)\nz = pd.read_csv('~/garage/src/garage/examples/tf/g_z.csv', header=None)\npids = pd.read_csv('~/garage/src/garage/examples/tf/g_pids.csv', header=None)\ngevent = pd.DataFrame({'particle_id': pids.values.flatten(), \n 'z': z.values.flatten(),\n 'r': r.values.flatten()})\n\n\ne_particle = event[event['particle_id']==-17737] \nrf_particle = gevent[gevent['particle_id']==-17737] \n\nbatch = pyglet.graphics.Batch()\nwindow_length = 1000\nwindow_height = 1000 \n\nscale_z = 1000/(267+267) \nscale_r = 1000/27 \n\n\ne_particle['z'] = e_particle['z'].values*scale_z + 266\ne_particle['r'] = e_particle['r'].values*scale_r \nrf_particle['z'] = rf_particle['z'].values*scale_z + 266\nrf_particle['r'] = rf_particle['r'].values*scale_r\n\n\nwindow = pyglet.window.Window(window_length, window_height)\n#pyglet.sprite.Sprite(a['head'][0], x=self.cor_x, y=self.cor_y, batch=batch, group=foreground)\n\n\nglClearColor(255, 255, 255, 1.0) # red, green, blue, and alpha(transparency)\n\n#circle = shapes.Circle(-41, 3.11, 300, color=(50,225,30), batch=batch) \n\n\nlabel = pyglet.text.Label('Hello, world',\n font_name='Times New Roman',\n font_size=36,\n x=window.width//2, y=window.height//2,\n anchor_x='center', anchor_y='center')\n\n#circle = shapes.Circle(700, 150, 100, color=(50, 225, 30), batch=batch)\n\n\ncircles = [] \ndef add_point(index, is_MC): \n if is_MC: \n hit = e_particle.iloc[index, ]\n color1 = 60\n else: \n hit = rf_particle.iloc[index, ] \n color1 = 160 \n\n circles.append(shapes.Circle(hit.z, hit.r, 15, color=(color1,60,60), batch=batch)) \n\n\ndef update(dt): \n for i in range(10): \n add_point(i, 1) \n add_point(i, 0)\n\n@window.event\ndef on_draw():\n window.clear()\n #label.draw()\n batch.draw() \n\npyglet.clock.schedule_interval(update, 3)\nif __name__ == '__main__':\n# for i in np.arange(0,10000, radius):\n# for j in np.arange(0,10000, radius):\n# itcount = 0\n# if i==180*5 and j ==180*5 and itcount<1: \n# state=1\n# itcount = 1\n# intbpstate = 'head'\n# else: \n# state=0\n# intbpstate = 'nostate'\n# cell_dict[(i,j)].draws(intbpstate)\n# batch.draw()\n# for i in range(10): \n # add_point(i, 1) \n x = 10\n # batch.draw() \n\n\npyglet.app.run()\n\n",
"id": "9548128",
"language": "Python",
"matching_score": 2.6354146003723145,
"max_stars_count": 1,
"path": "src/garage/pyglet/pyglet_test.py"
},
{
"content": "# importing pyglet module\nimport pyglet\nimport pyglet.window.key\n \n# width of window\nwidth = 500\n \n# height of window\nheight = 500\n \n# caption i.e title of the window\ntitle = \"Geeksforgeeks\"\n \n# creating a window\nwindow = pyglet.window.Window(width, height, title)\n \n# text\ntext = \"GeeksforGeeks\"\n \n# creating a label with font = times roman\n# font size = 36\n# aligning it to the center\nlabel = pyglet.text.Label(text,\n font_name ='Times New Roman',\n font_size = 36,\n x = window.width//2, y = window.height//2,\n anchor_x ='center', anchor_y ='center')\n \n# on draw event\n@window.event\ndef on_draw():\n \n # clearing the window\n window.clear()\n \n # drawing the label on the window\n label.draw()\n #window.close() \n \n# key press event \n@window.event\ndef on_key_press(symbol, modifier):\n \n # key \"E\" get press\n if symbol == pyglet.window.key.E:\n \n # close the window\n window.close()\n \n \n# start running the application\npyglet.app.run()\n",
"id": "11299905",
"language": "Python",
"matching_score": 1.8754652738571167,
"max_stars_count": 1,
"path": "src/garage/pyglet/window_close.py"
}
] | 3.636151 |
kylemannock | [
{
"content": "import unittest\nfrom pygame.tests.event_test import race_condition_notification\nimport pygame\nfrom pygame import event, fastevent\nfrom pygame.compat import geterror\n\n################################################################################\n\nclass FasteventModuleTest(unittest.TestCase):\n\n def setUp(self):\n pygame.display.init()\n fastevent.init()\n event.clear()\n self.assert_(not event.get())\n\n def tearDown(self):\n pygame.display.quit()\n\n def test_get(self):\n # __doc__ (as of 2008-08-02) for pygame.fastevent.get:\n\n # pygame.fastevent.get() -> list of Events\n # get all events from the queue\n\n for _ in range(1, 11):\n event.post(event.Event(pygame.USEREVENT))\n\n self.assertEquals (\n [e.type for e in fastevent.get()], [pygame.USEREVENT] * 10,\n race_condition_notification\n )\n\n def todo_test_init(self):\n # __doc__ (as of 2008-08-02) for pygame.fastevent.init:\n\n # pygame.fastevent.init() -> None\n # initialize pygame.fastevent.\n\n self.fail()\n\n def test_poll(self):\n\n # __doc__ (as of 2008-08-02) for pygame.fastevent.poll:\n\n # pygame.fastevent.poll() -> Event\n # get an available event\n #\n # Returns next event on queue. If there is no event waiting on the\n # queue, this will return an event with type NOEVENT.\n\n self.assertEquals (\n fastevent.poll().type, pygame.NOEVENT, race_condition_notification\n )\n\n def test_post(self):\n\n # __doc__ (as of 2008-08-02) for pygame.fastevent.post:\n\n # pygame.fastevent.post(Event) -> None\n # place an event on the queue\n #\n # This will post your own event objects onto the event queue.\n # You can past any event type you want, but some care must be\n # taken. For example, if you post a MOUSEBUTTONDOWN event to the\n # queue, it is likely any code receiving the event will expect\n # the standard MOUSEBUTTONDOWN attributes to be available, like\n # 'pos' and 'button'.\n #\n # Because pygame.fastevent.post() may have to wait for the queue\n # to empty, you can get into a dead lock if you try to append an\n # event on to a full queue from the thread that processes events.\n # For that reason I do not recommend using this function in the\n # main thread of an SDL program.\n\n for _ in range(1, 11):\n fastevent.post(event.Event(pygame.USEREVENT))\n\n self.assertEquals (\n [e.type for e in event.get()], [pygame.USEREVENT] * 10,\n race_condition_notification\n )\n\n try:\n # Special case for post: METH_O.\n fastevent.post(1)\n except TypeError:\n e = geterror()\n msg = (\"argument 1 must be %s, not %s\" %\n (fastevent.Event.__name__, type(1).__name__))\n self.assertEqual(str(e), msg)\n else:\n self.fail()\n\n def todo_test_pump(self):\n\n # __doc__ (as of 2008-08-02) for pygame.fastevent.pump:\n\n # pygame.fastevent.pump() -> None\n # update the internal messages\n #\n # For each frame of your game, you will need to make some sort\n # of call to the event queue. This ensures your program can internally\n # interact with the rest of the operating system. If you are not using\n # other event functions in your game, you should call pump() to allow\n # pygame to handle internal actions.\n #\n # There are important things that must be dealt with internally in the\n # event queue. The main window may need to be repainted. Certain joysticks\n # must be polled for their values. 
If you fail to make a call to the event\n        # queue for too long, the system may decide your program has locked up.\n\n        self.fail()\n\n    def test_wait(self):\n\n        # __doc__ (as of 2008-08-02) for pygame.fastevent.wait:\n\n        # pygame.fastevent.wait() -> Event\n        # wait for an event\n        #\n        # Returns the current event on the queue. If there are no messages\n        # waiting on the queue, this will not return until one is\n        # available. Sometimes it is important to use this wait to get\n        # events from the queue, it will allow your application to idle\n        # when the user isn't doing anything with it.\n\n        event.post(pygame.event.Event(1))\n        self.assertEquals(fastevent.wait().type, 1, race_condition_notification)\n\n################################################################################\n\nif __name__ == '__main__':\n    unittest.main()\n",
"id": "6512222",
"language": "Python",
"matching_score": 2.334386110305786,
"max_stars_count": 0,
"path": "test/fastevent_test.py"
},
{
"content": "#################################### IMPORTS ###################################\n# -*- encoding: utf-8 -*-\n\n\nimport unittest\nfrom pygame.tests.test_utils import unordered_equality\nimport pygame\nfrom pygame import sprite\n\n################################# MODULE LEVEL #################################\n\nclass SpriteModuleTest( unittest.TestCase ):\n pass\n\n######################### SPRITECOLLIDE FUNCTIONS TEST #########################\n\nclass SpriteCollideTest( unittest.TestCase ):\n def setUp(self):\n self.ag = sprite.AbstractGroup()\n self.ag2 = sprite.AbstractGroup()\n self.s1 = sprite.Sprite(self.ag)\n self.s2 = sprite.Sprite(self.ag2)\n self.s3 = sprite.Sprite(self.ag2)\n\n self.s1.image = pygame.Surface((50,10), pygame.SRCALPHA, 32)\n self.s2.image = pygame.Surface((10,10), pygame.SRCALPHA, 32)\n self.s3.image = pygame.Surface((10,10), pygame.SRCALPHA, 32)\n\n self.s1.rect = self.s1.image.get_rect()\n self.s2.rect = self.s2.image.get_rect()\n self.s3.rect = self.s3.image.get_rect()\n self.s2.rect.move_ip(40, 0)\n self.s3.rect.move_ip(100, 100)\n\n def test_spritecollide__works_if_collided_cb_is_None(self):\n # Test that sprites collide without collided function.\n self.assertEqual (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False, collided = None\n ),\n [self.s2]\n )\n\n def test_spritecollide__works_if_collided_cb_not_passed(self):\n # Should also work when collided function isn't passed at all.\n self.assertEqual(sprite.spritecollide (\n self.s1, self.ag2, dokill = False),\n [self.s2]\n )\n\n def test_spritecollide__collided_must_be_a_callable(self):\n # Need to pass a callable.\n self.assertRaises (\n TypeError,\n sprite.spritecollide, self.s1, self.ag2, dokill = False, collided = 1\n )\n\n def test_spritecollide__collided_defaults_to_collide_rect(self):\n # collide_rect should behave the same as default.\n self.assertEqual (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False, collided = sprite.collide_rect\n ),\n [self.s2]\n )\n\n def test_collide_rect_ratio__ratio_of_one_like_default(self):\n # collide_rect_ratio should behave the same as default at a 1.0 ratio.\n self.assertEqual (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False,\n collided = sprite.collide_rect_ratio(1.0)\n ),\n [self.s2]\n )\n\n def test_collide_rect_ratio__collides_all_at_ratio_of_twenty(self):\n # collide_rect_ratio should collide all at a 20.0 ratio.\n self.assert_ (\n unordered_equality (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False,\n collided = sprite.collide_rect_ratio(20.0)\n ),\n [self.s2, self.s3]\n )\n )\n\n def test_collide_circle__no_radius_set(self):\n # collide_circle with no radius set.\n self.assertEqual (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False, collided = sprite.collide_circle\n ),\n [self.s2]\n )\n\n def test_collide_circle_ratio__no_radius_and_ratio_of_one(self):\n # collide_circle_ratio with no radius set, at a 1.0 ratio.\n self.assertEqual (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False,\n collided = sprite.collide_circle_ratio(1.0)\n ),\n [self.s2]\n )\n\n def test_collide_circle_ratio__no_radius_and_ratio_of_twenty(self):\n # collide_circle_ratio with no radius set, at a 20.0 ratio.\n self.assert_ (\n unordered_equality (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False,\n collided = sprite.collide_circle_ratio(20.0)\n ),\n [self.s2, self.s3]\n )\n )\n\n def test_collide_circle__with_radii_set(self):\n # collide_circle with a radius set.\n\n self.s1.radius = 50\n 
self.s2.radius = 10\n self.s3.radius = 400\n\n self.assert_ (\n unordered_equality (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False,\n collided = sprite.collide_circle\n ),\n [self.s2, self.s3]\n )\n )\n\n def test_collide_circle_ratio__with_radii_set(self):\n self.s1.radius = 50\n self.s2.radius = 10\n self.s3.radius = 400\n\n # collide_circle_ratio with a radius set.\n self.assert_ (\n unordered_equality (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False,\n collided = sprite.collide_circle_ratio(0.5)\n ),\n [self.s2, self.s3]\n )\n )\n\n def test_collide_mask__opaque(self):\n # make some fully opaque sprites that will collide with masks.\n self.s1.image.fill((255,255,255,255))\n self.s2.image.fill((255,255,255,255))\n self.s3.image.fill((255,255,255,255))\n\n # masks should be autogenerated from image if they don't exist.\n self.assertEqual (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False,\n collided = sprite.collide_mask\n ),\n [self.s2]\n )\n\n self.s1.mask = pygame.mask.from_surface(self.s1.image)\n self.s2.mask = pygame.mask.from_surface(self.s2.image)\n self.s3.mask = pygame.mask.from_surface(self.s3.image)\n\n # with set masks.\n self.assertEqual (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False,\n collided = sprite.collide_mask\n ),\n [self.s2]\n )\n\n def test_collide_mask__transparent(self):\n # make some sprites that are fully transparent, so they won't collide.\n self.s1.image.fill((255,255,255,0))\n self.s2.image.fill((255,255,255,0))\n self.s3.image.fill((255,255,255,0))\n\n self.s1.mask = pygame.mask.from_surface(self.s1.image, 255)\n self.s2.mask = pygame.mask.from_surface(self.s2.image, 255)\n self.s3.mask = pygame.mask.from_surface(self.s3.image, 255)\n\n self.assertFalse (\n sprite.spritecollide (\n self.s1, self.ag2, dokill = False, collided = sprite.collide_mask\n )\n )\n\n def test_spritecollideany__without_collided_callback(self):\n\n # pygame.sprite.spritecollideany(sprite, group) -> sprite\n # finds any sprites that collide\n\n # if collided is not passed, all\n # sprites must have a \"rect\" value, which is a\n # rectangle of the sprite area, which will be used\n # to calculate the collision.\n\n # s2 in, s3 out\n self.assert_(\n sprite.spritecollideany(self.s1, self.ag2)\n )\n\n # s2 and s3 out\n self.s2.rect.move_ip(0, 10)\n self.assertFalse(sprite.spritecollideany(self.s1, self.ag2))\n\n # s2 out, s3 in\n self.s3.rect.move_ip(-105, -105)\n self.assert_(sprite.spritecollideany(self.s1, self.ag2))\n\n # s2 and s3 in\n self.s2.rect.move_ip(0, -10)\n self.assert_(sprite.spritecollideany(self.s1, self.ag2))\n\n def test_spritecollideany__with_collided_callback(self):\n\n # pygame.sprite.spritecollideany(sprite, group) -> sprite\n # finds any sprites that collide\n\n # collided is a callback function used to calculate if\n # two sprites are colliding. 
it should take two sprites\n # as values, and return a bool value indicating if\n # they are colliding.\n\n # This collision test can be faster than pygame.sprite.spritecollide()\n # since it has less work to do.\n\n arg_dict_a = {}\n arg_dict_b = {}\n return_container = [True]\n\n # This function is configurable using the mutable default arguments!\n def collided_callback(spr_a, spr_b,\n arg_dict_a=arg_dict_a, arg_dict_b=arg_dict_b,\n return_container=return_container):\n\n count = arg_dict_a.get(spr_a, 0)\n arg_dict_a[spr_a] = 1 + count\n\n count = arg_dict_b.get(spr_b, 0)\n arg_dict_b[spr_b] = 1 + count\n\n return return_container[0]\n\n # This should return True because return_container[0] is True\n self.assert_(\n sprite.spritecollideany(self.s1, self.ag2, collided_callback)\n )\n\n # The callback function should have been called only once, so self.s1\n # should have only been passed as an argument once\n self.assert_(len(arg_dict_a) == 1 and arg_dict_a[self.s1] == 1)\n\n # The callback function should have been called only once, so self.s2\n # exclusive-or self.s3 should have only been passed as an argument\n # once\n self.assert_(\n len(arg_dict_b) == 1 and list(arg_dict_b.values())[0] == 1 and\n (self.s2 in arg_dict_b or self.s3 in arg_dict_b)\n )\n\n arg_dict_a.clear()\n arg_dict_b.clear()\n return_container[0] = False\n\n # This should return False because return_container[0] is False\n self.assertFalse(\n sprite.spritecollideany(self.s1, self.ag2, collided_callback)\n )\n\n # The callback function should have been called as many times as\n # there are sprites in self.ag2\n self.assert_(len(arg_dict_a) == 1 and arg_dict_a[self.s1] == 2)\n\n # The callback function should have been twice because self.s2 and\n # self.s3 should have been passed once each\n self.assert_(\n len(arg_dict_b) == 2 and\n arg_dict_b[self.s2] == 1 and arg_dict_b[self.s3] == 1\n )\n\n def test_groupcollide__without_collided_callback(self):\n\n # pygame.sprite.groupcollide(groupa, groupb, dokilla, dokillb) -> dict\n # collision detection between group and group\n\n # test no kill\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False)\n self.assert_(crashed == {self.s1: [self.s2]})\n\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False)\n self.assert_(crashed == {self.s1: [self.s2]})\n\n # test killb\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, True)\n self.assert_(crashed == {self.s1: [self.s2]})\n\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False)\n self.assert_(crashed == {})\n\n # test killa\n self.s3.rect.move_ip(-100, -100)\n\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, True, False)\n self.assert_(crashed == {self.s1: [self.s3]})\n\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False)\n self.assert_(crashed == {})\n\n def test_groupcollide__with_collided_callback(self):\n\n collided_callback_true = lambda spr_a, spr_b: True\n collided_callback_false = lambda spr_a, spr_b: False\n\n # test no kill\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False,\n collided_callback_false)\n self.assert_(crashed == {})\n\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False,\n collided_callback_true)\n self.assert_(crashed == {self.s1: [self.s2, self.s3]} or\n crashed == {self.s1: [self.s3, self.s2]})\n\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, False,\n collided_callback_true)\n self.assert_(crashed == {self.s1: [self.s2, self.s3]} or\n crashed == 
{self.s1: [self.s3, self.s2]})\n\n # test killb\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, True,\n collided_callback_false)\n self.assert_(crashed == {})\n\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, True,\n collided_callback_true)\n self.assert_(crashed == {self.s1: [self.s2, self.s3]} or\n crashed == {self.s1: [self.s3, self.s2]})\n\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, False, True,\n collided_callback_true)\n self.assert_(crashed == {})\n\n # test killa\n self.ag.add(self.s2)\n self.ag2.add(self.s3)\n\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, True, False,\n collided_callback_false)\n self.assert_(crashed == {})\n\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, True, False,\n collided_callback_true)\n self.assert_(crashed == {self.s1: [self.s3], self.s2: [self.s3]})\n\n crashed = pygame.sprite.groupcollide(self.ag, self.ag2, True, False,\n collided_callback_true)\n self.assert_(crashed == {})\n\n def test_collide_rect(self):\n\n # Test colliding - some edges touching\n self.assert_(pygame.sprite.collide_rect(self.s1, self.s2))\n self.assert_(pygame.sprite.collide_rect(self.s2, self.s1))\n\n # Test colliding - all edges touching\n self.s2.rect.center = self.s3.rect.center\n self.assert_(pygame.sprite.collide_rect(self.s2, self.s3))\n self.assert_(pygame.sprite.collide_rect(self.s3, self.s2))\n\n # Test colliding - no edges touching\n self.s2.rect.inflate_ip(10, 10)\n self.assert_(pygame.sprite.collide_rect(self.s2, self.s3))\n self.assert_(pygame.sprite.collide_rect(self.s3, self.s2))\n\n # Test colliding - some edges intersecting\n self.s2.rect.center = (self.s1.rect.right, self.s1.rect.bottom)\n self.assert_(pygame.sprite.collide_rect(self.s1, self.s2))\n self.assert_(pygame.sprite.collide_rect(self.s2, self.s1))\n\n # Test not colliding\n self.assertFalse(pygame.sprite.collide_rect(self.s1, self.s3))\n self.assertFalse(pygame.sprite.collide_rect(self.s3, self.s1))\n\n################################################################################\n\nclass AbstractGroupTypeTest( unittest.TestCase ):\n def setUp(self):\n self.ag = sprite.AbstractGroup()\n self.ag2 = sprite.AbstractGroup()\n self.s1 = sprite.Sprite(self.ag)\n self.s2 = sprite.Sprite(self.ag)\n self.s3 = sprite.Sprite(self.ag2)\n self.s4 = sprite.Sprite(self.ag2)\n\n self.s1.image = pygame.Surface((10, 10))\n self.s1.image.fill(pygame.Color('red'))\n self.s1.rect = self.s1.image.get_rect()\n\n self.s2.image = pygame.Surface((10, 10))\n self.s2.image.fill(pygame.Color('green'))\n self.s2.rect = self.s2.image.get_rect()\n self.s2.rect.left = 10\n\n self.s3.image = pygame.Surface((10, 10))\n self.s3.image.fill(pygame.Color('blue'))\n self.s3.rect = self.s3.image.get_rect()\n self.s3.rect.top = 10\n\n self.s4.image = pygame.Surface((10, 10))\n self.s4.image.fill(pygame.Color('white'))\n self.s4.rect = self.s4.image.get_rect()\n self.s4.rect.left = 10\n self.s4.rect.top = 10\n\n self.bg = pygame.Surface((20, 20))\n self.scr = pygame.Surface((20, 20))\n self.scr.fill(pygame.Color('grey'))\n\n def test_has( self ):\n \" See if AbstractGroup.has() works as expected. 
\"\n\n self.assertEqual(True, self.s1 in self.ag)\n\n self.assertEqual(True, self.ag.has(self.s1))\n\n self.assertEqual(True, self.ag.has([self.s1, self.s2]))\n\n # see if one of them not being in there.\n self.assertNotEqual(True, self.ag.has([self.s1, self.s2, self.s3]))\n self.assertNotEqual(True, self.ag.has(self.s1, self.s2, self.s3))\n self.assertNotEqual(True, self.ag.has(self.s1,\n sprite.Group(self.s2, self.s3)))\n self.assertNotEqual(True, self.ag.has(self.s1, [self.s2, self.s3]))\n\n # test empty list processing\n self.assertFalse(self.ag.has(*[]))\n self.assertFalse(self.ag.has([]))\n self.assertFalse(self.ag.has([[]]))\n\n # see if a second AbstractGroup works.\n self.assertEqual(True, self.ag2.has(self.s3))\n\n def test_add(self):\n\n ag3 = sprite.AbstractGroup()\n self.assertFalse(self.s1 in ag3)\n self.assertFalse(self.s2 in ag3)\n self.assertFalse(self.s3 in ag3)\n self.assertFalse(self.s4 in ag3)\n\n ag3.add(self.s1, [self.s2], self.ag2)\n self.assert_(self.s1 in ag3)\n self.assert_(self.s2 in ag3)\n self.assert_(self.s3 in ag3)\n self.assert_(self.s4 in ag3)\n\n def test_add_internal(self):\n\n self.assertFalse(self.s1 in self.ag2)\n self.ag2.add_internal(self.s1)\n self.assert_(self.s1 in self.ag2)\n\n def test_clear(self):\n\n self.ag.draw(self.scr)\n self.ag.clear(self.scr, self.bg)\n self.assertEqual((0, 0, 0, 255),\n self.scr.get_at((5, 5)))\n self.assertEqual((0, 0, 0, 255),\n self.scr.get_at((15, 5)))\n\n def test_draw(self):\n\n self.ag.draw(self.scr)\n self.assertEqual((255, 0, 0, 255),\n self.scr.get_at((5, 5)))\n self.assertEqual((0, 255, 0, 255),\n self.scr.get_at((15, 5)))\n\n def test_empty(self):\n\n self.ag.empty()\n self.assertFalse(self.s1 in self.ag)\n self.assertFalse(self.s2 in self.ag)\n\n def test_has_internal(self):\n\n self.assert_(self.ag.has_internal(self.s1))\n self.assertFalse(self.ag.has_internal(self.s3))\n\n def test_remove(self):\n\n # Test removal of 1 sprite\n self.ag.remove(self.s1)\n self.assertFalse(self.ag in self.s1.groups())\n self.assertFalse(self.ag.has(self.s1))\n\n # Test removal of 2 sprites as 2 arguments\n self.ag2.remove(self.s3, self.s4)\n self.assertFalse(self.ag2 in self.s3.groups())\n self.assertFalse(self.ag2 in self.s4.groups())\n self.assertFalse(self.ag2.has(self.s3, self.s4))\n\n # Test removal of 4 sprites as a list containing a sprite and a group\n # containing a sprite and another group containing 2 sprites.\n self.ag.add(self.s1, self.s3, self.s4)\n self.ag2.add(self.s3, self.s4)\n g = sprite.Group(self.s2)\n self.ag.remove([self.s1, g], self.ag2)\n self.assertFalse(self.ag in self.s1.groups())\n self.assertFalse(self.ag in self.s2.groups())\n self.assertFalse(self.ag in self.s3.groups())\n self.assertFalse(self.ag in self.s4.groups())\n self.assertFalse(self.ag.has(self.s1, self.s2, self.s3, self.s4))\n\n def test_remove_internal(self):\n\n self.ag.remove_internal(self.s1)\n self.assertFalse(self.ag.has_internal(self.s1))\n\n def test_sprites(self):\n\n sprite_list = self.ag.sprites()\n self.assert_(sprite_list == [self.s1, self.s2] or\n sprite_list == [self.s2, self.s1])\n\n def test_update(self):\n\n class test_sprite(pygame.sprite.Sprite):\n sink = []\n def __init__(self, *groups):\n pygame.sprite.Sprite.__init__(self, *groups)\n def update(self, *args):\n self.sink += args\n\n s = test_sprite(self.ag)\n self.ag.update(1, 2, 3)\n\n self.assertEqual(test_sprite.sink, [1, 2, 3])\n\n\n################################################################################\n\n# A base class to share tests between 
similar classes\n\nclass LayeredGroupBase:\n def test_get_layer_of_sprite(self):\n self.assert_(len(self.LG._spritelist)==0)\n spr = self.sprite()\n self.LG.add(spr, layer=666)\n self.assert_(len(self.LG._spritelist)==1)\n self.assert_(self.LG.get_layer_of_sprite(spr)==666)\n self.assert_(self.LG.get_layer_of_sprite(spr)==self.LG._spritelayers[spr])\n\n\n def test_add(self):\n self.assert_(len(self.LG._spritelist)==0)\n spr = self.sprite()\n self.LG.add(spr)\n self.assert_(len(self.LG._spritelist)==1)\n self.assert_(self.LG.get_layer_of_sprite(spr)==self.LG._default_layer)\n\n def test_add__sprite_with_layer_attribute(self):\n #test_add_sprite_with_layer_attribute\n\n self.assert_(len(self.LG._spritelist)==0)\n spr = self.sprite()\n spr._layer = 100\n self.LG.add(spr)\n self.assert_(len(self.LG._spritelist)==1)\n self.assert_(self.LG.get_layer_of_sprite(spr)==100)\n\n def test_add__passing_layer_keyword(self):\n # test_add_sprite_passing_layer\n\n self.assert_(len(self.LG._spritelist)==0)\n spr = self.sprite()\n self.LG.add(spr, layer=100)\n self.assert_(len(self.LG._spritelist)==1)\n self.assert_(self.LG.get_layer_of_sprite(spr)==100)\n\n def test_add__overriding_sprite_layer_attr(self):\n # test_add_sprite_overriding_layer_attr\n\n self.assert_(len(self.LG._spritelist)==0)\n spr = self.sprite()\n spr._layer = 100\n self.LG.add(spr, layer=200)\n self.assert_(len(self.LG._spritelist)==1)\n self.assert_(self.LG.get_layer_of_sprite(spr)==200)\n\n def test_add__adding_sprite_on_init(self):\n # test_add_sprite_init\n\n spr = self.sprite()\n lrg2 = sprite.LayeredUpdates(spr)\n self.assert_(len(lrg2._spritelist)==1)\n self.assert_(lrg2._spritelayers[spr]==lrg2._default_layer)\n\n def test_add__sprite_init_layer_attr(self):\n # test_add_sprite_init_layer_attr\n\n spr = self.sprite()\n spr._layer = 20\n lrg2 = sprite.LayeredUpdates(spr)\n self.assert_(len(lrg2._spritelist)==1)\n self.assert_(lrg2._spritelayers[spr]==20)\n\n def test_add__sprite_init_passing_layer(self):\n # test_add_sprite_init_passing_layer\n\n spr = self.sprite()\n lrg2 = sprite.LayeredUpdates(spr, layer=33)\n self.assert_(len(lrg2._spritelist)==1)\n self.assert_(lrg2._spritelayers[spr]==33)\n\n def test_add__sprite_init_overiding_layer(self):\n # test_add_sprite_init_overiding_layer\n\n spr = self.sprite()\n spr._layer = 55\n lrg2 = sprite.LayeredUpdates(spr, layer=33)\n self.assert_(len(lrg2._spritelist)==1)\n self.assert_(lrg2._spritelayers[spr]==33)\n\n def test_add__spritelist(self):\n # test_add_spritelist\n\n self.assert_(len(self.LG._spritelist)==0)\n sprites = []\n for i in range(10):\n sprites.append(self.sprite())\n self.LG.add(sprites)\n self.assert_(len(self.LG._spritelist)==10)\n for i in range(10):\n self.assert_(self.LG.get_layer_of_sprite(sprites[i])==self.LG._default_layer)\n\n def test_add__spritelist_with_layer_attr(self):\n # test_add_spritelist_with_layer_attr\n\n self.assert_(len(self.LG._spritelist)==0)\n sprites = []\n for i in range(10):\n sprites.append(self.sprite())\n sprites[-1]._layer = i\n self.LG.add(sprites)\n self.assert_(len(self.LG._spritelist)==10)\n for i in range(10):\n self.assert_(self.LG.get_layer_of_sprite(sprites[i])==i)\n\n def test_add__spritelist_passing_layer(self):\n # test_add_spritelist_passing_layer\n\n self.assert_(len(self.LG._spritelist)==0)\n sprites = []\n for i in range(10):\n sprites.append(self.sprite())\n self.LG.add(sprites, layer=33)\n self.assert_(len(self.LG._spritelist)==10)\n for i in range(10):\n self.assert_(self.LG.get_layer_of_sprite(sprites[i])==33)\n\n def 
test_add__spritelist_overriding_layer(self):\n # test_add_spritelist_overriding_layer\n\n self.assert_(len(self.LG._spritelist)==0)\n sprites = []\n for i in range(10):\n sprites.append(self.sprite())\n sprites[-1].layer = i\n self.LG.add(sprites, layer=33)\n self.assert_(len(self.LG._spritelist)==10)\n for i in range(10):\n self.assert_(self.LG.get_layer_of_sprite(sprites[i])==33)\n\n def test_add__spritelist_init(self):\n # test_add_spritelist_init\n\n self.assert_(len(self.LG._spritelist)==0)\n sprites = []\n for i in range(10):\n sprites.append(self.sprite())\n lrg2 = sprite.LayeredUpdates(sprites)\n self.assert_(len(lrg2._spritelist)==10)\n for i in range(10):\n self.assert_(lrg2.get_layer_of_sprite(sprites[i])==self.LG._default_layer)\n\n def test_remove__sprite(self):\n # test_remove_sprite\n\n self.assert_(len(self.LG._spritelist)==0)\n sprites = []\n for i in range(10):\n sprites.append(self.sprite())\n sprites[-1].rect = 0\n self.LG.add(sprites)\n self.assert_(len(self.LG._spritelist)==10)\n for i in range(10):\n self.LG.remove(sprites[i])\n self.assert_(len(self.LG._spritelist)==0)\n\n def test_sprites(self):\n # test_sprites\n\n self.assert_(len(self.LG._spritelist)==0)\n sprites = []\n for i in range(10):\n sprites.append(self.sprite())\n sprites[-1]._layer = 10-i\n self.LG.add(sprites)\n self.assert_(len(self.LG._spritelist)==10)\n for idx,spr in enumerate(self.LG.sprites()):\n self.assert_(spr == sprites[9-idx])\n\n def test_layers(self):\n # test_layers\n\n self.assert_(len(self.LG._spritelist)==0)\n sprites = []\n for i in range(10):\n for j in range(5):\n sprites.append(self.sprite())\n sprites[-1]._layer = i\n self.LG.add(sprites)\n lays = self.LG.layers()\n for i in range(10):\n self.assert_(lays[i] == i)\n\n def test_add__layers_are_correct(self): #TODO\n # test_layers2\n\n self.assert_(len(self.LG)==0)\n layers = [1,4,6,8,3,6,2,6,4,5,6,1,0,9,7,6,54,8,2,43,6,1]\n for lay in layers:\n self.LG.add(self.sprite(), layer=lay)\n layers.sort()\n for idx, spr in enumerate(self.LG.sprites()):\n self.assert_(self.LG.get_layer_of_sprite(spr)==layers[idx])\n\n def test_change_layer(self):\n # test_change_layer\n\n self.assert_(len(self.LG._spritelist)==0)\n spr = self.sprite()\n self.LG.add(spr, layer=99)\n self.assert_(self.LG._spritelayers[spr] == 99)\n self.LG.change_layer(spr, 44)\n self.assert_(self.LG._spritelayers[spr] == 44)\n\n spr2 = self.sprite()\n spr2.layer = 55\n self.LG.add(spr2)\n self.LG.change_layer(spr2, 77)\n self.assert_(spr2.layer == 77)\n\n def test_get_top_layer(self):\n # test_get_top_layer\n\n layers = [1,5,2,8,4,5,3,88,23,0]\n for i in layers:\n self.LG.add(self.sprite(), layer=i)\n self.assert_(self.LG.get_top_layer()==max(layers))\n self.assert_(self.LG.get_top_layer()==max(self.LG._spritelayers.values()))\n self.assert_(self.LG.get_top_layer()==self.LG._spritelayers[self.LG._spritelist[-1]])\n\n def test_get_bottom_layer(self):\n # test_get_bottom_layer\n\n layers = [1,5,2,8,4,5,3,88,23,0]\n for i in layers:\n self.LG.add(self.sprite(), layer=i)\n self.assert_(self.LG.get_bottom_layer()==min(layers))\n self.assert_(self.LG.get_bottom_layer()==min(self.LG._spritelayers.values()))\n self.assert_(self.LG.get_bottom_layer()==self.LG._spritelayers[self.LG._spritelist[0]])\n\n def test_move_to_front(self):\n # test_move_to_front\n\n layers = [1,5,2,8,4,5,3,88,23,0]\n for i in layers:\n self.LG.add(self.sprite(), layer=i)\n spr = self.sprite()\n self.LG.add(spr, layer=3)\n self.assert_(spr != self.LG._spritelist[-1])\n self.LG.move_to_front(spr)\n 
self.assert_(spr == self.LG._spritelist[-1])\n\n def test_move_to_back(self):\n # test_move_to_back\n\n layers = [1,5,2,8,4,5,3,88,23,0]\n for i in layers:\n self.LG.add(self.sprite(), layer=i)\n spr = self.sprite()\n self.LG.add(spr, layer=55)\n self.assert_(spr != self.LG._spritelist[0])\n self.LG.move_to_back(spr)\n self.assert_(spr == self.LG._spritelist[0])\n\n def test_get_top_sprite(self):\n # test_get_top_sprite\n\n layers = [1,5,2,8,4,5,3,88,23,0]\n for i in layers:\n self.LG.add(self.sprite(), layer=i)\n self.assert_(self.LG.get_layer_of_sprite(self.LG.get_top_sprite())== self.LG.get_top_layer())\n\n def test_get_sprites_from_layer(self):\n # test_get_sprites_from_layer\n\n self.assert_(len(self.LG)==0)\n sprites = {}\n layers = [1,4,5,6,3,7,8,2,1,3,4,5,6,7,8,9,1,2,3,4,5,6,7,8,9,0,1,6,5,4,3,2]\n for lay in layers:\n spr = self.sprite()\n spr._layer = lay\n self.LG.add(spr)\n if lay not in sprites:\n sprites[lay] = []\n sprites[lay].append(spr)\n\n for lay in self.LG.layers():\n for spr in self.LG.get_sprites_from_layer(lay):\n self.assert_(spr in sprites[lay])\n sprites[lay].remove(spr)\n if len(sprites[lay]) == 0:\n del sprites[lay]\n self.assert_(len(sprites.values())==0)\n\n def test_switch_layer(self):\n # test_switch_layer\n\n self.assert_(len(self.LG)==0)\n sprites1 = []\n sprites2 = []\n layers = [3,2,3,2,3,3,2,2,3,2,3,2,3,2,3,2,3,3,2,2,3,2,3]\n for lay in layers:\n spr = self.sprite()\n spr._layer = lay\n self.LG.add(spr)\n if lay==2:\n sprites1.append(spr)\n else:\n sprites2.append(spr)\n\n for spr in sprites1:\n self.assert_(spr in self.LG.get_sprites_from_layer(2))\n for spr in sprites2:\n self.assert_(spr in self.LG.get_sprites_from_layer(3))\n self.assert_(len(self.LG)==len(sprites1)+len(sprites2))\n\n self.LG.switch_layer(2,3)\n\n for spr in sprites1:\n self.assert_(spr in self.LG.get_sprites_from_layer(3))\n for spr in sprites2:\n self.assert_(spr in self.LG.get_sprites_from_layer(2))\n self.assert_(len(self.LG)==len(sprites1)+len(sprites2))\n\n def test_copy(self):\n\n self.LG.add(self.sprite())\n spr = self.LG.sprites()[0]\n lg_copy = self.LG.copy()\n self.assert_(isinstance(lg_copy, type(self.LG)))\n self.assert_(spr in lg_copy and lg_copy in spr.groups())\n\n########################## LAYERED RENDER GROUP TESTS ##########################\n\nclass LayeredUpdatesTypeTest__SpriteTest(LayeredGroupBase, unittest.TestCase):\n sprite = sprite.Sprite\n\n def setUp(self):\n self.LG = sprite.LayeredUpdates()\n\nclass LayeredUpdatesTypeTest__DirtySprite(LayeredGroupBase, unittest.TestCase):\n sprite = sprite.DirtySprite\n\n def setUp(self):\n self.LG = sprite.LayeredUpdates()\n\nclass LayeredDirtyTypeTest__DirtySprite(LayeredGroupBase, unittest.TestCase):\n sprite = sprite.DirtySprite\n\n def setUp(self):\n self.LG = sprite.LayeredDirty()\n\n def test_repaint_rect(self):\n group = self.LG\n surface = pygame.Surface((100, 100))\n\n group.repaint_rect(pygame.Rect(0, 0, 100, 100))\n group.draw(surface)\n\n def test_repaint_rect_with_clip(self):\n group = self.LG\n surface = pygame.Surface((100, 100))\n\n group.set_clip(pygame.Rect(0, 0, 100, 100))\n group.repaint_rect(pygame.Rect(0, 0, 100, 100))\n group.draw(surface)\n\n############################### SPRITE BASE CLASS ##############################\n#\n# tests common between sprite classes\n\nclass SpriteBase:\n def setUp(self):\n self.groups = []\n for Group in self.Groups:\n self.groups.append(Group())\n\n self.sprite = self.Sprite()\n\n def test_add_internal(self):\n\n for g in self.groups:\n 
self.sprite.add_internal(g)\n\n for g in self.groups:\n self.assert_(g in self.sprite.groups())\n\n def test_remove_internal(self):\n\n for g in self.groups:\n self.sprite.add_internal(g)\n\n for g in self.groups:\n self.sprite.remove_internal(g)\n\n for g in self.groups:\n self.assertFalse(g in self.sprite.groups())\n\n def test_update(self):\n\n class test_sprite(pygame.sprite.Sprite):\n sink = []\n def __init__(self, *groups):\n pygame.sprite.Sprite.__init__(self, *groups)\n def update(self, *args):\n self.sink += args\n\n s = test_sprite()\n s.update(1, 2, 3)\n\n self.assertEqual(test_sprite.sink, [1, 2, 3])\n\n def test___init____added_to_groups_passed(self):\n self.sprite = self.Sprite(self.groups)\n\n self.assert_(unordered_equality(\n self.sprite.groups(),\n self.groups\n ))\n\n def test_add(self):\n self.sprite.add(self.groups)\n\n self.assert_(unordered_equality(\n self.sprite.groups(),\n self.groups\n ))\n\n def test_alive(self):\n self.assert_(\n not self.sprite.alive(),\n \"Sprite should not be alive if in no groups\"\n )\n\n self.sprite.add(self.groups)\n self.assert_(self.sprite.alive())\n\n def test_groups(self):\n for i, g in enumerate(self.groups):\n self.sprite.add(g)\n\n groups = self.sprite.groups()\n self.assert_( unordered_equality (\n groups,\n self.groups[:i+1],\n ))\n\n def test_kill(self):\n self.sprite.add(self.groups)\n\n self.assert_(self.sprite.alive())\n self.sprite.kill()\n\n self.assert_(not self.sprite.groups() and not self.sprite.alive() )\n\n def test_remove(self):\n self.sprite.add(self.groups)\n self.sprite.remove(self.groups)\n self.assert_(not self.sprite.groups())\n\n############################## SPRITE CLASS TESTS ##############################\n\nclass SpriteTypeTest(SpriteBase, unittest.TestCase):\n Sprite = sprite.Sprite\n\n Groups = [ sprite.Group,\n sprite.LayeredUpdates,\n sprite.RenderUpdates,\n sprite.OrderedUpdates, ]\n\nclass DirtySpriteTypeTest(SpriteBase, unittest.TestCase):\n Sprite = sprite.DirtySprite\n\n Groups = [ sprite.Group,\n sprite.LayeredUpdates,\n sprite.RenderUpdates,\n sprite.OrderedUpdates,\n sprite.LayeredDirty, ]\n\n############################## BUG TESTS #######################################\n\nclass SingleGroupBugsTest(unittest.TestCase):\n def test_memoryleak_bug(self):\n # For memoryleak bug posted to mailing list by <NAME> on 16/11/10.\n # Fixed in revision 2953.\n\n import weakref\n import gc\n\n class MySprite(sprite.Sprite):\n def __init__(self, *args, **kwargs):\n sprite.Sprite.__init__(self, *args, **kwargs)\n self.image = pygame.Surface( (2, 4), 0, 24 )\n self.rect = self.image.get_rect()\n\n g = sprite.GroupSingle()\n screen = pygame.Surface((4, 8), 0, 24)\n s = MySprite()\n r = weakref.ref(s)\n g.sprite = s\n del s\n gc.collect()\n self.assert_(r() is not None)\n g.update()\n g.draw(screen)\n g.sprite = MySprite()\n gc.collect()\n self.assert_(r() is None)\n\n################################################################################\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "1101302",
"language": "Python",
"matching_score": 1.4573942422866821,
"max_stars_count": 1,
"path": "test/sprite_test.py"
},
{
"content": "import os\nimport sys\nif os.environ.get('SDL_VIDEODRIVER') == 'dummy':\n __tags__ = ('ignore', 'subprocess_ignore')\nimport unittest\nfrom pygame.tests.test_utils import trunk_relative_path\n\nimport pygame\nfrom pygame import scrap\nfrom pygame.compat import as_bytes\n\nclass ScrapModuleTest(unittest.TestCase):\n not_initialized = True\n\n def setUp(self):\n if self.not_initialized:\n pygame.init ()\n pygame.display.set_mode ((1, 1))\n scrap.init ()\n self.not_initialized = False\n\n def todo_test_contains(self):\n\n # __doc__ (as of 2008-08-02) for pygame.scrap.contains:\n\n # scrap.contains (type) -> bool\n # Checks, whether a certain type is available in the clipboard.\n #\n # Returns True, if data for the passed type is available in the\n # clipboard, False otherwise.\n #\n # if pygame.scrap.contains (SCRAP_TEXT):\n # print \"There is text in the clipboard.\"\n # if pygame.scrap.contains (\"own_data_type\"):\n # print \"There is stuff in the clipboard.\"\n\n self.fail()\n\n def todo_test_get(self):\n\n # __doc__ (as of 2008-08-02) for pygame.scrap.get:\n\n # scrap.get (type) -> string\n # Gets the data for the specified type from the clipboard.\n #\n # Returns the data for the specified type from the clipboard. The data\n # is returned as string and might need further processing. If no data\n # for the passed type is available, None is returned.\n #\n # text = pygame.scrap.get (SCRAP_TEXT)\n # if text:\n # # Do stuff with it.\n # else:\n # print \"There does not seem to be text in the clipboard.\"\n\n self.fail()\n\n def todo_test_get_types(self):\n\n # __doc__ (as of 2008-08-02) for pygame.scrap.get_types:\n\n # scrap.get_types () -> list\n # Gets a list of the available clipboard types.\n #\n # Gets a list of strings with the identifiers for the available\n # clipboard types. Each identifier can be used in the scrap.get()\n # method to get the clipboard content of the specific type. If there\n # is no data in the clipboard, an empty list is returned.\n #\n # types = pygame.scrap.get_types ()\n # for t in types:\n # if \"text\" in t:\n # # There is some content with the word \"text\" in it. It's\n # # possibly text, so print it.\n # print pygame.scrap.get (t)\n\n self.fail()\n\n def todo_test_init(self):\n\n # __doc__ (as of 2008-08-02) for pygame.scrap.init:\n\n # scrap.init () -> None\n # Initializes the scrap module.\n #\n # Tries to initialize the scrap module and raises an exception, if it\n # fails. Note that this module requires a set display surface, so you\n # have to make sure, you acquired one earlier using\n # pygame.display.set_mode().\n #\n\n self.fail()\n\n def todo_test_lost(self):\n\n # __doc__ (as of 2008-08-02) for pygame.scrap.lost:\n\n # scrap.lost() -> bool\n # Checks whether the clipboard is currently owned by the application.\n #\n # Returns True, if the clipboard is currently owned by the pygame\n # application, False otherwise.\n #\n # if pygame.scrap.lost ():\n # print \"No content from me anymore. 
The clipboard is used by someone else.\"\n\n self.fail()\n\n def test_set_mode (self):\n scrap.set_mode (pygame.SCRAP_SELECTION)\n scrap.set_mode (pygame.SCRAP_CLIPBOARD)\n self.assertRaises (ValueError, scrap.set_mode, 1099)\n\n def test_scrap_put_text (self):\n scrap.put (pygame.SCRAP_TEXT, as_bytes(\"Hello world\"))\n self.assertEquals (scrap.get (pygame.SCRAP_TEXT),\n as_bytes(\"Hello world\"))\n\n scrap.put (pygame.SCRAP_TEXT, as_bytes(\"Another String\"))\n self.assertEquals (scrap.get (pygame.SCRAP_TEXT),\n as_bytes(\"Another String\"))\n\n def test_scrap_put_image (self):\n if 'pygame.image' not in sys.modules:\n return\n sf = pygame.image.load (\n trunk_relative_path(\"examples/data/asprite.bmp\")\n )\n string = pygame.image.tostring (sf, \"RGBA\")\n scrap.put (pygame.SCRAP_BMP, string)\n self.assertEquals (scrap.get(pygame.SCRAP_BMP), string)\n\n def test_put (self):\n scrap.put (\"arbitrary buffer\", as_bytes(\"buf\"))\n r = scrap.get (\"arbitrary buffer\")\n self.assertEquals (r, as_bytes(\"buf\"))\n\nclass X11InteractiveTest(unittest.TestCase):\n __tags__ = ['ignore', 'subprocess_ignore']\n try:\n pygame.display.init()\n except Exception:\n pass\n else:\n if pygame.display.get_driver() == 'x11':\n __tags__ = ['interactive']\n pygame.display.quit()\n\n def test_issue_208(self):\n \"\"\"PATCH: pygame.scrap on X11, fix copying into PRIMARY selection\n\n Copying into theX11 PRIMARY selection (mouse copy/paste) would not\n work due to a confusion between content type and clipboard type.\n\n \"\"\"\n\n from pygame import display, event, freetype\n from pygame.locals import SCRAP_SELECTION, SCRAP_TEXT\n from pygame.locals import KEYDOWN, K_y, QUIT\n\n success = False\n freetype.init()\n font = freetype.Font(None, 24)\n display.init()\n display.set_caption(\"Interactive X11 Paste Test\")\n screen = display.set_mode((600, 200))\n screen.fill(pygame.Color('white'))\n text = \"Scrap put() succeeded.\"\n msg = ('Some text has been placed into the X11 clipboard.'\n ' Please click the center mouse button in an open'\n ' text window to retrieve it.'\n '\\n\\nDid you get \"{}\"? 
(y/n)').format(text)\n word_wrap(screen, msg, font, 6)\n display.flip()\n event.pump()\n scrap.init()\n scrap.set_mode(SCRAP_SELECTION)\n scrap.put(SCRAP_TEXT, text.encode('UTF-8'))\n while True:\n e = event.wait()\n if e.type == QUIT:\n break\n if e.type == KEYDOWN:\n success = (e.key == K_y)\n break\n pygame.display.quit()\n self.assertTrue(success)\n\ndef word_wrap(surf, text, font, margin=0, color=(0, 0, 0)):\n font.origin = True\n surf_width, surf_height = surf.get_size()\n width = surf_width - 2 * margin\n height = surf_height - 2 * margin\n line_spacing = int(1.25 * font.get_sized_height())\n x, y = margin, margin + line_spacing\n space = font.get_rect(' ')\n for word in iwords(text):\n if word == '\\n':\n x, y = margin, y + line_spacing\n else:\n bounds = font.get_rect(word)\n if x + bounds.width + bounds.x >= width:\n x, y = margin, y + line_spacing\n if x + bounds.width + bounds.x >= width:\n raise ValueError(\"word too wide for the surface\")\n if y + bounds.height - bounds.y >= height:\n raise ValueError(\"text to long for the surface\")\n font.render_to(surf, (x, y), None, color)\n x += bounds.width + space.width\n return x, y\n\ndef iwords(text):\n # r\"\\n|[^ ]+\"\n #\n head = 0\n tail = head\n end = len(text)\n while head < end:\n if text[head] == ' ':\n head += 1\n tail = head + 1\n elif text[head] == '\\n':\n head += 1\n yield '\\n'\n tail = head + 1\n elif tail == end:\n yield text[head:]\n head = end\n elif text[tail] == '\\n':\n yield text[head:tail]\n head = tail\n elif text[tail] == ' ':\n yield text[head:tail]\n head = tail\n else:\n tail += 1\n\nif __name__ == '__main__':\n unittest.main()\n",
"id": "4639942",
"language": "Python",
"matching_score": 3.0912327766418457,
"max_stars_count": 0,
"path": "test/scrap_test.py"
},
{
"content": "\"\"\"pygame.midi\npygame module for interacting with midi input and output.\n\nThe midi module can send output to midi devices, and get input\nfrom midi devices. It can also list midi devices on the system.\n\nIncluding real midi devices, and virtual ones.\n\nIt uses the portmidi library. Is portable to which ever platforms\nportmidi supports (currently windows, OSX, and linux).\n\nThis uses pyportmidi for now, but may use its own bindings at some\npoint in the future. The pyportmidi bindings are included with pygame.\n\nNew in pygame 1.9.0.\n\"\"\"\n\n\n#TODO:\n# - finish writing tests.\n# - likely as interactive tests... so you'd need to plug in a midi device.\n# - create a background thread version for input threads.\n# - that can automatically inject input into the event queue\n# once the input object is running. Like joysticks.\n\n\n\nimport atexit\nimport math\n\nimport pygame\nimport pygame.locals\n\n\n\n#\nMIDIIN = pygame.locals.USEREVENT + 10\nMIDIOUT = pygame.locals.USEREVENT + 11\n\n_init = False\n_pypm = None\n\n\n__all__ = [\n \"Input\",\n \"MIDIIN\",\n \"MIDIOUT\",\n \"MidiException\",\n \"Output\",\n \"get_count\",\n \"get_default_input_id\",\n \"get_default_output_id\",\n \"get_device_info\",\n \"init\",\n \"midis2events\",\n \"quit\",\n \"time\",\n \"frequency_to_midi\",\n \"midi_to_frequency\",\n \"midi_to_ansi_note\",\n]\n\n__theclasses__ = [\"Input\", \"Output\"]\n\n\ndef init():\n \"\"\"initialize the midi module\n pygame.midi.init(): return None\n\n Call the initialisation function before using the midi module.\n\n It is safe to call this more than once.\n \"\"\"\n global _init, _pypm\n if not _init:\n import pygame.pypm\n _pypm = pygame.pypm\n\n _pypm.Initialize()\n _init = True\n atexit.register(quit)\n\n\ndef quit():\n \"\"\"uninitialize the midi module\n pygame.midi.quit(): return None\n\n\n Called automatically atexit if you don't call it.\n\n It is safe to call this function more than once.\n \"\"\"\n global _init, _pypm\n if _init:\n # TODO: find all Input and Output classes and close them first?\n _pypm.Terminate()\n _init = False\n del _pypm\n #del pygame._pypm\n\ndef _check_init():\n if not _init:\n raise RuntimeError(\"pygame.midi not initialised.\")\n\ndef get_count():\n \"\"\"gets the number of devices.\n pygame.midi.get_count(): return num_devices\n\n\n Device ids range from 0 to get_count() -1\n \"\"\"\n _check_init()\n return _pypm.CountDevices()\n\n\n\n\ndef get_default_input_id():\n \"\"\"gets default input device number\n pygame.midi.get_default_input_id(): return default_id\n\n\n Return the default device ID or -1 if there are no devices.\n The result can be passed to the Input()/Ouput() class.\n\n On the PC, the user can specify a default device by\n setting an environment variable. For example, to use device #1.\n\n set PM_RECOMMENDED_INPUT_DEVICE=1\n\n The user should first determine the available device ID by using\n the supplied application \"testin\" or \"testout\".\n\n In general, the registry is a better place for this kind of info,\n and with USB devices that can come and go, using integers is not\n very reliable for device identification. Under Windows, if\n PM_RECOMMENDED_OUTPUT_DEVICE (or PM_RECOMMENDED_INPUT_DEVICE) is\n *NOT* found in the environment, then the default device is obtained\n by looking for a string in the registry under:\n HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Input_Device\n and HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Output_Device\n for a string. 
The number of the first device with a substring that\n matches the string exactly is returned. For example, if the string\n in the registry is \"USB\", and device 1 is named\n \"In USB MidiSport 1x1\", then that will be the default\n input because it contains the string \"USB\".\n\n In addition to the name, get_device_info() returns \"interf\", which\n is the interface name. (The \"interface\" is the underlying software\n system or API used by PortMidi to access devices. Examples are\n MMSystem, DirectX (not implemented), ALSA, OSS (not implemented), etc.)\n At present, the only Win32 interface is \"MMSystem\", the only Linux\n interface is \"ALSA\", and the only Max OS X interface is \"CoreMIDI\".\n To specify both the interface and the device name in the registry,\n separate the two with a comma and a space, e.g.:\n MMSystem, In USB MidiSport 1x1\n In this case, the string before the comma must be a substring of\n the \"interf\" string, and the string after the space must be a\n substring of the \"name\" name string in order to match the device.\n\n Note: in the current release, the default is simply the first device\n (the input or output device with the lowest PmDeviceID).\n \"\"\"\n return _pypm.GetDefaultInputDeviceID()\n\n\n\n\ndef get_default_output_id():\n \"\"\"gets default output device number\n pygame.midi.get_default_output_id(): return default_id\n\n\n Return the default device ID or -1 if there are no devices.\n The result can be passed to the Input()/Ouput() class.\n\n On the PC, the user can specify a default device by\n setting an environment variable. For example, to use device #1.\n\n set PM_RECOMMENDED_OUTPUT_DEVICE=1\n\n The user should first determine the available device ID by using\n the supplied application \"testin\" or \"testout\".\n\n In general, the registry is a better place for this kind of info,\n and with USB devices that can come and go, using integers is not\n very reliable for device identification. Under Windows, if\n PM_RECOMMENDED_OUTPUT_DEVICE (or PM_RECOMMENDED_INPUT_DEVICE) is\n *NOT* found in the environment, then the default device is obtained\n by looking for a string in the registry under:\n HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Input_Device\n and HKEY_LOCAL_MACHINE/SOFTWARE/PortMidi/Recommended_Output_Device\n for a string. The number of the first device with a substring that\n matches the string exactly is returned. For example, if the string\n in the registry is \"USB\", and device 1 is named\n \"In USB MidiSport 1x1\", then that will be the default\n input because it contains the string \"USB\".\n\n In addition to the name, get_device_info() returns \"interf\", which\n is the interface name. (The \"interface\" is the underlying software\n system or API used by PortMidi to access devices. 
Examples are\n MMSystem, DirectX (not implemented), ALSA, OSS (not implemented), etc.)\n At present, the only Win32 interface is \"MMSystem\", the only Linux\n interface is \"ALSA\", and the only Max OS X interface is \"CoreMIDI\".\n To specify both the interface and the device name in the registry,\n separate the two with a comma and a space, e.g.:\n MMSystem, In USB MidiSport 1x1\n In this case, the string before the comma must be a substring of\n the \"interf\" string, and the string after the space must be a\n substring of the \"name\" name string in order to match the device.\n\n Note: in the current release, the default is simply the first device\n (the input or output device with the lowest PmDeviceID).\n \"\"\"\n _check_init()\n return _pypm.GetDefaultOutputDeviceID()\n\n\ndef get_device_info(an_id):\n \"\"\" returns information about a midi device\n pygame.midi.get_device_info(an_id): return (interf, name, input, output, opened)\n\n interf - a text string describing the device interface, eg 'ALSA'.\n name - a text string for the name of the device, eg 'Midi Through Port-0'\n input - 0, or 1 if the device is an input device.\n output - 0, or 1 if the device is an output device.\n opened - 0, or 1 if the device is opened.\n\n If the id is out of range, the function returns None.\n \"\"\"\n _check_init()\n return _pypm.GetDeviceInfo(an_id)\n\n\nclass Input(object):\n \"\"\"Input is used to get midi input from midi devices.\n Input(device_id)\n Input(device_id, buffer_size)\n\n buffer_size - the number of input events to be buffered waiting to\n be read using Input.read()\n \"\"\"\n\n def __init__(self, device_id, buffer_size=4096):\n \"\"\"\n The buffer_size specifies the number of input events to be buffered\n waiting to be read using Input.read().\n \"\"\"\n _check_init()\n\n if device_id == -1:\n raise MidiException(\"Device id is -1, not a valid output id. 
-1 usually means there were no default Output devices.\")\n\n try:\n r = get_device_info(device_id)\n except TypeError:\n raise TypeError(\"an integer is required\")\n except OverflowError:\n raise OverflowError(\"long int too large to convert to int\")\n\n # and now some nasty looking error checking, to provide nice error\n # messages to the kind, lovely, midi using people of whereever.\n if r:\n interf, name, input, output, opened = r\n if input:\n try:\n self._input = _pypm.Input(device_id, buffer_size)\n except TypeError:\n raise TypeError(\"an integer is required\")\n self.device_id = device_id\n\n elif output:\n raise MidiException(\"Device id given is not a valid input id, it is an output id.\")\n else:\n raise MidiException(\"Device id given is not a valid input id.\")\n else:\n raise MidiException(\"Device id invalid, out of range.\")\n\n\n\n\n def _check_open(self):\n if self._input is None:\n raise MidiException(\"midi not open.\")\n\n\n\n def close(self):\n \"\"\" closes a midi stream, flushing any pending buffers.\n Input.close(): return None\n\n PortMidi attempts to close open streams when the application\n exits -- this is particularly difficult under Windows.\n \"\"\"\n _check_init()\n if not (self._input is None):\n self._input.Close()\n self._input = None\n\n\n\n def read(self, num_events):\n \"\"\"reads num_events midi events from the buffer.\n Input.read(num_events): return midi_event_list\n\n Reads from the Input buffer and gives back midi events.\n [[[status,data1,data2,data3],timestamp],\n [[status,data1,data2,data3],timestamp],...]\n \"\"\"\n _check_init()\n self._check_open()\n return self._input.Read(num_events)\n\n\n def poll(self):\n \"\"\"returns true if there's data, or false if not.\n Input.poll(): return Bool\n\n raises a MidiException on error.\n \"\"\"\n _check_init()\n self._check_open()\n\n r = self._input.Poll()\n if r == _pypm.TRUE:\n return True\n elif r == _pypm.FALSE:\n return False\n else:\n err_text = GetErrorText(r)\n raise MidiException( (r, err_text) )\n\n\n\n\nclass Output(object):\n \"\"\"Output is used to send midi to an output device\n Output(device_id)\n Output(device_id, latency = 0)\n Output(device_id, buffer_size = 4096)\n Output(device_id, latency, buffer_size)\n\n The buffer_size specifies the number of output events to be\n buffered waiting for output. (In some cases -- see below --\n PortMidi does not buffer output at all and merely passes data\n to a lower-level API, in which case buffersize is ignored.)\n\n latency is the delay in milliseconds applied to timestamps to determine\n when the output should actually occur. (If latency is < 0, 0 is\n assumed.)\n\n If latency is zero, timestamps are ignored and all output is delivered\n immediately. If latency is greater than zero, output is delayed until\n the message timestamp plus the latency. (NOTE: time is measured\n relative to the time source indicated by time_proc. Timestamps are\n absolute, not relative delays or offsets.) In some cases, PortMidi\n can obtain better timing than your application by passing timestamps\n along to the device driver or hardware. 
Latency may also help you\n to synchronize midi data to audio data by matching midi latency to\n the audio buffer latency.\n\n \"\"\"\n\n def __init__(self, device_id, latency = 0, buffer_size = 4096):\n \"\"\"Output(device_id)\n Output(device_id, latency = 0)\n Output(device_id, buffer_size = 4096)\n Output(device_id, latency, buffer_size)\n\n The buffer_size specifies the number of output events to be\n buffered waiting for output. (In some cases -- see below --\n PortMidi does not buffer output at all and merely passes data\n to a lower-level API, in which case buffersize is ignored.)\n\n latency is the delay in milliseconds applied to timestamps to determine\n when the output should actually occur. (If latency is < 0, 0 is\n assumed.)\n\n If latency is zero, timestamps are ignored and all output is delivered\n immediately. If latency is greater than zero, output is delayed until\n the message timestamp plus the latency. (NOTE: time is measured\n relative to the time source indicated by time_proc. Timestamps are\n absolute, not relative delays or offsets.) In some cases, PortMidi\n can obtain better timing than your application by passing timestamps\n along to the device driver or hardware. Latency may also help you\n to synchronize midi data to audio data by matching midi latency to\n the audio buffer latency.\n \"\"\"\n\n _check_init()\n self._aborted = 0\n\n if device_id == -1:\n raise MidiException(\"Device id is -1, not a valid output id. -1 usually means there were no default Output devices.\")\n\n try:\n r = get_device_info(device_id)\n except TypeError:\n raise TypeError(\"an integer is required\")\n except OverflowError:\n raise OverflowError(\"long int too large to convert to int\")\n\n # and now some nasty looking error checking, to provide nice error\n # messages to the kind, lovely, midi using people of whereever.\n if r:\n interf, name, input, output, opened = r\n if output:\n try:\n self._output = _pypm.Output(device_id, latency)\n except TypeError:\n raise TypeError(\"an integer is required\")\n self.device_id = device_id\n\n elif input:\n raise MidiException(\"Device id given is not a valid output id, it is an input id.\")\n else:\n raise MidiException(\"Device id given is not a valid output id.\")\n else:\n raise MidiException(\"Device id invalid, out of range.\")\n\n def _check_open(self):\n if self._output is None:\n raise MidiException(\"midi not open.\")\n\n if self._aborted:\n raise MidiException(\"midi aborted.\")\n\n\n def close(self):\n \"\"\" closes a midi stream, flushing any pending buffers.\n Output.close(): return None\n\n PortMidi attempts to close open streams when the application\n exits -- this is particularly difficult under Windows.\n \"\"\"\n _check_init()\n if not (self._output is None):\n self._output.Close()\n self._output = None\n\n def abort(self):\n \"\"\"terminates outgoing messages immediately\n Output.abort(): return None\n\n The caller should immediately close the output port;\n this call may result in transmission of a partial midi message.\n There is no abort for Midi input because the user can simply\n ignore messages in the buffer and close an input device at\n any time.\n \"\"\"\n\n _check_init()\n if self._output:\n self._output.Abort()\n self._aborted = 1\n\n\n\n\n\n def write(self, data):\n \"\"\"writes a list of midi data to the Output\n Output.write(data)\n\n writes series of MIDI information in the form of a list:\n write([[[status <,data1><,data2><,data3>],timestamp],\n [[status <,data1><,data2><,data3>],timestamp],...])\n 
<data> fields are optional\n example: choose program change 1 at time 20000 and\n send note 65 with velocity 100 500 ms later.\n write([[[0xc0,0,0],20000],[[0x90,60,100],20500]])\n notes:\n 1. timestamps will be ignored if latency = 0.\n 2. To get a note to play immediately, send MIDI info with\n timestamp read from function Time.\n 3. understanding optional data fields:\n write([[[0xc0,0,0],20000]]) is equivalent to\n write([[[0xc0],20000]])\n\n Can send up to 1024 elements in your data list, otherwise an\n IndexError exception is raised.\n \"\"\"\n _check_init()\n self._check_open()\n\n self._output.Write(data)\n\n def write_short(self, status, data1=0, data2=0):\n \"\"\"write_short(status <, data1><, data2>)\n Output.write_short(status)\n Output.write_short(status, data1 = 0, data2 = 0)\n\n output MIDI information of 3 bytes or less.\n data fields are optional\n status byte could be:\n 0xc0 = program change\n 0x90 = note on\n etc.\n data bytes are optional and assumed 0 if omitted\n example: note 65 on with velocity 100\n write_short(0x90,65,100)\n \"\"\"\n _check_init()\n self._check_open()\n self._output.WriteShort(status, data1, data2)\n\n def write_sys_ex(self, when, msg):\n \"\"\"writes a timestamped system-exclusive midi message.\n Output.write_sys_ex(when, msg)\n\n msg - can be a *list* or a *string*\n when - a timestamp in miliseconds\n example:\n (assuming o is an onput MIDI stream)\n o.write_sys_ex(0,'\\\\xF0\\\\x7D\\\\x10\\\\x11\\\\x12\\\\x13\\\\xF7')\n is equivalent to\n o.write_sys_ex(pygame.midi.time(),\n [0xF0,0x7D,0x10,0x11,0x12,0x13,0xF7])\n \"\"\"\n _check_init()\n self._check_open()\n self._output.WriteSysEx(when, msg)\n\n def note_on(self, note, velocity, channel=0):\n \"\"\"turns a midi note on. Note must be off.\n Output.note_on(note, velocity, channel=0)\n\n note is an integer from 0 to 127\n velocity is an integer from 0 to 127\n channel is an integer from 0 to 15\n\n Turn a note on in the output stream. The note must already\n be off for this to work correctly.\n \"\"\"\n if not (0 <= channel <= 15):\n raise ValueError(\"Channel not between 0 and 15.\")\n\n self.write_short(0x90 + channel, note, velocity)\n\n def note_off(self, note, velocity=0, channel=0):\n \"\"\"turns a midi note off. Note must be on.\n Output.note_off(note, velocity=0, channel=0)\n\n note is an integer from 0 to 127\n velocity is an integer from 0 to 127 (release velocity)\n channel is an integer from 0 to 15\n\n Turn a note off in the output stream. The note must already\n be on for this to work correctly.\n \"\"\"\n if not (0 <= channel <= 15):\n raise ValueError(\"Channel not between 0 and 15.\")\n\n self.write_short(0x80 + channel, note, velocity)\n\n\n def set_instrument(self, instrument_id, channel=0):\n \"\"\"select an instrument for a channel, with a value between 0 and 127\n Output.set_instrument(instrument_id, channel=0)\n\n Also called \"patch change\" or \"program change\".\n \"\"\"\n if not (0 <= instrument_id <= 127):\n raise ValueError(\"Undefined instrument id: %d\" % instrument_id)\n\n if not (0 <= channel <= 15):\n raise ValueError(\"Channel not between 0 and 15.\")\n\n self.write_short(0xc0 + channel, instrument_id)\n\n def pitch_bend(self, value=0, channel=0):\n \"\"\"modify the pitch of a channel.\n Output.pitch_bend(value=0, channel=0)\n\n Adjust the pitch of a channel. The value is a signed integer\n from -8192 to +8191. 
For example, 0 means \"no change\", +4096 is\n typically a semitone higher, and -8192 is 1 whole tone lower (though\n the musical range corresponding to the pitch bend range can also be\n changed in some synthesizers).\n\n If no value is given, the pitch bend is returned to \"no change\".\n \"\"\"\n if not (0 <= channel <= 15):\n raise ValueError(\"Channel not between 0 and 15.\")\n\n if not (-8192 <= value <= 8191):\n raise ValueError(\"Pitch bend value must be between \"\n \"-8192 and +8191, not %d.\" % value)\n\n # \"The 14 bit value of the pitch bend is defined so that a value of\n # 0x2000 is the center corresponding to the normal pitch of the note\n # (no pitch change).\" so value=0 should send 0x2000\n value = value + 0x2000\n LSB = value & 0x7f # keep least 7 bits\n MSB = value >> 7\n self.write_short(0xe0 + channel, LSB, MSB)\n\n\n\n\"\"\"\nMIDI commands\n\n 0x80 Note Off (note_off)\n 0x90 Note On (note_on)\n 0xA0 Aftertouch\n 0xB0 Continuous controller\n 0xC0 Patch change (set_instrument?)\n 0xD0 Channel Pressure\n 0xE0 Pitch bend\n 0xF0 (non-musical commands)\n\"\"\"\n\n\n\ndef time():\n \"\"\"returns the current time in ms of the PortMidi timer\n pygame.midi.time(): return time\n\n The time is reset to 0, when the module is inited.\n \"\"\"\n return _pypm.Time()\n\n\n\ndef midis2events(midis, device_id):\n \"\"\"converts midi events to pygame events\n pygame.midi.midis2events(midis, device_id): return [Event, ...]\n\n Takes a sequence of midi events and returns list of pygame events.\n \"\"\"\n evs = []\n for midi in midis:\n\n ((status,data1,data2,data3),timestamp) = midi\n\n e = pygame.event.Event(MIDIIN,\n status=status,\n data1=data1,\n data2=data2,\n data3=data3,\n timestamp=timestamp,\n vice_id = device_id)\n evs.append( e )\n\n\n return evs\n\n\n\n\n\nclass MidiException(Exception):\n \"\"\"exception that pygame.midi functions and classes can raise\n MidiException(errno)\n \"\"\"\n def __init__(self, value):\n self.parameter = value\n def __str__(self):\n return repr(self.parameter)\n\n\n\ndef frequency_to_midi(freqency):\n \"\"\" converts a frequency into a MIDI note.\n\n Rounds to the closest midi note.\n\n ::Examples::\n\n >>> frequency_to_midi(27.5)\n 21\n >>> frequency_to_midi(36.7)\n 26\n >>> frequency_to_midi(4186.0)\n 108\n \"\"\"\n return int(\n round(\n 69 + (\n 12 * math.log(freqency / 440.0)\n ) / math.log(2)\n )\n )\n\ndef midi_to_frequency(midi_note):\n \"\"\" Converts a midi note to a frequency.\n\n ::Examples::\n\n >>> midi_to_frequency(21)\n 27.5\n >>> midi_to_frequency(26)\n 36.7\n >>> midi_to_frequency(108)\n 4186.0\n \"\"\"\n return round(440.0 * 2 ** ((midi_note - 69) * (1./12.)), 1)\n\ndef midi_to_ansi_note(midi_note):\n \"\"\" returns the Ansi Note name for a midi number.\n\n ::Examples::\n\n >>> midi_to_ansi_note(21)\n 'A0'\n >>> midi_to_ansi_note(102)\n 'F#7'\n >>> midi_to_ansi_note(108)\n 'C8'\n \"\"\"\n notes = ['A', 'A#', 'B', 'C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#']\n num_notes = 12\n note_name = notes[int(((midi_note - 21) % num_notes))]\n note_number = int(round(((midi_note - 21) / 11.0)))\n return '%s%s' % (note_name, note_number)\n",
"id": "3860010",
"language": "Python",
"matching_score": 1.7060141563415527,
"max_stars_count": 0,
"path": "src_py/midi.py"
},
{
"content": "import random\nimport pygame, sys\nfrom pygame.locals import *\n\nBLACK = (0, 0, 0)\nBROWN = (153,76, 0)\nGREEN = (0, 255,0)\nBLUE = (0, 0, 255)\nGREY = (211,211,211)\nRED = (255,0, 0)\nWHITE = (255,255,255)\n\nDIRT = 0\nGRASS = 1\nWATER = 2\nCOAL = 3\nROCK = 4\nLAVA = 5\nCLOUD = 6\n\nresources = [DIRT, GRASS, WATER, COAL, ROCK, LAVA]\n\n'''colours = {\n DIRT : BROWN,\n GRASS : GREEN,\n WATER : BLUE,\n COAL : BLACK,\n ROCK : GREY,\n LAVA : RED\n }'''\n\ntextures = {\n DIRT : pygame.image.load(\"images/dirt.png\"),\n GRASS : pygame.image.load(\"images/grass.png\"),\n WATER : pygame.image.load(\"images/water.png\"),\n COAL : pygame.image.load(\"images/coal.png\"),\n ROCK : pygame.image.load(\"images/rock.png\"),\n LAVA : pygame.image.load(\"images/lava.png\"),\n CLOUD : pygame.image.load(\"images/cloud.png\")\n }\n\ninventory = {\n DIRT : 0,\n GRASS : 0,\n WATER : 0,\n COAL : 0,\n ROCK : 0,\n LAVA : 0\n }\n\n'''tilemap = [\n [GRASS, COAL, DIRT, DIRT, ROCK],\n [WATER, WATER, GRASS, ROCK, ROCK],\n [COAL, GRASS, WATER, LAVA, ROCK],\n [DIRT, GRASS, COAL, LAVA, COAL],\n [GRASS, WATER, DIRT, GRASS, LAVA]\n ]'''\n\nTILESIZE = 40\nMAPWIDTH = 40\nMAPHEIGHT = 20\n\n'''tilemap = [[random.choice(resources)\n for w in range(MAPWIDTH)]\n for h in range(MAPHEIGHT)]'''\n\ntilemap = [[DIRT for w in range(MAPWIDTH)] for h in range(MAPHEIGHT)]\n\ncount = [0] * (LAVA + 1)\n\nfor rw in range(MAPHEIGHT):\n for cl in range(MAPWIDTH):\n randomNumber = random.randint(0,20)\n if randomNumber == 0:\n tile = LAVA\n elif randomNumber == 1 or randomNumber == 2:\n tile = ROCK\n elif randomNumber >= 3 and randomNumber <=5:\n tile = COAL\n elif randomNumber >= 6 and randomNumber <=8:\n tile = WATER\n elif randomNumber >= 9 and randomNumber <=11:\n tile = GRASS\n else:\n tile = DIRT\n count[tile] += 1\n tilemap[rw][cl] = tile\n\nfor index in range(len(count)):\n print(index, \" = \", count[index])\n\npygame.init()\nDISPLAYSURF = pygame.display.set_mode((MAPWIDTH*TILESIZE,MAPHEIGHT*TILESIZE + 50))\n\nINVFONT = pygame.font.Font('fonts/FreeSansBold.ttf',18)\nPLAYER = pygame.image.load('images/player.png').convert_alpha()\nplayerPos = [0,0]\n\ncloudx = -200\ncloudy = 0\n\nfpsClock = pygame.time.Clock()\n\npygame.display.set_caption('My Minecraft')\npygame.display.set_icon(pygame.image.load('images/player.png'))\n\nwhile True:\n DISPLAYSURF.fill(BLACK)\n \n for event in pygame.event.get():\n #print(event)\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == KEYDOWN:\n if (event.key == K_RIGHT) and playerPos[0] < MAPWIDTH - 1:\n playerPos[0] += 1\n elif (event.key == K_LEFT) and playerPos[0] > 0:\n playerPos[0] -= 1\n elif (event.key == K_UP) and playerPos[1] > 0:\n playerPos[1] -= 1\n elif (event.key == K_DOWN) and playerPos[1] < MAPHEIGHT - 1:\n playerPos[1] += 1\n elif event.key == K_SPACE:\n currentTile = tilemap[playerPos[1]][playerPos[0]]\n inventory[currentTile] += 1\n tilemap[playerPos[1]][playerPos[0]] = DIRT\n elif event.key == K_1:\n currentTile = tilemap[playerPos[1]][playerPos[0]]\n if inventory[DIRT] > 0:\n inventory[DIRT] -= 1\n tilemap[playerPos[1]][playerPos[0]] = DIRT\n inventory[currentTile] += 1\n elif event.key == K_2:\n currentTile = tilemap[playerPos[1]][playerPos[0]]\n if inventory[GRASS] > 0:\n inventory[GRASS] -= 1\n tilemap[playerPos[1]][playerPos[0]] = GRASS\n inventory[currentTile] += 1\n\n for row in range(MAPHEIGHT):\n for column in range(MAPWIDTH):\n DISPLAYSURF.blit(textures[tilemap[row][column]],\n (column*TILESIZE,row*TILESIZE))\n 
'''pygame.draw.rect(DISPLAYSURF,colours[tilemap[row][column]],\n (column*TILESIZE,row*TILESIZE,TILESIZE,TILESIZE))'''\n \n DISPLAYSURF.blit(PLAYER,(playerPos[0]*TILESIZE,playerPos[1]*TILESIZE))\n \n DISPLAYSURF.blit(textures[CLOUD].convert_alpha(),(cloudx,cloudy))\n cloudx += 3\n if cloudx > MAPWIDTH*TILESIZE:\n cloudy = random.randint(0,MAPHEIGHT*TILESIZE)\n cloudx = -200\n\n placePosition = 10\n barPosition = MAPHEIGHT*TILESIZE + 10\n for item in resources:\n DISPLAYSURF.blit(textures[item],(placePosition,barPosition))\n placePosition += 60\n textObj = INVFONT.render(str(inventory[item]), True, WHITE, BLACK)\n DISPLAYSURF.blit(textObj,(placePosition,barPosition + 10))\n placePosition += 50\n \n pygame.display.update()\n fpsClock.tick(24)\n",
"id": "9640808",
"language": "Python",
"matching_score": 0.5021955370903015,
"max_stars_count": 0,
"path": "mine2D.py"
}
] | 1.706014 |
jhonpj | [
{
"content": "from graphviz import Digraph\nimport theme\nimport util\nimport os\nfrom pathlib import Path\nimport datamap\nimport leetcode\nfrom svgpathtools import svg2paths\nfrom bs4 import BeautifulSoup\nimport platform_view\n\nclass LeetcodeView(platform_view.PlatformView):\n def __init__(self, leet):\n self.leet = leet\n self.m = None\n self.slug = \"leetcode\"\n\n def check_finish(self, title):\n return self.leet.check_finish(title)\n\n def get_problem(self, title):\n return self.m.problem_map[title]\n\n def check_flask(self, title):\n return self.leet.check_flask(title)\n\n def is_valid_title(self, title):\n return title.isdigit()\n\n def post_process(self, path):\n self.add_finish_icon(path)\n\n def generate_leetcode(self, leet, file, slug, out_name):\n c = util.get_file_content(util.get_map(file))\n m = datamap.DataMap(c)\n self.m = m\n g = Digraph('stones', encoding='utf-8')\n\n for n in m.nodes:\n if n.is_root:\n count = self.get_module_problem_count(m)\n #count获得问题数量\n label = \"%s(%s)\" % (n.name, str(count))\n # 根节点, 性格属性\n g.node(name=n.name, label=label, style='filled', target=\"_parent\", href=\"https://leetcode-cn.com/tag/\"+slug, \n fontsize='16',\n fillcolor=\"orangered\", color='lightgrey', fontcolor=\"white\", fontname=\"Microsoft YaHei\", shape='box')\n else:\n # 普通模块节点\n label = \"%s(%s)\" % (n.name, str(len(n.problems)))\n g.node(name=n.name, label=label, style='filled', fillcolor=\"lightslategray\", color='lightgrey', \n fontsize='14',\n fontcolor=\"white\", fontname=\"Microsoft YaHei\", shape='box')\n g.edge(n.parent, n.name, color=theme.color_arrow)\n\n # add problem\n last = \"\"\n for p in n.problems:\n title = leet.get_title(p.id)\n #level难易程度\n level = leet.get_level(p.id)\n problem = leet.get_problem(p.id)\n #problem题目内容\n idstr = str(p.id)\n if title == None:\n continue \n title = idstr+\". 
\"+title\n color = \"lightgrey\"\n\n if level == \"Easy\":\n color = \"greenyellow\"\n elif level == \"Medium\":\n color = \"orange\"\n elif level == \"Hard\":\n color = \"red\"\n else:\n print(\"unknown level:\", level)\n continue\n slug = problem['data']['question']['questionTitleSlug']\n #slug题目网址后缀,\n # 题目节点\n g.node(name=idstr, label=title, target=\"_parent\", href=\"https://leetcode-cn.com/problems/\"+slug, \n color=color, fontname=\"Microsoft YaHei\", fontsize='14', shape='box')\n\n if len(last) > 0:\n g.edge(last, idstr, color=theme.color_arrow)\n else:\n g.edge(n.name, idstr, color=theme.color_arrow)\n last = idstr\n\n g.format = 'svg'\n g.render(filename=util.get_images(out_name))\n os.remove(util.get_images(out_name))\n #增加完成标志\n self.post_process(util.get_images(out_name)+\".svg\")\n\ndef process():\n leet = leetcode.Leetcode()\n view = LeetcodeView(leet)\n leet.update_db()\n view.generate_leetcode(leet, \"leetcode/leetcode-dp.txt\", \"dynamic-programming\", \"leetcode_dp\")\n view.generate_leetcode(leet, \"leetcode/leetcode-tree.txt\", \"tree\", \"leetcode_tree\")\n view.generate_leetcode(leet, \"leetcode/leetcode-mini.txt\", \"\", \"leetcode_mini\")\n view.generate_leetcode(leet, \"leetcode/leetcode-linked-list.txt\", \"linked-list\", \"leetcode_linked_list\")\n view.generate_leetcode(leet, \"leetcode/leetcode-union-find.txt\", \"union-find\", \"leetcode_union_find\")\n view.generate_leetcode(leet, \"leetcode/leetcode-heap-stack-queue.txt\", \"\", \"leetcode_heap_stack_queue\")\n view.generate_leetcode(leet, \"leetcode/leetcode-geometry.txt\", \"geometry\", \"leetcode_geometry\")\n view.generate_leetcode(leet, \"leetcode/leetcode-binary-search.txt\", \"binary-search\", \"leetcode_binary_search\")\n leet.close_db()\n",
"id": "1037101",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "src/leetcode_view.py"
},
{
"content": "from graphviz import Digraph\r\nimport os\r\nfrom pathlib import Path\r\nimport time\r\n\r\ndef now():\r\n return round(time.time() * 1000)\r\n\r\ndef save_file_content(file, content):\r\n f = open(file, \"w\", encoding=\"utf-8\")\r\n f.write(content)\r\n f.close()\r\n\r\ndef get_file_content(file):\r\n f = open(file, \"r\", encoding=\"utf-8\")\r\n t = f.read()\r\n f.close()\r\n return t\r\n\r\ndef get_root(d, f):\r\n path = Path(os.path.abspath(__file__))\r\n return os.path.abspath(os.path.join(path.parent.parent, d, f))\r\n\r\ndef get_db(f):\r\n path = Path(os.path.abspath(__file__))\r\n #path.parent返回返回父级目录\r\n return os.path.abspath(os.path.join(path.parent.parent, \"db\", f))\r\n\r\ndef get_map(f):\r\n path = Path(os.path.abspath(__file__))\r\n return os.path.abspath(os.path.join(path.parent.parent, \"map\", f))\r\n\r\ndef get_images(f):\r\n path = Path(os.path.abspath(__file__))\r\n return os.path.abspath(os.path.join(path.parent.parent, \"images\", f))\r\n\r\ndef is_int(s):\r\n try: \r\n int(s)\r\n return True\r\n except ValueError:\r\n return False",
"id": "11456795",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "src/util.py"
}
] | 0 |
suranakritika | [
{
"content": "# import argparse, os\nimport cv2\nimport csv \nimport numpy as np\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers import Convolution2D, ELU, Flatten, Dropout, Dense, MaxPooling2D, Lambda, Conv2D\nfrom keras.preprocessing.image import img_to_array, load_img\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport glob\nimport os\n# %matplotlib inline\nimg = os.listdir(\"opts/data/IMG\")\n\ndef reader(path):\n lines = []\n with open(path) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines.append(line)\n return lines\n\ndef process_img(lines, row, correction):\n images = []\n measurements = []\n for line in lines:\n sourcepath = line[row]\n filename = sourcepath.split('\\\\')[-1]\n current_path = 'opts/data/IMG/' + filename\n image = mpimg.imread(current_path)\n images.append(image)\n measurement = (float(line[3]) + correction)\n measurements.append(measurement)\n return images, measurements\n\nlines = reader(\"opts/data/driving_log.csv\")\nimage_center, measure_center = process_img(lines, 0, 0.0)\nimage_left, measure_left = process_img(lines, 1, 0.2)\nimage_right, measure_right = process_img(lines, 2, -0.2)\n\ncam_images = []\nsteering_measure = []\ncam_images.extend(image_center)\ncam_images.extend(image_left)\ncam_images.extend(image_right)\nsteering_measure.extend(measure_center)\nsteering_measure.extend(measure_left)\nsteering_measure.extend(measure_right)\n\naugmented_image, augmented_measurement = [], []\nfor image, measurement in zip(cam_images, steering_measure):\n augmented_image.append(image)\n augmented_measurement.append(measurement)\n augmented_image.append(cv2.flip(image, 1))\n augmented_measurement.append(measurement * -1.0)\n\nX_train = np.array(augmented_image)\ny_train = np.array(augmented_measurement)\nprint(X_train.shape)\nprint(y_train.shape)\n\nmodel = Sequential()\nmodel.add(Lambda(lambda x: (x/255.0)-0.5, input_shape=(160, 320, 3)))\nmodel.add(Conv2D(24, 5, 5, activation='relu', subsample=(2, 2)))\nmodel.add(Conv2D(36, 5, 5, activation='relu', subsample=(2, 2)))\nmodel.add(Conv2D(48, 5, 5, activation='relu', subsample=(2, 2)))\nmodel.add(Conv2D(64, 3, 3, activation='relu'))\nmodel.add(Conv2D(64, 3, 3, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Flatten())\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dense(50, activation='relu'))\nmodel.add(Dense(10, activation='relu'))\nmodel.add(Dense(1))\nmodel.summary()\n# model.compile(loss = 'mse', optimizer = 'adam')\n# model.fit(X_train, y_train, validation_split = 0.2, shuffle = True, nb_epoch = 10)\n# model.save('model.h5')\n# exit()\n",
"id": "5085870",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "P4.py"
}
] | 0 |
BenLHetherington | [
{
"content": "import json\nimport os\nimport logging\nimport boto3\nfrom botocore.exceptions import ClientError\nimport pandas as pd\nimport openpyxl\nimport numpy as np\n\n# Handle logger\nlogger = logging.getLogger()\nlogger.setLevel(logging.os.environ['LOG_LEVEL'])\n\ndynamodb = boto3.resource('dynamodb')\naws_environment = os.environ['AWSENV']\n\nlogger.info(\"Finished handling variables, imports, and clients\")\n\n# Check if executing locally or on AWS, and configure DynamoDB connection accordingly.\n# https://github.com/ganshan/sam-dynamodb-local/blob/master/src/Person.py\nif aws_environment == \"AWS_SAM_LOCAL\":\n table = boto3.resource('dynamodb', endpoint_url=\"http://dynamodb-local:8000\").Table('visitorCount') #Local table name hard coded in entrypoint.sh for local dev\n logger.info(\"Using local Dynamo container for testing\")\nelse: # Running in AWS\n table = dynamodb.Table(os.environ['TABLE_NAME'])\n\nlogger.info(\"Finished conditional dynamodb logic\")\n\ndef getUserCount():\n try:\n logger.info(\"Querying DDB\")\n user_count_from_table = table.get_item(\n Key={'Count': 'Users'}\n )\n\n #Handle first use case where count doesn't exist yet\n if 'Item' in user_count_from_table:\n user_count = user_count_from_table['Item']['Number'] +1\n else: \n user_count = 1\n logger.info(user_count)\n\n return user_count\n\n #Catch known errors\n #ToDo: Add more handling here\n except ClientError as e:\n if e.response['Error']['Code'] == 'RequestLimitExceeded':\n logger.error('ERROR: ', e)\n else:\n logger.error(\"UNEXPECTED ERROR from DDB: %s\" % e)\n\ndef updateUserCount(count):\n try:\n logger.info(\"Updating DDB with new user count\")\n table.put_item(\n Item={\n 'Count': 'Users',\n 'Number': count\n }\n )\n\n #Catch known errors\n #ToDo: Add more handling here\n except ClientError as e:\n if e.response['Error']['Code'] == 'RequestLimitExceeded':\n logger.error('ERROR: ', e)\n else:\n logger.error(\"UNEXPECTED ERROR from DDB: %s\" % e)\n\n\ndef passiveGrowth(BEGdate, ENDdate, RID):\n \n #Reads Data In\n sarsales = pd.read_excel(r'data.xlsx', sheet_name='sarsales')\n #sarsalesColumns = sarsales.columns.tolist()\n sarsales=sarsales.to_numpy()\n ElastRSector = pd.read_excel(r'data.xlsx', sheet_name='Elasticites')\n ElastRSectorColumns = ElastRSector.columns.tolist()\n ElastRSector=ElastRSector.to_numpy()\n EFactors = pd.read_excel(r'data.xlsx', sheet_name='Econ Factors data')\n EFactors=EFactors.to_numpy()\n SizeSarsales = sarsales.shape[0]\n SizeEFactors = EFactors.shape[0]\n SizeElastRSector = ElastRSector.shape[0]\n WidthElastRSector = ElastRSector.shape[1]\n\n # logger.info(EFactors)\n logger.info(\"SizeSarsales: \"+str(SizeSarsales))\n\n #Declares a few variables as set up\n TRBID = RID * 1000000 + BEGdate\n TREID = RID * 1000000 + ENDdate\n TotalEconomicFactor = 0\n factors = []\n\n # logger.info(\"SizeSarsales:\",str(SizeSarsales))\n\n #Gets rsale for start and end\n RSaleBeg=0\n RSaleEnd=0\n i=0\n while i < SizeSarsales:\n if sarsales[i][2] == TRBID:\n RSaleBeg = sarsales[i][4]\n if sarsales[i][2] == TREID:\n RSaleEnd = sarsales[i][4]\n if ((RSaleBeg != 0) and (RSaleEnd != 0)):\n break\n i=i+1\n\n #Sets TGRSales\n TGRSales = (RSaleEnd - RSaleBeg)/RSaleBeg\n\n #Gets index of interest from EFactors\n i=0\n while i < SizeEFactors:\n if EFactors[i][0] == BEGdate:\n EFactorsIndex1 = i\n if EFactors[i][0] == ENDdate:\n EFactorsIndex2 = i\n i=i+1\n\n ##Finds none zero values in EfactorsIndex1 and EfactorsIndex2 and calculates factors\n ##----------assumes its sorted (ie column[x] is the 
same factor in EFactors and ElastRSector\n ##Generates index we care about from ElastRSector\n i = 0\n while i < SizeElastRSector:\n if ElastRSector[i][0] == RID:\n ElastRSectorIndex = i\n #finds none-zero values\n j=2\n while j < WidthElastRSector:\n if ElastRSector[i][j] != 0:\n #None zero Column\n factors.append(j)\n #Factor Name\n #factors.append(ElastRSector[0][j])\n factors.append(ElastRSectorColumns[j])\n temp1=ElastRSector[i][j]\n #Elastisity\n factors.append(ElastRSector[i][j])\n temp2=((EFactors[EFactorsIndex2][j-1] - EFactors[EFactorsIndex1][j-1]) / EFactors[EFactorsIndex1][j-1])\n #growth\n factors.append((EFactors[EFactorsIndex2][j-1] - EFactors[EFactorsIndex1][j-1]) / EFactors[EFactorsIndex1][j-1])\n #Impact\n factors.append(temp1*temp2)\n #Begining factor\n factors.append(EFactors[EFactorsIndex1][j-1])\n #Ending factor\n factors.append(EFactors[EFactorsIndex2][j - 1])\n TotalEconomicFactor = TotalEconomicFactor + temp1 * temp2\n j=j+1\n if TotalEconomicFactor != 0:\n break\n i=i+1\n\n factors = np.reshape(factors, (-1, 7))\n Sizefactors = factors.shape[0]\n PassiveGrowth = TotalEconomicFactor / TGRSales\n return PassiveGrowth, TotalEconomicFactor, TGRSales, RSaleBeg, RSaleEnd;\n\ndef extract_child_from_body_of_apg_event(event, child_item, mandatory): \n try:\n passed_value = event['multiValueQueryStringParameters'][child_item][0]\n return passed_value\n except (KeyError, json.decoder.JSONDecodeError, TypeError) as e: #If passed value is empty then throw an error\n if(mandatory):\n logger.error(f\"Could not find value for: {child_item}\")\n raise 'ERROR: Must pass in all required values!'\n\ndef lambda_handler(event, context):\n RID=extract_child_from_body_of_apg_event(event, 'RID', mandatory=True)\n StartDate=extract_child_from_body_of_apg_event(event, 'StartDate', mandatory=True)\n EndDate=extract_child_from_body_of_apg_event(event, 'EndDate', mandatory=True)\n StartDate=StartDate.replace('-','')\n EndDate=EndDate.replace('-','')\n RID=int(RID)\n StartDate=int(StartDate)\n EndDate=int(EndDate)\n logger.info(\"RID: \"+str(RID))\n logger.info(\"StartDate: \"+str(StartDate))\n logger.info(\"EndDate: \"+str(EndDate))\n logger.info(type(EndDate))\n\n user_count = getUserCount()\n updateUserCount(user_count)\n passiveGrowthVar, TotalEconomicFactor, TotalSalesGrowth, RSaleBeg, RSaleEnd = passiveGrowth(StartDate, EndDate, RID)\n logger.info(\"passiveGrowthVar: \"+str(passiveGrowthVar))\n logger.info(\"BeginningValue: \"+str(RSaleBeg))\n logger.info(\"EndingValue: \"+str(RSaleEnd))\n logger.info(\"TotalSalesGrowth: \"+str(TotalSalesGrowth))\n logger.info(\"InfluencerEconomicFactorImpact: \"+str(TotalEconomicFactor))\n logger.info(\"User count: \"+str(user_count))\n\n return {\n \"statusCode\": 200,\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Credentials\": \"true\",\n \"Access-Control-Allow-Headers\": 'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token',\n \"Content-Type\": \"application/json\"\n },\n \"body\": json.dumps({\n \"User count\": str(user_count),\n \"passiveGrowthVar\": str(passiveGrowthVar),\n \"BeginningValue\": str(RSaleBeg),\n \"EndingValue\": str(RSaleEnd),\n \"TotalSalesGrowth\": str(TotalSalesGrowth),\n \"InfluencerEconomicFactorImpact\": str(TotalEconomicFactor)\n }),\n }\n",
"id": "7017372",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "backend/hello_world/app.py"
}
] | 0 |
egorkis | [
{
"content": "from typing import Optional\n\nfrom vkbottle.rule import FromMe\nfrom vkbottle.user import Blueprint, Message\n\nfrom logger import logger_decorator\nfrom objects import RolePlayCommand, Database\nfrom utils import edit_message\n\nuser = Blueprint(\n name='role_play_commands_blueprint'\n)\n\n\nasync def get_role_play_message(\n message: Message,\n role_play_command: RolePlayCommand,\n user_id: Optional[int] = None,\n call_all: bool = False\n) -> str:\n called_user = (await message.api.users.get(fields=[\"sex\"]))[0]\n\n pattern = role_play_command.formatter_woman if called_user.sex == 1 else role_play_command.formatter_man\n\n first_user = f\"[id{called_user.id}|{called_user.first_name} {called_user.last_name}]\"\n if call_all:\n return pattern.format(\n first_user=first_user,\n second_user=role_play_command.all_ending\n )\n\n second_user = (await message.api.users.get(user_ids=user_id, name_case=role_play_command.gen))[0]\n last_user = f\"[id{second_user.id}|{second_user.first_name} {second_user.last_name}]\"\n return pattern.format(\n first_user=first_user,\n second_user=last_user\n )\n\n\nall_role_play_cmd = [\n \"<service_prefix:service_prefix> <role_play_command:role_play_command> всех\",\n \"<service_prefix:service_prefix> <role_play_command:role_play_command> всем\",\n]\n\n\n@user.on.message_handler(FromMe(), text=\"<service_prefix:service_prefix> рп\")\nasync def show_rp_commands(message: Message, **kwargs):\n db = Database.get_current()\n text = \"📃 Доступные РП-команды:\\n\"\n index = 1\n for rp_cmd in db.role_play_commands:\n text += f\"{index}. {rp_cmd.name}\\n\"\n index += 1\n await edit_message(\n message,\n text\n )\n\n\n@user.on.message_handler(FromMe(), text=all_role_play_cmd)\n@logger_decorator\nasync def role_play_command_wrapper(\n message: Message,\n role_play_command: RolePlayCommand,\n **kwargs\n):\n await edit_message(\n message,\n await get_role_play_message(\n message,\n role_play_command,\n call_all=True\n )\n )\n\n\nuser_id_cmd = \"<service_prefix:service_prefix> <role_play_command:role_play_command> [id<user_id:int>|<name>]\"\n\n\n@user.on.message_handler(FromMe(), text=user_id_cmd)\n@logger_decorator\nasync def role_play_command_wrapper(\n message: Message,\n role_play_command: RolePlayCommand,\n user_id: int,\n **kwargs\n):\n await edit_message(\n message,\n await get_role_play_message(\n message,\n role_play_command,\n user_id=user_id\n )\n )\n\n\nno_user_id_cmd = \"<service_prefix:service_prefix> <role_play_command:role_play_command>\"\n\n\n@user.on.message_handler(FromMe(), text=no_user_id_cmd)\n@logger_decorator\nasync def role_play_command_wrapper(\n message: Message,\n role_play_command: RolePlayCommand,\n **kwargs\n):\n user_id = None\n if message.reply_message:\n user_id = message.reply_message.from_id\n if message.fwd_messages:\n user_id = message.fwd_messages[0].from_id\n\n if not user_id:\n return\n\n if user_id < 0:\n return\n\n await edit_message(\n message,\n await get_role_play_message(\n message,\n role_play_command,\n user_id=user_id\n )\n )\n",
"id": "1762550",
"language": "Python",
"matching_score": 2.96376371383667,
"max_stars_count": 1,
"path": "commands/role_play_commands.py"
},
{
"content": "from vbml.blanket import validator\nfrom objects import Database\n\n__all__ = (\n 'alias',\n 'role_play_command',\n 'self_prefix',\n 'duty_prefix',\n 'service_prefix',\n)\n\n\n@validator\ndef alias(value: str):\n db = Database.get_current()\n for alias_ in db.aliases:\n if value.lower() == alias_.command_from:\n return alias_\n\n\n@validator\ndef role_play_command(value: str):\n db = Database.get_current()\n for rp_cmd in db.role_play_commands:\n if value.lower() == rp_cmd.name.lower():\n return rp_cmd\n\n\n@validator\ndef self_prefix(value: str):\n db = Database.get_current()\n if value.lower() in db.self_prefixes:\n return value\n\n\n@validator\ndef dd_prefix(value: str):\n db = Database.get_current()\n if value.lower() in db.dd_prefix:\n return value\n\n\n@validator\ndef dd_value(value: str):\n db = Database.get_current()\n if db.dd_prefix in value.lower():\n try:\n return int(value.lower().replace(db.dd_prefix, ''))\n except:\n ...\n\n\n@validator\ndef duty_prefix(value: str):\n db = Database.get_current()\n if value.lower() in db.duty_prefixes:\n return value\n\n\n@validator\ndef service_prefix(value: str):\n db = Database.get_current()\n if value.lower() in db.service_prefixes:\n return value\n\n\n@validator\ndef repeater_word(value: str):\n db = Database.get_current()\n if value.startswith(db.repeater_word):\n return value.replace(db.repeater_word, '', 1)\n\n\n@validator\ndef yes_or_no(value: str):\n if value in ('да', '+', '1'):\n return True\n elif value in ('нет', '-', '0'):\n return False\n#",
"id": "9735955",
"language": "Python",
"matching_score": 0.34291425347328186,
"max_stars_count": 0,
"path": "validators.py"
},
{
"content": "from vkbottle.rule import FromMe\nfrom vkbottle.user import Blueprint, Message\n\nfrom logger import logger_decorator\nfrom objects import Database\nfrom rules import DeleteNotifyRule\nfrom utils import edit_message\n\nuser = Blueprint(\n name='delete_notify_blueprint'\n)\n\n\n@user.on.message_handler(DeleteNotifyRule())\n@logger_decorator\nasync def delete_notify_wrapper(message: Message):\n await message.api.messages.delete(message_ids=[message.id])\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> -уведы\")\n@logger_decorator\nasync def activate_delete_all_notify_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n db.delete_all_notify = True\n db.save()\n await edit_message(\n message,\n \"✅ Удаление уведомлений включено\"\n )\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> +уведы\")\n@logger_decorator\nasync def deactivate_delete_all_notify_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n db.delete_all_notify = False\n db.save()\n await edit_message(\n message,\n \"✅ Удаление уведомлений отключено\"\n )\n",
"id": "10615338",
"language": "Python",
"matching_score": 2.804447650909424,
"max_stars_count": 1,
"path": "commands/delete_notify.py"
},
{
"content": "from vkbottle.user import Blueprint, Message\n\n\nimport rules\nfrom logger import logger_decorator\nfrom objects import Database\nfrom utils import send_request\nfrom collections import namedtuple\nfrom typing import Optional, NamedTuple\nimport re\nfrom utils import edit_message\nfrom vkbottle.rule import FromMe\n\nuser = Blueprint(\n name='bio_wars_blueprint'\n)\n\n\nRegexFindAllBase = namedtuple('RegexFindAll', ['regex', 'groups_map'])\n\n\nclass RegexFindAll(RegexFindAllBase):\n\n def match(self, text: str) -> Optional[NamedTuple]:\n re_result = re.findall(self.regex, text)\n if re_result:\n if isinstance(re_result[0], tuple):\n return namedtuple('RegexFindAllResult', self.groups_map)(*[str(res) for res in re_result[0]])\n else:\n return namedtuple('RegexFindAllResult', self.groups_map)(str(re_result[0]))\n return None\n\n\n\nUSER_ID_REGEX = RegexFindAll(\n re.compile(\n r'Организатор заражения: \\[id(?P<user_id>\\d+)',\n flags=re.MULTILINE & re.IGNORECASE\n ),\n ['user_id']\n)\n\n\n@user.on.message_handler(rules.ContainsRule(['Служба безопасности лаборатории']))\n@logger_decorator\nasync def bio_reply_handler(message: Message):\n if message.from_id > 0:\n return\n\n db = Database.get_current()\n if not db.bio_reply:\n return\n\n if str(await message.api.user_id) not in message.text:\n return\n\n user = USER_ID_REGEX.match(message.text)\n if user:\n return f\"Заразить @id{user.user_id}\"\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> -заражение\")\n@logger_decorator\nasync def activate_bio_reply_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n db.bio_reply = False\n db.save()\n await edit_message(\n message,\n \"✅ Заражение в ответ отключено\"\n )\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> +заражение\")\n@logger_decorator\nasync def deactivate_bio_reply_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n db.bio_reply = True\n db.save()\n await edit_message(\n message,\n \"✅ Заражение в ответ включено\"\n )\n",
"id": "11265200",
"language": "Python",
"matching_score": 2.7498974800109863,
"max_stars_count": 0,
"path": "commands/bio_wars.py"
},
{
"content": "from vkbottle.rule import FromMe\nfrom vkbottle.user import Blueprint, Message\n\nfrom logger import logger_decorator\nfrom objects import Database\nfrom utils import send_request\n\nuser = Blueprint(\n name='self_signal_blueprint'\n)\n\n\n@user.on.message_handler(FromMe(), text='<prefix:self_prefix> <signal>')\n@logger_decorator\nasync def self_signal(message: Message, prefix: str, signal: str):\n db = Database.get_current()\n message_ = message.dict()\n __model = {\n \"user_id\": message_['from_id'],\n \"method\": \"lpSendMySignal\",\n \"secret\": db.secret_code,\n \"message\": {\n \"conversation_message_id\": message_['conversation_message_id'],\n \"from_id\": message_['from_id'],\n \"date\": message.date,\n \"text\": prefix + ' ' + signal,\n \"peer_id\": message.peer_id\n },\n \"object\": {\n \"chat\": None,\n \"from_id\": message_['from_id'],\n \"value\": prefix + ' ' + signal,\n \"conversation_message_id\": message_['conversation_message_id']\n },\n \"vkmessage\": message_\n }\n\n await send_request(__model)\n",
"id": "1673206",
"language": "Python",
"matching_score": 1.8522688150405884,
"max_stars_count": 1,
"path": "commands/self_signal.py"
},
{
"content": "from typing import Optional\n\nfrom vkbottle.rule import ChatActionRule, FromMe\nfrom vkbottle.user import Blueprint, Message\n\nfrom logger import logger_decorator\nfrom objects import Database, ChatEnterModel\nfrom rules import ChatEnterRule\nfrom utils import edit_message\n\nuser = Blueprint(\n name='add_to_friends_on_chat_enter'\n)\n\n\n@user.on.chat_message(ChatActionRule([\"chat_invite_user\", \"chat_invite_user_by_link\"]), ChatEnterRule())\n@logger_decorator\nasync def chat_enter_wrapper(message: Message):\n db = Database.get_current()\n model = None\n for chat_enter_model in db.add_to_friends_on_chat_enter:\n if chat_enter_model.peer_id == message.peer_id:\n model = chat_enter_model\n try:\n await user.api.friends.add(user_id=message.action.member_id)\n except:\n pass\n return model.hello_text\n\n\n@user.on.chat_message(FromMe(), text=[\n \"<prefix:service_prefix> +добавление\",\n \"<prefix:service_prefix> +добавление <hello_text>\"\n])\n@logger_decorator\nasync def add_chat_enter_model_wrapper(message: Message, hello_text: Optional[str] = None, **kwargs):\n db = Database.get_current()\n for i in range(len(db.add_to_friends_on_chat_enter)):\n if db.add_to_friends_on_chat_enter[i].peer_id == message.peer_id:\n db.add_to_friends_on_chat_enter[i].hello_text = hello_text\n db.save()\n await edit_message(\n message,\n \"✅ Приветственный текст обновлен\"\n )\n return\n db.add_to_friends_on_chat_enter.append(\n ChatEnterModel(dict(peer_id=message.peer_id, hello_text=hello_text))\n )\n db.save()\n await edit_message(\n message,\n \"✅ Добавление новичков в друзья в этом чате включено\"\n )\n return\n\n\n@user.on.chat_message(FromMe(), text=[\n \"<prefix:service_prefix> -добавление\",\n])\n@logger_decorator\nasync def add_chat_enter_model_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n model = None\n for i in range(len(db.add_to_friends_on_chat_enter)):\n if db.add_to_friends_on_chat_enter[i].peer_id == message.peer_id:\n model = db.add_to_friends_on_chat_enter[i]\n if model is None:\n await edit_message(\n message,\n \"⚠ Добавление новичков в друзья в этом чате не настроено\"\n )\n return\n db.add_to_friends_on_chat_enter.remove(model)\n db.save()\n await edit_message(\n message,\n \"✅ Добавление новичков в друзья в этом чате выключено\"\n )\n return\n",
"id": "855844",
"language": "Python",
"matching_score": 4.019765853881836,
"max_stars_count": 1,
"path": "commands/add_to_friends_on_chat_enter.py"
},
{
"content": "from vkbottle.rule import ChatActionRule, FromMe\nfrom vkbottle.user import Blueprint, Message\n\nfrom logger import logger_decorator\nfrom objects import Database\nfrom utils import edit_message\n\nuser = Blueprint(\n name='auto_exit_from_chat_blueprint'\n)\n\n\nasync def set_auto_exit(\n db: Database,\n auto_exit: bool = None,\n delete_chat: bool = None,\n black_list: bool = None\n):\n db.auto_exit_from_chat = auto_exit if auto_exit is not None else db.auto_exit_from_chat\n db.auto_exit_from_chat_delete_chat = delete_chat if delete_chat is not None else db.auto_exit_from_chat_delete_chat\n db.auto_exit_from_chat_add_to_black_list = (\n black_list if black_list is not None else db.auto_exit_from_chat_add_to_black_list\n )\n db.save()\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> +автовыход\")\n@logger_decorator\nasync def auto_exit_setting_on_exit_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n await set_auto_exit(db, True, False)\n await edit_message(message, \"✅ Настройка изменена\")\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> -автовыход\")\n@logger_decorator\nasync def auto_exit_setting_on_exit_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n await set_auto_exit(db, False, False)\n await edit_message(message, \"✅ Настройка изменена\")\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> автовыход +чс\")\n@logger_decorator\nasync def auto_exit_setting_on_exit_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n await set_auto_exit(db, black_list=True)\n await edit_message(message, \"✅ Настройка изменена\")\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> автовыход -чс\")\n@logger_decorator\nasync def auto_exit_setting_on_exit_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n await set_auto_exit(db, black_list=False)\n await edit_message(message, \"✅ Настройка изменена\")\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> автовыход +удаление\")\n@logger_decorator\nasync def auto_exit_setting_on_exit_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n await set_auto_exit(db, delete_chat=True)\n await edit_message(message, \"✅ Настройка изменена\")\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> автовыход -удаление\")\n@logger_decorator\nasync def auto_exit_setting_on_exit_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n await set_auto_exit(db, delete_chat=False)\n await edit_message(message, \"✅ Настройка изменена\")\n\n\n@user.on.chat_message(ChatActionRule(\"chat_invite_user\"))\n@user.on.chat_message(ChatActionRule(\"chat_invite_user_by_link\"))\n@logger_decorator\nasync def to_chat_wrapper(message: Message):\n if message.action.member_id == await message.api.user_id:\n db = Database.get_current()\n if message.action.type == \"chat_invite_user\":\n if db.disable_notifications:\n await user.api.account.set_silence_mode(\n time=-1,\n peer_id=message.peer_id,\n sound=0\n )\n if db.auto_exit_from_chat:\n await message.api.messages.remove_chat_user(chat_id=message.chat_id, member_id=await message.api.user_id)\n if db.auto_exit_from_chat_delete_chat:\n await message.api.messages.delete_conversation(peer_id=message.peer_id)\n if db.auto_exit_from_chat_add_to_black_list:\n await message.api.account.ban(owner_id=message.from_id)\n else:\n if db.disable_notifications:\n await user.api.account.set_silence_mode(\n time=-1,\n 
peer_id=message.peer_id,\n sound=0\n )\n",
"id": "8573285",
"language": "Python",
"matching_score": 4.081554889678955,
"max_stars_count": 1,
"path": "commands/auto_exit_from_chat.py"
},
{
"content": "from vkbottle.rule import ChatActionRule, FromMe\nfrom vkbottle.user import Blueprint, Message\n\nfrom logger import logger_decorator\nfrom objects import Database\nfrom utils import edit_message\n\nuser = Blueprint(\n name='disable_notifications_blueprint'\n)\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> выключать уведы\")\n@logger_decorator\nasync def allow_disable_notifications_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n db.disable_notifications = True\n db.save()\n await edit_message(message, \"✅ Настройка изменена\")\n\n\n@user.on.message_handler(FromMe(), text=\"<prefix:service_prefix> не выключать уведы\")\n@logger_decorator\nasync def deny_disable_notifications_wrapper(message: Message, **kwargs):\n db = Database.get_current()\n db.disable_notifications = False\n db.save()\n await edit_message(message, \"✅ Настройка изменена\")\n\n",
"id": "11387529",
"language": "Python",
"matching_score": 1.4089999198913574,
"max_stars_count": 1,
"path": "commands/disable_notifications.py"
},
{
"content": "from objects.json_orm.database import Database\nfrom objects.json_orm.errors import *\n",
"id": "4214956",
"language": "Python",
"matching_score": 0,
"max_stars_count": 2,
"path": "objects/json_orm/__init__.py"
},
{
"content": "from commands.members_manager import ignored\nfrom commands.members_manager import ignored_global\nfrom commands.members_manager import muted\nfrom commands.members_manager import trusted\n\nusers_bp = (\n ignored.user,\n ignored_global.user,\n muted.user,\n trusted.user,\n)\n",
"id": "12692107",
"language": "Python",
"matching_score": 0,
"max_stars_count": 1,
"path": "commands/members_manager/__init__.py"
}
] | 2.301083 |
jbmarcos | [
{
"content": "# mostra os tipos de input -=-\n\na = input ('Escreva algo:')\nprint ('As informações sobre o que vc digitou são as seguintes:')\nprint('Só tem espaços? ', a.isspace())\nprint('É alfabético?', a.isalpha())\nprint('É um númro?', a.isnumeric())\nprint('É alfanumerico?', a.isalnum())\nprint('Está maiuscula?', a.isupper())\nprint('Está em minuscula?', a.islower())\nprint('Está capitalizada?,', a.istitle())\n\n",
"id": "9310956",
"language": "Python",
"matching_score": 0.8280981779098511,
"max_stars_count": 0,
"path": "ex004 # mostra os tipos de input .py"
},
{
"content": "# imc if elif\n\nprint(' ')\npe = float(input('Qual é o seu peso? (Kg) '))\nal = float(input('Qual sua altura? (m) '))\nimc = pe/(al ** 2)\nprint(' ')\nprint(' O IMC dessa pessoa é de {:.1f}'.format(imc))\n\nif imc < 18.5:\n print(' Você está ABAIXO do peso normal.')\nelif 18.5 <= imc < 25:\n print(' PARABÉNS!!! Você está na faixa de peso ADEQUADO.')\nelif 25 <= imc < 30:\n print(' Você está em SOBREPESO.')\nelif 30 <= imc < 40:\n print(' Você está em OBESIDADE.')\nelif imc >= 40:\n print(' Você está em OBESIDADE MORBIDA, cuidado!!!')\n",
"id": "12503780",
"language": "Python",
"matching_score": 1.0831397771835327,
"max_stars_count": 0,
"path": "ex043 # imc if elif .py"
},
{
"content": "# valores extremos min e max for if\n\nprint('')\nmaior = 0\nmenor = 0\nfor p in range(1, 6):\n peso = float(input('Peso da {}ª pessoa: '.format(p)))\n if p == 1:\n maior = peso\n menor = peso\n else:\n if peso > maior:\n maior = peso\n if peso < menor:\n menor = peso\nprint('')\nprint('O maior peso foi {} Kg e o menor foi {} Kg.'.format(maior, menor))",
"id": "11120974",
"language": "Python",
"matching_score": 1.488588571548462,
"max_stars_count": 0,
"path": "ex055 # valores extremos min e max for if .py"
},
{
"content": "# reconhecer valores. maior e menor -=-\n\na = int(input('Didite três valores. Primeito valor: '))\nb = int(input('Segundo valor: '))\nc = int(input('Terceiro valor: '))\n\nmenor = a\nif b<a and b<c:\n menor = b\nif c<a and c<b:\n menor = c\n\nmaior = a\nif b>a and b>c:\n maior = b\nif c>a and c>b:\n maior = c\n\nprint('O menor valor é {} '.format(menor))\nprint('O maior valor é {} '.format(maior))\n",
"id": "11966703",
"language": "Python",
"matching_score": 1.4723706245422363,
"max_stars_count": 0,
"path": "ex033 # reconhecer valores. maior e menor -=-.py"
},
{
"content": "# selecionando o maior valor if else\n\nprint(' ')\nnum1 = int(input(' Primeiro número inteiro: '))\nnum2 = int(input(' Segundo número interiro: ' ))\nprint(' ')\nif num1 > num2:\n print(' O primeiro valor é o maior!')\nelif num1 == num2:\n print( 'Os dois valores são iguais!')\nelse:\n print(' O segundo valor é o maior!')",
"id": "1025077",
"language": "Python",
"matching_score": 1.0333664417266846,
"max_stars_count": 0,
"path": "ex038 # selecionando o maior valor if else .py"
},
{
"content": "#PA soma de 10 termos (n termo da PA) for\n\nprint(' ')\nprimeiro = int(input(('Primeiro termo: ')))\nrazao = int(input('Razão '))\ndecimo = primeiro + (10 - 1 ) * razao #(enésimo termo da PA)\nfor c in range(primeiro,decimo + razao, razao):\n print('{}'.format(c), end='-> ')\nprint('ACABOU')",
"id": "3792438",
"language": "Python",
"matching_score": 0.7796396017074585,
"max_stars_count": 0,
"path": "ex051 # pa soma de 10 termos (n termo da PA) for .py"
},
{
"content": "# Tratando valores v1 while\n\nnum = cont = soma = 0\nwhile num != 999:\n num = int(input('[Pare digitando 999] Digite um número: '))\n soma += num\n cont += 1\nprint('Você digitou {} números e a soma entre eles foi {}.'.format(cont-1, soma-999))\n\n\n'''num = cont = soma = 0\nnum = int(input('[Pare digitando 999] Digite um número: '))\nwhile num != 999:\n soma += num\n cont += 1\n num = int(input('[Pare digitando 999] Digite um número: '))\nprint('Você digitou {} números e a soma entre eles foi {}.'.format(cont, soma))'''\n",
"id": "265209",
"language": "Python",
"matching_score": 0.9445971846580505,
"max_stars_count": 0,
"path": "ex064 # Tratando valores v1 while .py"
},
{
"content": "\n\nn = int(input('Quantos valores quer ver dentro da sequência de Fibroonacci? '))\nv1 = 0\nv2 = 1\nprint('{}-> {}'.format(v1, v2), end='')\ncont = 3\nwhile cont <= n:\n v3 = v1 + v2\n print('-> {}'.format(v3))#, end='')\n v1 = v2\n v2 = v3\n cont += 1\nprint('-> FIM')",
"id": "10611251",
"language": "Python",
"matching_score": 0.9521850943565369,
"max_stars_count": 0,
"path": "ex063 # Fibronacci while .py"
},
{
"content": "# fatorial while\n\nnum = int(input(\"Digite um número: \"))\ncont = num\nfact = 1\nwhile cont>0:\n print('{} '.format(cont), end='')\n print(' x ' if cont > 1 else ' = ', end='')\n fact = fact * cont # fact *= cont\n cont = cont - 1 #cont -= 1\nprint('\\033[34m{}\\033[m'.format(fact))\n\n'''from math import factorial\nnum = int(input(\"Digite um número: \"))\nfat = factorial(num)\nprint('O Fatorias de {} é \\033[33m{}\\033[m.'.format(num, fat))'''",
"id": "11164045",
"language": "Python",
"matching_score": 1.1914596557617188,
"max_stars_count": 0,
"path": "ex060 # fatorial while .py"
},
{
"content": "# calcula o cumprimento da hipotenusa\n\n'''opo = float(input('Digite o valor do cateto oposto: '))\nadj = float(input('Digite o valor do cateto adjacente: '))\nhip = (opo ** 2 + adj ** 2) ** (1/2)\nprint('O cumprimento da hipotenusa é: {:.2f} '.format(hip))'''\n\nimport math\nopo = float(input('Digite o valor do cateto oposto: '))\nadj = float(input('Digite o valor do cateto adjacente: '))\nhip = math.hypot(opo, adj)\nprint('O cumprimento da hipotenusa é: {:.2f} '.format(hip))",
"id": "4911165",
"language": "Python",
"matching_score": 0.9892281889915466,
"max_stars_count": 0,
"path": "ex017 # calcula o cumprimento da hipotenusa.py"
},
{
"content": "# porcentagens multiplas\n\nvalor = float(input('Qual o valor do salário? '))\nif valor <= 1250:\n novo = valor + (valor*15/100)\nelse:\n novo = valor + (valor*10/100)\nprint('O salário era {:.2f} e aumentou para {:2.2f}'.format(valor, novo))",
"id": "4532116",
"language": "Python",
"matching_score": 1,
"max_stars_count": 0,
"path": "ex034 # porcentagens multiplas.py"
},
{
"content": "# reproduz um arquivo mp3 -=-\n\n'''import pygame\npygame.init()\npygame.mixer.music.load('ex021.mp3')\npygame.mixer.music.play()\npygame.event.wait()\ninput('CURTE O SOM PIVETE!')'''\n\n\n'''from pygame import mixer\nmixer.init()\nmixer.music.load('ex021.mp3')\nmixer.music.play()\ninput('CURTE O SOM PIVETE!')'''\n\n\n'''import pygame\n# Inicializando o mixer PyGame\npygame.mixer.init()\n# Iniciando o Pygame\npygame.init()\npygame.mixer.music.load('ex021.mp3')\npygame.mixer.music.play(loops=0, start=0.0)\npygame.event.wait()\ninput('CURTE O SOM PIVETE!')'''\n\n\nimport playsound as playsound\nplaysound.playsound('ex021 # musica para usar no 21 .mp3')",
"id": "6425264",
"language": "Python",
"matching_score": 0.7479659914970398,
"max_stars_count": 0,
"path": "ex021 # reproduz um arquivo mp3 -=-.py"
},
{
"content": "# bin oct hex fatiamento str if elif else\n\nnum = int(input(' Digite um númeto inteiro: '))\nprint('''Escolha a base de conversção que deseja: \n[ 1 ] para Binário\n[ 2 ] para Octal\n[ 3 ] para Hexadecimal''')\nopção = int(input(' Sua opção: '))\nif opção == 1:\n print(' {} convertido para Binário é {} '.format(num, bin(num) [2:]))\nelif opção == 2:\n print(' {} convertido para Octal é {}'.format(opção, oct(num) [2:]))\nelif opção == 3:\n print(' {} convertido para Hexadecimal é {}'.format(opção, hex(num) [2:]))\nelse:\n print(' Opção inválida. Tente novamente.')",
"id": "5190138",
"language": "Python",
"matching_score": 1.6487364768981934,
"max_stars_count": 0,
"path": "ex037 # bin oct hex fatiamento str if elif else .py"
},
{
"content": "# calculadora 2.0 for range\nprint(' ')\nnum = int(input('Digite um número para cer sua tabuada: '))\nprint(' ')\nfor c in range(1, 11):\n print('{} x {:2} = {}'.format(num, c, num*c))",
"id": "5004142",
"language": "Python",
"matching_score": 1.5130560398101807,
"max_stars_count": 0,
"path": "ex049 # calculadora 2.0 for range .py"
},
{
"content": "# par ou impar ? if: else:\n\nnum = int(input('Digite um nímero: '))\nresultado = num % 2\nif resultado == 0:\n print(' O número {} é PAR!'.format(num))\nelse:\n print('O número {} é ÍMPAR'.format(num))",
"id": "637995",
"language": "Python",
"matching_score": 1.0320054292678833,
"max_stars_count": 0,
"path": "ex030 # par ou impar if else.py"
},
{
"content": "# encontrando U D C e M -=- (com ajuda)\n\nn = int(input('Digite um número entre 0 e 9999: '))\nn2 = str(int(10000 + n))\nprint('O número {} possui, {} milhares.'.format(n, n2[1]))\nprint('O número {} possui, {} centenas. '.format(n, n2[2]))\nprint('O número {} possui, {} dezenas. '.format(n, n2[3]))\nprint('O número {} possui, {} unidades.'.format(n, n2[4]))\n\n\n\"\"\"esse abaixo tb funcionou mas n entendi como\nn = int(input('Digite um número entre 0 e 9999:'))\nns = ('{:4}'.format(n))\nnss = str(ns.replace(' ','0'))\nprint('''Pelo metodo de manipulação de string:\nNúmero {}\nMilhares: {}\nCentenas: {}\nDezenas: {}\nUnidades: {}'''.format(nss, nss[0], nss[1], nss[2], nss[3]))'''\"\"\"\n",
"id": "5508526",
"language": "Python",
"matching_score": 0.4562291204929352,
"max_stars_count": 0,
"path": "ex023 # encontrando U D C e M -=- (com ajuda).py"
},
{
"content": "# converter Rs em U$ -=-\n\nrs = float(input('Quantos teais vc tem na sua carteira? '))\nus = rs / 3.27\nprint('Com esse valor de {} reais vc pode comprar {:.2f} Dólares.'.format(rs, us))\n",
"id": "5113671",
"language": "Python",
"matching_score": 0.7991971373558044,
"max_stars_count": 0,
"path": "ex010 # converter Rs em U$ -=-.py"
},
{
"content": "# financiamento de uma casa if else -=-\n\nprint(' ')\ncasa = float(input(' Valor da casa: R$ '))\nganha = float(input(' Qual o valor mensal ganho pelo comprador? '))\nanos = int(input(' Quantos anos de financiamento? '))\nprestacao = casa / (anos * 12)\nprint(' ')\nprint(' Para pagar uma casa de {:.2f} R$'.format(casa))\nprint(' A prestação será de {:.2f} reais mensais.'.format(prestacao))\nprint(' ')\nminimo = ganha * 30 /100\n\nif prestacao <= minimo:\n print(' Parabéns. Emprestimo CONCEDIDO')\nelse:\n print(' Empréstimo NEGADO')\n",
"id": "4418032",
"language": "Python",
"matching_score": 1.8524973392486572,
"max_stars_count": 0,
"path": "ex036 # financiamento de uma casa if else -=- .py"
},
{
"content": "# Carro alugado km x dias -=-\n\nkm = float(input('Quantos Km foram percorridos? '))\ndias = int(input('O carro foi alugado por quantos dias? ' ))\npagar = dias * 60 + km * 0.15\nprint('O valor a ser pago pelo uso do carro é de: {:.2f} R$' .format(pagar))",
"id": "601846",
"language": "Python",
"matching_score": 0.12275645136833191,
"max_stars_count": 0,
"path": "ex015 # carro alugado km x dias -=-.py"
},
{
"content": "# escolhe um nome aleatotiamente -=-\n\nimport random\nn1 = input('Escreva o nome. Primeiro aluno: ')\nn2 = input('Escreva o nome. Segundo aluno: ')\nn3 = input('Escreva o nome. Terceiro aluno: ')\nn4 = input('Escreva o nome. Primeiro aluno: ')\nlista = [n1, n2, n3,n4]\nn = random.choice(lista)\nprint('O aluno ecolhido foi', n )\n",
"id": "7601509",
"language": "Python",
"matching_score": 2.5967118740081787,
"max_stars_count": 0,
"path": "ex019 # escolhe um nome aleatotiamente -=-.py"
},
{
"content": "# cria uma sequencia aleatotiamente -=-\n\nimport random\nn1 = input('Escreva o nome. Primeiro aluno: ')\nn2 = input('Escreva o nome. Segundo aluno: ')\nn3 = input('Escreva o nome. Terceiro aluno: ')\nn4 = input('Escreva o nome. Primeiro aluno: ')\nlista = [n1, n2, n3,n4]\nrandom.shuffle(lista)\nprint('A ordem de apresentação será: ')\nprint(lista)\n",
"id": "2100856",
"language": "Python",
"matching_score": 1.1161420345306396,
"max_stars_count": 0,
"path": "ex020 # cria uma sequencia aleatotiamente -=-.py"
},
{
"content": "# procurar por caracteres ordinalmente -=-\n\nnome = str(input(\"Didite uma frase: \")).upper().strip()\nprint('A letra A apatece {} vezes na frase.'.format(nome.count('A')))\nprint('A primeira letra A apareceu na posição {}'.format(nome.find('A')+1))\nprint('A última letra A apareceu na posição {}'.format(nome.rfind('A')+1))\n",
"id": "7577012",
"language": "Python",
"matching_score": 1.3806568384170532,
"max_stars_count": 0,
"path": "ex026 # procurar por caracteres ordinalmente -=-.py"
},
{
"content": "# localizar uma palavra/caractere\n\nnome = str(input('Digite o nome da sua cidade: ')).strip()\n#print('Santo'in nome)\nprint(nome[:5].upper() == 'SANTO')",
"id": "8641401",
"language": "Python",
"matching_score": 1.060058832168579,
"max_stars_count": 0,
"path": "ex024 # localizar uma palavra caractere.py"
},
{
"content": "# procura por caracteres -=- in\n\nnome = str(input('Digite seu nome competo: '))\nprint('Seu nome tem silva? {}'.format('SILVA' in nome.upper()))\n\n\n",
"id": "9440202",
"language": "Python",
"matching_score": 1.4713376760482788,
"max_stars_count": 0,
"path": "ex025 # procura por caracteres -=- in.py"
},
{
"content": "# [M / F] while not strip upper\n\nsexo = str(input('Digite seu sexo: [M/F] ')) .strip().upper()[0]\nwhile sexo not in 'MmFf':\n sexo = str(input('Dados inválidos. Por favor, informe corretamente: ')).strip().upper()[0]\nprint(sexo)",
"id": "11155158",
"language": "Python",
"matching_score": 0.36015400290489197,
"max_stars_count": 0,
"path": "ex057 # [M or F] while not strip upper .py"
},
{
"content": "# jogo random number while not randint if elif else\n\nfrom random import randint\npc = randint(0,10)\nprint('Sou seu computador. Pensei em um número de 0 a 10. Será que você consegue adivinhar qual foi???')\ncerto = False\npalpite = 0\nwhile not certo:\n player = int(input('Qual seu palpite? '))\n palpite = palpite + 1\n if player == pc:\n certo = True\n else:\n if player < pc:\n print('Mais...Tente novamente')\n elif player > pc:\n print('Menos...Tente novamente')\nprint('Acertou na tentativa {}. PARABÉNS!'.format(palpite))",
"id": "3430644",
"language": "Python",
"matching_score": 2.3687214851379395,
"max_stars_count": 0,
"path": "ex058 # jogo random number while not randint if elif else .py"
},
{
"content": "# gera um int e compara com o do user -=- if: else:\n\nfrom random import randint\nimport time\nprint('---'*25)\nprint('---'*25)\nn1 = int(input('\\033[30;44m Tente adivinhar qual número vou escolher. Digite um número de 1 a 5: ' ))\nprint('---'*25)\nprint('---'*25)\nprint('Pensando... ... ...')\nprint('---'*25)\nprint('---'*25)\nn2 = (randint(1,5))\ntime.sleep(3)\nif n1 == n2:\n print('\\033[30;42mParabéns você acertou, o número foi {}!!!' .format(n1))\n\nelse:\n print('\\033[1;30;41m A,que pena. Vovê errou. O número foi {}. Tente outra vez!!'.format(n2))",
"id": "8643137",
"language": "Python",
"matching_score": 1.0594407320022583,
"max_stars_count": 0,
"path": "ex028 # gera um int e compara com o do user -=- if else .py"
},
{
"content": "# <NAME> ''' if elif\nfrom random import randint\nfrom time import sleep\n\nbot = randint(0,2)\nprint(' ')\nprint('JO <NAME>... Vamos jogar!')\nprint(' ')\nprint('''Suas opções:\n[ 1 ] PEDRA\n[ 2 ] PAPEL\n[ 3 ] TESOURA''')\nprint(' ')\njogador = int(input('Qual a sua jogada? '))\nprint(' ')\nprint('JO')\nsleep(1)\nprint('Ken')\nsleep(1)\nprint('pô')\nsleep(1)\nprint(' ')\nitens = ('Pedra', 'Papel','Tesoura')\nprint('O computador jogou {}'.format(itens[bot]))\nprint('Você jogou {}'.format(itens[jogador]))\n\nif bot == 0: #pedra\n if jogador == 0:\n print('--> Empate! <--')\n elif jogador == 1:\n print('--> Você venceu! ✅')\n elif jogador == 2:\n print('--> O computador venceu! <@_@>')\n else:\n print('Jogada inválida!')\n\nelif bot == 1: #papel\n if jogador == 0:\n print('--> O computador venceu! <@_@>')\n elif jogador == 1:\n print('--> Empate! <--')\n elif jogador == 2:\n print('--> Você venceu! ✅')\n else:\n print('Jogada inválida!')\n\nelif bot == 2: #tesoura\n if jogador == 0:\n print('--> Você venceu! ✅')\n elif jogador == 1:\n print('--> O computador venceu! <@_@>')\n elif jogador == 2:\n print('--> Empate! <--')\n else:\n print('Jogada inválida!')",
"id": "3636746",
"language": "Python",
"matching_score": 0.5032780766487122,
"max_stars_count": 0,
"path": "ex045 # jo ken poo ''' if elif .py"
},
{
"content": "# simulador de loja if elif else\nprint(' ')\nrs = float(input('Qual o valor das compras? R$ = '))\nprint(' ')\nprint('''Formas de pagamento\n[ 1 ] à vista dinheiro/déboto/PIX\n[ 2 ] à vista no cartão\n[ 3 ] 2x no cartão\n[ 4 ] 3x ou mais vezes no cartão''')\nprint(' ')\nopção = int(input('Qual a opção? '))\nif opção == 1 :\n total = rs - (rs * 10 / 100)\n print('Sua compra de {} R$ vai custar {} no final com desconto de 10%. '.format(rs, total))\nelif opção == 2:\n total = rs - (rs * 5 / 100)\n print('Sua compra de {} R$ vai custar {} no final com desconto de 5%. '.format(rs, total))\nelif opção == 3:\n total = rs / 2\n print('Sua compra será parcelada em 2x de {} R$.'.format(total))\n print('A compra de {} R$ custará no final o mesmo valor de {} R$.'.format(rs, rs))\nelif opção == 4:\n total = rs + (rs * 20 / 100)\n parcelas = int(input('Quantas parcelas? '))\n totalPorParcela = rs / parcelas\n print(' ')\n print('Sua compra será parcelada em {}x de {} R$.'.format(parcelas, totalPorParcela))\n print('A compra de {} R$ custará no final com 20% de juros o valor de {} R$.'.format(rs, total))\nelse:\n print(' ')\n print('Opção incorrerta. Tente novamente.')\n\n\n\n",
"id": "2343907",
"language": "Python",
"matching_score": 0.10971729457378387,
"max_stars_count": 0,
"path": "ex044 # simulador de loja if elif else .py"
},
{
"content": "# alistamento data atual if elif else -=-\n\nfrom datetime import date\natual = date.today().year\nprint(' ')\nnasc = int(input(' Qual o ano de nascimento? '))\nidade = atual - nasc\nprint(' ')\nprint(' Quem nasceu em {} tem {} anos em {}.'.format(nasc, idade, atual))\nprint(' ')\nif idade < 18:\n print(' Ainda faltam {} anos para o alistamento.'.format(18 - idade ))\n print(' O alistamento será em {}.'.format(nasc + 18))\nelif idade > 18:\n print(' O alistamento deveria ter acontecido a {} anos.'.format(idade - 18))\n print(' O ano de alistamento foi em {}.'.format(nasc + 18))\nelse:\n print(' O alistamento deve ocorrer IMEDIATAMENTE!')\n",
"id": "8372243",
"language": "Python",
"matching_score": 2.0765466690063477,
"max_stars_count": 0,
"path": "ex039 # alistamento data atual if elif else -=- .py"
},
{
"content": "# ano bisexto -=-\n\nfrom datetime import date\nano = int(input('Qual o ano quer testar? (Se osar 0 mostrará o ano atual) '))\nif ano == 0:\n ano =date.today().year\nif ano % 4 == 0 and ano % 100 != 0 or ano % 400 ==0:\n print('O ano {} é BISEXTO'.format(ano))\nelse:\n print('O ano {} não é BISEXTO'.format(ano))\n",
"id": "11044034",
"language": "Python",
"matching_score": 2.006589412689209,
"max_stars_count": 0,
"path": "ex032 # ano bisexto -=-.py"
}
] | 1.059441 |
rac2030 | [
{
"content": "gewicht = int(input(\"Wie schwer bist du? \"))\ngroesse = int(input(\"Wie gross bist du? \"))\nBMI = gewicht / groesse**2\nprint(\"Dein BMI ist: {0}\".format(BMI))\n",
"id": "8857852",
"language": "Python",
"matching_score": 0,
"max_stars_count": 10,
"path": "micropython-workshop/testproject/BMI.py"
},
{
"content": "__author__ = 'F446178'\nimport json\n\ndef createJsonFromNewsFeed(filePath):\n newsJsonArray = {}\n inputFile = open(filePath, encoding=\"UTF-8\")\n newsFile = inputFile.read()\n rawNewsRecords = newsFile.split(\"\\n\\n\")\n ctr = 0\n for rawNews in rawNewsRecords:\n rawNewsAttributes = rawNews.split(\"\\n\")\n news = {}\n for attributes in rawNewsAttributes:\n if attributes.startswith('#### seqNo: '):\n news[\"seqNo\"] = attributes.split('seqNo: ')[1]\n elif attributes.startswith(\"#### storyId: \"):\n news[\"storyId\"] = attributes.split(\"#### storyId: \")[1]\n if attributes.startswith(\"#### date: \"):\n news[\"date\"] = attributes.split(\"#### date: \")[1]\n elif attributes.startswith(\"#### headLine: \"):\n news[\"headLine\"] = attributes.split(\"#### headLine: \")[1]\n if attributes.startswith(\"#### topics: \"):\n news[\"topics\"] = attributes.split(\"#### topics: \")[1]\n elif attributes.startswith(\"#### topicsDescription: \"):\n topicsDescription = attributes.split(\"#### topicsDescription: \")[1]\n topicsDescription = topicsDescription.replace('\"', '')\n existingKeywordArray = topicsDescription.split(\",\")\n for keyword in existingKeywordArray:\n print(keyword)\n if keyword.startswith(\" \"):\n keyword = keyword[1:]\n print(keyword)\n news[\"topicsDescription\"] = existingKeywordArray\n elif attributes.startswith(\"#### story: \"):\n news[\"story\"] = attributes.split(\"#### story: \")[1]\n\n newsJsonArray[ctr] = news\n ctr = ctr +1;\n print(newsJsonArray)\n mystring = str(newsJsonArray)\n\n print(newsJsonArray)\n\n jsonString = json.dumps(newsJsonArray)\n jsonObject = json.loads(jsonString)\n return jsonObject\n\n#test the method\njson1 = createJsonFromNewsFeed(\"News300.txt\")\nprint(json1['200']['story'])\n#j = json.loads(news)\n",
"id": "4527496",
"language": "Python",
"matching_score": 1.7710593938827515,
"max_stars_count": 0,
"path": "bin/JsonCreator.py"
},
{
"content": "\noriginalList = [\"Reuters News Topics\", \"Reuters News Topics;News Topics\", \"Reuters News Topics;News Topics;General News Topics\"]\nnewsList = []\n\nstr1 = \"Reuters News Topics;News Topics\"\nstr2 = \"Reuters News Topics;News Topics;NewNode\"\n\n\nfor line in originalList:\n if(line is str1):\n newsList.append(line)\n newsList.append(str2)\n else:\n newsList.append(line)\n\nprint(newsList)\n",
"id": "3260711",
"language": "Python",
"matching_score": 0.7955470681190491,
"max_stars_count": 0,
"path": "bin/MergeList.py"
},
{
"content": "\ndelimeter = \";\"\nlines = []\nmaxLenth = 0;\nwith open('new300.csv') as f:\n for line in f:\n lines.append(line)\n arr = line.split(\";\")\n if(len(arr) > maxLenth):\n maxLenth = len(arr)\nprint(lines)\nprint(maxLenth)\nfout = open(\"Tn+1.csv\",encoding='utf8',mode='w')\nnewLine = \"\"\nfor line in lines:\n newLine = \"\"\n arr = line.split(delimeter)\n node = arr[len(arr) - 1]\n if(node.split(\"\\n\")):\n node = node.split(\"\\n\")[0]\n if(len(arr) - 1 > 0):\n for ctr in range(0, len(arr) - 1):\n newLine = newLine + delimeter\n newLine = newLine + node\n for ctr in range(len(arr) - 1, maxLenth):\n newLine = newLine + delimeter\n fout.write(newLine + '\\n')\n\nfout.close()\n",
"id": "4453210",
"language": "Python",
"matching_score": 1.1516752243041992,
"max_stars_count": 0,
"path": "bin/ConvertCSVToN+1CSV.py"
},
{
"content": "\nn0 = ''\nn1 = ''\nn2 = ''\nn3 = ''\nn4 = ''\nn5 = ''\nn6 = ''\ndelimeter = \";\"\n\ndef find_between( s, first, last ):\n try:\n start = s.index( first ) + len( first )\n end = s.index( last, start )\n return s[start:end]\n except ValueError:\n return \"\"\n\nfout = open(\"new300.csv\",encoding='utf8',mode='w')\n\nwith open('T0.CSV') as f:\n for line in f:\n newline = '@'\n if line.startswith(';;;;;;'):\n n6 = find_between(line, \";;;;;;\", \";\")\n newline = n0 + delimeter + n1 + delimeter + n2 + delimeter + n3 + delimeter + n4 + delimeter + n5 + delimeter + n6\n print(newline)\n elif line.startswith(';;;;;'):\n n5 = find_between(line, \";;;;;\", \";;\")\n newline = n0 + delimeter + n1 + delimeter + n2 + delimeter + n3 + delimeter + n4 + delimeter + n5\n print(newline)\n elif line.startswith(';;;;'):\n n4 = find_between(line, \";;;;\", \";;;\")\n newline = n0 + delimeter + n1 + delimeter + n2 + delimeter + n3 + delimeter + n4\n print(newline)\n elif line.startswith(';;;'):\n n3 = find_between(line, \";;;\", \";;;;\")\n newline = n0 + delimeter + n1 + delimeter + n2 + delimeter + n3\n print(newline)\n elif line.startswith(';;'):\n n2 = find_between(line, \";;\", \";;;;;\")\n newline = n0 + delimeter + n1 + delimeter + n2\n print(newline)\n elif line.startswith(';'):\n n1 = find_between(line, \";\", \";;;;;;\")\n newline = n0 + delimeter + n1\n print(newline)\n else:\n n0 = find_between(line, \"\", \";;;;;;;\")\n newline = n0\n if not newline.startswith('@'):\n fout.write(newline + '\\n')\nfout.close()\n",
"id": "7804002",
"language": "Python",
"matching_score": 1.8640087842941284,
"max_stars_count": 0,
"path": "bin/ConvertRawCVS.py"
},
{
"content": "import numpy as np\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nlist = [\"India\"]\n\nwith open('new300.csv') as f:\n for line in f:\n list.append(line)\n\nvect = TfidfVectorizer(min_df=1)\n\ntfidf = vect.fit_transform(list)\nprint(tfidf.toarray())\nprint ((tfidf * tfidf.T).A)\n\nprint(vect.get_feature_names())\nfout = open(\"vector-matrix.txt\",encoding='utf8',mode='w') \nfout.write(tfidf.toarray())\nfout.close()\n\n",
"id": "5991106",
"language": "Python",
"matching_score": 0.17137539386749268,
"max_stars_count": 0,
"path": "bin/Vectorizer.py"
},
{
"content": "import json\ninputfile = 'basedata/News300.new.txt'\noutputfile = 'basedata/News300.new.json'\nfin = open(inputfile,encoding='utf8')\nfout = open(outputfile,encoding='utf8',mode='w')\n\ninstream = fin.read()\n\nrawNewsRecords = instream.split('\\n\\n')\n\nrecords = []\nfor rawNewsRecord in rawNewsRecords:\n newsAttributes = rawNewsRecord.split('\\n')\n newdict = {}\n for newsAttribute in newsAttributes:\n try:\n elements = newsAttribute.split(':', 1)\n elements[0] = elements[0].replace('#### ', '')\n elements[1] = elements[1].replace('\"', '')\n splited_values = elements[1].split(',')\n\n if elements[0] in ['seqNo', 'storyId', 'headLine', 'story']:\n newdict[elements[0]] = elements[1]\n else:\n newdict[elements[0]] = splited_values\n except Exception as inst:\n print(inst)\n records.append(newdict)\n\ndata = {}\ndata['data']=records\n\nfout.write(json.dumps(data, indent=2))\nfout.close()\n",
"id": "3030351",
"language": "Python",
"matching_score": 4.138520240783691,
"max_stars_count": 0,
"path": "bin/newsfeed_to_json.py"
},
{
"content": "import urllib.request\nimport requests\nimport json\nfrom watson_developer_cloud import NaturalLanguageUnderstandingV1, WatsonApiException\nimport watson_developer_cloud.natural_language_understanding.features.v1 as Features\nfrom watson_developer_cloud.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions, \\\n CategoriesOptions, ConceptsOptions, RelationsOptions\n\n#inputurl = \"https://hackathon17.mope.ml/HackathonSite/AccuracyTestNews300.txt\"\n#outputfile = 'AccuracyTestNews300.json'\ninputurl = \"https://hackathon17.mope.ml/HackathonSite/NewsAll.txt\"\noutputfile = 'NewsAllEnhanced.json'\n\nnews300 = urllib.request.urlopen(inputurl)\nnews300Bytes = news300.read()\nnews300String = news300Bytes.decode(\"utf8\")\nnews300.close()\n\nrawNewsRecords = news300String.split('\\r\\n\\r\\n')\n\nrecords = []\nfor rawNewsRecord in rawNewsRecords:\n newsAttributes = rawNewsRecord.split('\\r\\n')\n newdict = {}\n for newsAttribute in newsAttributes:\n try:\n if not newsAttributes:\n break\n elements = newsAttribute.split(':', 1)\n if len(elements) < 2:\n break\n elements[0] = elements[0].replace('#### ', '')\n elements[1] = elements[1].replace('\"', '')\n splited_values = elements[1].split(',')\n\n if elements[0] in ['seqNo', 'storyId', 'headLine', 'story']:\n newdict[elements[0]] = elements[1]\n else:\n newdict[elements[0]] = splited_values\n except Exception as inst:\n print(inst)\n records.append(newdict)\n\ndata = {}\ndata['data'] = records\n\nnlu = NaturalLanguageUnderstandingV1(\n username=\"b6e37f9b-f1be-4fa9-9915-80e0333af98b\",\n password=\"<PASSWORD>mU\",\n version=\"2017-02-27\")\n\n\ndef queryWatson(headline, story):\n return nlu.analyze(\n text=headline + \"; \" + story,\n language=\"en\",\n features=Features(entities=EntitiesOptions(\n emotion=False, sentiment=False, limit=60),\n keywords=KeywordsOptions(\n emotion=False, sentiment=False, limit=60),\n categories=CategoriesOptions(limit=60),\n concepts=ConceptsOptions(limit=50)\n )\n )\n\ntry:\n for entry in data['data']:\n try:\n result = queryWatson(entry['headLine'], entry['headLine'])\n # enhance the entry\n entry['keywords'] = result['keywords']\n entry['entities'] = result['entities']\n entry['concepts'] = result['concepts']\n entry['categories'] = result['categories']\n except KeyError:\n print(\"KeyError with: \" + json.dumps(result, indent=2))\n except WatsonApiException:\n print(\"API Exception: \" + json.dumps(entry, indent=2))\n else:\n print(json.dumps(entry, indent=2))\nfinally:\n print(\"-------------- End result written to \" + outputfile + \" --------------\")\n fout = open(outputfile,encoding='utf8',mode='w')\n fout.write(json.dumps(data, indent=2))\n fout.close()\n",
"id": "11292976",
"language": "Python",
"matching_score": 0.6426727771759033,
"max_stars_count": 0,
"path": "bin/fetchAndEnhance.py"
},
{
"content": "from microbit import *\nimport radio\nimport random\nimport music\n\n# update this user number or enable badge for staff\n# user = 'U1234'\nuser = 'STAFF'\n\n# Create the \"flash\" animation frames. Can you work out how it's done?\nflash = [Image().invert()*(i/9) for i in range(7, -1, -1)]\nmust_send = random.randint(60000, 180000) # forced flash at random delay\ncnt_must_send = 0\n# cnt_msg_send = 0\n\n\ndef display_msg(x, msg):\n# cnt_msg_send = 0\n while True:\n buzzer()\n display.scroll(x, delay=100, wait=True, loop=False)\n# if cnt_msg_send != 5:\n# radio.send_bytes(msg)\n# cnt_msg_send = cnt_msg_send + 1\n if button_b.was_pressed():\n msg = ''\n msg_u = ''\n break\n\n\ndef buzzer():\n for freq in range(880, 1760, 16):\n music.pitch(freq, 4)\n for freq in range(1760, 880, -16):\n music.pitch(freq, 4)\n\n\n# start here\nradio.on()\nradio.config(channel=43, queue=10, length=128, power=7, data_rate=radio.RATE_2MBIT)\n\n\nwhile True:\n cnt_must_send = cnt_must_send + 1\n if must_send == cnt_must_send:\n radio.send_bytes('flash')\n cnt_must_send = 0\n must_send = random.randint(60000, 180000) # forced flash at random delay\n # Button A sends a \"flash\" message.\n if button_a.was_pressed(): #\n radio.send_bytes('BGDG Zürich was here')\n display.show(Image.DUCK)\n cnt_must_send = 0\n display.set_pixel(2, 1, 5)\n sleep(100)\n display.set_pixel(2, 1, 0)\n if button_b.was_pressed(): #\n radio.send_bytes('BMake a break and have a kitkat')\n display.show(Image.GHOST)\n cnt_must_send = 0\n display.set_pixel(2, 1, 5)\n sleep(100)\n display.set_pixel(2, 1, 0)\n if accelerometer.was_gesture('shake'):\n radio.send_bytes('BShake your body and dance around')\n display.show(Image.PACMAN)\n cnt_must_send = 0\n display.set_pixel(2, 1, 5)\n sleep(100)\n display.set_pixel(2, 1, 0)\n msg = radio.receive_bytes()\n if not msg:\n display.set_pixel(2, 2, 3)\n else:\n display.set_pixel(2, 3, 5)\n sleep(100)\n display.set_pixel(2, 3, 0)\n msg_u = str(msg, 'utf8')\n msg_u_len = 0\n msg_u_len = len(msg_u)\n if msg_u == 'flash':\n sleep(random.randint(50, 550))\n display.show(flash, delay=100, wait=False)\n cnt_must_send = 0\n # Randomly re-broadcast the flash message after a\n # slight delay.\n if random.randint(0, 9) == 0:\n sleep(500)\n radio.send('BMakeZurich has been pawned') # a-ha\n elif msg_u[0] == 'B': # broadcast\n msg_u = msg_u[1:msg_u_len]\n display_msg(msg_u, msg)\n elif msg_u[0:5] == user: # User or Staff addressed\n msg_u = msg_u[5:msg_u_len]\n display_msg(msg_u, msg)\n # display.scroll(msg_u, delay=100, wait=False, loop=False) # for debugging",
"id": "12594234",
"language": "Python",
"matching_score": 1.4435151815414429,
"max_stars_count": 10,
"path": "MZ-badge-fun/MZ-rac-badge.py"
},
{
"content": "import serial\nimport time\nimport datetime\n\npromini = '/dev/cu.SLAB_USBtoUART'\nser = serial.Serial(promini, 9600)\n \nrepeatTime = 1000 # milliseconds\n\ndef writeData(value):\n # Get the current data\n today = datetime.date.today() \n \n # Open log file 2012-6-23.log and append\n with open(str(today)+'.log', 'ab') as f: \n \n f.write(value) \n # Write our integer value to our log\n \n f.write('\\n') \n # Add a newline so we can retrieve the data easily later, could use spaces too.\n\ntimer = time.time() # Timer to see when we started\nwhile True:\n time.sleep(0.25) # sleep for 250 milliseconds\n if time.time() - timer > repeatTime:\n # If the current time is greater than the repeat time, send our 'get' command again \n serial.write(\"t\\n\")\n timer = time.time() # start the timer over\n \n if ser.inWaiting() > 2: # if we have \\r\\n in there, we will have two characters, we want more!\n value = serial.read() # Read the data from the \n value = value.strip('\\r\\n') # Arduino will return DATA\\r\\n when println is used \n \n try:\n value = int(value) #This is likely where you will get 'duff' data and catch it.\n writeData(value) # Write the data to a log file\n \n except:\n serial.write(\"t\\n\") # Try it again!",
"id": "2060779",
"language": "Python",
"matching_score": 0.8474031686782837,
"max_stars_count": 10,
"path": "logs/arduinoDataWriter.py"
},
{
"content": "from network import LoRa\nimport socket\nimport binascii\nimport struct\n\n# Initialize LoRa in LORAWAN mode.\nlora = LoRa(mode=LoRa.LORAWAN)\n\n# create an ABP authentication params\ndev_addr = struct.unpack(\">l\", binascii.unhexlify('26011279'))[0]\nnwk_swkey = binascii.unhexlify('1E7EF194F81B1F250E995281DFE3E189')\napp_swkey = binascii.unhexlify('47DA18DCE05337803D9AEF2DD04CEABE')\n\n# join a network using ABP (Activation By Personalization)\nlora.join(activation=LoRa.ABP, auth=(dev_addr, nwk_swkey, app_swkey))\n\n# create a LoRa socket\ns = socket.socket(socket.AF_LORA, socket.SOCK_RAW)\n\n# set the LoRaWAN data rate\ns.setsockopt(socket.SOL_LORA, socket.SO_DR, 5)\n\n# make the socket blocking\n# (waits for the data to be sent and for the 2 receive windows to expire)\ns.setblocking(True)\n\n# send some data\ns.send(bytes([0x01, 0x02, 0x03]))\n\n# make the socket non-blocking\n# (because if there's no data received it will block forever...)\ns.setblocking(False)\n\n# get any data received (if any...)\ndata = s.recv(64)\nprint(data)\n",
"id": "10931256",
"language": "Python",
"matching_score": 2.349569320678711,
"max_stars_count": 10,
"path": "micropython-workshop/testproject/ttn.py"
},
{
"content": "from network import LoRa\nimport binascii\n\n\n# Initialize LoRa in LORAWAN mode.\nlora = LoRa(mode=LoRa.LORAWAN)\ndevEUI = binascii.hexlify(lora.mac())\n\n# show DevEUI\nprint(\"LoPy DevEUI; \", devEUI)\n",
"id": "10701798",
"language": "Python",
"matching_score": 0.8015830516815186,
"max_stars_count": 10,
"path": "micropython-workshop/testproject/show_devEUI.py"
},
{
"content": "import network\nimport time\n\n\nwlan = network.WLAN(mode=network.WLAN.STA)\nfor n in wlan.scan():\n print(n.ssid)\n\nssid = input(\"ssid: \")\npassword = input(\"passwort: \")\nwlan.connect(ssid, auth=(network.WLAN.WPA2, password))\nwhile not wlan.isconnected():\n time.sleep_ms(50)\nprint(wlan.ifconfig())\n",
"id": "10525599",
"language": "Python",
"matching_score": 0.9275200366973877,
"max_stars_count": 10,
"path": "micropython-workshop/testproject/wlan.py"
},
{
"content": "import pycom\nimport time\npycom.heartbeat(False)\n\nfor cycles in range(50): # stop after 10 cycles\n pycom.rgbled(0x7f0000) # red\n time.sleep(1)\n pycom.rgbled(0x0040ff) # blau\n time.sleep(1)\n \n \n",
"id": "11517693",
"language": "Python",
"matching_score": 0.3001859486103058,
"max_stars_count": 10,
"path": "micropython-workshop/testproject/polizei.py"
}
] | 0.887462 |
zmellal | [
{
"content": "import boto3\nimport json\nimport os\nimport traceback\n\n# AWS SDK CLIENTS\ndynamo_client = boto3.client('dynamodb')\nsqs_client = boto3.client('sqs')\n\n# Function handler\ndef handler(event, context):\n next_scan_key = \"?\"\n while next_scan_key:\n # Scan sources table \n scan_request = {\n \"TableName\" : os.environ[\"DYNAMO_TABLE\"]\n }\n if next_scan_key != \"?\":\n request[\"ExclusiveStartKey\"] = next_scan_key\n scan_response = dynamo_client.scan(**scan_request)\n if scan_response[\"Count\"] != 0:\n # For each source found in table, send message to queue for processing\n for item in scan_response[\"Items\"]:\n try:\n message = {\n \"source\": item[\"source\"][\"S\"]\n }\n if item.get(\"httpHeaderOverrides\"):\n message[\"headers\"] = { key: value[\"S\"] for key, value in item[\"httpHeaderOverrides\"][\"M\"].items() }\n sqs_client.send_message(\n QueueUrl=os.environ[\"QUEUE_URL\"],\n MessageBody=json.dumps(message)\n )\n except:\n traceback.print_exc() \n next_scan_key = scan_response[\"LastEvaluatedKey\"] if \"LastEvaluatedKey\" in scan_response else None",
"id": "11521232",
"language": "Python",
"matching_score": 0,
"max_stars_count": 0,
"path": "lambdas/list-sources/index.py"
}
] | 0 |
Barlog951 | [
{
"content": "from blockapi.api.binance import *\nfrom blockapi.api.blockchaininfo import *\nfrom blockapi.api.blockchainos import *\nfrom blockapi.api.blockchair import *\nfrom blockapi.api.blockcypher import *\nfrom blockapi.api.blockonomics import *\nfrom blockapi.api.blockscout import *\nfrom blockapi.api.btc import *\nfrom blockapi.api.cardanoexplorer import *\nfrom blockapi.api.chainso import *\nfrom blockapi.api.cosmos import *\nfrom blockapi.api.cryptoid import *\nfrom blockapi.api.dcrdata import *\nfrom blockapi.api.digonchain import *\nfrom blockapi.api.eospark import *\nfrom blockapi.api.etherscan import *\nfrom blockapi.api.ethplorer import *\nfrom blockapi.api.greymass import *\nfrom blockapi.api.insight import *\nfrom blockapi.api.neoscan import *\nfrom blockapi.api.ontology import *\nfrom blockapi.api.stellar import *\nfrom blockapi.api.trezor import *\nfrom blockapi.api.tronscan import *\nfrom blockapi.api.tzscan import *\nfrom blockapi.api.zchain import *\nfrom blockapi.api.zensystem import *\n",
"id": "8193299",
"language": "Python",
"matching_score": 0.12299490720033646,
"max_stars_count": 0,
"path": "blockapi/api/__init__.py"
},
{
"content": "from web3 import Web3\nimport cfscrape\nfrom bs4 import BeautifulSoup\nimport re\nfrom blockapi.api import EtherscanAPI\nfrom ethereum_input_decoder import AbiMethod\n\n\nclass Ethereum:\n def __init__(self, node_url):\n self.node_url = node_url\n self.web3 = Web3(Web3.HTTPProvider(self.node_url))\n self.abi = None\n\n def load_abi(self, contract):\n myapi = EtherscanAPI(contract)\n self.abi = myapi.get_abi(contract)['result']\n\n def to_checksum_addr(self, address):\n return self.web3.toChecksumAddress(address)\n\n def get_contract(self, contract):\n self.load_abi(contract)\n return self.web3.eth.contract(address=Web3.toChecksumAddress(\n contract), abi=self.abi)\n\n def get_tx_by_hash(self, txhash):\n tx = self.web3.eth.getTransaction(txhash)\n return tx\n\n def get_function_by_inputdata(self, tx_input):\n tx_input_decoded = AbiMethod.from_input_lookup(\n bytes.fromhex(tx_input[2:]))\n tx_input_values = list(tx_input_decoded.values())\n tx_function = tx_input_values[0]\n return tx_function\n\n\nclass Infura(Ethereum):\n def __init__(self, network, api_key):\n self.network = network\n self.api_prefix = network if network != \"mainnet\" else \"api\"\n self.api_key = api_key\n self.infura_url = 'https://{}.infura.io/v3/{}'.format(self.network,\n self.api_key)\n super().__init__(self.infura_url)\n\n\nclass ERC20Token:\n def __init__(self):\n self.url = 'https://etherscan.io/tokens?p={}'\n self.page = 0\n self.reqobj = cfscrape.create_scraper()\n self.tokens = {}\n\n def get_token_list(self):\n \"\"\"\n Scrapes the ERC20 token list from etherscan.io\n\n :return: dictionary containing currency_name, contract_addres,\n price, change, volume, market_cap, holders - for each token\n :rtype: dict\n \"\"\"\n scrape_result = True\n result_msg = ''\n page = 0\n\n while True:\n status_code, rows = self._get_table_rows(page)\n if status_code != 200:\n scrape_result = False\n result_msg = 'error {} on page {}'.format(status_code,\n page)\n break\n\n if rows is None:\n break\n\n for row in rows:\n result = self._parse_table_row(row)\n self.tokens[result[0]] = \\\n {'currency_name': result[1],\n 'contract_address': result[2],\n 'price': float(result[3]),\n 'change': result[4],\n 'volume': result[5],\n 'market_cap': result[6],\n 'holders': result[7]}\n\n page += 1\n\n return {'result': scrape_result, 'result_msg': result_msg,\n 'tokens': self.tokens}\n\n def _get_table_rows(self, page):\n erc20_url = self.url.format(page)\n result = self.reqobj.get(erc20_url)\n\n if result.status_code == 200:\n soup = BeautifulSoup(result.text, \"lxml\")\n table = soup.body.find(\"table\", {\"id\": \"tblResult\"})\n\n if table is None:\n return result.status_code, None\n\n table_body = table.find_all('tbody')[0]\n table_rows = table_body.find_all('tr')\n\n return result.status_code, table_rows\n else:\n return result.status_code, None\n\n def _parse_table_row(self, row):\n row_ahref = row.find_all('a')[0]\n cointext = row_ahref.text\n\n coin_sc = row_ahref['href'][7:]\n currency_symbol = self._get_currency_symbol(cointext)\n currency_name = self._get_currency_name(cointext)\n\n row_tds = row.find_all('td')\n row_tds[2].find('div').decompose()\n\n currency_price = ERC20Token._get_number(row_tds[2].text,\n float)\n currency_change = ERC20Token._get_number(row_tds[3].text,\n float)\n currency_volume = ERC20Token._get_number(row_tds[4].text, int)\n market_cap = ERC20Token._get_number(row_tds[5].text, int)\n holders = ERC20Token._get_number(row_tds[6].text, int)\n\n return (currency_symbol, currency_name, coin_sc, 
currency_price,\n currency_change, currency_volume, market_cap, holders)\n\n def _get_currency_symbol(self, cointext):\n try:\n currency_symbol = re.search('\\([A-Za-z0-9]+\\)',\n cointext).group(0)[1:-1]\n except AttributeError:\n currency_symbol = cointext\n\n return currency_symbol\n\n def _get_currency_name(self, cointext):\n currency_name = re.sub('\\([A-Za-z0-9]+\\)', '',\n cointext).strip()\n return currency_name\n\n @staticmethod\n def _get_number(num_string, rtype):\n \"\"\"\n converts string in currency format $1234,56.78 or 1234.56 % into\n a user-defined type (usually float or int)\n\n :param num_string: string in US currency format / percentage\n :type num_string: string\n :param rtype: class to convert the resulting number to\n :type rtype: class\n :return: number converted from the string to the desired type\n \"\"\"\n try:\n result = rtype(\n re.sub('[$,%]', '', num_string))\n except ValueError:\n result = None\n\n return result\n",
"id": "4313085",
"language": "Python",
"matching_score": 3.4684290885925293,
"max_stars_count": 0,
"path": "blockapi/utils/ethereum.py"
},
{
"content": "import setuptools\nfrom setuptools import find_packages\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nPACKAGES = find_packages(where='.')\n\nsetuptools.setup(\n name='blockapi',\n version='0.0.44',\n author='<NAME>.',\n description='BlockAPI library',\n license='MIT',\n long_description=long_description,\n long_description_content_type='text/markdown',\n packages=PACKAGES,\n install_requires=[\n 'requests>=2.22.0', \n 'pytz>=2019.2', \n 'python-dateutil>=2.8.0',\n 'coinaddrng==1.0.25',\n 'cfscrape>=2.0.8',\n 'gevent>=1.4.0',\n 'ethereum_input_decoder>=0.2.2',\n 'web3>=5.2.2',\n 'bs4>=0.0.1',\n 'lxml>=4.4.1'\n ],\n)\n\n",
"id": "7846672",
"language": "Python",
"matching_score": 2.1192870140075684,
"max_stars_count": 0,
"path": "setup.py"
}
] | 2.119287 |