{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [], "source": [ "%%capture\n", "!pip install -r requirements.txt" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# Tubemaps" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "london_stations = pd.read_csv(\"../dataset/tubemaps/london.stations.csv\")\n", "london_connections = pd.read_csv(\"../dataset/tubemaps/london.connections.csv\")\n", "london_lines = pd.read_csv(\"../dataset/tubemaps/london.lines.csv\")" ] }, { "cell_type": "code", "execution_count": 8, "metadata": {}, "outputs": [], "source": [ "# Map Bounds to limit stations of interest\n", "lat_max = 51.5345\n", "lat_min = 51.4860\n", "long_max = -0.0725\n", "long_min = -0.1527\n", "\n", "london_stations_filtered = london_stations[(london_stations['latitude'] <= lat_max) & \n", " (london_stations['latitude'] >= lat_min) & \n", " (london_stations['longitude'] <= long_max) & \n", " (london_stations['longitude'] >= long_min)]\n", "london_stations_filtered = london_stations_filtered.drop(columns=['Unnamed: 0']).drop_duplicates()\n", "london_stations_filtered.to_csv(\"dataset/stations.csv\", index=False)" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", "\n", "\n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", " \n", "
station1station2linetime
2498711
34919712
68414813
78727912
1614827911
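{ "cell_type": "markdown", "metadata": {}, "source": [ "Filtering by bounding box and dropping duplicate pairs can leave the network disconnected, in which case some of the later route queries would have no solution. A minimal reachability check with a pure-Python BFS (no extra dependencies assumed):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "from collections import defaultdict, deque\n", "\n", "# Build an undirected adjacency list from the filtered connections\n", "adj = defaultdict(set)\n", "for s1, s2 in london_connections_filtered[['station1', 'station2']].values.tolist():\n", "    adj[s1].add(s2)\n", "    adj[s2].add(s1)\n", "\n", "# BFS from an arbitrary station and count how many are reachable\n", "start = next(iter(adj))\n", "seen, queue = {start}, deque([start])\n", "while queue:\n", "    for neighbour in adj[queue.popleft()] - seen:\n", "        seen.add(neighbour)\n", "        queue.append(neighbour)\n", "\n", "print(f\"{len(seen)} of {len(adj)} stations reachable from station {start}\")" ] },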
{ "cell_type": "code", "execution_count": 68, "metadata": {}, "outputs": [], "source": [ "london_lines.to_csv(\"dataset/lines.csv\", index=False)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# DB Creation" ] },
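{ "cell_type": "markdown", "metadata": {}, "source": [ "The knowledge base is a flat file of Prolog facts: each connection is asserted in both directions as `eta(Station1, Station2, Time)`, and each station gets a `location(Name, Latitude, Longitude)` fact. Station names are normalised into valid atoms (lowercase, punctuation stripped), so the generated file looks roughly like this (values illustrative):\n", "\n", "```prolog\n", "eta(temple,blackfriars,1).\n", "eta(blackfriars,temple,1).\n", "location(temple,51.511,-0.1141).\n", "```" ] },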
\n", "

67 rows × 4 columns

\n", "
" ], "text/plain": [ " station1 station2 line time\n", "2 49 87 1 1\n", "3 49 197 1 2\n", "6 84 148 1 3\n", "7 87 279 1 2\n", "16 148 279 1 1\n", ".. ... ... ... ...\n", "398 107 273 11 2\n", "400 192 277 11 2\n", "401 198 272 11 1\n", "402 198 273 11 3\n", "405 13 279 12 4\n", "\n", "[67 rows x 4 columns]" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "london_connections_filtered = london_connections[(london_connections['station1'].isin(london_stations_filtered['id'])) & (london_connections['station2'].isin(london_stations_filtered['id']))]\n", "london_connections_filtered = london_connections_filtered.drop_duplicates(subset=['station1','station2'])\n", "london_connections_filtered.to_csv(\"dataset/connections.csv\", index=False)" ] }, { "cell_type": "code", "execution_count": 68, "metadata": {}, "outputs": [], "source": [ "london_lines.to_csv(\"dataset/lines.csv\", index=False)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "# DB Creation" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "import pandas as pd\n", "london_stations = pd.read_csv(\"dataset/stations.csv\")\n", "london_connections = pd.read_csv(\"dataset/connections.csv\")\n", "london_lines = pd.read_csv(\"dataset/lines.csv\")\n", "\n", "facts = \"\"\n", "\n", "def remove_spl_chars(text):\n", " return text.lower().replace(\" \",\"_\").replace(\"'\",\"\").replace(\"(\",\"_\").replace(\")\",\"_\").replace(\"&\",\"and\").replace(\".\",\"\").replace(\",\",\"\")\n", "\n", "# ETA\n", "for station1, station2, line, time in london_connections.values.tolist(): \n", " station1_name = remove_spl_chars(london_stations[london_stations['id']==station1]['name'].values[0])\n", " station2_name = remove_spl_chars(london_stations[london_stations['id']==station2]['name'].values[0])\n", " facts += f\"eta({station1_name},{station2_name},{time}).\\n\"\n", " facts += f\"eta({station2_name},{station1_name},{time}).\\n\"\n", "# facts += \"eta(Node, NextNode, Time):- !,eta(NextNode, Node, Time).\\n\"\n", "for id, lat, long, name, display_name, zone, tot_line, rail, prolog_name in london_stations.values.tolist(): \n", " facts += f\"location({prolog_name},{lat},{long}).\\n\"\n", "\n", "text_file = open(\"src/facts.pl\", \"w\")\n", "n = text_file.write(facts)\n", "text_file.close()\n" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "True\n", "True\n", "Path: temple - - to - - cannon_street\n", "[{'Path': ['temple', 'blackfriars', 'mansion_house', 'cannon_street']}]\n" ] } ], "source": [ "from swiplserver import PrologMQI\n", "\n", "with PrologMQI() as mqi:\n", " with mqi.create_thread() as prolog_thread:\n", " result = prolog_thread.query(\"consult(\\\"./src/facts.pl\\\").\")\n", " print(result)\n", " result = prolog_thread.query(\"consult(\\\"./src/search_algorithms.pl\\\").\")\n", " print(result)\n", " origin = remove_spl_chars(\"Temple\")\n", " destination = remove_spl_chars(\"Cannon Street\") \n", " print(f\"Path: {origin} - - to - - {destination}\")\n", " query = f\"breadth_first_search({origin}, {destination}, Path).\"\n", " result = prolog_thread.query_async(query, find_all=False)\n", " result = prolog_thread.query_async_result()\n", " print(result)" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", 
"mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.0" }, "orig_nbformat": 4 }, "nbformat": 4, "nbformat_minor": 2 }