{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "Get_ICESat-2_Data_Online.ipynb",
      "provenance": [],
      "collapsed_sections": [],
      "toc_visible": true,
      "mount_file_id": "1HemVBpC6Rmm9WWn0YPP-KHFH5zdiOCRE",
      "authorship_tag": "ABX9TyPzEPXoj8gSf0LTWCYReSt8",
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    }
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/kyle1990kauffman/ICESAT/blob/master/Get_ICESat_2_Data_Online.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "31DP4dSxl3SG",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import pandas as pd\n",
        "import json\n",
        "import requests\n",
        "import plotly.graph_objects as go\n",
        "from plotly.offline import iplot\n",
        "import numpy as np\n",
        "import pickle\n",
        "from datetime import timedelta, date\n",
        "from google.colab import files"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "D_gjU3PNpVYh",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "def daterange(start_date, end_date):\n",
        "    for n in range(int ((end_date - start_date).days)):\n",
        "        yield start_date + timedelta(n)\n",
        "\n",
        "DATE = []\n",
        "start_date = date(2018, 11, 1)\n",
        "end_date = date(2018, 12, 1)\n",
        "\n",
        "for single_date in daterange(start_date, end_date):\n",
        "    # print(single_date.strftime(\"%Y-%m-%d\"))\n",
        "    DATE.append(single_date.strftime(\"%Y-%m-%d\"))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "YaxgG57yl5_m",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "Position = [47.45681762695317, 33.116912841796854, \n",
        "            47.59826660156254, 33.229522705078104]  # [minX, minY, maxX, maxY]\n",
        "\n",
        "date = '2019-09-06'  # [start, end]\n",
        "\n",
        "trackID = 1081   # 1 - 1387\n",
        "\n",
        "# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
        "# Paste the OpenAltimetry API URL for Photon here: \n",
        "URL_1 = 'https://openaltimetry.org/data/api/icesat2/'\n",
        "URL_2 = 'atl03' + '?&'   # 'atl08'\n",
        "URL_3 = 'minx=' + str(Position[0]) + '&' + 'miny=' + str(Position[1]) + '&' + \\\n",
        "        'maxx=' + str(Position[2]) + '&' + 'maxy=' + str(Position[3]) + '&'\n",
        "URL_4 = 'date=' + date\n",
        "URL_5 = '&trackId=' + str(trackID)\n",
        "URL_6 = '&beamName=gt3r&beamName=gt3l&beamName=gt2r&beamName=gt2l&beamName=gt1r&beamName=gt1l'\n",
        "\n",
        "URL = URL_1 + URL_2 + URL_3 + URL_4 + URL_5 + URL_6\n",
        "# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
        "\n",
        "# OA_API_URL = 'https://openaltimetry.org/data/api/icesat2/atl03?&minx=47.45681762695317&miny=33.116912841796854&maxx=47.59826660156254&maxy=33.229522705078104&date=2019-09-06&trackId=1081&beamName=gt3r&beamName=gt3l&beamName=gt2r&beamName=gt2l&beamName=gt1r&beamName=gt1l'\n",
        "OA_API_URL = URL\n",
        "\n",
        "# Select the list of confidence to display: 'Noise', 'Buffer', 'Low', 'Medium', 'High'\n",
        "OA_PHOTON_CONFIDENCE = ['Noise', 'Buffer', 'Low', 'Medium', 'High']"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "8HadqzpmlvPZ",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "df_beams = []\n",
        "oa_plots = []\n",
        "\n",
        "# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
        "# This function will request the 6 beams data using OpenAltimetry's API\n",
        "def getPhotonData():\n",
        "    series = []\n",
        "    b_url = OA_API_URL + '&client=jupyter'\n",
        "    print('Requesting data from OA')\n",
        "    r = requests.get(b_url)\n",
        "    data = r.json()\n",
        "    return data\n",
        "\n",
        "        \n",
        "photon_cloud = getPhotonData()\n",
        "\n",
        "# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
        "for beam in photon_cloud:\n",
        "    print('Reading data for beam: ' + beam['beam_name'])\n",
        "\n",
        "    for photons in beam['series']:\n",
        "        if any(word in photons['name'] for word in OA_PHOTON_CONFIDENCE):\n",
        "            series = []\n",
        "            for p in photons['data']:\n",
        "                series.append({\n",
        "                    'lat': p[0],\n",
        "                    'lon': p[1],\n",
        "                    'h': p[2],\n",
        "                    'conf': photons['name']\n",
        "                })\n",
        "            if (len(series) > 0):\n",
        "                df = pd.DataFrame.from_dict(series)\n",
        "                df.name = beam['beam_name'] + ' ' + photons['name']\n",
        "                df_beams.append(df)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "widf0ochM-5a",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# for i in range(len(df_beams)):\n",
        "#   print(df_beams[i].conf.unique())\n",
        "#   print(len(df_beams[i]))\n",
        "\n",
        "all = pd.concat(df_beams, axis=0, join='outer', ignore_index=False)\n",
        "print(all.head(5))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "e8wt9ubZzqtS",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "DATA_PATH = '/content/Data/'\n",
        "name = URL_3 + URL_4 + URL_5 + '.csv'\n",
        "all.to_csv(DATA_PATH + name)\n",
        "files.download('/content/Data/' + name)"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "rP3cd_nAD1oU",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "from zipfile import ZipFile\n",
        "import os\n",
        "from os.path import basename\n",
        "\n",
        "dirName = '/content/drive/My Drive/Van Data/'\n",
        "# create a ZipFile object\n",
        "with ZipFile('/content/drive/My Drive/Van Data files.zip', 'w') as zipObj:\n",
        "  # Iterate over all the files in directory\n",
        "  for folderName, subfolders, filenames in os.walk(dirName):\n",
        "    for filename in filenames:\n",
        "      #create complete filepath of file in directory\n",
        "      filePath = os.path.join(folderName, filename)\n",
        "      # Add file to zip\n",
        "      zipObj.write(filePath, basename(filePath))"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "OyZ1E7JoVlRd",
        "colab_type": "text"
      },
      "source": [
        "# Practical section\n",
        "\n",
        "In this part of the code, I mixed all things. Now it's enough to specify the time-range, track IDs and location BBox to download everything.\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Oi4ms1q1Rn7s",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "import pandas as pd\n",
        "import json\n",
        "import requests\n",
        "import plotly.graph_objects as go\n",
        "from plotly.offline import iplot\n",
        "import numpy as np\n",
        "import pickle\n",
        "from datetime import timedelta, date\n",
        "from google.colab import files\n",
        "\n",
        "\n",
        "\n",
        "# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
        "def daterange(start_date, end_date):\n",
        "    for n in range(int ((end_date - start_date).days)):\n",
        "        yield start_date + timedelta(n)\n",
        "\n",
        "DATE = []\n",
        "start_date = date(2019, 1, 1)\n",
        "end_date = date(2020, 6, 6)\n",
        "\n",
        "for single_date in daterange(start_date, end_date):\n",
        "    # print(single_date.strftime(\"%Y-%m-%d\"))\n",
        "    DATE.append(single_date.strftime(\"%Y-%m-%d\"))\n",
        "  \n",
        "\n",
        "\n",
        "# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
        "def getPhotonData():\n",
        "    series = []\n",
        "    b_url = OA_API_URL + '&client=jupyter'\n",
        "    # print('Requesting data from OA')\n",
        "    r = requests.get(b_url)\n",
        "    data = r.json()\n",
        "    return data\n",
        "\n",
        "\n",
        "\n",
        "# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
        "OA_PHOTON_CONFIDENCE = ['Noise', 'Buffer', 'Low', 'Medium', 'High']\n",
        "\n",
        "Position = [-.5375, 51.2175, 0.2864, 51.7311]\n",
        "\n",
        "tracks = [16,237,458,679,900,1182]\n",
        "\n",
        "counter = 0\n",
        "m = 0\n",
        "for date in DATE:\n",
        "  for trackID in tracks:    # range(1, 1388)\n",
        "    print(m)\n",
        "    m += 1\n",
        "\n",
        "    URL_1 = 'https://openaltimetry.org/data/api/icesat2/'\n",
        "    URL_2 = 'atl03' + '?&'   # 'atl08'\n",
        "    URL_3 = 'minx=' + str(Position[0]) + '&' + 'miny=' + str(Position[1]) + '&' + \\\n",
        "            'maxx=' + str(Position[2]) + '&' + 'maxy=' + str(Position[3]) + '&'\n",
        "    URL_4 = 'date=' + date\n",
        "    URL_5 = '&trackId=' + str(trackID)\n",
        "    URL_6 = '&beamName=gt3r&beamName=gt3l&beamName=gt2r&beamName=gt2l&beamName=gt1r&beamName=gt1l'\n",
        "\n",
        "    URL = URL_1 + URL_2 + URL_3 + URL_4 + URL_5 + URL_6\n",
        "    OA_API_URL = URL\n",
        "\n",
        "    df_beams = []\n",
        "    oa_plots = []\n",
        " \n",
        "    photon_cloud = getPhotonData()\n",
        "\n",
        "    # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
        "    for beam in photon_cloud:\n",
        "        # print('Reading data for beam: ' + beam['beam_name'])\n",
        "\n",
        "        for photons in beam['series']:\n",
        "            if any(word in photons['name'] for word in OA_PHOTON_CONFIDENCE):\n",
        "                series = []\n",
        "                for p in photons['data']:\n",
        "                    series.append({\n",
        "                        'lat': p[0],\n",
        "                        'lon': p[1],\n",
        "                        'h': p[2],\n",
        "                        'conf': photons['name']\n",
        "                    })\n",
        "                if (len(series) > 0):\n",
        "                    df = pd.DataFrame.from_dict(series)\n",
        "                    df.name = beam['beam_name'] + ' ' + photons['name']\n",
        "                    df_beams.append(df)\n",
        "    \n",
        "    if len(df_beams) > 0:\n",
        "      print('Data is getting ready for download...')\n",
        "      print('Number of downloaded files:  ', counter)\n",
        "      counter += 1\n",
        "      \n",
        "      all = pd.concat(df_beams, axis=0, join='outer', ignore_index=False)\n",
        "    \n",
        "      DATA_PATH = '/content/drive/My Drive/'\n",
        "      name = URL_3 + URL_4 + URL_5 + '.csv'\n",
        "      all.to_csv(DATA_PATH + name)\n",
        "\n",
        "\n",
        "\n",
        "# >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n",
        "from zipfile import ZipFile\n",
        "import os\n",
        "from os.path import basename\n",
        "\n",
        "dirName = '/content/drive/My Drive/London Data/'\n",
        "# create a ZipFile object\n",
        "with ZipFile('/content/drive/My Drive/London Data files.zip', 'w') as zipObj:\n",
        "  # Iterate over all the files in directory\n",
        "  for folderName, subfolders, filenames in os.walk(dirName):\n",
        "    for filename in filenames:\n",
        "      #create complete filepath of file in directory\n",
        "      filePath = os.path.join(folderName, filename)\n",
        "      # Add file to zip\n",
        "      zipObj.write(filePath, basename(filePath))\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "sKsZGkIu9TXe",
        "colab_type": "text"
      },
      "source": [
        "# NSIDC Data Download Script\n",
        "Tested in Python 2.7 and Python 3.4, 3.6, 3.7\n",
        "\n",
        "\n",
        "\n",
        "To run the script at a Linux, macOS, or Cygwin command-line terminal:\n",
        "\n",
        "  `$ python nsidc-data-download.py`\n",
        "\n",
        "On Windows, open Start menu -> Run and type cmd. Then type:\n",
        "    \n",
        "    `python nsidc-data-download.py`\n",
        "\n",
        "\n",
        "The script will first search Earthdata for all matching files.\n",
        "You will then be prompted for your Earthdata username/password\n",
        "and the script will download the matching files.\n",
        "\n",
        "\n",
        "If you wish, you may store your Earthdata username/password in a `.netrc` file in your $HOME directory and the script will automatically attempt to read this file. The .netrc file should have the following format:\n",
        "\n",
        "   `machine urs.earthdata.nasa.gov login myusername password mypassword`\n",
        "\n",
        "where '`myusername`' and '`mypassword`' are your Earthdata credentials."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "UVFL23o5iFNd",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "# !/usr/bin/env python\n",
        "# ------------------------------------------------------------------------------\n",
        "# NSIDC Data Download Script\n",
        "# Tested in Python 2.7 and Python 3.4, 3.6, 3.7\n",
        "\n",
        "# To run the script at a Linux, macOS, or Cygwin command-line terminal:\n",
        "#   $ python nsidc-data-download.py\n",
        "\n",
        "# On Windows, open Start menu -> Run and type cmd. Then type:\n",
        "#     python nsidc-data-download.py\n",
        "\n",
        "# The script will first search Earthdata for all matching files.\n",
        "# You will then be prompted for your Earthdata username/password\n",
        "# and the script will download the matching files.\n",
        "\n",
        "# If you wish, you may store your Earthdata username/password in a .netrc\n",
        "# file in your $HOME directory and the script will automatically attempt to\n",
        "# read this file. The .netrc file should have the following format:\n",
        "#    machine urs.earthdata.nasa.gov login myusername password mypassword\n",
        "# where 'myusername' and 'mypassword' are your Earthdata credentials.\n",
        "\n",
        "from __future__ import print_function\n",
        "\n",
        "import base64\n",
        "import itertools\n",
        "import json\n",
        "import netrc\n",
        "import ssl\n",
        "import sys\n",
        "from getpass import getpass\n",
        "\n",
        "try:\n",
        "    from urllib.parse import urlparse\n",
        "    from urllib.request import urlopen, Request, build_opener, HTTPCookieProcessor\n",
        "    from urllib.error import HTTPError, URLError\n",
        "except ImportError:\n",
        "    from urlparse import urlparse\n",
        "    from urllib2 import urlopen, Request, HTTPError, URLError, build_opener, HTTPCookieProcessor\n",
        "\n",
        "short_name = 'ATL03'\n",
        "version = '003'\n",
        "time_start = '2018-10-31T00:00:00Z'\n",
        "time_end = '2018-11-24T23:59:59Z'\n",
        "bounding_box = '44,23,47,26'\n",
        "polygon = ''\n",
        "filename_filter = ''\n",
        "url_list = []\n",
        "\n",
        "CMR_URL = 'https://cmr.earthdata.nasa.gov'\n",
        "URS_URL = 'https://urs.earthdata.nasa.gov'\n",
        "CMR_PAGE_SIZE = 2000\n",
        "CMR_FILE_URL = ('{0}/search/granules.json?provider=NSIDC_ECS'\n",
        "                '&sort_key[]=start_date&sort_key[]=producer_granule_id'\n",
        "                '&scroll=true&page_size={1}'.format(CMR_URL, CMR_PAGE_SIZE))\n",
        "\n",
        "\n",
        "def get_username():\n",
        "    username = ''\n",
        "\n",
        "    # For Python 2/3 compatibility:\n",
        "    try:\n",
        "        do_input = raw_input  # noqa\n",
        "    except NameError:\n",
        "        do_input = input\n",
        "\n",
        "    while not username:\n",
        "        try:\n",
        "            username = do_input('Earthdata username: ')\n",
        "        except KeyboardInterrupt:\n",
        "            quit()\n",
        "    return username\n",
        "\n",
        "\n",
        "def get_password():\n",
        "    password = ''\n",
        "    while not password:\n",
        "        try:\n",
        "            password = getpass('password: ')\n",
        "        except KeyboardInterrupt:\n",
        "            quit()\n",
        "    return password\n",
        "\n",
        "\n",
        "def get_credentials(url):\n",
        "    \"\"\"Get user credentials from .netrc or prompt for input.\"\"\"\n",
        "    credentials = None\n",
        "    errprefix = ''\n",
        "    try:\n",
        "        info = netrc.netrc()\n",
        "        username, account, password = info.authenticators(urlparse(URS_URL).hostname)\n",
        "        errprefix = 'netrc error: '\n",
        "    except Exception as e:\n",
        "        if (not ('No such file' in str(e))):\n",
        "            print('netrc error: {0}'.format(str(e)))\n",
        "        username = None\n",
        "        password = None\n",
        "\n",
        "    while not credentials:\n",
        "        if not username:\n",
        "            username = get_username()\n",
        "            password = get_password()\n",
        "        credentials = '{0}:{1}'.format(username, password)\n",
        "        credentials = base64.b64encode(credentials.encode('ascii')).decode('ascii')\n",
        "\n",
        "        if url:\n",
        "            try:\n",
        "                req = Request(url)\n",
        "                req.add_header('Authorization', 'Basic {0}'.format(credentials))\n",
        "                opener = build_opener(HTTPCookieProcessor())\n",
        "                opener.open(req)\n",
        "            except HTTPError:\n",
        "                print(errprefix + 'Incorrect username or password')\n",
        "                errprefix = ''\n",
        "                credentials = None\n",
        "                username = None\n",
        "                password = None\n",
        "\n",
        "    return credentials\n",
        "\n",
        "\n",
        "def build_version_query_params(version):\n",
        "    desired_pad_length = 3\n",
        "    if len(version) > desired_pad_length:\n",
        "        print('Version string too long: \"{0}\"'.format(version))\n",
        "        quit()\n",
        "\n",
        "    version = str(int(version))  # Strip off any leading zeros\n",
        "    query_params = ''\n",
        "\n",
        "    while len(version) <= desired_pad_length:\n",
        "        padded_version = version.zfill(desired_pad_length)\n",
        "        query_params += '&version={0}'.format(padded_version)\n",
        "        desired_pad_length -= 1\n",
        "    return query_params\n",
        "\n",
        "\n",
        "def build_cmr_query_url(short_name, version, time_start, time_end,\n",
        "                        bounding_box=None, polygon=None,\n",
        "                        filename_filter=None):\n",
        "    params = '&short_name={0}'.format(short_name)\n",
        "    params += build_version_query_params(version)\n",
        "    params += '&temporal[]={0},{1}'.format(time_start, time_end)\n",
        "    if polygon:\n",
        "        params += '&polygon={0}'.format(polygon)\n",
        "    elif bounding_box:\n",
        "        params += '&bounding_box={0}'.format(bounding_box)\n",
        "    if filename_filter:\n",
        "        option = '&options[producer_granule_id][pattern]=true'\n",
        "        params += '&producer_granule_id[]={0}{1}'.format(filename_filter, option)\n",
        "    return CMR_FILE_URL + params\n",
        "\n",
        "\n",
        "def cmr_download(urls):\n",
        "    \"\"\"Download files from list of urls.\"\"\"\n",
        "    if not urls:\n",
        "        return\n",
        "\n",
        "    url_count = len(urls)\n",
        "    print('Downloading {0} files...'.format(url_count))\n",
        "    credentials = None\n",
        "\n",
        "    for index, url in enumerate(urls, start=1):\n",
        "        if not credentials and urlparse(url).scheme == 'https':\n",
        "            credentials = get_credentials(url)\n",
        "\n",
        "        filename = url.split('/')[-1]\n",
        "        print('{0}/{1}: {2}'.format(str(index).zfill(len(str(url_count))),\n",
        "                                    url_count,\n",
        "                                    filename))\n",
        "\n",
        "        try:\n",
        "            # In Python 3 we could eliminate the opener and just do 2 lines:\n",
        "            # resp = requests.get(url, auth=(username, password))\n",
        "            # open(filename, 'wb').write(resp.content)\n",
        "            req = Request(url)\n",
        "            if credentials:\n",
        "                req.add_header('Authorization', 'Basic {0}'.format(credentials))\n",
        "            opener = build_opener(HTTPCookieProcessor())\n",
        "            data = opener.open(req).read()\n",
        "            open(filename, 'wb').write(data)\n",
        "        except HTTPError as e:\n",
        "            print('HTTP error {0}, {1}'.format(e.code, e.reason))\n",
        "        except URLError as e:\n",
        "            print('URL error: {0}'.format(e.reason))\n",
        "        except IOError:\n",
        "            raise\n",
        "        except KeyboardInterrupt:\n",
        "            quit()\n",
        "\n",
        "\n",
        "def cmr_filter_urls(search_results):\n",
        "    \"\"\"Select only the desired data files from CMR response.\"\"\"\n",
        "    if 'feed' not in search_results or 'entry' not in search_results['feed']:\n",
        "        return []\n",
        "\n",
        "    entries = [e['links']\n",
        "               for e in search_results['feed']['entry']\n",
        "               if 'links' in e]\n",
        "    # Flatten \"entries\" to a simple list of links\n",
        "    links = list(itertools.chain(*entries))\n",
        "\n",
        "    urls = []\n",
        "    unique_filenames = set()\n",
        "    for link in links:\n",
        "        if 'href' not in link:\n",
        "            # Exclude links with nothing to download\n",
        "            continue\n",
        "        if 'inherited' in link and link['inherited'] is True:\n",
        "            # Why are we excluding these links?\n",
        "            continue\n",
        "        if 'rel' in link and 'data#' not in link['rel']:\n",
        "            # Exclude links which are not classified by CMR as \"data\" or \"metadata\"\n",
        "            continue\n",
        "\n",
        "        if 'title' in link and 'opendap' in link['title'].lower():\n",
        "            # Exclude OPeNDAP links--they are responsible for many duplicates\n",
        "            # This is a hack; when the metadata is updated to properly identify\n",
        "            # non-datapool links, we should be able to do this in a non-hack way\n",
        "            continue\n",
        "\n",
        "        filename = link['href'].split('/')[-1]\n",
        "        if filename in unique_filenames:\n",
        "            # Exclude links with duplicate filenames (they would overwrite)\n",
        "            continue\n",
        "        unique_filenames.add(filename)\n",
        "\n",
        "        urls.append(link['href'])\n",
        "\n",
        "    return urls\n",
        "\n",
        "\n",
        "def cmr_search(short_name, version, time_start, time_end,\n",
        "               bounding_box='', polygon='', filename_filter=''):\n",
        "    \"\"\"Perform a scrolling CMR query for files matching input criteria.\"\"\"\n",
        "    cmr_query_url = build_cmr_query_url(short_name=short_name, version=version,\n",
        "                                        time_start=time_start, time_end=time_end,\n",
        "                                        bounding_box=bounding_box,\n",
        "                                        polygon=polygon, filename_filter=filename_filter)\n",
        "    print('Querying for data:\\n\\t{0}\\n'.format(cmr_query_url))\n",
        "\n",
        "    cmr_scroll_id = None\n",
        "    ctx = ssl.create_default_context()\n",
        "    ctx.check_hostname = False\n",
        "    ctx.verify_mode = ssl.CERT_NONE\n",
        "\n",
        "    try:\n",
        "        urls = []\n",
        "        while True:\n",
        "            req = Request(cmr_query_url)\n",
        "            if cmr_scroll_id:\n",
        "                req.add_header('cmr-scroll-id', cmr_scroll_id)\n",
        "            response = urlopen(req, context=ctx)\n",
        "            if not cmr_scroll_id:\n",
        "                # Python 2 and 3 have different case for the http headers\n",
        "                headers = {k.lower(): v for k, v in dict(response.info()).items()}\n",
        "                cmr_scroll_id = headers['cmr-scroll-id']\n",
        "                hits = int(headers['cmr-hits'])\n",
        "                if hits > 0:\n",
        "                    print('Found {0} matches.'.format(hits))\n",
        "                else:\n",
        "                    print('Found no matches.')\n",
        "            search_page = response.read()\n",
        "            search_page = json.loads(search_page.decode('utf-8'))\n",
        "            url_scroll_results = cmr_filter_urls(search_page)\n",
        "            if not url_scroll_results:\n",
        "                break\n",
        "            if hits > CMR_PAGE_SIZE:\n",
        "                print('.', end='')\n",
        "                sys.stdout.flush()\n",
        "            urls += url_scroll_results\n",
        "\n",
        "        if hits > CMR_PAGE_SIZE:\n",
        "            print()\n",
        "        return urls\n",
        "    except KeyboardInterrupt:\n",
        "        quit()\n",
        "\n",
        "\n",
        "def main():\n",
        "    global short_name, version, time_start, time_end, bounding_box, \\\n",
        "        polygon, filename_filter, url_list\n",
        "\n",
        "    # Supply some default search parameters, just for testing purposes.\n",
        "    # These are only used if the parameters aren't filled in up above.\n",
        "    if 'short_name' in short_name:\n",
        "        short_name = 'MOD10A2'\n",
        "        version = '6'\n",
        "        time_start = '2001-01-01T00:00:00Z'\n",
        "        time_end = '2019-03-07T22:09:38Z'\n",
        "        bounding_box = ''\n",
        "        polygon = '-109,37,-102,37,-102,41,-109,41,-109,37'\n",
        "        filename_filter = '*A2019*'  # '*2019010204*'\n",
        "        url_list = []\n",
        "\n",
        "    if not url_list:\n",
        "        url_list = cmr_search(short_name, version, time_start, time_end,\n",
        "                              bounding_box=bounding_box,\n",
        "                              polygon=polygon, filename_filter=filename_filter)\n",
        "\n",
        "    cmr_download(url_list)\n",
        "\n",
        "\n",
        "if __name__ == '__main__':\n",
        "    main()\n"
      ],
      "execution_count": 0,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "N_HSCO6jo1b-",
        "colab_type": "code",
        "colab": {}
      },
      "source": [
        "from zipfile import ZipFile\n",
        "import os\n",
        "from os.path import basename\n",
        "\n",
        "dirName = '/content/drive/My Drive/London Data/'\n",
        "# create a ZipFile object\n",
        "with ZipFile('/content/drive/My Drive/London Data files.zip', 'w') as zipObj:\n",
        "  # Iterate over all the files in directory\n",
        "  for folderName, subfolders, filenames in os.walk(dirName):\n",
        "    for filename in filenames:\n",
        "      #create complete filepath of file in directory\n",
        "      filePath = os.path.join(folderName, filename)\n",
        "      # Add file to zip\n",
        "      zipObj.write(filePath, basename(filePath))"
      ],
      "execution_count": 0,
      "outputs": []
    }
  ]
}