#!/usr/bin/python

#                                 Panoramio downloader
#                                                           by Tobias Weyand (weyand@umic.rwth-aachen.de)
#                                                                            Mobile Multimedia Processing
#                                                                                  RWTH Aachen University
#
# This script retrieves the meta-information of all images on Panoramio that are within a given bounding
# box. The resulting JSON file contains (among other things) the URLs of the original size photos that can
# then be downloaded using wget.
#
# WARNINGS:
#   - This script is very multithreaded! It may spawn up to 80 threads. The individual threads
#     just do a HTTP query though.
#   - Panoramio is cool with 10,000 queries per day from a single user. This script does not necessarily
#     stay below this.
#
# So why is this tool so long and complicated?
#
# Well, the Panoramio API is weird. It always gives you the top 1000-7000 photos for the current zoom level
# (bounding box size). There seems to be a minimum bounding box size of 0.0005 degrees (latitude/
# longitude). When using a smaller bounding box, the result will still be images from a bounding box of
# 0.0005 degrees. So, in order to get all photos in some bounding box, it would make sense to divide the
# desired bounding box into bounding boxes of the minimal size and traverse these. But then, some photos
# that are only returned for bigger bounding boxes are missing. So, a hierarchical approach is necessary
# that descends a bounding box hierarchy until the minimum size is reached.

import sys
import os
import urllib
import json
import threading
import time
from hashlib import md5
from math import ceil
import simplejson
jdecoder = simplejson.JSONDecoder();

files_per_page = 100   # photos per result page; used for the "from="/"to=" query window
dataset = "full"       # value of the "set=" query parameter
size = "original"      # value of the "size=" query parameter

# Example bounding boxes (longitude/latitude in degrees).  Exactly one of
# these four-variable groups should be active at a time.

# Paris inner city
# NOTE(review): Paris is at ~2.29E; the minus sign in minx looks like a typo.
#minx = -2.290794
#miny = 48.837071
#maxx = 2.365606
#maxy = 48.875211

# Aachen
#minx = 6.057552
#miny = 50.764409
#maxx = 6.098337
#maxy = 50.780902

# Bruges
# NOTE(review): these coordinates (~121E, ~30N) are near Shanghai, not Bruges.
#minx = 121.05
#miny = 30.41
#maxx = 122.06
#maxy = 31.45

# New York
minx = -79.749756
miny = 41.264044
maxx = -73.366699
maxy = 44.264044

#Los Angeles

#minx = -118.666489
#miny = 33.922851
#maxx = -118.161118
#maxy = 34.329828

#Shanghai
#minx = 120.956314
#miny = 30.692250
#maxx = 121.956070
#maxy = 31.861230

#Beijing
#minx = 115.580841
#miny = 39.525230
#maxx = 117.294708
#maxy = 40.963308

total_queries = 0      # running total of HTTP requests sent to the API
response_hashes = []   # md5 digests of first result pages already seen (duplicate detection)

class QueryThread ( threading.Thread ):
    def __init__( self ):
        threading.Thread.__init__( self )
        self.response = 0
        self.arg = ()

    def run( self ):
        query = "http://www.panoramio.com/map/get_panoramas.php?order=upload_date&set=%s&from=%d&to=%d&minx=%f&miny=%f&maxx=%f&maxy=%f&size=%s" % self.arg

        flag = False

        while flag == False:
            try:
                self.response = urllib.urlopen( query ).readline()
                jdecoder.decode( self.response )
                flag = True
            except simplejson.decoder.JSONDecodeError:
                print 'tired...need to sleep'
                time.sleep( 10 )


def json_load( str ):
    """Parse one line of a Panoramio JSON reply.

    Returns the decoded dict, or the stub ``{"count": 0, "photos": []}``
    when the reply cannot be parsed, so callers never have to special-case
    a failed download.  (The parameter keeps its original name ``str`` --
    it shadows the builtin -- to stay call-compatible.)
    """
    try:
        return json.loads( str )
    except ( ValueError, TypeError ):
        # Narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit.  ValueError covers malformed JSON;
        # TypeError covers a QueryThread whose response is still 0.
        print( "JSON exception! response was:" )
        print( str )
        return { "count": 0, "photos": [] }


def get_rect( minx, miny, maxx, maxy ):
    global total_queries
    global dataset
    global size
    global files_per_page

    from_idx = 0
    to_idx = files_per_page
    has_more = True
    photos = []
    page = 1

    # Get the first page
    query = "http://www.panoramio.com/map/get_panoramas.php?order=upload_date&set=%s&from=%d&to=%d&minx=%f&miny=%f&maxx=%f&maxy=%f&size=%s" % ( dataset, from_idx, to_idx, minx, miny, maxx, maxy, size )
    #print query



    flag = False

    while flag == False:
        try:
            response = urllib.urlopen( query ).readline()
            jdecoder.decode( response )
            flag = True
        except simplejson.decoder.JSONDecodeError:
            print 'tired...need to sleep'
            time.sleep( 10 )

    response_json = json_load( response )

    total_queries += 1

    # Hash the first page of the reply and exit if we have alreadly seen it
    response_hash = md5( response ).digest()
    if response_hash in response_hashes:
        print "Seen the begining of this response before. Skipping the rest." # We're bad listeners :-)
        return []
    else:
        response_hashes.append( response_hash )

    total_files = response_json["count"]
    photos, n_new = append_new_photos( photos, response_json["photos"] )

    # Otherwise spawn threads that get the rest of the reply
    n_threads = int( ceil( float( response_json["count"] ) / 100.0 ) ) - 1

    print "Spawning", n_threads, "spiders"
    total_queries += n_threads

    spiders = []
    for i in range( 1, n_threads + 1 ):
        t = QueryThread()
        from_idx = files_per_page * i
        to_idx = files_per_page * ( i + 1 )
        t.arg = ( dataset, from_idx, to_idx, minx, miny, maxx, maxy, size )
        t.start()
        spiders.append( t )

    while threading.active_count() > 1:
        #print "Waiting for spiders to return ..."
        time.sleep( 1 )

    for t in spiders:
        response_json = json_load( t.response )
        photos, n_new = append_new_photos( photos, response_json["photos"] )

    #outfile = open("dump_%d.json" % depth,"w")
    #outfile.write(json.dumps(photos, sort_keys=True, indent=4))
    #outfile.close()

    print total_files, "photos"
    return photos

def append_new_photos( photos, new_photos ):
    """Append the photos from new_photos whose photo_id is not yet in photos.

    Duplicates within new_photos itself are also skipped.  Mutates and
    returns photos, together with the number of newly appended entries.
    Returns the tuple (photos, n_new).
    """
    # PERF: set membership instead of the original list scan, which made
    # this O(len(photos) * len(new_photos)).
    seen = set( p["photo_id"] for p in photos )
    n_new = 0
    for photo in new_photos:
        pid = photo["photo_id"]
        if pid not in seen:
            photos.append( photo )
            seen.add( pid )
            n_new += 1
    return ( photos, n_new )

# Depth first search, not used in this program but left in for testing.
# Does a lot of unnecessary queries, but gets everything.
def recurse_bbox( minx, miny, maxx, maxy, photos ):
    print "box:", minx, miny, maxx, maxy, "(w:", maxx - minx, ",h:", maxy - miny, ")"
    photos_res = get_rect( minx, miny, maxx, maxy )
    photos, n_new = append_new_photos( photos, photos_res )
    if n_new > 0: # Recurse as long as new photos are found
        print n_new, "new photos -> recusring"
        dx = ( maxx - minx ) / 2
        dy = ( maxy - miny ) / 2
        photos = recurse_bbox( minx, miny, minx + dx, miny + dy, photos )
        photos = recurse_bbox( minx + dx, miny, maxx, miny + dy, photos )
        photos = recurse_bbox( minx, miny + dy, minx + dx, maxy, photos )
        photos = recurse_bbox( minx + dx, miny + dy, maxx, maxy, photos )
    else:
        print "No new photos."
    print "Going up."
    return photos


def recurse_bbox2( minx, miny, maxx, maxy, photos ):
    nblock = 4
    print "box:", minx, miny, maxx, maxy, "(w:", maxx - minx, ",h:", maxy - miny, ")"
    photos_res = get_rect( minx, miny, maxx, maxy )
    photos, n_new = append_new_photos( photos, photos_res )
    if n_new > 0: # Recurse as long as new photos are found
        print n_new, "new photos -> recusring"

        dx = ( maxx - minx ) / nblock
        dy = ( maxy - miny ) / nblock

        for i in range( 0, nblock ):
            for j in range( 0, nblock ):
                photos = recurse_bbox( minx, miny, minx + i * dx, miny + j * dy, photos )
    else:
        print "No new photos."
    print "Going up."
    return photos

def spider_breadth_first( minx, miny, maxx, maxy ):
    photos = []
    queue = [( 0, 0, 0, 0 )] # queue item: x, y, layer, no

    dx = maxx - minx
    dy = maxy - miny

    last_n_photos = 0

    while len( queue ) > 0:

        # Dequeue top item
        x, y, l, no = queue[0]
        queue = queue[1:]

        # Calculate cell coordinates
        dx_layer = dx / ( 2 ** l )
        dy_layer = dy / ( 2 ** l )

        minx_cell = minx + x * dx_layer
        miny_cell = miny + y * dy_layer
        maxx_cell = minx + ( x + 1 ) * dx_layer
        maxy_cell = miny + ( y + 1 ) * dy_layer

        print "lon:", minx_cell, "lat:", miny_cell, "w:", dx_layer, ",h:", dy_layer, ",layer:", l, ",no:", no

        # Query cell
        photos_res = get_rect( minx_cell, miny_cell, maxx_cell, maxy_cell )
        photos, n_new = append_new_photos( photos, photos_res )

        print n_new, "photos are new"

        # If there were new results, enqueue the cell's children
        if n_new > 0:
            queue.append( ( 2 * x, 2 * y, l + 1, 0 ) )
            if  True: #dx_layer > 0.0005:
                print "making 4 children on layer", l + 1
                queue.append( ( 2 * x + 1, 2 * y, l + 1, 1 ) )
                queue.append( ( 2 * x, 2 * y + 1, l + 1, 2 ) )
                queue.append( ( 2 * x + 1, 2 * y + 1, l + 1, 3 ) )
            else:
                print "minimum b-box size reached. Making only one child on layer", l + 1

    return photos

#if len( sys.argv ) != 2:
#    print "Syntax:", sys.argv[0], "<output.json>"
#    sys.exit( 0 )

def generateList(start, end, length):
    """Return the grid boundaries between start and end with step length.

    The two endpoints are ordered first if given in reverse.  The result
    holds start+length, start+2*length, ... (strictly below end) and always
    finishes with end itself, so the last interval may be shorter than
    length.  The start value itself is never included.
    """
    if start > end:
        start, end = end, start
    boundaries = []
    mark = start + length
    while mark < end:
        boundaries.append(mark)
        mark += length
    boundaries.append(end)
    return boundaries

#outfilename = 'plist-new.txt'

print "Starting download"

# The breadth-first spider is disabled; instead the target area is walked
# as a fixed grid of "length"-degree tiles (see the nested loop below).
#photos = spider_breadth_first( minx, miny, maxx, maxy )
length = 0.3                  # tile edge length in degrees
previousLongitude = minx      # west edge of the current tile column
previousLatitude = miny       # south edge of the current tile
count = 1                     # running index used in the output file names

print generateList(minx, maxx, length)
print generateList(miny, maxy, length)


# Walk the grid column by column.  Each tile spans from the previous grid
# line to the current one; it is spidered depth-first and its photo list
# is dumped into its own JSON file.
for longitude in generateList(minx, maxx, length):
    for latitude in generateList(miny, maxy, length):
        photos = []
        photos = recurse_bbox( previousLongitude, previousLatitude, longitude, latitude, photos )
        # NOTE(review): output path is hard-coded to a Windows drive.
        outfile = open( "D:/wt/pfile/newYorkList-" + str(count) + ".txt", "w" )
        outfile.write( json.dumps( photos, sort_keys = True, indent = 4 ) )
        outfile.flush()
        outfile.close()
        print str(count) + ' done.'
        count += 1
        print "previousLatitude",previousLatitude
        print "latitude", latitude
        previousLatitude = latitude
    previousLatitude = miny    # restart at the south edge for the next column
    previousLongitude = longitude

# NOTE(review): "photos" is reset for every tile, so this reports only the
# size of the *last* tile, not the total number of spidered photos.
print len( photos ), "spidered"
print "Total queries:", total_queries
