#!/usr/bin/env python
#

import os
import urllib
import logging
from google.appengine.ext import blobstore
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers

from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import users

from google.appengine.ext import db
from model import ImageUrlMap

# WARNING: uploadmgr.html is a STATIC file that contains client-side template
# generation; its {{ }} syntax clashes with GAE templates.
from google.appengine.ext.webapp import template


#from google.appengine.dist import use_library
#use_library('django', '1.0')

# True when running under the local dev_appserver rather than production.
DEV_SERVER = os.environ['SERVER_SOFTWARE'].find('Development') >= 0
# Google's Picasa Image Service uses this parameter for max dimension size
THUMBNAIL_MAX_DIM_PARAM = "=s100"

# This is a server-enforced max size. There is also a client-side (javascript)
# check in application.js. You should adjust that size too.
# TODO change the max file upload size to what is reasonable for your need
MAX_FILE_UPLOAD_SIZE = 500*1024  # = None if you want unlimited

# Downloading csv files can be cpu expensive. Limit how many rows we'll return
# in a document.
# TODO: Change your limit on CSV rows
MAX_ROWS_TO_DOWNLOAD = 2000

class UploadFileHandler(blobstore_handlers.BlobstoreUploadHandler):
    """Handles the blobstore upload-complete callback for blueimp uploads."""

    def post(self, batchidstr):
        """
        Called by the blobstore service after the streamed upload completes.

        batchidstr: numeric string tagging this batch of uploads (embedded in
        the upload url created by UploadUrlJsonHandler).

        Records an ImageUrlMap row for the blob, then redirects (a blobstore
        upload handler MUST redirect) to /blueimp/<encoded json>, which
        echoes back the json document blueimp expects.
        """
        import urlparse
        from google.appengine.api import images
        from model import ImageUrlMap

        blob_info = self.get_uploads('files[]')[0]

        # this call can take several seconds
        imgserving_url = images.get_serving_url(blob_info.key())

        # Remove the scheme part from the url so http/https is transparent,
        # e.g. http://domain/path becomes domain/path (the browser then uses
        # the referring page's scheme).
        # FIX: the old code compared 5/6 char slices against the 7/8 char
        # prefixes 'http://'/'https://', so the strip never actually ran.
        lowered = imgserving_url.lower()
        if lowered.startswith('http://'):
            imgserving_url = imgserving_url[len('http://'):]
        elif lowered.startswith('https://'):  # shouldn't happen
            imgserving_url = imgserving_url[len('https://'):]

        imgmap = ImageUrlMap(blobstoreinfo=blob_info.key(),
                             imgserving_url=imgserving_url,
                             filename=blob_info.filename,
                             filesize=blob_info.size,
                             batchid=int(batchidstr))
        imgmap.put()

        baseuri = urlparse.urlparse(self.request.url)
        deleteuri = ("//%s/del/%d/%s" %
                    (baseuri.netloc, imgmap.key().id(),
                     urllib.quote(imgmap.filename)))

        # thumbnail dimensions are controlled by url param
        thumbnailuri = imgserving_url + THUMBNAIL_MAX_DIM_PARAM

        # We cannot return an actual document here. We *MUST* do a redirect.
        # We want to pass data from this function to the redirect url.
        # We could just pass the key from imgmap and re-read, but better
        # to just encode everything here. Risk is that the url will be > 2048
        # and not work on IE.
        # This is the json data blueimp expects
        json_response = (
            ('[{"name":"%s","id":%d,"size":%d,"url":"%s",'+
                '"batchid":%s,'+
                '"thumbnail_url":"%s","delete_url":"%s", "delete_type":"POST"}]') %
            (blob_info.filename, imgmap.key().id(),blob_info.size, imgserving_url,
                batchidstr,
                thumbnailuri, deleteuri))

        # double-encode so the json survives being a url path segment
        urlparam = json_response.encode('base64').encode('hex')
        self.redirect('/blueimp/%s' % urlparam)

# blobstore's upload logic requires a redirect
class BlueImpUploadDoneHandler(webapp.RequestHandler):
    """Unpacks the hex+base64 encoded json produced by UploadFileHandler
    and serves it to the client as the json upload result."""

    def get(self, encodedparam):
        # reverse the double-encoding applied before the redirect
        payload = encodedparam.decode('hex')
        payload = payload.decode('base64')
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(payload)

# this is for use with blueimp jquery uploader
class UploadUrlJsonHandler(webapp.RequestHandler):
    """Returns a fresh blobstore upload url as a json string."""

    def get(self):
        batchidstr = self.request.get('batchid')[:20] or '0'
        # Blob uploading is a separate service. create_upload_url() returns
        # a url that does all the complexity of accepting the streamed data;
        # the path we pass in ("/upload/<batchid>/" -> UploadFileHandler) is
        # the completion redirect the service calls when the upload is done.
        # NOTE: supplying the max_bytes_total param seems to slow down the
        # create_upload_url() call.
        upload_url = blobstore.create_upload_url(
            "/upload/%s/" % batchidstr,
            max_bytes_total=MAX_FILE_UPLOAD_SIZE)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write('"%s"' % upload_url)

# Given an id and a filename, redirect to the PIS (Picasa image service) url.
# NOTE: One of the key points to hosting images in the PIS is to use the
# distributed domains (e.g. web browsers will load more images at the same
# time when they are spread across different domains). Using this redirect
# kind of defeats this.
class ServeRedirHandler(webapp.RequestHandler):
    def get(self, mapid, filename):
        """301-redirect /i/<id>/<filename> to its stored serving url."""
        imgmap = ImageUrlMap.GetbyIdAndFilename(mapid, urllib.unquote(filename))
        if not (imgmap and imgmap.imgserving_url):
            self.error(404)
            return
        self.redirect(imgmap.imgserving_url, permanent=True)

class ViewerHandler(webapp.RequestHandler):
    """Renders viewer.html: a paged, optionally filtered list of uploads."""

    def get(self):
        import urlparse
        from paging import PagedQuery, PageLinks

        # optional filter params, no UI support (yet) you must enter the
        # parameter by hand.
        batchidfilter = self.request.get('batchid', default_value='')[:16]
        filenamefilter = self.request.get('filename', default_value='')[:255]

        q = ImageUrlMap.all()
        if batchidfilter:
            q.filter("batchid =", int(batchidfilter))
        if filenamefilter:
            q.filter("filename =", filenamefilter)

        pageQ = PagedQuery(q, 10)
        pageQ.order('-datecreated')

        if self.request.get("clear"):
            pageQ.clear()

        pagestr = self.request.get("pg")
        if not pagestr:
            results = pageQ.fetch_page()
            pageint = 0
        else:
            pageint = int(pagestr)
            results = pageQ.fetch_page(pageint)

        # FIX: the old code replaced the root with "?batchid=..." (dropping
        # "/viewer") and never carried the filename filter into the page
        # links, so paging lost the filename filter. Preserve both filters.
        # NOTE(review): assumes PageLinks handles a url_root that already
        # contains a query string, as the original "?batchid=" form implied.
        queryparts = []
        if batchidfilter:
            queryparts.append("batchid=" + urllib.quote(batchidfilter))
        if filenamefilter:
            queryparts.append("filename=" + urllib.quote(filenamefilter))
        pages_url_root = "/viewer"
        if queryparts:
            pages_url_root += "?" + "&".join(queryparts)

        # uses Memcache to reduce load, but still pretty expensive because
        # of page_count()
        expensive_count = pageQ.page_count()
        links = PageLinks(page=pageint, page_count=expensive_count,
                          url_root=pages_url_root,
                          page_field="pg", page_range=10)
        isadmin = users.is_current_user_admin()

        saveaslink = ('dumpdata?%s' % self.request.query_string)

        # if only one page, don't show anything
        pagelinks = links.get_links()
        if len(pagelinks) == 2:   # 2 because there's always "1" and "Next"
            pagelinks = []

        template_values = {
            'ISADMIN' : isadmin,
            'SAVEASLINK' : saveaslink,
            'BATCHID' : batchidfilter,
            'DOMAIN' : urlparse.urlparse(self.request.url).netloc,
            'COUNT': expensive_count,
            'DATA': results,
            'LINKS': pagelinks
        }
        path = os.path.join(os.path.dirname(__file__), 'viewer.html')
        self.response.out.write(template.render(path, template_values))

class GetDataDump(webapp.RequestHandler):
    # stupid web tricks. We want to generate a different filename for the csv
    # file, so we create it here and redirect to /csvfile/filename to do the
    # actual download.
    def get(self):
        batchid = self.request.get('batchid', default_value='')[:16]
        fname = self.request.get('filename', default_value='')[:255]

        # build a descriptive download name from the active filters
        nameparts = ['export_']
        if batchid:
            nameparts.append('_batch_' + batchid)
        if fname:
            nameparts.append('_filename_' + fname)

        self.redirect('/csvfile/%s.csv?%s' %
                      (''.join(nameparts), self.request.query_string))

class SaveTabCSVData(webapp.RequestHandler):
    """Streams up to MAX_ROWS_TO_DOWNLOAD ImageUrlMap rows as a csv file."""

    def get(self, csv_filename):
        """
        csv_filename: the name built by GetDataDump; it only influences the
        browser's save-as name — the data is driven by the query params.
        """
        import csv
        import cStringIO
        import math

        # You really should use the data export feature
        # See tools/downloading_data.txt
        batchidfilter = self.request.get('batchid', default_value='')[:16]
        filenamefilter = self.request.get('filename', default_value='')[:255]

        q = ImageUrlMap.all()
        if batchidfilter:
            q.filter("batchid =", int(batchidfilter))
        if filenamefilter:
            q.filter("filename =", filenamefilter)

        output = cStringIO.StringIO()

        # FIX: the old code passed the column list as csv.writer's *dialect*
        # argument (a TypeError); the intent was clearly a header row.
        csv_writer = csv.writer(output)
        csv_writer.writerow(['datecreated', 'id', 'filename', 'filesize',
                             'batchid', 'imgserving_url'])

        numloops = int(math.ceil(MAX_ROWS_TO_DOWNLOAD / 1000.0))

        for loops in xrange(0, numloops):
            # capped at MAX_ROWS_TO_DOWNLOAD rows (1000 per fetch).
            # Can cost too much cpu, use download tools!
            r = q.fetch(limit=1000)
            if not r:
                break

            for row in r:
                csv_writer.writerow(
                    [row.datecreated, row.key().id(), row.filename,
                     row.filesize, row.batchid, row.imgserving_url])

            # continue the next fetch from where this one stopped
            q.with_cursor(start_cursor=q.cursor())

        self.response.headers['Content-Type'] = 'text/csv'
        self.response.out.write(output.getvalue())
        output.close()


class GetBatchId(webapp.RequestHandler):
    def get(self):
        """
        Hands the upload page a fresh batch id as json.

        We want batch ids generally sequential, so we consume one id from
        the datastore id allocator for ImageUrlMap; gaps in the sequence
        are possible and acceptable.
        """
        key_template = db.Key.from_path('ImageUrlMap', 1)
        start_id, _end_id = db.allocate_ids(key_template, 1)
        self.response.out.write('{"batchid":%d}' % start_id)

class DeepDeleteHandler(webapp.RequestHandler):
    """POST /del/<id>/<filename>: delete an image map row and its blob."""

    def post(self, mapid, filename):
        imgmap = ImageUrlMap.GetbyIdAndFilename(mapid, urllib.unquote(filename))
        if not imgmap:
            # FIX: the old log line dereferenced imgmap.filename on this
            # path, raising AttributeError (imgmap is None) instead of
            # returning the intended 404.
            logging.info("Image Delete failed. No match for id %s, "
                         "filename '%s'", mapid, urllib.unquote(filename))
            self.error(404)
            return
        imgmap.DeepDelete()  # kinda wrong that one has to load to delete
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(mapid)

class DeleteBatchHandler(webapp.RequestHandler):
    """POST /deletebatch: queue a deferred task deleting a whole batch."""

    def post(self):
        from google.appengine.ext import deferred
        from deferredtasks import DeleteBatchIDData
        batchidstr = self.request.get("batchid")
        # FIX: int() raised ValueError (an HTTP 500) on non-numeric input;
        # batch ids are non-negative integers, so validate with isdigit().
        # As before, a missing or zero batchid silently does nothing.
        if batchidstr.isdigit() and int(batchidstr):
            deferred.defer(DeleteBatchIDData, batchidstr)
            self.response.headers['Content-Type'] = 'application/json'
            self.response.out.write('"Delete task started. Rate is 1000/sec"')

class DeleteAllHandler(webapp.RequestHandler):
    """POST /deleteall: queue a deferred task wiping all data, but only
    when the client sent the explicit confirmation field."""

    def post(self):
        from google.appengine.ext import deferred
        from deferredtasks import DeleteAllData

        confirmed = self.request.get("confirm_delete_all") == "deleteall"
        if not confirmed:
            logging.info("confirm field for delete not set.")
            self.response.headers['Content-Type'] = 'application/json'
            self.response.out.write('"Safety check. Not confirmed first."')
            return

        deferred.defer(DeleteAllData)
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write('"Delete task started. Rate is 1000/sec"')

def main():
    """Builds the wsgi route table and runs the application."""
    routes = [
        (r'/upload/([^/]+)?/?', UploadFileHandler),
        (r'/i/([^/]+)?/(.*)', ServeRedirHandler),
        (r'/viewer', ViewerHandler),
        (r'/getuploadurl', UploadUrlJsonHandler),  # called from blueimp js
        (r'/blueimp/([^/]+)?', BlueImpUploadDoneHandler),
        (r'/getbatchid', GetBatchId),
        (r'/dumpdata', GetDataDump),  # redirects to /csvfile/filename
        (r'/csvfile/([^/]+)?', SaveTabCSVData),
        (r'/del/([^/]+)?/(.*)', DeepDeleteHandler),
        (r'/deletebatch', DeleteBatchHandler),
        (r'/deleteall', DeleteAllHandler),
    ]
    # TODO: turn off debug
    application = webapp.WSGIApplication(routes, debug=True)
    run_wsgi_app(application)

if __name__ == '__main__':
    # Script entry point: crank logging up while developing.
    logging.getLogger().setLevel(logging.DEBUG) # TODO: lower logging to INFO
    main()