import os
import cgi
import wsgiref.handlers
import logging
import urlparse
import urllib
import re

from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.api import urlfetch
from BeautifulSoup import BeautifulSoup

import gdata.photos.service
import gdata.media
import gdata.geo

import gdata.urlfetch

MAX_CRAWL_TIMES = 1000
gdata.service.http_request_handler = gdata.urlfetch

class BackupHistory(db.Model):
  """Record of a single site backup run with page/image totals."""
  site = db.LinkProperty(required=True)
  gmtCreate = db.DateTimeProperty(auto_now_add=True)
  pageCount = db.IntegerProperty(default=0)
  pageSize = db.IntegerProperty(default=0)
  imageCount = db.IntegerProperty(default=0)
  imageSize = db.IntegerProperty(default=0)

  @staticmethod
  def getLastHistory():
    """Return up to 20 of the most recent backup runs, newest first."""
    recent = db.Query(BackupHistory)
    recent.order('-gmtCreate')
    return recent.fetch(limit=20)
  
class Page(db.Model):
  # One archived HTML page captured during a backup run.
  sourceUrl = db.LinkProperty()    # URL the page was fetched from
  targetUrl = db.StringProperty()  # local path mapping (never assigned in this file — TODO confirm intent)
  gmtCreate = db.DateTimeProperty(auto_now_add=True)
  content = db.TextProperty()      # decoded page HTML
  size = db.IntegerProperty()      # len(content) at save time
  # Back-reference: history.pages yields all pages of a run.
  history = db.ReferenceProperty(BackupHistory,
                                 collection_name='pages')

class Image(db.Model):
  # One downloaded image (product photo, logo, column image, ...).
  url = db.LinkProperty()          # source URL of the image
  title = db.StringProperty()      # human-readable label, e.g. product name
  gmtCreate = db.DateTimeProperty(auto_now_add=True)
  content = db.BlobProperty(default=None)  # raw image bytes
  size = db.IntegerProperty()      # len(content) at save time
  # Back-reference: page.images yields images found on that page.
  page = db.ReferenceProperty(Page,
                              collection_name='images')
  # Back-reference: history.images yields all images of a run.
  history = db.ReferenceProperty(BackupHistory,
                                 collection_name='images')

class Index(webapp.RequestHandler):
  """Render the home page listing the most recent backup runs."""

  def get(self):
    template_path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
    context = {'historys': BackupHistory.getLastHistory()}
    self.response.out.write(template.render(template_path, context))
    

class ShowImage(webapp.RequestHandler):
  """Serve a stored Image entity's binary content by datastore key."""

  def get(self):
    key = cgi.escape(self.request.get('key')).strip()
    try:
      # BUG FIX: a malformed key string makes db.Key raise BadKeyError,
      # which previously surfaced as a 500; treat it as not found.
      image = db.get(db.Key(key))
    except db.BadKeyError:
      image = None

    if (image and image.content):
      # Derive the MIME subtype from the URL's file extension
      # (e.g. ".jpg" -> "image/jpg") — crude, but matches the stored URLs.
      prefix = image.url[image.url.rfind('.')+1:]
      self.response.headers['Content-Type'] = str('image/' + prefix)
      filename = 'attachment;filename=' + image.url[image.url.rfind('/')+1:]
      self.response.headers['content-disposition'] = str(filename)
      self.response.out.write(image.content)
    else:
      self.error(404)

class ShowHistory(webapp.RequestHandler):
  """Render one backup run with its pages and images."""

  def get(self):
    key = cgi.escape(self.request.get('key')).strip()
    try:
      # BUG FIX: a malformed key raises BadKeyError and a missing entity
      # returns None; both previously crashed with a 500 on the
      # attribute access below. Respond 404 instead.
      history = db.get(db.Key(key))
    except db.BadKeyError:
      history = None
    if history is None:
      self.error(404)
      return

    pages = history.pages
    images = history.images

    path = os.path.join(os.path.dirname(__file__), 'templates/result.html')
    self.response.out.write(template.render(path, {'pages': pages, 'images': images, 'history': history}))

class ShowPage(webapp.RequestHandler):
  """Serve a stored Page's raw HTML content by datastore key."""

  def get(self):
    key = cgi.escape(self.request.get('key')).strip()
    try:
      # BUG FIX: malformed keys raise BadKeyError; report 404 rather
      # than letting the exception surface as a 500.
      page = db.get(db.Key(key))
    except db.BadKeyError:
      page = None

    if (page and page.content):
      self.response.out.write(page.content)
    else:
      self.error(404)

class ShowAlbums(webapp.RequestHandler):
  """List the Picasa albums of a fixed account via the GData API."""

  def get(self):
    gd_client = gdata.photos.service.PhotosService()
    # SECURITY: credentials are hard-coded in source; move them to
    # configuration or a datastore-backed secret before deploying.
    gd_client.ClientLogin('javaone@gmail.com', 'firstaimitrdalibaba')

    username = 'javaone'
    feed = gd_client.GetUserFeed(user=username)
    for entry in feed.entry:
      # BUG FIX: the original literal had unbalanced quotes
      # ('...href='/showalbum ...') — a syntax error. Rebuilt the markup
      # with the href attribute double-quoted and the anchor closed.
      self.response.out.write(
          '相册: <a href="/showalbum">%s</a> (共%s张照片) <br/>'
          % (entry.title.text, entry.numphotos.text))

class Backup(webapp.RequestHandler):
  
  def saveImage(self, url, title, history, page):
    image = Image()
    image.url = url
    content = urlfetch.Fetch(image.url).content
    image.content = db.Blob(content)
    image.size = len(content)
    image.title = title
    image.history = history
    image.page = page
    image.put()

    history.imageCount += 1
    history.imageSize += image.size

    
  def fetchProductImage(self, history, page, crawledProducts):
    
    url = page.sourceUrl
    path = url.split('/')
    if len(path) != 7 and len(path) != 8 :
      logging.info('Split Product Url ' + url + ' error...')
      return
    
    productId = path[5]
    if crawledProducts.count(productId) != 0:
      return
    
    crawledProducts.append(productId)
    productName = path[len(path) - 1]
    productName = productName[:productName.find('.')]
    productUrl = 'http://img.alibaba.com/photo/' + productId + '/' + productName + '.jpg'
    
    #todo process vip product(example:http://sunrejoy.en.alibaba.com/product/200195214/206535181/truck_parts/HOWO_Truck_Parts.html
    vipFounded = False
    vipProductUrl = 'http://img.alibaba.com/photo/' + productId + '_1/' + productName + '_.jpg'
    if page.content.find(vipProductUrl) == -1:
      logging.info('Fetching Product Image ' + productUrl + ' start...')
      self.saveImage(productUrl, productName, history, page)
    else:
      i = 6
      while i > 0:
        vipProductUrl = 'http://img.alibaba.com/photo/' + productId + '_' + str(i) + '/' + productName + '_.jpg'
        if page.content.find(vipProductUrl) > -1:
          break
        i -= 1
    
      j = 1
      logging.info('Found Vip Product ' + str(i))
      while j <= i:
        vipProductUrl = 'http://img.alibaba.com/photo/' + productId + '_' + str(j) + '/' + productName + '_.jpg'
        logging.info('Fetching Product Image ' + vipProductUrl + ' start...')
        self.saveImage(vipProductUrl, productName, history, page)
        j = j + 1

    return

  def fetchCompanyImage(self, history, page):
    soup = BeautifulSoup(''.join(page.content))
    tags = soup.findAll('img')
    for tag in tags:
      if tag.has_key('src'):
        url = str(tag['src']).strip()
        if url.startswith('http://img.alibaba.com/img/company/'):
          logging.info('Fetching Company Image ' + url + ' start...')
          if url.rfind('_aboutus') > -1:
            self.saveImage(url, 'Aboutus Image', history, page)
        
          if url.rfind('_logo') > -1:
            self.saveImage(url, 'Logo Image', history, page)
      
    return

  def fetchColumnImage(self, history, page):
    soup = BeautifulSoup(''.join(page.content))
    tags = soup.findAll('img')
    for tag in tags:
      if tag.has_key('src'):
        url = str(tag['src']).strip()
        if url.startswith('http://img.alibaba.com/img/colcontent/'):
          logging.info('Fetching Column Image ' + url + ' start...')
          self.saveImage(url, 'Column Image', history, page)
      
    return


  def fetchPage(self, url, history, crawledProducts):
    try:
      logging.info("Fetching Page URL " + url + " start...")
      
      httpresponse = urlfetch.fetch(url, allow_truncated=True)
      if httpresponse.content_was_truncated:
        logging.error("read content too large...")
      page = Page()
      page.sourceUrl = url
      page.content = db.Text(httpresponse.content, encoding="iso-8859-1")
      page.history = history
      page.size = len(page.content)
      page.put()
      
      history.pageCount += 1
      history.pageSize += page.size

      if url.find('/product/') > -1:
        self.fetchProductImage(history, page, crawledProducts)
        
      if url.find('/aboutus.html') > -1:
        self.fetchCompanyImage(history, page)

      if url.find('/column/') > -1:
        self.fetchColumnImage(history, page)
      
      return page

    except urlfetch.DownloadError:
      logging.error("DownloadError,please check link format,only support HTTP and HTTPS")
      return None

    except Exception, e:
      logging.error(e)
      return None
    
  def getURL(self, startURL, content, crawledUrls):
    results = []
    soup = BeautifulSoup(''.join(content))
    links = soup.findAll('a')
    for link in links:
      if link.has_key('href'):
        url = str(link['href']).strip().replace('\n', '')
        '''
        if url != 'http://cntop.en.alibaba.com/product/200031840/200722014/Aluminium_Flashlights/Aluminium_flashlight.html':
          continue
        '''
        
        if url == '/' or url.find('/offer') > -1 or url.rfind('/showimg.html') > -1 \
           or url.find('/collection_product/') > -1 or url.find('/memberhome/') > -1:
          continue

        if url.startswith('/'):
          url = startURL + url
          
        if url.startswith(startURL):
          
          #todo file suffix filter, example: html, htm
          if url.rfind('.html') or url.rfind('.htm'):
            if url.find('#') > -1:
              url = url[:url.find('#')]
            
            if results.count(url) == 0 and crawledUrls.count(url) == 0:
              results.append(url)

    return results
    
  def Spider(self, startURL, history):
    crawlUrls = []
    crawlUrls.append(startURL)
    crawledUrls = []
    crawledProducts = []
    i = 1
    while 1:
      if i > MAX_CRAWL_TIMES:
        break
      
      if len(crawlUrls) > 0:
        url = crawlUrls.pop(0)
        crawledUrls.append(url)
        logging.info(str(i) + '/' + str(len(crawlUrls)) + '... ' + url)
        page = self.fetchPage(url, history, crawledProducts)

        if page != None:
          i = i + 1
          urllist = self.getURL(startURL, page.content, crawledUrls)
          for url in urllist:
            if crawlUrls.count(url) == 0:
              crawlUrls.append(url)
      else:
        break

    
  def post(self):
    domainName = cgi.escape(self.request.get('domainName')).strip()
    site = 'http://' + domainName + '.en.alibaba.com'
    u = urlparse.urlparse(site)
    if u.scheme != 'http' or u.netloc == '':
      self.redirect('/')

    urlMaps = {
    'http://' + domainName + '.en.alibaba.com':																																							'index.html',                                                                  
    'http://' + domainName + '.en.alibaba.com/group/0.html':                                                                  'group/0.html',                                                                
    'http://' + domainName + '.en.alibaba.com/group/{$group_id}/{$group_name}.html':                                          'group/{$group_id}/{$group_name}.html',                                        
    'http://' + domainName + '.en.alibaba.com/group/{$group_id}/{$group_name}/{$page_no}.html':                               'group/{$group_id}/{$group_name}/{$page_no}.html',                             
    'http://' + domainName + '.en.alibaba.com/product/{$group_id}/{$product_id}/{$group_name}/{$product_name}.html':          'product/{$group_id}/{$product_id}/{$group_name}/{$product_name}.html',        
    'http://' + domainName + '.en.alibaba.com/product/{$group_id}/{$product_id}/{$group_name}/{$product_name}/showimg.html':  'product/{$group_id}/{$product_id}/{$group_name}/{$product_name}/showimg.html',
    'http://' + domainName + '.en.alibaba.com/offerlist.html':                                                                'offerlist.html',                                                              
    'http://' + domainName + '.en.alibaba.com/offercategory/{$category_id}/{$category_name}.html':                            'offercategory/{$category_id}/{$category_name}.html',                          
    'http://' + domainName + '.en.alibaba.com/offercategory/{$category_id}/{$category_name}/{$page_no}.html':                 'offercategory/{$category_id}/{$category_name}/{$page_no}.html',               
    'http://' + domainName + '.en.alibaba.com/offerdetail/{$offer_id}/{$offer_subject}.html':                                 'offerdetail/{$offer_id}/{$offer_subject}.html',                               
    'http://' + domainName + '.en.alibaba.com/offerdetail/{$offer_id}/{$offer_subject}/showimg.html':                         'offerdetail/{$offer_id}/{$offer_subject}/showimg.html',                       
    'http://' + domainName + '.en.alibaba.com/trustpass_profile.html':                                                        'trustpass_profile.html',                                                      
    'http://' + domainName + '.en.alibaba.com/aboutus.html':                                                                  'aboutus.html',                                                                
    'http://' + domainName + '.en.alibaba.com/contactinfo.html':                                                              'contactinfo.html',                                                            
    'http://' + domainName + '.en.alibaba.com/videoshow.html':                                                                'videoshow.html',                                                              
    'http://' + domainName + '.en.alibaba.com/column/{$column_id}.html':                                                      'column/{$column_id}.html'
    }

    history = BackupHistory(site = site)
    history.put()
    self.Spider(site, history)
    history.put()
    self.redirect('/history?key=' + str(history.key()))

def main():
  """Wire up the URL routes and serve the WSGI application via CGI."""
  routes = [
      ('/', Index),
      ('/backup', Backup),
      ('/showimage', ShowImage),
      ('/showpage', ShowPage),
      ('/showalbums', ShowAlbums),
      ('/history', ShowHistory),
  ]
  application = webapp.WSGIApplication(routes, debug=True)
  wsgiref.handlers.CGIHandler().run(application)

if __name__ == "__main__":
  main()