import lxml
import urllib, urllib2 
import re, sys, os
import mechanize
import datetime, time
import traceback
import workerpool
import hashlib
import urlparse
import binascii
import threading
import cStringIO as StringIO
import pycommonlib as pyclib
from lxml       import etree
from pymongo    import Connection
from urlparse   import urljoin
from termcolor  import cprint 

LOCAL_PATH  = '/home/data1/66rpg'
BASE_SITE   = 'http://www.66rpg.com' 
GET_URL     = 'http://www.66rpg.com/categories/3/page/1'
start = 0

def getMaxPage(url):
    """Return the number of listing pages (last page index + 1).

    Parses the pager block on *url* and reads the trailing digits of the
    last <a>'s href. Returns 1 (a safe single-page default) on ANY
    failure instead of falling through to an implicit None, so callers
    can pass the result straight to range() without a TypeError.
    """
    maxPage = 1
    try:
        tree = pyclib.getXMLTree(url)
        pageNode = tree.xpath('//div[@class="mainbox"]//div[@class="looppage"]//a[last()]')
        if len(pageNode) > 0:
            text = pageNode[0].get('href')
            m = re.search(r'(\d+)$', text)
            # +1 so that range(1, maxPage) still covers the last page.
            # The pattern matches digits only, so plain int() suffices
            # (the old int(float(...)) round-trip was redundant).
            if m: maxPage = int(m.group(1)) + 1
    except Exception:
        traceback.print_exc()
    return maxPage

def downloadFile(src):
    try:
        file_name = ''
        if type(src).__name__ == 'unicode': src = src.encode('utf-8')
        print 'Process url: ', src
        u = urllib2.urlopen(src)
        meta = u.info()
        print meta
        if meta.getheaders("Content-Disposition") != None and len(meta.getheaders("Content-Disposition")) > 0:
          disposition = meta.getheaders("Content-Disposition")[0]
          preg        = re.compile(r'filename=(.+)$')
          m           = preg.search(disposition)
          if m: file_name = m.group(1)
        if file_name == '': print 'Not get name file !!!'; return
        file_name = file_name.replace('"', '')
        file_name = re.sub(r'\s+', '', file_name)
        
        file_size = int(meta.getheaders("Content-Length")[0])
        path_file = '{0}/{1}'.format(LOCAL_PATH, file_name)
        path_dir  = '{0}'.format(LOCAL_PATH)

        print 'file_name: ', file_name
        print 'Path_file: ', path_file
        print 'file_size: ', file_size
        chunk = 256 * 10240
        #buffer = u.read(8192)
        if not os.path.isfile(path_file):
            if not os.path.exists(path_dir): os.makedirs(path_dir, 0777)
            print "Downloading: %s Bytes: %s" % (path_file, file_size)
            f = open(path_file, 'wb')
            while True:
                buffer = u.read(chunk)
                if not buffer: break
                f.write(buffer)
            f.close()
            u.close()
            size = os.path.getsize(path_file)
            if (size != file_size):
                os.unlink(path_file); print 'File download error => remove'
            else:
                print 'Download success with url: ', src
        else:
            size = os.path.getsize(path_file)
            if (size != file_size):
                os.unlink(path_file)
                if not os.path.exists(path_dir): os.makedirs(path_dir, 0777)
                print "Downloading: %s Bytes: %s" % (path_file, file_size)
                f = open(path_file, 'wb')
                while True:
                    buffer = u.read(8192)
                    if not buffer: break
                    f.write(buffer)
                f.close()
                u.close()
                size = os.path.getsize(path_file)
                if (size != file_size):
                    os.unlink(path_file); print 'File download error => remove'
                else:
                    print 'Download success with url: ', src
            else:
              print 'File is exists !!!'
    except:
        traceback.print_exc()
        
def processItem(url):
    """Fetch one article page and download its first attachment link."""
    try:
        tree = pyclib.getXMLTree(url)
        anchors = tree.xpath('//div[@class="mainbox"]//div[@class="download"]//a[1]')
        if anchors:
            target = urlparse.urljoin(BASE_SITE, anchors[0].get('href'))
            downloadFile(target)
    except:
        traceback.print_exc()
        
def processPage(url):
    try:
        print 'Process page: ', url
        listUrl = []
        tree = pyclib.getXMLTree(url)
        listNode = tree.xpath('//div[@class="mainbox"]//ul[@class="list"]/li//a[1]')
        for node in listNode:
            lurl = urlparse.urljoin(BASE_SITE, node.get('href'))
            listUrl.append(lurl)
        print listUrl
        pool = workerpool.WorkerPool(size=1)
        pool.map(processItem, listUrl)
        pool.shutdown(); pool.wait()
    except:
        traceback.print_exc()

def timeOut():
    """Watchdog loop: hard-kill the whole process once the crawl has run
    for more than 1800 seconds (measured from the module-level `start`
    timestamp set in __main__). Polls every 30 seconds.

    The previous version computed a pid and called os.kill AFTER
    os._exit(1) — unreachable dead code, removed here.
    """
    while True:
        if time.time() - start > 1800:
            os._exit(1)  # immediate exit, skipping cleanup handlers
        time.sleep(30)
        
def processAll():
    try:
        maxPage = getMaxPage(GET_URL)
        print 'Max page: ', maxPage
        #for page in range(1, maxPage):
        for page in range(3, maxPage):
            lurl = urlparse.urljoin(BASE_SITE, 'categories/3/page/{0}'.format(page))
            processPage(lurl)
    except:
        traceback.print_exc()

if __name__ == '__main__':
    try:
        cprint('start crawler 66rpg.com', 'yellow')
        start = time.time()
        # Thread.start() returns None, so the old `timeout = ...start()`
        # assignment stored nothing. Keep a real reference and mark the
        # watchdog as a daemon so it can never keep the process alive on
        # its own after the crawl finishes.
        watchdog = threading.Thread(target=timeOut)
        watchdog.daemon = True
        watchdog.start()
        processAll()
        pyclib.forceQuit()
    except Exception:
        traceback.print_exc()
        
