# -*- coding: utf-8 -*-
import commonlib
import os
import traceback
import mechanize
import re
import workerpool

# Directory template for one listing page's downloads; {0} is the page number.
STORE_FOLDER = '/sannhac/{0}'
# Number of concurrent download workers in the workerpool.
MAX_THREAD = 10

class DJob(workerpool.Job):
    """Work item for the download pool.

    Holds a callback plus its two arguments; run() simply forwards them
    when a worker thread picks the job up.
    """

    def __init__(self, callback, url, downloadPath):
        self.url = url
        self.downloadPath = downloadPath
        self.callback = callback

    def run(self):
        """Called on a pool worker thread: invoke callback(url, downloadPath)."""
        self.callback(self.url, self.downloadPath)

def downloadSWF(url, downloadPath):
    """Download the .swf file referenced by *url* into directory *downloadPath*.

    The target file name is taken from the last ``*.swf`` path component of
    the URL; the download is skipped when the file already exists. All
    errors are printed via traceback and swallowed so that one failed
    download does not abort the whole crawl.
    """
    print('start download ---------------------- {0}'.format(url))
    try:
        if not os.path.isdir(downloadPath):
            os.makedirs(downloadPath, 0o777)
        # File name is the last URL path component ending in .swf.
        m = re.search(r"/([a-zA-Z0-9-_]+\.swf)", url)
        if m is None:
            print('Error: file name of swf which will be downloaded is empty')
            return
        # os.path.join instead of manual '/' concatenation.
        target = os.path.join(downloadPath, m.group(1))
        if os.path.isfile(target):
            print('already exists ---------------------- {0}'.format(target))
            return
        mechanize.urlretrieve(url, target)
        print('download finished ------------------- {0}'.format(url))
    except Exception:
        # Best-effort crawler: narrowed from a bare except so Ctrl-C still
        # interrupts; log the failure and move on to the next file.
        traceback.print_exc()

def getSWF(url, downloadPath):
    """Fetch a song page and download its beat .swf file.

    Locates the "Tải beat" (download beat) anchor on the page, skips the
    shared personal-recording placeholder file, and hands the real link
    to downloadSWF(). Errors are printed and swallowed (best-effort crawl).
    """
    print('call ----------------------getSWF {0}'.format(url))
    try:
        tree = commonlib.getXMLTree(url)
        if tree is None:
            print('Error: getAPage error, tree is None')
            return
        # Anchor text is Vietnamese; decode the py2 byte string to unicode
        # so lxml receives a proper unicode XPath expression.
        swfNode = tree.xpath("//a[contains(., 'Tải beat')]".decode('utf-8'))
        if not swfNode:
            print('Error: can not find swf link')
            return
        swfLink = commonlib.getAttribText(swfNode, 'href')
        if swfLink == '':
            print('Error: can not get swf link')
            return
        if 'Ban_thu_ca_nhan.swf' in swfLink:
            print('Do not download Ban_thu_ca_nhan')
            return
        downloadSWF(swfLink, downloadPath)
    except Exception:
        traceback.print_exc()

def getAPage(pageNum):
    """Crawl one beat-listing page and download every song's beat in parallel.

    Fetches the AJAX listing XML for *pageNum*, collects each <song>'s
    linkSong attribute, and fans the downloads out over a pool of
    MAX_THREAD workers. Blocks until the whole page is processed.
    """
    print('call ----------------------getAPage {0}'.format(pageNum))
    url = 'http://sannhac.com/ajax.php?cmd=listBeat&page={0}&charFist=&genreId=&orderType=undefined&ord=undefined'.format(pageNum)
    downloadPath = STORE_FOLDER.format(pageNum)
    if not os.path.isdir(downloadPath):
        os.makedirs(downloadPath, 0o777)
    try:
        tree = commonlib.getXMLTree(url, isXML=True)
        if tree is None:
            print('Error: getAPage error, tree is None')
            return
        # Only linkSong is needed; the original also read the unused
        # "name" and "singer" attributes.
        links = [commonlib.getAttribText(song, "linkSong")
                 for song in tree.xpath("//song")]
        pool = workerpool.WorkerPool(size=MAX_THREAD)
        for link in links:
            pool.put(DJob(getSWF, link, downloadPath))
        # Drain and join the pool so the caller can move to the next page.
        pool.shutdown()
        pool.wait()
    except Exception:
        traceback.print_exc()

def process(startPage, endPage):
    """Crawl every listing page from startPage through endPage, inclusive."""
    page = startPage
    while page <= endPage:
        getAPage(page)
        page += 1
        
if __name__ == '__main__':

    print('start crawler sannhac.com')
    print('number of thread: {0}'.format(MAX_THREAD))
    process(0, 150)
    print('finished crawler sannhac.com')
    # Hard-exit so any worker threads still blocked on I/O cannot keep the
    # process alive. Exit status 0 signals success; the original exited
    # with 1, which reads as a failure to the shell.
    os._exit(0)