# -*- coding: utf-8 -*-

import commonlib, workerpool
import traceback, os, re, datetime, time
from MongoModel import MongoModel
from termcolor import cprint

# Shared logger for this crawler (configured by commonlib).
LOG = commonlib.getLogger('muaban.net')
# Local directory where downloaded images are stored before upload.
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/muaban/'
# Remote path prefix used when uploading images over SFTP.
PREFIX = '/uploads/muaban/'
# MongoDB connection settings (see Crawler/MongoModel).
MONGO_SERVER = 'beta.mana.vn'
MONGO_PORT = 27017
# Per-subcategory stop limits: abort paging after this many newly
# processed articles / this many duplicates (see Crawler.process).
MAX_PROCESS = 30
MAX_DUPLICATE = 10

class Crawler(MongoModel):
    
    rootUrl = 'http://muaban.net/{0}/raovat/{1}/{2}/{3}.html'
    cities = {'ha-noi': 'Hà Nội', 'ho-chi-minh': 'TP Hồ Chí Minh', 'hai-phong': 'Hải Phòng', 'da-nang': 'Đà Nẵng', 'can-tho': 'Cần Thơ'}
    categories = {2: {'name': 'Việc làm', 'type': 'viec-lam', 'sub': [
                        [3, 'viec-tim-nguoi', 'Việc tìm người'], [20, 'nguoi-tim-viec', 'Người tìm việc'], 
                        [31, 'dich-vu-lao-dong', 'Dịch vụ lao động'], [177, 'viec-lam-khac', 'Việc làm khác']]},
                  32: {'name': 'Tuyển sinh - Đào tạo', 'type': 'tuyen-sinh-dao-tao', 'sub': [
                        [40, 'tuyen-sinh', 'Tuyển sinh'], [41, 'dao-tao', 'Đào tạo'], 
                        [160, 'du-hoc', 'Du học'], [162, 'khac', 'Khác']]},
                  33: {'name': 'Bất động sản', 'type': 'bat-dong-san', 'sub': [
                        [42, 'nhuong-qsd-dat', 'Nhượng QSD đất'], [43, 'ban-nha', 'Bán nhà'], 
                        [44, 'sang-kiot-cua-hang', 'Sang kiot cửa hàng'], [46, 'cho-thue-nha-dat', 'Cho thuê nhà đất'], 
                        [1614, 'nha-cho-sinh-vien-thue', 'Nhà cho sinh viên thuê'], [47, 'dich-vu-nha-dat', 'Dịch vụ nhà đất']]},
                  34: {'name': 'Ô tô', 'type': 'o-to', 'sub': [
                        [103, 'ban-o-to', 'Bán ô tô'], [104, 'mua-o-to', 'Mua ô tô'], [105, 'cho-thue-o-to', 'Cho thuê ô tô']]},
                  36: {'name': 'Điện tử - Điện máy', 'type': 'dien-tu-dien-may', 'sub': [
                        [125, 'may-vi-tinh', 'Máy vi tính'], [126, 'dien-thoai-di-dong', 'Điện thoại di động'], 
                        [127, 'hang-dien-may', 'Hàng điện máy'], [165, 'khac', 'Khác']]},
                  35: {'name': 'Xe máy', 'type': 'xe-may', 'sub': [
                        [110, 'ban-xe', 'Bán xe'], [111, 'mua-xe', 'Mua xe'], [112, 'cho-thue-xe-may', 'Cho thuê xe máy']]}
                  }
    
    def __init__(self, host, port):
        MongoModel.__init__(self, host, port)
        
    def parseTable(self, node, result=[], separate=False):
        xpStr = './tbody/tr' if node.getchildren()[0].tag == 'tbody' else './tr' 
        for line in node.xpath(xpStr):
            lineStr = ''
            ic = 0
            tdNodes = line.xpath("./td")
            numTdNodes = len(tdNodes)
            for iTd in tdNodes:
                ulCheck = False
                ic += 1
                if len(iTd.getchildren()) > 0:
                    fChild = iTd.getchildren()[0]
                    if fChild.tag == 'table':
                        self.parseTable(fChild, result, True); continue
                    elif fChild.tag == 'ul' and fChild.get('class', '') == 'ulCheck':
                        liStr = ''
                        ii = 0 
                        for li in fChild.xpath("./li"):
                            ii += 1
                            liStr += commonlib.getElementText(li, descendant=1) + ', '
                        liStr = liStr[:-2]
                        if liStr != '': lineStr += '{0} '.format(liStr) 
                        if ii>1: continue
                        ulCheck = True
                text = commonlib.getElementText(iTd, descendant=1)
                if ulCheck: text = 'Có'
                lineStr += '{0} '.format(text)
                if separate and numTdNodes > 2 and ic % 2 == 0 and ic < numTdNodes: lineStr += ' | '
            lineStr = lineStr.strip()
            if lineStr != '': result.append({'type': 'text', 'data': lineStr})
        return result
                    
    
    def getDetailJob(self, url):
        LOG.debug('getDetailJob(url={0})'.format(url))
        data = {'content': [], 'area': '', 'tag': '', 'postDate': ''}
        try:
            tree = commonlib.getXMLTree(url)        
            if tree == '' or tree == None: return
            
            tag = commonlib.getElementText(tree.xpath("//div[@style='float:left;width:740px;font-weight:bold;']//a[last()]"))
            tag = re.sub(r'\(\d+\)', '', tag).strip()
            data['tag'] = tag
            if tag == '': return
            # ------------------------------
            print '--------'
            print tag
            print '--------'
            # ------------------------------
            postDate = commonlib.getElementText(tree.xpath("//table[@id='ctl00_pC_DV_tableHeader']//tr/td/b[contains(., 'Ngày đăng')]/../following-sibling::*[2]".decode('utf-8')))
            if len(postDate) > 3: postDate = postDate[:-2]
            if postDate == '': return
            data['postDate'] = datetime.datetime.strptime(postDate, "%d/%m/%Y")
            district = commonlib.getElementText(tree.xpath("//table[@id='ctl00_pC_DV_tableHeader']//tr/td/b[contains(., 'Khu vực')]/../following-sibling::*[2]".decode('utf-8')))
            districtArr = district.split(' » ')
            if len(districtArr) > 1: district = districtArr[1]
            data['area'] = district
            # ------------------------------
            print 'Posted: ', data['postDate']
            print 'Area: ', district
            # ------------------------------
            content = []
            for item in tree.xpath("//div[@class='content' and @style='width:590px;border-top:dashed 1px silver;padding:7px 0px 15px 0px;margin-top:7px;']/*"):
                if item.tag in ['script']: continue
                if item.tag == 'table':
                    tableRet = self.parseTable(item, [])
                    for line in tableRet: content.append(line)
                    continue
                text = commonlib.getElementText(item, descendant=1)
                if text != '':
                    content.append({'type': 'text', 'data': text})
                text = commonlib.getElementText(item, text=0)
                if text != '':
                    content.append({'type': 'text', 'data': text})
            # --> lay phan anh cua mot so tin co anh?
            if len(tree.xpath("//*[@id='slideshow']")) > 0:
                for imgNode in tree.xpath("//*[@id='slideshow']//img"):
                    img = commonlib.getAttribText(imgNode, 'src')
                    img = commonlib.urlJoin(url, img) if img != '' else ''
                    if img != '':
                        dImg = commonlib.downloadNUpload(ssh, img, DOWNLOAD_PATH, PREFIX)
                        if dImg != '':
                            content.append({'type': 'image', 'data': dImg})
            data['content'] = content
            # ------------------------------
            for line in content:
                print line['data']
            # ------------------------------
            
        except:
            LOG.error(traceback.format_exc())
            cprint(url, 'red')
        finally:
            return data
        
    def identifyId(self, link):
        pat = r'chi-tiet/(\d+)/'
        try:
            return commonlib.extractWithRegEx(pat, link, 1)
        except:
            return link
    
    def getCategoryId(self, catId):
        db = self.connection['muaban']
        categoryId = db.eval('''
            var ret = null;
            var cat = "%s";
            ret = db.category.findOne({"name": cat});
            if (ret == null) {
                db.category.save({"name": cat});
                ret = db.category.findOne({"name": cat});
            }
            return ret["_id"];
        ''' % self.categories[catId]['name'])
        return categoryId['value']
    
    def getListJobs(self, cityId, catId, subCat, url, currentProcessed=0, currentDuplicated=0):
        LOG.debug('start getListJobs(cityId={0}, catId={1})'.format(cityId, catId))
        try:
            db = self.connection['muaban']
            categoryId = self.getCategoryId(catId)
            collection = db['article']
            tree = commonlib.getXMLTree(url)
            if tree == '' or tree == None: return
            for item in tree.xpath("//div[@class='listing']//ul[@class='list_4']/li[position()>1]"):
                price = commonlib.getElementText(item.xpath(".//div[@class='item_price']"))
                title = commonlib.getElementText(item.xpath(".//a[@class='subject']"), descendant=1)
                link = commonlib.getAttribText(item.xpath(".//a[@class='subject']"), 'href')
                link = commonlib.urlJoin(url, link) if link != '' else ''
                if link == '': continue
                id = self.identifyId(link)
                hashUrl = commonlib.getMD5Hash(id)
                isExit = collection.find_one({'hashUrl': hashUrl})
                if isExit:
                    currentDuplicated += 1
                    cprint('WARNING: current existed in database !!!', 'yellow')
                    continue
                currentProcessed += 1
                description = commonlib.getElementText(item.xpath(".//div[@class='sum']"), descendant=1)
                thumbnail = commonlib.getAttribText(item.xpath(".//img[@class='thumb']"), 'src')
                thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail not in ['', '/res/images/icons/noimage.gif'] else ''
                # ------------------------------------------------------------------------------------------------------------
                print '--------------------------------'
                print 'ID: ', id
                print 'Title: ', title
                print 'Description: ', description
                print 'Link: ', link
                print 'Price: ', price
                print 'Thumbnail: ', thumbnail
                # ------------------------------------------------------------------------------------------------------------
                data = self.getDetailJob(link)
                if len(data['content']) == 0: LOG.warning('Khong lay duoc noi dung link {0}'.format(link)); continue
                collection.save({
                    'title': title,
                    'link': link,
                    'category': categoryId,
                    'subcategory': subCat,
                    'hashUrl': hashUrl,
                    'price': price,
                    'description': description,
                    'thumbnail': thumbnail,
                    'area': data['area'],
                    'city': self.cities[cityId],
                    'tag': data['tag'],
                    'content': data['content'],
                    'reporttime': data['postDate'],
                    'timestamp': time.time(),
                    'lastupdate': datetime.datetime.now(),
                    'type': 'normal'
                })
        except:
            LOG.error(traceback.format_exc())
        finally:
            LOG.debug('finished getListJobs(cityId={0}, catId={1})'.format(cityId, catId))
            cprint('currentProcessed={0}, currentDuplicated={1}'.format(currentProcessed, currentDuplicated), 'green')
            return currentProcessed, currentDuplicated
    
    def getListVipNews(self, catId, cityId, subCat, currentProcessed=0, currentDuplicated=0):
        LOG.debug('start getListVipNews(%s, %s)' % (catId, cityId))
        url = "http://muaban.net/{0}/raovat/{1}/{2}/topvip.html?{3}".format(cityId, catId, self.categories[catId]['type'], time.time())
        try:
            html = commonlib.extractWithRegEx(r"\.html\('(.+)'\)$", commonlib.getHTML(url), 1)
            tree = commonlib.buildTreeFromHTML(html)
            db = self.connection['muaban']
            categoryId = self.getCategoryId(catId)
            collection = db['article']
            for item in tree.xpath("//ul[@class='list_4']/li/div[@class='w600']"):
                title = commonlib.getElementText(item.xpath("./a/b"))
                link = commonlib.getAttribText(item.xpath("./a"), 'href')
                link = commonlib.urlJoin(url, link) if link != '' else ''
                if link == '': continue
                id = self.identifyId(link) 
                hashUrl = commonlib.getMD5Hash(id)
                if not collection.find_one({'hashUrl': hashUrl}):
                    description = commonlib.getElementText(item.xpath(".//div[@class='sum']"), descendant=1)
                    price = commonlib.getElementText(item.xpath(".//div[@class='item_price']"))
                    thumbnail = commonlib.getAttribText(item.xpath(".//div[@class='sum']/img"), 'src')
                    thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '/res/images/icons/noimage.gif' and thumbnail != '' else ''
                    if thumbnail == '': continue
                    thumbnail = commonlib.downloadNUpload(ssh, thumbnail, DOWNLOAD_PATH, PREFIX)
                    # ------------------------------------------------------------------------------------------------------------
                    print '--------------------------------'
                    print 'ID: ', id
                    print 'Title: ', title
                    print 'Description: ', description
                    print 'Link: ', link
                    print 'Price: ', price
                    print 'Thumbnail: ', thumbnail
                    # ------------------------------------------------------------------------------------------------------------
                    currentProcessed += 1
                    data = self.getDetailJob(link)
                    if len(data['content']) == 0: LOG.warning('Khong lay duoc noi dung link {0}'.format(link));continue
                    collection.save({
                        'title': title,
                        'link': link,
                        'category': categoryId,
                        'subcategory': subCat,
                        'hashUrl': hashUrl,
                        'price': price,
                        'description': description,
                        'thumbnail': thumbnail,
                        'area': data['area'],
                        'city': self.cities[cityId],
                        'tag': data['tag'],
                        'content': data['content'],
                        'reporttime': data['postDate'],
                        'timestamp': time.time(),
                        'lastupdate': datetime.datetime.now(),
                        'type': 'vip'
                    })
                else:
                    cprint('Already existed in database !!', 'yellow')
                    currentDuplicated += 1
        except:
            LOG.error(traceback.format_exc())
        finally:
            LOG.debug('finished getListVipNews(%s, %s)' % (catId, cityId))
            cprint('currentProcessed={0}, currentDuplicated={1}'.format(currentProcessed, currentDuplicated), 'green')
            return currentProcessed, currentDuplicated
    
    def process(self, cityId, catId):
        LOG.debug('call process(cityId={0}, catId={1})'.format(cityId, catId))
        for icat in self.categories[catId]['sub']:
            cprint('DEBUG: process {0} >> {1} : {2}'.format(self.categories[catId]['name'], icat[2], self.cities[cityId]), 'green')
            currentProcessed = 0
            currentDuplicated = 0
            for iPage in range(1, 3):
                print '-------------------------'
                cprint('PAGE: {0} -- {1} -- {2}'.format(iPage, self.cities[cityId], icat[2]), 'yellow')
                print '-------------------------'
                if currentProcessed > MAX_PROCESS:
                    LOG.info('Dừng crawler chuyên mục {0}, thành phố {1} do vượt quá giới hạn {2}'.format(self.categories[catId]['name'], self.cities[cityId], MAX_PROCESS))
                    break
                if currentDuplicated > MAX_DUPLICATE:
                    LOG.info('Dừng crawler chuyên mục {0}, thành phố {1} do trùng tin vượt quá giới hạn {2}'.format(self.categories[catId]['name'], self.cities[cityId], MAX_DUPLICATE))
                    break
                url = self.rootUrl.format(cityId, icat[0], icat[1], iPage)
                currentProcessed, currentDuplicated = self.getListJobs(cityId, catId, icat[2], url, currentProcessed, currentDuplicated)
                if iPage <=4:
                    currentProcessed, currentDuplicated = self.getListVipNews(catId, cityId, icat[2], currentProcessed, currentDuplicated)
        LOG.debug('finished process(cityId={0}, catId={1})'.format(cityId, catId))
        return

class Task(workerpool.Job):
    """A workerpool job that crawls one (category, city) pair.

    The pool's worker thread calls run(), which simply forwards to the
    stored callback (Crawler.process) with (cityId, catId).
    """

    def __init__(self, catId, cityId, callback):
        # Remember the target identifiers and the bound crawler method
        # until the pool schedules this job.
        self.callback = callback
        self.catId = catId
        self.cityId = cityId

    def run(self):
        # Executed on a worker thread; note the argument order swap:
        # Task takes (catId, cityId), the callback takes (cityId, catId).
        self.callback(self.cityId, self.catId)

def forceQuit():
    """Terminate the current process immediately.

    Uses os._exit so no cleanup handlers, atexit hooks or lingering
    non-daemon threads (the watchdog) can keep the process alive.
    """
    # FIX: the original followed os._exit(1) with os.kill(pid, 9); that
    # kill line was unreachable because os._exit never returns, so the
    # dead code is removed.
    os._exit(1)

def quitIfTimeout():
    print 'call quitIfTimeout'
    while True:
        delta = time.time() - lastaction
        pid = os.getpid()
        try:
            if delta > 900:
                print 'process timeout {0}'.format(delta)
                print 'kill process {0}'.format(pid)
                os.system("kill -9 {0}".format(pid))
        except:
            print 'ERROR: could not kill python process with pid={0}'.format(pid)
        time.sleep(5)

if __name__ == '__main__':
    # Watchdog timestamp read by quitIfTimeout().
    # NOTE(review): nothing in this file ever refreshes ``lastaction``,
    # so the watchdog kills the process ~900s after start-up regardless
    # of activity -- confirm whether a helper updates it elsewhere.
    lastaction = time.time()
    import threading
    import argparse
    # -- Command-line arguments
    parser = argparse.ArgumentParser(description="Crawler tin tức")
    parser.add_argument('--run', action='store', dest='run_mode',
                    help='sftp: chạy dưới local và upload file lên server thông qua sftp', metavar='sftp')
    args = parser.parse_args()
    # -----------------------------
    # -- In sftp mode, open the SSH connection to the mana.vn server;
    #    the module-level ``ssh`` is used by Crawler for image uploads.
    run_mode = args.run_mode
    ssh = None
    if run_mode == 'sftp':
        ssh = commonlib.getSSH('mana.vn', 'giangnh')
        if ssh == None:
            print 'Không kết nối được tới server, thoát crawler'
            forceQuit()
    # -----------------------------
    # Start the timeout watchdog; forceQuit() at the end terminates it
    # together with the process.
    threading.Thread(target=quitIfTimeout).start()
    LOG.info('start crawler muaban.net')
    crawler = Crawler(MONGO_SERVER, MONGO_PORT)
    try:
        # Fan out one Task per (category, city) pair over 5 workers;
        # only Hà Nội and TP HCM are crawled.
        pool = workerpool.WorkerPool(size=5)
        for icity in ['ha-noi', 'ho-chi-minh']:
            for icat in crawler.categories.keys():
                pool.put(Task(icat, icity, crawler.process))
        pool.shutdown(); pool.wait() 
    except:
        LOG.error(traceback.format_exc())
    if ssh is not None: ssh.close()
    LOG.info('finished crawler muaban.net')
    forceQuit()
