# -*- coding: utf-8 -*-
import workerpool
import os, re, datetime, time
import commonlib, traceback
from lxml import etree
from MongoModel import MongoModel
from MongoDbLog import MongoDbLog


# Module-wide crawler configuration.
LOG = MongoDbLog('muaban.net', 'debug')  # Mongo-backed logger shared by all methods
DOWNLOAD_PATH = '/home/hoangnamhai/HarvestedData/muaban/'  # local directory for downloaded images
MAX_PROCESS = 100   # max new items to save per (category, city) crawl
MAX_DUPLICATE = 15  # stop paging after this many already-seen items
DATABASE_NAME = 'muaban_v2'  # target MongoDB database (one collection per category)
PREFIX = '/uploads/muaban/'  # public URL prefix recorded for downloaded images
MONGO_SERVER = '27.0.12.106'  # NOTE(review): unusual address -- possibly a typo for 127.0.x.x; confirm
MONGO_PORT = 27017

class Task(workerpool.Job):
    """Workerpool job that runs one crawl callback for a (category, city) pair."""

    def __init__(self, catId, city, func):
        # Remember the crawl target and the bound method to invoke.
        self.catId, self.city, self.func = catId, city, func

    def run(self):
        # Called by a worker thread; delegates straight to the callback.
        self.func(self.catId, self.city)
    
class MuaBan(MongoModel):
    """Crawler for muaban.net classified ads.

    Walks the paginated listing pages for each (category, city) pair,
    fetches every ad's detail page and images, and stores the result in
    MongoDB: one collection per category, documents keyed by an unsigned
    CRC32 of title+link so re-runs skip already-harvested ads.
    """

    # province/city URL slug -> Vietnamese display name
    __cities = {'ha-noi': 'Hà Nội', 'da-nang': 'Đà Nẵng', 'ho-chi-minh': 'TP.HCM',
        'hai-phong': 'Hải Phòng', 'binh-dinh': 'Bình Định', 'can-tho': 'Cần Thơ',
        'bac-giang': 'Bắc Giang', 'binh-phuoc': 'Bình Phước', 'an-giang': 'An Giang',
        'bac-kan': 'Bắc Kạn', 'binh-thuan': 'Bình Thuận', 'ba-ria-vung-tau': 'Bà Rịa - Vũng Tàu',
        'bac-ninh': 'Bắc Ninh', 'dak-lak': 'Đắk Lắk', 'bac-lieu': 'Bạc Liêu',
        'cao-bang': 'Cao Bằng', 'dak-nong': 'Đắk Nông', 'ben-tre': 'Bến Tre',
        'dien-bien': 'Điện Biên', 'gia-lai': 'Gia Lai', 'binh-duong': 'Bình Dương',
        'ha-giang': 'Hà Giang', 'ha-tinh': 'Hà Tĩnh', 'ca-mau': 'Cà Mau',
        'ha-nam': 'Hà Nam', 'khanh-hoa': 'Khánh Hòa', 'dong-nai': 'Đồng Nai',
        'hai-duong': 'Hải Dương', 'kon-tum': 'Kon Tum', 'dong-thap': 'Đồng Tháp',
        'hoa-binh': 'Hòa Bình', 'lam-dong': 'Lâm Đồng', 'hau-giang': 'Hậu Giang',
        'hung-yen': 'Hưng Yên', 'nghe-an': 'Nghệ An', 'kien-giang': 'Kiên Giang',
        'lai-chau': 'Lai Châu', 'ninh-thuan': 'Ninh Thuận', 'long-an': 'Long An',
        'lang-son': 'Lạng Sơn', 'phu-yen': 'Phú Yên', 'soc-trang': 'Sóc Trăng',
        'lao-cai': 'Lào Cai', 'quang-binh': 'Quảng Bình', 'tay-ninh': 'Tây Ninh',
        'nam-dinh': 'Nam Định', 'quang-nam': 'Quảng Nam', 'tien-giang': 'Tiền Giang',
        'ninh-binh': 'Ninh Bình', 'quang-ngai': 'Quảng Ngãi', 'tra-vinh': 'Trà Vinh',
        'phu-tho': 'Phú Thọ', 'quang-tri': 'Quảng Trị', 'vinh-long': 'Vĩnh Long',
        'quang-ninh': 'Quảng Ninh', 'thua-thien-hue': 'Thừa Thiên Huế',
        'son-la': 'Sơn La', 'thai-binh': 'Thái Bình', 'thai-nguyen': 'Thái Nguyên',
        'thanh-hoa': 'Thanh Hóa', 'tuyen-quang': 'Tuyên Quang', 'vinh-phuc': 'Vĩnh Phúc',
        'yen-bai': 'Yên Bái'
        }

    # subset used while testing: the five biggest cities
    __cities5Top = {'ha-noi': 'Hà Nội', 'da-nang': 'Đà Nẵng',
                    'ho-chi-minh': 'TP.HCM', 'hai-phong': 'Hải Phòng',
                    'can-tho': 'Cần Thơ'}

    # category slug -> [site numeric category id, default tag/display name]
    categories = {'viec-lam': [2, 'Việc làm'],
        'tuyen-sinh-dao-tao': [32, 'Tuyển sinh - Đào tạo'],
        'bat-dong-san': [33, 'Bất động sản'],
        'o-to': [34, 'Ô tô'],
        'xe-may': [35, 'Xe máy'],
        'dien-tu-dien-may': [36, 'Điện tử - Điện máy'],
        'do-dung-mat-hang-khac': [37, 'Đồ dùng - Mặt hàng khác'],
        'dich-vu': [39, 'Dịch vụ'],
        'cong-dong': [634, 'Cộng đồng']
        }

    # listing-page URL template
    __url = 'http://muaban.net/{city}/raovat/{id}/{cat}.html'

    def __init__(self, host='localhost', port=27017):
        """Open the MongoDB connection via the MongoModel base class."""
        MongoModel.__init__(self, host, port)

    def standardizeTimeString(self, timeStr):
        """Parse a 'dd/mm/yyyy' date out of timeStr.

        Returns a datetime.datetime on success, '' when no date is found
        or the digits do not form a valid date (callers store the result
        as-is in the document).
        """
        result = ''
        try:
            m = re.search(r'(\d+)/(\d+)/(\d+)', timeStr)
            # BUG FIX: re.search returns a match object or None, never ''.
            # The old `if m != ''` test was always true, and the no-match
            # path only "worked" because the bare except ate the
            # AttributeError raised by None.group().
            if m is not None:
                result = datetime.datetime(int(m.group(3)), int(m.group(2)), int(m.group(1)))
        except Exception:
            LOG.error(traceback.format_exc())
        return result

    def getNewsDetail(self, url, catId):
        """Fetch one ad's detail page.

        Returns (data, area, reportTime, tag) where data is a list of
        {'type': 'text'|'image', 'data': ...} fragments; on a failed page
        fetch all four values are ''.
        """
        LOG.debug('start getNewsDetail(%s)' % url)
        area, reportTime, tag = '', '', ''
        data = []
        try:
            tree = commonlib.getXMLTree(url)
            if tree == '': return '', '', '', ''
            # Breadcrumb sibling carries the sub-category name; strip the
            # trailing "(count)" suffix.
            tag = commonlib.getElementText(tree.xpath("/html/body/form/div[8]/div/div/a[contains(@href, '{0}')]/following-sibling::*[2]".format(catId))).strip()
            tag = commonlib.replaceStr(r'\(\d+\).*$', '', tag).strip()
            for content in tree.xpath("//table[@id='ctl00_pC_DV_tableHeader']/.."):
                reportTime = content.xpath(".//td/b[contains(., 'Ngày đăng')]/../following-sibling::*[2]".decode('utf-8'))
                reportTime = self.standardizeTimeString(commonlib.getElementText(reportTime))
                area = content.xpath(".//td/b[contains(., 'Khu vực')]/../following-sibling::*[2]".decode('utf-8'))
                area = commonlib.getElementText(area)
                # Keep only the last breadcrumb segment after the » separator.
                if re.search(r'.+»(.+)', area):
                    area = commonlib.extractWithRegEx(r'.+»(.+)', area, 1, '').strip()
                nd = ''
                ndNode = content.xpath(".//div[@class='content']")
                if len(ndNode) > 0:
                    commonlib.cleanElementWithTag(ndNode[0], ['script'])
                    nd = etree.tostring(ndNode[0], method='text', encoding='utf-8')
                    nd = commonlib.replaceStr(r"\t|", '', nd).strip()
                    # One text fragment per non-empty line of the ad body.
                    for line in nd.splitlines():
                        text = line.strip()
                        if text != '': data.append({'type': 'text', 'data': text})
            # Download every slideshow image and record its local path.
            if len(tree.xpath("//*[@id='slideshow']")) > 0:
                for imgNode in tree.xpath("//*[@id='slideshow']//img"):
                    img = commonlib.getAttribText(imgNode, 'src')
                    img = commonlib.urlJoin(url, img) if img != '' else ''
                    if img != '':
                        dImg = commonlib.downloadImage(img, DOWNLOAD_PATH, PREFIX)
                        if dImg != '':
                            data.append({'type': 'image', 'data': dImg})
                            print('DEBUG: saved image (%s) to (%s)' % (img, dImg))
        except Exception:
            LOG.error(traceback.format_exc())
        LOG.debug('finished getNewsDetail(%s)' % url)
        return data, area, reportTime, tag

    def _saveItem(self, item, url, catId, city, collection, adType):
        """Extract one listing entry and save it when new.

        Shared by getListNews / getListVipNews (previously ~40 duplicated
        lines).  Returns 'saved', 'duplicate' (already in Mongo), or
        'skipped' (detail page yielded no content).
        """
        title = commonlib.getElementText(item.xpath("./a/b"))
        link = commonlib.getAttribText(item.xpath("./a"), 'href')
        link = commonlib.urlJoin(url, link) if link != '' else ''
        _id = commonlib.getCRC32Unsign(title+link)
        if collection.find_one({'_id': _id}):
            LOG.info('already exist in database')
            return 'duplicate'
        description = commonlib.getElementText(item.xpath(".//div[@class='sum']"), descendant=1)
        price = commonlib.getElementText(item.xpath(".//div[@class='item_price']"))
        thumbnail = commonlib.getAttribText(item.xpath(".//div[@class='sum']/img"), 'src')
        # The site's placeholder image counts as "no thumbnail".
        thumbnail = commonlib.urlJoin(url, thumbnail) if thumbnail != '/res/images/icons/noimage.gif' and thumbnail != '' else ''
        dThumbnail = ''
        if thumbnail != '':
            dThumbnail = commonlib.downloadImage(thumbnail, DOWNLOAD_PATH, PREFIX)
        content, area, reportTime, tag = self.getNewsDetail(link, catId)
        # Fall back to the category display name when no sub-category tag found.
        tag = tag if tag != '' else self.categories[catId][1]
        print('Tagging: %s' % tag)
        if len(content) == 0:
            return 'skipped'
        data = {'_id': _id,
            'title': title,
            'link': link,
            'price': price,
            'description': description,
            'downloadedThumbnail': dThumbnail,
            'thumbnail': thumbnail,
            'area': area,
            'city': self.__cities[city],
            'tag': tag,
            'content': content,
            'reporttime': reportTime,
            'timestamp': time.time(),
            'lastupdate': datetime.datetime.now(),
            'type': adType
            }
        collection.save(data)
        # debug output for manual verification
        self.print2Test({'title': title, 'link': link, 'area': area, 'content': content, 'thumbnail': thumbnail, 'price': price})
        return 'saved'

    def getListNews(self, catId, city):
        """Crawl the regular ('normal') listing pages for one category/city.

        Follows pagination until MAX_PROCESS new items were saved,
        MAX_DUPLICATE known items were seen, or pages run out.
        """
        LOG.debug('start getListNews(%s, %s)' % (catId, city))
        url = self.__url.format(city=city, cat=catId, id=self.categories[catId][0])
        collection = self.connection[DATABASE_NAME][catId]
        currentProcessed = 0
        currentDuplicated = 0
        try:
            while url != '' and currentProcessed < MAX_PROCESS and currentDuplicated < MAX_DUPLICATE:
                tree = commonlib.getXMLTree(url)
                if tree == '' or tree is None: return
                for item in tree.xpath("//ul[@class='list_4']/li/div[@class='w600']"):
                    # BUG FIX: was `>` while the loop guard uses `<`, which
                    # let both counters overshoot their limit by one item.
                    if currentProcessed >= MAX_PROCESS or currentDuplicated >= MAX_DUPLICATE:
                        if currentDuplicated >= MAX_DUPLICATE: LOG.info('so luong tin trung vuot qua nguong {0} cho phep'.format(MAX_DUPLICATE))
                        if currentProcessed >= MAX_PROCESS: LOG.info('so luong tin vuot qua nguong {0} cho phep'.format(MAX_PROCESS))
                        return
                    status = self._saveItem(item, url, catId, city, collection, 'normal')
                    if status == 'saved':
                        currentProcessed += 1
                    elif status == 'duplicate':
                        currentDuplicated += 1
                # Next-page link = sibling of the active pager entry.
                pNode = tree.xpath("//div[@id='pagingTop']//a[@class='lnkpageactive']/../following-sibling::*[1]")
                url = commonlib.urlJoin(url, commonlib.getAttribText(pNode, 'href')) if len(pNode) > 0 else ''
        except Exception:
            LOG.error(traceback.format_exc())
        finally:
            LOG.info('currentProcessed: {0}, currentDuplicated: {1}'.format(currentProcessed, currentDuplicated))
            LOG.debug('finished getListNews(%s, %s)' % (catId, city))
            # NOTE: a return inside finally overrides the early returns
            # above and suppresses uncaught exceptions; kept for
            # compatibility -- callers (Task.run) ignore the value.
            return ''

    def getListVipNews(self, catId, city):
        """Crawl the single AJAX-served 'topvip' listing for one category/city."""
        LOG.debug('start getListVipNews(%s, %s)' % (catId, city))
        url = "http://muaban.net/{city}/raovat/{id}/{cat}/topvip.html?{nocache}".format(city=city, id=self.categories[catId][0],
                                                                                        cat=catId, nocache=time.time())
        try:
            # The endpoint returns JS of the form $(...).html('<markup>');
            # pull the markup out and parse it directly.
            html = commonlib.extractWithRegEx(r"\.html\('(.+)'\)$", commonlib.getHTML(url), 1)
            tree = commonlib.buildTreeFromHTML(html)
            collection = self.connection[DATABASE_NAME][catId]
            for item in tree.xpath("//ul[@class='list_4']/li/div[@class='w600']"):
                self._saveItem(item, url, catId, city, collection, 'vip')
        except Exception:
            LOG.error(traceback.format_exc())
        finally:
            LOG.debug('finished getListVipNews(%s, %s)' % (catId, city))
            return ''

    def print2Test(self, args):
        """Print a harvested item to stdout for manual verification.

        BUG FIX: the old implementation injected args into locals with
        exec("%s = '%s'" % ...), which raised SyntaxError whenever a value
        contained a quote/newline and was an exec-injection hazard.  Plain
        dict lookups give identical output safely.
        """
        title = args.get('title', '')
        link = args.get('link', '')
        price = args.get('price', '')
        thumbnail = args.get('thumbnail', '')
        area = args.get('area', '')
        content = args.get('content', [])
        print('Title: %s' % title)
        print('Link: %s' % link)
        if price != '': print('Price: %s' % price)
        if thumbnail != '': print('Thumbnail: %s' % thumbnail)
        print('Area: %s' % area)
        print('--------------------------------------------')
        print('Content:')
        for fragment in content:
            print(fragment['data'])
        print('--------------------------------------------')

    def process(self):
        """Fan the per-(category, city) crawl jobs out over a thread pool."""
        LOG.debug('start process')
        pool = workerpool.WorkerPool(size=5)
        for citiId in self.__cities5Top.keys():             # testing with the 5 biggest cities
#        for citiId in self.__cities.keys():
            for catId in self.categories.keys():
                pool.put(Task(catId, citiId, self.getListVipNews))
                pool.put(Task(catId, citiId, self.getListNews))
            break  # NOTE(review): stops after the FIRST city only -- confirm this is still intended
        pool.shutdown()
        pool.wait()
        LOG.debug('finished process')
    
if __name__ == '__main__':
    LOG.info('start crawler muaban.net')
    mb = MuaBan(MONGO_SERVER, MONGO_PORT)
    mb.process()
    LOG.info('finished crawler muaban.net')
    # Force-terminate the interpreter: workerpool threads may still be
    # blocked on network I/O, so a normal exit could hang.
    # BUG FIX: the old code called os._exit(1), reporting failure on a
    # successful run, and the os.kill(pid, 9) after it was unreachable
    # dead code (os._exit never returns).
    os._exit(0)