# -*- coding: utf-8 -*-

import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import commonlib
import re
import datetime
import time
import traceback
import workerpool
import mechanize
import pprint
import html2text_lopngoaingu as html2text
from pymongo.objectid import ObjectId
from MongoModel import MongoModel
from workerpool import WorkerPool
from lxml import etree

# MongoDB endpoint the crawler persists into.
MONGO_SERVER = 'beta.mana.vn'
MONGO_PORT = 27017

# Module-level logger; commonlib configures handlers/formatting.
logger = commonlib.getLogger('lopngoaingu')

class Crawler(MongoModel):
    
    url = 'http://lopngoaingu.com/english-GT/Grammar/index.php?view=17-1131'
    dbName = 'tracnghiemtienganh'
    fileRoot = 'http://lopngoaingu.com/english-GT/Grammar/'
    def __init__(self, host, port):
        MongoModel.__init__(self, host, port)
        self.bookId = self.getBookId('English grammar in use')

    def getBookId(self, name):
        db = self.connection[self.dbName]
        collection = db['book']
        row = collection.find_one({'name': name})
        if row:
            return ObjectId(row['_id'])
        return None
    
    def process(self, item):
        try:
            detail = self.getDetail(item['link'])
            if len(detail) == 0: return
            db = self.connection['tracnghiemtienganh']
            collection = db['chapter']
            collection.save({'hashUrl': item['hashUrl'],
                            'bookId': self.bookId,
                            'chapter': item['chapterId'],
                            'name': item['name'],
                            'data': detail,
                            'link': item['link']
                            })
        except:
            logger.error(traceback.format_exc())
    
    def getListChapter(self):
        try:
            db = self.connection['tracnghiemtienganh']
            collection = db['chapter']
            tree = commonlib.getXMLTree(self.url)
            if tree == None: return
            items = []
            for item in tree.xpath("//div[@id='gramma']//div[@class='gram_title']/a"):
                name = commonlib.getElementText(item)
                print 'name:{0}'.format(name)
                chapterId = int(commonlib.extractWithRegEx(r'Bài\s*(\d+)', name, 1))
                print 'chapterId: {0}'.format(chapterId)
                url = commonlib.urlJoin(self.url, commonlib.getAttribText(item, 'href'))
                hashUrl = commonlib.getMD5Hash(url)
                if collection.find_one({'hashUrl': hashUrl}): continue
                items.append({'chapterId': chapterId, 'name': name, 'hashUrl': hashUrl, 'link': url})
            pool = workerpool.WorkerPool(size=3)
            pool.map(self.process, items)
            pool.shutdown()
            pool.wait()
        except:
            logger.error(traceback.format_exc())
    
    def processDich(self, url):
        logger.debug('call processDich(url={0})'.format(url))
        m = re.search(r'var (dich\d+=\[\].+$)', commonlib.getHTML(url))
        dich = {}
        if m:
            js = m.group(1).replace('[]', '{}')
            js = re.sub(r'dich\d+', 'dich', js)
            for line in js.split(';'):
                exec(line)
        return dich
    
    def getDetail(self, url):
        logger.debug('call getDetail({0})'.format(url))
        try:
            tree = commonlib.getXMLTree(url)
            if tree == None: return
            dichUrl = tree.xpath("//script[contains(@src, 'file/baihoc/gramma/grammar_in_use')]/@src")
            dich = {}
            if len(dichUrl)>0: 
                dich = self.processDich(commonlib.urlJoin(self.fileRoot, dichUrl[0]))
            detailNode = tree.xpath("//div[@class='detail']")
            if len(detailNode) > 0: detailNode = detailNode[0]
            content = html2text.html2text(etree.tounicode(detailNode), self.fileRoot, dich=dich)
            pprint.pprint(content)
            db = self.connection['giang']
            collection = db['test']
            collection.save({'data': content, '_id': commonlib.getMD5Hash(str(content))})
            return content
        except:
            logger.error(traceback.format_exc())
            return []

if __name__ == '__main__':
    logger.info('start crawler lopngoaingu')
    cr = Crawler(MONGO_SERVER, MONGO_PORT)
    cr.getListChapter()
    logger.info('finished crawler lopngoaingu')
    # Hard-exit to kill any lingering workerpool threads. Exit with 0:
    # the previous os._exit(1) reported failure to the shell/cron even
    # after a successful run.
    os._exit(0)
