'''
Created on Feb 10, 2014

@author: eyaomai
'''
import threading
import Queue
import datetime
import time
import urllib2
from bs4 import BeautifulSoup
import re
import logging
import traceback
class Logging(object):
    '''Holder for the process-wide logger shared by every class in this
    module. Starts as the root logger; FinanceJrj.__init__ replaces it
    with the configured 'jrj' logger after loading the logging config.'''
    
    LOGGER = logging.getLogger()

class FinanceJrj(threading.Thread):
    '''
    Crawler controller for finance.jrj.com.cn news.

    Seeds a shared Queue with tasks read from the jrj_pages and
    jrj_newscontent MySQL tables, starts a pool of Crawler worker threads,
    then runs itself as a watchdog thread that shuts everything down once
    no NEW rows remain or the pid file is removed externally.
    '''


    def __init__(self, conf):
        '''
        Constructor.

        conf keys used: logconfig, beginDate/endDate (datetime.date),
        threadPoolSize, dbHost, dbPort, dbUser, dbPasswd, dbName, pidfile.
        '''
        logconfig = conf['logconfig']
        import logging.config
        logging.config.fileConfig(logconfig)
        # Swap the module-wide default logger for the configured 'jrj' one.
        Logging.LOGGER = logging.getLogger('jrj')
        
        self.__beginDate = conf['beginDate']
        self.__endDate = conf['endDate']
        self.__threadPoolSize = conf['threadPoolSize']
        self.__dbHost = conf['dbHost']
        self.__dbPort = conf['dbPort']
        self.__dbUser = conf['dbUser']
        self.__dbPasswd = conf['dbPasswd']
        self.__dbName = conf['dbName']
        # Dedicated DB connection for this controller thread; each Crawler
        # worker gets its own proxy in crawl().
        self.__dbProxy = self.__createMySqlProxy()
        self.__threads = list()
        self.__crawlerProxyList = list()
        self.__shutdown = False
        self.__pidfile = conf['pidfile']
        super(FinanceJrj, self).__init__()
    
    def shutdown(self):
        '''Stop all worker threads and release every DB connection.'''
        # NOTE(review): this closes self.__dbProxy first; if invoked from the
        # SIGTERM handler while run() is between queries, run() may use a
        # closed connection -- confirm whether that race matters here.
        self.__dbProxy.close()

        for thread in self.__threads:
            thread.shutdown()
        
        # Wait for every worker to drain out before closing their proxies.
        for thread in self.__threads:
            thread.join()
        
        for proxy in self.__crawlerProxyList:            
            proxy.close() 
            
        self.__threads = list()
        self.__crawlerProxyList = list()
        self.__shutdown = True
            
    def crawl(self):
        '''Seed the task queue from the DB, start the worker pool, then
        start this controller thread (its run() acts as a watchdog).'''
        Logging.LOGGER.info('START CRAWLING for range %s to %s', self.__beginDate, self.__endDate)
        queue = Queue.Queue()
        self.__fetchTaskFromDB(queue)
        Logging.LOGGER.debug('Queue size is %d for startup', queue.qsize())
        
        for i in range(0,self.__threadPoolSize):
            proxy = self.__createMySqlProxy()
            self.__crawlerProxyList.append(proxy)
            cthread = Crawler(queue, proxy)
            self.__threads.append(cthread)
            cthread.start()
        
        self.start()
        
    def __generateTasks(self, queue, currentDate=None):
        '''Insert list-page urls for each date up to (but excluding) endDate
        -- starting after currentDate when given, else at beginDate -- that
        are not already in jrj_pages, and enqueue a type-1 task for each.'''
        if currentDate is None:
            currentDate = self.__beginDate
        else:
            currentDate += datetime.timedelta(1)
        
        urlList = set()
        # endDate is exclusive here.
        while currentDate<self.__endDate:
            url = 'http://finance.jrj.com.cn/xwk/'+generateDateUrlPortion(currentDate)+'_1.shtml'
            urlList.add(url)
            currentDate+=datetime.timedelta(1)
        if len(urlList) ==0:
            return
        
        #check existence
        ulist_str = '","'.join(urlList)
        sql = 'select url from jrj_pages where url in ("%s")'%ulist_str
        count = self.__dbProxy.execute(sql)
        if count>0:
            results = self.__dbProxy.cur.fetchall()
            for result in results:
                urlList.remove(result[0])
        
        if len(urlList) ==0:
            return
        #compose insert statement
        sql = 'insert into jrj_pages (url,issue_date,status) values '
        values=list()
        for url in urlList:
            issue_date_str = retrieveDateStrFromUrl(url)
            values.append('("%s","%s", "%s")' % (url.strip(),issue_date_str,'NEW'))
        
        sql += ','.join(values)
        
        #execute insert
        if self.__dbProxy.execute(sql) > 0:
            self.__dbProxy.commit()
            #insert to queue
            for url in urlList:
                queue.put_nowait(Task(url,1))

    def __fetchTaskFromDB(self, queue):
        '''Fill the queue with work: newly generated list pages, plus every
        row still marked NEW in jrj_pages (type 1) and jrj_newscontent
        (type 2).'''
        sql = 'select max(issue_date) from jrj_pages'
        self.__dbProxy.execute(sql)
        currentDate = self.__dbProxy.cur.fetchone()[0]
        Logging.LOGGER.info('Max issue_date in pages table is %s', currentDate)
        self.__generateTasks(queue, currentDate)
        # Fresh database: nothing can be marked NEW yet, and the tasks just
        # generated above were already queued by __generateTasks.
        if currentDate is None:
            return
        
        #fetch from pages
        sql = 'select url from jrj_pages where status="NEW"'
        count = self.__dbProxy.execute(sql)
        Logging.LOGGER.info('%d not-yet crawled url found in pages table', count)
        if count > 0:
            results = self.__dbProxy.cur.fetchall()
            for result in results:
                url = result[0]
                queue.put_nowait(Task(url,1))
        
        #fetch from newscontent
        sql = 'select url from jrj_newscontent where status="NEW"'
        count = self.__dbProxy.execute(sql)
        Logging.LOGGER.info('%d not-yet crawled url found in newscontent table', count)
        if count > 0:
            results = self.__dbProxy.cur.fetchall()
            for result in results:
                url = result[0]
                queue.put_nowait(Task(url,2))
        
    def __createMySqlProxy(self):
        # One proxy per thread -- connections are never shared between
        # threads. NOTE(review): confirm against the MySQLdb threadsafety
        # level in use.
        return MySqlProxy(self.__dbHost, self.__dbPort, self.__dbUser, self.__dbPasswd, self.__dbName)
    
    def run(self):
        '''Watchdog loop: every 30s, stop when the pid file disappears or
        when neither table has rows with status NEW; then shut down.'''
        nsql = 'select count(*) from jrj_newscontent where status="NEW"'
        psql = 'select count(*) from jrj_pages where status="NEW"'
        while self.__shutdown is False:
            time.sleep(30)
            # Removing the pid file is the external "stop" signal.
            if not isPidFileExist(self.__pidfile):
                Logging.LOGGER.info('Stop externally')
                break
            self.__dbProxy.execute(psql)
            count = self.__dbProxy.cur.fetchone()[0]
            if count>0:
                continue
            self.__dbProxy.execute(nsql)
            count = self.__dbProxy.cur.fetchone()[0]
            if count>0:
                continue
            Logging.LOGGER.info('No NEW items found in tables, just stop')
            break
        self.shutdown()    
            
class Crawler(threading.Thread):
    def __init__(self, queue, mysqlProxy):
        super(Crawler, self).__init__()
        self.__queue = queue
        self.__mysqlProxy = mysqlProxy
        self.__shutDown = False
        self.__stat = [0, 0]
    
    def shutdown(self):
        self.__shutDown = True
        
    def run(self):
        Logging.LOGGER.info('%s: Thread is started', self.getName())
        while self.__shutDown is False:
            time.sleep(5)
            if self.__queue.empty():
                continue
            task = None
            try:
                task = self.__queue.get_nowait()
                if task is None:
                    continue
                self.__queue.task_done()
                if task.ctype not in [1,2]:
                    Logging.LOGGER.error('Unknown crawling type %d found for url: %s', task.ctype, task.url)
                    continue
                Logging.LOGGER.debug('%s begin to handle task (type=%d): %s',self.getName(), task.ctype, task.url)
                result = None
                try:
                    if task.ctype == 1:
                        result = self.__crawlPage(task.url)
                    else:
                        result = self.__crawlNews(task.url)
                except Exception, e:
                    type_, value_, traceback_ = sys.exc_info()
                    ex = traceback.format_exception(type_, value_, traceback_)
                    Logging.LOGGER.error(ex)
                    result = ('ERROR', str(e), '')
                    
                if result is not None:
                    self.__updateResult(task.url, task.ctype, result)
                    self.__stat[task.ctype-1]+=1
            except Queue.Empty:
                continue
        Logging.LOGGER.info('%s Finish crawling %d page links and %d news links', self.getName(), self.__stat[0], self.__stat[1])
        self.__mysqlProxy.close()
    
    def __updateResult(self, url, ctype, result):
        sql = ''
        if ctype == 1:
            sql = 'update jrj_pages set status="%s", error_str = "%s" where url="%s"' % (result[0], result[1].replace('"','\\"').strip(), url)                
        else:
            sql = 'update jrj_newscontent set status="%s", content="%s", error_str="%s" where url="%s"' % (result[0],result[2].replace('"','\\"').strip(),result[1].replace('"','\\"').strip(),url)
                
        if self.__mysqlProxy.execute(sql) > 0:
            self.__mysqlProxy.commit()
        else:
            if ctype == 1:
                sql = 'update jrj_pages set status="%s", error_str = "%s" where url="%s"' % ('ERROR', 'Error in the generated sql string, pls refer to log', url)
            else:
                sql = 'update jrj_newscontent set status="%s", error_str = "%s" where url="%s"' % ('ERROR', 'Error in the generated sql string, pls refer to log', url)
            if self.__mysqlProxy.execute(sql) > 0:
                self.__mysqlProxy.commit()
        
    def __crawlPage(self, url):
        content, warning_str = self.__fetchContent(url)
        soup = BeautifulSoup(content)
        
        #Fetch the news url
        ullist = soup.findAll('ul',{'class':'list'})
        if len(ullist)!=1:
            Logging.LOGGER.error('url:%s: No <ul class="list" .../> found', url)
            return ('ERROR','No <ul class="list" .../> found')
        lilist = ullist[0].findChildren('li')
        if len(lilist)==0:
            Logging.LOGGER.error('url:%s: No <li> element under <ul class="list" .../> found', url)
            return ('ERROR','No <li .../> element under <ul class="list" .../> found')

        newsdict = dict()
        for li in lilist:
            ahref = li.findChildren('a')
            if len(ahref) == 0:
                continue
            href = ahref[1]['href'].strip()
            title = ahref[1].text.strip()
            span = li.findChildren('span')
            if len(span) == 0:
                continue
            timestring = span[0].text.strip()
            newsdict[href]=(title,timestring)

        #check existence
        if len(newsdict)==0:
            Logging.LOGGER.error('url:%s: <li .../> element does not contain expected attributes or content', url)
            return ('ERROR','<li .../> element does not contain expected attributes or content')
        beforeCount = len(newsdict)
        newsurl_str = '","'.join(newsdict)
        sql = 'select url from jrj_newscontent where url in ("%s")'%newsurl_str
        count = self.__mysqlProxy.execute(sql)
        if count>0:
            results = self.__mysqlProxy.cur.fetchall()
            for result in results:
                del newsdict[result[0]]
        afterCount = len(newsdict)
        Logging.LOGGER.debug('url:%s: Found %d news links and %d are new', url, beforeCount, afterCount)
        
        if len(newsdict)>0:
            #compose insert statement
            sql = 'insert into jrj_newscontent (url, title, issue_datetime, status) values '
            values = list()
            for href in newsdict:
                values.append('("%s","%s", "%s", "%s")' % (href,newsdict[href][0].replace('"','\\"'),newsdict[href][1],'NEW'))
            sql += ','.join(values)
        
            #do insert to db
            if self.__mysqlProxy.execute(sql) >0:
                self.__mysqlProxy.commit()
                #insert task to queue
                for newsurl in newsdict:
                    self.__queue.put_nowait(Task(newsurl,2))
            else:
                return ('ERROR','Error in the generated sql string, pls refer to log')
        
        #Fetch page url
        if url.find('_1.shtml')<=0:
            return ('DONE','')
        page_newslib = soup.findAll('p',{'class':'page_newslib'})
        if len(page_newslib)==0:
            return ('DONE','')
        ahref = page_newslib[0].findChildren('a')
        if len(ahref)==0:
            Logging.LOGGER.warn('url:%s: a href is not found under newslib', url)
            return ('WARNING','<a href=...> not found under newslib')
        index = url.rfind('/')
        pageurls = set()
        for href in ahref:
            if href.has_attr('href'):
                if href['href'] not in url:
                    newurl = url[:index+1]+href['href']
                    pageurls.add(newurl.strip())
        beforeCount = len(pageurls)
        #check existence
        if len(pageurls) == 0:
            Logging.LOGGER.warn('url:%s No href attribute found for all page link', url)
            return ('WARNING','No href attribute found for all page link')
        ulist_str = '","'.join(pageurls)
        sql = 'select url from jrj_pages where url in ("%s")'%ulist_str
        count = self.__mysqlProxy.execute(sql)
        if count>0:
            results = self.__mysqlProxy.cur.fetchall()
            for result in results:
                pageurls.remove(result[0])
        afterCount = len(pageurls)
        Logging.LOGGER.debug('url:%s: Found %d more-pages links and %d are new', url, beforeCount, afterCount)
        
        if len(pageurls) == 0:
            return ('DONE','')
        #compose insert statement
        sql = 'insert into jrj_pages (url,issue_date,status) values '
        values=list()
        for pageurl in pageurls:
            issue_date_str = retrieveDateStrFromUrl(pageurl)
            values.append('("%s","%s", "%s")' % (pageurl,issue_date_str,'NEW'))
        
        sql += ','.join(values)
        
        #execute insert
        if self.__mysqlProxy.execute(sql) >0:
            self.__mysqlProxy.commit()
            #insert to queue
            for pageurl in pageurls:
                self.__queue.put_nowait(Task(pageurl,1))
            return ('DONE','')
        else:
            return ('ERROR','Error in the generated sql string, pls refer to log')

    def __removeTags(self, soup, tag, arg=None):
        tags = soup.findAll(tag, arg)
        if len(tags)>0:
            for tag in tags:
                tag.extract()
                
    def __crawlNews(self, url):
        content, warning_str = self.__fetchContent(url)
        if warning_str is None:
            warning_str = ''
        soup = BeautifulSoup(content)
        a_full = soup.findAll('a',{'class':'all'})
        if len(a_full)>0:
            #TODO, sleep
            content = self.__fetchContent(url[:url.rfind('/')+1]+a_full[0]['href'])
            soup = BeautifulSoup(content)
        div_main = soup.findAll('div',{'class':'textmain tmf14 jrj-clear'})
        if len(div_main)<=0:
            div_main = soup.findAll('div',{'class':'newsCon'})
            if len(div_main)<=0:
                Logging.LOGGER.error('url:%s:NO news main body is found', url)
                return ('ERROR','NO news main body is found.'+warning_str,'')
        
        #remove topnews
        self.__removeTags(div_main[0], 'div',{'class':'hd'})
        
        #remove script
        self.__removeTags(div_main[0],'script')
        
        #remove style
        self.__removeTags(div_main[0],'style')
        
        #remove none display item
        self.__removeTags(div_main[0], 'div',{'style':'DISPLAY: none'})
        
        body=''
        ps = div_main[0].findAll('p')
        if len(ps)==0:
            brs = div_main[0].findAll('br')
            if len(brs) == 0:
                Logging.LOGGER.error('url:%s:<p> and <br> not found for text', url)
                return ('ERROR','<p> and <br> not found for text. '+warning_str,'')
            body = div_main[0].br.text
        else:
            '''
            #judge whether it is a well form html
            div_html = str(div_main[0])
            index = 0
            lindex = -1
            rindex = len(div_html)
            well_form = True
            while index>=len(div_html):                
                lindex = div_html[index:].find('<p>')
                rindex = div_html[index:].find('</p>')
                if lindex*rindex<0:
                    well_form = False
                    break
                if div_html[lindex+3:rindex].find('<p>')>0:
                    well_form = False
                    break
                if lindex<0:
                    break
                index = rindex
                
            if well_form:
                for p in ps:
                    body+=p.text
            else:
                warning_str = '<p> is not well-formed' + warning_str
                body = div_main[0].text
            '''
            body = div_main[0].p.text    
        if warning_str == '':
            return ('DONE','', body.strip())
        else:
            return ('WARNING',warning_str,body.strip())
    
    def __fetchContent(self, url):
        warning_str = None
        conn = urllib2.urlopen(url)
        html = conn.read()
        pattern="charset=.*?\""
        charset_groups = re.search(pattern, html)
        charset = 'utf-8'
        if charset_groups is not None:
            charset_str = charset_groups.group(0)
            charset = charset_str[charset_str.index('=')+1:len(charset_str)-1]
        else:
            warning_str = 'charset not found'
        
        ss = set(['gb2312','gbk','utf-8'])
        if warning_str is not None:
            ss.remove(charset)
        charset_list = list(ss)
        if warning_str is not None:
            charset_list.insert(0,charset)
        failure = False
        successCharset = ''
        content = html
        for charset_i in charset_list:
            try:
                content = unicode(html,charset_i)
                if failure:
                    successCharset = charset_i
                break
            except UnicodeDecodeError:
                if failure is False:
                    failure = True                
                continue    
        if failure:            
            if successCharset == '':
                content = unicode(html, charset, 'ignore')
                if warning_str:
                    warning_str +=' Can not decode'
                else:
                    warning_str = 'Charset was declared as %s with some character decode error ignored' % charset
            else:
                warning_str ='Charset was declared as %s but is actually %s' % (charset, successCharset)
        return content.encode('utf-8'), warning_str                
    
class Task(object):
    '''A single crawl work item: the target url plus its crawl type
    (1 = news-list page, 2 = news article).'''
    def __init__(self, url, ctype):
        self.url, self.ctype = url, ctype

def generateDateUrlPortion(newsDate):
    '''Format a date as the "yyyymm/yyyymmdd" path portion used by the jrj
    list-page urls.

    Uses strftime for zero padding instead of the original manual
    string fix-ups.
    '''
    return newsDate.strftime('%Y%m/%Y%m%d')

def retrieveDateStrFromUrl(url):
    '''Return the date portion of a jrj url -- the text between the last
    '/' and the last '_' -- or None (after logging) when absent.'''
    start = url.rfind('/')
    stop = url.rfind('_')
    if 0 <= start < stop:
        return url[start+1:stop]
    Logging.LOGGER.error('Can not find date string in url %s', url)
    return None

def retrieveDateFromUrl(url):
    '''Return the datetime.date encoded in a jrj url, or None when the url
    carries no recognizable date.

    FIX: the original passed None straight into translateDateStr (raising
    TypeError) when retrieveDateStrFromUrl failed.
    '''
    dateStr = retrieveDateStrFromUrl(url)
    if dateStr is None:
        return None
    return translateDateStr(dateStr)

def installProxy(protocol, host, port):
    '''Install a process-wide urllib2 proxy handler for the given protocol
    (host and port are strings).'''
    handler = urllib2.ProxyHandler({protocol: "%s:%s" % (host, port)})
    urllib2.install_opener(urllib2.build_opener(handler))

def translateDateStr(dateStr):
    '''Convert a "yyyymmdd..." string into a datetime.date; characters past
    index 8 are ignored.'''
    year, month, day = dateStr[:4], dateStr[4:6], dateStr[6:8]
    return datetime.date(int(year), int(month), int(day))

import MySQLdb
class MySqlProxy(object):
    def __init__(self, host, port, user, passwd, db):
        self.conn = MySQLdb.Connect(host=host, user=user, passwd=passwd, db=db, charset='utf8', port=port)
        self.cur=self.conn.cursor()
    
    def execute(self, sql):
        try:
            return self.cur.execute(sql)
        except Exception, e:
            Logging.LOGGER.error('ERROR sql:%s; Exception is %s', sql, str(e))
            return -1
    
    def commit(self):
        self.conn.commit()
    
    def close(self):
        try:
            self.conn.close()
            self.cur.close()
        except Exception:
            pass

class Configuration(object):
    '''
    Configuration helper that reads a json file into a plain dict.
    Instantiate with the file path, then call readConfig() for the dict.
    '''
    def __init__(self, json_file):
        self.__json_file = json_file
    
    def readConfig(self):
        '''Parse the configured json file (stripping CR/LF/TAB first) and
        return the resulting dict.'''
        import json
        with open(self.__json_file) as f:
            raw = f.read()
        for ch in ("\r", "\t", "\n"):
            raw = raw.replace(ch, "")
        return json.loads(raw)

# Runtime configuration dict; repopulated from the json config file when the
# script runs (see __main__). The quoted block below documents the expected
# keys with example values.
conf = dict()
'''
conf['beginDate'] = datetime.date(2014,2,12)
conf['endDate'] = datetime.date(2014,2,13)
conf['threadPoolSize'] = 5
conf['dbHost'] = 'localhost'
conf['dbPort'] = 3306
conf['dbUser'] = 'stockreport'
conf['dbPasswd'] = 'stockreport'
conf['dbName'] = 'news'
conf['logconfig'] = '/home/lensadmin/jrjcrawler/conf/logging.cfg'
conf['pidfile'] = '/home/lensadmin/jrjcrawler/log/.jrj.pid'
'''
# The running FinanceJrj instance; assigned in __main__ and used by the
# terminate() signal handler.
fj = None

def terminate(signal, frame):
    print 'Shutdown the crawler ...'
    fj.shutdown()
    removePidFile(conf['pidfile'])

def writePid(filename, pid):
    '''Write the process id to filename, replacing any existing content.

    Uses a with-block so the file is flushed and closed even on error
    (the original left the handle open if write() raised).
    '''
    with open(filename, 'w') as pidfile:
        pidfile.write(str(pid))

def readPid(filename):
    '''Return the integer pid stored in filename, or None when the file is
    missing or its content is not a valid integer.

    FIX: uses open() instead of the legacy file() builtin and closes the
    handle via a with-block (the original leaked it).
    '''
    try:
        with open(filename, 'r') as pidfile:
            return int(pidfile.read().strip())
    except Exception:
        # Broad on purpose: any failure (IOError, ValueError, ...) means
        # "no usable pid", matching the original behaviour.
        return None

def removePidFile(filename):
    '''Delete the pid file; propagates OSError if it does not exist.'''
    os.unlink(filename)

def isPidFileExist(filename):
    # True while the pid file is on disk; its removal acts as the external
    # stop signal checked by FinanceJrj.run().
    return os.path.exists(filename)
            
if __name__ == '__main__':
    import sys
    import os
    import signal
    arg_length = len(sys.argv)
    action = sys.argv[1]
    conffile = sys.argv[2]
    c = Configuration(conffile)
    conf = c.readConfig()
    conf['beginDate'] = translateDateStr(conf['beginDate'])
    conf['endDate'] = translateDateStr(conf['endDate'])
    #pid = readPid(conf['pidfile'])

    if action == 'start':
        if isPidFileExist(conf['pidfile']):
            print 'Previous process is on-going, please stop it firstly'
            sys.exit(1)
        signal.signal(signal.SIGTERM, terminate)  
        print 'Starting process... '
        pid = os.getpid()
        writePid(conf['pidfile'], pid)
        fj = FinanceJrj(conf)
        fj.crawl()
        sys.exit(0)        
    elif action == 'stop':
        if not isPidFileExist(conf['pidfile']):
            print 'Pid not found'
            sys.exit(1)
        removePidFile(conf['pidfile'])
        sys.exit(0)