#!/usr/bin/env python
import crawler.crawler
import crawler.utils
import concurrent.futures
import requests
import traceback
import psycopg2
import time
import json
import re
import sys
from bs4 import BeautifulSoup
from hashlib import md5
class TrendingTopic:
    """A single trending topic: display name, page link and read count."""

    def __init__(self):
        # Topic name as shown on the rank page.
        self.name = ''
        # URL of the topic's main page.
        self.link = ''
        # Total number of times the topic has been read.
        self.num_reads = 0

    def __str__(self):
        return '{} {} {}\n'.format(self.name, self.num_reads, self.link)
def parseRankPage(html):
    """
    Extract trending topics from one Weibo rank page.

    Parameters:
        html: raw HTML of the rank page; the list of interest is
            embedded in a <script> tag as FM.view(...) JSON.

    Returns:
        dict mapping md5(topic name).hexdigest() -> TrendingTopic.
    """
    # The rank list lives in the script whose domid is
    # Pl_Discover_Pt6Rank__5; narrow html down to that script first.
    for script in BeautifulSoup(html, 'lxml').find_all('script'):
        if '"domid":"Pl_Discover_Pt6Rank__5"' in str(script):
            html = str(script)
            break
    # Unwrap the JSON payload to get the real markup.
    html = scriptToHtml(html)
    bs = BeautifulSoup(html, 'lxml')
    lines = bs.find_all('li', class_ = 'pt_li S_line2')
    topics = dict()
    for line in lines:
        # Layout: first inner div is the thumbnail, its sibling div
        # holds the text content.
        text_box = line.div.div.next_sibling.div
        # Skip sponsored entries flagged with the "推荐" (promoted) badge.
        if text_box.div.span.contents[0] == '推荐':
            continue
        tr = TrendingTopic()
        # Topic name is wrapped in '#...#' markers; strip them.
        tr.name = text_box.div.a.contents[0][1:-1]
        tr.link = text_box.div.a['href'] + '&retcode=6102'
        subinfo_box = text_box.next_sibling
        # [:-1] drops a trailing label character before numeric
        # conversion; the '万'/'亿' magnitude suffix itself is handled
        # by strToInteger. NOTE(review): confirm against live markup.
        tr.num_reads = strToInteger(subinfo_box.div.span.span.span.contents[0][:-1])
        # Key topics on md5 of the name so ids are stable across pages.
        topics[md5(tr.name.encode('utf-8')).hexdigest()] = tr
    return topics
def parseTopicPage(html):
    """
    Extract the total read count from a topic's main page.

    Parameters:
        html: raw HTML of the topic page.

    Returns:
        The read count as an int, or 0 when the counter widget
        cannot be located.
    """
    num_box = None
    # The counter widget is embedded in the script whose domid starts
    # with Pl_Core_T8CustomTriColumn__ (the numeric suffix varies).
    for script in BeautifulSoup(html, 'lxml').find_all('script'):
        script = str(script)
        if '"domid":"Pl_Core_T8CustomTriColumn__' in script:
            script = scriptToHtml(script)
            if script:
                # First table row holds the counters; reads are in the
                # first cell's <strong> tag.
                num_box = BeautifulSoup(script, 'lxml').body.div.div.div.table.tbody.tr
                break
    if num_box:
        return strToInteger(num_box.td.strong.contents[0])
    return 0

def scriptToHtml(script):
    """
    Pull the embedded HTML payload out of a Weibo FM.view(...) script.

    Weibo API always encapsulate html document in script: the script
    body carries a JSON object whose 'html' field holds the markup.

    Returns:
        The html string, or None when no JSON object or no 'html'
        field can be found.
    """
    # Strip the escaped whitespace sequences carried by the JS string.
    for seq in ('\\t', '\\n', '\\r'):
        script = script.replace(seq, '')
    match = re.match(r'[\s\S]*(\{[\s\S]*\})[\s\S]*', script)
    if not match:
        print('Input does not seem to be a line of javascript')
        print(script)
        return None
    payload = json.loads(match.group(1))
    if 'html' not in payload:
        print("html part is missing in this script")
        return None
    return payload['html']
def strToInteger(string):
    """
    Convert a Weibo count string to an int.

    Handles the Chinese magnitude suffixes '万' (1e4) and '亿' (1e8),
    e.g. '3.2万' -> 32000. Plain digit strings pass through int().
    Surrounding whitespace is tolerated.

    Raises:
        ValueError: if the string is empty or not a parsable number
            (previously an empty string raised an opaque IndexError).
    """
    string = string.strip()
    if not string:
        raise ValueError('empty count string')
    if string[-1] == '万':
        return int(float(string[:-1])*10000)
    if string[-1] == '亿':
        return int(float(string[:-1])*100000000)
    return int(string)

def rankSpider(page_num, cookies):
    """
    Crawl one page of the trending-topic rank list.

    Parameters:
        page_num: 1-based page index of the rank list.
        cookies: cookie dict passed through to requests.get.

    Returns:
        dict of <topic_id (md5 hex), TrendingTopic>; an empty dict
        when the page could not be fetched/parsed after max_tries
        attempts.
    """
    link = 'http://d.weibo.com/100803?cfs=920&'\
            'Pl_Discover_Pt6Rank__5_filter=hothtlist_type=1&'\
            'Pl_Discover_Pt6Rank__5_page=' + str(page_num) +\
            '&retcode=6102'
    tries = 1
    max_tries = 4
    while True:
        ret = None
        try:
            ret = requests.get(link, cookies = cookies)
            return parseRankPage(ret.text)
        except requests.RequestException:
            # Network-level failure: log and fall through to the shared
            # retry bookkeeping below. (The original never incremented
            # tries on this path, so a dead network spun forever.)
            traceback.print_exc()
        except Exception:
            print("An error happend during parsing page %s. Page Details are:" % (page_num,))
            sys.stdout.flush()
            # ret stays None if requests.get itself raised something
            # other than RequestException; guard against a NameError.
            if ret is not None:
                print(ret.text)
            sys.stdout.flush()
        if tries >= max_tries:
            print("Page-level recovery fails, run global level restore")
            return dict()
        tries += 1
        
def topicSpider(link, cookies):
    """
    Crawl a topic's main page and extract its total read count.

    Parameters:
        link: URL of the topic's main page.
        cookies: cookie dict passed through to requests.get.

    Returns:
        The read count as an int; 0 after max_tries failed fetches
        (matching parseTopicPage's counter-not-found fallback).
    """
    tries = 1
    max_tries = 4
    while True:
        try:
            ret = requests.get(link, cookies = cookies)
            break
        except requests.RequestException:
            traceback.print_exc()
            # The original retried forever here; bound the retries so a
            # single dead link cannot hang the whole thread pool.
            if tries >= max_tries:
                return 0
            tries += 1
    return parseTopicPage(ret.text)

def cacDelta(lstprds, tprds):
    """
    Caculate the changes of topic readings after last sampling.
    Return pairs of <Topic_id, Topic_reading_changes> and update
    the last topic reading records.

    NOTE: currently a stub — always returns two empty dicts.
    """
    changes, records = {}, {}
    return (changes, records)

def initdb():
    """
    Open a connection to the local trending_topic_alpha Postgres
    database over the unix socket and return it.
    """
    return psycopg2.connect("dbname=trending_topic_alpha user=atropos host='/var/run/postgresql/'")

def closedb(db):
    """Close the database connection returned by initdb()."""
    db.close()


def ldittp(db_con):
    """
    Load the topic snapshot saved by the previous run.

    Reads every row of initial_topics (positional layout:
    id, link, reads, name) and rebuilds the in-memory topic set.

    Returns:
        dict of <topic_id, TrendingTopic>.
    """
    def _to_topic(row):
        # Row layout: (id, link, reads, name).
        topic = TrendingTopic()
        topic.link = row[1]
        topic.num_reads = row[2]
        topic.name = row[3]
        return topic

    cur = db_con.cursor()
    cur.execute("select * from initial_topics;")
    rows = cur.fetchall()
    cur.close()
    return {row[0]: _to_topic(row) for row in rows}

def crtmsp():
    """
    Return the current local hour as a 'YYYYMMDDHH' timestamp string.

    Month, day and hour are zero-padded to two digits. The original
    padded each field by hand; strftime does the same in one call.
    """
    return time.strftime('%Y%m%d%H')


def dpDelta(delta, db_con):
    """
    Dump the per-topic read-count changes to the topic_reads table.

    Parameters:
        delta: dict of <topic_id, read_count_change>.
        db_con: open psycopg2 connection; committed on success.
    """
    ts = crtmsp()
    # 'with' closes the cursor even when an insert raises (the original
    # leaked it on error); executemany batches the row inserts.
    with db_con.cursor() as cur:
        cur.executemany(
            """insert into topic_reads (id, reads, timestamp)
                values (%s, %s, %s);""",
            [(tid, reads, ts) for tid, reads in delta.items()])
    db_con.commit()

def dpIttps(ittps, db_con):
    """
    Dump initial topics set to database.

    Replaces the initial_topics table with the given snapshot and
    appends a timestamped copy of every row to initial_topics_bk.

    NOTE(review): the delete is committed before the inserts run, so a
    failure mid-insert leaves initial_topics empty. Kept as-is to
    preserve the original transactional behavior — confirm intent.

    Parameters:
        ittps: dict of <topic_id, TrendingTopic>.
        db_con: open psycopg2 connection; committed on success.
    """
    ts = crtmsp()
    # 'with' closes the cursor even when an insert raises (the original
    # leaked it on error).
    with db_con.cursor() as cur:
        cur.execute("delete from initial_topics;")
        db_con.commit()
        rows = [(tid, topic.link, topic.num_reads, topic.name)
                for tid, topic in ittps.items()]
        # Backup routine: keep a timestamped history of every snapshot.
        cur.executemany(
            """insert into initial_topics_bk (id, link, reads, name, timestamp)
             values (%s, %s, %s, %s, %s);""",
            [(tid, link, reads, name, ts) for tid, link, reads, name in rows])
        cur.executemany(
            """insert into initial_topics (id, link, reads, name)
             values (%s, %s, %s, %s);""", rows)
    db_con.commit()

def udTpstar(tpstar, cookies):
    """
    Refresh the read counts of the given topics.

    Splits the topic ids into chunks of 20 and fetches each chunk's
    read counts on a thread pool via gttpnds.

    Returns:
        dict of <topic_id, TrendingTopic> with updated num_reads.
    """
    chunk_size = 20
    key_list = list(tpstar.keys())
    chunks = [key_list[pos:pos + chunk_size]
              for pos in range(0, len(key_list), chunk_size)]
    refreshed = dict()
    with concurrent.futures.ThreadPoolExecutor() as pool:
        pending = [pool.submit(gttpnds, tpstar, chunk, cookies)
                   for chunk in chunks]
        for job in concurrent.futures.as_completed(pending):
            try:
                refreshed.update(job.result())
            except Exception:
                traceback.print_exc()
    return refreshed

def gttpnds(tpstar, keys, cookies):
    """
    Fetch the current read count for each topic id in keys.

    Mutates the shared TrendingTopic objects held by tpstar
    (num_reads is updated in place) and returns the sub-dict of
    <topic_id, TrendingTopic> it touched.
    """
    fetched = dict()
    for topic_id in keys:
        topic = tpstar[topic_id]
        topic.num_reads = topicSpider(topic.link, cookies)
        fetched[topic_id] = topic
    return fetched


if __name__ == '__main__':
    # Hourly sampling job: crawl all rank pages, diff read counts
    # against the snapshot saved by the previous run, then persist both
    # the delta and the new snapshot.
    total_pages = 66
    max_tries = 3
    cookies = crawler.utils.loadCookies('/home/atropos/weibo-research/src/python/.cookies/weibo.com')
    # Try to load initial trending topics set.
    db_con = initdb()
    ittps = ldittp(db_con)
    tries = 1
    while tries <= max_tries:
        topics = dict()
        # Fan out one rankSpider call per rank page on a thread pool.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            pgn_futures = {executor.submit(rankSpider,page_num, cookies): page_num for page_num in range(1, total_pages+1)}
            for future in concurrent.futures.as_completed(pgn_futures):
                try:
                    topics.update(future.result())
                except Exception:
                    traceback.print_exc()
        # Each rank page is expected to yield 15 topics; anything less
        # means at least one page failed, so retry the whole sweep.
        if len(topics.keys()) == total_pages*15:
            print("Success")
            break
        print("Only get %s topics, Retrying..." % (len(topics.keys()),))
        tries += 1
    if tries > max_tries:
        # We should restart the program
        # NOTE(review): this path uses 'weibo_research' (underscore)
        # while the cookie path above uses 'weibo-research' (hyphen) —
        # one of the two is probably wrong; verify on the host.
        print("Global recovery fails, restarting")
        closedb(db_con)
        import subprocess
        subprocess.Popen('/home/atropos/weibo_research/src/shell/hour_task_alpha.sh')
        print("Restarted")
        sys.exit()
    # T*: topics present in the previous snapshot but missing from this
    # hour's rank pages — re-crawl them individually for read counts.
    tpidstar = set(ittps.keys()) - set(topics.keys())
    tpstar = {tpid: ittps[tpid] for tpid in tpidstar}
    tpstar = udTpstar(tpstar, cookies)

    # Caculate delta T*
    # NOTE(review): tpstar holds the SAME objects as ittps, and
    # udTpstar updates num_reads in place — so by the time this diff
    # runs, ittps[tid].num_reads already equals topic.num_reads and the
    # delta computes to 0. Looks like a latent bug; confirm intent.
    delta = dict()
    for tid, topic in tpstar.items():
        delta[tid] = topic.num_reads - ittps[tid].num_reads
        ittps[tid].num_reads = topic.num_reads

    # Cacaulate delta T
    for tid, topic in topics.items():
        if tid in ittps:
            delta[tid] = topic.num_reads - ittps[tid].num_reads
            ittps[tid].num_reads = topic.num_reads
        else:
            # New topic (changed 2016-09-11): start with a zero delta
            # instead of the full read count, so a brand-new hot topic
            # does not dominate the diff.
            delta[tid] = 0 #topic.num_reads
            ittps[tid] = topic
    # Dump delta and initial trending topic set to database

    dpDelta(delta, db_con)
    dpIttps(ittps, db_con)
    closedb(db_con)
