import crawler.crawler
import crawler.utils
import requests
import json
from bs4 import BeautifulSoup
import re
import traceback
import psycopg2
import uuid
def getTrendingTopcis(page_num, cks, timeout=30):
    """
    Fetch one page of the Weibo trending-topic listing and parse it.

    Args:
        page_num: 1-based page index of the listing.
        cks: cookie dict passed straight through to requests.get.
        timeout: seconds before each HTTP request gives up.  New
            keyword with a default, so existing callers are unaffected;
            a Timeout is a requests.RequestException, which callers
            already catch.

    Returns:
        dict mapping rank -> TrendingTopic, as built by parser().
    """
    link = ('http://d.weibo.com/100803?cfs=920&'
            'Pl_Discover_Pt6Rank__5_filter=hothtlist_type=1&'
            'Pl_Discover_Pt6Rank__5_page=' + str(page_num) +
            '#Pl_Discover_Pt6Rank__5&retcode=6102')
    # Previously these requests had no timeout and could hang the
    # crawler indefinitely on a stalled connection.
    ret = requests.get(link, cookies=cks, timeout=timeout)

    # Weibo sometimes answers with a meta-refresh stub instead of the
    # real page; the redirect target is embedded in an inline script.
    if 'http-equiv="refresh"' in ret.text:
        bs = BeautifulSoup(ret.text, 'lxml')
        if bs.head.meta['http-equiv'] == 'refresh':
            script_text = bs.body.script.contents[0]
            start = script_text.find('http')
            # +12 keeps exactly "retcode=6102" (7 + 5 chars) in the URL.
            end = script_text.find('retcode') + 12
            ret = requests.get(script_text[start:end], cookies=cks,
                               timeout=timeout)
    return parser(ret.text, cks)

def parser(html, cks):
    """
    Parse one listing page of trending topics.

    Finds the inline <script> whose JSON payload carries the
    "Pl_Discover_Pt6Rank__5" module, unwraps the embedded HTML with
    scriptToHtml(), and walks each topic <li> to build TrendingTopic
    records.  Each topic's own index page is then fetched and handed to
    topicParser() for the detail fields.

    Returns a dict mapping rank (int) -> TrendingTopic.
    """
    # The real markup is shipped inside a <script> tag as JSON; locate
    # the one owning the trending-rank dom node before unwrapping.
    for script in BeautifulSoup(html, 'lxml').find_all('script'):
        if '"domid":"Pl_Discover_Pt6Rank__5"' in str(script):
            html = str(script)
            break
    html = scriptToHtml(html)
    bs = BeautifulSoup(html, 'lxml')
    lines = bs.find_all('li', class_ = 'pt_li S_line2')
    topics = dict()
    for line in lines:
        # NOTE(review): navigation assumes a fixed page layout — the
        # text block is the div following the first inner div.
        text_box = line.div.div.next_sibling.div
        tr = crawler.crawler.TrendingTopic()
        rank = str(text_box.div.span.contents[0])
        if 'TOP' in rank:
            # Top entries render as "TOPn"; strip the 3-char prefix.
            tr.rank = int(rank[3:])
        else:
            tr.rank = int(rank)
        tr.topic = text_box.div.a.contents[0]
        tr.topic_link = text_box.div.a['href']
        tr.description = text_box.div.next_sibling.contents[0]
        subinfo_box = text_box.next_sibling
        # The presenter block is optional; only read it when present.
        if subinfo_box.div.next_sibling.next_sibling:
            tr.topic_presenter = subinfo_box.div.next_sibling.next_sibling.span.span.a.contents[0]
            tr.topic_presenter_link = subinfo_box.div.next_sibling.next_sibling.span.span.a['href']
        tr.num_reads = subinfo_box.div.span.span.span.contents[0]
        # Append the same retcode flag the listing URL carries —
        # presumably avoids a redirect; TODO confirm against Weibo.
        link = tr.topic_link + '&retcode=6102'
        ret = requests.get(link, cookies = cks)
        try:
            # Topic pages vary in structure, so parse failures are
            # expected occasionally; keep going with the other topics.
            tr = topicParser(ret.text, tr)
            topics[tr.rank] = tr
        except AttributeError:
            # Dump the raw page so the failing structure can be
            # inspected offline, then move on.
            print('******************************')
            print(ret.text)
            traceback.print_exc()
            print('******************************')
    return topics
    
    
def topicParser(html, topic):
    """
    Parse the index page of a single trending topic and fill in the
    detail fields on *topic* (mutated in place and returned).

    Three inline-script modules are unwrapped when present:
      - Pl_Third_Inline__3          -> full description
      - Pl_Core_T8CustomTriColumn__ -> readings/discussion/fans counts
      - Pl_Core_T5MultiText__       -> categories / district / tags
    """
    intro_box = None
    relates_box = None
    num_box = None
    # Bitmask of modules already consumed so each branch fires at most
    # once per page: 0x1 = intro, 0x2 = counters, 0x4 = related info.
    script_id = 0x0
    for script in BeautifulSoup(html, 'lxml').find_all('script'):
        script = str(script)
        if script_id & 0x1 != 1 and '"domid":"Pl_Third_Inline__3"' in script:
            script = scriptToHtml(script)
            if script:
                intro_box = BeautifulSoup(script, 'lxml')
            script_id += 1
        elif script_id & 0x2 != 2 and '"domid":"Pl_Core_T8CustomTriColumn__' in script:
            script = scriptToHtml(script)
            if script:
                # A table row of three <td>s holding the counters.
                num_box = BeautifulSoup(script, 'lxml').body.div.div.div.table.tbody.tr
            script_id += 2
        elif script_id & 0x4 != 4 and '"domid":"Pl_Core_T5MultiText__' in script:
            script = scriptToHtml(script)
            if script:
                relates_box = BeautifulSoup(script, 'lxml').find('ul')
            script_id += 4
    if intro_box:
        # Replaces the short description scraped from the listing page.
        topic.description = intro_box.body.div.div.div.div.p.contents[1]
    if num_box:
        # Cells in order: readings, discussion, fans.
        topic.num_readings = crawler.utils.strToInteger(num_box.td.strong.contents[0])
        topic.num_discussion = crawler.utils.strToInteger(num_box.td.next_sibling.strong.contents[0])
        topic.num_fans = crawler.utils.strToInteger(num_box.td.next_sibling.next_sibling.strong.contents[0])
    if relates_box:
        # First <li>: categories — comma-join the anchor texts.
        for tag in relates_box.li.find_all('a'):
            topic.categories += str(tag.contents[2]).replace(' ', '') + ','
        topic.categories = topic.categories[:-1]  # drop trailing comma
        # Next <li> (double next_sibling skips the whitespace node):
        # districts.  It is optional, and the tags <li> after it is
        # only looked for when districts exist.
        relates_box = relates_box.li.next_sibling.next_sibling
        if relates_box:
            for tag in relates_box.find_all('a'):
                topic.district+= str(tag.contents[2]).replace(' ', '') + ','
            topic.district= topic.district[:-1]
            # Third <li>: free-form tags, optional as well.
            relates_box = relates_box.next_sibling.next_sibling
            if relates_box:
                for tag in relates_box.find_all('a'):
                    topic.tags += str(tag.contents[2]).replace(' ', '') + ','
                topic.tags = topic.tags[:-1]
    return topic


    
def scriptToHtml(script):
    """
    Extract the HTML payload that Weibo wraps in a <script> JSON blob.

    The Weibo API ships real markup inside inline javascript as a JSON
    object with an "html" member.  This strips the literal escape
    sequences, pulls out the outermost {...} object, and returns its
    "html" value.

    Args:
        script: raw <script> text (one line of javascript).

    Returns:
        The embedded HTML string, or None when no JSON object is found
        in the input or the object carries no "html" key.
    """
    # The payload contains literal backslash-escape sequences that
    # would break json.loads; strip them before parsing.
    script = script.replace('\\t', '').replace('\\n', '').replace('\\r', '')
    mat = re.match(r'[\s\S]*(\{[\s\S]*\})[\s\S]*', script)
    if not mat:
        # Guard clause replaces the original nested if/else.
        print('Input does not seem to be a line of javascript')
        print(script)
        return None
    # dict.get supplies the None fallback without the non-idiomatic
    # `'html' in json_html.keys()` membership test of the original.
    return json.loads(mat.group(1)).get('html')


        
if __name__ == '__main__':
    # Crawl the first four pages of Weibo trending topics (using a
    # saved cookie jar for authentication) and persist them into the
    # local PostgreSQL database.
    cookies = crawler.utils.loadCookies('/home/atropos/weibo-research/src/python/.cookies/weibo.com')
    rank = dict()
    for page_num in range(1, 5):
        # Retry each page indefinitely on network failures; only a
        # successful fetch breaks out of the loop.
        while True:
            try:
                rank.update(getTrendingTopcis(page_num, cookies))
                break
            except requests.RequestException:
                traceback.print_exc()
    db_conn = psycopg2.connect("dbname=trending_topic user=atropos host='/var/run/postgresql/'") 
    cur = db_conn.cursor()
    for topic in rank.values():
        # Reuse the existing row's id when this topic was seen in an
        # earlier run; otherwise insert it under a fresh UUID.
        cur.execute("select id from topics where topic = %s", (topic.topic,))
        topic_id = str(uuid.uuid4())
        res = cur.fetchone()
        if not res: 
            cur.execute("insert into topics (id, topic, " + \
                    "description, tags, presenter, presenter_link,"+ \
                    "categories, district) values (%s, %s, %s, %s, %s, %s, %s, %s)",
                    (topic_id, topic.topic, topic.description,
                            topic.tags, topic.topic_presenter, topic.topic_presenter_link,
                            topic.categories, topic.district))
        else:
            topic_id = res[0] 
        # A time-stamped statistics snapshot is recorded on every run,
        # whether or not the topic row already existed.
        cur.execute("insert into topic_statis (id, rank, num_readings, num_discussion, num_fans," + \
                "timestamp) values (%s, %s, %s, %s, %s, %s)", (topic_id, topic.rank, topic.num_readings, 
                        topic.num_discussion, topic.num_fans, crawler.utils.getTimeStamp()))
        # Commit per topic so a crash mid-run keeps earlier topics.
        db_conn.commit()
    db_conn.close()


