"""
Spide topic related weibo. We are only interested in weibo content.
"""
import requests
from bs4 import BeautifulSoup
from utils import UserInfo, PageConfigInfo, HtmlUtils, Util, \
        DBUtils, Repost, Post, __DEBUG__
import traceback

import pdb


class SpideTRWeibo:
    """Scrape topic-related weibo posts from weibo.com.

    Only the plain text content of each weibo is collected; media,
    layout markup and "web link" anchors are discarded.
    """

    def __init__(self):
        pass

    def spide(self):
        """
        Return a list of weibo text contents scraped from the topic page.

        Iterates current_page 1-3 and page 1-6 of the hard-coded topic
        URL. Network errors retry the same page indefinitely; any other
        error is reported and that page is skipped.
        """
        contents = []
        for cp in range(1, 4):
            for p in range(1, 7):
                while True:
                    try:
                        ret = requests.get(
                                "http://weibo.com/p/100808aa6061e1fbe38e6136ce2d60885e239c?" +
                                "pids=Pl_Third_App__11&current_page=" + str(cp) +
                                "&page=" + str(p) + "&ajaxpagelet=1",
                                headers=HtmlUtils.HEADERS)
                        html = HtmlUtils._scriptToHtml(ret.text)
                        contents.extend(self._parseWeibo(html))
                        break
                    except requests.RequestException:
                        # Transient network failure: retry this page.
                        print("Network error")
                    except Exception:
                        # Unexpected parse/structure error: report and skip page.
                        # (Was a bare `except:`, which also swallowed
                        # KeyboardInterrupt/SystemExit.)
                        traceback.print_exc()
                        break
        return contents

    def _parseWeibo(self, html):
        """
        Extract the plain text of every weibo found in `html`.

        Truncated ("long") posts are fetched in full via the getlongtext
        AJAX endpoint. Filter rules: anchors labelled 网页链接 (web link)
        are dropped; other anchors (e.g. @user mentions) contribute their
        visible text; <img>/<div>/<br> fragments are dropped; everything
        else is appended stripped.
        """
        box = BeautifulSoup(html, 'lxml')
        contents = []
        for wrap_box in box.find('div', 'WB_feed').find_all('div', 'WB_cardwrap'):
            try:
                for text_box in wrap_box.find_all('div', 'WB_text'):
                    ltext = text_box.find('a', 'WB_text_opt')
                    if ltext:
                        # Post is truncated; fetch the full text.
                        print('Need to request long text')
                        # Hoisted out of the retry loop: action-data is
                        # invariant, and the mid is sliced only once.
                        data = ltext.attrs['action-data']
                        mid_pos = data.find('mid')
                        mid = data[mid_pos + 4:mid_pos + 20]
                        while True:
                            try:
                                ret = requests.get(
                                        'http://www.weibo.com/p/aj/mblog/getlongtext?ajwvr=6&mid='
                                        + mid, headers=HtmlUtils.HEADERS)
                                text_box = BeautifulSoup(ret.json()['data']['html'], 'lxml').body
                                break
                            except requests.RequestException:
                                # Use the mid parsed from action-data: indexing
                                # ltext.attrs['mid'] here could raise KeyError
                                # and abort the retry loop entirely.
                                print("Error while requesting long text of " + mid)
                    text = ''
                    for content in text_box.contents:
                        content = str(content)
                        # The following is the filter rules
                        if '<a' in content:
                            if '网页链接' in content:
                                # Skip "web link" anchors entirely.
                                continue
                            else:
                                # Keep visible text of other anchors (@user etc.).
                                text += BeautifulSoup(content, 'lxml').a.get_text()
                        elif '<img' in content or '<div' in content or '<br' in content:
                            continue
                        else:
                            text += content.strip()
                    contents.append(text)
            except Exception:
                # BUG FIX: `wrap_box.content` is not a Tag attribute (the
                # API is `.contents`); it resolved to None and made this
                # error message raise TypeError itself.
                print("Error in parsing weibo: " + str(wrap_box))
                traceback.print_exc()
        return contents

if __name__ == '__main__':
    # Run the scraper and dump every collected weibo to stdout.
    spider = SpideTRWeibo()
    weibos = spider.spide()
    for text in weibos:
        print(text)
    print("Got " + str(len(weibos)) + " weibo")
    








