#!/usr/bin/env python

import requests
import concurrent.futures
import traceback
import re
import time as builtin_time
from urllib.parse import unquote
from bs4 import BeautifulSoup
from utils import UserInfo, PageConfigInfo, HtmlUtils, Util, \
        DBUtils, Repost, Post, __DEBUG__

import pdb


"""
Crawl all of posts in the given time window of a given user 
"""
__DEBUG__ = True

class WeiboGenerator:
    """
    Crawl every weibo (original posts and reposts) that a given user
    published inside a time window, and persist them through DBUtils.

    A profile page is rendered in three "screens": the first is embedded in
    the page HTML, the other two are fetched lazily via AJAX endpoints.
    The feed is ordered newest-first, so the spider stops as soon as it
    sees an item older than the window start.
    """

    def __init__(self, uid, pid, domain, start, end, db_con):
        """
        Initialize a user weibo generator with a specific user identifier
        and a time window, in which the wanted weibo falls. The time window
        is from 'start' to 'end' including 'end'.

        uid     -- numeric user id of the target user
        pid     -- weibo "page id" of the user's profile
        domain  -- weibo domain code of the profile
        start   -- lower bound of the window as a yyyymmddHHMM integer
        end     -- upper bound (inclusive), same format
        db_con  -- open database connection used by _persist()
        """
        self.uid = uid
        self.page_id = pid
        self.domain = domain
        self.start = start
        self.end = end
        self.domid = ''           # pagelet dom id, discovered on the first page
        self.stop_spider = False  # whether the spider should be stopped or not
        self.db_con = db_con

    def run(self):
        """
        Crawl the user's timeline, persist the results, return the reposts.
        """
        reposts, posts = self._spide()
        self._persist(reposts, posts)
        return reposts

    def _spide(self):
        """
        Walk the profile pages until the spider is told to stop.
        Return (reposts, posts) accumulated over all pages.
        """
        reposts_bag, posts_bag = self._spideFirstPage()
        if self.stop_spider:
            return reposts_bag, posts_bag
        reposts = []
        posts = []
        page_number = 1
        # Bug fix: the original loop ended with a stray (debug-leftover)
        # 'break', so it fetched page 2 and then threw its results away.
        while not self.stop_spider:
            reposts.extend(reposts_bag)
            posts.extend(posts_bag)
            page_number += 1
            reposts_bag, posts_bag = self._spideOnePage(page_number)
        # The last bags may still hold items collected before the spider
        # crossed the window boundary; keep them.
        reposts.extend(reposts_bag)
        posts.extend(posts_bag)
        return reposts, posts

    def _persist(self, reposts, posts):
        """
        Dump all reposts and posts into the database.
        """
        DBUtils.dumpUserReposts(reposts, self.db_con)
        DBUtils.dumpUserPosts(posts, self.db_con)

    def _getWithRetry(self, link):
        """
        GET 'link' with the crawler headers, retrying forever on network
        errors (the same unbounded retry policy the original code used
        inline at every call site).
        """
        while True:
            try:
                return requests.get(link, headers=HtmlUtils.HEADERS)
            except requests.RequestException:
                print("Error in getting: " + link)
                traceback.print_exc()

    def _spideFirstPage(self):
        """
        Crawl all three screens of the first profile page.
        Also discovers self.domid from the embedded pagelet script.
        Return (reposts, posts) found there.
        """
        builtin_time.sleep(Util.SLEEP_SECONDS)
        link = "http://www.weibo.com/p/" + str(self.page_id) +\
               "?profile_ftype=1&is_all=1"
        if __DEBUG__:
            print("Requesting " + link)
        ret = self._getWithRetry(link)
        repost_box, self.domid = HtmlUtils.findScriptWithDomid(ret.text,
                "Pl_Official_MyProfileFeed")
        if not repost_box:
            if __DEBUG__:
                print("Reposts box is missing")
            self.stop_spider = True
            return [], []
        repost_box = BeautifulSoup(repost_box, 'lxml')

        reposts, posts = self._parseRepostBox(repost_box)
        repost_bag, post_bag = self._spideSecondScreen(1)
        reposts.extend(repost_bag)
        posts.extend(post_bag)
        repost_bag, post_bag = self._spideThirdScreen(1)
        reposts.extend(repost_bag)
        posts.extend(post_bag)
        return reposts, posts

    def _spideOnePage(self, page_number):
        """
        Crawl all three screens of a single page except the first page.
        Return (reposts, posts) found there.
        """
        reposts = []
        posts = []
        # Bug fix: the original extended 'posts' with the repost bag
        # for every screen, so no Post from page >= 2 was ever kept.
        for screen in (self._spideFirstScreen, self._spideSecondScreen,
                       self._spideThirdScreen):
            repost_bag, post_bag = screen(page_number)
            reposts.extend(repost_bag)
            posts.extend(post_bag)
        return reposts, posts

    def _spideFirstScreen(self, page_number):
        """
        Crawl the first (synchronously rendered) screen of a page.
        Return (reposts, posts); empty lists once the spider is stopped.
        """
        builtin_time.sleep(Util.SLEEP_SECONDS)
        if self.stop_spider:
            return [], []
        link = "http://weibo.com/p/" + str(self.page_id) + \
                "?pids=" + self.domid + \
                "&is_all=1&is_tag=0&profile_ftype=1&page=" +\
                str(page_number) + "&ajaxpagelet=1&ajaxpagelet_v6=1"
        if __DEBUG__:
            print("Requesting " + link)
        ret = self._getWithRetry(link)
        repost_box = HtmlUtils.findScript(ret.text, self.domid)
        if not repost_box:
            if __DEBUG__:
                print("Reposts box is missing")
            self.stop_spider = True
            return [], []
        return self._parseRepostBox(BeautifulSoup(repost_box, 'lxml'))

    def _spideAjaxScreen(self, page_number, pagebar):
        """
        Crawl one of the two lazily loaded (AJAX) screens of a page.
        pagebar is 0 for the second screen and 1 for the third.
        """
        builtin_time.sleep(Util.SLEEP_SECONDS)
        if self.stop_spider:
            return [], []
        link = "http://weibo.com/p/aj/v6/mblog/mbloglist?ajwvr=6&domain=" + \
                str(self.domain) + "&profile_ftype=1&is_all=1&pagebar=" + \
                str(pagebar) + "&pl_name=" + \
                self.domid + "&id=" + str(self.page_id) + \
                "&feed_type=0&page=" + str(page_number) + \
                "&pre_page=" + str(page_number) + \
                "&domain_op=" + str(self.domain)
        if __DEBUG__:
            print("Requesting " + link)
        ret = self._getWithRetry(link)
        repost_box = HtmlUtils.loadJsonResponse(ret.text, 'data')
        if not repost_box:
            if __DEBUG__:
                print("Reposts box is missing")
            self.stop_spider = True
            return [], []
        return self._parseRepostBox(BeautifulSoup(repost_box, 'lxml'))

    def _spideSecondScreen(self, page_number):
        """
        Crawl the second screen (pagebar=0) of a page.
        """
        return self._spideAjaxScreen(page_number, 0)

    def _spideThirdScreen(self, page_number):
        """
        Crawl the third screen (pagebar=1) of a page.
        """
        return self._spideAjaxScreen(page_number, 1)

    def _parseRepostBox(self, repost_box):
        """
        Parse one feed box into ([Repost], [Post]).
        Sets stop_spider when an item older than the window start is seen
        (the feed is ordered newest first, so nothing later can match).
        """
        reposts = []
        posts = []
        for wrap_box in repost_box.find_all('div', 'WB_cardwrap'):
            try:
                if 'mid' not in wrap_box.attrs:
                    continue
                mid = wrap_box.attrs['mid']
                repost = wrap_box.find('div', 'WB_feed_detail')
                # Skip cards that are not authored by the target user.
                if str(self.uid) not in repost.find('div', class_='WB_info').a.attrs['usercard']:
                    continue

                detail = repost.find('div', class_='WB_detail')
                expand_box = detail.find('div', 'WB_feed_expand')
                if expand_box:
                    # The card embeds an expanded original -> it is a repost.
                    omid = wrap_box.attrs['omid']
                    expand_func_box = expand_box.find('div', 'WB_from')
                    if not expand_func_box:
                        continue
                    repo = Repost()
                    repo.original_time = expand_func_box.a.attrs['title']
                    repo.uid = self.uid
                    from_box = detail.find('div', 'WB_from')
                    repo.time = from_box.a.attrs['title']
                    t = self._timeToInt(repo.time)
                    if t > self.end:
                        continue
                    elif t < self.start:
                        self.stop_spider = True
                        break
                    text_box = detail.find('div', 'WB_text')
                    w_content = text_box.get_text()
                    index = w_content.find('//@')
                    if index < 0:
                        # No forward chain: last hop is the original author.
                        link = "http://weibo.com" + \
                                expand_box.find('div', 'WB_info').a.attrs['href']
                    else:
                        # Last hop is the first user named in the chain.
                        index += 3
                        try:
                            link = text_box.find('a',
                                attrs={"usercard": "name=" + w_content[index :w_content.find(':', index)]}).attrs['href']
                        except AttributeError:
                            # Retry with the full-width colon used in
                            # Chinese text (find() returned None above).
                            link = text_box.find('a',
                                attrs={"usercard": "name=" + w_content[index :w_content.find('：', index)]}).attrs['href']
                    self._parseLastHop(repo, link)
                    ret = self._getWithRetry(
                            'http://www.weibo.com/p/aj/mblog/getlongtext?ajwvr=6&mid=' + omid)
                    repo.content = self._parseWeibo(
                            BeautifulSoup(ret.json()['data']['html'], 'lxml').body)
                    if repo.content == '':
                        continue
                    if __DEBUG__:
                        print(repo)
                    reposts.append(repo)
                else:
                    # No expanded original -> it is the user's own weibo.
                    post = Post()
                    post.uid = self.uid
                    post.time = detail.find('div', 'WB_from').a.attrs['title']
                    t = self._timeToInt(post.time)
                    if t > self.end:
                        continue
                    elif t < self.start:
                        self.stop_spider = True
                        break
                    ret = self._getWithRetry(
                            'http://www.weibo.com/p/aj/mblog/getlongtext?ajwvr=6&mid=' + mid)
                    bs = BeautifulSoup(ret.json()['data']['html'], 'lxml')
                    post.content = self._parseWeibo(bs.body)

                    if __DEBUG__:
                        print(post)
                    posts.append(post)
            except Exception:
                # Bug fix: the original printed wrap_box.content, which is
                # not a bs4 Tag attribute (resolves to None and made this
                # very error handler raise TypeError).
                print("Error in parsing weibo: " + str(wrap_box))
                traceback.print_exc()
        return reposts, posts

    def _parseLastHop(self, repost, link):
        """
        Follow 'link' to the last-hop user's profile and extract uid,
        page_id and domain from the inline $CONFIG script, storing them
        on 'repost' (last_uid / last_pid / last_domain).
        """
        link = unquote(link)
        if __DEBUG__:
            print("Requesting " + link)
        while True:
            try:
                ret = requests.get(link, headers=HtmlUtils.HEADERS,
                        allow_redirects=False)
                if ret.status_code == 302:
                    link = ret.headers['Location']
                    if 'http' not in link:
                        # Relative redirect; fall back to the home page.
                        link = 'http://weibo.com'
                    ret = requests.get(link, headers=HtmlUtils.HEADERS)
                break
            except requests.RequestException:
                print("Error in requesting: " + link)
                traceback.print_exc()
        for script in BeautifulSoup(ret.text, 'lxml').find_all('script'):
            if '$CONFIG' in str(script):
                break
        script = str(script.contents[0])
        # $CONFIG lines look like: $CONFIG['oid']='12345';
        for value in script.split(';'):
            if "['oid']" in value:
                repost.last_uid = int(value.split("'")[-2])
            elif "['page_id']" in value:
                repost.last_pid = int(value.split("'")[-2])
            elif "['domain']" in value:
                repost.last_domain = int(value.split("'")[-2])

    def _parseWeibo(self, message_box):
        """
        Return the content of a weibo. If it has links, only crawl the first
        2 links of them. @-mentions, video/image anchors and markup tags are
        dropped; hashtag anchors keep their text; external-link text is
        appended after a '||&*^||' separator. All whitespace is stripped.
        """
        if message_box is None:
            return ''
        text = ''
        links = []
        if message_box.p is not None:
            message_box = message_box.p
        for content in message_box.contents:
            content = str(content)
            # The following is the filter rules
            if '<a' in content:
                if '@' in content:
                    # bypass @user
                    continue
                elif '视频' in content:
                    # bypass video link
                    continue
                elif '图片' in content:
                    # bypass image link
                    continue
                elif '#' in content:
                    # keep hashtag text
                    text += BeautifulSoup(content, 'lxml').a.get_text()
                else:
                    # Found an external link
                    links.append(BeautifulSoup(content, 'lxml').a.attrs['href'])
            elif '<img' in content or '<div' in content or '<br' in content:
                continue
            else:
                text += content
        # Merge conflict resolved: keep the branch that fetches external
        # links (it is what the docstring promises) and fix its bug — the
        # loop counter was never incremented, so ALL links were crawled.
        text += "||&*^||"  # Seperators
        for link in links[:2]:
            text += self._retrieveExternalLink(link)
        return re.sub(r'\s+', '', text)

    def _retrieveExternalLink(self, link):
        """
        A general crawler to crawl an external link. Returns the page's
        visible text with whitespace removed, or '' on any failure or when
        the Sina visitor wall is served instead of real content.
        """
        if __DEBUG__:
            print("Requesting external link: " + link)

        try:
            ret = requests.get(link)
            if ret.status_code == 200:
                text = BeautifulSoup(ret.text, 'lxml').get_text()
                text = re.sub(r'\s+', '', text)
                if 'SinaVisitorSystem' not in text:
                    return text
            return ''
        except requests.RequestException:
            print("Error happens while retrieve page " + link)
            return ''
        except Exception:
            print("Error while requesting: " + link)
            traceback.print_exc()
            return ''

    def _timeToInt(self, t):
        """
        Convert a 'yyyy-mm-dd HH:MM' timestamp string into the integer
        yyyymmddHHMM so timestamps compare numerically against the window.
        """
        return int(t.strip().replace('-', '').replace(':', '').replace(' ', ''))
        


if __name__ == '__main__':
    # Load an array of users of the form <uid, page_id, domain>
    user_configs = DBUtils.loadUserConfig()

    db_con = DBUtils.initdb()

    # Merge conflict resolved: keep the HEAD crawl window
    # (yyyymmddHHMM integers, inclusive on both ends).
    start, end = 201612050000, 201612062359
    reposts = []
    # Two workers: crawl users concurrently while keeping the request
    # rate low enough to avoid tripping Weibo's anti-crawler checks.
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
        futures = dict()
        for user in user_configs:
            if __DEBUG__:
                print("Submit task: " + str(user))
            futures[executor.submit(WeiboGenerator(
                    user.uid, user.page_id, user.domain,
                    start, end, db_con).run)] = user
        for future in concurrent.futures.as_completed(futures):
            try:
                reposts.extend(future.result())
            except Exception:
                # One user failing must not abort the whole crawl.
                traceback.print_exc()
    print("We got " + str(len(reposts)) + " reposts in total")
    db_con.close()
