#!/usr/bin/env python
"""
    Almost all of Microblogs user identifier lies in 
    [10 0000 0000 ~ 50 0000 0000]. We randomly generate user identifiers, 
    and then test whether the user identifier is a real one. If it is, we
    collect the user information, otherwise try next. Currently, we generate
    same number of uids for every 10 0000 0000. The user identification desity
    is about 20%.
"""
import concurrent.futures 
import traceback
import requests
import random
import time
from bs4 import BeautifulSoup
random.seed()
from utils import Util, HtmlUtils, DBUtils, UserInfo, PageConfigInfo, __DEBUG__

import pdb



WANTED_USERS = 1000


class RandomUserSpider():
    """
    Discover real Weibo users by probing randomly generated uids, then
    crawl their profile pages for basic information and tags.
    """
    def __init__(self):
        # Real uids are concentrated in [1e9, 5e9); we sample each 1e9
        # bucket independently.
        self.uid_lb = 1000000000
        self.uid_ub = 5000000000
        self.step_size = 1000000000
        # 0.17 is the empirically observed fraction of random uids that hit
        # a real user, so the expected total yield is about WANTED_USERS.
        self.num_samples = int(WANTED_USERS / (0.17 * (self.uid_ub - self.uid_lb) / self.step_size))

    def genRealUsers(self):
        """
        Test which randomly generated uids belong to real users, one worker
        per uid bucket.

        Returns:
            dict: {uid: page_link} for every uid that resolved to a user.
        """
        users = dict()  # {uid: page_link}
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = dict()
            for lo in range(self.uid_lb, self.uid_ub, self.step_size):
                uids = [random.randint(lo + 1, lo + self.step_size)
                        for _ in range(self.num_samples)]
                futures[executor.submit(self._uid_spider, uids)] = lo
            for future in concurrent.futures.as_completed(futures):
                try:
                    users.update(future.result())
                except Exception:
                    traceback.print_exc()
        return users

    def _uid_spider(self, uids):
        """
        Probe each uid with a HEAD request (redirects disabled).

        A 302 redirect to /sorry means the uid does not exist; any other
        response is treated as a hit.

        Returns:
            dict: {uid: page_link} for the uids that exist.
        """
        users = dict()
        for uid in uids:
            if __DEBUG__:
                print("Requesting ", uid, end=" ", flush=True)
            # Retry indefinitely on transient transport errors.
            # NOTE(review): a permanent network failure makes this spin
            # forever -- consider a bounded retry count.
            while True:
                try:
                    ret = requests.head("http://weibo.com/u/" + str(uid),
                            headers=HtmlUtils.HEADERS,
                            allow_redirects=False)
                    break
                except requests.RequestException:
                    traceback.print_exc()
            if ret.status_code == 302:
                if "http://weibo.com/sorry" in ret.headers["Location"]:
                    if __DEBUG__:
                        print("Miss")
                else:
                    # Redirected to the user's real page: record it.
                    if __DEBUG__:
                        print("Hit ", end=" ", flush=True)
                        print("http://weibo.com" + ret.headers["Location"])
                    users[uid] = "http://weibo.com" + ret.headers["Location"]
            else:
                if __DEBUG__:
                    print("Hit ", ret.url)
                users[uid] = ret.url
        return users

    def spideUserProfile(self, links):
        """
        Collect user profiles for the given links in parallel.

        Args:
            links: {uid: page_link} mapping (as produced by genRealUsers).

        Returns:
            dict: {uid: UserInfo}.
        """
        pl_conns = 20
        users = dict()
        if not links:
            return users
        # Ceiling division: the previous floor division yielded a chunk size
        # of 0 for fewer than pl_conns links (so nothing was crawled) and
        # silently dropped up to pl_conns - 1 trailing links otherwise.
        s = -(-len(links) // pl_conns)
        # Fix: this previously read `linkss.keys()` (NameError); dict views
        # are also not sliceable, so materialize the keys into a list.
        uids = list(links)
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = dict()
            for i in range(0, pl_conns):
                if i * s >= len(links):
                    break
                futures[executor.submit(self._profile_spider,
                                        links, uids[i * s:(i + 1) * s])] = i
            for future in concurrent.futures.as_completed(futures):
                try:
                    users.update(future.result())
                except Exception:
                    traceback.print_exc()
        return users

    def _profile_spider(self, links, uids):
        """
        Crawl the profile ('/info') page of every uid in `uids`.

        For each uid, first fetch the user's main page to extract the
        $CONFIG block (page_id etc.), then try
        http://weibo.com/u/{uid}/info and fall back to
        http://www.weibo.com/p/{page_id}/info when the former is missing.

        Returns:
            dict: {uid: UserInfo} for the profiles that parsed successfully.
        """
        time.sleep(Util.SLEEP_SECONDS)  # crude rate limiting
        users = dict()
        for uid in uids:
            link = links[uid] + '/info'
            # Fetch the main page to obtain the page configuration.
            while True:
                try:
                    ret = requests.get(links[uid], headers=HtmlUtils.HEADERS)
                    if __DEBUG__:
                        print("Requesting ", links[uid])
                    break
                except requests.RequestException:
                    traceback.print_exc()
            config = self._parse_config(ret.text)
            # If we could not get the config information, bypass this user.
            if not config:
                continue

            if __DEBUG__:
                print("Requesting: ", link)
            while True:
                try:
                    ret = requests.get(link, headers=HtmlUtils.HEADERS)
                    break
                except requests.RequestException:
                    traceback.print_exc()
            if __DEBUG__:
                print("Link ", link, " response status: ", ret.status_code)
            if ret.status_code != 200:
                # http://weibo.com/u/{uid}/info is missing,
                # try http://weibo.com/p/{pid}/info instead.
                while True:
                    try:
                        link = "http://www.weibo.com/p/" + str(config.page_id) + "/info"
                        ret = requests.get(link, headers=HtmlUtils.HEADERS)
                        break
                    except requests.RequestException:
                        traceback.print_exc()

            if ret.status_code == 200:
                user = self._parser(ret.text)
                if user:
                    user.uid = uid
                    user.link = links[uid]
                    user.config = config
                    users[uid] = user
            else:
                if __DEBUG__:
                    print("Link ", link, " Got ", ret.status_code)
        return users

    def _parser(self, html):
        """
        Parse a user profile out of a raw '/info' html page.

        Only the 'Basic information' and 'Tags' boxes are extracted; if the
        info box or the counter box is missing, the user is abandoned.

        Returns:
            UserInfo on success, None when a required box is missing.
        """
        info_box = HtmlUtils.findScript(html, "Pl_Official_PersonalInfo__59")
        if not info_box:
            if __DEBUG__:
                print("Information box is missing")
            return None
        num_box = HtmlUtils.findScript(html, "Pl_Core_T8CustomTriColumn")
        if not num_box:
            if __DEBUG__:
                print("Number box is missing")
            return None
        # Dig out basic information and tags.
        info_box = BeautifulSoup(info_box, 'lxml')
        user = UserInfo()
        for box in info_box.find_all('div', 'WB_cardtitle_b'):
            if box.div.h2.contents[0] == '基本信息':  # "Basic information"
                for title in box.next_sibling.find_all('span', 'pt_title'):
                    title_name = title.contents[0].strip()
                    if title_name == '昵称：':        # nickname
                        user.nick_name = title.next_sibling.contents[0]
                    elif title_name == '性别：':      # gender
                        user.gender = title.next_sibling.contents[0]
                    elif title_name == '简介：':      # overview / bio
                        user.overview = title.next_sibling.contents[0]
                    elif title_name == '注册时间：':  # registration time
                        user.register_time = title.next_sibling.contents[0]
                    if __DEBUG__:
                        print(title.contents[0])
                        print(title.next_sibling.contents[0])
            elif box.div.h2.contents[0] == '标签信息':  # "Tag information"
                for title in box.next_sibling.find_all('span', 'pt_title'):
                    for tag in title.next_sibling.find_all('a'):
                        # NOTE(review): contents[1] skips the first child of
                        # each <a>; verify against the live page markup.
                        user.tags += tag.contents[1] + ';'
                    user.tags = user.tags[:-1]  # drop trailing ';'
                    if __DEBUG__:
                        print("Tags: ", user.tags)
        num_box = BeautifulSoup(num_box, 'lxml')
        for box in num_box.find_all('strong'):
            box_name = box.next_sibling.contents[0].strip()
            if box_name == "关注":      # followees
                user.num_followees = int(box.contents[0])
            elif box_name == "粉丝":    # fans
                user.num_fans = int(box.contents[0])
            elif box_name == "微博":    # microblog count
                user.num_microblogs = int(box.contents[0])
        # TODO: find a way to improve data quality. Rejecting users with an
        # empty overview/tags/blog count made data generation too slow, so
        # partially-filled profiles are accepted.
        return user

    def _parse_config(self, html):
        """
        Extract the $CONFIG javascript block (oid, page_id, domain) from a
        user's main page.

        Returns:
            PageConfigInfo on success, None when the page has no script tag
            at all (e.g. an error page).
        """
        for script in BeautifulSoup(html, 'lxml').find_all('script'):
            if '$CONFIG' in str(script):
                break
        config = PageConfigInfo()
        try:
            # `script` is unbound when the page contained no <script> tag.
            # NOTE(review): if scripts exist but none contains $CONFIG, the
            # last script is parsed instead and an empty config is returned.
            script = str(script.contents[0])
        except UnboundLocalError:
            print("********************************")
            print(html)
            print("********************************")
            return None
        for value in script.split(';'):
            if "['oid']" in value:
                config.uid = int(value.split("'")[-2])
            elif "['page_id']" in value:
                config.page_id = int(value.split("'")[-2])
            elif "['domain']" in value:
                config.domain = int(value.split("'")[-2])
        if __DEBUG__:
            print(str(config))
        return config


if __name__ == "__main__":
    spider = RandomUserSpider()
    links = spider.genRealUsers()
    users = spider._profile_spider(links, links.keys())
    #for k, v in users.items():
    #    print(k, str(v))
    print("We got ", len(users.keys()), " users")
    DBUtils.dumpUserInfo(users.values())
