import re
import random
import sys
import time
import datetime
import math
import pprint
import requests

from bs4 import BeautifulSoup
from selenium import webdriver
from pymongo import MongoClient
from multiprocessing import Process

# The crawler historically tail-recursed between users() calls, hence the
# very large recursion limit.
sys.setrecursionlimit(1000000)
# MongoDB host: first CLI argument, defaulting to a local instance.
net = sys.argv[1] if len(sys.argv) == 2 else 'localhost'

# connect=False defers the actual connection until first use; required because
# this client object is shared across multiprocessing.Process workers below.
client = MongoClient(net, 27017, connect = False)

# Database / collection holding every crawled jianshu user document.
jianshu_user_db = client['jianshu_user_db']
jianshu_user_coll = jianshu_user_db['jianshu_user_coll']


def users(id, driver = None):
    """Crawl the jianshu follow graph starting from user *id*.

    Scrapes the user's followers/following list pages, stores previously
    unseen users into MongoDB, then continues from a randomly sampled
    already-stored user.  Runs until the collection yields no further seed
    (it is the worker body for the crawler processes).

    Parameters
    ----------
    id : str
        jianshu user id (the slug after ``/u/``) to start from.
    driver : selenium webdriver, optional
        Reused browser instance; a headless Chrome is created when omitted.
    """
    if not driver:
        options = webdriver.ChromeOptions()

        options.add_argument('--headless')
        options.add_argument('--disable-gpu')
        options.add_argument('--disable-images')
        # NOTE(review): desired_capabilities/executable_path were removed in
        # Selenium 4 -- confirm the pinned selenium version before upgrading.
        desired_capabilities = options.to_capabilities()

        driver = webdriver.Chrome(desired_capabilities = desired_capabilities, executable_path = '/Users/xuchaosheng/Workspace/knx-scrapy/libs/chromedriver')

    # Iterative crawl loop.  The original implementation tail-recursed into
    # users() for every new seed (hence the huge sys.setrecursionlimit at the
    # top of the file), which still overflows the C stack on long crawls; a
    # plain loop runs indefinitely in constant stack space.
    current = id
    while True:
        _crawl_user(current, driver)

        current = _random_stored_id()
        if current is None:
            # Empty collection: no seed to continue from (matches the
            # original's fall-through return when $sample yielded nothing).
            return


def _random_stored_id():
    """Return the id of one randomly sampled stored user, or None if empty."""
    for doc in jianshu_user_coll.aggregate([{'$sample': {'size': 1}}]):
        return doc['id']
    return None


def _crawl_user(user_id, driver):
    """Scrape one user's profile and persist their followers/following."""
    soup = _load_profile(user_id, driver)

    # Deleted/blocked accounts render an error page; skip them.
    if soup.find(class_ = 'error-block'):
        return

    counts = _follow_page_counts(soup)
    if counts is None:
        # Profile markup did not match expectations; skip this user.
        return
    following_page, followers_page = counts

    page_num = {
        'followers': followers_page,
        'following': following_page
    }

    for t in ['followers', 'following']:
        # Bug fix: the original `range(1, page_num[t])` skipped the last page
        # and fetched nothing at all for single-page lists.  The 100-page cap
        # is preserved.
        for page in range(1, min(page_num[t], 100) + 1):
            try:
                driver.get('https://www.jianshu.com/users/' + user_id + '/' + t + '?page=' + str(page))
                soup = BeautifulSoup(driver.page_source, 'lxml')
            except Exception:
                # Transient navigation failure: skip just this page.
                continue

            for item in soup.select('.user-list li'):
                _store_user(item)


def _load_profile(user_id, driver):
    """Load a user's profile page, retrying until navigation succeeds.

    Returns the parsed BeautifulSoup document of the profile page.
    """
    while True:
        try:
            driver.get('https://www.jianshu.com/u/' + user_id)
            return BeautifulSoup(driver.page_source, 'lxml')
        except Exception:
            # Transient webdriver/network failure: back off briefly and retry
            # the same page (the original retried immediately via recursion).
            time.sleep(1)


def _follow_page_counts(soup):
    """Return (following_pages, followers_pages) for a profile soup.

    Returns None when the expected markup is missing or the counts are not
    numeric (e.g. a layout change or a partially rendered page).
    """
    try:
        info = soup.find(class_ = 'main-top').find(class_ = 'info').select('li')
        following_num = int(info[0].find('a').find('p').text)
        followers_num = int(info[1].find('a').find('p').text)
    except (AttributeError, IndexError, ValueError):
        return None

    # jianshu renders 9 entries per list page.
    return math.ceil(following_num / 9), math.ceil(followers_num / 9)


def _store_user(item):
    """Insert one scraped list entry into Mongo if not already present."""
    info = item.find(class_ = 'info')
    meta = item.find(class_ = 'meta')
    link = 'https://www.jianshu.com' + info.find(class_ = 'name').get('href')

    u = {
        'name': info.find(class_ = 'name').get_text(),
        'followers': int(meta.find(text = re.compile('粉丝')).parent.get_text().split(' ')[1]),
        'following': int(meta.find(text = re.compile('关注')).parent.get_text().split(' ')[1]),
        'articles': int(meta.find(text = re.compile('文章')).parent.get_text().split(' ')[1]),
        'link': link,
        'id': link.split('/')[-1]
    }

    if not jianshu_user_coll.find_one({'id': u['id']}):
        jianshu_user_coll.insert_one(u)
        pprint.pprint(u)


if __name__ == '__main__':
    # Seed five crawler processes, each starting from a randomly sampled
    # user already present in the collection.
    seeds = jianshu_user_coll.aggregate([{'$sample': {'size': 5}}])
    workers = [Process(target = users, args = (doc['id'],)) for doc in seeds]

    for worker in workers:
        worker.start()

    for worker in workers:
        worker.join()
