from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse

from selenium import webdriver
import numpy as np
import requests
import datetime
import pymongo
import random
import hashlib
import math
import base64
import codecs
import json
import pprint
import time
import ssl
import sys
import os
import re

# Allow very deep recursion: page-load retries can nest deeply
# (crawl_post calls itself on failure).
sys.setrecursionlimit(1000000)
# MongoDB host: taken from the first CLI argument, defaulting to localhost.
net = sys.argv[1] if len(sys.argv) == 2 else 'localhost'

def chrome_driver(executable_path = '/Users/xuchaosheng/Workspace/qs-scrapy/chromedriver2',
                  page_load_timeout = 20):
    """Build and return a headless Chrome WebDriver.

    Args:
        executable_path: path to the chromedriver binary (previously a
            hard-coded constant; kept as the default for compatibility).
        page_load_timeout: seconds before a page load raises a timeout.

    Returns:
        A configured selenium.webdriver.Chrome instance.
    """
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    options.add_argument('--disable-images')

    # Pass the ChromeOptions object directly: round-tripping it through
    # to_capabilities() / desired_capabilities is deprecated in Selenium
    # and loses the typed options object for no benefit.
    driver = webdriver.Chrome(executable_path = executable_path, options = options)
    driver.set_page_load_timeout(page_load_timeout)

    return driver


class ZHUANLAN():
    """Crawl jianshu.com article text for a slice of users stored in MongoDB.

    Instantiating the class immediately loads users [begin, end) from
    jianshu_user_db, skips those already marked 'crawled', and crawls every
    article of each remaining user into jianshu_zhuanlan_coll.
    """

    def __init__(self, begin, end):
        self.log = {}

        # connect=False defers the actual connection until first use --
        # required because instances are created inside forked worker
        # processes (pymongo clients must not be shared across a fork).
        self.client = MongoClient(net, 27017, connect = False)

        self.jianshu_zhuanlan_db = self.client['jianshu_zhuanlan_db']
        self.jianshu_zhuanlan_coll = self.jianshu_zhuanlan_db['jianshu_zhuanlan_coll']

        self.jianshu_user_db = self.client['jianshu_user_db']
        self.jianshu_user_coll = self.jianshu_user_db['jianshu_user_coll']

        self.driver = chrome_driver()

        # Users in [begin, end) that have not been crawled yet.
        self.users = [
            item
            for item in self.jianshu_user_coll.find().skip(begin).limit(end - begin)
            if 'crawled' not in item
        ]

        print('user list load finished!')

        for user in self.users:
            self.crawl_post_list(user['link'])

    def url2base64(self, picUrl):
        """Download the image at *picUrl* and return its base64-encoded bytes."""
        with request.urlopen(picUrl) as web:
            return base64.b64encode(web.read())

    def CalcSign(self, s):
        """Return the hex MD5 digest of *s* (UTF-8 encoded)."""
        return hashlib.md5(s.encode('utf-8')).hexdigest()

    def get_code(self, picUrl):
        """Solve a captcha image through the fateadm API.

        Returns the recognized text on success, or False when the API
        reports a non-zero RetCode.
        """
        tm = str(int(time.time()))
        img_data = self.url2base64(picUrl)
        rsp = requests.post('http://pred.fateadm.com/api/capreg', data = {
            'user_id': '103595',
            'timestamp': tm,
            'sign': self.CalcSign('103595' + tm + self.CalcSign(tm + 'qn4iUtbRoSU95do6zEglMUmOLIjkNIiQ')),
            'predict_type': '30400',
            'img_data': img_data
        }).json()

        if rsp['RetCode'] == '0':
            return json.loads(rsp['RspData'])['result']

        return False

    def crawl_post(self, url):
        """Fetch one article page and store its title and text in MongoDB.

        Already-stored URLs are skipped. Failed page loads are retried in a
        bounded loop: the previous unbounded recursive retry could exhaust
        the interpreter stack on a URL that never loads.
        """
        if self.jianshu_zhuanlan_coll.find_one({'url': url}):
            return

        for _ in range(10):  # retry budget for flaky page loads
            try:
                self.driver.get(url)
                break
            except Exception:
                continue
        else:
            return  # page never loaded; give up on this URL

        soup = BeautifulSoup(self.driver.page_source, 'lxml')

        if not soup:
            return

        try:
            content = soup.find(class_ = 'show-content-free').get_text().strip().lower()
        except AttributeError:
            content = ''  # article body element not present on this page

        try:
            title = soup.find('h1').get_text()
        except AttributeError:
            title = ''

        item = {
            'url': url,
            'text': content,
            'title': title,
            'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

        print(title)
        print()
        print(content)

        # Re-check before inserting: another worker process may have stored
        # this URL while we were rendering the page.
        if content and not self.jianshu_zhuanlan_coll.find_one({'url': url}):
            self.jianshu_zhuanlan_coll.insert_one(item)

    def crawl_post_list(self, url):
        """Crawl every article of one user profile, then mark the user crawled.

        The profile's article count (third item of the .info list) drives
        pagination, at 9 posts per page.
        """
        self.driver.get(url)

        soup = BeautifulSoup(self.driver.page_source, 'lxml')

        try:
            post_num = int(soup.select('.main-top .info li')[2].find('p').text.strip())
        except (IndexError, AttributeError, ValueError):
            # Profile page did not render as expected; leave the user
            # unmarked so a later run can retry it.
            return

        for page in range(1, math.ceil(post_num / 9) + 1):
            try:
                self.driver.get(url + '?page=' + str(page))
            except Exception:
                continue  # skip pages that time out

            soup = BeautifulSoup(self.driver.page_source, 'lxml')

            for entry in soup.select('.note-list li'):
                link = entry.find(class_ = 'title')
                # Guard against list entries without a title anchor, which
                # previously raised AttributeError and killed the worker.
                if link and link.get('href'):
                    self.crawl_post('https://www.jianshu.com' + link.get('href'))

            time.sleep(1)

        self.jianshu_user_coll.update_one({'link': url}, {'$set': {'crawled': True}})


def start(begin, end):
    """Worker-process entry point: crawl the user slice [begin, end)."""
    ZHUANLAN(begin, end)


if __name__ == '__main__':
    # Shard the user collection evenly across 5 worker processes; each
    # worker crawls an independent, contiguous slice of users.
    workers = []

    client = MongoClient(net, 27017)

    jianshu_user_db = client['jianshu_user_db']
    jianshu_user_coll = jianshu_user_db['jianshu_user_coll']

    # count_documents({}) replaces Collection.count(), which is deprecated
    # and removed in modern pymongo releases.
    step = math.ceil(jianshu_user_coll.count_documents({}) / 5)

    # The parent only needed the total count; each worker opens its own
    # connection, so release this one before forking.
    client.close()

    for i in range(5):
        p = Process(target = start, args = (i * step, (i + 1) * step))
        workers.append(p)

    for p in workers:
        p.start()

    for p in workers:
        p.join()
