from multiprocessing import Process
from pymongo import MongoClient
from bs4 import BeautifulSoup

from selenium import webdriver
import numpy as np
import requests
import socket
import re
import time
import sys
import os
import random

# crawl() recurses once per discovered link, so the default limit is far too low.
sys.setrecursionlimit(1000000)
# Global timeout (seconds) for all socket operations in this process.
socket.setdefaulttimeout(5)

# Browser-like request headers (for use with `requests`; the Selenium driver
# sends its own headers).
h = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

# MongoDB host is taken from argv[1] when exactly one argument is given,
# otherwise localhost.
net = sys.argv[1] if len(sys.argv) == 2 else 'localhost'
client = MongoClient(net, 27017)

# Collection holding one document per crawled post: {'pid', 'text', 'title'}.
kr_post_db = client['kr_post_db']
kr_post_coll = kr_post_db['kr_post_coll']

# URLs that failed to load or parse this run; acts as an in-memory skip list.
error = {}


def new_driver():
    """Create and return a headless Chrome WebDriver for crawling.

    Expects the `chromedriver` binary to live in the parent directory of
    the current working directory.
    """
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    options.add_argument('--disable-images')
    desired_capabilities = options.to_capabilities()
    # Resolve ../chromedriver relative to the cwd; os.path is clearer and
    # more portable than slicing the path string on '/' by hand.
    chrome_driver_path = os.path.join(os.path.dirname(os.getcwd()), 'chromedriver')
    driver = webdriver.Chrome(chrome_driver_path, desired_capabilities = desired_capabilities)

    return driver


# Single module-level browser instance shared by every crawl() call.
driver = new_driver()


def crawl(url, flag = True):
    """Fetch a 36kr post page, store its title/text in MongoDB, and
    recursively follow in-page links to other posts.

    Args:
        url: Full post URL, expected to contain '/p/<pid>.html'.
        flag: When True, skip URLs whose pid is already stored.
    """
    # Extract the numeric post id; bail out on non-post URLs.
    # Raw string with escaped dot: the original pattern's bare '.' would
    # match any character before 'html'. Single search replaces the
    # original's duplicated re.findall calls.
    match = re.search(r'/p/(\d+)\.html', url)
    if not match:
        return
    pid = match.group(1)

    # On the normal crawl pass, skip posts we already have.
    if flag and kr_post_coll.find_one({'pid': pid}):
        return

    # Skip URLs that already failed this run.
    if url in error:
        return

    try:
        driver.get(url)
    except Exception:
        # Navigation/timeout failure: remember the URL so we don't retry.
        error[url] = 1
        return

    soup = BeautifulSoup(driver.page_source, 'lxml')

    # soup.find() returns None when the element is absent, which raises
    # AttributeError on .get_text(); treat that as "no content".
    try:
        article = soup.find(class_ = 'textblock').get_text().strip()
    except AttributeError:
        article = ''

    try:
        title = soup.find('h1').get_text().strip()
    except AttributeError:
        title = ''

    # Pages missing either field are recorded as errors and not retried.
    if not article or not title:
        error[url] = 1
        return

    item = {
        'pid': pid,
        'text': article,
        'title': title
    }

    # Re-check existence before inserting: a deeper recursive call (or a
    # flag=False refresh pass) may already have stored this pid. The
    # original's extra `and article and title` was redundant — both are
    # guaranteed non-empty by the guard above.
    if not kr_post_coll.find_one({'pid': pid}):
        kr_post_coll.insert_one(item)
        print(item['title'])

    # Recurse into every same-site post link found on this page.
    for a in soup.findAll('a'):
        href = a.get('href')
        if href and href.startswith('/p/'):
            crawl('http://36kr.com' + href)


if __name__ == '__main__':
    # NOTE(review): historical entry points kept for reference — a fresh
    # front-page crawl fanned out over 10 random links via multiprocessing,
    # and a single-URL crawl. Neither runs today.
    # driver = new_driver()
    # driver.get('http://36kr.com')
    #
    # time.sleep(8)
    #
    # soup = BeautifulSoup(driver.page_source, 'lxml')
    #
    # pool = []
    # links = []
    #
    # for a in soup.select('a'):
    #     if a.get('href') and re.findall('^/p/', a.get('href')):
    #         links.append('http://36kr.com' + a.get('href'))
    #
    # for i in random.sample(links, 10):
    #     p = Process(target = crawl, args = (i,))
    #     pool.append(p)
    #
    # for p in pool:
    #     p.start()
    #
    # for p in pool:
    #     p.join()

    # crawl('http://36kr.com/p/5142858.html?from=next')

    # Refresh pass: re-crawl a random sample of 100 already-stored posts.
    # flag=False forces a re-fetch even though the pid exists in Mongo.
    for i in kr_post_coll.aggregate([{'$sample': {'size': 100}}]):
        crawl('http://36kr.com/p/' + i['pid'] + '.html', flag = False)
