from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from multiprocessing import Process
from multiprocessing import Pool
from pymongo import MongoClient
from bs4 import UnicodeDammit
from bs4 import BeautifulSoup
from urllib import request
from urllib import parse

from selenium import webdriver
import numpy as np
import requests
import datetime
import pymongo
import random
import math
import codecs
import json
import pprint
import time
import ssl
import sys
import os
import re

# MongoDB host: taken from the first CLI argument, defaulting to localhost.
net = sys.argv[1] if len(sys.argv) == 2 else 'localhost'

# Raise the recursion limit: BeautifulSoup/lxml tree traversal can recurse
# deeply on large or badly-nested HTML pages.
sys.setrecursionlimit(1000000)

# NOTE(review): duplicate import — BeautifulSoup is already imported at the top of the file.
from bs4 import BeautifulSoup

# Request headers impersonating desktop Chrome against blog.csdn.net.
# 'Referer' is overwritten per request in BLOG.craw_blog before each GET.
headers0 = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'blog.csdn.net',
    'Pragma': 'no-cache',
    'Referer': 'https://blog.csdn.net/u011391093/article/list/5?t=1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'
}

# Second header set differing only in 'Referer'.
# NOTE(review): headers1 is not referenced anywhere in the visible code — confirm before removing.
headers1 = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'blog.csdn.net',
    'Pragma': 'no-cache',
    'Referer': 'https://blog.csdn.net/u011391093/article/list/4?t=1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36'
}


# def crawl_blog_content(uid):

def new_driver(headless = False):
    """Create and return a Chrome WebDriver instance.

    Args:
        headless: when True, run Chrome without a visible window.

    Returns:
        A configured selenium ``webdriver.Chrome`` instance.
    """
    options = webdriver.ChromeOptions()

    if headless:
        options.add_argument('--headless')
        options.add_argument('--disable-gpu')

    # Skip image loading to speed up page fetches (only text content is scraped).
    options.add_argument('--disable-images')

    # Pick whichever local chromedriver checkout exists on this machine.
    if os.path.exists('/Users/xuchaosheng/Workspace/qs-scrapy/chromedriver'):
        chrome_driver_path = '/Users/xuchaosheng/Workspace/qs-scrapy/chromedriver'
    else:
        chrome_driver_path = '/Users/xuchaosheng/Workspace/qushu-recruit-data/chromedriver'

    # BUG FIX: the configured options were built but never passed to the driver,
    # so --headless / --disable-images previously had no effect.
    driver = webdriver.Chrome(chrome_driver_path, options = options)

    return driver


class BLOG():
    """Crawl all CSDN blog posts for each user id and store them in MongoDB.

    The constructor opens a Chrome driver and a MongoDB connection, then
    immediately crawls every uid passed in, flagging each user document
    with ``crawled: True`` when done so restarts skip finished users.
    """

    def __init__(self, uids):
        self.driver = new_driver()
        self.client = MongoClient(net, 27017, maxPoolSize = None, waitQueueTimeoutMS = 100)

        self.csdn_user_db = self.client['csdn_user_db']
        self.csdb_user_collection = self.csdn_user_db['csdb_user_collection']

        self.csdn_bolg_db = self.client['csdn_bolg_db']
        self.csdb_blog_collection = self.csdn_bolg_db['csdb_blog_collection']

        self.uids = uids

        for uid in self.uids:
            self.craw_blog(uid)
            # Mark the user as finished so a restart skips them.
            self.csdb_user_collection.update_one({'uid': uid}, {'$set': {'crawled': True}})

    def craw_blog(self, uid):
        """Crawl one user's article-list pages until an empty page is reached.

        For every article not already stored (keyed by URL), fetches the full
        page with selenium and inserts {url, text, title, uid} into MongoDB.
        """
        page = 1

        while True:
            url = 'https://blog.csdn.net/' + uid + '/article/list/' + str(page) + '?t=1'
            headers0['Referer'] = url

            try:
                r = requests.get(url, headers = headers0)
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only skip on network errors.
            except requests.RequestException:
                page += 1
                continue

            soup = BeautifulSoup(r.text, 'lxml')

            items = soup.select('.article-list .article-item-box')

            # An empty listing page means we have run out of articles.
            if not items:
                break

            for item in items:
                # Listing titles are prefixed with the "原" (original) badge; strip it.
                title = re.sub('^原', '', item.find('h4').get_text().strip()).strip()
                link = item.find('h4').find('a').get('href')
                print(title, link)

                if self.csdb_blog_collection.find_one({'url': link}):
                    print(link, 'existed')
                    continue

                self.driver.get(link)

                page_source = self.driver.page_source

                if '页面找不到了' in page_source:
                    print(title, '页面找不到了')
                    continue

                detail_soup = BeautifulSoup(page_source, 'lxml')
                content_node = detail_soup.find(id = 'content_views')

                # BUG FIX: pages without a #content_views element previously
                # raised AttributeError (None.get_text()) and killed the worker.
                if content_node is None:
                    continue

                blog = {
                    'url': link,
                    'text': content_node.get_text(),
                    'title': title,
                    'uid': uid
                }

                # (A redundant second existence check on the same URL was removed;
                # the collection was already checked before fetching the page.)
                self.csdb_blog_collection.insert_one(blog)
                pprint.pprint(blog)

            page += 1

def start(uids):
    """Worker entry point: crawl every blog belonging to the given user ids."""
    BLOG(uids)


if __name__ == '__main__':
    client = MongoClient(net, 27017)

    csdn_user_db = client['csdn_user_db']
    csdb_user_collection = csdn_user_db['csdb_user_collection']

    # Only users without a 'crawled' flag still need work; filter on the
    # server with $exists instead of pulling every document and checking
    # membership client-side.
    uids = [user['uid'] for user in csdb_user_collection.find({'crawled': {'$exists': False}})]

    # Split the workload into up to 5 roughly equal chunks, one per process.
    step = math.ceil(len(uids) / 5)

    pool = []

    for i in range(5):
        chunk = uids[i * step: (i + 1) * step]

        # BUG FIX: empty chunks previously still spawned a process that
        # opened a Chrome instance and a Mongo connection for no work.
        if not chunk:
            continue

        p = Process(target = start, args = (chunk,))
        pool.append(p)

    for p in pool:
        p.start()

    for p in pool:
        p.join()
