from multiprocessing import Process
from pymongo import MongoClient
from selenium import webdriver
from bs4 import BeautifulSoup

import numpy as np
import requests
import time
import re
import os

# Shared MongoDB client. connect=False defers the actual TCP connection
# until first use, which is required because this client object is
# inherited by forked multiprocessing workers (a connection created
# before fork is not safe to share across processes).
client = MongoClient('127.0.0.1', 27017, connect = False)

# Database and collection where crawled Sohu news articles are stored,
# keyed by the numeric article id ('nid').
souhu_news_db = client['souhu_news_db']
souhu_news_coll = souhu_news_db['souhu_news_coll']


def new_driver():
    """Create a headless Chrome WebDriver.

    The ``chromedriver`` binary is expected to live in the parent
    directory of the current working directory.

    Returns:
        selenium.webdriver.Chrome: a configured headless driver.
    """
    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    # NOTE(review): '--disable-images' is not a documented Chrome command-line
    # switch; kept for compatibility, but images may still load.
    options.add_argument('--disable-images')
    desired_capabilities = options.to_capabilities()
    # Build the path with os.path instead of manual '/'-slicing so it also
    # works on Windows path separators; result is <parent-of-cwd>/chromedriver.
    chrome_driver_path = os.path.join(os.path.dirname(os.getcwd()), 'chromedriver')
    driver = webdriver.Chrome(chrome_driver_path, desired_capabilities = desired_capabilities)

    return driver


class POST():
    """Depth-first crawler for Sohu news articles.

    The constructor creates one headless Chrome driver and immediately
    crawls every seed URL, recursively following in-page links to other
    ``www.sohu.com/a/...`` articles.  New articles are inserted into
    ``souhu_news_coll`` keyed by the numeric article id (``nid``).
    """

    # Compiled once at class-definition time instead of on every crawl() call.
    _NID_RE = re.compile(r'a/([\d_]+)\??')
    _TRAILER_RE = re.compile('返回搜狐，查看更多.*')

    def __init__(self, urls):
        self.driver = new_driver()

        for url in urls:
            self.crawl(url, enter = True)

    def crawl(self, url, enter = False):
        """Fetch *url*, store the article if it is new, and recurse into links.

        Args:
            url: page URL to fetch.
            enter: True for seed URLs, which are crawled even when no
                article id can be extracted from the URL.
        """
        # The original indexed findall(...)[0] unconditionally, which raised
        # IndexError for URLs without an article id before the guard below
        # could run; extract defensively instead.
        matches = self._NID_RE.findall(url)
        nid = matches[0] if matches else ''

        if not enter and not nid:
            return

        # Skip articles already stored (only meaningful when we have an id).
        if nid and souhu_news_coll.find_one({'nid': nid}):
            return

        self.driver.get(url)

        soup = BeautifulSoup(self.driver.page_source, 'lxml')

        # soup.find() returns None when the tag is absent, which raises
        # AttributeError on .get_text(); fall back to an empty string.
        try:
            article = self._TRAILER_RE.sub('', soup.find('article').get_text().strip()).strip()
        except AttributeError:
            article = ''

        try:
            title = soup.find('h1').get_text().strip()
        except AttributeError:
            title = ''

        # Re-check the collection: a sibling worker may have inserted the
        # same article while this page was loading.
        if nid and article and title and not souhu_news_coll.find_one({'nid': nid}):
            item = {
                'title': title,
                'text': article,
                'nid': nid
            }

            souhu_news_coll.insert_one(item)
            print(title)
            print('-' * 50)

        # NOTE(review): recursion depth is unbounded; a densely linked site
        # section can exceed Python's recursion limit.
        for a in soup.findAll('a'):
            link = a.get('href')

            # The original used this string as a regex, so the unescaped dots
            # matched any character; a literal substring test is the intent.
            if not link or 'www.sohu.com/a/' not in link:
                continue

            # Avoid trivially re-crawling the current page.
            if link in url or url in link:
                continue

            # Protocol-relative links ("//www.sohu.com/...") need a scheme.
            if link.find('https:') == -1 and link.find('http') == -1:
                link = 'http:' + link

            self.crawl(link)


def start(urls):
    """Worker-process entry point: crawl every URL in *urls* with a fresh POST crawler."""
    POST(urls)


if __name__ == '__main__':
    # Collect seed article links from the Sohu news front page, then quit
    # the browser before forking workers (each worker opens its own driver).
    driver = new_driver()
    try:
        driver.get('http://news.sohu.com/')
        page_source = driver.page_source
    finally:
        driver.quit()

    soup = BeautifulSoup(page_source, 'lxml')

    links = []
    seen = set()

    for a in soup.findAll('a'):
        link = a.get('href')

        # Keep only article links; the original used this string as a regex
        # (unescaped dots) — a literal substring test is the intent.
        if not link or 'www.sohu.com/a/' not in link:
            continue

        # Protocol-relative links ("//www.sohu.com/...") need a scheme.
        if link.find('https:') == -1 and link.find('http') == -1:
            link = 'http:' + link

        # De-duplicate while preserving discovery order.
        if link not in seen:
            seen.add(link)
            links.append(link)

    pool = []

    # Stride-slice so ALL links are distributed across up to 5 workers.
    # The original `step = int(len(links) / 5)` dropped the remainder links
    # and produced step == 0 (no work at all) when len(links) < 5.
    n_workers = 5

    for i in range(n_workers):
        chunk = links[i::n_workers]

        if not chunk:
            continue

        p = Process(target = start, args = (chunk,))
        pool.append(p)

    for p in pool:
        p.start()

    for p in pool:
        p.join()
