from pymongo import MongoClient
from bs4 import BeautifulSoup
from multiprocessing import Process

import socket
import requests
import re
import time
import sys
import numpy as np
import random

# crawl() follows article links recursively, so raise the interpreter's
# recursion ceiling well above the default of 1000.
sys.setrecursionlimit(1000000)
# Abort any socket operation (HTTP request, Mongo call) after 5 seconds.
socket.setdefaulttimeout(5)

# Browser-like request headers so the target site serves normal pages
# instead of blocking an obvious script client.
h = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

# MongoDB host: first CLI argument if given, otherwise localhost.
net = sys.argv[1] if len(sys.argv) == 2 else 'localhost'
client = MongoClient(net, 27017)

# Database / collection where crawled articles are stored as
# {'url': ..., 'text': ..., 'title': ...} documents.
cto_post_db = client['cto_post_db']
cto_post_coll = cto_post_db['cto_post_coll']

# URLs that failed to fetch or parse; checked before crawling so they
# are never retried within this run. (Values are always 1; used as a set.)
error = {}


def crawl(url, flag = True):
    """Crawl a 51cto article page, store it in MongoDB, and follow links.

    Starting from *url*, fetches each page, extracts the article body
    (class ``zwnr``) and title (``wznr > h2``), inserts new articles into
    ``cto_post_coll``, and queues every on-page link containing
    ``51cto.com/art/`` for crawling.  Failed URLs are remembered in the
    module-level ``error`` dict and never retried.

    Args:
        url:  The page to start crawling from.
        flag: When True (default), skip *url* entirely if it is already
              stored in the collection.  Pages discovered while crawling
              are always checked against the collection.

    Uses an explicit worklist instead of recursion: the original recursive
    form could overflow the C stack long before sys.setrecursionlimit is
    reached on a well-linked site.
    """
    stack = [(url, flag)]

    while stack:
        url, check_db = stack.pop()

        # Already stored?  (Only honoured for the seed URL when flag is
        # False, matching the original recursive behaviour.)
        if check_db and cto_post_coll.find_one({'url': url}):
            continue

        # Previously failed — do not retry within this run.
        if url in error:
            continue

        try:
            r = requests.get(url, headers = h)
            # Site serves GBK-encoded pages; requests would otherwise guess.
            r.encoding = 'gbk'
        except requests.RequestException:
            error[url] = 1
            continue

        soup = BeautifulSoup(r.text, 'lxml')

        # Article body: first element with class 'zwnr', empty if absent.
        node = soup.find(class_ = 'zwnr')
        article = node.get_text().strip() if node else ''

        # Title: <h2> inside the element with class 'wznr', empty if absent.
        title = ''
        node = soup.find(class_ = 'wznr')
        if node:
            h2 = node.find('h2')
            if h2:
                title = h2.get_text().strip()

        # A page without both parts is not an article — blacklist it.
        if not article or not title:
            error[url] = 1
            continue

        item = {
            'url': url,
            'text': article,
            'title': title
        }

        # article/title are already known non-empty here, so only the
        # duplicate check is needed before inserting.
        if not cto_post_coll.find_one({'url': url}):
            time.sleep(1)  # throttle: be polite to the server
            cto_post_coll.insert_one(item)
            print(item['title'])

        # Queue every article link found on this page.
        for a in soup.findAll('a'):
            href = a.get('href')
            if href and '51cto.com/art/' in href:
                stack.append((href, True))


if __name__ == '__main__':
    # Seed the crawl from the 51cto front page: visit every section linked
    # from the '.zxf_menubot' menu, sample up to 10 article links from each
    # section, and crawl each sample in its own process.
    front = BeautifulSoup(requests.get('http://www.51cto.com/').text, 'lxml')

    for menu_link in front.select('.zxf_menubot a'):
        section = BeautifulSoup(requests.get(menu_link.get('href')).text, 'lxml')

        # All article URLs visible on this section page.
        links = [
            a.get('href')
            for a in section.select('a')
            if a.get('href') and '51cto.com/art/' in a.get('href')
        ]

        # One worker process per sampled article; flag=False forces a
        # re-crawl even if the seed URL is already stored.
        pool = [
            Process(target = crawl, args = (article_url, False))
            for article_url in random.sample(links, min(10, len(links)))
        ]

        for p in pool:
            p.start()

        for p in pool:
            p.join()
