from pymongo import MongoClient
from bs4 import BeautifulSoup
from multiprocessing import Process

import socket
import requests
import re
import time
import sys
import numpy as np
import random

# Raise the recursion limit: crawl_list / crawl_zhuanlan_list below recurse
# once per result page, which can go very deep on large categories.
sys.setrecursionlimit(1000000)
# Global socket timeout so hung HTTP connections fail after 5 seconds.
socket.setdefaulttimeout(5)

# Browser-like request headers sent with article fetches to look like a
# normal Chrome visit.
h = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

# MongoDB host: first CLI argument if given, otherwise localhost.
net = sys.argv[1] if len(sys.argv) == 2 else 'localhost'

# Database/collection where crawled posts are stored (one document per URL).
client = MongoClient(net, 27017)
iresearch_post_db = client['iresearch_post_db']
iresearch_post_coll = iresearch_post_db['iresearch_post_coll']

# URLs whose download failed (url -> 1); kept for post-run inspection.
error = {}


def crawl(url, uid=''):
    """Fetch one article page and store its title and body text in MongoDB.

    Parameters:
        url: absolute URL of the article page.
        uid: optional author id, used only for progress logging.
             Defaults to '' so list-based callers that have no author
             id can call crawl(url) directly.

    Side effects: inserts a document into iresearch_post_coll on success;
    records failed downloads in the module-level `error` dict.
    """
    # Skip URLs that are already stored.
    if iresearch_post_coll.find_one({'url': url}):
        return

    try:
        r = requests.get(url, headers=h)
        # Pages are served in GBK regardless of declared encoding.
        r.encoding = 'gbk'
    except requests.RequestException:
        # Narrowed from a bare except: only network/HTTP failures are
        # best-effort; anything else should surface.
        print('error')
        error[url] = 1
        return

    # The site answers missing posts with a styled 404 page (HTTP 200).
    if 'box-404' in r.text:
        print('not existed')
        return

    soup = BeautifulSoup(r.text, 'lxml')

    # Either element may be absent on malformed pages; fall back to ''.
    node = soup.find(class_='g-article')
    article = node.get_text().strip() if node else ''

    node = soup.find(class_='title')
    title = node.get_text().strip() if node else ''

    # Persist only complete records (both title and body present).
    if article and title and not iresearch_post_coll.find_one({'url': url}):
        item = {
            'url': url,
            'text': article,
            'title': title
        }
        iresearch_post_coll.insert_one(item)
        print(item['title'], uid)


def crawl_user(uid):
    """Crawl every post of one author's column, page by page.

    Stops at the first page that either renders the site's 404 box or
    contains no list entries.
    """
    page_no = 1

    while True:
        listing_url = 'http://column.iresearch.cn/u/' + uid + '_' + str(page_no) + '/'
        resp = requests.get(listing_url)

        # Missing pages come back as a styled 404 (HTTP 200).
        if 'box-404' in resp.text:
            break

        page_soup = BeautifulSoup(resp.text, 'lxml')
        entries = page_soup.select('.m-list-colu li')

        if not entries:
            break

        for entry in entries:
            crawl(entry.find('a').get('href'), uid)

        page_no += 1


def crawl_list(rootId, lastId):
    """Page through the news 'redis' listing endpoint, crawling each article.

    Parameters:
        rootId: category/root id passed through to the endpoint.
        lastId: id of the last item seen; paging continues from it.

    Bug fix: the original called crawl() without the required `uid`
    argument, which raised TypeError on every item; an empty uid is
    passed since no author id is known at this level. The per-page
    recursion is also rewritten as a loop so deep listings cannot
    exhaust the stack.
    """
    url = 'http://start.iresearch.cn/include/pages/redis.aspx'

    while True:
        r = requests.get(url, params = {
            'rootId': rootId,
            'lastId': lastId
        })

        soup = BeautifulSoup(r.text, 'lxml')
        items = soup.select('li')

        if not items:
            break

        for item in items:
            crawl(item.find('h3').find('a').get('href'), '')

        # Next request resumes after the last item on this page.
        lastId = items[-1].get('id')


stat = {}


def crawl_zhuanlan_list(rootId, classId, lastId):
    """Page through the column (zhuanlan) 'blogRedis' listing, crawling each article.

    Parameters:
        rootId:  root id passed through to the endpoint.
        classId: category id passed through to the endpoint.
        lastId:  id of the last item seen; paging continues from it.

    Bug fix: the original called crawl() without the required `uid`
    argument, which raised TypeError on every item; an empty uid is
    passed since no author id is known here. The per-page recursion is
    rewritten as a loop so deep listings cannot exhaust the stack.
    """
    url = 'http://column.iresearch.cn/include/pages/blogRedis.aspx'

    while True:
        r = requests.get(url, params = {
            'rootId': rootId,
            'classId': classId,
            'lastId': lastId
        })
        soup = BeautifulSoup(r.text, 'lxml')
        items = soup.select('li')

        if not items:
            return

        for item in items:
            crawl(item.find('h3').find('a').get('href'), '')

        # Next request resumes after the last item on this page.
        lastId = items[-1].get('id')


def rand_id():
    """Return a random candidate user id in [100000, 1000000].

    Bug fix: the original computed random.randint(...) but never
    returned it, so every call yielded None.
    """
    return random.randint(100000, 1000000)


if __name__ == '__main__':
    # Brute-force sweep over candidate user ids from high to low.
    # crawl_user() exits immediately on the site's 404 page or an empty
    # listing, so probing nonexistent ids is cheap. (Dead commented-out
    # experiments — multiprocessing pool, search-page seeding, single-URL
    # runs — removed for readability.)
    for i in range(400000, 10000, -1):
        crawl_user(str(i))