from pymongo import MongoClient
from bs4 import BeautifulSoup
from multiprocessing import Process

import socket
import requests
import re
import time
import sys
import numpy as np
import random

# crawl() follows article links recursively, so the default recursion
# limit of ~1000 would be exhausted quickly on link-dense pages.
sys.setrecursionlimit(1000000)
# Global timeout (seconds) for all socket operations, including the
# HTTP requests issued below.
socket.setdefaulttimeout(5)

# Browser-like request headers so jrj.com.cn serves normal desktop pages.
h = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Pragma': 'no-cache',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

# MongoDB host may be passed as the single CLI argument; defaults to localhost.
net = sys.argv[1] if len(sys.argv) == 2 else 'localhost'
client = MongoClient(net, 27017)

# Collection holding one document per crawled article:
# {'url': ..., 'text': ..., 'title': ...}
jrj_post_db = client['jrj_post_db']
jrj_post_coll = jrj_post_db['jrj_post_coll']

# URLs that failed to yield an article; consulted by crawl() to avoid
# re-fetching them.  NOTE(review): each worker Process inherits its own
# copy, so failures are not shared across processes — confirm this is
# intentional.
error = {}


def crawl(url):
    """Fetch one jrj.com.cn article page, store it in MongoDB, and
    recursively follow article links found on the page.

    Args:
        url: Absolute URL of a candidate article page.

    Side effects:
        Inserts a {'url', 'text', 'title'} document into ``jrj_post_coll``,
        records unparseable URLs in the module-level ``error`` dict, and
        prints the stored title.  Depth is bounded only by the raised
        recursion limit set at module level.
    """
    # Skip URLs that previously failed to parse or are already stored.
    if url in error:
        return
    if jrj_post_coll.find_one({'url': url}):
        return

    try:
        r = requests.get(url, headers=h)
    except requests.RequestException:
        # Network error / timeout: give up on this URL (best-effort crawl).
        return

    soup = BeautifulSoup(r.text, 'lxml')

    try:
        # Article body lives in class="texttit_m1"; the site embeds an
        # inline CSS fragment in the text, which we strip out.
        article = soup.find(class_='texttit_m1').get_text().strip().replace(
            '.klinehk{margin:0 auto 20px;}', '')
    except AttributeError:
        # find() returned None: not an article page.
        article = ''

    try:
        title = soup.find('h1').get_text().strip()
    except AttributeError:
        title = ''

    if not article or not title:
        # Remember non-article pages so they are never re-fetched.
        error[url] = 1
        return

    item = {
        'url': url,
        'text': article,
        'title': title
    }

    # Re-check before inserting: sibling processes may have stored this
    # URL while we were downloading it.
    if not jrj_post_coll.find_one({'url': url}):
        time.sleep(1)  # be polite to the server
        jrj_post_coll.insert_one(item)
        print(item['title'])

    # Compile once, outside the link loop.  Dots are escaped so the
    # pattern matches jrj.com.cn article URLs literally.
    link_re = re.compile(r'http://.+\.jrj\.com\.cn/\d{4}/\d{2}/\d+\.shtml')
    for a in soup.findAll('a'):
        href = a.get('href')
        if href and link_re.search(href):
            crawl(href)


def main_index():
    """Return the list of jrj.com.cn section front pages used as crawl seeds."""
    seeds = [
        'http://finance.jrj.com.cn/',
        'http://opinion.jrj.com.cn/',
        'http://finance.jrj.com.cn/biz/',
        'http://focus.jrj.com.cn/',
        'http://blog.jrj.com.cn/',
        'http://itougu.jrj.com.cn/activity/web/vipweb.jspa?tgqdcode=84B6ZE63&ylbcode=2WSK563K',
        'http://stock.jrj.com.cn/',
        'http://summary.jrj.com.cn/',
        'http://stock.jrj.com.cn/yanbao/',
        'http://stock.jrj.com.cn/list/stockssgs.shtml',
        'http://bbs.jrj.com.cn',
        'http://itougu.jrj.com.cn/?channel=VC835Z48D&tgqdcode=W734CVZS',
        'http://money.jrj.com.cn/',
        'http://fund.jrj.com.cn/',
        'http://bank.jrj.com.cn/',
        'http://insurance.jrj.com.cn/',
        'http://trust.jrj.com.cn/',
        'https://1.jrj.com.cn/zntg/?tgqdcode=N3E66P6A',
        'http://8.jrj.com.cn/?tgqdcode=W734CVZS&ylbcode=Y8EZB36R',
        'http://stock.jrj.com.cn/invest/',
        'http://stock.jrj.com.cn/skzj/',
        'http://stock.jrj.com.cn/invest/scgc.shtml',
        'http://stock.jrj.com.cn/hotstock/gnjj.shtml',
        'http://qs.jrj.com.cn/',
        'http://column.jrj.com.cn/',
        'http://bc.jrj.com.cn/',
        'http://gold.jrj.com.cn/',
        'http://loan.jrj.com.cn/list/zfdkwd.shtml',
        'http://futures.jrj.com.cn/',
        'http://gzqh.jrj.com.cn/',
        'http://forex.jrj.com.cn/',
        'http://bond.jrj.com.cn/',
        'http://stock.jrj.com.cn/hotstock/',
        'http://summary.jrj.com.cn/zljk/ddjmb.shtml',
        'http://stock.jrj.com.cn/stockprompt/stock_gegugonggao_list.shtml',
        'http://stock.jrj.com.cn/ztbjm/ztbjm.shtml',
        'http://stock.jrj.com.cn/jyts/',
        'http://stock.jrj.com.cn/list/cpbdlist.shtml',
        'http://auto.jrj.com.cn/',
        'http://finance.jrj.com.cn/tech/',
        'http://v.jrj.com.cn/',
        'https://www.yinglb.com.cn/',
        'http://itougu.jrj.com.cn/act/jzxg-activity-new?tgqdcode=Q5A72BZE&ylbcode=2W6UPX7W',
        'http://edu.jrj.com.cn/',
        'http://finance.jrj.com.cn/consumer/',
        'http://stock.jrj.com.cn/ipo/',
        'http://hk.jrj.com.cn/',
        'http://usstock.jrj.com.cn/',
        'http://stock.jrj.com.cn/xsb/',
        'http://finance.jrj.com.cn/zntx/',
        'http://istock.jrj.com.cn/',
        'http://focus.jrj.com.cn/action/topicsearch.jspa?keyword=%u56FE%u89E3%u6295%u8D44',
    ]
    return seeds


if __name__ == '__main__':
    # Compiled once; dots escaped so the pattern matches article URLs literally.
    article_re = re.compile(r'http://.+\.jrj\.com\.cn/\d{4}/\d{2}/\d+\.shtml')

    for index_url in main_index():
        # Fetch the section front page with the same browser-like headers
        # used everywhere else; skip unreachable sections instead of crashing.
        try:
            resp = requests.get(index_url, headers=h)
        except requests.RequestException:
            continue

        soup = BeautifulSoup(resp.text, 'lxml')

        links = []
        for a in soup.findAll('a'):
            href = a.get('href')
            if href and article_re.search(href):
                links.append(href)

        # Crawl up to 10 random article links per index page in parallel.
        # random.sample raises ValueError when asked for more items than
        # available, so clamp the sample size to len(links).
        pool = [Process(target=crawl, args=(link,))
                for link in random.sample(links, min(10, len(links)))]

        for p in pool:
            p.start()

        for p in pool:
            p.join()
