from multiprocessing import Process
from pymongo import MongoClient

import requests
import pprint
import time
import re
import sys

# Mongo host: first CLI argument, defaulting to a local instance.
net = sys.argv[1] if len(sys.argv) == 2 else 'localhost'

# NOTE(review): raised because scrapy() re-invokes itself recursively for
# every sampled uid, so long crawls would otherwise hit the default limit.
sys.setrecursionlimit(1000000)

from bs4 import BeautifulSoup

# connect=False defers establishing the connection, so each worker spawned
# by multiprocessing opens its own socket after fork instead of sharing one.
client = MongoClient(net, 27017, connect = False)

csdn_user_db = client['csdn_user_db']
csdb_user_collection = csdn_user_db['csdb_user_collection']

# Browser-like request headers so me.csdn.net serves the regular HTML page.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'me.csdn.net',
    'Pragma': 'no-cache',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'
}

# Seed uid — presumably the initial crawl entry point; the __main__ block
# below samples start uids from the collection instead, so this appears
# unused at runtime. TODO confirm before removing.
uid = 'yyl424525'

def scrapy(uid):
    """Crawl CSDN profile pages starting from *uid*, recording new user ids.

    Fetches https://me.csdn.net/<uid>, extracts every uid linked in the
    follower/followee panel (``.fan_att_con``), inserts the ones not yet
    stored into MongoDB, then picks a random already-stored uid and repeats.
    Runs until the collection yields no sample (empty collection).

    Implemented as a loop rather than tail recursion so an arbitrarily long
    crawl cannot exhaust the interpreter stack.

    :param uid: CSDN user id to start crawling from.
    """
    while True:
        url = 'https://me.csdn.net/' + uid

        try:
            r = requests.get(url, headers = headers)
        except requests.RequestException:
            # Network hiccup: skip this profile and resample a new uid below.
            r = None

        if r is not None:
            soup = BeautifulSoup(r.text, 'lxml')

            for user in soup.select('.fan_att_con ul li'):
                item = {
                    'uid': user.find('a').get('href').replace('https://me.csdn.net/', '')
                }

                # Insert only uids we have not stored yet.
                if not csdb_user_collection.find_one({'uid': item['uid']}):
                    csdb_user_collection.insert_one(item)
                    print(item['uid'])

        # Choose the next crawl target: one random uid from the collection.
        next_uid = None
        for item in csdb_user_collection.aggregate([{'$sample': {'size': 1}}]):
            next_uid = item['uid']

        if next_uid is None:
            # Nothing stored yet — the original recursion also stopped here.
            break

        uid = next_uid


if __name__ == '__main__':
    # Seed eight parallel crawlers, each starting from a random stored uid.
    workers = [
        Process(target = scrapy, args = (seed['uid'],))
        for seed in csdb_user_collection.aggregate([{'$sample': {'size': 8}}])
    ]

    for worker in workers:
        worker.start()

    for worker in workers:
        worker.join()
