from pymongo import MongoClient
from bs4 import BeautifulSoup
from pymongo import MongoClient
from multiprocessing import Process

import random
import requests
import re
import math
import sys
import socket

# MongoDB host: first CLI argument, defaulting to a local instance.
net = sys.argv[1] if len(sys.argv) == 2 else 'localhost'

# Global socket timeout so a hung connection doesn't stall the crawler forever.
socket.setdefaulttimeout(10)

# Very high recursion limit — presumably for parsing deeply nested HTML
# documents with BeautifulSoup; TODO confirm this is still needed.
sys.setrecursionlimit(1000000)

client = MongoClient(net, 27017)

# Collection holding the crawled blog articles.
sina_blog_db = client['sina_blog_db']
sina_blog_coll = sina_blog_db['sina_blog_coll']

# Collection holding the user list plus per-user crawl-progress flags.
sina_blog_user_db = client['sina_blog_user_db']
sina_blog_user_coll = sina_blog_user_db['sina_blog_user_coll']

# Browser-like request headers so blog.sina.com.cn serves regular pages.
h = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en,zh-CN;q=0.9,zh;q=0.8,en-US;q=0.7',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'Host': 'blog.sina.com.cn',
    'Pragma': 'no-cache',
    'Referer': 'http://blog.sina.com.cn/',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
}

# NOTE(review): mid-file import — conventionally belongs in the import block
# at the top of the file.
import time


class POST():
    """Crawl all blog articles for a list of Sina blog users into MongoDB.

    Instantiating the class starts the crawl immediately: for each user in
    *users* it walks the paginated article index, fetches every article not
    yet stored, inserts new articles into ``sina_blog_coll``, and finally
    marks the user document as crawled in ``sina_blog_user_coll``.
    """

    def __init__(self, users):
        self.users = users

        for user in self.users:
            self.crawl_article_links(user['uid'])
            # Mark the user as processed so a restarted run skips them.
            sina_blog_user_coll.update_one({'uid': user['uid']}, {'$set': {'crawled': True}})

    def crawl_article_links(self, uid):
        """Walk the paginated article index for *uid*, crawling each article.

        Returns the (currently unpopulated) ``links`` list; kept for
        interface compatibility with existing callers.
        """
        page = 0
        links = []

        while True:
            url = 'http://blog.sina.com.cn/s/article_sort_' + uid + '_10001_' + str(page) + '.html'

            # Use the same headers/timeout as article fetches, and stop
            # paginating on a network error instead of crashing the worker.
            try:
                r = requests.get(url, headers = h, timeout = 10)
            except requests.RequestException:
                break

            r.encoding = 'utf-8'
            # Explicit parser: consistent with crawl_article_content and
            # avoids BeautifulSoup's "no parser specified" warning.
            soup = BeautifulSoup(r.text, 'lxml')

            anchors = soup.select('.blog_title a')
            if not anchors:
                # An index page with no article links means we ran past the
                # last page.
                break

            for anchor in anchors:
                href = anchor.get('href')
                if not href:
                    continue

                if 'blog.sina.com.cn/s/blog_' in href:
                    title = anchor.get_text().strip()

                    # Skip articles already stored for this user.
                    if sina_blog_coll.find_one({'uid': uid, 'title': title}):
                        continue

                    self.crawl_article_content(href, uid)

            page += 1

        return links

    def crawl_article_content(self, url, uid):
        """Fetch a single article page and insert it into ``sina_blog_coll``.

        Silently returns when *url* is not an article URL, the article is
        already stored, the fetch fails, or the page has no title/content.
        """
        if 'blog.sina.com.cn/s/blog_' not in url:
            return

        # Raw string with the dot escaped so it matches literally; guard the
        # match instead of indexing findall()[0], which raised IndexError on
        # non-matching URLs.
        match = re.search(r'blog_(.+)\.html', url)
        if not match:
            return
        blog_id = match.group(1)

        if sina_blog_coll.find_one({'blog_id': blog_id}):
            return

        # Be polite to the server between article fetches.
        time.sleep(1)

        try:
            r = requests.get(url, headers = h, timeout = 10)
        except requests.RequestException:
            # Best-effort crawl: skip unreachable articles.
            return

        r.encoding = 'utf-8'

        soup = BeautifulSoup(r.text, 'lxml')

        # Missing page structure yields empty strings rather than raising.
        title_node = soup.find(class_ = 'articalTitle')
        heading = title_node.find('h2') if title_node else None
        title = heading.text if heading else ''

        content_node = soup.find(class_ = 'articalContent')
        text = content_node.get_text().strip() if content_node else ''

        item = {
            'blog_id': blog_id,
            'title': title,
            'text': text,
            'url': url,
            'uid': uid
        }

        # Re-check before insert: another worker may have inserted this
        # article while we were fetching.
        if not sina_blog_coll.find_one({'blog_id': blog_id}) and item['title'] and item['text']:
            sina_blog_coll.insert_one(item)
            print(item['title'])


def start(users):
    """Worker-process entry point: crawl every user in *users*."""
    POST(users)


if __name__ == '__main__':
    # Number of worker processes. The pool machinery below is kept so this
    # can be raised without restructuring (was a hard-coded 1 in two places).
    num_workers = 1

    # Collect every user document not yet marked as crawled.
    users = [u for u in sina_blog_user_coll.find() if 'crawled' not in u]

    # Size of the contiguous slice of users handled by each worker.
    step = math.ceil(len(users) / num_workers)

    pool = []
    for i in range(num_workers):
        p = Process(target = start, args = (users[i * step:(i + 1) * step],))
        pool.append(p)

    for p in pool:
        p.start()

    for p in pool:
        p.join()
