﻿# coding=utf-8
import json
import re
import sys

import requests
from bs4 import BeautifulSoup

from dp import DuplicatesPipeline

sys.setrecursionlimit(1000000)  # raise recursion limit (default 1000); the pagination functions below recurse per page
dPipeline = DuplicatesPipeline('jianshu', 'articles')  # de-duplicating storage pipeline (project-local)

article_themes = {}  # maps collection url-id -> collection name; used to skip already-seen collections

url = 'http://www.jianshu.com/c/bc2986022c08?utm_medium=index-collections&utm_source=desktop'

# Extract the collection id between "c/" and "?" from the sample url above.
# NOTE(review): "type" shadows the builtin; kept for backward compatibility of
# the module-level name.
type = re.findall(r'c\/(.*?)\?', url, re.S)
sort = type[0]

scope = 'http://www.jianshu.com'  # site root, prefixed onto relative article links

headers = {
    # BUG FIX: removed the stray leading ':' that made the User-Agent value malformed.
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 '
                  'Safari/537.36 '
}


def get_theme_urls(theme, page):
    """
    Crawl the collection listing for one ordering, page by page.

    Steps:
      - fetch one listing page
      - extract each collection's name and url-id
      - scrape unseen collections via get_articles_url
      - recurse to the next listing page until a page yields no collections

    :param theme: ordering/category key (e.g. 'hot', 'recommend')
    :param page: page number to fetch (recursion advances it)
    :return: None
    """
    url = 'http://www.jianshu.com/recommendations/collections?page=%s&order_by=%s' % (page, theme)
    html = requests.get(url, headers=headers).content
    soup = BeautifulSoup(html, 'html.parser')
    divs = soup.find_all('div', 'col-xs-8')
    # BUG FIX: find_all() returns a (possibly empty) list, never None, so the
    # original "divs != None" test was always true and the terminating branch
    # was unreachable (unbounded recursion).  Test truthiness instead: an
    # empty page ends the crawl.
    if divs:
        for div in divs:
            theme_name = div.div.a.h4.get_text()
            theme_url = div.div.a['href'][3:]  # strip leading '/c/' to get the collection id
            if article_themes.get(theme_url) is None:
                article_themes[theme_url] = theme_name
                get_articles_url(theme_url, 0, theme_name)
            else:
                print('已包含此主题')

        get_theme_urls(theme, page=page + 1)
    else:
        print('此主题分类url已抓取完')


def get_articles_url(theme, page, theme_name):
    """
    Crawl one page of article links for a collection, scrape each article,
    then recurse to the next page until a page yields no links.

    :param theme: collection id (the part after '/c/' in the url)
    :param page: last page number fetched (incremented before use)
    :param theme_name: human-readable collection name
    :return: None
    """
    page = page + 1
    url = 'http://www.jianshu.com/c/%s?order_by=added_at&page=%d' % (theme, page)
    html = requests.get(url, headers=headers).content
    urls = re.findall('<a class="title" target="_blank" href="(.*?)"', html.decode('utf-8'), re.S)

    if len(urls) > 0:
        try:
            print(url)
            # BUG FIX: the extracted article links were collected but never
            # used (get_article was never called anywhere), so nothing was
            # ever scraped or stored.  Scrape each article on this page
            # before advancing to the next one.
            for article_url in urls:
                get_article(article_url, theme_name)
            get_articles_url(theme, page, theme_name)
        except Exception as e:
            print(url)
            print(e)
    else:
        # collection exhausted — no links on this page
        pass


# soup.find('h1','title') = soup.find('h1').find('title') = soup.h1.title = soup.find_all('title',limit=1)
def get_article(url, theme_name):
    """
    Scrape a single article page and push the parsed record to the pipeline.

    :param url: article path relative to the site root (e.g. '/p/00f3398980d7')
    :param theme_name: name of the collection the article belongs to
    :return: None
    """
    url = scope + url
    print(url)
    html = requests.get(url, headers=headers).content
    soup = BeautifulSoup(html, 'html.parser')

    # Article/author statistics are embedded as JSON inside a
    # <script type="application/json"> tag.
    script = soup.find('script', type='application/json').get_text()
    obj = json.loads(script)
    note = obj['note']  # hoist repeated dict lookups
    author = note['author']
    words = note['public_wordage']
    likes = note['likes_count']
    views = note['views_count']
    comments = note['comments_count']
    rewards = note['total_rewards_count']
    nickname = author['nickname']
    total_words = author['total_wordage']
    total_likes = author['total_likes_count']
    followers = author['followers_count']

    title = soup.find('h1', 'title').string
    time = soup.find('span', 'publish-time').string
    notebook = soup.find('a', 'notebook').get_text()
    avatar = soup.find('a', 'avatar').find('img')['src']

    # Not every author has a signature block; find() returns None then and
    # .get_text() raises AttributeError (was a bare except).
    try:
        signature = soup.find('div', 'signature').get_text()
    except AttributeError:
        signature = '无签名信息'

    content = soup.find('div', 'show-content').children

    contents = []

    for item in content:
        s = str(item)
        # Only one prefix can match, so the original independent ifs are an
        # elif chain.  The dead "if s is None" branch (s = str(item) can never
        # be None) and the no-op "s.encode('utf-8')" were removed.
        if s[:2] == '<p':
            # paragraph: keep the plain text
            contents.append(item.get_text())
        elif s[:2] == '<d':
            # div: either nested text or an image
            tag = item.find('img')
            if tag is None:
                contents.append(item.get_text())
            else:
                # prefer the original-size image; fall back to the inline src
                try:
                    contents.append(tag['data-original-src'])
                except KeyError:
                    contents.append(tag['src'])
        elif s[:2] == '<h':
            # heading
            contents.append(item.get_text())
        elif s[:2] == '<b':
            # blockquote/bold block
            contents.append(item.get_text())

    article = {
        'article_url': url,
        'title': title,
        'nick_name': nickname,
        'notebook': notebook,
        'content': contents,
        'words': words,
        'likes': likes,
        'views': views,
        'comments': comments,
        'rewards': rewards,
        'total_likes': total_likes,
        'total_words': total_words,
        'followers': followers,
        'pub_time': time,
        'signature': signature,
        'avatar': avatar,
        'theme': theme_name

    }

    # Best-effort storage: on any pipeline failure, drop the partial record
    # (was a bare except; narrowed to Exception).
    try:
        dPipeline.process_item(article)
    except Exception:
        dPipeline.remove_item_from_db(article)


def run():
    """Crawl every configured theme ordering in turn, starting at page 0."""
    for theme in ('schoolyard', 'city', 'hot', 'recommend'):
        get_theme_urls(theme, 0)
        print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
        print('%s搜索完成' % theme)


# get_articles_url('http://www.jianshu.com/c/' + sort + '?utm_medium=index-collections&utm_source=desktop', 1)
# get_article("/p/00f3398980d7","测试名")
# Request URL:http://www.jianshu.com/c/RfYyQj?utm_medium=index-collections&utm_source=desktop
# Request URL:http://www.jianshu.com/c/RfYyQj?order_by=added_at&page=2
#
# Request URL:http://www.jianshu.com/c/RfYyQj?order_by=added_at&page=13

# get_theme_urls("http://www.jianshu.com/recommendations/collections?order_by=recommend&_pjax=%23list-container")

# get_articles_url("http://www.jianshu.com/c/251e2bcd323b?order_by=added_at&page=2", 0)
# BUG FIX: guard the entry point so importing this module does not
# immediately launch the crawler as a side effect.
if __name__ == '__main__':
    run()
