# coding=utf-8
import ConfigParser
import json
import re
import time
import urllib2

import redis
from bs4 import BeautifulSoup
"""
author:junhong
date:2018-12-12
desc:虎嗅新闻抓取
step1:
    fetch_seed()
step2:
    fetch_detail()
"""


def fetch(url):
    """Download *url* and return the raw response body as a string.

    The response object is closed explicitly in a ``finally`` block so the
    underlying socket is not leaked (on Python 2, urllib2 responses are not
    context managers).
    """
    response = urllib2.urlopen(url)
    try:
        return response.read()
    finally:
        response.close()


def fetch_with_class(soup, class_type="jiemi-content"):
    """Return the text of the first element in *soup* whose CSS class
    matches *class_type* (default "jiemi-content")."""
    node = soup.find(class_=class_type)
    return node.get_text()


def save_seed(cat_id, seed_list):
    """Add every seed (article id) of a category to the Redis set
    "huxiu.news.set-<cat_id>" kept in db 1."""
    conn = redis.Redis(host=redis_db_host, port=redis_db_port, db=redis_db_index_1)
    key = "%s-%s" % ("huxiu.news.set", cat_id)
    for item in seed_list:
        conn.sadd(key, item)


def save2redis(index, article_list):
    """Add each article id to the Redis set "huxiu.news.set-<index>" in db 1.
    *index* must be an integer (formatted with %d)."""
    conn = redis.Redis(host=redis_db_host, port=redis_db_port, db=redis_db_index_1)
    key = "%s-%d" % ("huxiu.news.set", index)
    for entry in article_list:
        conn.sadd(key, entry)


def load_config():
    """Read huxiu-crawler.ini and publish the Redis connection settings as
    module-level globals consumed by the save_* helpers.

    Fix: the port is read with ``getint()`` — ``conf.get()`` returns a
    string, which ``redis.Redis`` would fail on when opening the socket.
    """
    print("load config...")

    global redis_db_host
    global redis_db_port
    global redis_db_index_1
    global redis_db_index_2
    redis_db_index_1 = 1  # db holding the per-category seed sets
    redis_db_index_2 = 2  # db holding the article bodies

    conf = ConfigParser.SafeConfigParser()
    conf.read("huxiu-crawler.ini")
    redis_db_host = conf.get("local", "redis.host")
    redis_db_port = conf.getint("local", "redis.port")


def fetch_all_categories():
    """Parse huxiu_news_type.txt and return the list of category ids.

    Each data line has the form ``<name>\\t<cat_id>``; lines without a tab
    are skipped.

    Fix: the trailing newline is removed with ``rstrip("\\n")`` instead of
    slicing off the last character, which corrupted the final line when the
    file does not end with a newline.
    """
    categories = []
    with open("huxiu_news_type.txt", "r") as f:
        for line in f:
            if "\t" in line:
                categories.append(line.split("\t")[1].rstrip("\n"))
    return categories


def save_detail(id, para_list):
    """Persist an article body in Redis db 2 under the key "hx_<id>".

    Paragraphs are joined with newlines.  Returns 1 so callers can count
    saved articles."""
    body = "\n".join(para_list)
    conn = redis.Redis(host=redis_db_host, port=redis_db_port, db=redis_db_index_2)
    conn.set("hx_%s" % id, body)
    return 1


class HuXiuNewsCrawler(object):
    """Crawler for huxiu.com news.

    Two phases:
      1. crawl_seed_all()   -- collect article ids per category into Redis db 1.
      2. crawl_detail_all() -- fetch each seeded article body into Redis db 2.
    """

    def __init__(self):
        pass

    def crawl_seed_all(self):
        """Collect seeds (up to 10 pages) for every category listed in
        huxiu_news_type.txt."""
        print("start crawl_seed_all() ")
        for cat_id in fetch_all_categories():
            print("--> fetch category id = %s" % cat_id)
            self.crawl_seed(total_page=10, cat_id=cat_id)

    def crawl_seed(self, total_page=100, cat_id=""):
        """Page through a category's ajax listing and save the article ids.

        :param total_page: upper bound on pages; shrunk to the server-reported
                           total once a page has been fetched
        :param cat_id: huxiu category id as a string
        :return: 1 (kept for compatibility with existing callers)
        """
        page = 1
        while page <= total_page:
            if page % 5 == 0:
                # be polite: pause every 5 pages
                print("page=%d, sleep..." % page)
                time.sleep(2)
            url = 'https://www.huxiu.com/channel/ajaxGetMore?page=%d&catId=%s' % (page, cat_id)
            result = fetch(url)
            if len(result) != 0:
                ret_json = json.loads(result)
                data = ret_json["data"]
                if len(data) == 0:
                    print("*** data is empty! ***")
                    break
                total_page = int(data["total_page"])
                content = data["data"]
                # article links look like href="/article/<id>.html"
                seed_list = re.findall(r'href="/article/(.+?)\.html', content)
                save_seed(cat_id=cat_id, seed_list=seed_list)
            page += 1
        return 1

    def crawl_detail_all(self):
        """Fetch and store the body of every seeded article, for every
        category."""
        print("start crawl_detail_all() ")
        r = redis.Redis(host=redis_db_host, port=redis_db_port, db=redis_db_index_1)
        cnt = 0
        for cat_id in fetch_all_categories():
            print("cat_id = %s" % cat_id)
            seeds = r.smembers("%s-%s" % ("huxiu.news.set", cat_id))
            for seed in seeds:
                cnt += self.crawl_detail(seed)
                if cnt % 100 == 0:
                    # throttle roughly every 100 saved articles
                    print("cnt = %d, sleep..." % cnt)
                    time.sleep(2)

    def crawl_detail(self, id):
        """Crawl a single article and store its non-empty paragraphs.

        :param id: news id (the numeric part of the article URL)
        :return: 1 on success (propagated from save_detail)
        """
        url = 'https://www.huxiu.com/article/%s.html' % id
        content = fetch(url)
        soup = BeautifulSoup(content, "html.parser")
        class_type = "article-wrap"
        # narrow to the article container before extracting paragraphs
        tmp_content = soup.find("div", class_=class_type)
        soup = BeautifulSoup(str(tmp_content), "html.parser")
        para_list = []
        for node in soup.div.find_all("p"):
            text = node.get_text()
            if len(text) > 0:
                para_list.append(text)
        return save_detail(id, para_list)


def test_01():
    """Manual smoke test: crawl one category's seeds and one article detail.

    Fix: ``crawl_detail()`` returns the int 1 (from ``save_detail``), so the
    original ``len(ret)`` raised TypeError; print the value directly.
    """
    crawler = HuXiuNewsCrawler()
    # seed phase
    ret = crawler.crawl_seed(cat_id="2", total_page=10)
    print("ret = %s" % ret)
    # detail phase
    ret = crawler.crawl_detail("274036")
    print("ret = %s" % ret)


if __name__ == '__main__':
    # test_01()
    # ret = fetch_all_categories()
    # print "ret = ", ret
    load_config()
    start = time.time()
    HuXiuNewsCrawler().crawl_seed_all()
    HuXiuNewsCrawler().crawl_detail_all()
    print "time used = ", (time.time() - start), '(s)'
    pass
