#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2016-06-17 14:54:58
# @Author  : lichao (qingligongzi@163.com)
# @Link    : http://www.qingligongzi.com
# @Version : 0.0.3

# 更新内容
# 使用requests和bs4模块
# 故事使用对象处理

import re
import http_util
import sys
import time
import requests
from bs4 import BeautifulSoup

class Story():
    """A single Qiushibaike story.

    Fields are filled in by the crawler (QSBK.parse_html) after
    construction; str(story) renders only the fields that are non-empty,
    each as a "label:" line followed by the value.
    """

    # Display labels (kept in Chinese: they are part of the user-visible
    # output, not comments).
    TOPIC_URL = "头像url"
    ALIAS = "昵称"
    CONTENT = "内容"
    CONTENT_IMG_URL = "图文url"
    LAUGH_NUM = "好笑数"
    COMMENT_NUM = "评论数"

    def __init__(self):
        # Per-instance state.  These were previously class attributes,
        # which made them shared defaults across all stories; plain
        # instance attributes are the correct idiom.
        self.q_author_img_url = ''      # author's avatar URL
        self.q_author_name = ''         # author's nickname
        self.q_content = ''             # story text (may contain <br/>)
        self.q_content_img_url = ''     # attached picture URL, if any
        self.q_stats_laugh_num = ''     # "funny" vote count (string)
        self.q_stats_comment_num = ''   # comment count (string)

    def __str__(self):
        """Render every non-empty field as a labelled section."""
        pattern = "\n{0}:\n{1}"
        parts = []
        if self.q_author_img_url:
            parts.append(pattern.format(self.TOPIC_URL, self.q_author_img_url))
        if self.q_author_name:
            parts.append(pattern.format(self.ALIAS, self.q_author_name))
        if self.q_content:
            # The site delivers line breaks as literal "<br/>" tags.
            parts.append(pattern.format(
                self.CONTENT, self.q_content.replace('<br/>', '\n').strip()))
        if self.q_content_img_url:
            parts.append(pattern.format(self.CONTENT_IMG_URL,
                                        self.q_content_img_url))
        if self.q_stats_laugh_num:
            parts.append(pattern.format(self.LAUGH_NUM, self.q_stats_laugh_num))
        if self.q_stats_comment_num:
            parts.append(pattern.format(self.COMMENT_NUM,
                                        self.q_stats_comment_num))
        # join() instead of repeated += (linear, not quadratic).
        return ''.join(parts)

class QSBK():

    """Qiushibaike crawler.

    Fetches joke pages one at a time, parses each page into Story
    objects, and prints them interactively: the user presses Enter for
    the next story, or "q" to quit.

    NOTE(review): this script is Python 2 (raw_input, reload(sys),
    sys.setdefaultencoding); the Py2-only constructs are kept as-is.
    """

    def __init__(self, onlyPic=False):
        # When True, keep only stories that carry a content image.
        self.onlyPic = onlyPic
        # Controls the interactive loop in start().
        self.enable = True
        # Next page number to fetch (pages are 1-based).
        self.page = 1
        self.url = "http://www.qiushibaike.com/8hr/page/"
        # Shared request headers (User-Agent etc.) from the helper module.
        self.headers = http_util.headers
        # Stories fetched but not yet printed.
        self.story_collection = []

    def load_html(self, url, data, headers):
        """GET *url* and return the response text, or None on any
        request error.  *data* is unused; kept for interface
        compatibility with existing callers.
        """
        try:
            r = requests.get(url, headers=headers)
        except requests.exceptions.RequestException:
            # BUG FIX: the module is requests.exceptions (plural).  The
            # old "requests.exception.RequestException, e" raised
            # AttributeError instead of catching, and used Py2-only
            # comma syntax.
            # print(u"请求糗事百科失败")
            return None
        return r.text

    def parse_html(self, html):
        """Parse one page of HTML into a list of Story objects.

        Returns [] when *html* is falsy (e.g. a failed download), which
        previously crashed BeautifulSoup with a TypeError.
        """
        story_list = []
        if not html:
            return story_list
        # Explicit parser: avoids bs4's "no parser specified" warning
        # and parser-dependent behavior across machines.
        soup = BeautifulSoup(html, "html.parser")

        # Each story lives in a div whose id contains "qiushi_tag".
        q_tag_list = soup.find_all(id=re.compile("qiushi_tag"))
        for q_tag in q_tag_list:
            story = Story()
            # Author block: avatar <img> and nickname <h2>.
            # NOTE(review): assumes the page always provides both —
            # an IndexError here means the site markup changed.
            q_author = q_tag.select(".author")[0]
            story.q_author_img_url = q_author.select("img")[0].attrs["src"]
            story.q_author_name = q_author.select("h2")[0].string

            # Story text.
            story.q_content = q_tag.select(".content")[0].text

            # Optional attached picture.
            q_content_img = q_tag.select(".thumb")
            if q_content_img:
                story.q_content_img_url = \
                    q_content_img[0].select("a > img")[0].attrs["src"]

            # Optional stats: "funny" votes and comment count.
            q_stats = q_tag.select(".stats")
            if q_stats:
                q_stats = q_stats[0]
                q_stats_laugh = q_stats.select(".stats-vote > i")
                if q_stats_laugh:
                    story.q_stats_laugh_num = q_stats_laugh[0].string

                q_stats_comment_a = q_stats.select(".stats-comments > a")
                if q_stats_comment_a:
                    story.q_stats_comment_num = q_stats_comment_a[0].i.string

            # Honor the pictures-only filter.
            if not self.onlyPic or story.q_content_img_url:
                story_list.append(story)
        return story_list

    def get_story(self):
        """Fetch pages starting at self.page until one yields at least
        one story, then append those stories to the collection.

        Iterative retry replaces the original unbounded recursion,
        which could overflow the stack on a long run of empty pages.
        """
        while True:
            print(u"正在获取糗事百科第%d页" % self.page)
            url = self.url + str(self.page)
            content = self.load_html(url, None, self.headers)
            page_story_list = self.parse_html(content)
            print(u"获取%d个故事" % len(page_story_list))
            self.page += 1
            if page_story_list:
                self.story_collection.extend(page_story_list)
                return
            # Empty page or failed request: back off briefly, then try
            # the next page.
            time.sleep(1)

    def print_story(self, story):
        """Print one story to a gbk console (Windows cmd)."""
        print("-" * 30)
        # 'ignore' works around UnicodeEncodeError for characters the
        # gbk codec cannot represent (e.g. u'\u200e').
        print(str(story).encode('gbk', 'ignore'))

    def print_del_story(self):
        """Print the first queued story and remove it from the queue."""
        if self.story_collection:
            self.print_story(self.story_collection[0])
            del self.story_collection[0]

    def start(self):
        """Interactive loop: Enter = next story, "q" = quit."""
        # Py2-only hack for "ascii codec can't encode" errors; a no-op
        # concern on Python 3, where this would raise NameError.
        reload(sys)
        sys.setdefaultencoding("utf-8")

        self.get_story()
        self.print_del_story()
        while self.enable:
            # Renamed from "input", which shadowed the builtin.
            command = raw_input()
            if command == "q":
                self.enable = False
                return
            self.print_del_story()

            # Running low: prefetch the next page so the queue never
            # drains completely.
            if len(self.story_collection) <= 1:
                self.get_story()
                self.print_del_story()

if __name__ == "__main__":
    # Script entry point: guard so importing this module does not start
    # the crawler.  Pass onlyPic=True to keep only picture posts:
    #   spider = QSBK(True)
    spider = QSBK()
    spider.start()

