#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2016-06-15 23:09:53
# @Author  : lichao (qingligongzi@163.com)
# @Link    : http://www.qingligongzi.com
# @Version : 0.0.2

# Changelog (v0.0.2)
# - Rewritten in an object-oriented style
# - Supports fetching stories page by page, and a picture-only mode

import urllib2
import re
import http_util
import sys
import time


class QSBK():

    """Qiushibaike (qiushibaike.com) joke spider.

    Fetches "stories" page by page, parses them with regular
    expressions, and prints them one at a time on the console.
    Press Enter to show the next story; type "q" to quit.
    """

    # Labels used when assembling one story record (kept in Chinese
    # because they are printed verbatim to the console).
    TOPIC_URL = "头像url"
    ALIAS = "昵称"
    CONTENT = "内容"
    CONTENT_IMG_URL = "图文url"
    LAUGH_NUM = "好笑数"
    COMMENT_NUM = "评论数"

    def __init__(self, onlyPic=False):
        # When True, keep only stories that contain a picture.
        self.onlyPic = onlyPic
        # Controls the interactive loop in start().
        self.enable = True
        # Next page number to fetch.
        self.page = 1
        self.url = "http://www.qiushibaike.com/8hr/page/"
        # Request headers come from the project-local http_util module.
        self.headers = http_util.headers
        # Stories fetched so far but not yet printed.
        self.story_collection = []

    def load_html(self, url, data, headers):
        """Fetch *url* and return the page body decoded as UTF-8.

        Returns None when the request fails.
        """
        req = urllib2.Request(url, data, headers)
        try:
            response = urllib2.urlopen(req)
        except urllib2.URLError as e:
            # BUG FIX: the original returned None only when the error had
            # a 'reason' attribute; otherwise control fell through to
            # response.read() with 'response' undefined -> NameError.
            # Now any URLError yields None.
            if hasattr(e, 'reason'):
                print("请求糗事百科失败：%s" % e.reason)
            return None
        try:
            return response.read().decode("utf-8")
        finally:
            # Always release the underlying connection.
            response.close()

    def parse_html(self, html):
        """Parse one page of HTML and return a list of stories.

        Each story is a list of (label, value) pairs using the class
        constants above. When self.onlyPic is set, stories without a
        picture are dropped.
        """
        # Captures: avatar url, nickname, text content, trailing chunk
        # (searched below for a picture), laugh count, comment count.
        pattern_str = '<div.*?class=.*?author.*?<a.*?<img.*?src="(.*?)".*?<a.*?href="/users.*?<h2>(.*?)</h2>' + \
            '.*?<div.*?content">(.*?)</div>(.*?)<span.*?stats-vote.*?number">(.*?)</i>.*?qiushi_comments.*?number">(.*?)</i>'
        pattern = re.compile(pattern_str, re.S)
        items = re.findall(pattern, html)

        # Secondary pattern: picture URL inside the story's "thumb" div.
        pattern_str = 'class="thumb.*?<img.*?src="(.*?)"'
        pattern = re.compile(pattern_str, re.S)

        story_list = []
        for item in items:
            sub_items = re.findall(pattern, item[3])
            if len(sub_items) > 0:
                content_img_url = sub_items[0]
            else:
                content_img_url = ''
            # Assemble one story as a list of (label, value) pairs.
            story = [(self.TOPIC_URL, item[0]), (self.ALIAS, item[1]), (self.CONTENT, item[2].replace(
                '<br/>', '\n').strip()), (self.CONTENT_IMG_URL, content_img_url), (self.LAUGH_NUM, item[4]), (self.COMMENT_NUM, item[5])]
            # Picture-only mode: skip stories without an image.
            if self.onlyPic:
                if len(content_img_url) != 0:
                    story_list.append(story)
            else:
                story_list.append(story)
        return story_list

    def get_story(self):
        """Fetch pages starting at self.page until at least one story is
        found, appending the result to self.story_collection.

        Any error is printed and aborts the fetch (best effort).
        """
        try:
            # Iterative retry instead of the original self-recursion, so a
            # long run of empty pages cannot exhaust the call stack.
            while True:
                print(u"正在获取糗事百科第%d页" % self.page)
                # Build the page URL and fetch it.
                url = self.url + str(self.page)
                content = self.load_html(url, None, self.headers)
                # Parse the page into stories.
                page_story_list = self.parse_html(content)
                print(u"获取%d个故事" % len(page_story_list))
                # Advance to the next page for the following call.
                self.page += 1
                if len(page_story_list) == 0:
                    # Empty page: pause briefly, then try the next one.
                    time.sleep(1)
                    continue
                self.story_collection.extend(page_story_list)
                return
        except Exception as e:
            print(e)

    def print_story(self, story):
        """Print one story's (label, value) pairs to the console."""
        print("-" * 30)
        for t in story:
            # 'ignore' works around UnicodeEncodeError on characters the
            # gbk console codec cannot represent, e.g. u'\u200e'.
            print(("\n%s:\n%s" % t).encode('gbk', 'ignore'))

    def print_del_story(self):
        """Print the first queued story and remove it from the queue."""
        if len(self.story_collection) > 0:
            self.print_story(self.story_collection[0])
            del self.story_collection[0]

    def start(self):
        """Run the interactive loop: Enter shows a story, "q" quits."""
        # Work around UnicodeEncodeError: 'ascii' codec can't encode
        # characters (Python 2 only).
        reload(sys)
        sys.setdefaultencoding("utf-8")

        self.get_story()
        self.print_del_story()
        while self.enable:
            # Renamed from 'input' to avoid shadowing the builtin.
            cmd = raw_input()
            if cmd == "q":
                self.enable = False
                return
            self.print_del_story()

            # When at most one story remains, prefetch the next page.
            if len(self.story_collection) <= 1:
                self.get_story()
                self.print_del_story()

if __name__ == "__main__":
    # Guarded entry point so importing this module does not start
    # crawling as a side effect.
    spider = QSBK()

    # To crawl only picture stories instead:
    # spider = QSBK(True)

    # Start the interactive crawler.
    spider.start()
