#!/usr/bin/env python  
# -*- coding: utf-8 -*-  
from HTMLParser import HTMLParser
import sys,re,urllib2,urllib,time,xlwt,os

# Local filesystem/console encoding used below to re-encode fetched pages
# for printing.  NOTE(review): this shadows the `type` builtin — QSBK.getPage
# reads it, so renaming requires touching the class too.
type = sys.getfilesystemencoding() 
# Python 2-only hack: reload() restores the setdefaultencoding attribute that
# site.py deletes, then switches implicit str<->unicode coercion to UTF-8.
reload(sys)  
sys.setdefaultencoding('utf8')
#糗事百科爬虫类
class QSBK:        
    #开始方法
    def __init__(self):
        self.pageIndex = 1
        self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
        self.headers = { 'User-Agent' : self.user_agent }
        #存放段子的变量，每一个元素是每一页的段子们
        self.stories = []
        #存放程序是否继续运行的变量
        self.enable = False
    #传入某一页的索引获得页面代码
    def getPage(self,pageIndex):
        try:
            url = 'http://www.qiushibaike.com/hot/page/' + str(pageIndex)
            # proxy_support = urllib2.ProxyHandler({"http":"http://192.168.2.6:80"})
            # opener = urllib2.build_opener(proxy_support)
            # urllib2.install_opener(opener)
            #构建请求的request
            request = urllib2.Request(url,headers = self.headers)
            #利用urlopen获取页面代码
            response = urllib2.urlopen(request).read()
            #将页面转化为UTF-8编码
            pageCode = response.decode('utf-8').encode(type) 
            return pageCode
        except urllib2.URLError, e:
            if hasattr(e,"reason"):
                print u"连接糗事百科失败,错误原因",e.reason
                return None

    #传入某一页代码，返回本页不带图片的段子列表
    def getPageItems(self,pageIndex):
        pageCode = self.getPage(pageIndex)
        if not pageCode:
            print u"页面加载失败...."
            return None
            
        pattern = re.compile(r'<h2>[^<>]*?</h2>')
        items = re.findall(pattern,pageCode)
        #用来存储每页的段子们
        pageStories = []
        #遍历正则表达式匹配的信息
        for item in items:
            data_one = item.replace('<h2>', '')
            data_two = data_one.replace('</h2>', '')
            pageStories.append(data_two)
            
        return pageStories
        # for item in items:
        #   print item
        
    def loadPage(self):
        #如果当前未看的页数少于2页，则加载新一页
        if self.enable == True:
            pageStories = self.getPageItems(self.pageIndex)
            #将该页的段子存放到全局list中
           
            time.sleep(2)
            print self.pageIndex
            if pageStories:
                self.stories.append(pageStories)
                #获取完之后页码索引加一，表示下次读取下一页
                self.pageIndex += 1
            if(self.pageIndex == 3):
                 self.enable = False
                 index = 0
                 wb = xlwt.Workbook(encoding = 'utf-8')
                 ws = wb.add_sheet('My Worksheet')
                 for item in self.stories:
                    for value in item:
                        print value
                        ws.write(index,0,u"劉金平")
                        index += 1
                 wb.save('test.xls')
                 return
    #调用该方法，每次敲回车打印输出一个段子
    def getOneStory(self,pageStories,page):
        print page
        #遍历一页的段子
        for story in pageStories:
           #等待用户输入
           input = raw_input()
           #每当输入回车一次，判断一下是否要加载新页面
           self.loadPage()
           #如果输入Q则程序结束
           if input == "Q":
               self.enable = False
               return
           print story
    def start(self):
        print u"正在读取糗事百科,按回车查看新段子，Q退出"
        self.enable = True
        self.loadPage()
        nowPage = 0
        while self.enable:
            self.loadPage()
        # while self.enable:
        #     if len(self.stories)>0:
        #         #从全局list中获取一页的段子
        #         pageStories = self.stories[0]
        #         #当前读到的页数加一
        #         nowPage += 1
        #         #将全局list中第一个元素删除，因为已经取出
        #         del self.stories[0]
        #         #输出该页的段子
        #         self.getOneStory(pageStories,nowPage)


# Run the spider only when executed as a script, so importing this module
# for reuse/testing does not immediately start crawling the network.
if __name__ == '__main__':
    spider = QSBK()
    spider.start()












# page = 1
# url = 'http://www.qiushibaike.com/hot/page/' + str(page)
# user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
# headers = { 'User-Agent' : user_agent }
# try:
#     request = urllib2.Request(url,headers = headers)
#     response = urllib2.urlopen(request).read()
#     # response = 'an example word:cat!!'
#     content =  response.decode("UTF-8").encode(type) 
#     # print content
#     pattern = re.compile(r'<h2>\n.*?\n</h2>')
#     # if pattern:
#       # print pattern.group()
#     # pattern = re.match(r'<img.*?>',content)
#     # if pattern:
#        # print("yo it's a {}".format(pattern.group()))
#     items = re.findall(pattern,content)
#     # print (items)
#     for item in items:
#       print item
# except urllib2.URLError, e:
#     if hasattr(e,"code"):
#         print e.code
#     if hasattr(e,"reason"):
#         print e.reason
