#!/usr/bin/python
# -*- coding: utf8 -*-

import urllib2
import urllib
import string
import re

# Download a web page and save it; the return value is one of the three codes below
DOWN_FAIL = -1
SAVE_FAIL = -2
SUCC_DOWN_SAVE = 0

def fetch_html(url, saveFile):
	user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
	headers = { 'User-Agent' : user_agent }
	req = urllib2.Request(url, headers=headers)
	try:

		res = urllib2.urlopen(req)
		print 'fetch success: url='+url
	except:
		print 'cannot open url='+url
		return DOWN_FAIL

	#print res.read()
	try:
		sf = open(saveFile,'w+')
		sf.write(res.read())
		print 'save success: file='+saveFile
	except:
		print 'write to file error, url='+url+', file='+saveFile
		return SAVE_FAIL

	return SUCC_DOWN_SAVE


# Parse the saved HTML and return the data of interest
def parse_html(savedFile):
	"""Extract (title, body) pairs from a saved HTML page.

	Returns a list of 2-tuples: the `title` attribute value and the
	inner HTML of each <div class="content" title="..."> element.
	"""
	# Close the handle deterministically; the original left it open.
	f = open(savedFile, 'r')
	try:
		txt = f.read()
	finally:
		f.close()
	# TODO: image-only posts still match this pattern; filter them out.
	# re.S lets '.' span newlines so multi-line posts are captured.
	# findall already returns a list, so the original element-by-element
	# copy loop was dropped.
	return re.findall('<div\s+class="content"\s+title="(.*?)">(.*?)</div>', txt, re.S)


# Base URL of the hot list; the page number is appended to form each request.
init_url='http://www.qiushibaike.com/hot/page/'
# Parsed (title, body) tuples waiting to be displayed; filled by load(),
# consumed by show().
items = []
def load(page_num):
	# print page_num
	# print 'get page '+ str(page_num)
	curr_url = init_url + str(page_num)
	sName = string.zfill(page_num,5) + '.html'#自动填充成六位的文件名
	result_code = fetch_html(curr_url,sName)
	if result_code == SUCC_DOWN_SAVE:
		result = parse_html(sName)
	else:
		print 'fetch url fail'

	# for line in mylog:
	#     list1.append(line)
	# list1.extend(mylog)
	items.extend(result)


def show():
	size = len(items)
	if size>10 :
		size = 10
	for i in range (1,size):
		item = items.pop(0)
		print item[0].replace('\n','')
		print item[1].replace('\n','')
		print '\n'

# test
# Manual smoke test: flip FLAG_TEST to True to verify fetch_html works
# against a known-reachable site before crawling the real target.
FLAG_TEST = False
if FLAG_TEST:
	this_url='http://www.baidu.com'
	fetch_html(this_url,'baidu.html')


# entry
# Banner (Chinese): crawl qiushibaike, show in terminal, 'q' to quit.
print u"""
---------------------
web crawer
ver: 0.1
author: wzq
date: 2015-02-26
des: 爬取糗事百科,终端展示,q退出
---------------------
"""
page_num = 1
usr_input=str(raw_input('press anykey start, q exit\n'))
# Main interactive loop: any key shows the next batch, 'q' exits.
while True:
	if usr_input == 'q':
		print 'exit now'
		break


	# Refill the queue when fewer than 10 items remain, then display
	# the next batch.
	if len(items) < 10: # need load
		print '-----------page_num---------',page_num
		load(page_num)
		page_num +=1
		show()
	else:
		show()
	# Block until the user presses a key (or 'q') for the next batch.
	usr_input=str(raw_input(''))