__author__ = 'chobit'
# -*- coding:utf-8 -*-
import urllib 
import urllib2
import re
import sys
# Page-processing helper class
class Tool:
	"""Placeholder for page-processing helpers; intentionally empty."""
# Download progress bar (reporthook for urlretrieve)
def report(count, blockSize, totalSize):
	"""urlretrieve reporthook: redraw a one-line percentage progress bar.

	count:     number of blocks transferred so far
	blockSize: size of each block in bytes
	totalSize: total size of the download in bytes
	"""
	# Clamp at 100: the final block is usually partial, so
	# count*blockSize can overshoot totalSize.
	percent = min(int(count * blockSize * 100 / totalSize), 100)
	# NOTE: the original printed a blank line here on every callback, which
	# moved the cursor to a new line and defeated the '\r' rewrite below.
	sys.stdout.write("\r%d%%" % percent + ' complete')
	sys.stdout.flush()
# sys.stdout.write('\rFetching ' + name + '...\n')
# urllib.urlretrieve(getFile, saveFile, reporthook=report)
# sys.stdout.write("\rDownload complete, saved as %s" % (fileName) + '\n\n')
# sys.stdout.flush()
# Baidu Tieba crawler class
class BDTD:

	#初始化。传入基地址，是否只看楼主参数
	def __init__(self,baseUrl,seeLZ):
		self.baseUrl = baseUrl
		self.seeLZ = '?see_lz='+str(seeLZ)

	#传入页码，获取页帖子的代码
	def getPage(self,pageNum):
		try:
			url = self.baseUrl + self.seeLZ + '&pn=' + str(pageNum)
			url = self.baseUrl
			request = urllib2.Request(url)
			response = urllib2.urlopen(request)
			# pyageCode = response.decode('utf-8')
			resultCode = ''
			for result in response:
				resultCode +=result
			# print resultCode
			return resultCode
		except urllib2.URLError,e:
			if hasattr(e,'reason'):
				print u"连接百度贴吧失败,错误原因",e.reason
				return None
	#获取帖子标题
	def getTitle(self):
		page = self.getPage(1)
		pattern = re.compile('<h3 class="core_title_txt pull-left text-overflow.*?>(.*?)</h3>',re.S)
		result = re.search(pattern,page)
		if result:
			#print result.group(1) 测试输出
			return result.group(1).strip()
		else:
			return None
	#提取贴子的页数
	def getPageNum(self):
		page = self.getPage(1)
		pattern = re.compile('<li class="l_reply_num.*?</span>.*?<span.*?>(.*?)</span>',re.S)# reseult = re.search(pattern,data)
		reseult = re.search(pattern,page)
		if reseult:
			return reseult.group(1).strip()
		else:
			return None

	#获取每一页的内容，传入页面内容
	def getContent(self):
		page = self.getPage(1)
		pattern = re.compile('<div id="post_content_.*?>(.*?)</div>',re.S)
		items = re.findall(pattern,page)
		itemimg = ''
		for item in items:
			itemimg += item 
			# print item
		# 	print '-------------------------'
		# pattern_img = re.compile('<img class="BDE_Image" src="(.*?)" size="\d\d\d\d\d\d" changedsize="true" width="\d\d\d" height="\d\d\d" size="\d\d\d">',re.S)
		# items_img = re.findall(itemimg,pattern_img)
		# for img in items_img:
		# print img
		# print itemimg
		# print type(itemimg)
		return itemimg

	#获取每一页的图片链接
	def getImg(self):
		pageContent = self.getContent()
		pattern = re.compile('src="(.+?\.(jpg|png))"',re.S)# reseult = re.search(pattern,data)
		imgs = re.findall(pattern,pageContent)
		# print pageContent
		imglist = []
		for img in imgs:
			if img[1] == 'jpg':
				imglist.append(img[0])
				# print img 
		return imglist
		# return imgs
		# print imgs
	#下载图片
	def downImg(self):
		print self.getTitle()
		imglist = self.getImg()
		x = 1
		for imgurl in imglist:
			urllib.urlretrieve(imgurl,'./img/%s.jpg' % x,reporthook=report)
			x = x + 1
# Script entry point: crawl one hard-coded thread, showing only the
# original poster's posts (seeLZ=1). Guarded so that importing this
# module does not trigger a network crawl.
if __name__ == '__main__':
	baseUrl = 'http://tieba.baidu.com/p/4507524795'
	bdtb = BDTD(baseUrl, 1)
	bdtb.downImg()
