﻿from urllib import request
import re
# Utility class that strips/normalizes HTML tags in a fetched page
class Tool(object):
	"""Convert the HTML body of a Tieba post into plain text.

	Each regex below is compiled once at class level; :meth:`replace`
	applies them in a fixed, order-dependent sequence.
	"""
	# Drop <img ...> tags and runs of exactly 7 spaces.
	# (The original pattern ended in a stray '|', an empty alternative that
	# matched at every position; removed — substituting "" for an empty
	# match is a no-op, so behavior is unchanged.)
	removeImg = re.compile(r'<img.*?>| {7}')
	# Drop anchor tags, keeping the link text.
	removeAddr = re.compile(r'<a.*?>|</a>')
	# Line-breaking tags become newlines.
	replaceLine = re.compile(r'<tr>|<div>|</div>|</p>')
	# Table cells become tabs.
	replaceTD = re.compile(r'<td>')
	# Each paragraph starts on a new line with a small indent.
	replacePara = re.compile(r'<p.*?>')
	# Single or double <br> becomes one newline.
	replaceBR = re.compile(r'<br><br>|<br>')
	# Finally, drop any tag not handled above.
	removeExtraTag = re.compile(r'<.*?>')

	def replace(self, x):
		"""Return *x* with HTML markup converted to plain text.

		The substitutions are order-dependent: tags that map to
		whitespace must run before the catch-all tag removal.
		"""
		x = self.removeImg.sub("", x)
		x = self.removeAddr.sub("", x)
		x = self.replaceLine.sub("\n", x)
		x = self.replaceTD.sub("\t", x)
		x = self.replacePara.sub("\n    ", x)
		x = self.replaceBR.sub("\n", x)
		x = self.removeExtraTag.sub("", x)
		# Trim the leading/trailing whitespace the substitutions produce.
		return x.strip()
		
	
class BDTB(object):
	"""Minimal crawler for one Baidu Tieba thread."""

	def __init__(self, baseUrl, seeLZ):
		"""
		:param baseUrl: thread URL, e.g. ``http://tieba.baidu.com/p/<id>``
		:param seeLZ: 1 to show only the original poster's floors, 0 for all
		"""
		self.baseUrl = baseUrl
		# Query fragment controlling whether only the thread starter is shown.
		self.seeLZ = "see_lz=" + str(seeLZ)
		self.tool = Tool()

	def getPage(self, pageNum):
		"""Fetch page *pageNum* of the thread and return its HTML as str.

		Sends a desktop-browser User-Agent header so the server serves
		the full desktop markup.
		"""
		url = self.baseUrl + "?" + self.seeLZ + "&pn=" + str(pageNum)
		print(url)
		req = request.Request(url)
		req.add_header("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.110 Safari/537.36")
		# Context manager guarantees the response is closed.
		with request.urlopen(req) as f:
			return f.read().decode('utf-8')

	def getContent(self, page):
		"""Extract post bodies from *page*; print and return the cleaned
		second match, or ``None`` when fewer than two posts are found
		(the original code raised IndexError in that case).
		"""
		pattern = re.compile('<div id="post_content_.*?>(.*?)</div>', re.S)
		items = re.findall(pattern, page)
		# NOTE(review): index 1 (second match) mirrors the original
		# behaviour — presumably to skip the first floor; confirm.
		if len(items) < 2:
			return None
		content = self.tool.replace(items[1])
		print(content)
		return content
if __name__ == "__main__":
	# Demo run: fetch page 1 of a sample thread (original-poster-only
	# view) and print its extracted content. Guarded so importing this
	# module no longer triggers a network request as a side effect.
	baseURL = 'http://tieba.baidu.com/p/3138733512'
	bdtb = BDTB(baseURL, 1)
	bdtb.getContent(bdtb.getPage(1))
		