#!/usr/bin/python2.7
#coding:utf-8
import urllib2
import re
from bs4 import BeautifulSoup

import sys
# Python 2-only hack: reload() restores the setdefaultencoding attribute
# (deleted by site.py) so the process-wide default encoding can be forced
# to UTF-8, avoiding implicit-conversion UnicodeDecodeErrors below.
reload(sys)
sys.setdefaultencoding('utf-8')

# 1. Fetch a page over HTTP; used for both the index page and detail pages.
def OpenPage(url):
	'''
	Build an HTTP request for the given URL, send it to the server,
	and return the response body re-encoded as UTF-8.

	:param url: absolute URL of the page to fetch
	:return: page content as a UTF-8 encoded byte string
	'''
	# Build the request object.
	req = urllib2.Request(url)
	# Send the HTTP request; urlopen returns a file-like response object.
	f = urllib2.urlopen(req)
	try:
		data = f.read()
	finally:
		# Always release the underlying socket, even if read() raises.
		f.close()
	# The site serves GBK: decode to unicode ('ignore' drops bad bytes),
	# then re-encode everything downstream as UTF-8.
	data = data.decode('GBK', 'ignore').encode('UTF-8')
	return data

def ParseMainPage(page, base_url='https://www.ybdu.com/xiaoshuo/13/13052/'):
	'''
	Extract the chapter URLs from the novel's index page.

	:param page: HTML of the index page (as returned by OpenPage)
	:param base_url: prefix joined onto each relative chapter href;
	                 defaults to this novel's index URL
	:return: list of absolute chapter-page URLs
	'''
	soup = BeautifulSoup(page, 'html.parser')
	# Chapter links on this site all carry hrefs containing '3300'
	# (the chapter-id prefix), so filter anchors on that pattern.
	chapter_list = soup.find_all(href=re.compile('3300'))
	# Turn each relative href into an absolute URL.
	url_list = [base_url + item['href'] for item in chapter_list]
	return url_list

def ParseDetailPage(page):
	'''
	Pull the chapter text out of a detail page.

	:param page: HTML of a chapter page (as returned by OpenPage)
	:return: chapter text with the trailing script junk stripped
	'''
	doc = BeautifulSoup(page, 'html.parser')
	# The chapter body lives in the first element with class "contentbox".
	text = doc.find_all(class_='contentbox')[0].get_text()
	# Drop the trailing "show_style2();" script call plus 3 junk characters.
	trailing = len('show_style2();') + 3
	return text[:-trailing]

def Test1():
	url='https://www.ybdu.com/xiaoshuo/13/13052/'
	print OpenPage(url)	

def Test2():
	url = 'https://www.ybdu.com/xiaoshuo/13/13052/'
	html = OpenPage(url)
	print ParseMainPage(html)	
def Test3():
	url='https://www.ybdu.com/xiaoshuo/13/13052/3300273.html'
	print OpenPage(url)
   	# print html`
def Test4():
	'''Smoke test: fetch one chapter, parse it, and append it to ./ww.txt.'''
	chapter_url = 'https://www.ybdu.com/xiaoshuo/13/13052/3300273.html'
	chapter_text = ParseDetailPage(OpenPage(chapter_url))
	Write('./ww.txt', chapter_text)

def Write(file_path, data):
	'''
	Append data to the file at file_path, creating the file if needed.

	:param file_path: path of the output file
	:param data: string to append
	'''
	# 'a' (append-only) is enough: nothing here ever reads the file back,
	# so the '+' read access of the original mode was unused.
	with open(file_path, 'a') as f:
		f.write(data)
def Run():
	'''
	Entry point for the whole crawl: download the novel's index page,
	then fetch every chapter and append its text to one output file.
	'''
	index_url = 'https://www.ybdu.com/xiaoshuo/13/13052/'
	# Step 1: fetch the novel's index page.
	index_html = OpenPage(index_url)
	# Step 2: extract every chapter URL from the index.
	chapter_urls = ParseMainPage(index_html)
	for chapter_url in chapter_urls:
		# Step 3: fetch the chapter page and parse out its text.
		chapter_text = ParseDetailPage(OpenPage(chapter_url))
		# Step 4: append the chapter text to the output file.
		Write('./wwyx.txt', chapter_text)

if __name__ == '__main__':
	# Individual smoke tests; uncomment one to run it in isolation
	# instead of the full crawl.
	#Test1()
	#Test2()
	#Test3()
	#Test4()
	Run()

