# coding:utf-8
'''
抓取小说 <<冥妻你真坏>>
入口: http://www.shengxu6.com/book/2967.html

[安装pip]
yum install python-pip

[依赖的库]
beautifulsoup4(pip install BeautifulSoup4)

爬虫的开发往往是一个探索性的过程, 需要见招拆招, 更适合自底向上的编程方式.
'''
import urllib2
import re
from bs4 import BeautifulSoup

import sys
reload(sys)
# Python 2 hack: restore sys.setdefaultencoding (deleted by site.py at
# startup, hence the reload) and switch the default encoding to UTF-8 so
# implicit str<->unicode conversions elsewhere don't raise UnicodeDecodeError.
sys.setdefaultencoding('utf8')

# 1. 获取页面内容
def OpenPage(url):
    """Download `url` and return the body transcoded from GBK to UTF-8.

    The target site serves GBK; decoding with errors="ignore" drops any
    malformed bytes, then the text is re-encoded as UTF-8 so the rest of
    the pipeline deals with a single encoding.

    :param url: absolute URL to fetch
    :return: page body as a UTF-8 byte string
    """
    headers = {}
    req = urllib2.Request(url, headers=headers)
    f = urllib2.urlopen(req)
    try:
        data = f.read()
    finally:
        # Always release the HTTP connection, even if read() raises
        # (the original left the response object open).
        f.close()
    return data.decode('GBK', errors="ignore").encode('UTF-8')

# 测试代码
def Test1():
    print OpenPage('http://www.shengxu6.com/book/2967.html')
# Test1()

# 2. 解析主页内容, 获取到 url 列表
def ParseMainPage(page):
    """Extract the chapter detail-page URLs from the book index page.

    Chapter links are the anchors whose href contains "read"; they are
    site-relative, so the host is prepended to build absolute URLs.

    :param page: HTML of the book index page
    :return: list of absolute chapter URLs
    """
    soup = BeautifulSoup(page, 'html.parser')
    base = 'http://www.shengxu6.com'
    urls = []
    for anchor in soup.find_all(href=re.compile("read")):
        urls.append(base + anchor['href'])
    return urls

def Test2():
    page = OpenPage('http://www.shengxu6.com/book/2967.html')
    print ParseMainPage(page)
# Test2()

# 3. 解析详情页内容, 获取到小说正文
def ParseDetailPage(page):
    """Pull (title, body text) out of one chapter detail page.

    :param page: HTML of a chapter page
    :return: (title, content) tuple of text strings
    """
    soup = BeautifulSoup(page, 'html.parser')
    heading = soup.find_all(class_="panel-heading")[0]
    body = soup.find_all(class_="content-body")[0]
    # The site appends a short inline-JS snippet to the body text; the
    # slice drops it. NOTE(review): assumes that snippet is exactly 12
    # characters — confirm against the live page if output looks clipped.
    return heading.get_text(), body.get_text()[:-12]

def Test3():
    page = OpenPage('http://www.shengxu6.com/read/2967_2008289.html')
    title, content = ParseDetailPage(page)
    print title
    print content
# Test3()

# 4. 结果写到文件中
def WriteDataToFile(file_path, data):
    """Append `data` to the file at `file_path`.

    Uses a context manager so the handle is closed even if write()
    raises (the original leaked the handle on error).

    :param file_path: path of the output file (created if missing)
    :param data: string to append
    """
    with open(file_path, 'a+') as f:
        f.write(data)

def Test4():
    """Smoke test: two appends should land in the same file, in order."""
    for line in ('aaaaaaa\n', 'bbbbbbb\n'):
        WriteDataToFile('tmp.txt', line)
# Test4()

# 5. 所有步骤串到一起
def Main():
    url = "http://www.shengxu6.com/book/2967.html"
    main_page = OpenPage(url)
    url_list = ParseMainPage(main_page)
    for url in url_list:
        print "crawler url=" + url
        detail_page = OpenPage(url)
        title, content = ParseDetailPage(detail_page)
        WriteDataToFile("./result.txt", "\n\n\n" + title + "\n\n\n" + content)
    print "crawler done!"

Main()

# 6. 进一步改进
#    多进程/多线程 并行爬取
