# -*- coding: UTF-8 -*-
#支持《笔趣阁》《新笔下文学》《笔下文学》三个网站，用时输入第一章的网址、书名和是否从第一章开始爬取三个参数即可
from bs4 import BeautifulSoup
import urllib2
import sys
import re
import time
from sys import argv
# Python 2: re-expose setdefaultencoding and force UTF-8 so mixed
# unicode/str writes to the output file do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding( "utf-8" )
startTime=time.time()
# CLI arguments: script name, first-chapter URL, output book name, and
# "True"/"False" for whether `url` points at the book's first chapter.
file,url,filename,isFirstChapter= argv
if(isFirstChapter=="True"):
    isFirstChapter=True;
else:
    isFirstChapter=False;
ConstUrl=""
# Base URL = everything before the trailing "/<digits>.html" chapter id;
# later chapter ids are appended to it to build the next-chapter URL.
items=re.findall(".+(?=/\d+\.html)", url)
if(len(items)>0):
    ConstUrl=items[0]
# NOTE(review): assumes the console argument arrives GBK-encoded
# (Chinese-locale Windows terminal) — confirm before running elsewhere.
filename=filename.decode("gbk")
f=open(filename+".txt",'w')
nextIndex=-1
# Selector configuration discovered from the first page: which attribute
# key ('id' or 'class') and value locate the title and content elements.
titleAttrsKey=""
titleAttrsValue=""
nextAttrsValue=""
contentAttrsKey=""
contentAttrsValue=""
nextStrValue=""
# Fetch the first chapter once so the selector auto-detection below can
# inspect this site's page structure before the crawl loop starts.
responsPage = urllib2.urlopen(url)
WebContents = responsPage.read()
soup = BeautifulSoup(WebContents,"html.parser")
# Regex capturing the numeric chapter id inside href="NNNN.html" links.
nextStrValue=u"(?<=href=\")\d+(?=\.html)"

# Auto-detect which attribute identifies the chapter-title element.
# Candidates are probed in priority order; the first one present on the
# fetched first page wins. If none match, the selectors stay "".
if (titleAttrsKey == "" or titleAttrsValue == ""):
    for candidateKey, candidateValue in (('id', 'title'),
                                         ('id', 'novel_title'),
                                         ('class', 'title'),
                                         ('class', 'bookname'),
                                         ('class', 'novel_title')):
        if soup.find(attrs={candidateKey: candidateValue}) is not None:
            titleAttrsKey = candidateKey
            titleAttrsValue = candidateValue
            break
# Auto-detect which attribute identifies the chapter-content element,
# using the same first-match-wins probe over this site's known layouts.
if (contentAttrsKey == "" or contentAttrsValue == ""):
    for candidateKey, candidateValue in (('id', 'content'),
                                         ('id', 'novel_content'),
                                         ('class', 'content'),
                                         ('class', 'novel_content')):
        if soup.find(attrs={candidateKey: candidateValue}) is not None:
            contentAttrsKey = candidateKey
            contentAttrsValue = candidateValue
            break
while(True):
    try:
        responsPage = urllib2.urlopen(url)
        orignWeb=responsPage.read()
        WebContents =orignWeb
        soup = BeautifulSoup(WebContents,"html.parser")
        if(titleAttrsValue=='bookname' and titleAttrsKey=='class'):#笔趣阁的格式
            titleset=soup.find(attrs={titleAttrsKey:titleAttrsValue})
            title=titleset.h1
        else:
            title = soup.find(attrs={titleAttrsKey: titleAttrsValue})
        f.write(title.string)
        print title.string
        content=soup.find(attrs={contentAttrsKey:contentAttrsValue})
        contentText=""
        if (titleAttrsValue == 'bookname' and titleAttrsKey == 'class'):  # 笔趣阁的格式
            allbr=content.find_all('br')
            for br in allbr:
                text=unicode(br.next.string)
                a=''
                contentText=a.join([contentText,text.strip(' '),'\n'])
        else:
            contentText = content.text
            contentText = re.sub("[\&nbsp\;]+", "\n  ", contentText)
        f.write(contentText+'\n\n')
        nextCount=len(soup.find_all(text = "下一章"))
        items = re.findall(nextStrValue, WebContents)
        if(isFirstChapter==True):
            isFirstChapter=False
            url = ConstUrl + '//' + items[nextCount-1] + '.html'
        else:
            if(len(items)<nextCount*2):
                break
            url = ConstUrl + '//' + items[nextCount*2-1] + '.html'
    except :
        #针对某些网站某一章转码失败的问题，选择跳过该章
        try:
            regstr=u"(?<=href=\")\d+(?=\.html)"
            items = re.findall(regstr,orignWeb)
            url = ConstUrl + '//' + (unicode)((int)(items[1]))+'.html'
            continue
        except:
            f.close()
f.close()
endTime=time.time()
print u"下载完成。所花时间为：",(endTime-startTime)