
import urllib.request
from urllib.error import URLError,HTTPError
from bs4 import BeautifulSoup as BS

# Browser-like request headers so the site serves pages to the scraper.
# Tip: in a real project, keep headers in a separate module and import them.
header = {
    'User-Agent': ('Mozilla/5.0 (Windows NT 10.0; WOW64) '
                   'AppleWebKit/537.36 (KHTML, like Gecko) '
                   'Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'),
}
class NovelDownload(object):
    """Scrape a novel from biqukan.com: collect chapter URLs from the
    index page, then download each chapter into a local ``<title>.txt``."""

    def geturl(self, url, List_url):
        """Fetch the index page at *url* and append chapter URLs to *List_url*.

        url      -- base URL of the novel index page (chapter hrefs are
                    joined onto it)
        List_url -- list that receives the chapter URLs (mutated in place)
        Returns *List_url* (unchanged if the request failed).
        """
        try:
            req = urllib.request.Request(url, headers=header)
            # Context manager guarantees the connection is closed.
            with urllib.request.urlopen(req) as response:
                html = response.read().decode('gbk')
            soup = BS(html, 'lxml')
            charpter = soup.find_all('div', attrs={'class': 'listmain'})
            download_soup = BS(str(charpter), 'lxml')
            download_List = download_soup.find_all('a')
        # HTTPError is a subclass of URLError, so it must be caught first;
        # with the original ordering the HTTPError branch was unreachable.
        except HTTPError as e:
            print("HTTPError:", e.code)
        except URLError as e:
            print("URLError:", e.reason)
        else:
            # Build the list only on success.  The original code did this in
            # ``finally``, which raised NameError after a failed request
            # because download_List was never bound.
            Novel_num = len(download_List)
            # The first 12 and last 3 anchors on the index page are
            # navigation links, not chapters -- skip them.
            for i in range(12, Novel_num - 3):
                List = url + download_List[i].get('href').split('/')[-1]
                List_url.append(List)
            print(Novel_num)
        return List_url

    def get_content(self, List_url):
        """Download every chapter URL in *List_url* and write each chapter
        to ``<title>.txt`` in the current directory.

        Per-URL HTTP/URL errors are reported and the loop continues with
        the next chapter.
        """
        for url in List_url:
            try:
                req = urllib.request.Request(url, headers=header)
                with urllib.request.urlopen(req) as response:
                    text = response.read().decode('gbk')
                soup = BS(text, 'lxml')
                Title = soup.find_all('h1')[0].text
                # NovelName = 'c:\\users\\XXX\\desktop\\123\\' + Title + '.txt'  # optional target dir
                NovelName = Title + '.txt'
                content = soup.find_all('div', attrs={'id': 'content'})
                # The site pads paragraphs with non-breaking spaces (\xa0);
                # normalize them so the text can be split and written cleanly.
                CC = content[0].text.replace('\xa0\xa0', '/')
                # Later chapters use full-width spaces (\u3000) instead --
                # the author changed the page layout partway through.
                CC = CC.replace('\u3000\u3000', '////')
                # One paragraph per line for readability.
                Txt = [part + '\n' for part in CC.split('////')]
                # Explicit UTF-8: the platform default encoding may not be
                # able to represent the GBK-sourced text on every system.
                with open(NovelName, 'w', encoding='utf-8') as f:
                    f.write(Title)
                    f.writelines(Txt)
                print(Title, Txt)
            # HTTPError first -- it is a subclass of URLError.
            except HTTPError as e:
                print("HTTPError:", e.code)
            except URLError as e:
                print('URLError:', e.reason)


if __name__ == "__main__":
    # Entry point: collect the chapter URLs, report how many were found,
    # then download every chapter to disk.
    index_url = 'http://www.biqukan.com/1_1094/'
    chapter_urls = []
    spider = NovelDownload()
    spider.geturl(index_url, chapter_urls)
    print(len(chapter_urls))
    spider.get_content(chapter_urls)



