import requests
from bs4 import BeautifulSoup
import re
import os
import time

def getlist(book_id, re_header):
    """Fetch the complete chapter list for a book from m.50zw.la.

    Walks the paginated chapter index (roughly 30 chapters per page)
    until an empty page is reached.

    Parameters
    ----------
    book_id : str
        Book id used to build the chapter-index URL.
    re_header : dict
        HTTP request headers (e.g. User-Agent) sent with every request.

    Returns
    -------
    list[str]
        Relative chapter hrefs with the trailing '.html' stripped.
    """
    url = 'http://m.50zw.la/chapters_' + book_id + '/'

    print('正在获取章节列表')
    page = 1
    chapter_list = []
    while True:
        # Fetch one index page; the site serves GBK-encoded HTML.
        # BUGFIX: re_header is a header dict, so send it as `headers`,
        # not `params` (which appended it to the URL query string).
        r = requests.get(url + str(page), headers=re_header)
        r.encoding = 'gbk'
        soup = BeautifulSoup(r.text, "html.parser")

        temp_list = [a['href'][0:-5] for a in soup.select('.last9 a')]
        # BUGFIX: the old code ran `del temp_list[0]` before checking for
        # emptiness, raising IndexError on a page with no links at all.
        if not temp_list:
            break
        # The first '.last9 a' link is navigation, not a chapter.
        del temp_list[0]
        if not temp_list:
            break
        chapter_list.extend(temp_list)
        page += 1

    print('章节列表获取完毕')
    # BUGFIX: the counter used to be bumped before the empty-page check,
    # so the summary overshot the real page count.
    pages = page - 1
    print('共', pages, '页  约', 30 * pages, '章\n')

    return chapter_list

def dealwithcontent(soup):
    """Extract and clean the chapter body text from a parsed chapter page.

    Parameters
    ----------
    soup : BeautifulSoup
        Parsed chapter page; the text lives in the element with id 'nr1'.

    Returns
    -------
    str
        Chapter text with site boilerplate fragments removed.
    """
    chapter_content = soup.select('#nr1')[0].text

    # The boilerplate fragments are fixed literal strings, so plain
    # str.replace suffices.  The old re.sub calls used '\(' escapes in a
    # non-raw string, which Python flags as an invalid escape sequence.
    for junk in ('-->>', '_Middle();', '本章未完，点击下一页继续阅读'):
        chapter_content = chapter_content.replace(junk, '')

    return chapter_content

def dealwithname(soup):
    """Return the chapter title taken from the page's <title> tag.

    Site titles look like '章节名_书名_作者_武林中文网'; everything from
    the first underscore-separated suffix onward is stripped off.
    """
    raw_title = soup.select('title')[0].text
    return re.sub('_.*_.*_武林中文网', '', raw_title)

def writeinfo(book_url, book_name, re_header, fo):
    """Fetch a book's info page, print its metadata, and write it to a file.

    Parameters
    ----------
    book_url : str
        URL of the book's info page.
    book_name : str
        NOTE(review): kept only for interface compatibility — the value is
        immediately overwritten by the name scraped from the page.
    re_header : dict
        HTTP request headers sent with the request.
    fo : file object
        Binary-mode file that receives the UTF-8 encoded metadata lines.
    """
    # Fetch and parse the GBK-encoded info page.
    # BUGFIX: re_header is a header dict — pass it as `headers`,
    # not `params` (which appended it to the URL query string).
    r = requests.get(book_url, headers=re_header)
    r.encoding = 'gbk'
    soup = BeautifulSoup(r.text, "html.parser")

    # Scrape metadata; the '.info' block has a fixed field order on this site.
    book_name = soup.select('.info p strong')[0].text
    book_author = soup.select('.info p')[1].text
    book_type = soup.select('.info p')[2].text
    book_status = soup.select('.info p')[3].text
    book_lastest = soup.select('.info p')[4].text
    book_intro = soup.select('.intro')[0].text

    print('名称：' + book_name)
    print(book_author)
    print('简介：' + book_intro + '\n')

    # Write the UTF-8 encoded metadata lines to the binary output file.
    for line in ('名称：' + book_name + '\n',
                 book_author + '\n',
                 book_type + '\n',
                 book_status + '\n',
                 book_lastest + '\n',
                 '简介：\n' + book_intro + '\n'):
        fo.write(line.encode('utf-8'))

def getname(book_url, re_header):
    """Fetch a book's info page and return the book's name.

    Parameters
    ----------
    book_url : str
        URL of the book's info page.
    re_header : dict
        HTTP request headers sent with the request.

    Returns
    -------
    str
        The book name scraped from the '.info p strong' element.
    """
    # BUGFIX: re_header is a header dict — pass it as `headers`,
    # not `params` (which appended it to the URL query string).
    r = requests.get(book_url, headers=re_header)
    r.encoding = 'gbk'
    soup = BeautifulSoup(r.text, "html.parser")

    return soup.select('.info p strong')[0].text
    
