from bs4 import BeautifulSoup as bs
import requests, threading, datetime, os
from retrying import retry

@retry(stop_max_attempt_number=100)  # retry up to 100 times; only fails after all attempts error out
def _open_url(url):
    """GET *url* and return the Response; any error (timeout, bad status) triggers a retry."""
    response = requests.get(url, headers={
    'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE'
    }, timeout=10)  # a timeout raises and is retried by the decorator
    # The original used `assert status_code == 200`, but asserts are stripped
    # under `python -O`, which would silently disable the retry-on-bad-status
    # behavior. Raise explicitly so the retry decorator always sees failures.
    if response.status_code != 200:
        raise RuntimeError("unexpected status %d for %s" % (response.status_code, url))
    return response

def textfilter(text):
    """Normalize scraped chapter text.

    Replacement order matters: stray unicode characters are mapped to
    spaces first, so runs of them can then fold into the 4-space
    indent marker that becomes a newline.
    """
    replacements = (
        ("\xa0", " "),
        ("\u2022", " "),
        ("\ufffd", " "),
        ("    ", "\n"),
    )
    for old, new in replacements:
        text = text.replace(old, new)
    return text

def fetch_info(url):
    """Download one chapter page and return its (title, body text) pair."""
    page = _open_url(url)
    page.encoding = 'gbk'  # site serves GBK-encoded pages
    parsed = bs(page.text, "html.parser")
    chapter_title = parsed.find('h1').get_text()
    chapter_body = parsed.find(id='content').get_text()
    return chapter_title, chapter_body

def fetch_chaps(url):
    """Return the list of absolute chapter URLs from the book's index page."""
    page = _open_url(url)
    page.encoding = 'gbk'  # site serves GBK-encoded pages
    anchors = bs(page.text, "html.parser").find_all('a')
    # NOTE(review): the [28:-24] window skips the site's nav/footer links —
    # fragile; breaks if the page layout changes. Confirm against a live page.
    return [url + a.get("href") for a in anchors[28:-24]]

def writefile(filename="out.txt", context="NULL"):
    """Append *context* to *filename* as UTF-8, creating the file if needed."""
    with open(filename, "a+", encoding="utf-8") as handle:
        handle.write(context)

def fetchtime():
    """Current local time formatted as 'YYYY-MM-DD HH:MM' (for log lines)."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M")

class myThread (threading.Thread):
    """Worker thread that downloads a list of chapter URLs into one text file."""

    def __init__(self, threadName, savepath, urlList):
        super().__init__()
        self.threadName = threadName  # also used as the output file's base name
        self.savepath = savepath
        self.urlList = urlList

    def run(self):
        print("Start thread: %s" % self.threadName)
        # all chapters for this worker are appended to a single file
        outfile = os.path.join(self.savepath, self.threadName + ".txt")
        for chapter_url in self.urlList:
            title, body = fetch_info(chapter_url)
            body = textfilter(body)
            writefile(outfile, title + body + "\n")
            print(fetchtime(), " ", self.threadName, " ", title)
        print("End thread: %s" % self.threadName)

def checkfolder(path):
    """Ensure the directory *path* exists, creating missing parents.

    Replaces the original prefix-by-prefix loop (which rebuilt each
    path prefix by hand, tested existence with `bool(1 - exists)`,
    and read the loop variable after the loop) with a single
    os.makedirs call. exist_ok=True also covers the original's
    skip of "." components: nothing fails for parts that already exist.
    """
    if path:
        os.makedirs(path, exist_ok=True)

if __name__ == '__main__':
    bookurl = "https://www.12zw.la/0/592/" # end with /
    savepath = bookurl.split("/")[-2]  # book id doubles as the output folder name
    checkfolder(savepath)

    cist = fetch_chaps(bookurl)
    num_threads = 9
    avelen = len(cist) // num_threads  # chapters per worker (floor)

    # Split the chapter list into contiguous slices, one per worker.
    # The last worker also takes the division remainder — same as the
    # original hand-written cist[avelen*8:] slice, but without nine
    # copy-pasted thread create/start/join statements.
    threads = []
    for i in range(num_threads):
        start = avelen * i
        end = None if i == num_threads - 1 else avelen * (i + 1)
        threads.append(myThread("Thread-%d" % (i + 1), savepath, cist[start:end]))

    for t in threads:
        t.start()
    for t in threads:
        t.join()