import requests
import os
from lxml import etree
#from multiprocessing import Pool as ThreadPool #多进程  
from multiprocessing.dummy import Pool as ThreadPool #多线程   
class Ksw:
    """Scraper for the novel site 00ksw.org (mobile mirror m.00ksw.org).

    Every method is stateless and is invoked as ``Ksw.method(...)`` in the
    original code, so all of them are declared as static methods — this is
    backward-compatible with the existing call style.
    """

    @staticmethod
    def request(url):
        """GET *url* and return its body decoded as GBK text.

        A timeout is set so a stalled connection cannot hang a thread-pool
        worker forever (``requests`` otherwise waits indefinitely).
        """
        req = requests.get(url, timeout=30)
        req.encoding = "gbk"
        return req.text

    @staticmethod
    def getlist(url):
        """Return the absolute URL of every chapter listed on *url*.

        *url* is the book's ``...all.html`` index page; chapter hrefs are
        relative, so the ``all.html`` suffix is stripped to form the base.
        """
        ele = etree.HTML(Ksw.request(url))
        base = url.replace('all.html', '')  # hoisted out of the loop
        return ["%s/%s" % (base, a.attrib["href"])
                for a in ele.xpath('/html/body/ul/li/a')]

    @staticmethod
    def getlisttitle(url):
        """Return the chapter title shown on the page at *url*.

        Raises IndexError if the page has no ``#chaptertitle`` element.
        """
        ele1 = etree.HTML(Ksw.request(url))
        a1 = ele1.xpath('//*[@id="chaptertitle"]')
        return a1[0].text

    @staticmethod
    def getcontent(url):
        """Fetch one chapter page.

        Returns ``{"title": str, "content": str}`` with the site watermark
        stripped from the content.
        """
        ele = etree.HTML(Ksw.request(url))
        content = ele.xpath('//*[@id="novelcontent"]/p/text()')
        title = ele.xpath('//*[@id="chaptertitle"]')
        return {
            "title": title[0].text,
            "content": Ksw.format("\r\n".join(content)),
        }

    @staticmethod
    def format(content):
        """Strip the site's watermark string from chapter text."""
        return content.replace('www.00ksw.org', '')

    @staticmethod
    def getmuligetListFromAll(url):
        """Scrape one page of the full-catalogue listing at *url* and
        download every book found on it via :meth:`handle1`.
        """
        ele = etree.HTML(Ksw.request(url))
        category = ele.xpath('/html/body/div[4]/div/p[1]')
        title = ele.xpath('/html/body/div[4]/div/p[2]/a')
        author = ele.xpath('/html/body/div[4]/div/p[3]/a')
        # zip stops at the shortest column, so a ragged row cannot raise
        # IndexError the way the old index-based loop could.
        for cat, tit, auth in zip(category, title, author):
            textTitle = cat.text + "----" + tit.text + "----" + auth.text + ".txt"
            textHref = "http://m.00ksw.org" + tit.attrib["href"] + "all.html"
            Ksw.handle1(textTitle, textHref)

    @staticmethod
    def getListFromAll():
        """Download the whole catalogue: pages 1..201, fetched in parallel."""
        # 'urls' instead of 'list' -- the original shadowed the builtin.
        urls = ["http://m.00ksw.org/qb/%d.html" % i for i in range(1, 201 + 1)]
        pool = ThreadPool()
        pool.map(Ksw.getmuligetListFromAll, urls)
        pool.close()
        pool.join()

    @staticmethod
    def handle1(titleA, url):
        """Download the book whose index page is *url* into a text file
        named *titleA*, skipping it when the file already exists.
        """
        path = r"G:\py\爬虫数据\零点全本电子书\resource\%s" % titleA
        if os.path.exists(path):
            print("无需再次下载----------" + titleA)
            return

        ls = Ksw.getlist(url)
        # BUG FIX: the original ran pool.map(handle2, ls), threw the results
        # away, then re-fetched every chapter serially in a second loop --
        # downloading the entire book twice. Keep the pooled results and use
        # them directly; pool.map preserves the chapter order of 'ls'.
        pool = ThreadPool()
        chapters = pool.map(Ksw.handle2, ls)
        pool.close()
        pool.join()

        # ''.join instead of quadratic += on a variable named 'str'.
        text = "各种qq快手视频业务：3782.321ya.cn\r\n淘宝优惠券：tbyhq.c8d8.com\r\nq/vx:1377093782\r\n公众号:azrs66666\r\n小说群:334656364\r\n---------------下面就没有广告了\r\n"
        text = text + "".join("%s" % chapter for chapter in chapters)
        with open(path, "w", encoding='utf-8') as f:
            f.write(text)
        print("下载成功----------" + titleA)

    @staticmethod
    def handle2(url):
        """Fetch one chapter and return it as '--->title<---' plus body.

        Returns an error string (not None) when the page yields no data, so
        callers can concatenate the result unconditionally.
        """
        info = Ksw.getcontent(url)
        if info is None:  # defensive: getcontent currently always returns a dict
            print("网址错误：>>>>>." + url)
            return "网址错误：" + url
        # Treat a missing title/content as empty rather than crashing.
        title = info['title'] or ""
        content = info['content'] or ""
        return "\r\n--->" + title + "<---\r\n" + content


         
         
   
