import re,random,time
import requests
from multiprocessing.pool import ThreadPool
from urllib import parse
# Shared worker pool used by Search.main() to fetch chapter pages.
# NOTE(review): main() calls apply_async(...).get() immediately, so the
# downloads are effectively serial despite the 5 workers.
pool = ThreadPool(5)
class Search(object):
    """Download a novel by title from www.52bqg.com into a local text file.

    Workflow: search the site for the exact title, resolve the book page,
    scrape its chapter list, then fetch each chapter and append it to
    ``./file/<name>.txt`` (the ``./file`` directory must already exist).
    """

    # Pool of desktop User-Agent strings; one is picked at random per
    # request to look less like a bot.
    UA = ["Mozilla/5.0 (Windows NT 5.1; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0",
          "Mozilla/5.0 (X11; U; Linux Core i7-4980HQ; de; rv:32.0; compatible; JobboerseBot;Gecko/20100101 Firefox/38.0",
          "Mozilla/5.0 (Windows NT 5.1; rv:36.0) Gecko/20100101 Firefox/36.0",
          "Mozilla/5.0 (Windows NT 5.1; rv:33.0) Gecko/20100101 Firefox/33.0",
          "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
          "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:43.0) Gecko/20100101 Firefox/43.0",
          "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0",
          "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:17.0) Gecko/20100101 Firefox/17.0",
          "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0",
          "Mozilla/5.0 (Windows NT 6.0; rv:34.0) Gecko/20100101 Firefox/34.0",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:52.0) Gecko/20100101 Firefox/52.0",
          "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0",
          "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:63.0) Gecko/20100101 Firefox/63.0",
          "Mozilla/5.0 (Windows NT 5.1; rv:40.0) Gecko/20100101 Firefox/40.0",
          "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0",
          "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5) Gecko/20041107 Firefox/1.0",
          "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0",
          "Mozilla/5.0 (Windows NT 6.1; rv:17.0) Gecko/20100101 Firefox/20.6.14",
          "Mozilla/5.0 (Windows NT 5.1; rv:30.0) Gecko/20100101 Firefox/30.0",
          "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10; rv:33.0) Gecko/20100101 Firefox/33.0",
          "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/29.0",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
          "Mozilla/5.0 (Windows NT 6.1; rv:52.0) Gecko/20100101 Firefox/52.0",
          "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:41.0) Gecko/20100101 Firefox/41.0",
          "Mozilla/5.0 (X11; U; Linux Core i7-4980HQ; de; rv:32.0; compatible; JobboerseBot; Gecko/20100101 Firefox/38.0",
          "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:45.0) Gecko/20100101 Firefox/45.0"
          ]

    def __init__(self, name):
        # Novel title exactly as typed; used as the search keyword and to
        # pick the matching row out of the search results.
        self.name = name
        # Output path; assumes './file' exists (not created here).
        self.file_name = './file/' + name + '.txt'
        # Running chapter counter used to number chapters in write().
        self.num = 0

    def headers_random(self):
        """Return a headers dict carrying a randomly chosen User-Agent."""
        return {'User-Agent': random.choice(self.UA)}

    def resp_data(self, url, retries=5):
        """Fetch *url* and return its HTML decoded as GBK text.

        Retries up to *retries* times on network errors.  The original
        implementation recursed without bound on any exception, which could
        overflow the stack on a permanently dead link; a bounded loop with an
        explicit failure is safer.  Raises ConnectionError when every
        attempt fails.
        """
        print('正在获取网页代码')
        for _ in range(retries):
            time.sleep(5)  # crude rate limiting so the site isn't hammered
            try:
                resp = requests.get(url, headers=self.headers_random(), timeout=10)
            except requests.RequestException:
                print('正在尝试重新发送请求')
                continue
            # The site serves GBK-encoded pages; decode accordingly.
            resp.encoding = 'gbk'
            return resp.text
        raise ConnectionError('failed to fetch {} after {} attempts'.format(url, retries))

    def find_url(self, resp):
        """Return the book-page URL whose link text equals ``self.name``.

        *resp* is the HTML of the search-results page.  Raises LookupError
        when no result matches the title exactly (the original relied on an
        accidental UnboundLocalError for this case).
        """
        print('正在查找小说地址')
        names = re.findall(r'<span class="s2"><a href=.*?>(.*?)</a></span>', resp, re.S)
        urls = re.findall(r'<span class="s2"><a href="(.*?)">.*?</a></span>', resp, re.S)
        for title, link in zip(names, urls):
            if title == self.name:
                return link
        raise LookupError('novel not found in search results')

    def find(self, url):
        """Fetch the book page at *url* and return its chapter hrefs."""
        return self.find_1(self.resp_data(url))

    def find_1(self, resp):
        """Extract relative chapter hrefs from book-page HTML *resp*."""
        print('找到小说章节')
        # The single `.` on each side of the capture matches the quote
        # character around the href value.
        return re.findall(r'<dd><a href=.(.*?).>', resp, re.S)

    def find_content(self, chapter_url):
        """Download one chapter and return ``(title, cleaned_text)``."""
        print('正在返回内容')
        html = self.resp_data(chapter_url)
        # Title text after the "第…章" prefix inside <h1>.
        title = ''.join(re.findall(r'<h1>.*?第.*?章(.*?)</h1>', html))
        print(title)
        content = "".join(re.findall(r'<div id="content" name="content">(.*?)</div>', html, re.S))
        # Turn paired &nbsp; entities into spaces, then strip remaining tags.
        content = re.sub(r'<.*?>', '', re.sub('&nbsp;&nbsp;', ' ', content))
        return title, content

    def write(self, title, content):
        """Append one numbered chapter to the output file."""
        self.num += 1
        title = '第{}章 '.format(self.num) + title
        print(title)
        with open(self.file_name, 'a+', encoding='utf-8') as f:
            # Newlines keep the heading and chapters apart; the original
            # wrote them back-to-back, gluing chapters together.
            f.write(title + '\n')
            f.write(content + '\n')

    def main(self):
        """Search for the novel, resolve its chapter list, download it all."""
        # The site expects the keyword GBK-encoded, then URL-quoted.
        keyword = parse.quote(self.name.encode('gbk'))
        search_url = 'https://www.52bqg.com/modules/article/search.php?searchkey={}'.format(keyword)
        resp = self.resp_data(search_url)

        try:
            # Normal path: pick the matching row from the results listing.
            book_url = self.find_url(resp)
            chapter_urls = [book_url + c for c in self.find(book_url)]
        except LookupError:
            # Single-hit searches redirect straight to the book page, so the
            # listing markup is absent.  Re-request to learn the final URL
            # and parse chapters from the page we already downloaded.
            print('小说查找错误，正再重定向小说地址')
            book_url = requests.get(search_url).url
            chapter_urls = [book_url + c for c in self.find_1(resp)]

        total = len(chapter_urls)
        for i, chapter_url in enumerate(chapter_urls):
            print('*' * 25 + '正在下载第{}章。还剩{}章'.format(i + 1, total - i - 1))
            # apply_async(...).get() blocks immediately, so this is serial;
            # kept as-is to preserve the original download order.
            title, content = pool.apply_async(self.find_content, (chapter_url,)).get()
            self.write(title, content)



if __name__ == '__main__':
    # Prompt for a novel title and start the download; output goes to
    # ./file/<name>.txt (the directory must already exist).
    name = input('请输入要下载的小说名：')
    Search(name).main()




