# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import requests

# url = "https://588ku.com/so-tuku/zhongqiu.html"
# # url='https://588ku.com/so-sucai/zhongqiu.html'
# # payload = {}
# headers = {
#   'Cookie': '0dea95bdb8093eb2d78be190b8fccf0f=%228-1%22; _var_search_from=%22pic%22; adIssem=0; all_pic_search_words=%5B%22zhongqiu%22%5D; bt_guid=%22adf4d648674c180728ecf4a133be5023%22; f534b0f448272f9dd53a66a49c3ac167=%220a35a8a8059a99fb4f9570dd8c0a57c7%22; keyword=%22%5Cu4e2d%5Cu79cb%22; location=99; no_login_pv=2; qk_host=588ku.com; referer=%22%5C%2F%5C%2F588ku.com%5C%2Fso-tuku%5C%2Fzhongqiu.html%22; search:last:keyword=%22%5Cu4e2d%5Cu79cb%22; source_url=588ku.com',
#  'user-agent':'Mozilla/5.0(Windows NT 10.0;Win64;x64) AppleWebKit/537.36 (KHTML,like Gecko)Chrome/127.0.0.0Safari/537.36'
# }
#
# html = requests.get( url, headers=headers, data=payload)
#
# # 使用BeautifulSoup将html页面放进一个筛子里，取出需要的信息
# soup=BeautifulSoup(html.text,'html.parser')
# items=soup.find("div",class_="clearfix data-list dataList V-maronyV1 Vmarony J_ViewCards marony-horizontal").find_all('img')
# urls=[]
# for item in items:
#     url=item['data-original']
#     url='https:'+url
#     urls.append(url)
# num=1
# for url in urls:
#     with open(f'E:/pic/AA/{num}.jpg','wb')as f:
#         f.write(requests.get(url, headers=headers).content)
#         num=num+1

#------------------------------------------------



# url='https://588ku.com/so-sucai/zhongqiu.html'
# payload = {}
# headers = {
#   'Cookie': '0dea95bdb8093eb2d78be190b8fccf0f=%228-1%22; _var_search_from=%22pic%22; adIssem=0; all_pic_search_words=%5B%22zhongqiu%22%5D; bt_guid=%22adf4d648674c180728ecf4a133be5023%22; f534b0f448272f9dd53a66a49c3ac167=%220a35a8a8059a99fb4f9570dd8c0a57c7%22; keyword=%22%5Cu4e2d%5Cu79cb%22; location=99; no_login_pv=2; qk_host=588ku.com; referer=%22%5C%2F%5C%2F588ku.com%5C%2Fso-tuku%5C%2Fzhongqiu.html%22; search:last:keyword=%22%5Cu4e2d%5Cu79cb%22; source_url=588ku.com',
#  'user-agent':'Mozilla/5.0(Windows NT 10.0;Win64;x64) AppleWebKit/537.36 (KHTML,like Gecko)Chrome/127.0.0.0Safari/537.36'
# }
#
# html = requests.get( url, headers=headers, data=payload)
#
# # 使用BeautifulSoup将html页面放进一个筛子里，取出需要的信息
# soup=BeautifulSoup(html.text,'html.parser')
#
# items=soup.find_all('img',class_='lazy')
#
#
#
# # print(items)
# urls=[]
# for item in items:
#     print(item)
#     url = item['data-original']
#     # if item['class']==["lazy"]:
#     #
#     # if item['class']==["img-show"]:
#     #     url=item['src']
#     url='https:'+url
#     urls.append(url)
# print(urls)
# num=1
# for url in urls:
#     with open(f'E:/pic/BB/{num}.jpg','wb')as f:
#         f.write(requests.get(url, headers=headers).content)
#         num=num+1
#----------------------------------------------------------------------------



import requests,concurrent
from bs4 import BeautifulSoup
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ThreadPoolExecutor


def down_txt(url):
    """Download one chapter page and save its text as <title>.txt.

    Fetches *url*, extracts the chapter title (<div class="bookname"><h1>)
    and the chapter body (<div id="content">), then writes the body to
    E:/TXT//txt/<title>.txt.  Relies on the module-level ``headers`` dict
    defined in the ``__main__`` section.
    """
    # BUG FIX: the second positional argument of requests.get() is `params`,
    # not `headers` -- pass the header dict by keyword so the UA is actually sent.
    html = requests.get(url, headers=headers).text
    # Parse the chapter page.
    soup = BeautifulSoup(html, 'html.parser')
    # Chapter title tag.
    title = soup.find('div', class_="bookname").find('h1')
    # Chapter body tag.
    content = soup.find('div', id='content')
    # BUG FIX: originally the file was written even when parsing failed,
    # producing a bogus "None.txt" / TypeError.  Skip such pages instead.
    if not (title and content):
        print(f'parse failed, skipping: {url}')
        return
    title = title.get_text().strip()
    content = content.get_text()
    # One chapter per txt file.  Explicit utf-8 so non-ASCII text can't crash
    # the write on platforms whose default encoding is narrower (e.g. gbk).
    with open(f"E:/TXT//txt/{title}.txt", 'w', encoding='utf-8') as f:
        f.write(content)
        print(title + '下载完成..........')



if __name__ == "__main__":
    starttime=datetime.now()
    url='https://www.bagebbb.com/2_2537/'
    headers={"user-agent":"Mozilla/5.0(Windows NT 10.0;Win64;x64) AppleWebKit/537.36 (KHTML,like Gecko)Chrome/127.0.0.0Safari/537.36"}
    #调接口拿到网页
    html=requests.get(url,headers).text
    # 使用BeautifulSoup将html页面放进一个筛子里，取出需要的信息
    soup=BeautifulSoup(html,'html.parser')
    #从筛子中取出小说的url信息
    items=soup.find("div",id="list").find_all("a")
    #定义一个空列表，进行接收全部的url
    urls=[]
    #遍历url，拼接成完整可直接访问的url
    for item in items:
        url=item['href']
        url='https:'+url
        urls.append(url)
    # print(urls)
    # url = 'https://www.bagebbb.com/2_2537/27578171.html'

    # 遍历完整的url，调用下载方法，进行下载
    # for url in urls:
    #     down_txt(url)

    with ThreadPoolExecutor(max_workers=40) as exe:
        for url in urls:
            exe.submit(down_txt, url)
    endtime=datetime.now()
    print(f'本次下载共花费了{(endtime-starttime).seconds}秒')