# from bs4 import BeautifulSoup
# import requests
# import random
# import re
# import time
# import csv
# import os
# import threading
# from flask import request
# from flaskapp.util.Jwt import user_token_required
# from flaskapp.util.Redprint import Redprint
# from flaskapp.util.Result import Result
#
# api = Redprint('booksOnline')
# network_interval = 0.1  # 联网间隔，自动调整避免503
#
# save_path = r'static/onlineread'  # 指定保存路径
# index_url = 'http://www.xbiqugu.net/'
#
#
# #open_url 函数的作用是通过发送 HTTP 请求获取指定 URL 的内容
# def open_url(url):
#     global network_interval
#     header = [
#         'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 '
#         'Safari/534.50',
#         'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
#         'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
#         'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)'
#     ]  #设置多个User-Agent存放在header列表中，用于伪装爬虫身份
#     header = {'User-Agent': str(header[random.randint(0, 3)])}
#     req = requests.get(url=url, headers=header)
#     if req.status_code != 200:
#         network_interval += 0.1
#         print('（503 正在重试...）', round(network_interval, 2))
#         return open_url(url)
#     if network_interval > 0.1:
#         network_interval *= 0.995
#     response = req.content
#     response_decode = response.decode("UTF-8", "ignore")
#     return response_decode
#
#
# #获取首页各类小说URL
# def get_novelTypelUrl(indexUrl):
#     html = open_url(indexUrl)  #调用open_url()函数传入url获取首页内容
#     soup = BeautifulSoup(html, "html.parser")  #解析网页
#     urlList = []
#     textList = []
#     aList = []
#     for novels in soup.select('.nav'):  #select出class=nav的div元素
#         aList += novels.find_all(re.compile('a'))
#     for novels in aList:
#         text = novels.text
#         textList.append(text)
#         novels = novels['href']
#         url = indexUrl + novels
#         urlList.append(url)
#     textList = textList[2:8]
#     urlList = urlList[2:8]
#     dict = {}
#     for i in range(len(urlList)):
#         dict[textList[i]] = urlList[i]  #key值为第i个文本（小说类型），value为第i个小说类型url
#     return dict  #返回字典，即存储各个类型的url（如玄幻小说：网址）
#
#
# #获取各类小说里的每一本小说url
#
# def get_novel(indexUrl):
#     dict = get_novelTypelUrl(indexUrl)  #调用get_novelTypelUrl()传入首页url，返回各类型的url的字典
#     novelTypeList = []
#     for key, value in dict.items():  #遍历字典内的key和value值，key值即字典
#         html = open_url(value)  #调用open_url()获取类型小说URL的内容，传入value
#         soup = BeautifulSoup(html, "html.parser")  #解析网页
#         novelList = []
#         typeDict = {}
#         urlList = []
#         for s2 in soup.find_all('div', attrs={'class': "l"})[0].select(
#                 '.s2'):  #遍历查找div中class=l的第一个元素，在里面再选类名为s2的元素返回的列表
#             novelList += s2.find_all('a')  #从s2元素中选出a标签返回的列表拼接到novelList列表中
#         for url in novelList:  #遍历novelList列表中的a标签
#             url = url['href']  #获取a标签内的href属性重新赋值到url
#             urlList.append(url)  #将获取的小说url添加到列表中
#         typeDict[key] = urlList  #key值为小说的类型，value为存取该类型的所有小说url的列表
#         novelTypeList.append(typeDict)  #将该字典添加到小说类型列表中
#     return novelTypeList  #返回该列表
#
#
# #获取小说信息
# def get_novelInfo(indexUrl):
#     global network_interval  #需要操作函数外面的网络时间间隔变量
#     novelUrlTypeList = get_novel(indexUrl)  #存取各类的每一本小说的url
#     novelTypeList = []
#     novelTypeDict = {}
#     for dict in novelUrlTypeList:  #遍历存取各类小说的列表，元素是字典（key是类型，value是该类型全部小说的列表）
#         noveUrlList = []
#         novelType = ''
#         novelList = []
#         for key, value in dict.items():  #遍历一类小说的字典key，value，即例如存取所有玄幻小说的列表
#             novelType = key
#             noveUrlList = value
#         path = 'D:\pythonWorkSpace\work1\\' + str(novelType) + "图片"  #写好一个路径
#         isExists = os.path.exists(path)  #判断该路径下有无文件夹novelType（小说类型名，用来存取名字）
#         if not isExists:  #如果不存在，创建该文件夹
#             os.makedirs(path)
#             print(novelType + '创建成功！')
#         else:  #存在，提示已经创建文件夹
#             print(novelType + '已存在！')
#         for novel in noveUrlList:  #遍历存取该类型每一部小说的url
#             novelDict = {}
#             html = open_url(novel)  #调用open_url（）返回url的内容
#             soup = BeautifulSoup(html, 'html.parser')  #解析网页内容
#             title = str(soup.find_all('h1')[0].text)  #查找h1标签的第一个元素的text，即是标题
#             pList = soup.find_all('div', id="info")[0].select('p')  #查找div，id为info的第一个元素，select其中的p元素，返回存取p元素的列表
#             author = str(pList[0].text).replace('作    者：', '')  #p元素第一个的text即是作者，但是需要切割掉作者名字前的作者多余字符串
#             updateTime = str(pList[2].text)  #plist的第三个元素的text即最新更新时间
#             newChapter = str(pList[3].text)  #plist的第四个元素的text即最新章节
#             pList = soup.find_all('div', id="intro")[0].select('p')  #查找div，id为intro的第一个元素，查找其中的p元素返回列表
#             novelIntro = str(pList[1].text)  #pList的第二个元素的text即是小说的简介
#             imgList = soup.find_all('div', id='fmimg')[0].select('img')  #查找div，id为fmimg的第一个元素，select出img元素返回列表
#             imgUrl = str(imgList[0]['src'])  #图片列表中的第一个img元素即是小说的简介图片，其中的src属性即是url
#             imgContent = requests.get(imgUrl).content  #获取图片链接的二进制内容
#             writeImg(key, title, imgContent)
#             novelDict['标题'] = title
#             novelDict['作者'] = author
#             novelDict['更新时间'] = updateTime
#             novelDict['最新章节'] = newChapter
#             novelDict['小说介绍'] = novelIntro
#             novelDict['链接'] = novel
#             print('链接：', novel)
#             novelList.append(novelDict)
#             time.sleep(network_interval)
#         novelTypeDict[novelType] = novelList
#         novelTypeList.append(novelTypeDict)  #再把字典添加到列表中
#         writeNovelData(novelType, novelList)  #将爬取的一类型的全部小说保存到本地
#
#
# #保存小说图片
# def writeImg(key, title, imgContent):  #imgContent传入的是小说url的content二进制形式
#     with open('D:\\pythonWorkSpace\\work1\\' + key + '图片\\' + title + '.jpg', 'wb') as f:  #wb方式打开，存取到对应的title.jpg里面
#         f.write(imgContent)  #将图片以二进制的方式写到本地
#
#
# #保存小说简介到csv
# # def writeNovelData(key, novelList):
# #     with open(key + '.csv', 'w', encoding='UTF-8', newline='') as f:  #以w方式打开csv文件
# #         writer = csv.DictWriter(f, fieldnames=['标题', '作者', '更新时间', '最新章节', '小说介绍',
# #                                                '链接'])  #创建一个csv对象，头为其中的标题，作者等
# #         writer.writeheader()  #写入csv头
# #         for each in novelList:  #循环小说列表
# #             writer.writerow(each)  #将其每一本小说写入csv中
# def writeNovelData(key, novelList):
#     file_path = os.path.join(save_path, key + '.csv')  # 构建完整的文件路径
#     with open(file_path, 'w', encoding='UTF-8', newline='') as f:  # 以w方式打开csv文件
#         writer = csv.DictWriter(f, fieldnames=['标题', '作者', '更新时间', '最新章节', '小说介绍',
#                                                '链接'])  # 创建一个csv对象，头为其中的标题，作者等
#         writer.writeheader()  # 写入csv头
#         for each in novelList:  # 循环小说列表
#             writer.writerow(each)  # 将其每一本小说写入csv中
#
#
# def chapter_url(url):
#     html = open_url(url)  #返回url对应的content内容
#     soup = BeautifulSoup(html, 'html.parser')  #将本地的html文件转化为beautifulSoup对象，解析html
#     charpterUrlList = []
#     charpterurl = soup.find('div', id="list").select('a')  #查找网页内的div标签，id为list的第一个元素，从中选出多个a标签返回列表
#     for i in charpterurl:  #遍历a标签列表
#         i = i['href']  #a标签的href属性重新赋值给i
#         trueUrl = 'http://www.xbiqugu.net' + i  #小说的url拼接上章节url（不完整）即可就是有效的章节url地址
#         charpterUrlList.append(trueUrl)  #将正确的章节url地址添加到列表中
#     return charpterUrlList  #返回一本小说的存储所有章节的url
#
#
# def get_chapter_content(chapter_url):
#     html = open_url(chapter_url)  # 返回url对应的content内容
#     soup = BeautifulSoup(html, 'html.parser')  # 将本地的html文件转化为beautifulSoup对象，解析html
#     # 查找章节内容所在的标签，根据具体网页结构进行调整
#     chapter_content_tag = soup.find('div', id="content")
#     if chapter_content_tag:
#         chapter_content = chapter_content_tag.get_text().strip()  # 提取章节内容并去除首尾空白字符
#         return chapter_content
#     else:
#         return None
#
#
#
# # @user_token_required(admin_required=False)
# def onlineReading(novelName,page,category):
#     data = request.form
#     novelName = data['novelName']
#     page = int(data['page'])
#     category = data['category']
#     url = search_url_by_novel_name(novelName, category)
#
#     chapter_urls = chapter_url(url)
#     chapterContent = get_chapter_content(chapter_urls[page - 1])
#     if chapterContent:
#         print(chapterContent)
#         return chapterContent
#
#     else:
#         return None
#
#
# def search_url_by_novel_name(novel_name, category):
#     if category == 'xh':
#         csv_file = 'D:\pythonWorkSpace\work1\玄幻小说.csv'
#     elif category == 'xz':
#         csv_file = 'D:\pythonWorkSpace\work1\修真小说.csv'
#     elif category == 'wy':
#         csv_file = 'D:\pythonWorkSpace\work1\网游小说.csv'
#     elif category == 'kh':
#         csv_file = 'D:\pythonWorkSpace\work1\科幻小说.csv'
#     elif category == 'ds':
#         csv_file = 'D:\pythonWorkSpace\work1\都市小说.csv'
#     elif category == 'cy':
#         csv_file = 'D:\pythonWorkSpace\work1\穿越小说.csv'
#     else:
#         csv_file = ''
#     try:
#         with open(csv_file, 'r', encoding='utf-8') as file:
#             reader = csv.DictReader(file)
#             for row in reader:
#                 if row['标题'] == novel_name:
#                     return row['链接']
#     except FileNotFoundError:
#         print("文件未找到:", csv_file)
#     except Exception as e:
#         print("发生错误:", e)
#
#     return None
#
#
# # def start_spider():
# #     onlineReading('星辰之主',5,'kh')
# #     # get_novelInfo(index_url)
# #
# #
# # if __name__ == "__main__":
# #     start_spider()
