from bs4 import BeautifulSoup
import requests
import random
import re
import time
import csv
import os
import threading
from flask import request
from flaskapp.util.Jwt import user_token_required
from flaskapp.util.Redprint import Redprint
from flaskapp.util.Result import Result

api = Redprint('booksOnline')
network_interval = 0.1  # seconds to wait between requests; auto-adjusted at runtime to avoid HTTP 503 throttling

save_path = r'static/onlineread'  # directory where scraped novel CSVs and cover images are stored
index_url = 'http://www.xbiqugu.net/'


# open_url: fetch a URL over HTTP and return its body decoded as text.
def open_url(url):
    """Fetch `url` and return its body decoded as UTF-8 text (errors ignored).

    Retries iteratively (the original recursed, risking a RecursionError on a
    long outage) while the server answers with a non-200 status. Each failure
    grows the module-level `network_interval` back-off, which is honored
    before the next attempt; each success shrinks it slowly back toward 0.1s.
    """
    global network_interval
    # Several desktop User-Agent strings; one is picked at random per request
    # to disguise the crawler's identity.
    user_agents = [
        'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 '
        'Safari/534.50',
        'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',
        'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)'
    ]
    while True:
        headers = {'User-Agent': random.choice(user_agents)}
        # timeout prevents a hung connection from blocking the crawl forever
        req = requests.get(url=url, headers=headers, timeout=30)
        if req.status_code == 200:
            break
        network_interval += 0.1
        print('（503 正在重试...）', round(network_interval, 2))
        time.sleep(network_interval)  # apply the back-off before retrying
    if network_interval > 0.1:
        network_interval *= 0.995  # decay back-off gradually after success
    return req.content.decode("UTF-8", "ignore")


# Fetch the category URLs from the site's homepage navigation bar.
def get_novelTypelUrl(indexUrl):
    """Return a dict mapping category name -> absolute category URL.

    Scrapes every <a> tag inside the homepage's `.nav` divs. The first two
    and trailing links are site chrome (home, rankings, ...), so only slots
    [2:8] — the six genuine novel categories — are kept.
    """
    html = open_url(indexUrl)
    soup = BeautifulSoup(html, "html.parser")
    anchors = []
    for nav in soup.select('.nav'):
        # find_all('a') rather than a regex: re.compile('a') matched every
        # tag whose NAME contains "a" (span, table, ...), which lack 'href'.
        anchors += nav.find_all('a')
    names = [a.text for a in anchors]
    urls = [indexUrl + a['href'] for a in anchors]
    return dict(zip(names[2:8], urls[2:8]))


# Collect every novel's URL within each category.
def get_novel(indexUrl):
    """Return a list of single-entry dicts: {category name: [novel URLs]}.

    For each category page, the novels are listed inside the first
    <div class="l"> element, one <a> per novel under each `.s2` cell.
    """
    typeUrls = get_novelTypelUrl(indexUrl)  # renamed: `dict` shadowed the builtin
    novelTypeList = []
    for key, value in typeUrls.items():
        html = open_url(value)
        soup = BeautifulSoup(html, "html.parser")
        container = soup.find_all('div', attrs={'class': "l"})[0]
        urlList = [a['href']
                   for s2 in container.select('.s2')
                   for a in s2.find_all('a')]
        novelTypeList.append({key: urlList})
    return novelTypeList


# Scrape the metadata of every novel in every category and persist it.
def get_novelInfo(indexUrl):
    """Scrape title/author/update-time/latest-chapter/intro for every novel.

    Side effects: creates `<save_path>/<category>图片` folders, downloads each
    cover via writeImg(), and saves each category's rows via writeNovelData().
    Returns a list of single-entry dicts {category: [novel-info dicts]}.
    (The original built this list but never returned it, and reused one
    shared dict across iterations so the list held N copies of the same
    object.)
    """
    global network_interval  # adjusted by open_url(); used as the crawl delay
    novelUrlTypeList = get_novel(indexUrl)
    novelTypeList = []
    for typeDict in novelUrlTypeList:
        # each element is a single-entry dict {category: [novel urls]}
        for novelType, noveUrlList in typeDict.items():
            # ensure the per-category cover-image folder exists
            path = os.path.join(save_path, str(novelType) + "图片")
            if not os.path.exists(path):
                os.makedirs(path)
                print(novelType + '创建成功！')
            else:
                print(novelType + '已存在！')
            novelList = []
            for novel in noveUrlList:
                html = open_url(novel)
                soup = BeautifulSoup(html, 'html.parser')
                # <h1> holds the title
                title = str(soup.find_all('h1')[0].text)
                # <div id="info"> holds author / update time / latest chapter
                pList = soup.find_all('div', id="info")[0].select('p')
                author = str(pList[0].text).replace('作    者：', '')
                updateTime = str(pList[2].text)
                newChapter = str(pList[3].text)
                # <div id="intro"> second <p> holds the synopsis
                pList = soup.find_all('div', id="intro")[0].select('p')
                novelIntro = str(pList[1].text)
                # <div id="fmimg"> first <img> is the cover
                imgList = soup.find_all('div', id='fmimg')[0].select('img')
                imgUrl = str(imgList[0]['src'])
                imgContent = requests.get(imgUrl).content
                writeImg(novelType, title, imgContent)
                novelDict = {
                    '标题': title,
                    '作者': author,
                    '更新时间': updateTime,
                    '最新章节': newChapter,
                    '小说介绍': novelIntro,
                    '链接': novel,
                }
                print('链接：', novel)
                novelList.append(novelDict)
                time.sleep(network_interval)  # throttle to avoid 503s
            # a fresh dict per category — not one shared instance
            novelTypeList.append({novelType: novelList})
            writeNovelData(novelType, novelList)  # persist this category to CSV
    return novelTypeList


# Save a novel's cover image to disk.
def writeImg(key, title, imgContent, base_dir=r'static/onlineread'):
    """Write raw image bytes to <base_dir>/<key>图片/<title>.jpg.

    `key` is the category name, `title` the novel title, `imgContent` the
    raw binary body of the cover-image HTTP response. The target directory
    (created by get_novelInfo) must already exist. The original wrote to a
    hard-coded developer-machine path (D:\\pythonWorkSpace\\work1\\), which
    broke on any other host and never matched the folders this module
    actually creates; `base_dir` defaults to the module's save location.
    """
    img_path = os.path.join(base_dir, key + '图片', title + '.jpg')
    with open(img_path, 'wb') as f:
        f.write(imgContent)


# Persist one category's scraped novel metadata to a CSV file.
def writeNovelData(key, novelList):
    """Write `novelList` (dicts keyed by the Chinese column names) to
    <save_path>/<key>.csv, header row included."""
    fieldnames = ['标题', '作者', '更新时间', '最新章节', '小说介绍', '链接']
    target = os.path.join(save_path, key + '.csv')
    # newline='' prevents the csv module from doubling line endings on Windows
    with open(target, 'w', encoding='UTF-8', newline='') as out:
        writer = csv.DictWriter(out, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(novelList)


def chapter_url(url):
    """Return the absolute URL of every chapter listed on a novel's index page.

    The page's <div id="list"> holds one <a> per chapter whose href is
    site-relative, so each is prefixed with the site origin.
    """
    soup = BeautifulSoup(open_url(url), 'html.parser')
    anchors = soup.find('div', id="list").select('a')
    base = 'http://www.xbiqugu.net'
    return [base + a['href'] for a in anchors]


def get_chapter_content(chapter_url):
    """Return the raw text of one chapter page, or None when the page has no
    <div id="content"> element."""
    soup = BeautifulSoup(open_url(chapter_url), 'html.parser')
    content_div = soup.find('div', id="content")
    return content_div.get_text() if content_div else None


def search_url_by_novel_name(novel_name):
    """Look up a novel's index-page URL by exact title across all cached CSVs.

    Scans every .csv in static/onlineread. Returns the URL string on the
    first title match, None when no row matches, or a Result.error payload
    when the cache directory or a file is unreadable. (The original listed
    the directory outside the try block, so a missing cache dir crashed
    instead of producing the 404 Result; it also shadowed `csv_file` with
    the open file handle and carried an unused `novel_info` dict.)
    """
    csv_path = None  # last path attempted, for the error message
    try:
        csv_names = [f for f in os.listdir('static/onlineread')
                     if f.lower().endswith('.csv')]
        for name in csv_names:
            csv_path = 'static/onlineread' + '/' + name
            with open(csv_path, mode='r', encoding='utf-8') as fh:
                for row in csv.DictReader(fh):
                    if row['标题'] == novel_name:
                        return row['链接']
        return None  # searched every file without a match
    except FileNotFoundError:
        print("文件未找到:", csv_path)
        return Result.error(404, 'no such novel')
    except Exception as e:
        print("发生错误:", e)
        return Result.error(500, 'Server Error')


@api.route('/getContent', methods=['POST'])
def onlineReading():
    """Return the text of chapter `page` of the novel named in the form data.

    Form fields: novelName (exact title), page (1-based chapter index).
    The original returned bare None on a miss, which makes Flask raise a
    TypeError; misses now produce an explicit error Result.
    """
    data = request.form
    novelName = data['novelName']
    page = int(data['page'])

    url = search_url_by_novel_name(novelName)
    # the lookup returns a URL string on success; anything else is a miss
    # or an error payload — NOTE(review): confirm Result.error's shape
    if not isinstance(url, str):
        return Result.error(404, 'no such novel')

    chapter_urls = chapter_url(url)
    if not 1 <= page <= len(chapter_urls):
        return Result.error(404, 'no such chapter')
    chapterContent = get_chapter_content(chapter_urls[page - 1])
    if chapterContent:
        return chapterContent
    return Result.error(404, 'chapter content not found')


# Return the cover-image URL for every cached novel.
@api.route('/getCoverURL', methods=['GET'])
def getCoverURLs():
    """Return {novel title: cover-image URL} for every .jpg under
    static/onlineread (recursively)."""
    STATIC_URL_PREFIX = 'http://127.0.0.1:5000/static/'
    jpg_urls = {}
    # Resolve the project-root static/onlineread directory: three levels up
    # from this module, alongside the flaskapp package.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    parent_dir_of_flaskapp = os.path.dirname(os.path.dirname(os.path.dirname(current_dir)))
    static_folder = os.path.join(parent_dir_of_flaskapp, 'static/onlineread/')

    if not os.path.exists(static_folder):
        print(f"The directory {static_folder} does not exist.")
        return Result.success({}), 404

    for root, dirs, files in os.walk(static_folder):
        for file in files:
            if file.lower().endswith('.jpg'):
                relative_path = os.path.relpath(os.path.join(root, file), static_folder)
                # Build the URL with forward slashes explicitly: os.path.join
                # would emit backslashes on Windows, producing broken URLs.
                url = STATIC_URL_PREFIX + 'onlineread/' + relative_path.replace(os.sep, '/')
                key = os.path.splitext(file)[0]  # title without the .jpg suffix
                jpg_urls[key] = url
    return Result.success(jpg_urls)


# Return cover URLs filtered by category.
@api.route('/getCoverURLByCategory', methods=['POST'])
def getCoverURLsByCategory():
    """Return {novel title: cover URL} for covers in the requested category.

    JSON body: {"category": <category prefix>}. A cover matches when its
    parent folder name ends with '<category>小说图片'.
    """
    STATIC_URL_PREFIX = 'http://127.0.0.1:5000/static/'
    jpg_urls = {}
    category = request.json.get('category')
    print(category)
    # Resolve the project-root static directory (three levels above this module).
    current_dir = os.path.dirname(os.path.abspath(__file__))
    parent_dir_of_flaskapp = os.path.dirname(os.path.dirname(os.path.dirname(current_dir)))
    static_folder = os.path.join(parent_dir_of_flaskapp, 'static')

    if not os.path.exists(static_folder):
        print(f"The directory {static_folder} does not exist.")
        return Result.success({}), 404

    target_suffix = category + '小说图片'
    for root, dirs, files in os.walk(static_folder):
        # the folder-name check is per-directory, so test it once per root
        if not os.path.basename(root).endswith(target_suffix):
            continue
        for file in files:
            if file.lower().endswith('.jpg'):
                relative_path = os.path.relpath(os.path.join(root, file), static_folder)
                # Join with '/' explicitly: os.path.join would produce
                # backslash URLs on Windows.
                url = STATIC_URL_PREFIX + relative_path.replace(os.sep, '/')
                print(url)
                key = os.path.splitext(file)[0]  # title without the .jpg suffix
                jpg_urls[key] = url
    return Result.success(jpg_urls)


# Return cover URLs for the "hottest" (最热) category.
@api.route('/getHottestCoverURL', methods=['GET'])
def hottestCoverURL():
    """Return {novel title: cover URL} for covers whose parent folder name
    ends with '最热小说图片'."""
    STATIC_URL_PREFIX = 'http://127.0.0.1:5000/static/'
    jpg_urls = {}
    category = '最热'
    print(category)
    # Resolve the project-root static directory (three levels above this module).
    current_dir = os.path.dirname(os.path.abspath(__file__))
    parent_dir_of_flaskapp = os.path.dirname(os.path.dirname(os.path.dirname(current_dir)))
    static_folder = os.path.join(parent_dir_of_flaskapp, 'static')

    if not os.path.exists(static_folder):
        print(f"The directory {static_folder} does not exist.")
        return Result.success({}), 404

    target_suffix = category + '小说图片'
    for root, dirs, files in os.walk(static_folder):
        # the folder-name check is per-directory, so test it once per root
        if not os.path.basename(root).endswith(target_suffix):
            continue
        for file in files:
            if file.lower().endswith('.jpg'):
                relative_path = os.path.relpath(os.path.join(root, file), static_folder)
                # Join with '/' explicitly: os.path.join would produce
                # backslash URLs on Windows.
                url = STATIC_URL_PREFIX + relative_path.replace(os.sep, '/')
                print(url)
                key = os.path.splitext(file)[0]  # title without the .jpg suffix
                jpg_urls[key] = url
    return Result.success(jpg_urls)


# Return cover URLs for the "recommended" (推荐) category.
@api.route('/getRecommendCoverURL', methods=['GET'])
def getRecommendCoverURL():
    """Return {novel title: cover URL} for covers whose parent folder name
    ends with '推荐小说图片'."""
    STATIC_URL_PREFIX = 'http://127.0.0.1:5000/static/'
    jpg_urls = {}
    category = '推荐'
    # Resolve the project-root static directory (three levels above this module).
    current_dir = os.path.dirname(os.path.abspath(__file__))
    parent_dir_of_flaskapp = os.path.dirname(os.path.dirname(os.path.dirname(current_dir)))
    static_folder = os.path.join(parent_dir_of_flaskapp, 'static')

    if not os.path.exists(static_folder):
        print(f"The directory {static_folder} does not exist.")
        return Result.success({}), 404

    target_suffix = category + '小说图片'
    for root, dirs, files in os.walk(static_folder):
        # the folder-name check is per-directory, so test it once per root
        if not os.path.basename(root).endswith(target_suffix):
            continue
        for file in files:
            if file.lower().endswith('.jpg'):
                relative_path = os.path.relpath(os.path.join(root, file), static_folder)
                # Join with '/' explicitly: os.path.join would produce
                # backslash URLs on Windows.
                url = STATIC_URL_PREFIX + relative_path.replace(os.sep, '/')
                print(url)
                key = os.path.splitext(file)[0]  # title without the .jpg suffix
                jpg_urls[key] = url
    return Result.success(jpg_urls)


# Return cached metadata for one online novel.
@api.route('/getBookInfo', methods=['POST'])
def getBookInfo():
    """Return author/update-time/latest-chapter/synopsis/link for a novel.

    JSON body: {"novelName": <exact title>}. The matching CSV's file stem
    (the category name) is passed as the second argument of the success
    Result. The original referenced `category` unbound when no row matched,
    so every miss raised NameError and was reported as a 500; a miss is now
    an explicit 404.
    """
    data = request.json
    novelName = data['novelName']
    # Resolve the project-root static/onlineread directory: three levels up
    # from this module, alongside the flaskapp package.
    current_dir = os.path.dirname(os.path.abspath(__file__))
    parent_dir_of_flaskapp = os.path.dirname(os.path.dirname(os.path.dirname(current_dir)))
    static_folder = os.path.join(parent_dir_of_flaskapp, 'static/onlineread/')
    csv_path = None  # last path attempted, for the error message
    try:
        csv_list = [f for f in os.listdir(static_folder)
                    if f.lower().endswith('.csv')]
        for file in csv_list:
            csv_path = os.path.join(static_folder, file)
            with open(csv_path, mode='r', encoding='utf-8') as fh:
                for row in csv.DictReader(fh):
                    if row['标题'] == novelName:
                        novel_info = {
                            # strip the site's "作    者：" prefix and padding
                            'author': row['作者'].strip().replace("作    者：", ""),
                            'updateTime': row['更新时间'],
                            'latestChapter': row['最新章节'],
                            'syn': row['小说介绍'].strip(),
                            'link': row['链接'],
                        }
                        category = os.path.splitext(file)[0]  # CSV stem == category
                        return Result.success(novel_info, category)
        return Result.error(404, 'no such novel')
    except FileNotFoundError:
        print("文件未找到:", csv_path)
        return Result.error(404, 'no such novel')
    except Exception as e:
        print("发生错误:", e)
        return Result.error(500, 'Server Error')


@api.route('/getBooksectionList', methods=['POST'])
def get_section():
    """Return the chapter titles of the novel whose index-page URL is posted.

    Form fields: url (the novel's index page). Titles come from the <a>
    tags inside the page's <div id="list"> element.
    """
    url = request.form['url']
    soup = BeautifulSoup(open_url(url), 'html.parser')
    anchors = soup.find('div', id="list").select('a')
    return [a.get_text() for a in anchors]
