import random
from datetime import datetime

import requests
import re
from bs4 import BeautifulSoup
import lxml
from concurrent.futures import ThreadPoolExecutor, as_completed

from django.db import transaction
from django.db.models import Q

from lucky.models import *
from djangoApp.utils.enums import StatusCodeEnum
from lucky.util.book.progressTracker import *

def get_html(url):
    """
    Fetch a URL and parse the response body as HTML.

    :param url: absolute URL to request
    :return: BeautifulSoup document on HTTP 200; the integer ``400`` on any
             other status (legacy sentinel, kept for backward compatibility --
             callers that skip the check fail fast on attribute access)
    """
    # A timeout keeps a dead or stalled host from hanging the scraper
    # thread forever (requests has no default timeout).
    r = requests.get(url, timeout=30)
    if r.status_code == 200:
        return BeautifulSoup(r.text, 'lxml')
    # NOTE(review): an int sentinel is fragile -- no caller checks it.
    # Preserved here so the observable contract does not change.
    return 400


def save_booktype(typeName):
    """
    Ensure a book-category row exists, creating it when missing.

    Best-effort: database errors are logged, never raised.

    :param typeName: category name
    """
    try:
        EbookType.objects.get_or_create(name=typeName)
    except Exception as e:
        # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
        # catch Exception and include the cause in the best-effort log line.
        print(StatusCodeEnum.DB_ERR_UP_TYPE, str(e))


def save_bookstroe(srcid, typeid, bookno, bookname, bookstatus=None, bookimg=None, booknum=None,
                   auto=None, update=None, score=None, booksyno=None, bookdetail=None, clicktime=None, clicknum=None):
    """
    Upsert a row of the book-store table, keyed on (srcid, bookname).

    :param srcid: source-site id
    :param typeid: category id
    :param bookno: book number on the source site
    :param bookname: book title
    :param bookstatus: serialisation status
    :param bookimg: cover image URL
    :param booknum: word count
    :param auto: author name
    :param update: last-update timestamp
    :param score: rating
    :param booksyno: synopsis
    :param bookdetail: detail text
    :param clicktime: last click time
    :param clicknum: click count
    :return: ``[storeid, srcid, typeid]`` on success, otherwise ``None``
    """
    defaults = {'srcid': srcid, 'typeid': typeid, 'bookno': bookno, 'bookname': bookname,
                'bookstatus': bookstatus, 'bookimg': bookimg, 'booknum': booknum, 'auto': auto,
                'update': update, 'score': score, 'booksyno': booksyno, 'bookdetail': bookdetail,
                'clicktime': clicktime, 'clicknum': clicknum, }
    try:
        ebook, _created = EbookStore.objects.update_or_create(
            defaults=defaults, srcid=srcid, bookname=bookname)
        return [ebook.storeid, ebook.srcid, ebook.typeid]  # store id, site id, category id
    except Exception as e:
        # Was ``BaseException`` -- that also caught KeyboardInterrupt/SystemExit.
        print(str(e))
        return None


def save_chaptext(storeid, srcid, typeid, chapurl, chapternum, chaptertitle, content, bookName):
    """
    Upsert one chapter row, keyed on (storeid, srcid, typeid, chapternum).

    :param storeid: book-store id
    :param srcid: source-site id
    :param typeid: category id
    :param chapurl: canonical chapter URL
    :param chapternum: chapter number
    :param chaptertitle: chapter title
    :param content: chapter body text
    :param bookName: book title (logging only)
    :return: ``[row id, chapternum]`` on success, otherwise ``None``
    """
    print(bookName + '---' + chaptertitle)
    defaults = {'storeid': storeid, 'srcid': srcid, 'typeid': typeid, 'chapurl': chapurl,
                'chapternum': chapternum, 'chaptertitle': chaptertitle, 'content': content, }
    try:
        ebookchap, _created = EbookChapter.objects.update_or_create(
            defaults=defaults, storeid=storeid, srcid=srcid, typeid=typeid, chapternum=chapternum)
        # Bug fix: the original then called EbookChapter.objects.bulk_update()
        # with no arguments, which raised TypeError on every call and was
        # silently swallowed by the except clause -- removed.
        return [ebookchap.id, ebookchap.chapternum]
    except Exception as e:
        # Was ``BaseException`` -- narrowed so Ctrl-C still interrupts.
        print(str(e))
        return None


def bulk_save_book_text(data: list):
    """
    Bulk upsert chapter rows, matched on the (storeid, chapternum) pair.

    Rows already in the database get their ``content`` refreshed; the rest
    are created. Both writes run inside one transaction.

    :param data: list of dicts of EbookChapter field values; each dict must
                 contain at least ``storeid``, ``chapternum`` and ``content``
    """
    # Guard: with no items the Q() below stays empty, and filter(Q())
    # matches EVERY row -- a pointless full-table fetch. Bail out early.
    if not data:
        return

    # Materialise unsaved model instances from the raw dicts.
    instances = [EbookChapter(**item) for item in data]

    # OR together one (storeid, chapternum) predicate per incoming row.
    query = Q()
    for item in data:
        query |= Q(storeid=item['storeid'], chapternum=item['chapternum'])

    # Index the rows that already exist by their natural key for O(1) lookup.
    existing_rows = EbookChapter.objects.filter(query)
    existing_lookup = {(row.storeid, row.chapternum): row for row in existing_rows}

    # Split the incoming instances into updates and inserts.
    to_update = []
    to_create = []
    for instance in instances:
        match = existing_lookup.get((instance.storeid, instance.chapternum))
        if match is not None:
            match.content = instance.content  # only the text is refreshed
            to_update.append(match)
        else:
            to_create.append(instance)

    # One transaction so a failure leaves the chapter set unchanged.
    with transaction.atomic():
        if to_update:
            EbookChapter.objects.bulk_update(to_update, ['content'], batch_size=500)
        if to_create:
            EbookChapter.objects.bulk_create(to_create, batch_size=500)


def get_chap_text(srcurl, url, storeid, srcid, ztypeid, bookName, zindex):
    """
    Crawl a book's text starting from the given chapter page, following the
    "下一页" (next page) / "下一章" (next chapter) links until the end, then
    bulk-save every collected chapter.

    :param srcurl: site base URL, prepended to relative links
    :param url: relative URL of the first chapter page to crawl
    :param storeid: book-store id
    :param srcid: source-site id
    :param ztypeid: category id
    :param bookName: book title (logging only)
    :param zindex: chapter number to start counting from (normally 1)
    """
    data = []
    book_text = '\r'  # running text of the chapter currently being built
    next_url = srcurl + url
    index = zindex
    # The chapter title is embedded in the add-bookmark link's href,
    # e.g. "...,'Title')" -- compile once, reused every chapter.
    title_re = re.compile(r"'([^']*)'\)$")
    while True:
        soup = get_html(next_url)
        chapurl = soup.select('link[rel="canonical"]')[0]['href']
        # Concatenate every <p> of the page body into the chapter text.
        book_text += ''.join(p.text for p in soup.select('div #content p'))
        nav = soup.select('div #next_url')[0]  # renamed: ``next`` shadowed the builtin
        if '下一页' in nav.text:
            # Same chapter continues on the next page -- keep accumulating.
            next_url = srcurl + nav['href']
            continue
        # Chapter boundary reached ("下一章" or end of book): record it.
        chaptertitle = title_re.search(soup.select('a.btn-addbs')[0]['href'])[1]
        data.append({'storeid': storeid, 'srcid': srcid, 'typeid': ztypeid, 'chapurl': chapurl,
                     'chapternum': index, 'chaptertitle': chaptertitle, 'content': book_text, })
        if '下一章' in nav.text:
            # Next chapter exists -- reset the accumulator and continue.
            next_url = srcurl + nav['href']
            index += 1
            book_text = '\r'
        else:
            # No forward link ("没有了"): the book is finished.
            break
    bulk_save_book_text(data)
    print('书库id:' + str(storeid) + bookName + '运行结束')


def get_book(srcid, srcurl, type, bookUrl, bookName, auto):
    """
    Fetch a book's detail page, upsert the book into the store table and
    crawl its full chapter text.

    :param srcid: source-site id
    :param srcurl: site base URL
    :param type: category name (note: parameter shadows the ``type`` builtin)
    :param bookUrl: relative URL of the book's detail page
    :param bookName: book title
    :param auto: author name
    """
    # NOTE(review): this bare ``return`` disables the entire function --
    # everything below is unreachable dead code. It looks like a debugging
    # kill-switch; confirm the intent before deleting either the return or
    # the code below.
    return
    # Everything for one book lives on its detail page
    bookpage = get_html(srcurl + bookUrl)
    a = bookpage.select('div.fix p')

    ztype = re.split('别：', a[1].text)[-1]  # category name
    type, created = EbookType.objects.get_or_create(name=ztype)
    ztypeid = type.typeid
    zbookno = re.findall(r'\d+', a[0].select('a')[0]['href'])[-1]
    zstatus = re.split('态：', a[2].text)[-1]  # serialisation status
    zbookimg = bookpage.select('div.imgbox img')[0]['src']
    zbooknum = a[5].select('span')[0].text  # word count
    zupdate = datetime.strptime((re.split('新：', a[4].text)[-1])[:-1], '%Y-%m-%d %H:%M:%S')  # last-update time
    zbooksyno = bookpage.select('div.desc')[0].text
    clicknum = random.randint(10000, 100000)
    # Upsert the book into the store table
    idlist = save_bookstroe(srcid, ztypeid, zbookno, bookName, zstatus, zbookimg, zbooknum,
                            auto, zupdate, 0, zbooksyno, '', None, clicknum)
    if idlist != None:
        # Fetch every chapter, starting from the first chapter's URL
        url_ul = bookpage.select('ul.section-list')[1]
        url_one = url_ul.select('li a')[0]['href']
        # Crawl the whole book's text
        get_chap_text(srcurl, url_one, idlist[0], srcid, ztypeid, bookName, 1)


def get_booklist(srcid, srcurl, listurl):
    """
    Crawl the site's book-list pages, save every category name, then fetch
    each listed book concurrently in a thread pool.

    :param srcid: source-site id
    :param srcurl: site base URL
    :param listurl: relative path of the list page (e.g. '/booklist/')
    """
    typeNameList = []
    bookNameList = []
    autoNameList = []
    one_url = srcurl + listurl  # e.g. '/booklist/' or '/quanben/booklist/' (full-book list)
    soup = get_html(one_url)
    z = soup.select('.pagination a')  # pagination links
    lastfy = int(z[-1].text)  # number of the last list page
    # TODO: crawl all pages -- range(1, lastfy + 1); limited to page 1 for now.
    for i in range(1, 2):  # first collect the link of every book
        list_url = one_url + str(i) + '.html'
        bp = get_html(list_url)
        # All book rows on the current list page
        typeNameList += bp.select('.txt-list-row5 span.s1')  # category names
        bookNameList += bp.select('.txt-list-row5 span.s2 a')  # link + book title
        autoNameList += bp.select('.txt-list-row5 span.s4')  # authors

    # NOTE(review): this dedupes bs4 Tag objects (by object identity, not by
    # text), so identical category names are not necessarily collapsed here;
    # get_or_create inside save_booktype makes the duplicates harmless.
    typeNameSet = list(set(typeNameList))
    total_books = len(bookNameList)  # total number of books to fetch
    progress['total'] = total_books
    completed_threads = 0  # number of finished worker tasks
    for i in typeNameSet:
        # Strip everything except letters, digits, CJK characters and whitespace
        pattern = r'[^a-zA-Z0-9\u4e00-\u9fa5\s]'
        t = re.sub(pattern, '', i.text)
        save_booktype(t)

    # Done-callback: bump the shared counter each time a submitted task ends.
    # NOTE(review): done-callbacks run on worker threads, so ``+=`` on the
    # shared counter is not atomic -- the displayed count may drift under
    # contention; confirm whether exact progress numbers matter.
    def increment_counter():
        nonlocal completed_threads
        completed_threads += 1
        print(f"已完成 {completed_threads}/{total_books} 本书的获取")
        progress['completed'] = completed_threads

    # Fan out: fetch every book concurrently and persist it to the database.
    results = []
    with ThreadPoolExecutor() as pool:
        for j in range(len(bookNameList)):
            a = bookNameList[j]
            bookUrl = a['href']
            bookName = a.text
            auto = autoNameList[j].text
            type = typeNameList[j].text
            future = pool.submit(get_book, srcid, srcurl, type, bookUrl, bookName, auto)
            future.add_done_callback(lambda _: increment_counter())  # fires when the task completes
            results.append(future)
        # Wait for every task and surface any exception a worker raised.
        for future in as_completed(results):
            if future.done():  # as_completed only yields finished futures
                try:
                    result = future.result()  # re-raises any worker exception
                    print("线程成功完成：", result)
                    # handle successfully completed results here
                except Exception as e:
                    print("线程执行出错：", e)
            else:
                print("线程尚未完成")

def update_ebook(srcid, srcurl, listurl):
    """
    Re-crawl every serialised ("连载") book and append newly published
    chapters after the last chapter already stored.

    :param srcid: source-site id
    :param srcurl: site base URL
    :param listurl: unused here; kept for signature parity with get_booklist
    """
    # flat=True makes the queryset yield plain storeid values, not tuples.
    store_ids = EbookStore.objects.filter(bookstatus='连载').values_list('storeid', flat=True)
    # Fan out the per-book crawls; collect futures so the pool waits on exit.
    results = []
    with ThreadPoolExecutor(max_workers=8) as pool:
        for store_id in store_ids:
            # Latest stored chapter for this book, if any.
            last_chap = EbookChapter.objects.filter(storeid=store_id).order_by('-chapternum').first()
            if last_chap is None:
                continue
            soup = get_html(last_chap.chapurl)
            nav = soup.select('div #next_url')[0]  # renamed: ``next`` shadowed the builtin
            if '下一章' in nav.text:
                # The previously-last chapter now links forward -- crawl the
                # new chapters starting right after it.
                future = pool.submit(get_chap_text, srcurl, nav['href'], last_chap.storeid,
                                     srcid, last_chap.typeid, '', last_chap.chapternum + 1)
                results.append(future)
            # else: no new chapter for this book -- move on.
