import os

import requests, re
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from lxml import etree
from databases import HandleDb, db_configs

from time import time

# NOTE: every etree_xml.xpath(...) call returns a list, even for single matches

website_source_url = 'https://www.1qxs.com'


# Entry point
def start(spider_obj: dict):
    """Crawl one or more listing pages of www.1qxs.com and persist the books.

    params spider_obj {
        view_url:string      listing-page URL to crawl (required)
        book_author?:string  override author name for every collected book
        page_count=1:int     number of listing pages to crawl
    }

    Returns an error string when ``view_url`` is missing, otherwise ``None``.
    """
    # site root used to absolutize the relative links scraped below
    global website_source_url
    if 'view_url' not in spider_obj:
        return "缺少参数!"
    start_time = time()
    url = spider_obj.get('view_url')
    # Extra pages still to crawl beyond the first one (shared by the recursion below).
    page_count = (spider_obj.get('page_count') or 1) - 1

    def download_cover(cover_url: str, category_title: str, book_title: str) -> str:
        """Download a cover image into ./static/book/<category>/ and return its local path."""
        suffix = cover_url[cover_url.rfind('.'):]
        dir_path = f'./static/book/{category_title}'
        file_path = f'{dir_path}/{book_title}{suffix}'
        # idempotent; replaces the original try/except-pass around makedirs
        os.makedirs(dir_path, exist_ok=True)
        response = requests.get(cover_url)
        with open(file_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=256):
                f.write(chunk)
        return file_path

    def get_view(url: str):
        """Scrape one listing page: recurse into later pages first, then collect each book."""
        nonlocal page_count
        db = HandleDb(**db_configs.LOCALHOST_DB_FLASK_DEMO_CONFIG)
        response = requests.get(url)
        etree_xml = etree.HTML(response.text)
        # "next page" link of the pager
        next_pages = etree_xml.xpath('//div[@class="page"]/a[last()-2]/@href')
        if next_pages:
            next_page = next_pages[0]
            # The recursive call shares (and exhausts) the nonlocal page_count
            # budget, so deeper pages are collected before the current one.
            while page_count > 0:
                page_count -= 1
                page_start = time()
                get_view(website_source_url + next_page)
                print(
                    f"\033[1;31m{website_source_url + next_page}页面采集完成,用时{round(time() - page_start, 0)}秒\033[0m")
        # detail-page URL of every book on this listing page
        books_url = etree_xml.xpath('//div[@class="book"]//a/@href')
        # collected info for this page's books (directory url, title, target tables)
        current_page_book_info_list = []
        for b_u in books_url:
            book_info = requests.get(website_source_url + b_u)
            etree_xml1 = etree.HTML(book_info.text)
            # scrape the detail page fields (each xpath returns a list)
            book_titles = etree_xml1.xpath('//h1/text()')
            book_describes = etree_xml1.xpath("//div[@class='description']/text()")
            book_directory_urls = etree_xml1.xpath("//span[@class='title-right']/a/@href")
            book_states = etree_xml1.xpath("//div[@class='label']/span[contains(text(),'状态')]/text()")
            book_counts = etree_xml1.xpath("//div[@class='label']/span[contains(text(),'字数')]/text()")
            book_authors = etree_xml1.xpath("//div[@class='name']/span/text()") or []
            book_covers = etree_xml1.xpath("//div[@class='image']/img/@data-original")
            book_types = etree_xml1.xpath("//div[@class='label']//span[contains(text(),'分类')]/a/text()")
            book_tags = etree_xml1.xpath("//span[@class='tags']//a/text()")
            # BUG FIX: the original left book_type undefined when the page had
            # no 分类 label; fall back to the placeholder category instead.
            book_type = book_types[0] if book_types else '暂无类型'
            # NOTE(review): values are interpolated directly into SQL strings;
            # switch to parameterized queries if HandleDb supports them.
            categorys = db.find('category', '*', f'name = "{book_type}"')
            if len(categorys) == 0:
                # unknown category -> retry with the placeholder one
                book_type = '暂无类型'
                categorys = db.find('category', '*', f'name = "{book_type}"')
            if isinstance(categorys, list) and categorys:
                category_id = categorys[0]['id']
                category_title = categorys[0]['name']
                table_name_content = categorys[0]['table_name_content']
                table_name_directory = categorys[0]['table_name_directory']
            else:
                # category lookup failed ('error' or still empty): skip this page
                return
            # an explicitly passed author wins over the scraped one
            if spider_obj.get('book_author'):
                book_author = spider_obj.get('book_author')
            elif book_authors:
                # drop the trailing 3 characters of the scraped author text
                # (presumably a suffix after the name — TODO confirm)
                book_author = book_authors[0][0:-3]
            else:
                book_author = '暂无作者!'
            if book_describes and book_titles:
                book_title = book_titles[0]
                # strip ideographic spaces / rebrand the site watermark
                book_describe = book_describes[0].strip().replace(u'\u3000', u'').replace('一七小说', '一八小说')
                books = db.find('book', '*', f'book_title = "{book_title}" and book_author = "{book_author}"')
                # only insert books we do not already have
                if not len(books):
                    book_dict = {
                        'book_title': book_title, 'book_describe': book_describe,
                        'source_server': website_source_url,
                        'book_author': book_author,
                        'category_id': category_id
                    }
                    if book_states:
                        book_dict['state'] = book_states[0].split("：")[1]
                    if book_counts:
                        book_dict['count'] = book_counts[0].split("：")[1]
                    if book_tags:
                        # NOTE: this replace uses a JS-style regex literal and is a
                        # no-op on normal tag text; kept for byte-compatibility
                        book_dict['book_tags'] = ','.join(book_tags).replace(r"\n\s*/g", r'')
                    if book_covers:
                        # download the cover once (the original downloaded it twice,
                        # and even for books that already existed)
                        file_path = download_cover(book_covers[0], category_title, book_title)
                        book_dict['image_url'] = '/admin' + file_path[1:]
                    result = db.add('book', book_dict)
                else:
                    print(f"{book_title}---已存在!")
                    result = ''
                if book_directory_urls and result != 'error':
                    current_page_book_info_list.append({
                        "directory_url": book_directory_urls[0],
                        "title": book_title,
                        "table_name_directory": table_name_directory,
                        "table_name_content": table_name_content
                    })
        # hand this page's directory URLs to the worker pool
        current_page_book_info_list.reverse()
        start_thread_tasks(current_page_book_info_list, 10)

    get_view(url)
    # green output: overall timing
    print(f"\033[1;32m任务结束:共耗时{round(time() - start_time, 2)}秒\033[0m")


# Collect one whole book: its directory (chapter list) and chapter contents.
def task_fun1(dict_):
    """Consume one entry built by ``start`` and scrape the book it describes.

    ``dict_`` carries ``directory_url``, ``title``, ``table_name_directory``
    and ``table_name_content`` (all popped — the dict is mutated).
    """
    book_directory_url = dict_.pop('directory_url')
    book_title = dict_.pop('title')
    table_name_directory = dict_.pop('table_name_directory')
    table_name_content = dict_.pop('table_name_content')
    global website_source_url

    # wall-clock for the final report
    start_time = time()

    # Chapter-content handler: follows the "next" links iteratively.
    # (The original recursed once per chapter, which can exceed the recursion
    # limit on long books and leaked one DB handle per chapter.)
    def content_func(book_content_url, book_id):
        while book_content_url:
            db = HandleDb(**db_configs.LOCALHOST_DB_FLASK_DEMO_CONFIG)
            try:
                book_content_view = requests.get(website_source_url + book_content_url)
                etree_xml3 = etree.HTML(book_content_view.text)
                # URL of the next chapter page, if any
                book_next_content_urls = etree_xml3.xpath("//div[@class='next']/a/@href")
                # title of the current chapter page
                book_content_titles = etree_xml3.xpath("//h1/text()")
                if not book_content_titles:
                    print("找不到标题")
                    return
                # drop the trailing 5 characters of the <h1> text
                # (presumably a fixed suffix — TODO confirm)
                book_content_title = book_content_titles[0][:-5]
                result = db.find(table_name_directory, 'id',
                                 f'title like "%{book_content_title}%" and book_id={book_id}')
                if result == 'error' or len(result) != 1:
                    print(book_content_titles[0])
                    print("匹配不到目录!!!")
                    return
                # directory id the chapter content belongs to
                directory_id = result[0]['id']
                # chapter body: every node of the content div except the first
                book_content_contents = etree_xml3.xpath("//div[@class='content']/node()[position()>1]")
                print(book_title + '-' + book_content_title + ' 采集中...')
                book_contents = ''.join(f'<p>{node.text}</p>' for node in book_content_contents)
                # strip ideographic spaces / rebrand the site watermark
                book_contents = book_contents.replace(u'\u3000', u'').replace('一七小说', '一八小说')
                result = db.find(table_name_content, '*',
                                 f'content = "{book_contents}" and title = "{book_content_titles[0]}" and directory_id = "{directory_id}"')
                if isinstance(result, list) and len(result) == 0:
                    result = db.add(table_name_content,
                                    {"content": book_contents, "title": book_content_titles[0],
                                     "directory_id": directory_id})
                    if result == 'error':
                        print("添加内容时错误!")
                else:
                    print("内容...---已存在!")
            finally:
                db.close()
            book_content_url = book_next_content_urls[0] if book_next_content_urls else None

    db = HandleDb(**db_configs.LOCALHOST_DB_FLASK_DEMO_CONFIG)
    try:
        # directory (chapter list) page of the book
        book_directory_info = requests.get(website_source_url + book_directory_url)
        etree_xml2 = etree.HTML(book_directory_info.text)
        # all chapter titles of the book
        book_directory_texts = etree_xml2.xpath("//div[@class='list']//ul//li//p/text()")
        result = db.find('book', where=f'book_title="{book_title}"')
        # guard against both a query error and a missing book row
        if result != 'error' and result:
            book_id = result[0]['id']
            with ThreadPoolExecutor(3) as threadPool:
                for book_directroy in book_directory_texts:
                    # BUG FIX: the original invoked task_add_directory(...) inline
                    # and submitted its return value; pass the callable + args so
                    # the work actually runs on the pool.
                    threadPool.submit(task_add_directory, book_directroy, table_name_directory, book_id)
            # URL of the book's first chapter
            book_content_urls = etree_xml2.xpath("//div[@class='list']//ul//li[position()<2]/a/@href")
            if book_content_urls:
                content_func(book_content_urls[0], book_id)
    finally:
        # release resources even when scraping raises
        db.close()

    print(f"\033[1;31m{book_title}采集完成,用时{round(round(time() - start_time, 0) / 60)}分钟\033[0m")


# NOTE(original): with very many directories multiprocessing may be needed,
# otherwise CPU usage can hit 100%.
# Insert one book-directory (chapter title) row.
def task_add_directory(book_directroy, table_name_directory, book_id):
    """Add ``book_directroy`` to ``table_name_directory`` for ``book_id`` unless it exists.

    ``db.find`` is expected to return a list of rows or the string ``'error'``.
    """
    db = HandleDb(**db_configs.LOCALHOST_DB_FLASK_DEMO_CONFIG)
    try:
        book_directorys = db.find(table_name_directory, '*',
                                  f'book_id = {book_id} and title = "{book_directroy}"')
        if book_directorys == 'error':
            print("发生错误!")
        elif isinstance(book_directorys, list) and len(book_directorys) == 0:
            # not present yet -> insert
            result = db.add(table_name_directory, {"book_id": book_id, "title": book_directroy})
            if result == 'error':
                print("发生错误!")
        else:
            print(f"目录{book_directroy}---已存在!")
    finally:
        # BUG FIX: the original never closed this handle (leak; the sibling
        # functions all call db.close()).
        db.close()


# Kick off worker threads, one collection job per book.
def start_thread_tasks(directory_and_title, workers: int = 10):
    """Submit every entry of *directory_and_title* to a pool running ``task_fun1``."""
    with ThreadPoolExecutor(workers) as pool:
        for entry in directory_and_title:
            print(entry['title'] + "开始采集!")
            pool.submit(task_fun1, entry)


if __name__ == "__main__":
    # BUG FIX: resolved an unmerged git conflict that made the file a
    # SyntaxError. HEAD crawled listing page 3, the incoming branch page 2;
    # HEAD is kept.
    start({"view_url": "https://www.1qxs.com/all/1_0_0_0_0_3.html"})

    # Improvement notes (original author)
    """
        改进,先开启多线程添加目录,目录添加完毕,则添加内容，。
    """
