import requests, os
from databases import db_configs, HandleDb
from lxml import etree
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor

# --- One-off utility: dump CREATE TABLE statements for every category row ---
# NOTE: the exit() at the end halts the module here on purpose, so the spider
# functions defined below are never reached while this section is active.
db = HandleDb(**db_configs.LOCALHOST_DB_FLASK_DEMO_CONFIG)
directory_sql_parts = []  # CREATE TABLE statements for the directory tables
content_sql_parts = []    # CREATE TABLE statements for the content tables
for row in db.find('category'):
    directory_table = row['table_name_directory']
    content_table = row['table_name_content']
    directory_sql_parts.append(f"""
    CREATE TABLE {directory_table} (
  `id` int NOT NULL AUTO_INCREMENT,
  `title` varchar(255) DEFAULT NULL,
  `book_id` int DEFAULT NULL,
  PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=296555 DEFAULT CHARSET=utf8mb3 MAX_ROWS=1000000000 AVG_ROW_LENGTH=15000 ROW_FORMAT=DYNAMIC;""")
    content_sql_parts.append(f"""
    CREATE TABLE {content_table} (
  `id` int NOT NULL AUTO_INCREMENT,
  `title` varchar(255) NOT NULL COMMENT '标题',
  `content` text COMMENT '内容',
  `directory_id` int DEFAULT NULL COMMENT '目录id',
  PRIMARY KEY (`id`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=92341 DEFAULT CHARSET=utf8mb3 MAX_ROWS=1000000000 AVG_ROW_LENGTH=15000 ROW_FORMAT=DYNAMIC;
    """)
with open('../../databases/create_table_directory.sql', 'w', encoding='utf-8') as f:
    f.write(''.join(directory_sql_parts))
with open('../../databases/create_table_content.sql', 'w', encoding='utf-8') as f:
    f.write(''.join(content_sql_parts))
exit()
def start(spider_obj: dict):
    """Crawl one category listing on toopic.cn and dispatch image downloads.

    params spider_obj {
        view_url: str        listing page URL (required)
        category_title: str  category name (e.g. 风景), defaults to '默认'
        part_title: str      part/section name (e.g. 电脑壁纸), defaults to '默认'
        page_count: int      how many pages to crawl, defaults to 2
    }

    Returns an error message string on bad input / unknown part, else None.
    """
    # Root of the site; image URLs in the listing are relative to it.
    website_source_url = 'https://www.toopic.cn'
    if 'view_url' not in spider_obj:
        print('传入页面地址')
        return '传入页面地址!'
    view_url = spider_obj.get('view_url')
    category_title = spider_obj.get('category_title', '默认')
    part_title = spider_obj.get('part_title', '默认')
    page_count = spider_obj.get('page_count', 2)

    db = HandleDb(**db_configs.LOCALHOST_DB_GALLERY_CONFIG)
    # Look up the part; it must already exist.
    # NOTE(review): titles are interpolated straight into the SQL condition --
    # harmless for the hard-coded values used in __main__, but injectable if
    # this is ever fed external input. Parameterize once HandleDb supports it.
    result = db.find('part', '*', f'title = "{part_title}"')
    if len(result) and result != 'error':
        part_id = result[0]['id']
    else:
        print("没有此栏目!")
        return "没有此栏目"

    # Look up the category; create it on first use.
    result = db.find('category', '*', f'title = "{category_title}" and part_id = {part_id}')
    if len(result) and result != 'error':
        category_id = result[0]['id']
    else:
        db.add('category', {"title": category_title, "part_id": part_id})
        result = db.find('category', '*', f'title = "{category_title}" and part_id = {part_id}')
        category_id = result[0]['id']

    img_info_list = []
    # Iterative paging. The original recursed once per page, which risks a
    # RecursionError for large page_count; a plain loop fetches the same pages
    # (1..page_count) and appends their results in the same ascending order.
    for page in range(1, page_count + 1):
        try:
            # timeout so a stalled server cannot hang the crawl forever
            response = requests.get(url=view_url, params={"page": page}, timeout=30)
        except requests.RequestException:
            continue
        if response.status_code != 200:
            continue
        etree_xml = etree.HTML(response.text)
        # Image links, titles and resolutions are parallel attribute lists.
        img_urls: list = etree_xml.xpath("//div[@class='bd']//img/@data-original")
        img_titles: list = etree_xml.xpath("//div[@class='bd']//img/@title")
        img_sizes: list = etree_xml.xpath("//div[@class='bd']//img/@data-resolution")
        if not img_urls or not img_titles or not img_sizes:
            print("没有数据!")
            continue
        img_info_list.append({
            "img_urls": img_urls,
            "img_titles": img_titles,
            "img_sizes": img_sizes,
            "category_id": category_id,
            "part_id": part_id,
        })

    # Fan the collected pages out to the process/thread pools for download.
    start_thread_tasks(img_info_list, website_source_url)


# 线程
def task_func(img_info, website_source_url):
    """Download every image of one listing page using a thread pool.

    img_info: dict with parallel lists img_urls / img_titles / img_sizes plus
    part_id and category_id (one entry per listing page, built by start()).
    website_source_url: site root prepended to each relative image URL.
    """
    img_urls = img_info.get('img_urls')
    part_id = img_info.get('part_id')
    category_id = img_info.get('category_id')
    img_titles = img_info.get('img_titles')
    img_sizes = img_info.get('img_sizes')
    # Roughly one worker per 6 images, but never zero: ThreadPoolExecutor
    # raises ValueError for max_workers <= 0, so the original crashed on any
    # page with fewer than 6 images.
    workers = max(1, len(img_urls) // 6)
    with ThreadPoolExecutor(max_workers=workers) as thread_pool:
        for url, title, size in zip(img_urls, img_titles, img_sizes):
            new_img_info = {"url": url, "title": title, "size": size,
                            "part_id": part_id, "category_id": category_id}
            thread_pool.submit(task_son_func, new_img_info, website_source_url)


def task_son_func(img_info: dict, website_source_url: str):
    """Download a single image and register it in the gallery database.

    img_info: {url, title, size, part_id, category_id} as built by task_func.
    Runs inside a worker thread, so it opens its own HandleDb connection.
    """
    url = website_source_url + img_info.get('url')
    title = img_info.get('title')
    size = img_info.get('size')
    part_id = img_info.get('part_id')
    category_id = img_info.get('category_id')
    if not size or not title or not url:
        return
    db = HandleDb(**db_configs.LOCALHOST_DB_GALLERY_CONFIG)
    # Resolve part/category titles -> the on-disk directory names.
    result = db.find('category', '*', f'id = {category_id}')
    category_title = result[0]['title']
    result = db.find('part', '*', f'id = {part_id}')
    part_title = result[0]['title']
    # Skip anything already recorded (checked by raw title, across categories).
    result = db.find('image', '*', f'title = "{title}"')
    if len(result) and result != 'error':
        return
    # Strip all whitespace from the title. The original used
    # title.replace(r"\s/g", "") -- a JavaScript regex idiom that, as a Python
    # literal replacement, removed nothing at all.
    title = ''.join(title.split())
    index = url.rfind('.')
    if index == -1:
        # No file extension -- cannot build a sensible filename.
        print("图片有误!", url)
        return
    suffix = url[index:]
    dir_path = f'./static/gallery/{part_title}/{category_title}'
    file_path = f'{dir_path}/{title}{suffix}'
    # exist_ok replaces the original try/except-pass, which also hid real
    # errors such as permission failures on first creation.
    os.makedirs(dir_path, exist_ok=True)
    # Re-check within this category right before the (slow) download.
    result = db.find('image', '*', f'title = "{title}" and category_id = {category_id}')
    if len(result) and result != 'error':
        print("已有此图片!")
        return
    # Fetch and stream the image to disk (timeout so a worker can't hang).
    response = requests.get(url, timeout=60)
    with open(file_path, 'wb') as f:
        for chunk in response.iter_content(chunk_size=256):
            f.write(chunk)
    print(f"{title}下载完成!")
    # Map the resolution string to a size row; fall back to the "0000x0000"
    # sentinel row when the exact size is unknown.
    size_id = None
    result = db.find('size', '*', f'size = "{size}"')
    if len(result) and result != 'error':
        size_id = result[0]['id']
    else:
        result = db.find('size', '*', f'size = "0000x0000"')
        if len(result) and result != 'error':
            size_id = result[0]['id']
    if size_id is None:
        # Neither the real size nor the fallback row exists. The original
        # crashed here with an unbound local size_id.
        print("缺少尺寸记录!", size)
        return
    # Public URL: served by the admin blueprint; strip the leading '.'.
    img_url = '/admin' + file_path[1:]
    result = db.add('image', {"title": title, "size_id": size_id,
                              "category_id": category_id, "url": img_url,
                              "source_server": website_source_url})
    if result != 'error':
        print(f"{title}已经添加到数据库!")


def start_thread_tasks(img_info_list: list, website_source_url: str = '', workers: int = 5):
    """Fan each listing-page dict out to its own process for downloading.

    img_info_list: list of page dicts built by start(). (The original
    annotation said dict, but a list is what is passed and iterated.)
    website_source_url: site root forwarded to every worker.
    workers: size of the process pool.
    """
    with ProcessPoolExecutor(max_workers=workers) as process_pool:
        for img_item in img_info_list:
            process_pool.submit(task_func, img_item, website_source_url)


if __name__ == '__main__':
    # (query id, category title) pairs for the 电脑壁纸 part; the listing URL
    # for each category is https://www.toopic.cn/dnbz/?q=--<id>--.html
    wallpaper_categories = [
        (82, "游戏原画"), (81, "卡通动漫"), (80, "飞机航天"), (79, "自然风景"),
        (78, "花卉植物"), (77, "绘画创意"), (76, "动物萌宠"), (75, "家居陈设"),
        (74, "静物特写"), (89, "肌理纹理"), (72, "军事科技"), (71, "明星大咖"),
        (70, "太空科幻"), (69, "禅意古风"), (85, "体育运动"), (65, "美女写真"),
        (87, "人文风土"), (88, "美食甜品"), (86, "精选壁纸"), (83, "城市建筑"),
        (84, "汽车船舶"), (90, "影视剧照"), (91, "情感文艺"), (92, "网红萝莉"),
    ]
    for query_id, category_title in wallpaper_categories:
        start({"part_title": "电脑壁纸",
               "category_title": category_title,
               "view_url": f"https://www.toopic.cn/dnbz/?q=--{query_id}--.html",
               "page_count": 30})