import logging
import time
from concurrent.futures import ThreadPoolExecutor, wait, FIRST_COMPLETED

from src.com.itheima.web.service.blog_service import BlogService, UrlLink


class Config:
    """Idle-budget settings for the two polling phases.

    Each value is a maximum number of idle polling iterations
    (each iteration sleeps ~0.1 s); the counter is reset whenever
    a future completes, so this bounds idle time, not total time.
    """

    def __init__(self, task1_time, task2_time):
        # Budget for the article-URL listing phase (task1).
        # Budget for the article download phase (task2).
        self.task1_time, self.task2_time = task1_time, task2_time


config = Config(task1_time=900, task2_time=1800)

#下载某一组文章
#例如 https://blog.csdn.net/weixin_43767015/category_10402194.html
def down_blog_muti(links=None, group=None, save_dir=None):
    """Submit one download task per article URL in *links*.

    Stores the submitted futures in the module-level ``futures`` list.

    :param links: iterable of article URLs to download.
    :param group: group/category name passed through to the downloader.
    :param save_dir: directory name passed to BlogService for saving.
    """
    global futures
    submitted = []
    for link in links:
        service = BlogService(save_dir=save_dir)
        submitted.append(executor2.submit(service.down_blog_single, url=link, group=group))
    futures = submitted


def extract_number(s):
    """Derive a save-directory name from a CSDN URL string.

    Strips a trailing ".html", takes the segment after the last "_"
    (typically the numeric user/category id), and prefixes it with
    "weixin_". Returns the fallback name when nothing remains.

    :param s: URL or URL fragment, e.g. "category_10402194.html".
    :return: directory name such as "weixin_10402194".
    """
    # Only strip a trailing ".html" suffix; the previous
    # str.replace(".html", "") also removed occurrences in the
    # middle of the string.
    if s.endswith(".html"):
        s = s[:-len(".html")]
    # Take the last "_"-separated segment.
    number = s.split("_")[-1]
    if not number:
        # NOTE(review): "defalut" typo kept deliberately — it is a
        # runtime directory name and existing saves depend on it.
        return "weixin_defalut"
    return 'weixin_' + str(number)


#下载某个用户的所有文章
#下载链接如 'https://blog.csdn.net/weixin_43767015/'
# 格式: https://blog.csdn.net+用户
def down_blog_all_ofsomuser(links=None, save_dir=None):
    """Download all articles of one user, group by group.

    Phase 1 submits one listing task per group link (fetching that
    group's article URLs); as each listing completes, Phase 2 tasks
    are submitted — one download per article URL. Both phases poll
    their futures with a 0.1 s sleep, resetting an idle counter on
    every completion and giving up once the counter exceeds the
    configured budget (config.task1_time / config.task2_time).

    :param links: iterable of UrlLink objects (each with .url and
                  .name; .name is used as the article group).
    :param save_dir: directory name passed to BlogService for saving.
    """
    links = list(links or [])
    total = len(links)

    # Phase 1: submit one URL-listing task per group link.
    task1_future_dict = {}
    for link in links:
        task1_future_dict[link] = executor1.submit(BlogService().get_article_urls, url=link.url)

    task2_futures = []
    task2_future_dict = {}

    # Phase 1 polling: harvest each finished listing exactly once and
    # fan out its article downloads.  Loop until every listing has been
    # harvested (the old "all done -> break" check could exit before
    # harvesting results that completed quickly, losing the downloads).
    executed_task1s = []
    index = 1
    exec_time = 0
    while len(executed_task1s) < total:
        # Idle-budget timeout (counter resets on every completion).
        if exec_time > config.task1_time:
            logging.warning(f"{index}>>>down_blog_all_ofsomuser>>>task1:下载超时退出")
            break
        for link in links:
            # Skip listings that were already harvested.
            if link.url in executed_task1s:
                continue
            future = task1_future_dict[link]
            if future.done():
                exec_time = 0  # progress made: reset the idle budget
                executed_task1s.append(link.url)
                # Submit one download task per article URL in this group.
                for url in future.result():
                    future2 = executor2.submit(
                        BlogService(save_dir=save_dir).down_blog_single, url=url, group=link.name)
                    task2_futures.append(future2)
                    task2_future_dict[future2] = url
                logging.warning(f"{index}>>>down_blog_all_ofsomuser>>>开始下载链接为:{link.url},task1:下载进度为{index * 100 / total}%")
                index += 1
        # Periodic heartbeat roughly every 2 s of idle polling.
        if exec_time % 20 == 0:
            logging.warning(
                f"check>>>{index}/{total}>>>执行次数{exec_time}>>>down_blog_all_ofsomuser>>>task1:开始下载进度为:{index * 100 / total}%")
        time.sleep(0.1)  # avoid busy waiting
        exec_time += 1

    # Phase 2 polling: wait until every download task has completed.
    # Tracks finished futures (not URLs) so duplicate article URLs
    # cannot stall completion; the loop now exits as soon as all
    # downloads are done instead of always burning the full timeout.
    total2 = len(task2_futures)
    executed_task2s = []
    index = 1
    exec_time = 0
    while len(executed_task2s) < total2:
        if exec_time > config.task2_time:
            logging.warning(f"{index}>>>down_blog_all_ofsomuser>>>下载task2:下载超时退出")
            break
        for future in task2_futures:
            if future in executed_task2s:
                continue
            if future.done():
                exec_time = 0  # progress made: reset the idle budget
                executed_task2s.append(future)
                url = task2_future_dict.get(future)
                logging.warning(f"{index}/{total2}>>>down_blog_all_ofsomuser:{url}>>>task2:开始下载进度为:{index * 100 / total2}%")
                index += 1
        # Heartbeat no longer interpolates `url`, which was unbound on
        # the first pass when no download had been inspected yet.
        if exec_time % 20 == 0:
            logging.warning(f"check>>>{index}/{total2}>>>执行次数{exec_time}>>>down_blog_all_ofsomuser>>>task2:开始下载进度为:{index * 100 / total2}%")
        time.sleep(0.1)  # avoid busy waiting
        exec_time += 1


def main():
    """Interactive entry point: create the thread pools, then loop on
    user commands until the user chooses to exit.

    Commands: 1 = download all articles of a user, 2 = download one
    category, 3 = download a single article, 4 = quit.
    """
    global executor1
    global executor2
    executor1 = ThreadPoolExecutor(max_workers=20)    # listing (task1) pool
    executor2 = ThreadPoolExecutor(max_workers=300)   # article download (task2) pool

    try:
        while True:
            time.sleep(0.1)
            command = input("请输入操作指令(1.下载某用户所有文章 2.下载某用户某章节文章 3.下载某篇文章 4.退出)>>>").strip()
            if command == '1':
                url = input("请输入下载链接>>>").strip()
                service = BlogService()
                links = service.get_article_urls_ofuser(url)  # per-category links of the user
                save_dir = extract_number(url)
                down_blog_all_ofsomuser(links=links, save_dir=save_dir)  # multi-threaded download
            elif command == '2':
                url = input("请输入下载链接>>>").strip()
                links = [UrlLink(url=url, name='default')]
                save_dir = extract_number('')
                down_blog_all_ofsomuser(links=links, save_dir=save_dir)  # multi-threaded download
            elif command == '3':
                url = input("请输入下载链接>>>").strip()
                # BUG FIX: this line was corrupted into
                # "save_dir = extra33" / "ct_number('')", which raised
                # NameError; restore the intended call.
                save_dir = extract_number('')
                BlogService(save_dir=save_dir).down_blog_single(url=url, group='zh')
                break
            elif command == '4':
                break
            else:
                continue
    finally:
        # Wait for queued downloads to drain, then release the pools.
        executor2.shutdown(wait=True)
        executor1.shutdown(wait=True)

# Script entry point: run the interactive download menu.
if __name__ == '__main__':
    main()
