from requests_html import HTMLSession
import requests
from threading import Thread
import os
import logging
from logging import handlers
import time
import random
from concurrent.futures import ThreadPoolExecutor
import urllib3

# Logging helper: console + time-rotated file output.
class Logger(object):
    # Mapping from level name to the logging module's level constant.
    level_relations = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'crit': logging.CRITICAL
    }

    def __init__(self, filename, level='info', when='D', backCount=3,
                 fmt='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'):
        """Build a logger writing to both the console and a rotating file.

        filename  -- log file path; also used as the logger's name.
        level     -- key into level_relations; an unknown value now falls
                     back to INFO (the old .get(level) returned None for a
                     typo'd name and made setLevel() raise).
        when      -- rotation interval unit for TimedRotatingFileHandler
                     (e.g. 'D' = daily).
        backCount -- number of rotated files kept before deletion.
        fmt       -- record format string.
        """
        self.logger = logging.getLogger(filename)
        format_str = logging.Formatter(fmt)  # shared record format
        self.logger.setLevel(self.level_relations.get(level, logging.INFO))
        # getLogger() returns the same object for the same name, so guard
        # against attaching a second pair of handlers on re-instantiation —
        # previously every extra Logger(...) duplicated each log line.
        if not self.logger.handlers:
            sh = logging.StreamHandler()  # console output
            sh.setFormatter(format_str)
            th = handlers.TimedRotatingFileHandler(
                filename=filename, when=when, backupCount=backCount,
                encoding='utf-8')  # file output, rotated per `when`
            th.setFormatter(format_str)
            self.logger.addHandler(sh)
            self.logger.addHandler(th)


def download(url):
    """Download one .ts segment and record the outcome in the module lists.

    url -- dict with keys 'url' (segment address) and 'filename'
           (destination path).

    On success the job is appended to success_url and removed from
    failed_url (so a retried job is no longer counted as failed); on any
    error it is appended to failed_url for a later retry.
    """
    # Spoofed X-Forwarded-For; verify=False because the site's certificate
    # is not trusted.  Timeouts: 30s connect, 15s read.
    headers = {
        "X-Forwarded-For": "8.8.8.8"
    }
    try:
        res = requests.get(url=url['url'], stream=True, verify=False,
                           timeout=(30, 15), headers=headers)
        try:
            # Don't write an HTTP error page to disk and call it a success.
            res.raise_for_status()
            with open(url['filename'], mode='wb') as tf:
                # Stream in chunks instead of res.content, which would
                # buffer the whole segment in memory despite stream=True.
                for chunk in res.iter_content(chunk_size=64 * 1024):
                    tf.write(chunk)
        finally:
            # stream=True keeps the connection checked out until closed.
            res.close()
        loger.logger.info(str(url) + " success...")
        success_url.append(url)
        if url in failed_url:
            failed_url.remove(url)
    except Exception:
        # Narrowed from a bare except: (which also swallowed
        # KeyboardInterrupt); .exception() logs the traceback too.
        failed_url.append(url)
        loger.logger.exception(str(url) + " error...")

if __name__ == "__main__":
    # Silence urllib3's InsecureRequestWarning — we request with verify=False.
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
    loger = Logger(filename='yaojing.log', level='debug')
    loger.logger.info('start...')

    # Every segment-download job collected from the listing pages.
    all_url = []
    # Jobs that completed successfully.
    success_url = []
    # Jobs that failed at least once; retried after each batch.
    failed_url = []
    # Renamed from `dir`, which shadowed the builtin.
    base_dir = "E:/妖精"
    # Detail pages to scrape for episode links.
    start_urls = ["https://okzyw.com/?m=vod-detail-id-15162.html"]
    session = HTMLSession()
    for url in start_urls:
        res = session.get(url)
        links = res.html.xpath("//div[@id='1']/ul/li")
        # Each <li> holds one episode as "<title>$<m3u8 page url>".
        for link in links:
            lk = link.xpath("//input/@value")[0]
            title = link.xpath("//text()")[0].split("$")[0]
            dir_path = base_dir + "/" + title
            # makedirs(exist_ok=True) creates missing parents too and avoids
            # the exists()/mkdir() race of the original check.
            os.makedirs(dir_path, exist_ok=True)
            res = session.get(lk)
            loger.logger.info(title + " start collecting!!!")
            for lin in res.text.splitlines():
                if "index.m3u8" in lin:
                    # lk[:-10] strips the trailing 'index.m3u8' (10 chars)
                    # to recover the base URL — assumes that exact suffix.
                    link_url = lk[:-10] + lin
                    link_res = session.get(link_url)
                    loger.logger.info(link_url)
                    for m3u8 in link_res.text.splitlines():
                        if ".ts" in m3u8:
                            m3u8_url = link_url[:-10] + m3u8
                            filename = m3u8_url.split("/")[-1]
                            # Collect every job first; downloads run later.
                            all_url.append({
                                "filename": dir_path + "/" + filename,
                                "url": m3u8_url
                            })
            loger.logger.info(title + " collected!!!")
    loger.logger.info("total: " + str(len(all_url)) + " url!!!")

    # Split into batches of 50.  Stepping by 50 avoids the empty trailing
    # batch (and its pointless sleep) that int(len/50)+1 produced whenever
    # the count was an exact multiple of 50.
    thrd_urls = [all_url[n:n + 50] for n in range(0, len(all_url), 50)]
    for thrd_url in thrd_urls:
        # Thread pool of up to 50 concurrent downloads; leaving the
        # with-block waits for every submitted job to finish.
        with ThreadPoolExecutor(max_workers=50) as thex:
            for thurl in thrd_url:
                thex.submit(download, thurl)
        # Brief pause, then retry the accumulated failures once.  Iterate a
        # snapshot: download() mutates failed_url (removes on success,
        # re-appends on failure), which would skip elements if we iterated
        # the live list.
        time.sleep(random.randint(2, 5))
        for faurl in list(failed_url):
            download(faurl)

    loger.logger.info(" all done!!!")