import os.path
import re
import threading

import requests
import tqdm
from bs4 import BeautifulSoup
import urllib3
import pandas as pd

# Debug holder: rebound inside get_movie_downloads to the last resolved
# download link, printed there when an exception occurs.
b = []
# Ensure the output directory for the per-category spreadsheets exists.
if not os.path.exists("Excel"):
    os.mkdir("Excel")
# NOTE(review): this writer is never used or closed anywhere below — each
# category is saved via DataFrame.to_excel directly; confirm it can be removed.
writer = pd.ExcelWriter("Excel/movie.xlsx")
# Worker threads spawned by get_all_movie; joined in __main__.
threads = []


class movie():
    """Container for one movie category scraped from the site.

    Holds the category title plus two parallel lists: name[i] is the
    movie title whose detail-page (later: download) URL is href[i].
    """

    def __init__(self, type):
        # Category heading text; also used as the Excel sheet/file name.
        self.type = type
        # Fresh, per-instance parallel lists (never shared between instances).
        self.name, self.href = [], []


def get_all_movie():
    """Scrape the dyttcn.com index page and spawn one worker thread per
    category section to resolve its download links.

    Side effects: appends each started thread to the module-level
    `threads` list so __main__ can join them. Returns None on any
    non-200 response.
    """
    url = "https://www.dyttcn.com/"
    # verify=False skips TLS verification (site has certificate issues);
    # the resulting warnings are silenced in __main__.
    res = requests.get(url=url, verify=False)
    res.encoding = 'gbk'  # the site serves GBK-encoded pages
    if res.status_code != 200:
        print(res.status_code)
        return
    soup = BeautifulSoup(res.text, 'html.parser')
    titles = soup.find_all(name='div', class_='title_all')
    contents = soup.find_all(name='div', class_='co_content222')
    # zip() stops at the shorter list, so a missing content div can no
    # longer raise IndexError (the original indexed contents[index]).
    for index, (title, div_list) in enumerate(zip(titles, contents)):
        try:
            mv = movie(title.get_text().replace('>', ''))
            for li in div_list.find_all('li'):
                a = li.find('a')
                if a is None or not a.get('href'):
                    continue  # skip list items without a usable link
                # Detail-page URL for this movie (resolved to a real
                # download link by the worker thread).
                mv.href.append(f"https://www.dyttcn.com{a['href']}")
                mv.name.append(a.get_text())
            print(f"启动第{index}线程")  # fixed: these are threads, not processes (进程)
            thread = threading.Thread(target=get_movie_downloads, args=(mv,))
            threads.append(thread)
            thread.start()
        except Exception as e:
            # Best-effort: a malformed section shouldn't stop the other
            # categories (the original try wrapped the whole loop).
            print(e)


# 获取每个页面的url
def get_movie_downloads(movie):
    """Worker: resolve each detail-page URL in movie.href to an actual
    download link, then save the whole category to Excel/<type>.xlsx.

    Prefers a magnet link scraped from the raw HTML; falls back to the
    anchor inside the highlighted <td bgcolor="#ffffbb"> cell. Rebinds
    the module-level `b` to the last resolved link (debug aid).
    """
    global b
    if not movie.href:
        return  # nothing to resolve; also avoids using idx before assignment
    # Raw string + compiled once outside the loop. \s+ matches any
    # whitespace between the tag name and the href attribute.
    magnet_re = re.compile(r'<a\s+href="(magnet:[^"]+)')
    with tqdm.tqdm(total=len(movie.href) * 10) as f:
        for idx, url in enumerate(movie.href):
            try:
                res = requests.get(url, verify=False)
                res.encoding = 'gbk'
                if res.status_code != 200:
                    return
                f.set_description(f"Downloading{movie.name[idx]}")
                soup = BeautifulSoup(res.text, "html.parser")
                # BUG FIX: the original `re.findall(...)[0] or fallback`
                # raised IndexError on an empty match list before the `or`
                # fallback could run, so non-magnet pages always failed.
                matches = magnet_re.findall(res.text)
                if matches:
                    movie.href[idx] = matches[0]
                else:
                    movie.href[idx] = soup.find("td", bgcolor="#ffffbb").find('a')['href']
                b = movie.href[idx]  # remember last link for debug printing
                f.update(10)
            except Exception as e:
                # Best-effort diagnostics, then give up on this category.
                print(movie.href)
                print(movie.name)
                print(b)
                print(e)
                return
        f.set_description(f"saving to Excel{movie.name[idx]}")
        data = {
            '电影名字': movie.name,
            '迅雷下载bt': movie.href
        }
        df = pd.DataFrame(data)
        df.to_excel(f"Excel/{movie.type}.xlsx", sheet_name=movie.type, index=False)


if __name__ == '__main__':
    # Silence the InsecureRequestWarning triggered by verify=False requests.
    urllib3.disable_warnings()
    get_all_movie()
    # Block until every per-category worker thread has finished.
    for worker in threads:
        worker.join()
