from myxunlei import MYXunlei
import json
import sys
import requests
import urllib.request
import urllib.parse
import urllib.error
import urllib.response
from bs4 import BeautifulSoup
import re
import time
from retrying import retry

# GET with automatic retry: the call only raises if every attempt fails;
# any single successful attempt returns immediately.
# NOTE(review): the decorator args mean up to 3 attempts with 5 seconds
# between retries — the original comment said "2 s wait / 5 retries",
# which did not match these arguments.
@retry(stop_max_attempt_number=3, wait_fixed=5000)
def get_request(url, headers):
    """Issue an HTTP GET for *url* with *headers* (retried by the decorator).

    Returns the requests.Response; each individual attempt uses a 3-second
    timeout. If all attempts fail, the last exception propagates.
    """
    return requests.get(url, headers=headers, timeout=3)

# Username/password login path (currently disabled in favour of the
# session-id path below).
# username = sys.argv[1]
# password = sys.argv[2]
# xl = MYXunlei(username, password)
# xl.login()

# Authenticate with an existing Xunlei session id instead.
# NOTE(review): argv index 3 matches the old 3-argument invocation
# (username, password, session_id); if the script is now called with the
# session id alone, this should be sys.argv[1] — confirm the caller.
session_id = sys.argv[3]
xl = MYXunlei(session_id)

# Presumably exchanges the session id for an API token used by the later
# download calls — verify against MYXunlei's implementation.
xl.session_id_2_token()

# Example direct ed2k download (kept for reference):
# file_url = 'ed2k://|file|cn_windows_10_multiple_editions_x86_dvd_6846431.iso|3233482752|B5C706594F5DC697B2A098420C801112|/'
# xl.download(file_url)

# Dydytt ("Movie Heaven", 电影天堂) scraper: fetch the latest-movies index
# page, open each movie's detail page, and queue every magnet link found
# there for download through the Xunlei client `xl` created above.
dydytt_host = 'https://www.dydytt.net'
dydytt_list_page_url = dydytt_host + '/html/gndy/dyzz/index.html'
# Browser-like headers sent with every request.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36','referer':"www.dydytt.net" }

# Compile the link patterns once instead of per page. Raw strings replace
# the original's invalid "\/" escape sequences (a SyntaxWarning on modern
# Python); "\/" and "/" match identically, so the patterns are unchanged.
detail_link_re = re.compile(r"/html/gndy/dyzz/.+/.+\.html")
magnet_re = re.compile(r"magnet:.+")

html = get_request(dydytt_list_page_url, headers=headers)
bs = BeautifulSoup(html.text, 'lxml')

# The movie listing lives inside the '.co_content8' container(s).
divs = bs.select('.co_content8')

for div in divs:
    # Anchors pointing at individual movie detail pages.
    namelist = div.find_all(name="a", attrs={"href": detail_link_re})

    for name in namelist:
        dydytt_file_page = dydytt_host + name['href']
        print(dydytt_file_page)
        dydytt_file_html = get_request(dydytt_file_page, headers=headers)
        dydytt_file_bs = BeautifulSoup(dydytt_file_html.text, 'lxml')
        # Every magnet link on the detail page is queued for download.
        dydytt_file_namelist = dydytt_file_bs.find_all(name="a", attrs={"href": magnet_re})
        for file_name in dydytt_file_namelist:
            file_url = file_name['href']
            print(file_url)
            time.sleep(0.2)  # throttle the download submissions
            xl.download(file_url)


