# https://missav.com/dm548/cn/uncensored-leak
# https://missav.com/dm548/cn/uncensored-leak?page=2
# Fetches the paginated list of "uncensored-leak" videos from missav.com

# https://missav.com/dm548/cn/uncensored-leak?page=1

from bs4 import BeautifulSoup
import execjs,os,requests
import m3u8
requests.packages.urllib3.disable_warnings()  # silence InsecureRequestWarning caused by verify=False requests below
import ssl
import time
import sys
print(sys.getrecursionlimit())  # show the interpreter's default limit before raising it

# get_page() recurses once per listing page, so the default limit (~1000) is too small
sys.setrecursionlimit(65530)

# ssl._create_default_https_context = ssl._create_stdlib_context
import urllib.request
from urllib.request import urlopen
import ssl  # NOTE(review): duplicate of the `import ssl` above
import json
ssl._create_default_https_context = ssl._create_unverified_context  # globally disable TLS certificate verification
requests.DEFAULT_RETRIES = 100  # NOTE(review): requests does not read this attribute; real retries need an HTTPAdapter/Retry — verify intent
import time  # NOTE(review): duplicate of the `import time` above
def get_headers():
    """Build the HTTP headers used for every request to the site.

    Reads the local JavaScript file ``1.js``, evaluates it with the Node
    runtime via execjs, and calls its ``generateUUID2`` function to obtain
    the ``user_uuid`` value the site expects in the Cookie header.

    Returns:
        dict: header mapping including Cookie, Sec-* and User-Agent fields.
    """
    with open("1.js", 'r', encoding='utf-8') as script_file:
        js_source = script_file.read()

    # Force execjs to use Node so the script behaves like it does in a browser-ish env.
    os.environ["EXECJS_RUNTIME"] = "Node"
    print(execjs.get().name)

    # Compile the JS and obtain the uuid used as the session cookie.
    runtime = execjs.compile(js_source)
    uuid_value = runtime.call("generateUUID2")
    print(uuid_value)

    return {
        'Connection': 'close',
        "Cookie": "user_uuid=" + str(uuid_value),
        "Sec-Ch-Ua": '"Google Chrome";v="123", "Not":""A-Brand";v="8", "Chromium";v="123"',
        "Sec-Ch-Ua-Mobile": "?0",
        "Sec-Ch-Ua-Platform": "Windows",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "cross-site",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",

    }

headers = get_headers()


def get_page(url):
    """Crawl the listing pages starting at *url* and store each video row.

    *url* must end in ``?page=N`` (the page number is parsed from the text
    after the first ``=``). For every video card on a page this prints and
    inserts ``(name, url, image_url, video_type)`` into the ``video`` table
    via the module-level ``sql`` client, then follows the pagination links
    until the last page is reached.

    Iterates with a loop instead of the original per-page recursion, so no
    raised recursion limit is needed and long listings cannot exhaust the
    stack. A failed page is retried once after 30 seconds; a second failure
    re-raises the exception.

    Args:
        url: listing URL of the form ``...?page=N``.

    Raises:
        requests.RequestException / AttributeError / ValueError: propagated
        after the single retry fails (network error, unexpected page layout,
        or a URL without a numeric page parameter).
    """
    retried = False
    while True:
        print("url-->", url)
        try:
            res = requests.get(url=url, headers=headers, verify=False)
            cur = int(url.split("=")[1])  # current page number from "?page=N"
            # Explicit parser: BeautifulSoup(data) alone picks whichever parser
            # is installed, which is non-deterministic across machines.
            soup = BeautifulSoup(res.text, "html.parser")

            # One card per video; the first <a> holds the thumbnail/link,
            # the second <a> holds the video-type label.
            card_divs = soup.find_all("div", class_="relative aspect-w-16 aspect-h-9 rounded overflow-hidden shadow-lg")
            for card in card_divs:
                anchors = card.find_all('a')
                thumb = anchors[0].find('img')
                name = thumb['alt']
                url_t = anchors[0]['href']
                image_url = thumb['data-src']
                video_type = anchors[1].get_text().strip()
                print(name, url_t, image_url, video_type)
                sql.insert_data('video', name, url_t, image_url, video_type)

            # Determine the highest page number advertised by the pagination bar.
            last_page = cur
            pager = soup.find('span', class_="relative z-0 inline-flex shadow-sm")
            for page_link in pager.find_all('a'):
                page_no = int(page_link['href'].split("=")[1])
                if page_no > last_page:
                    last_page = page_no

            if cur >= last_page:
                # Last page reached: return instead of the original sys.exit(0);
                # nothing meaningful runs after the top-level get_page() call.
                return
            url = url.split("=")[0] + "=" + str(cur + 1)
            retried = False  # page succeeded; reset the retry budget
        except Exception as e:
            if retried:
                raise  # second consecutive failure on the same page: give up
            retried = True
            time.sleep(30)
            print("报错：", e)
from missav.database import SqliteClass  # project-local sqlite wrapper; imported late, just before first use
# max=1
# Shared DB client used by get_page() for inserts.
sql = SqliteClass("missav")
# "name" is UNIQUE — NOTE(review): presumably insert_data tolerates duplicates on re-runs; verify in SqliteClass.
sql.create_table("video","(name varchar(100) unique,url varchar(200),image_url varchar(100),video_type varchar(100) )")
# sql.create_table("video_detail","(id int auto_increment primary_key,name varchar(100),url varchar(200),video_url varchar(100),video_name varchar(100))")

# Entry point: start crawling from page 1 of the uncensored-leak listing.
url = "https://missav.com/dm548/cn/uncensored-leak?page=1"
get_page(url)

# sql.cursor.execute("select * from video;")
# values = sql.cursor.fetchall()
# print(values)
# print(len(values))
# for t in values:
#     print(t)

# sql.cursor.execute("drop table video;")
# sql.cursor.execute("drop table video_detail;")
# print(sql.cursor.rowcount)