#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File Name: get_movie_url.py
# Author: bobo
# Created Time: 20231222

# get_movie_url.py
""" 爬虫影片脚本
Usage:
    get_movie_url.py [--url=<url_path>] [--idx=<idx>]
    get_movie_url.py (-h | --help)
    get_movie_url.py --version

Options:
    -h --help  显示帮助信息
    --version  显示版本信息
    --url=<url>  下载地址
    --idx=<idx>  索引，默认为第1个
"""
import re
from urllib.parse import urlsplit

import requests
from bs4 import BeautifulSoup as BS
from docopt import docopt

from utils import search_protocol_and_domain, slash_unicode_to_chs


def haohan_find_movie(_url):
    """
    Scrape one Haohan cinema play page and return its m3u8 playback URL.

    :param _url: URL of a single-episode play page
    :return: decoded m3u8 URL string
    """
    response = requests.get(_url, timeout=5)
    page = BS(response.text, "html.parser")
    # The player config is embedded in an inline <script> that mentions "m3u8".
    scripts = page.findAll("script", string=re.compile(".*m3u8.*"))
    extractor = re.compile('"url":"(?P<url>.*m3u8)","url_next"')
    found = extractor.search(scripts[0].text)
    # The URL is stored with escaped slashes/unicode; decode before returning.
    return slash_unicode_to_chs(found.group("url"))


def haohan_find_movie_list(_url, index=1):
    """
    Print every episode of a Haohan cinema title as "title.episode,m3u8_url".

    :param _url: detail-page URL of the title
    :param index: kept for interface parity with the other scrapers; unused
        by this site's layout
    """
    # Derive "scheme://host" robustly. The previous
    # _url[:_url.find("/", _url.find("//") + 2)] slicing returned a truncated
    # string when the URL had no path component (find() -> -1).
    parts = urlsplit(_url)
    base_url = f"{parts.scheme}://{parts.netloc}"

    req = requests.get(_url, timeout=5)
    soup = BS(req.text, "html.parser")
    # Series title, e.g. <span class="name">...</span>
    title = soup.find("span", class_="name").text

    # Episode links live under the first "ulli clearfix" container.
    links = soup.find(class_="ulli clearfix")
    links = links.find("div").find("ul").find_all("li")
    for link in links:
        node = link.find("a")
        href = node["href"]
        name = node.text
        movie_url = f"{base_url}{href}"
        m3u8 = haohan_find_movie(movie_url)
        print(f"{title}.{name},{m3u8}")


def mianfei_find_movie(_url):
    """
    Scrape one Mianfei cinema play page and return its m3u8 playback URL.

    :param _url: URL of a single-episode play page
    :return: decoded m3u8 URL string
    """
    # This site requires a browser User-Agent (module-level HEADERS).
    response = requests.get(_url, timeout=5, headers=HEADERS)
    page = BS(response.text, "html.parser")
    # Player config sits in an inline <script> containing "m3u8".
    candidates = page.findAll("script", string=re.compile(".*m3u8.*"))
    pattern = re.compile('"url":"(?P<url>.*m3u8)","url_next"')
    match = pattern.search(candidates[0].text)
    # Decode escaped slashes/unicode in the captured URL.
    return slash_unicode_to_chs(match.group("url"))


def mianfei_find_movie_list(_url, index=1):
    """
    Print every episode of a Mianfei cinema title as "title.episode,m3u8_url".

    :param _url: detail-page URL of the title
    :param index: 1-based index of the source/cloud tab to scrape; when the
        page has fewer tabs, the last available one is used
    """
    # Derive "scheme://host" robustly. The previous
    # _url[:_url.find("/", _url.find("//") + 2)] slicing returned a truncated
    # string when the URL had no path component (find() -> -1).
    parts = urlsplit(_url)
    base_url = f"{parts.scheme}://{parts.netloc}"

    req = requests.get(_url, timeout=5, headers=HEADERS)
    soup = BS(req.text, "html.parser")

    # Series title, e.g. <h1>少年派</h1>
    title = soup.find("h1").text

    # Expected markup (one "ulli clearfix" block per source/cloud tab):
    # <ul class="ulli clearfix">
    #   <div id="stab_11" class="stab_list">
    #     <ul class="clearfix">
    #       <li><a href="/vodplay/19269-4-1.html" target="_self">第01集</a></li>
    #       <li><a href="/vodplay/19269-4-2.html" target="_self">第02集</a></li>
    #     </ul>
    #   </div>
    # </ul>
    #
    # limit=index fetches at most the first `index` tabs; [-1] then picks the
    # requested tab (or the last existing one when fewer are present).
    all_src_cloud = soup.find_all(class_="ulli clearfix", limit=index)
    src_cloud = all_src_cloud[-1]

    links = src_cloud.find_all("li")
    for link in links:
        node = link.find("a")
        href = node["href"]
        name = node.text
        movie_url = f"{base_url}{href}"
        m3u8 = mianfei_find_movie(movie_url)
        print(f"{title}.{name},{m3u8}")


# Sample detail pages handled by the scrapers below:
# http://www.tjnhswine.com.cn/voddetail/fengweirenjiandierji.html
# https://www.sndpx.com/tx/19269.html
# Dispatch table: site domain -> episode-list scraper for that site.
MOVIE_URL = {
    "www.tjnhswine.com.cn": haohan_find_movie_list,
    "www.qdxinmei.com": haohan_find_movie_list,
    "www.sndpx.com": mianfei_find_movie_list,
}

# Desktop Safari User-Agent sent by mianfei_* requests (the site appears to
# expect a browser UA — TODO confirm it rejects the default requests UA).
HEADERS = {
    "User-Agent":
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0.3 Safari/605.1.15"
}


def main(url, index=1):
    """
    Resolve the scraper for *url*'s domain and print its episode m3u8 list.

    :param url: movie detail-page URL
    :param index: 1-based source-tab index forwarded to the scraper.
        Default is 1, matching the scrapers' own defaults and the CLI
        --idx default (the old default of 0 disagreed with both).
    :return: always None; results are printed
    """
    try:
        print(f"#解析地址:{url}")
        (found, protocol, domain) = search_protocol_and_domain(url)
        print(f"#结果：{found}，协议：{protocol}，域名：{domain}")
        if not found:
            print("地址错误")
            return None
        fun_search = MOVIE_URL.get(domain, None)
        if not fun_search:
            print("当前地址还不支持搜索")
            return None
        fun_search(url, index)
    except Exception as ex:
        # CLI boundary: report the failure instead of surfacing a traceback.
        print(f"出现异常:{ex}")
    return None


if __name__ == "__main__":
    # The module docstring doubles as the docopt CLI specification.
    arguments = docopt(__doc__, version="get_movie_url 2.0")
    target = arguments.get("--url")
    if target:
        raw_idx = arguments.get("--idx")
        main(target, int(raw_idx) if raw_idx else 1)
    else:
        print(__doc__)