from urllib import request
from urllib.parse import urlencode,urljoin,unquote,urlparse,parse_qs
import json
from bs4 import BeautifulSoup
import base64
# Browser-like User-Agent sent with every request so the site serves normal
# pages instead of rejecting the default urllib client string.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
}


def get_html(url):
    """
    Fetch a URL and return its body decoded as GBK (the site's encoding).

    :param url: absolute URL to fetch
    :return: decoded HTML text
    """
    req = request.Request(url, headers=headers)
    # Context manager closes the response socket even if read()/decode() raises
    # (the original leaked the connection).
    with request.urlopen(req) as res:
        return res.read().decode("gbk")


def search_movie_by_name(movie_name):
    """
    Search 76mao.com for a movie and collect result links across result pages.

    :param movie_name: movie title to search for (sent GBK-encoded, as the site expects)
    :return: dict mapping movie title -> absolute detail-page URL; {} when nothing found
    """
    search_url = "http://www.76mao.com/Search.asp"
    data = {
        "keyword": movie_name.encode("gbk"),
        "Submit": u'搜索'.encode('gbk')
    }
    data = urlencode(data).encode()
    req = request.Request(search_url, headers=headers, data=data)
    # Close the POST response promptly instead of leaking the socket.
    with request.urlopen(req) as res:
        soup = BeautifulSoup(res.read().decode("gbk"), "lxml")
    movie_dict = {i.text: urljoin(search_url, i.get("href")) for i in soup.find_all("a", "mvname")}
    next_url = "http://www.76mao.com/{}".format(soup.find("div", class_="pager").find_all("a")[-2].get("href"))
    url_set = set()
    # Walk the pager until a URL repeats (end of pagination).
    while next_url not in url_set:
        url_set.add(next_url)
        # parse_qs values are strings, so compare against "1", not the int 1
        # (the original `!= 1` was always true); a missing Page parameter
        # defaults to "1" — the page we already scraped.
        page = parse_qs(urlparse(next_url).query).get("Page", ["1"])[0]
        if page != "1":
            soup = BeautifulSoup(get_html(next_url), "lxml")
            movie_dict.update({i.text: urljoin(search_url, i.get("href")) for i in soup.find_all("a", "mvname")})
            # Advance to the next page's pager link so the walk actually
            # progresses (the original never updated next_url, so at most one
            # extra page was ever fetched).
            pager = soup.find("div", class_="pager")
            if pager is None:
                break
            next_url = "http://www.76mao.com/{}".format(pager.find_all("a")[-2].get("href"))
    return movie_dict


def get_movie_videos(url):
    """
    Collect video titles and playback links from a movie detail page.

    :param url: detail-page URL
    :return: one dict per playlist, each mapping video title -> absolute play
        URL, e.g. [{'BD日语': 'http://www.76mao.com/Play/1-45167-1-2.Html', ...}, ...]
    """
    soup = BeautifulSoup(get_html(url), "lxml")
    playlists = []
    for block in soup.find_all("div", class_="list playlist"):
        # Skip blocks whose immediately preceding element is ")".
        if block.previous_element == ")":
            continue
        entries = {}
        # A block may hold one or several <a> tags (one per video).
        for anchor in block.find_all("a"):
            # href embeds the real path as ...('path',... — extract it.
            path = anchor.get("href").split("('")[1].split("',")[0]
            entries[anchor.text] = urljoin(url, path)
        playlists.append(entries)
    return playlists


def get_video_vid(url):
    """
    Extract the video id from a play page's obfuscated MacPlayer script.

    The page embeds a base64-encoded, URL-quoted HTML snippet containing an
    <iframe> whose src query string carries the id.

    :param url: play-page URL
    :return: the video id string, or "" when it cannot be found
    """
    video_id = ""
    html = get_html(url)
    soup = BeautifulSoup(html, "lxml")
    video = soup.find("div", class_="MacPlayer")
    if video:
        script = video.script.text
        # Payload is wrapped as ...("<base64>")) — pull out the middle part.
        script = script.split('("')[1].split('"))')[0]
        script = base64.b64decode(script).decode()
        video_soup = BeautifulSoup(unquote(script), "lxml")
        # Guard against a missing iframe or missing id parameter so the
        # function honors its "" default instead of raising (the original
        # crashed with TypeError/AttributeError on either).
        iframe = video_soup.iframe
        if iframe is not None:
            ids = parse_qs(urlparse(iframe.get("src")).query).get("id")
            if ids:
                video_id = ids[0]
    return video_id


def get_m3u8_url(vid):
    """
    Resolve a video id to its playable stream URL via the CDN endpoint.

    :param vid: video id as returned by get_video_vid
    :return: tuple of (unquoted stream URL, format string — e.g. "m3u8")
    """
    url = "http://cdn.76long.com/cs1/url.php"
    data = urlencode({"id": vid}).encode()
    req = request.Request(url, headers=headers, data=data)
    # Close the response promptly (the original leaked the socket) and drop
    # the leftover debug print of the raw JSON payload.
    with request.urlopen(req) as res:
        payload = json.loads(res.read().decode("gbk"))
    return unquote(payload.get("url")), payload.get("ext")
