# --- Module setup: imports and global runtime side effects -------------------
# NOTE: several imports were duplicated in the original (execjs/os/requests,
# ssl, os); the duplicates are consolidated here. All side effects are kept.

import subprocess
from functools import partial

# Patch BEFORE any third-party import can grab a direct Popen reference:
# force every subprocess (execjs runs Node this way) to use UTF-8 text I/O.
subprocess.Popen = partial(subprocess.Popen, encoding='utf-8')

import hashlib
import json
import os
import platform
import ssl
import sys
import time
import urllib.request
from urllib.request import urlopen

import execjs
import httplib2
import m3u8
import requests
from bs4 import BeautifulSoup

# Silence the InsecureRequestWarning spam caused by verify=False everywhere.
requests.packages.urllib3.disable_warnings()

# Globally disable TLS certificate verification for urllib-based fetches.
ssl._create_default_https_context = ssl._create_unverified_context

# Local forward proxy used by get(); adjust port to your proxy client.
proxies = {'http': 'http://localhost:7897', 'https': 'http://localhost:7897'}

# Force httplib2 down to HTTP/1.0 (avoids chunked/keep-alive issues with
# some CDN endpoints).
httplib2.HTTPConnectionWithTimeout._http_vsn = 10
httplib2.HTTPConnectionWithTimeout._http_vsn_str = 'HTTP/1.0'

# The original retry logic recursed per failure, hence the huge limit.
print(sys.getrecursionlimit())
sys.setrecursionlimit(65530)

requests.DEFAULT_RETRIES = 100

def get_headers():
    """Build the browser-like HTTP headers shared by every request.

    Returns:
        dict: static header set (UA, referer, client-hint fields) that makes
        requests to the site look like ordinary Chrome traffic.
    """
    return {
        "Dnt": "1",
        "Referer": "https://www.fofoyy.com",
        "Priority": "u=1, i",
        "Sec-Ch-Ua": '"Chromium";v="124", "Google Chrome";v="124", "Not-A.Brand";v="99"',
        "Sec-Ch-Ua-Mobile": "?0",
        "Sec-Ch-Ua-Platform": "Windows",
        "Sec-Fetch-Dest": "empty",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Site": "cross-site",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36",
    }

# Module-level headers built once at import time and shared by every request.
headers = get_headers()

# Project-local proxy-rotation helper; in this file it is only referenced
# from commented-out code (the fixed `proxies` dict above is used instead).
from missav.proxyIPS import get_proxy


def get(url):
    """GET *url* via the local proxy with the shared headers, retrying on error.

    Args:
        url: target URL, or None (returns None immediately).

    Returns:
        requests.Response on success; None when *url* is None or the host
        name cannot be resolved.

    Retries indefinitely on any other exception (matching the original's
    intent), but with a loop instead of unbounded recursion.
    """
    if url is None:
        return None
    while True:
        print("get---msg", url)
        try:
            session = requests.Session()
            return session.get(url, headers=headers, proxies=proxies,
                               timeout=100, verify=False)
        except Exception as e:
            print("get---msg \t", e)
            # BUG FIX: the original tested `"NameResolutionError" in e`, a
            # membership test on the exception OBJECT, which raises TypeError.
            # Test the message string instead.
            if "NameResolutionError" in str(e):
                return None
            # BUG FIX: the original recursed with `get(url)` but never
            # returned the result, so callers always got None after any
            # exception.  The loop retries and returns the real response.

# Shared Session reused by download_m3u8_video for the per-segment requests
# (connection pooling across the many TS downloads).
session = requests.Session()

def download_m3u8_video(url, file_path):
    """Download every TS segment of an .m3u8 playlist into a single file.

    Args:
        url: URL of the media playlist (e.g. .../2000k/hls/mixed.m3u8).
        file_path: local file the raw segment bytes are appended to.

    Returns:
        False when the playlist itself cannot be fetched; True once every
        segment has been appended.  Retries forever on per-segment errors,
        resuming from the failed segment.  Exits the process on a full disk.
    """
    print("download_m3u8_video", url)
    r = get(url)
    print("status_code", r.status_code)
    if r.status_code != 200:
        print('m3u8视频下载链接无效')
        return False

    lines = r.text.split('\n')
    print("m3u8_list ", lines)
    # Keep only segment lines (drop blanks and '#EXT...' tag lines).
    segments = [i for i in lines if i and i[0] != '#']
    # BUG FIX: the original tested `"http" in m3u8_list and "ts" in
    # m3u8_list`, i.e. looked for list ELEMENTS equal to "http"/"ts" — never
    # true — so even absolute segment URLs were wrongly prefixed with the
    # base.  Decide by inspecting the first real segment line instead.
    if segments and not segments[0].startswith("http"):
        base = url[:url.rindex('/')]  # e.g. https://host/.../2000k/hls
        print("m3u8_url_t", base)
        segments = [base + "/" + s for s in segments]

    print(segments)
    break_url = 0  # index of the next segment to (re)download
    while True:
        try:
            with open(file_path, 'ab') as f:
                # BUG FIX: the original used m3u8_list.index(ts_url) to
                # record the resume point, which finds the FIRST occurrence
                # and resumes at the wrong place when a playlist repeats a
                # segment URL.  enumerate() tracks the true position.
                for idx, ts_url in enumerate(segments[break_url:], start=break_url):
                    print("正在下载：\t" + file_path + ts_url)
                    print("正在下载：\t" + ts_url)
                    break_url = idx
                    print("break_url", break_url)
                    with session.get(ts_url, headers=headers, verify=False,
                                     stream=True, timeout=100) as r:
                        print("status_code", r.status_code)
                        if r.status_code != 200:
                            raise Exception("segment HTTP %s" % r.status_code)
                        f.write(r.content)
        except Exception as e:
            print("break_url", e)
            if "No space left on device" in str(e):
                # BUG FIX: os.EX_OK does not exist on Windows (which this
                # script supports); 0 is its value on POSIX.
                os._exit(0)
            continue  # reopen the file and resume from break_url
        break
    print('m3u8视频下载完成')
    return True


# download_m3u8_video(m3u8_url) #['video0.jpeg', 'video1.jpeg', 'video2.jpeg', 'video3.jpeg'
# 'https://surrit.com/7dab4666-5249-4949-9709-312195fd9e1d/1280x720/video0.jpeg',

def convert_ts_to_mp4(ts_file_path, mp4_file_path):
    """Remux *ts_file_path* into *mp4_file_path* with ffmpeg (stream copy).

    BUG FIX: the original used ``os.system(f'ffmpeg -i {ts} -c copy {mp4}')``
    with unquoted interpolation, which breaks on paths containing spaces
    (common for scraped movie titles) and allows shell injection.  Passing
    an argument list avoids the shell entirely.  ffmpeg's exit status is
    ignored, matching the original os.system behavior.
    """
    subprocess.run(
        ["ffmpeg", "-i", ts_file_path, "-c", "copy", mp4_file_path],
        shell=False,
        check=False,
    )


def down_all(url, filename):
    """Download the playlist at *url* and produce ``<filename>.mp4``.

    Skips the whole job when the target mp4 already exists.  Otherwise the
    stream is downloaded to a staging .ts file, remuxed to mp4 via ffmpeg,
    and the .ts is deleted.
    """
    filename = filename.strip()

    # Windows boxes stage the .ts on the sync disk and put the mp4 in a
    # separate folder; everything else uses /video for both.
    on_windows = platform.system() == "Windows"
    ts_dir = r'E:\1\BaiduSyncdisk' if on_windows else r'/video'
    mp4_dir = r'E:\1\video' if on_windows else r'/video'

    ts_name = filename + ".ts"
    mp4_name = filename + ".mp4"

    # Already converted earlier — nothing to do.
    if mp4_name in os.listdir(mp4_dir):
        return

    ts_path = os.path.join(ts_dir, ts_name)
    print("ts_file_path", ts_path)
    mp4_path = os.path.join(mp4_dir, mp4_name)
    print("mp4_filename", mp4_path)

    print("download_m3u8_video--url", url)
    if download_m3u8_video(url, ts_path) is False:
        return

    # Remux the staged .ts into the final mp4.
    convert_ts_to_mp4(ts_path, mp4_path)
    time.sleep(100)

    # Remove the staging .ts via the project helper.
    from missav.test_file import rm
    rm(path=ts_path)


def get_page_text(url):
    """Fetch *url* with the shared headers and return the body as UTF-8 text."""
    response = requests.get(url, headers=headers, verify=False)
    return response.content.decode("UTF-8")
def get_page_text2(url):
    """Fetch *url* without custom headers; return the raw Response object."""
    response = requests.get(url, verify=False)
    return response
def get_page_content(url):
    """Fetch *url* without custom headers and return the body bytes."""
    response = requests.get(url, verify=False)
    return response.content
def post_page_text(id):
    """POST a stream id to the site's /source endpoint.

    Args:
        id: the ``sid`` value extracted from the page's urlList.  (The
            parameter name shadows the builtin but is kept for caller
            compatibility.)

    Returns:
        requests.Response whose text is expected to be a playlist URL.
    """
    endpoint = "https://www.fofoyy.com/source/"
    payload = {'id': id}
    return requests.post(endpoint, verify=False, data=payload,
                         headers=headers, timeout=100)

def parse_bs4(data):
    """Extract the inline decryption JS and the movie title from page HTML.

    Args:
        data: full HTML text of a movie detail page.

    Returns:
        (js_code, filename): *js_code* is the script text from
        ``function decryptDict(`` up to (excluding) the
        ``$(document).ready(function()`` handler; *filename* is the <h1>
        title truncated at the first '('.  Implicitly returns None when no
        <script> contains "urlList" (callers unpacking will raise TypeError).
    """
    # BUG FIX: the original called BeautifulSoup(data) with no parser, which
    # emits a warning and picks whatever parser happens to be installed;
    # pin the stdlib parser for deterministic behavior.
    soup = BeautifulSoup(data, "html.parser")
    filename = (soup.find_all("header", class_="product-header")[0]
                .find_all('h1')[0].get_text().split('(')[0])

    print("--------")

    # BUG FIX: dropped the unused `start2 = content.index("var urlList")`,
    # which could raise a spurious ValueError; enumerate replaces the
    # hand-rolled counter.
    for count, script in enumerate(soup.find_all("script")):
        content = script.get_text()
        if "urlList" in content:
            start = content.index("function decryptDict(")
            end = content.index("$(document).ready(function()")
            print(start)
            print(end)
            print("count===", count)
            return content[start:end], filename
def down(url):
    """Scrape a movie page, decrypt its stream list, and download each stream.

    Fetches *url*, extracts the inline decryption JS and the title via
    parse_bs4, evaluates the JS under Node to obtain ``urlList``, then POSTs
    each stream id to /source and hands each resulting playlist URL to
    down12345.  Per-stream failures are logged and the loop continues.
    """
    page = get(url)
    js_code, filename = parse_bs4(page.text)
    print("filename", filename)

    # Force execjs to use the Node runtime (the site JS needs V8 semantics).
    os.environ["EXECJS_RUNTIME"] = "Node"
    ctx = execjs.compile(js_code)
    # BUG FIX: the original reused one `res` variable for the page response,
    # this evaluated dict, AND each per-id POST response — the loop only
    # worked because the iterator over res['url_list'] was captured before
    # the rebinding.  Distinct names make the data flow explicit.
    url_list_obj = ctx.eval("urlList")
    print(url_list_obj['source'])
    print(url_list_obj['url_list'][0][0]['sid'])
    print("url_list---1", url_list_obj['url_list'])

    url_data = []
    for entry in url_list_obj['url_list']:
        try:
            sid = entry[0]['sid']
            source_res = post_page_text(sid)
            print(source_res.text)
            url_data.append(source_res.text)
            print("url_res", source_res.text)
            # down12345 re-raises on failure; log here and try the next id.
            down12345(source_res.text, filename)
        except Exception as e:
            print("index.m3u8 error", e)
    print("url_data---1", url_data)

def down12345(d, filename):
    """Resolve a /source response into a concrete playlist URL and download it.

    Args:
        d: response text from post_page_text — expected to contain an
           ``.../index.m3u8`` URL.
        filename: movie title used to name the output files.

    Raises:
        Exception: re-raised after logging on any fetch/parse failure so the
        caller (down) can log it and continue with the next stream.
    """
    # BUG FIX: the original guard was `if "index.m3u8" not in d: pass` — a
    # no-op — so non-playlist responses fell through and failed later with a
    # confusing ValueError from d.index('index').  (The commented-out
    # precursor of this function in down() used `continue` here, so skipping
    # was clearly the intent.)
    if "index.m3u8" not in d:
        return

    url_sssss = ""
    try:
        print("ddd", d)
        res = get(d)
        print("status_code_ddddddddd 4", res.status_code, d)
        if res.status_code != 200:
            raise Exception("index playlist HTTP %s" % res.status_code)
        print("200---------", res.text)
        # The index playlist's first non-comment line is the variant
        # playlist path, relative to everything before 'index' in d, e.g.
        # .../index.m3u8 -> .../2000k/hls/mixed.m3u8
        base = d[:d.index('index')]
        url_sssss = [base + line for line in res.text.split('\n')
                     if line and line[0] != '#'][0]
        print("url1111111111", url_sssss)
        down_all(url_sssss, filename)
    except Exception as e:
        print("url2222222222", e, url_sssss)
        raise

if __name__ == '__main__':
    # Entry point is currently disabled: everything below is commented-out
    # scratch code (example movie-page URLs and a manual ts->mp4 conversion).
    # To run a download, uncomment one `url = ...` line plus the
    # `flag = down(url)` block.  NOTE(review): the commented loop calls
    # `download_all()`, which is not defined in this file — presumably lives
    # elsewhere or is stale; verify before re-enabling.
    pass
    # while True:
    #     try:
    #         download_all()
    #     except Exception as e:
    #         print("main",e)
    #         continue
    #     break
    # url ="https://www.fofoyy.com/dianying/42025"
    # url = "https://www.fofoyy.com/dianying/39482"
    # url = "https://www.fofoyy.com/dianying/62725"
    # url = "https://www.fofoyy.com/dianying/36257" # 红心女王 (Queen of Hearts)
    # url = "https://www.fofoyy.com/dianying/10392" # 性告解 (movie title)
    # url = "https://www.fofoyy.com/dianying/32511" # 露露情史 (movie title)
    # # url = "https://www.fofoyy.com/dianying/45428" # 本能2 (Basic Instinct 2)
    # url = "https://www.fofoyy.com/dianying/32713" # 露西亚的情人 (Sex and Lucia)
    # url = "https://www.fofoyy.com/dianying/28338" # 露露会说话的屁眼 (movie title)
    # # base = r'E:\1\BaiduSyncdisk'
    # # ts_file_path = os.path.join(base,"11111.ts")
    # # mp4_file_path = os.path.join(base,"11111.mp4")
    # # convert_ts_to_mp4(ts_file_path, mp4_file_path)
    # flag = down(url)
    # if flag is False:
    #     print("down 该连接不能下载")
    # base = r'E:\1\BaiduSyncdisk'
    # base2 = r'E:\1\video'
    # ts_file_path = os.path.join(base, "露露会说话的屁眼.ts")
    # mp4_file_path = os.path.join(base2, "露露会说话的屁眼.mp4")
    #
    # convert_ts_to_mp4(ts_file_path, mp4_file_path)