# --- Imports and process-wide setup.  NOTE(review): several lines below have
# --- global side effects that affect every library in this process. ---
from missav.test_redis import RedisConn
import subprocess
from functools import partial
# Global monkey-patch: every subprocess.Popen created anywhere in this process
# now decodes its pipes as UTF-8 text — affects all libraries, not just here.
subprocess.Popen = partial(subprocess.Popen, encoding='utf-8')
import hashlib

import execjs, os, requests
from bs4 import BeautifulSoup
import execjs,os,requests  # NOTE(review): duplicate of the import two lines up
import m3u8
# Requests below use verify=False; silence urllib3's InsecureRequestWarning.
requests.packages.urllib3.disable_warnings()
import ssl
import time
# ssl._create_default_https_context = ssl._create_stdlib_context
import urllib.request
from urllib.request import urlopen
import ssl  # NOTE(review): duplicate import (ssl already imported above)
import json
# SECURITY: disables TLS certificate verification for every urllib request
# made by this process.
ssl._create_default_https_context = ssl._create_unverified_context


import httplib2
# Force httplib2 onto HTTP/1.0 — presumably to avoid chunked transfer
# encoding from the target site; TODO confirm this is still needed.
httplib2.HTTPConnectionWithTimeout._http_vsn=10
httplib2.HTTPConnectionWithTimeout._http_vsn_str = 'HTTP/1.0'


import sys
print(sys.getrecursionlimit())
# get_all_url() recurses once per pagination page, so the default recursion
# limit (~1000) is raised to survive deep listings.
sys.setrecursionlimit(65530)

import os

import platform

# NOTE(review): requests exposes no top-level DEFAULT_RETRIES attribute;
# this line most likely has no effect — verify (retries belong on an
# HTTPAdapter / Session).
requests.DEFAULT_RETRIES = 100

# Shared Redis connection; get_all_url() pushes scraped detail URLs into it.
r = RedisConn()

# NOTE(review): commented-out block below contains real-looking credentials;
# consider scrubbing it from version control.
# pool = redis.ConnectionPool(host='localhost', port=6379,password="lzjasdqq", decode_responses=True)
# r = redis.Redis(host='124.71.207.224', port=6379, password="lzjasdqq", db=0, decode_responses=True,
#                 socket_timeout=1000,
#                 socket_connect_timeout=1000)

def down_video(url_t):
    """Download the movie at *url_t*, retrying up to three times.

    Delegates the real work to ``missav.get_m3u8_3.down``.  Any exception
    (including a failed import of the downloader module) is printed and the
    attempt retried; after three failures the function gives up silently so
    one bad URL cannot abort the surrounding crawl.

    Fixes vs. original: normalized the non-standard 3-space indentation to
    the file's 4-space style and dropped the unused loop variable.
    """
    for _ in range(3):  # retry counter only; index value unused
        try:
            # Imported inside the try so a broken/missing downloader module
            # is handled like any other download failure (as the original did).
            from missav.get_m3u8_3 import down
            print("movie_url", url_t)
            down(url_t)
        except Exception as e:
            # Best-effort crawl: report the error and retry.
            print("down video \t", e)
            continue
        else:
            print("下载完成 down_video")  # "download finished"
            break


#
def get_all_url(url):
    """Scrape a listing page and every page after it, pushing each
    movie-detail URL into the Redis set ``movie_detail1``.

    Follows the "下一页" (next page) pagination link until none remains.

    Fixes vs. original:
    - pagination was implemented by unbounded recursion (the reason the file
      raises ``sys.setrecursionlimit``); now an iterative loop.
    - ``BeautifulSoup`` is given an explicit parser instead of whichever
      "best available" parser happens to be installed.
    - a non-200 response now stops the scrape instead of parsing the error
      page (which would crash on the missing list markup).

    :param url: absolute URL of a fofoyy.com listing page.
    :returns: None (results go to Redis via the module-level ``r``).
    """
    while url:
        res = requests.get(url, verify=False)
        if res.status_code != 200:
            print("请求失败")
            return  # give up: parsing an error page would only crash below

        soup = BeautifulSoup(res.text, "html.parser")

        # The second "lists-content" div holds this page's movie list.
        for li in soup.find_all("div", class_="lists-content")[1].find_all('li'):
            url_t = "https://www.fofoyy.com" + li.find('h2').find('a')['href']
            print(url_t)
            print(li.find('h2').get_text())
            r.add("movie_detail1", url_t)

        # Pagination: look for the "下一页" (next page) link.
        next_url = None
        for li in soup.find('div', class_='pagination pagination-multi').find_all('li'):
            if "下一页" in li.get_text():
                next_url = "https://www.fofoyy.com" + li.find('a')['href']
                print("页数：", next_url)
                break
        url = next_url

        print()
# url = "https://www.fofoyy.com/dianying/21-0-0-0"
# get_all_url(url)


def get_other_category_page():
    """Return listing URLs for every region EXCEPT the mainstream ones.

    Scrapes the region filter row of the category page and keeps each
    region link whose label is not in the skip set below.

    Fixes vs. original:
    - the skip list was rebuilt on every loop iteration and tested with an
      awkward ``if …: pass / else`` — now a constant set and ``not in``.
    - the name ``data`` was reused for both the response body and the skip
      list; both renamed.
    - ``BeautifulSoup`` gets an explicit parser.
    - a non-200 response now returns ``[]`` instead of parsing the error
      page (which would crash with AttributeError on the missing div).

    :returns: list of absolute listing URLs.
    """
    # Regions deliberately skipped (handled elsewhere / not wanted).
    skip = {"全部", "中国大陆", "香港", "日本", "韩国"}

    url = "https://www.fofoyy.com/dianying/21-7-0-0"
    res = requests.get(url, verify=False)
    if res.status_code != 200:
        print("请求失败")
        return []

    soup = BeautifulSoup(res.text, "html.parser")
    url_list = []
    # The second <dd> of the filter block is the region row.
    for a in soup.find('div', class_='lists-content filter').find_all('dd')[1].find_all('a'):
        if a.get_text() not in skip:
            url_list.append("https://www.fofoyy.com" + a['href'])
    return url_list
def _main():
    """Script entry point: fetch the region listing URLs, then scrape each
    listing (and its pagination) into Redis."""
    url_list = get_other_category_page()
    print("url_list---start  ", url_list)
    for url in url_list:
        get_all_url(url)


# Guarded so importing this module no longer triggers a full network crawl;
# behavior when run as a script is unchanged.
if __name__ == "__main__":
    _main()

