# -*- coding: utf-8 -*-
# @Time : 2021/8/23 19:04

# @File : itv_m3u_spider.py
# @Software : PyCharm
import functools
import re
import time
from urllib.parse import urlparse

import execjs
import requests
from bs4 import BeautifulSoup

import schedule_tool


@functools.lru_cache(maxsize=1)
def _load_decode_js() -> str:
    """Read decode.js once and memoize its text.

    ``run_js`` is called once per channel option inside the scrape loops,
    so re-reading the helper script from disk every call is pure waste.
    NOTE: edits to decode.js made while the process runs are not picked up.
    """
    with open('decode.js', 'r', encoding='utf-8') as f:
        return f.read()


def run_js(js_script):
    """Compile the decode.js helpers plus *js_script* into a JS context.

    :param js_script: extra JavaScript source appended after decode.js;
        str()-converted, so bs4 tags may be passed directly.
    :return: an execjs context — use ``.eval()`` / ``.call()`` on it.
    """
    return execjs.compile(_load_decode_js() + str(js_script))


def channel_link_parser(some_str):
    """Extract every href attribute value from an HTML snippet.

    Uses ``[^"]*`` instead of greedy ``.*`` so that a quote in a later
    attribute (e.g. ``href="x" data-ajax="false"``) cannot leak into the
    captured link.

    :param some_str: HTML text such as ``'<a href="/play?id=1">…</a>'``.
    :return: list of captured href values (possibly empty).
    """
    pattern = re.compile(r'href="([^"]*)"')
    return re.findall(pattern, some_str)


def channel_name_parser(channel_name_str):
    """Extract the link text of each anchor in an HTML snippet.

    Non-greedy ``(.*?)`` so a string containing several anchors yields one
    name per anchor instead of a single blob spanning to the last ``</a>``.

    :param channel_name_str: HTML text such as ``'<a href="x">CCTV-1</a>'``.
    :return: list of captured names (possibly empty).
    """
    pattern = re.compile(r'">(.*?)</a>')
    return re.findall(pattern, channel_name_str)


def build_m3u_data(logo: str, category: str, channel_name: str, play_url: str) -> str:
    """Render one ``#EXTINF`` playlist entry followed by its stream URL.

    :param logo: tvg-logo attribute value (may be empty).
    :param category: group-title attribute value.
    :param channel_name: display name of the channel.
    :param play_url: resolved stream URL.
    :return: two-line m3u fragment, each line terminated with ``\\n``.
    """
    return (
        f'#EXTINF:-1, tvg-logo="{logo}", group-title="{category}", '
        f'{channel_name}\n{play_url}\n'
    )


def find_parmas_js(pattern, mystring_list):
    """Return the regex captures from the first item that matches *pattern*.

    :param pattern: compiled regex (or pattern string) with capture group(s).
    :param mystring_list: iterable of items; each is str()-converted before
        matching (callers pass bs4 ``<script>`` tags).
    :return: ``re.findall`` result for the first matching item, or ``None``
        when nothing matches.
    """
    for item in mystring_list:
        # Single findall per item replaces the original search()+findall()
        # double scan; a non-empty result means the item matched.
        matches = re.findall(pattern, str(item))
        if matches:
            return matches
    return None


class Spider:
    """Scrapes one category page of an IPTV listing site into m3u entries.

    The site hides its stream URLs behind obfuscated inline JavaScript;
    ``parse_channel`` extracts that JS and executes it (via ``run_js`` and
    the local decode.js) to recover candidate links, then verifies each one
    by following redirects manually.
    """

    def __init__(self, header, url, category, blacklist):
        """
        :param header: dict of HTTP headers sent with every request.
        :param url: channel-list page URL, e.g. ``http://host/?tid=ys``.
        :param category: group-title written into generated m3u entries.
        :param blacklist: substrings; URLs containing any are rejected.
        """
        self.blacklist = blacklist
        self.header = header
        # scheme://host/ prefix used to absolutize relative channel links
        parsed = urlparse(url)
        self.prehost = '{}://{}/'.format(parsed.scheme, parsed.netloc)
        self.channel_list_url = url
        self.category = category
        # one Session so connections (and cookies, if any) are reused
        self.secc = requests.Session()

    def bloccker(self, item):
        """Return -1 if *item* contains any blacklisted substring, else 0.

        :rtype: int
        """
        for block in self.blacklist:
            if block in item:
                return -1  # one hit is enough to reject — no need to keep scanning
        return 0

    def parse_channel_list(self):
        """Fetch the category page and build one m3u entry per working channel.

        :return: list of m3u entry strings (possibly empty).
        """
        response = self.secc.get(url=self.channel_list_url, headers=self.header).text
        soup = BeautifulSoup(response, "html.parser")

        # channel anchors on this site are marked with data-ajax="false"
        out = soup.find_all('a', {"data-ajax": "false"})

        data = []
        for channel in out:
            channel = str(channel)
            # absolutize the relative link and undo HTML entity escaping
            channel_link = self.prehost + channel_link_parser(channel)[0].replace('&amp;', '&')
            channel_name = channel_name_parser(channel)[0]
            f_channel_url = self.parse_channel(channel_link)
            logo = ''  # the listing provides no channel logos

            if f_channel_url != -1:
                data.append(build_m3u_data(logo, self.category, channel_name, f_channel_url))
                time.sleep(0.3)  # be polite between successful channel fetches

        return data

    def get_channel_real_url(self, channel_link, max_redirects=10):
        """Follow redirects manually until a final stream URL is reached.

        :param channel_link: candidate stream URL to verify.
        :param max_redirects: safety cap on redirect hops (new parameter with
            a default, so existing callers are unaffected; it prevents the
            unbounded recursion the original had on redirect loops).
        :return: the final URL, or -1 when the link is dead, the body is a
            tiny placeholder, or any request error occurs.
        """
        try:
            for _ in range(max_redirects):
                print(channel_link)
                channel_url_response = self.secc.get(
                    channel_link, stream=True, timeout=5, allow_redirects=False)
                location = channel_url_response.headers.get('Location')
                if location is None:
                    # No redirect: a body of <= 20 bytes is a dead/placeholder
                    # stream.  A missing Content-Length raises and is caught below.
                    if int(channel_url_response.headers['Content-Length']) <= 20:
                        return -1
                    print('成功')
                    return channel_link
                # follow the redirect on the next iteration
                channel_link = location
                time.sleep(0.2)
            return -1  # too many hops — assume a redirect loop
        except Exception as e:
            # network errors, bad headers, etc. — treat the link as dead
            print(e)
            return -1

    def parse_channel(self, url):
        """Decode a channel page's obfuscated JS and return a working stream URL.

        :param url: the channel's detail-page URL.
        :return: a verified, non-blacklisted stream URL, or -1 if none work.
        """
        htmlstr = self.secc.get(url=url, headers=self.header).text
        soup = BeautifulSoup(htmlstr, "html.parser")

        # locate the inline <script> that decodes the player parameters
        pattern = re.compile(r'<script> (.*) document.write.*')
        pre_channel_params_js = find_parmas_js(pattern, soup.find_all('script'))[0]

        # run the decoder JS; the decoded payload lives in a 3-character
        # variable assigned from bdecodeb(...)
        pre0_js_result = run_js(pre_channel_params_js)
        var_pattern = re.compile(r'; (.{3})=bdecodeb\(')
        parmas_var_js = re.findall(var_pattern, pre_channel_params_js)[0]
        pre1_js_result = pre0_js_result.eval(parmas_var_js)

        # strip the surrounding HTML, keeping only the player JS
        channel_params_js_pattern = re.compile(r'<script>(.*)</script>')
        channel_params_js = re.findall(channel_params_js_pattern, pre1_js_result)[0]

        # each <option> holds an encrypted candidate stream URL
        encrypted_url = soup.find_all('option')

        for channel_option_url in encrypted_url:
            # run startPlayer() from the decoded JS to decrypt the candidate
            candidate = run_js(channel_params_js).call('startPlayer',
                                                       channel_option_url['value'])
            real_channel_url = self.get_channel_real_url(candidate)
            time.sleep(0.2)
            if real_channel_url != -1 and self.bloccker(real_channel_url) == 0:
                print(real_channel_url)
                return real_channel_url

        return -1

    # Placeholder hooks for a finer-grained pipeline; not implemented yet.
    def get_js_code(self):
        pass

    def get_parmas(self):
        pass

    def decode_channel_source(self):
        pass

    def get_channel_data(self):
        pass


def quick_start():
    """Scrape both channel categories and hand all entries to the scheduler."""
    header = {
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0.1; Moto G (4)) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.164 Mobile Safari/537.36'

    }
    blacklist = ['live.cache.cmvideo.cn', 'migu.php']

    # (listing URL, m3u group-title) pairs to scrape, in order
    targets = [
        ('http://m.iptv222.com/?tid=ys', '央视频道'),
        ('http://m.iptv222.com/?tid=ws', '地方卫视'),
    ]

    data = []
    for target_url, target_category in targets:
        spider = Spider(header, target_url, target_category, blacklist)
        data.extend(spider.parse_channel_list())

    schedule_tool.schedule_process('multi.m3u', data)


if __name__ == '__main__':
    # Time the whole scrape so successive runs can be compared.
    started_at = time.time()

    quick_start()

    elapsed = time.time() - started_at  # wall-clock seconds for the full run
    print(elapsed)
