import json
import requests
import sys
import urllib3
from lxml import etree
from concurrent.futures import ThreadPoolExecutor

# Silence the InsecureRequestWarning that every verify=False request below would emit.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)


# The scraped player URLs are view-only; the streams cannot be downloaded.

class PianbaSpider(object):
    """Scrape a show's episode list from pianba.tv and save each episode's
    player URL to ``<title>.txt``.

    NOTE(review): the resulting player URLs are view-only; the underlying
    streams cannot be downloaded directly.
    """

    def __init__(self, num):
        # num: the site's show ID, appended to /html/ to form the show page URL.
        self.num = num
        self.base_url = 'https://www.pianba.tv'
        self.url = "{}/html/{}".format(self.base_url, num)
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"}
        # Alternative player front-ends kept for reference:
        # self.real_base_url = 'https://danmu.8old.cn/vip/?url='
        # self.real_base_url = 'https://dmjx.m3u8.tv/?url'
        self.real_base_url = 'https://www.pianba.tv/bofang/?url='

    def parse_url(self, url):
        """GET ``url`` and return the decoded response body.

        TLS verification is disabled (the site's certificate is not trusted
        by requests); a timeout keeps one dead connection from blocking a
        worker thread in ``run`` forever.
        """
        resp = requests.get(url, headers=self.headers, verify=False, timeout=30)
        return resp.content.decode()

    def get_url_list(self, html_str):
        """Parse the show page.

        Returns ``(title, url_list)`` where ``url_list`` is a list of
        ``{'name': episode_title, 'url': absolute_episode_url}`` dicts.
        Raises IndexError when the expected title/list nodes are absent
        (i.e. the show ID does not exist) — ``run`` relies on that.
        """
        html = etree.HTML(html_str)
        name = html.xpath("//h1[@class='title']/text()")[0]
        ul = html.xpath(".//span[@class='pull-right1'][1]/following-sibling::*[2]")[0]
        url_list = []
        for li in ul.xpath(".//li"):
            href = li.xpath(".//a/@href")
            if not href:
                # An <li> without a link is not a playable episode; skip it
                # instead of crashing on `self.base_url + None`.
                continue
            title_name = li.xpath(".//a/text()")
            url_list.append({
                'name': title_name[0] if title_name else None,
                'url': self.base_url + href[0],
            })
        return name, url_list

    def get_detail_url(self, html_str):
        """Extract the player URL from an episode page.

        The page embeds a JSON config in an inline <script> (roughly
        ``var player_data={...}``); return ``real_base_url + config['url']``,
        or ``''`` when the script or the url field is missing.
        """
        html = etree.HTML(html_str)
        script_content = html.xpath(
            "//div[@class='stui-player__video embed-responsive embed-responsive-16by9 clearfix']/script[1]/text()")
        script_content = script_content[0] if script_content else ''
        if '{' not in script_content:
            # No embedded config at all; previously this fed '{' to
            # json.loads and raised JSONDecodeError.
            return ''
        # Keep everything from the last of the first two '{' onward, so the
        # assignment prefix before the JSON object is discarded.
        parts = script_content.split('{', 2)
        data = json.loads('{' + parts[-1])
        url = data.get('url')
        return self.real_base_url + url if url else ''

    def save_content_list(self, file_name, content_list):
        """Write ``<file_name>.txt``: an ID header line, then one
        ``name：url`` line per episode."""
        with open(file_name + '.txt', 'w', encoding='utf-8') as f:
            f.write('ID：' + str(self.num) + '\n')
            for item in content_list:
                # Plain write. The old json.dumps(...).replace('"', '')
                # round-trip only re-added and stripped quotes, and would
                # mangle content containing '"' or '\'.
                f.write('{}：{}\n'.format(item['name'], item['url']))

    def get_detail_content(self, item):
        """Fetch one episode page and resolve its real player URL."""
        html_str = self.parse_url(item['url'])
        real_url = self.get_detail_url(html_str)
        return {'name': item['name'], 'url': real_url}

    def run(self):
        """Fetch the show page, resolve all episodes concurrently, save."""
        html_str = self.parse_url(self.url)
        try:
            file_name, url_list = self.get_url_list(html_str)
        except Exception:
            # Title/list xpath lookup failed -> the ID does not exist.
            # (Narrowed from a bare `except:` that also swallowed
            # SystemExit/KeyboardInterrupt.)
            sys.exit('不存在')
        with ThreadPoolExecutor(max_workers=10) as executor:
            futures = [executor.submit(self.get_detail_content, item) for item in url_list]
            content_list = [f.result() for f in futures]
        self.save_content_list(file_name, content_list)


if __name__ == '__main__':
    # Usage: python <script> <show-id>
    if len(sys.argv) < 2:
        # Fail with a usage message instead of a raw IndexError traceback.
        sys.exit('usage: python pianba_spider.py <show-id>')
    show_id = sys.argv[1]  # renamed from `id`, which shadowed the builtin
    spider = PianbaSpider(show_id)
    spider.run()
