"""
定向网站爬虫，视频URL获取！
获取千峰网站的视频资源目录，保存其视频对应的URL连接地址到txt文档.
"""

import requests
from bs4 import BeautifulSoup
import bs4
import re


def get_one_page(url):
    """Fetch a web page and return its decoded HTML text.

    Parameters:
        url: Address of the page to request.

    Returns:
        The response body as text on success, or "" on any request failure.
    """
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'zh-CN,zh;q=0.9',
    }
    try:
        # NOTE(review): verify=False disables TLS certificate checking — kept
        # as in the original; confirm this is intentional for the target site.
        response = requests.get(url, headers=headers, verify=False, timeout=30)
        response.raise_for_status()
        # Use the detected encoding so Chinese page content decodes correctly.
        response.encoding = response.apparent_encoding
        return response.text
    except requests.RequestException as exc:
        # Catch only request-related errors (was a bare except that hid
        # every failure behind "error!") and report the actual cause.
        print("request failed:", exc)
        return ""


def parse_one_page(ulist, html):
    """Extract video URLs from the page HTML and append them to ulist.

    Scans for <a> tags whose class attribute matches 'fr' and collects
    their 'data-url' attribute, appending one newline-terminated URL
    per tag that has a non-empty value.

    Parameters:
        ulist: List the URL strings are appended to (mutated in place).
        html: Raw HTML text of the course page; may be "" on fetch failure.
    """
    soup = BeautifulSoup(html, 'html.parser')
    for anchor in soup.find_all('a', re.compile('fr')):
        if isinstance(anchor, bs4.element.Tag):
            # Hoist the attribute lookup: the original called .get() twice.
            data_url = anchor.get('data-url')
            if data_url:
                ulist.append(data_url + '\n')


def createFiles(fileName, ulist):
    """Write the collected URLs to a text file, one URL per line.

    The file name is derived from the characters that the first two
    URLs' basenames share at the same positions (the original positional
    "common part" rule), suffixed with "合集.txt"; with fewer than two
    URLs a default name is used.

    Parameters:
        fileName: Ignored (always recomputed); kept for caller compatibility.
        ulist: List of URL strings, each already newline-terminated.

    Returns:
        "error" if writing fails, otherwise None.
    """
    if len(ulist) < 2:
        fileName = "fileMenu.txt"
    else:
        # strip() removes the trailing '\n' so it cannot leak into the
        # file name (the original embedded a newline in the name).
        base1 = ulist[0].split('/')[-1].strip()
        base2 = ulist[1].split('/')[-1].strip()
        # zip() stops at the shorter basename, fixing the IndexError the
        # original raised when the two names had different lengths.
        common = "".join(c1 for c1, c2 in zip(base1, base2) if c1 == c2)
        fileName = common + "合集.txt"
    try:
        # Explicit UTF-8 so output is consistent across platforms.
        with open(fileName, "w", encoding="utf-8") as fo:
            print("保存URL到文件:", fo.name)
            fo.writelines(ulist)
    except OSError:
        # Narrowed from a bare except: only file-system errors mean
        # "write failed"; anything else should surface as a bug.
        print("write file failed!")
        return "error"


def main():
    """Script entry point: fetch the course page, parse out the video
    URLs, and save them to a text file."""
    video_urls = []
    file_name = ""
    # Replace the address below with any other course page as needed.
    course_url = 'http://video.mobiletrain.org/Course/index/courseId/598'
    page_html = get_one_page(course_url)
    parse_one_page(video_urls, page_html)
    createFiles(file_name, video_urls)


# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
