"""
定向网站爬虫，视频URL获取！
获取千峰网站的视频资源目录，保存其视频对应的URL连接地址到txt文档.
"""

import requests
from bs4 import BeautifulSoup
import bs4
import re


def get_page(url):
    """Fetch *url* and return the decoded HTML body as a string.

    Returns an empty string on any request/HTTP failure so callers can
    length-check the result instead of handling exceptions themselves.
    """
    headers = {
        'Connection': 'keep-alive',
        'Cache-Control': 'max-age=0',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'zh-CN,zh;q=0.9',
    }
    try:
        # NOTE(review): verify=False disables TLS certificate checks —
        # kept for compatibility with this target site; do not reuse elsewhere.
        response = requests.get(url, headers=headers, verify=False, timeout=30)
        response.raise_for_status()
        # Use the detected encoding so Chinese page content decodes correctly.
        response.encoding = response.apparent_encoding
        return response.text
    except requests.RequestException as err:
        # Narrow except: only network/HTTP errors are expected here;
        # programming errors still surface instead of being swallowed.
        print("error!", err)
        return ""


def parseUrl_to_list(total_url, local_url, html):
    """Collect video links from *html* into both accumulator lists.

    Scans every <a> tag whose attributes match the 'fr' pattern and,
    for each one carrying a 'data-url' attribute, appends that URL
    (newline-terminated) to *total_url* and *local_url* in place.
    """
    soup = BeautifulSoup(html, 'html.parser')
    for anchor in soup.find_all('a', re.compile('fr')):
        if not isinstance(anchor, bs4.element.Tag):
            continue
        video_url = anchor.get('data-url')
        if video_url:
            entry = video_url + '\n'
            total_url.append(entry)
            local_url.append(entry)


def get_file_name(fileName, html):
    """Derive an output filename from the page's breadcrumb and append it.

    Takes the text of the last <a> inside each breadcrumb <div> (class
    matching 'bread') and appends '<title>.txt' to *fileName* in place.
    """
    soup = BeautifulSoup(html, 'html.parser')
    for tr in soup.find_all('div', re.compile('bread')):
        if isinstance(tr, bs4.element.Tag):
            titles = tr.find_all('a')
            # Guard fixes two crashes in the original: titles[-1] raised
            # IndexError when the div held no <a>, and .string can be None
            # (nested markup), which made the '+ ".txt"' concatenation fail.
            if titles and titles[-1].string:
                fileName.append(titles[-1].string + '.txt')


def write_urlist_to_files(fileName, ulist):
    """Write the URL lines in *ulist* to the most recent filename.

    *fileName* is the accumulated list of output names; only the last
    entry (the current page's title) is used. Prints a notice and does
    nothing when no filename has been collected yet.
    """
    if not fileName:
        print("Don't get fileName!")
        return
    try:
        # Explicit utf-8 keeps non-ASCII titles/URLs writable regardless
        # of the platform's default codec (e.g. gbk/cp1252 on Windows).
        with open(fileName[-1], "w", encoding="utf-8") as fo:
            print("保存URL到文件:", fo.name)
            fo.writelines(ulist)
    except OSError as err:
        # Narrowed from a bare except: only filesystem errors are expected.
        print("write file failed!", err)


def build_url(base_url, num_start, num_end):
    """Return page URLs for course ids in [num_start, num_end).

    Each URL is *base_url* with the numeric course id appended; the end
    bound is exclusive, matching range() semantics.
    """
    return [base_url + str(course_id) for course_id in range(num_start, num_end)]


def main():
    """Crawl course pages for ids [num_start, num_end) and save URLs.

    For each valid page: extracts video URLs, derives a per-page output
    filename, and writes that page's URLs to its own file. All URLs
    across every page are additionally saved to 'total_url.txt'.
    Adjust num_start/num_end by hand to cover other id ranges.
    """
    total_url = []
    local_url = []
    total_file_name = []
    base_url = 'http://video.mobiletrain.org/Course/index/courseId/'
    num_start = 590
    num_end = 620
    # Example single page: http://video.mobiletrain.org/Course/index/courseId/598

    # Iterate the URLs directly instead of indexing via range(len(...)).
    for url in build_url(base_url, num_start, num_end):
        print("*" * 40)
        print(url)

        url_text = get_page(url)
        # Heuristic: real course pages are long; short bodies mean the
        # course id does not exist (or the fetch failed and returned "").
        if len(url_text) > 1000:
            parseUrl_to_list(total_url, local_url, url_text)
            get_file_name(total_file_name, url_text)
            write_urlist_to_files(total_file_name, local_url)
            print("\n")
            # Reset the per-page list; total_url keeps accumulating.
            local_url.clear()
    try:
        # utf-8 avoids platform-default codec failures on non-ASCII URLs.
        with open('total_url.txt', 'w', encoding='utf-8') as fo:
            print("total_url save to:", fo.name)
            fo.writelines(total_url)
    except OSError as err:
        # Narrowed from a bare except: only filesystem errors are expected.
        print("write total_url to txt failed!", err)


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
