#! /usr/bin/env python
# -*- coding:utf-8 -*-

from urllib import request
import re
import json
import os
import datetime
import time

folder = 'saves'  # output directory for all downloaded JSON files
# makedirs(..., exist_ok=True) is a no-op when the folder already exists and
# avoids the check-then-create race of `if not exists: mkdir`.
os.makedirs(folder, exist_ok=True)


def callback(param):
    """Identity hook: hand *param* back to the caller unchanged."""
    result = param
    return result


def pull_json(url):
    json_source = request.urlopen(url).read().decode("utf-8")
    py_json_source = re.sub(r': *null', ": None", json_source)
    return py_json_source


def save_json(json_obj, file_path):
    """Serialize *json_obj* to *file_path* as pretty-printed UTF-8 JSON.

    Non-ASCII characters are written verbatim (ensure_ascii=False).
    """
    with open(file_path, 'w', encoding='utf-8') as out:
        out.write(json.dumps(json_obj, indent=4, ensure_ascii=False))
        

def single_page(page_url, filename='a.json'):
    """Download the data file backing a xuexi.cn article and save it into `folder`.

    Two URL schemes are recognized:
      * old style: https?://www.xuexi.cn/<path>/<page>.html
      * new style: https?://www.xuexi.cn/lgpage/detail/index.html?id=<digits>
    Anything else is reported on stdout and skipped (returns None).
    """
    old_style = re.match(r'(https?://www.xuexi.cn/.*?/)(.*?\.)html$', page_url)

    if old_style:
        # The page's data lives in a sibling file: <dir>/data<page>.js
        prefix, stem = old_style.groups()
        json_url = prefix + "data" + stem + 'js'

        # The .js file is a JS assignment (`globalCache = {...}` — inferred
        # from the original exec()+globalCache usage; confirm against a live
        # sample).  Slice out the object literal and json.loads() it instead
        # of exec()-ing remote code: the previous exec()+locals() trick ran
        # untrusted downloaded content and relied on exec() writing into a
        # function's locals(), which is CPython-specific and no longer works
        # under PEP 667 (Python 3.13+).
        js_source = request.urlopen(json_url).read().decode("utf-8")
        literal = js_source[js_source.find('{'):js_source.rfind('}') + 1]
        json_content = json.loads(literal)

        save_json(json_content, os.path.join(folder, filename))

    elif re.match(r'https?://www.xuexi.cn/lgpage/detail/index.html\?id=[0-9]+$', page_url):
        # Preserve the page's scheme (http/https) when building the data URL.
        scheme, article_id = re.match(r'(https?://).*id=([0-9]+)$', page_url).groups()
        json_url = scheme + "boot-source.xuexi.cn/data/app/" + article_id + '.js'

        # New-style data files are bare JSON literals, so parse them directly
        # (json.loads handles null/true/false natively — no rewriting needed).
        json_content = json.loads(request.urlopen(json_url).read().decode("utf-8"))

        # Keep the original naming quirk: new-style saves get '.json' appended.
        save_json(json_content, os.path.join(folder, filename) + '.json')
    else:
        print(f"未能识别您所输入的{page_url}，请核查其是否为合法网址！")
        
        
def channel2pages(channel_json_url):
    """Walk a channel's item list and archive every article published today.

    Items are assumed to be ordered newest-first: iteration stops at the
    first item that is older than today, or whose publishTime is missing
    or unparseable.
    """
    items = json.loads(request.urlopen(channel_json_url).read().decode("utf-8"))
    today = datetime.date.today()  # hoisted: one consistent date per run
    for item in items:
        try:
            publish_time = datetime.datetime.strptime(item['publishTime'], "%Y-%m-%d %H:%M:%S")
        except (KeyError, TypeError, ValueError):
            # No usable timestamp: treat as end of today's news (matches the
            # newest-first assumption above).  Narrowed from a bare `except:`
            # which also swallowed KeyboardInterrupt/SystemExit.
            break
        if publish_time.date() == today:
            print(f'正在处理链接 {item["url"]}')
            single_page(item['url'], item['itemType'] + '-' + item['title'])
        elif publish_time.date() < today:  # reached older news — stop
            break


if __name__ == '__main__':
    # Pull the master channel list, then archive today's articles from every
    # channel, pausing one second between channels.
    channel_list_url = 'https://www.xuexi.cn/lgdata/channel-list.json'
    channel_list = json.loads(request.urlopen(channel_list_url).read().decode("utf-8"))
    for channel in channel_list.values():
        try:
            # Inside the try so a channel entry missing 'channel_name' or
            # 'channel_id' is skipped instead of crashing the whole run.
            print(f"正在分析频道-{channel['channel_name']}")
            channel_id = channel['channel_id']
            channel_url = f'https://www.xuexi.cn/lgdata/{channel_id}.json'
            channel2pages(channel_url)
        except Exception as e:
            # Best effort: one broken channel must not abort the run.  The
            # original bare `except: pass` also swallowed KeyboardInterrupt
            # and hid every failure; this stays interruptible and reports.
            print(f"处理频道时出错：{e}")
        time.sleep(1)  # rest one second to lighten the server load