# -*- coding: utf-8 -*-
"""
晓黑板将于2022.8.31下线，为快速保存其中的数据，本人设计了此爬虫程序。
期望通过这些数据，保留下珍贵的资源，以供日后学习和使用，这些数据将不会应用于任何商业场景。
所有数据将被保存到 D:/D盘桌面/晓黑板资料 目录下。未经本人允许，请勿使用。
本程序将会爬取 4.2_(案例)搜刮晓黑板.csv 中的数据。
"""

import os
import requests
import json
import time
import csv
from random import choice
from concurrent.futures import ThreadPoolExecutor


def read_page(text, detail_id=''):
    """Parse the API's JSON response and extract the post's fields.

    Args:
        text: raw JSON string returned by the xiaoheiban API.
        detail_id: post id, used only to label the log messages.

    Returns:
        dict containing whichever of title/subject/content/attachements/
        images are present in the response's ``data`` object; absent
        fields are logged and skipped.
    """
    result = json.loads(text)
    # A missing/empty 'data' object is treated the same as every field
    # being absent (the original per-key KeyError handling did likewise).
    data = result.get('data') or {}
    info = {}
    for kind in ('title', 'subject', 'content', 'attachements', 'images'):
        if kind in data:
            info[kind] = data[kind]
        else:
            print(detail_id + " " + "未识别到" + kind + "。程序仍将继续")
    return info


def download(url, file):
    """GET *url* with a randomly chosen User-Agent and write the raw
    response body into the already-open binary *file* object.

    The caller owns *file*: it is neither flushed nor closed here.

    Raises:
        requests.HTTPError: on a 4xx/5xx response.
        requests.Timeout: if the server stalls past the timeout.
    """
    # Rotate user agents so the crawl looks less like a single bot.
    useragent = choice([
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)"
    ])
    # timeout keeps a dead server from hanging the whole crawl forever;
    # raise_for_status surfaces 4xx/5xx instead of silently saving an
    # HTML error page as the downloaded "file".
    resp = requests.get(url=url, headers={"User-agent": useragent}, timeout=30)
    resp.raise_for_status()
    file.write(resp.content)

def download_images(urls, path):  # 保存图片
    """Download every image URL into directory *path* (created if missing).

    The file name is the last path segment of each URL, e.g.
    .../images/F76BB18D-....jpg -> F76BB18D-....jpg.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    for url in urls:
        name = url.split('/')[-1]
        # 'with' guarantees the handle is closed even if download() raises,
        # fixing the leak when a single image request fails mid-loop.
        with open(os.path.join(path, name), mode='wb') as f:
            download(url, f)
        print(name + " 保存成功！")
        time.sleep(1)  # throttle: be polite to the server

def download_attachements(datas, path):  # 保存文件
    """Download every attachment into directory *path* (created if missing).

    Each element of *datas* is a dict from the API whose 'id' field is the
    download URL and whose 'name' field is the file name to save under.
    """
    if not os.path.exists(path):
        os.makedirs(path)
    for data in datas:
        # 'with' guarantees the handle is closed even if download() raises.
        with open(os.path.join(path, data['name']), mode='wb') as f:
            download(url=data['id'], file=f)
        print(data['name'] + " 保存成功！")
        time.sleep(1)  # throttle: be polite to the server


def one_detail(kind, detail_id):
    """Crawl one post: fetch its JSON, save the raw text, then download
    any images and attachments it references.

    Args:
        kind: API resource kind, the first URL path segment.
        detail_id: the post's id, the second URL path segment.

    Raises:
        Propagates network errors and KeyError (e.g. missing 'title')
        to the caller, which logs them.
    """
    # 1. Fetch the XHR payload. The Authorization/X-Content-Security
    #    headers are a captured session and will expire — refresh by hand.
    print(f"https://platform.xiaoheiban.cn/api/{kind}/{detail_id}")
    resp = requests.get(
        url=f"https://platform.xiaoheiban.cn/api/{kind}/{detail_id}",
        headers={
            "Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2NjE4NTc3MTksImlhdCI6MTY1OTQzODUxOSwidXNyIjoiNWQ1ZmEyOWU0MzliODgwMDAxODZmNzYwIiwidmVyIjoxfQ.zOwFp8L14HXHLcy9BGwozOXutX1c9OjUOIv9phYPAe8",
            "X-User-Id": "5d5fa29e439b88000186f760",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
            "Referer": "https://pc.xiaoheiban.cn/entry/index.html",
            "X-Content-Security": "key=v2Ww;secret=n+bIMTVv2u41FLErrx/cHv7aUGyktcUUspE9g/otU9K3HdeSZHuVmAgdkQdz6VDBhX3bWrBWbtAycAMKAzGdyhaCI0pRj9eeyn3yQIlML2onwZFfVgiAgOla0M/jh4SMpse61k5lsKevCdrzIQCIXuhmWZqKtaDlQKVqmk0S0rc=;signature=W7bu8y38ubqSgCL1FZTf0lTBWv5nclnJv/T2hqOXs28="
        })
    print(resp)
    text = resp.text
    print(text)

    # 2. Parse the JSON into a field dict.
    data = read_page(text, detail_id)
    print(detail_id + " " + str(data))

    # 3. Save the raw response. The post's directory is reused by the
    #    image/attachment steps below, so build it once.
    base = f"D:\\D盘桌面\\晓黑板资料（python正在自动下载）\\{data['title']}-{detail_id}"
    if not os.path.exists(base):
        os.makedirs(base)
    # encoding='utf-8' avoids UnicodeEncodeError under the default GBK
    # codec on Chinese-locale Windows; the fallback stays as a safety net.
    with open(base + '\\detail_information.txt', mode='w', encoding='utf-8') as f:
        try:
            f.write(text)
        except UnicodeError:
            # UnicodeError is the base of UnicodeDecodeError and
            # UnicodeEncodeError (the old `except A or B or C` only ever
            # caught A anyway — this makes the intent explicit).
            f.write("UnicodeError")

    # 4. Download the images, if the post has any.
    try:
        images_url = [image_data['original'] for image_data in data['images']]
        download_images(images_url, path=base + "\\images")
    except KeyError:
        print(detail_id + " " + "将跳过图片解析与下载。")

    # 5. Download the attachments, if the post has any.
    try:
        download_attachements(data['attachements'], path=base + "\\attachements")
    except KeyError:
        print(detail_id + " " + "将跳过附件解析与下载。")

def one_crawl(kind, detail_id):
    """Thread-pool entry point: run one_detail() and log failures so a
    worker thread never dies silently.

    Encoding errors are expected and non-fatal (logged and swallowed);
    anything else is logged and re-raised so the Future records it.
    """
    try:
        one_detail(kind, detail_id)
    except UnicodeError as err:
        # Bug fix: the original `except A or B or C` is a boolean
        # expression that evaluates to just A. UnicodeError is the base
        # class of UnicodeDecodeError/UnicodeEncodeError, so it covers all.
        print(f"!!! ERROR: {kind}/{detail_id} 发生了一个编码错误: " + str(err))
    except Exception as err:
        print(f"!!! ERROR: {kind}/{detail_id} 发生了一个错误: " + str(err))
        raise


if __name__ == '__main__':
    # Read the (kind, detail_id) rows up front; 'with' closes the handle
    # even if the executor below raises (the old explicit close leaked it
    # on error). newline='' is the csv-module convention for open().
    with open("4.2_(案例)搜刮晓黑板.csv", mode='r', newline='') as f:
        rows = list(csv.reader(f))  # 获取需要爬取的所有detail的id

    # Two workers: enough to overlap network waits without hammering
    # the server.
    with ThreadPoolExecutor(2) as tpe:
        for row in rows:
            # Guard against blank/short CSV lines, which would otherwise
            # raise IndexError here in the main thread.
            if len(row) >= 2:
                tpe.submit(one_crawl, kind=row[0], detail_id=row[1])

    print("Over!")  # 程序结束
