import requests
from bs4 import BeautifulSoup
import re
import os
from win32com.client import Dispatch

BASE_URL = 'https://www.shggzy.com'  # Base URL of the target site (Shanghai public resources trading portal)
MAIN_PATH = './file/'  # Root download folder; may be changed
FILE_PATH = './file/'  # Per-category folder; rebound at runtime in __main__ — do not edit here


def get_file_path(file_path=''):
    """Resolve *file_path* relative to this module's directory.

    :param file_path: relative path under the module folder
    :return: normalized path string
    """
    base_dir = os.path.dirname(__file__)
    return os.path.normpath(os.path.join(base_dir, file_path))


def judge_file_path():
    """Ensure the download folders (MAIN_PATH and FILE_PATH) exist.

    Creates each missing folder (including parents) and prints the outcome.
    The original body duplicated the identical check for both globals; one
    loop covers both. When FILE_PATH == MAIN_PATH (the default) the second
    pass simply reports the folder as existing.

    :return: None
    """
    for folder in (MAIN_PATH, FILE_PATH):
        if not os.path.exists(folder):
            os.makedirs(folder)
            print(f"文件夹 {folder} 创建成功")
        else:
            print(f"文件夹 {folder} 存在")


def get_html(url, montage='', timeout=30):
    """Fetch a page and return its HTML text.

    :param url: base URL
    :param montage: suffix appended to *url* (path or query fragment)
    :param timeout: seconds before the request is aborted (new parameter,
        default 30 — the original call had no timeout and could hang the
        crawl forever on a dead server)
    :return: decoded response body
    """
    response = requests.get(url + montage, timeout=timeout)
    return response.text


def get2urls():
    """Collect second-level page URLs from the current listing page.

    Scans every <li> under the first div.gui-title-bottom of the
    module-level ``soup`` and extracts the first single-quoted string
    from each item's raw markup (the onclick-style link target).

    :return: list of extracted URL strings
    """
    container = soup.select('div.gui-title-bottom')[0]
    link_pattern = re.compile(r'\'(.*?)\'')
    collected = []
    for item in container.find_all('li'):
        found = link_pattern.search(str(item))
        if found is None:
            print("No match found.")
        else:
            collected.append(found.group(1))
    return collected


def write_txt(text, file_name):
    """Persist page text as ``<FILE_PATH>/<file_name>.txt`` in UTF-8.

    :param text: page content to store
    :param file_name: base name without extension
    :return: None
    """
    target = get_file_path(f'{FILE_PATH}/{file_name}.txt')
    with open(target, 'w', encoding='utf8') as handle:
        handle.write(text)
    print(f'{file_name}网页写入完成')


def doc2docx(file_path):
    """Convert a .doc file to .docx via the Word COM interface.

    Saves a sibling file with a .docx extension, then deletes the original
    .doc. Requires Microsoft Word on the host (win32com Dispatch).

    Fix over the original: the Word application was never ``Quit()``, so
    every conversion leaked a WINWORD.EXE process, and ``os.remove`` ran
    even when the save had failed. Cleanup now happens in ``finally``
    blocks and the source file is only removed after a successful save.

    :param file_path: path of the .doc file to convert
    :return: None
    """
    word = Dispatch("Word.Application")
    new_file_path = os.path.splitext(file_path)[0] + ".docx"
    try:
        doc = word.Documents.Open(file_path)
        try:
            # 16 == wdFormatXMLDocument (the .docx format)
            doc.SaveAs(new_file_path, 16)
        finally:
            doc.Close()
    finally:
        word.Quit()

    # Remove the source only after the conversion succeeded.
    os.remove(file_path)
    print(f"{file_path}已经被成功转换为{new_file_path}")


def write_file(response, file_name):
    """Write a downloaded attachment to disk, converting .doc to .docx.

    :param response: requests response whose body is the attachment bytes
    :param file_name: file name (with extension) stored under FILE_PATH
    :return: None
    """
    file_path = get_file_path(f'{FILE_PATH}/{file_name}')
    with open(file_path, 'wb') as file:
        file.write(response.content)
    print(f'{file_name}附件写入完成')
    # Fix: check '.doc' (with the dot). The original endswith('doc') also
    # fired on any name that merely ends in 'doc' without an extension dot
    # ('.docx' was never affected — its last characters are 'ocx').
    if file_name.endswith('.doc'):
        doc2docx(file_path)


def get_title():
    """Return the article title from the current page.

    Reads the <h2> inside div.content-box of the module-level ``soup``.

    :return: stripped title text
    """
    heading = soup.select_one('div.content-box').select_one('h2')
    return heading.text.strip()


def get_publish_time():
    """Extract the publish timestamp and make it filename-safe.

    Searches div.content-box of the module-level ``soup`` for
    '发布时间：YYYY-MM-DD HH:MM' and converts every separator
    ('-', ' ', ':') to an underscore.

    :return: timestamp like 'YYYY_MM_DD_HH_MM'
    """
    box_text = soup.select_one('div.content-box').text
    found = re.search(r'发布时间：(\d{4}-\d{2}-\d{2} \d{2}:\d{2})', box_text)
    raw_time = found.group(1)
    # Single regex pass instead of three chained str.replace calls.
    return re.sub(r'[-: ]', '_', raw_time)


def save_content(file_name):
    """Extract the article body text and write it to a txt file.

    Strips surrounding whitespace, removes ASCII and full-width spaces,
    then delegates to write_txt.

    :param file_name: base file name (without extension)
    :return: None
    """
    body = soup.select('div.content')[0]
    cleaned = body.text.strip()
    for blank in (" ", '　'):
        cleaned = cleaned.replace(blank, "")
    write_txt(cleaned, file_name)


def save_file(file_name):
    """Download the current page's attachments (doc/pdf/docx links).

    Iterates every <a> inside div.content of the module-level ``soup``;
    site-relative hrefs are resolved against BASE_URL. Each successful
    download is handed to write_file as ``file_name + <basename>``.

    Fixes over the original: the redundant ``if len()==0`` / ``if a_list``
    pair is an early return, the chained endswith calls are one tuple
    check, and the download request gets a timeout so a dead link cannot
    hang the crawl.

    :param file_name: prefix (e.g. '<id>-') for stored attachment names
    :return: None
    """
    div_content = soup.select('div.content')[0]
    a_list = div_content.find_all('a')
    if not a_list:
        # html_id is the module-level article counter set in __main__.
        print(f'{html_id}-该网页没有附件')
        return
    for a in a_list:
        href = str(a['href'])
        # Same suffixes as the original chained checks ('doc' also admits
        # extensionless names ending in 'doc', preserved for compatibility).
        if not href.endswith(('doc', 'pdf', 'docx')):
            continue
        # Resolve site-relative links against the portal root.
        url = f'{BASE_URL}{href}' if href.startswith('/') else href
        response = requests.get(url, timeout=30)
        if response.status_code == 200:
            name = href.split('/')[-1]
            write_file(response, f'{file_name}{name}')


if __name__ == '__main__':
    # Category label -> URL path segment on the portal.
    data_obj = {
        '上海综合法律法规': 'zcfgzhfg',
        '上海行业政策法规': 'zcfg',
        '上海交易规则': 'xxgkgz'
    }
    html_id = 1  # Running counter; an article and its attachments share one id
    for key, value in data_obj.items():
        # Rebind the module-level FILE_PATH so write_txt/write_file above
        # store into this category's sub-folder.
        FILE_PATH = MAIN_PATH + key
        judge_file_path()  # Create the target folders if they do not exist
        page = 1
        while True:
            # List pages are addressed as <BASE_URL>/<section>_<page>.
            html = get_html(f'{BASE_URL}/{value}', f'_{page}')
            page += 1
            soup = BeautifulSoup(html, 'html.parser')
            urls = get2urls()
            if len(urls) == 0:
                # An empty listing page marks the end of this category.
                print('爬取结束')
                break
            for url in urls:
                # Fetch the article page and rebind the module-level soup
                # that get_title/get_publish_time/save_* read.
                html = get_html(BASE_URL, url)
                soup = BeautifulSoup(html, 'html.parser')
                title = get_title()
                publish_time = get_publish_time()
                save_content(f'{html_id}-{publish_time}-{title}')
                try:
                    # Attachments are best-effort; a failed download must not
                    # abort the crawl.
                    save_file(f'{html_id}-')
                except Exception as e:
                    print(f'{html_id}-附件无法下载：{e}')
                html_id += 1
