"""
Author : Mr.Sun
DateTime : 2022/9/26.14:24
FileName : Political.py
Desc :  时政数据抓取并保存成pdf
"""
import os
import re
import json
import time
import uuid
import requests
import platform
from bs4 import BeautifulSoup
from pdfy import Pdfy
from utils.logger import logger
from core.OperateSqlite import sqliteOperate

# Project root: the parent of this file's directory.
project_dir = os.path.dirname(os.path.dirname(__file__))
# Category name used both as the PDF subfolder and the report title.
pdf_title = "时政"

# Destination folder for generated PDFs: <project>/pdf/时政.
file_home = os.path.join(project_dir, "pdf", pdf_title)
# exist_ok avoids the check-then-create race of exists() + makedirs().
os.makedirs(file_home, exist_ok=True)


def get_sz_url(base_url):
    """Fetch *base_url* and return the article links under /ggjczs/mszt/.

    Args:
        base_url: URL of the listing page to scrape.

    Returns:
        list[str]: absolute URLs containing "/ggjczs/mszt/"; an empty
        list if the request or parsing fails, so callers can iterate the
        result unconditionally.
    """
    try:
        # Fetch the page; a timeout prevents hanging forever on a dead host.
        web_data = requests.get(url=base_url, timeout=30)
        web_data.encoding = "utf-8"
        content = web_data.text
        bs = BeautifulSoup(content, "html.parser")
        # Collect every absolute (http/https) link on the page.
        sz_url = [link.get('href')
                  for link in bs.find_all('a', attrs={'href': re.compile("^http")})]
        logger.info("get page all_url is " + json.dumps(sz_url))
        # Keep only the current-affairs article links.
        result = [temp for temp in sz_url if "/ggjczs/mszt/" in temp]
        logger.info("get all url list is : " + json.dumps(result))
        return result
    except Exception as e:
        logger.error("get all url_list occur error is " + str(e))
        # Empty list instead of None keeps iteration by callers safe.
        return []


# Convert one article page to a PDF file.
def get_pdf_by_url(url):
    """Render the article page at *url* to a PDF under ``file_home``.

    The PDF is named after the page's <h1> headline (spaces stripped);
    when no headline can be extracted, a random UUID name is used.

    Args:
        url: article page URL.

    Returns:
        str | None: ``file_home`` (the folder the PDF was saved in) on
        success, or None if fetching or converting failed.
    """
    try:
        # Fetch the page; a timeout prevents an indefinite hang.
        web_data = requests.get(url=url, timeout=30)
        web_data.encoding = "utf-8"
        content = web_data.text
        bs = BeautifulSoup(content, "html.parser")
        # The headline lives at a fixed spot in the page layout; the page
        # may not match, so do not index blindly into the result list.
        title_nodes = bs.select(
            "body > div > div.content > div.c_l > div.c_l_c > div.c_l_c_2 > h1")
        title = ""
        if title_nodes:
            # Pull the text between '>' and '<' of the <h1> markup.
            pat = re.compile(r'(?<=>).*(?=<)')
            title = ''.join(pat.findall(str(title_nodes[0]))).replace(" ", "")
        if title:
            name_path = os.path.join(file_home, title + '.pdf')
        else:
            # Fall back to a random file name when no headline was found.
            uuid_temp = str(uuid.uuid4()).replace('-', '').upper()
            name_path = os.path.join(file_home, uuid_temp + ".pdf")
        # Pick the chromedriver binary matching the host OS.
        if platform.system() == 'Windows':
            driver_path = "chromedriver.exe"
        else:
            driver_path = "chromedriver"
        driver_path_full = os.path.join(project_dir, 'utils', driver_path)
        # Render the live page to PDF via headless Chrome.
        p = Pdfy(executable_path=driver_path_full)
        p.html_to_pdf(url, pdf_path=name_path)
        logger.info(url + " pdf convert success ，save in  " + file_home)
        return file_home
    except Exception as e:
        logger.error("convert pdf failed :" + str(e))
        return None


# Drive the whole scrape-convert-record pipeline.
def current_affairs_generate(location_url):
    """Scrape *location_url*, convert new articles to PDF and record them.

    URLs already present in the ``current_affairs`` table are skipped;
    each new URL is converted with ``get_pdf_by_url`` and inserted.

    Args:
        location_url: listing-page URL to scan for article links.
    """
    try:
        # Guard against None in case get_sz_url failed to return a list.
        url_list = get_sz_url(location_url) or []
        url_sql = "select url from current_affairs"
        data_url = sqliteOperate().search_data(url_sql)
        url_database = [data[0] for data in data_url]
        logger.info("search data is " + json.dumps(url_database))
        # Set gives O(1) membership tests for already-recorded URLs.
        seen = set(url_database)
        for url in url_list:
            if url in seen:
                logger.info(url + " already exist.")
                continue
            content = get_pdf_by_url(url)
            sql = "insert into current_affairs(url,content) values(?,?)"
            sqliteOperate().insert_data(sql=sql, data=(url, content))
            logger.info("insert data success.")
            # Be polite to the remote site between conversions.
            time.sleep(10)
        logger.info("current_affairs_generate generate success")
    except Exception as e:
        # logger.error (not .info) so failures surface at the right level.
        logger.error("current_affairs_generate occur error is " + str(e))


if __name__ == '__main__':
    # Listing page of the 时政 (current affairs) section to scrape.
    current_affairs_generate('https://www.chinagwy.org/html/ggjczs/mszt/202201/52_469149.html')
