"""
@filename=get_html.py
@version=0.1.0.20240110
@author=amnotgcs
@createTime=20240110 19:38
@lastModifiedTime=20240110 19:38
@description=批量保存HTML文件
@target.url=https://bidding.sinopec.com/tpfront/xxgg/004001/
"""
import json
import logging

import requests
from parsel import Selector

from persistence import html_to_file, pdf_to_file
from utils import parse_bidding_list_of_html, parse_pdf_link_in_bidding_html
from config import BIDDING_INFO_FILENAME


# Search endpoint for bidding category 004001 (procurement announcements).
QUERY_API = 'https://bidding.sinopec.com/tpfront/CommonPages/searchmore.aspx?CategoryNum=004001'
# Browser-like request headers; the site expects a form-encoded POST with a Referer.
HEADER = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'Content-Type': 'application/x-www-form-urlencoded',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,zh-TW;q=0.5',
    'Referer': 'https://bidding.sinopec.com/tpfront/CommonPages/searchmore.aspx?CategoryNum=004001',
}
# Per-request timeout in seconds for all HTTP calls in this module.
HTTP_REQUEST_TIMEOUT = 15


# Logging configuration (module-level side effect: configures the root logger).
log_format = '[%(asctime)s] %(levelname)s %(message)s'
logging.basicConfig(format=log_format, level=logging.INFO)


def retrieve_html(url: str, pyload: dict = None) -> str:
    """
    Fetch a page via HTTP POST and return its HTML text.

    Parameters
    ----------
    url : str
        Target page URL.
    pyload : dict, optional
        Form payload for the POST request (sent url-encoded per HEADER).

    Returns
    -------
    str
        The response body, or an empty string on any request failure.
    """
    try:
        response = requests.post(url, headers=HEADER, data=pyload, timeout=HTTP_REQUEST_TIMEOUT)
        # Surface HTTP 4xx/5xx instead of silently returning an error page.
        response.raise_for_status()
        return response.text
    except requests.exceptions.RequestException as e:
        # RequestException is the base class: also covers Timeout and HTTPError,
        # which the previous ConnectionError-only handler let propagate.
        # Log at ERROR level (was INFO) since this is a failure path.
        logging.error(f'访问{url}时出错: {e}')
    return ''


def retrieve_pdf(pdf_link: str) -> bytes:
    """
    Download a PDF file from the given link.

    Parameters
    ----------
    pdf_link : str
        Hyperlink of the target PDF.

    Returns
    -------
    bytes
        Raw bytes of the PDF, or an empty byte string on any request failure.
    """
    try:
        pdf_response = requests.get(pdf_link, headers=HEADER, timeout=HTTP_REQUEST_TIMEOUT)
        # Surface HTTP 4xx/5xx so a server error page is not saved as a "PDF".
        pdf_response.raise_for_status()
        return pdf_response.content
    except requests.exceptions.RequestException as e:
        # Base class catch: also covers Timeout and HTTPError, which the
        # previous ConnectionError-only handler let propagate.
        logging.error(f'下载pdf-{pdf_link}时出错: {e}')
    return b''


def get_sinopec_bidding_amount() -> dict:
    """
    Get the number of Sinopec bidding documents and result pages.

    Returns
    -------
    dict
        {
            'total_bidding_amount': int,  # total number of bidding documents
            'total_page_amount': int,     # total number of result pages
        }
        Both values are 0 when the page cannot be fetched or parsed.
    """
    index_html_text = retrieve_html(QUERY_API)
    selector = Selector(index_html_text)
    total_bidding_amount = selector.xpath('//*[@id="MoreinfoListsearch1_Pager"]/div[1]/font[1]/b/text()').get()
    total_page_amount = selector.xpath('//*[@id="MoreinfoListsearch1_Pager"]/div[1]/font[2]/b/text()').get()
    # retrieve_html returns '' on failure and .get() returns None on an XPath
    # miss; the old code then crashed on int(None). Fall back to zeros instead.
    if total_bidding_amount is None or total_page_amount is None:
        logging.error('未能从页面解析出招标文件数量')
        return {
            'total_bidding_amount': 0,
            'total_page_amount': 0,
        }
    return {
        'total_bidding_amount': int(total_bidding_amount),
        'total_page_amount': int(total_page_amount),
    }


def get_all_sinopec_bidding(page_start: int = 1, page_end: int = 10) -> None:
    """
    Batch-download Sinopec bidding documents (HTML plus linked PDF).

    For every page in [page_start, page_end] this fetches the listing,
    saves each announcement's HTML to disk, then downloads the linked
    bidding PDF when one is present.

    Parameters
    ----------
    page_start : int
        First listing page to fetch (inclusive).
    page_end : int
        Last listing page to fetch (inclusive).
    """
    # ASP.NET postback payload: the pager input selects the listing page.
    pyload = {
        'MoreinfoListsearch1$Pager_input': 0,
        '__EVENTTARGET': 'MoreinfoListsearch1$Pager',
    }
    for page in range(page_start, page_end + 1):
        logging.info('='*100)
        logging.info(f'正在获取第{page}页的招标文件')
        logging.info('='*100)
        pyload['MoreinfoListsearch1$Pager_input'] = page
        html_text = retrieve_html(QUERY_API, pyload=pyload)
        bidding_list = parse_bidding_list_of_html(html_text)
        for bidding in bidding_list:
            # Save the announcement HTML.
            bidding_title = bidding.get('title')
            logging.info(f'正在下载[{bidding_title}]')
            bidding_html = retrieve_html(bidding.get('href'))
            html_to_file(bidding_html, bidding_title)
            logging.info(f'{bidding_title}写入本地完成')
            # Locate and save the linked bidding PDF, if any.
            logging.info('正在查找对应招标文件pdf')
            pdf_info = parse_pdf_link_in_bidding_html(bidding_html)
            pdf_link = pdf_info.get('pdf_link')
            if pdf_link:
                pdf_to_file(retrieve_pdf(pdf_link), bidding_title)
                logging.info('招标文件.pdf写入本地完成')
            else:
                # logging.warn is a deprecated alias; use logging.warning.
                logging.warning('未找到对应招标文件pdf')


def get_all_sinopec_bidding_info(page_start: int, page_end: int) -> list:
    """
    Batch-collect Sinopec bidding metadata and persist it as JSON.

    Parameters
    ----------
    page_start : int
        First listing page to fetch (inclusive).  (The old annotation said
        ``str``, but the value is fed to ``range()`` and must be an int.)
    page_end : int
        Last listing page to fetch (inclusive).

    Returns
    -------
    list
        The collected bidding entries (possibly partial if an error
        occurred mid-run).  The same data is written to
        BIDDING_INFO_FILENAME as JSON; previously the function was
        annotated ``-> dict`` but returned nothing.
    """
    # ASP.NET postback payload: the pager input selects the listing page.
    pyload = {
        'MoreinfoListsearch1$Pager_input': 0,
        '__EVENTTARGET': 'MoreinfoListsearch1$Pager',
    }
    bidding_info = []
    try:
        for page in range(page_start, page_end + 1):
            logging.info('='*100)
            logging.info(f'正在获取第{page}页的招标信息')
            logging.info('='*100)
            pyload['MoreinfoListsearch1$Pager_input'] = page
            html_text = retrieve_html(QUERY_API, pyload=pyload)
            bidding_list = parse_bidding_list_of_html(html_text)
            bidding_info.extend(bidding_list)
    except Exception as e:
        # Top-level boundary: keep whatever was collected and still persist it.
        logging.error(f'获取招投标信息时出现异常: {e}')
    with open(BIDDING_INFO_FILENAME, 'wt', encoding='UTF-8') as file:
        try:
            json.dump(bidding_info, file, ensure_ascii=False, indent=4)
        except Exception as e:
            # Best-effort fallback: dump the repr so the data is not lost.
            logging.error(f'写入招投标信息到文件时出现异常: {e}')
            file.write(str(bidding_info))
    return bidding_info


if __name__ == '__main__':
    # Script entry point: download pages 1-10 (the defaults) of bidding files.
    get_all_sinopec_bidding()
