#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
@author: JHC000abc@gmail.com
@file: crawl_jc.py
@time: 2025/3/14 15:15 
@desc: 

"""
import atexit
import re
from datetime import datetime
from urllib.parse import quote

from base import Base


class CrawlJuChao(Base):
    """Crawler for company announcements on cninfo.com.cn (巨潮网).

    Queries the cninfo full-text search API by company name, screens the
    returned announcement titles with helpers inherited from ``Base``, and
    aggregates per-company results into a timestamped temporary JSONL file.
    """

    def __init__(self):
        super().__init__()
        # Ensure cleanup/flush runs even on abnormal interpreter exit.
        atexit.register(self.exit_handler)
        # Full-text search endpoint; {searchkey} and {pageNum} are filled per request.
        self.search_url = "http://www.cninfo.com.cn/new/fulltextSearch/full?" \
                          "searchkey={searchkey}&isfulltext=false&sortName=pubdate&" \
                          "sortType=desc&pageNum={pageNum}&pageSize=100&sdate=2024-01-01&edate=2099-04-15"
        self.platform = "巨潮网"
        # Temporary per-run result file, timestamped to avoid collisions.
        self.result_tmp_jsonl = f"result_tmp_{self.platform}_{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.jsonl"
        self.thread_num = 10
        self.sleep_time = 5
        self.max_pages = 20
        # Snapshot the previous run's history before starting, so an
        # accidental run can be recovered from.
        self.start_recode_history()

    def thread(self, index, company_name, origin_id, save_folder):
        """Worker task: search one company and collect its announcement hits.

        :param index: 1-based position of the company in the input file
        :param company_name: company name used as the search keyword
        :param origin_id: caller-supplied identifier propagated into the result
        :param save_folder: directory under which PDF files would be saved
        :return: result dict from ``self.get_results()`` enriched with origin
                 info, per-URL ``success``/``failed`` maps, ``status`` and ``msg``
        """
        res = self.get_results()
        res["origin"].update(
            {
                "company_name": company_name,
                "origin_id": origin_id,
                "index": index,
                "data_from": "巨潮网"
            }
        )

        has_more = True
        page = 1
        total_pages = 1
        file_name_recode_set = set()
        # NOTE(review): the bound is deliberately kept at ``total_pages + 1``
        # as in the original — cninfo's "totalpages" field may be a 0-based
        # last-page index and ``hasMore`` guards termination; confirm against
        # the API before tightening (with the max_pages clamp this can fetch
        # max_pages + 1 pages).
        while has_more and page <= total_pages + 1:
            # FIX: ``from urllib.parse import quote`` used to execute on every
            # loop iteration; it is now a module-level import.
            search_url = self.search_url.format(searchkey=quote(company_name), pageNum=page)
            search_response = self.requests(search_url, self.headers, method="GET")
            if not self.check_response(search_response):
                res["time"] = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
                res["msg"] = f"搜索失败:第{page}页"
                return res
            search_results = search_response.json()
            # FIX: truthiness test instead of ``len(...) <= 0`` — also covers
            # a None / empty payload without raising TypeError.
            if not search_results or search_results["totalAnnouncement"] == 0:
                res["time"] = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
                res["msg"] = "相关公司信息为空"
                return res
            for announcement in search_results["announcements"]:
                # Strip search-hit markup and colons that break file names.
                title = re.sub("<em>|</em>|:|：", "", announcement["announcementTitle"])
                original_url = f'https://static.cninfo.com.cn/{announcement["adjunctUrl"]}'

                pdf_status, pdf_type = self.check_pdf_name(title)
                entry = {original_url: {
                    "title": title,
                    "status": pdf_status,
                    "type": pdf_type,
                }}
                if not pdf_status:
                    res["failed"].update(entry)
                else:
                    pdf_save_file, file_name_recode_set = self.check_file_name(title, company_name,
                                                                               file_name_recode_set,
                                                                               saveFolder=save_folder)
                    if pdf_save_file is not None:
                        # Download intentionally disabled; only metadata is recorded.
                        # self.download_pdf(original_url, self.headers, pdf_save_file)
                        entry[original_url].update({"save_file": pdf_save_file})
                        res["results"].append([{self.type_map.get(pdf_type, "default"): pdf_save_file}])
                        res["success"].update(entry)
            has_more = search_results["hasMore"]
            # Clamp crawl depth to the configured page limit.
            total_pages = min(search_results["totalpages"], self.max_pages)
            page += 1

        res["status"] = 0
        res["msg"] = "检查到有效数据" if len(res["success"]) > 0 or len(res["failed"]) > 0 else "未检查到有效数据"
        return res

    def process(self, *args, **kwargs):
        """Dispatch one crawl job per company listed in the input file.

        :param kwargs: must contain ``file`` — path of the spreadsheet that
                       yields (company_name, origin_id) pairs
        :return: path of the temporary JSONL result file
        """
        file = kwargs["file"]
        save_folder = f"./{self.platform}"
        # FIX: enumerate(..., start=1) replaces the manual ``index`` counter.
        for index, (company_name, origin_id) in enumerate(self.get_origin_company_info(file), start=1):
            self.pool.add_1job_with_callback(self.callback, self.thread, index, company_name, origin_id, save_folder)
        self.pool.stop()
        self.clear_tmp_result(self.result_tmp_jsonl)
        print(f"所有任务处理完成,整理结果参见：{self.result_tmp_jsonl},文件存储在:{save_folder}")
        return self.result_tmp_jsonl


if __name__ == '__main__':
    # Example input: D:\Desktop\采集主体 (1) (1).xlsx
    source_file = input("输入巨潮网采集主体文件路径:")
    crawler = CrawlJuChao()
    crawler.process(file=source_file)
