#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
@author: JHC000abc@gmail.com
@file: base.py
@time: 2025/3/12 14:15
@desc:

"""
import os
import re
import time
import json
import copy
import atexit
import requests
import traceback
from queue import Queue
from datetime import datetime
from cup.util import ThreadPool
from utils.util_excel import Excel
from utils.util_log import MyLogger
import urllib3
from urllib3.exceptions import InsecureRequestWarning
import random
import shutil
from utils.util_pdf import pdfCheck
from utils.util_imgs import ImagesMerge
from utils.util_entrypt import Entrypt

urllib3.disable_warnings(InsecureRequestWarning)
log = MyLogger("logs").get_config()


class Base(object):
    """Shared base class for the annual-report collection crawlers.

    Bundles everything a platform-specific crawler subclass needs:
    retrying HTTP access, PDF download and title filtering, a JSON
    de-duplication history, buffered JSONL persistence of per-company
    results, and the final Excel aggregation.
    """

    def __init__(self):
        # Numeric report-type code -> Chinese label (used in paths and Excel).
        self.type_map = {
            1: "年报",
            2: "年度审计报告",
            3: "社会责任报告",
            4: "环境、社会和管治报告",
            5: "可持续发展报告"
        }
        # NOTE(review): subclasses presumably set this; result_tmp_jsonl
        # below is built while it is still "" — confirm subclasses rebuild
        # the file name after overriding platform.
        self.platform = ""

        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
        }

        self.excel = Excel()
        self.success_list = []
        self.que = Queue()  # in-memory buffer of serialized result lines
        self.max_tmp = 500  # flush the buffer to disk every N results
        self.result_tmp_jsonl = f"result_tmp_{self.platform}_{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.jsonl"
        self.history = None  # filled by load_history(); key -> timestamp str
        self.thread_num = 10
        self.sleep_time = 10  # upper bound (seconds) for the retry back-off
        self.max_pages = 20
        self.pool = ThreadPool(minthreads=3, maxthreads=self.thread_num, daemon_threads=True)
        self.pool.start()

    def start_recode_history(self):
        """Archive the previous run's ./history.json into ./history/.

        :return: None
        """
        os.makedirs("history", exist_ok=True)
        recode_his_file = f"./history/history_{self.platform}_{datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.json"
        if os.path.exists("./history.json"):
            shutil.copy("./history.json", recode_his_file)
            print(f"已记录上次运行记录:{os.path.abspath(recode_his_file)}")
        else:
            print(f"不存在上次运行记录:{os.path.abspath(recode_his_file)}")

    def load_history(self):
        """Load the de-duplication history from history.json.

        :return: dict of already-processed keys (empty when no history file).
        """
        print("加载已经处理过的数据")
        data = {}
        if os.path.exists("history.json"):
            with open("history.json", "r", encoding="utf-8") as fp:
                data = json.loads(fp.read())
        else:
            print("不存在历史数据")
        return data

    def save_history(self):
        """Persist the in-memory history to history.json.

        :return: None
        """
        # Truthiness guard covers both None (history never loaded) and an
        # empty dict — the original len(self.history) raised TypeError
        # when self.history was still its initial None.
        if self.history:
            with open("history.json", "w", encoding="utf-8") as fp:
                fp.write(json.dumps(self.history, ensure_ascii=False, indent=4))
            print(f"成功保存历史纪录：{len(self.history)} 条")
        else:
            print("不存在任何历史数据，不写入")

    def check_history(self, key):
        """Check whether *key* was already processed; record it if not.

        :param key: de-duplication key (e.g. a file's md5 digest).
        :return: True when the key already exists; otherwise False, and
            the key is stored with the current timestamp.
        """
        if self.history.get(key) is not None:
            return True
        else:
            self.history[key] = f"{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
            return False

    def exit_handler(self):
        """Flush the result buffer and remove the platform temp folder.

        Intended as a shutdown hook (e.g. registered via atexit).
        :return: None
        """
        self.clear_tmp_result(self.result_tmp_jsonl)
        # NOTE(review): assumes self.platform names an existing folder;
        # with the default "" this raises — confirm subclasses set it.
        shutil.rmtree(self.platform)

    def get_results(self):
        """Build a fresh, empty result-record template.

        :return: dict with default status/msg/results/success/failed/
            origin/time fields.
        """
        return {
            "status": -1,
            "msg": "",
            "results": [],
            "success": {},
            "failed": {},
            "origin": {},
            "time": "1990-01-01 00:00:00"
        }

    def requests(self, url, headers, cookies=None, params=None, json_data=None, method="POST"):
        """Perform an HTTP request with retries and per-site quirks.

        :param url: target url.
        :param headers: request headers.
        :param cookies: optional cookies dict.
        :param params: query params (GET only).
        :param json_data: JSON body (POST only).
        :param method: "POST" or "GET"; any other value sends nothing.
        :return: the last ``requests.Response`` obtained, or None.
        """
        retry_times = 5
        res = None
        while retry_times > 0:
            try:
                if method == "POST":
                    res = requests.post(url, headers=headers, cookies=cookies, json=json_data, verify=False,
                                        timeout=(5, 15))
                elif method == "GET":
                    res = requests.get(url, headers=headers, cookies=cookies, params=params, verify=False,
                                       timeout=(5, 15))
                else:
                    pass
                if res is not None:
                    # Custom handling for cninfo: a 403 does not consume a
                    # retry (the finally below decrements it back to net 0).
                    if res.status_code == 403:
                        retry_times += 1
                    # Custom handling for 10jqka: retry on 500.
                    if "ft.51ifind.com" in url and res.status_code == 500:
                        retry_times += 1
                        print("tonghuashu response=500 retry")
                        continue
                    # Custom handling for 10jqka: 401 means cookies expired —
                    # give up immediately and let the caller see the 401.
                    if res.status_code == 401:
                        retry_times = 0
                        return res
                    elif res.status_code in (200, 500):
                        return res

            except Exception:
                # Fix: print_exc() returns None, so the original logged the
                # literal string "None"; format_exc() yields the traceback.
                log.exception(traceback.format_exc())
            finally:
                retry_times -= 1
                time.sleep(random.randint(3, self.sleep_time))
        return res

    def check_response(self, response):
        """Return True when *response* carries HTTP status 200.

        :param response: a ``requests.Response`` or None.
        :return: bool — False for None, errors, or any non-200 status.
        """
        try:
            if response.status_code == 200:
                return True
        except Exception:
            log.exception(traceback.format_exc())
        return False

    def download_pdf(self, url, headers, save_file):
        """Download *url* to *save_file* when the GET succeeds.

        :param url: pdf url.
        :param headers: request headers.
        :param save_file: destination path (written in binary mode).
        :return: True on success, False otherwise.
        """
        pdf_response = self.requests(url, headers, method="GET")
        if self.check_response(pdf_response):
            with open(save_file, "wb") as fp:
                fp.write(pdf_response.content)
            return True
        return False

    def check_pdf_name(self, name):
        """Decide from its title whether a file should be downloaded.

        :param name: announcement / file title.
        :return: (True, type_code) when the title matches one of the
            report categories in ``type_map``; otherwise (False, "").
        """
        name = name.replace(" ", "")
        # Fix: raw string — the original non-raw pattern relied on the
        # invalid escape sequences "\[" and "\（" (SyntaxWarning on
        # modern Python); the compiled regex is identical.
        name = re.sub(r"\[\d+\]|（\d+", "", name)
        if all(x not in name for x in
               ["半年", "中期", "季度", "修订", "公告", "月", "摘要", "内部控制", "说明会", "通知", "记录表", "报告书",
                "2023年度报告", "季未"]):
            if "2024年" in name or ("2023" in name and "2024" in name and "（20" not in name and "2023年" not in name):
                name = name.split("[")[0].split("（")[0]
                if any(x in name for x in ["年报", "年度报告"]):
                    return True, 1
                if any(x in name for x in
                       ["审计报告", "年度审计报告", "审计的财务报告", "审计的合并", "母公司财务报告"]):
                    return True, 2
                if "社会责任报告" in name:
                    return True, 3
                if any(x in name for x in
                       ["社会责任", "社责", "管治", "社会与治理（ESG）", "社会与治理", "社会及治理", "公司治理", "ESG",
                        "环境信息"]):
                    return True, 4
                if "可持续发展报告" in name:
                    return True, 5
        return False, ""

    def check_file_name(self, pdfName, keyWord, file_name_recode_set, saveFolder=r"D:\Desktop\6\tmp"):
        """Build the normalized save path for a matched pdf title.

        :param pdfName: original announcement title.
        :param keyWord: company name used as folder / file-name prefix.
        :param file_name_recode_set: set collecting generated names.
        :param saveFolder: root folder of the pdf tree (Windows-style path).
        :return: (pdf_file_path, file_name_recode_set), or None when the
            title does not map to a known report type.
        """
        pdf_status, pdf_type = self.check_pdf_name(pdfName)
        pdf_type = self.type_map.get(pdf_type)
        if pdf_type is None:
            return None
        if "2023" in pdfName and "2024" in pdfName:
            year = "2023-2024"
        else:
            year = "2024"
        new_name = f"{keyWord}{year}年{pdf_type}"

        pdf_tmp_folder = fr"{saveFolder}\pdf\{keyWord}\{pdf_type}"
        os.makedirs(pdf_tmp_folder, exist_ok=True)
        pdf_file = os.path.join(pdf_tmp_folder, new_name)
        pdf_file = f"{pdf_file}.pdf"
        file_name_recode_set.add(new_name)
        return pdf_file, file_name_recode_set

    def save_result(self, tag, lis):
        """Write crawl rows to a timestamped tab-separated text file.

        :param tag: platform tag embedded in the file name.
        :param lis: list of rows; each row is an iterable of strings.
        :return: None
        """
        log.info(f"{tag} 抓取到新结果:{len(lis)}")
        if len(lis) > 0:
            save_file = f"result_{tag}_{datetime.now().strftime('%Y%m%d%H%M%S')}.txt"
            with open(save_file, "w", encoding="utf-8") as fp:
                for i in lis:
                    line = "\t".join(i)
                    fp.write(f"{line}\n")

    def get_origin_company_info(self, file):
        """Yield (company_name, origin_id) pairs read from an Excel file.

        :param file: path of the source Excel file.
        :return: generator of (first column with spaces stripped,
            second column) per row.
        """
        for args in self.excel.read_yield_by_pandas(file):
            line = args["line"]
            yield line[0].replace(" ", ""), line[1]

    def process(self, *args, **kwargs):
        """Platform-specific crawl entry point; subclasses override.

        :param args: subclass-defined.
        :param kwargs: subclass-defined.
        :return: None
        """

    def callback(self, status, result):
        """Thread-pool callback: buffer one result, flushing in batches.

        :param status: pool completion status (unused here).
        :param result: JSON-serializable result record.
        :return: None
        """
        log.info(result)
        self.que.put(json.dumps(copy.deepcopy(result), ensure_ascii=False))
        if self.que.qsize() >= self.max_tmp:
            with open(self.result_tmp_jsonl, "a", encoding="utf-8") as fp:
                for i in range(self.max_tmp):
                    res = self.que.get()
                    fp.write(f"{res}\n")

    def clear_tmp_result(self, file):
        """Drain the remaining result buffer into *file*, one line each.

        :param file: jsonl path to append to.
        :return: None
        """
        if self.que.empty():
            return
        # Open once instead of re-opening the file per queue entry.
        with open(file, "a", encoding="utf-8") as fp:
            while not self.que.empty():
                fp.write(f"{self.que.get()}\n")

    def process_result(self, file):
        """Aggregate a jsonl crawl result file into result_tonghuashun.xlsx.

        Groups each company's successes by report type, re-downloads any
        pdf missing on disk, filters out short documents, merges the
        accepted first-page images, and writes one Excel row per company.

        :param file: jsonl file produced by callback()/clear_tmp_result().
        :return: None
        """
        success_num = 0
        lis = []
        company_map = {}  # renamed from "map", which shadowed the builtin
        with open(file, "r", encoding="utf-8") as fp:
            for i in fp:
                line = i.strip()
                data = json.loads(line)
                if data["status"] == 0:
                    if len(data["success"]) > 0:
                        success_num += 1
                        company_info = data["origin"]
                        company_name = company_info["company_name"]
                        origin_id = company_info["origin_id"]
                        if company_map.get(company_name) is None:
                            company_map[company_name] = {
                                "origin_id": origin_id
                            }
                        else:
                            print(f"重复公司：{company_name}")

                        for url, info in data["success"].items():

                            title = info["title"]
                            save_file = info["save_file"]
                            pdf_type = self.type_map.get(info["type"], "")

                            tmp = {
                                "title": title,
                                "save_file": save_file,
                                "url": url,
                            }
                            if company_map[company_name].get(pdf_type) is None:
                                company_map[company_name][pdf_type] = [tmp]
                            else:
                                company_map[company_name][pdf_type].append(tmp)

        print(f"匹配到组织机构代码公司数量:{success_num}")

        def check_res(res, tmp):
            # Expand one report-type bucket into the 5 Excel columns:
            # titles, save paths, urls, accepted files, merged first page.
            title = []
            save_file = []
            url = []
            file_url_map = {}
            for i in res:
                title.append(i["title"])
                save_file.append(i["save_file"])
                url.append(i["url"])
                file_url_map[i["save_file"]] = i["url"]

            success_list = []
            imgs_file = []
            for _file in save_file:
                # (Removed a no-op from the original: it called
                # _file.replace("\\", "\\\\") and discarded the result.)
                folder, fname = os.path.split(_file)
                os.makedirs(folder, exist_ok=True)

                # Re-download the pdf if it is not already on disk.
                if not os.path.exists(_file):
                    _url = file_url_map.get(_file)
                    headers = {
                        "User-Agent": "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
                    }
                    response = requests.get(_url, headers=headers)
                    print("response--->", response)
                    with open(_file, "wb") as f:
                        f.write(response.content)

                # De-duplicate by content hash across runs.
                hash_file = Entrypt().make_md5(_file)
                if self.check_history(hash_file):
                    print("文件已存在")
                    continue
                with pdfCheck(_file) as fp:
                    tmp_first_file = fp.save_first_page()
                    page_nums = fp.num_pages
                    # Documents of 10 pages or fewer are treated as junk.
                    if page_nums > 10:
                        if not tmp_first_file:
                            print("文件有问题")
                        else:
                            imgs_file.append(tmp_first_file)
                            success_list.append(_file)
                    else:
                        print(f"报告页数不足，不处理:{page_nums}")

            merge_img_file = ""
            if len(imgs_file) > 0:
                im = ImagesMerge(imgs_file)
                merge_img_file = im.merge_imgs()

            tmp.append("\n".join(title))
            tmp.append("\n".join(save_file))
            tmp.append("\n".join(url))
            tmp.append("\n".join(success_list))
            if merge_img_file == "":
                tmp.append("")
            else:
                tmp.append(os.path.abspath(merge_img_file))
            return tmp

        for company_name, info in company_map.items():
            print("company_name", company_name)
            origin_id = info.get("origin_id")
            tmp = [company_name, origin_id]
            # One bucket per report type, in type_map insertion order
            # (年报, 年度审计报告, 社会责任报告, 环境、社会和管治报告,
            # 可持续发展报告) — replaces five copy-pasted branches.
            for type_label in self.type_map.values():
                reports = info.get(type_label)
                if reports:
                    tmp = check_res(reports, tmp)
                else:
                    tmp.extend(["", "", "", "", ""])

            lis.append(tmp)

        headers = ["公司名称", "组织机构代码",
                   "年报", "文件路径", "url", "符合条件文件", "首页图片",
                   "年度审计报告", "文件路径", "url", "符合条件文件",
                   "首页图片",
                   "社会责任报告", "文件路径", "url", "符合条件文件",
                   "首页图片",
                   "环境、社会和管治报告", "文件路径", "url", "符合条件文件",
                   "首页图片",
                   "可持续发展报告", "文件路径", "url", "符合条件文件",
                   "首页图片"]

        self.excel.write("result_tonghuashun.xlsx", [lis], [headers])


if __name__ == '__main__':
    # Ad-hoc driver: aggregate one crawl result file into the Excel
    # summary, then sanity-check the title classifier on a sample.
    runner = Base()
    runner.process_result(
        r"D:\Project\Python\baidu\crowdtest\collection-script\CR\tests\zzprojects\result_tmp_tonghuashun_2025-03-31-14-21-10.jsonl")
    sample_title = "清溢光电佛山清溢微电子有限公司2024年度审计报告"
    print(runner.check_pdf_name(sample_title))
