#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
@author: JHC000abc@gmail.com
@file: merge_result.py
@time: 2025/3/31 17:48 
@desc: 

"""
import datetime
import json
import os.path
import atexit
import shutil
from base import Base
from utils.util_entrypt import Entrypt
from utils.util_pdf import pdfCheck
from utils.util_excel import Excel
from utils.util_imgs import ImagesMerge
from utils.util_bos import BosOnline


class MergeResult(Base):
    """Merge crawl results from 同花顺 / 巨潮网 JSONL files into one Excel report.

    For every company found in the input files, the matching PDFs are
    downloaded, deduplicated against the persisted history, uploaded to BOS,
    and summarised per report type as four cells: title / url / first-page
    image / bos url.
    """

    def __init__(self):
        super().__init__()
        # Persist the download history even on abnormal interpreter exit.
        atexit.register(self.exit_handler)
        self.history = self.load_history()
        print(f"成功加载到历史记录：{len(self.history)}条")
        self.excel = Excel()
        self.bos = BosOnline()

    def exit_handler(self):
        """Flush the in-memory download history to disk at interpreter exit.

        :return: None
        """
        self.save_history()

    def read_result_from_file(self, file):
        """Yield one parsed JSON object per line of a UTF-8 JSONL file.

        :param file: path of the JSONL result file
        :return: generator of dicts, one per line
        """
        with open(file, "r", encoding="utf-8") as fp:
            for raw_line in fp:
                yield json.loads(raw_line.strip())

    def _collect(self, files):
        """Parse the result files and group PDF infos by company and type.

        :param files: iterable of JSONL result file paths; each file name must
            contain either ``同花顺`` or ``巨潮网`` to identify its source
        :return: ``(result_map, company_map)`` where ``result_map`` maps
            company name -> {pdf type -> [pdf info dicts]} and ``company_map``
            maps company name -> origin id; ``None`` if a file name matches
            neither known source
        """
        result_map = {}
        company_map = {}
        for file in files:
            if "同花顺" in file:
                tag = "同花顺"
            elif "巨潮网" in file:
                tag = "巨潮网"
            else:
                print("未知文件")
                return None

            for args in self.read_result_from_file(file):
                # Skip failed crawls and entries without any successful PDF.
                if args["status"] != 0 or not args["success"]:
                    continue

                origin = args["origin"]
                company_name = origin["company_name"]
                result_map.setdefault(company_name, {})
                company_map[company_name] = origin["origin_id"]

                # Group this entry's PDFs by report type before merging them
                # into the company-wide map.
                pdf_infos = {}
                for url, pdf_info in args["success"].items():
                    entry = {
                        "url": url,
                        "title": pdf_info["title"],
                        "from": tag,
                    }
                    pdf_infos.setdefault(pdf_info["type"], []).append(entry)

                for _type, res in pdf_infos.items():
                    result_map[company_name].setdefault(_type, []).extend(res)

        return result_map, company_map

    def process(self, file1, file2):
        """Merge two crawl result files and write the summary Excel file.

        :param file1: JSONL result file from one source (同花顺 or 巨潮网)
        :param file2: JSONL result file from the other source
        :return: None
        """
        collected = self._collect([file1, file2])
        if collected is None:
            # Unknown input file: abort without writing anything.
            return
        result_map, company_map = collected

        print(f"识别到新增相关信息:{len(result_map)}条")
        for company_name, _map in result_map.items():
            print(f"company_name:{company_name}")
            origin_code = company_map.get(company_name)
            temp_result = [company_name, origin_code]
            # Report types 1..5 correspond to the fixed Excel column groups
            # declared in `headers` below.
            for i in range(1, 6):
                _temp = _map.get(i)
                if not _temp:
                    # name / url / first-page image / bos url — all empty.
                    res = ["", "", "", ""]
                else:
                    print("_temp", _temp)
                    title_list = []
                    url_list = []
                    first_img_list = []
                    bos_list = []

                    for ind, _i in enumerate(_temp):
                        _url = _i["url"]
                        # "*" is illegal in Windows file names.
                        pdf_name = f'{_i["title"]}_{ind}.pdf'.replace("*", "")

                        pdf_save_folder_tmp = f"./temp/{company_name}/{self.type_map.get(i)}"
                        os.makedirs(pdf_save_folder_tmp, exist_ok=True)

                        pdf_save_file_tmp = os.path.join(pdf_save_folder_tmp, pdf_name)
                        if not os.path.exists(pdf_save_file_tmp):
                            download_status = self.download_pdf(_url, self.headers, pdf_save_file_tmp)
                        else:
                            print("文件已存在")
                            download_status = True
                        if not download_status:
                            data_from = _i["from"]
                            print(f"下载失败：{_url}--{pdf_save_file_tmp}")
                            pdf_name = f"下载失败:{data_from}"
                        else:
                            pdf_hash = Entrypt().make_md5(pdf_save_file_tmp)
                            if self.check_history(pdf_hash):
                                # Seen in a previous run: blank out the cells.
                                _url = ""
                                pdf_name = ""
                            else:
                                # Upload to BOS, read page count, render page 1.
                                try:
                                    with pdfCheck(pdf_save_file_tmp) as f:
                                        pdf_pages = f.pages
                                        # NOTE(review): type 2 PDFs with <= 10
                                        # pages are discarded — presumably too
                                        # short to be a real audit report.
                                        if pdf_pages <= 10 and i == 2:
                                            _url = ""
                                            pdf_name = ""
                                        else:
                                            first_img_file = f.save_first_page()
                                            first_img_list.append(os.path.abspath(first_img_file))
                                            url = f"https://bj.bcebos.com/petite-mark/public_read/vipshop/" \
                                                  f"{origin_code}/" \
                                                  f"{self.type_map.get(i)}/" \
                                                  f"{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}/" \
                                                  f"{pdf_name}"
                                            self.bos.upload(url, pdf_save_file_tmp)
                                            bos_list.append(url)
                                            print(f"bos_url:{url}")
                                except Exception as e:
                                    # Unreadable/corrupt PDF: blank the cells
                                    # but keep processing the remaining PDFs.
                                    print(e, e.__traceback__.tb_lineno)
                                    _url = ""
                                    pdf_name = ""

                        url_list.append(_url)
                        title_list.append(pdf_name)
                    if len(first_img_list) > 1:
                        first_imgs = ImagesMerge(first_img_list).merge_imgs()
                    else:
                        first_imgs = "".join(first_img_list)
                    res = ["\n".join(title_list), "\n".join(url_list),
                           first_imgs,
                           "\n".join(bos_list)]

                temp_result.extend(res)

            self.success_list.append(temp_result)

        save_file = "result_巨潮网_同花顺.xlsx"
        headers = ["公司名称", "组织机构代码",
                   "年报", "url", "首页图片", "bos",
                   "年度审计报告", "url", "首页图片", "bos",
                   "社会责任报告", "url", "首页图片", "bos",
                   "环境、社会和管治报告", "url", "首页图片", "bos",
                   "可持续发展报告", "url", "首页图片", "bos"
                   ]
        self.excel.write(save_file, [self.success_list], [headers])
        self.success_list = []


if __name__ == '__main__':
    # One JSONL crawl-result file per source (巨潮网 and 同花顺).
    file1 = r"D:\Project\Python\baidu\crowdtest\collection-script\CR\tests\zzprojects\result_tmp_jc_2025-03-31-18-15-57.jsonl"
    file2 = r"D:\Project\Python\baidu\crowdtest\collection-script\CR\tests\zzprojects\result_tmp_tonghuashun_2025-03-31-14-21-10.jsonl"
    MergeResult().process(file1, file2)
