#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
@author: JHC000abc@gmail.com
@file: main.py
@time: 2025/3/13 13:54 
@desc: 

"""
import os
import requests
import shutil
import multiprocessing
from multiprocessing import Queue
from crawl_jc import CrawlJuChao
from crawl_tonghuashun import CrawlTongHuaShun
from merge_result import MergeResult


def crawl_tonghuashun(file, que, cookies):
    """
    Run the TongHuaShun crawler for the subjects listed in *file*.

    :param file: path of the input xlsx workbook to process
    :param que: multiprocessing queue that receives the result file path
    :param cookies: cookie value/mapping forwarded to CrawlTongHuaShun
    :return: None (the result path is communicated via *que*)
    """
    print("开始抓取同花顺")
    crawler = CrawlTongHuaShun(cookies)
    result_path = crawler.process(file=file)
    # Hand the produced result file path back to the parent process.
    que.put(result_path)
    print("同花顺采集完成")


def crawl_jc(file, que):
    """
    Run the JuChao (cninfo) crawler for the subjects listed in *file*.

    :param file: path of the input xlsx workbook to process
    :param que: multiprocessing queue that receives the result file path
    :return: None (the result path is communicated via *que*)
    """
    print("开始抓取巨潮网")
    result_path = CrawlJuChao().process(file=file)
    # Hand the produced result file path back to the parent process.
    que.put(result_path)
    print("巨潮网采集完成")


def merge_result(file1, file2):
    """
    Merge the two crawler result files into the final combined output.

    :param file1: path of the first result file
    :param file2: path of the second result file
    :return: None
    """
    MergeResult().process(file1, file2)


def verify_cookies(cookies):
    """
    Check whether the supplied cookies are still valid by issuing a probe
    request against the TongHuaShun announcement search API.

    :param cookies: dict of cookies (expects key "jgbsessid")
    :return: True if the API responds with HTTP 200, False otherwise
             (including on network failure / timeout, so the caller's
             retry loop can prompt for new cookies instead of crashing)
    """
    data = {
        "beginDate": "2022-12-31",
        "esp": 1,
        "keyword": "国家电网",
        "page": 1,
        "limit": 500,
        "nodeidArr": "[{\"nodeid\":\"903\",\"children\":[]}]",
        "type": 1,
    }
    url = "https://ft.51ifind.com/standardgwapi/api/news_service/announcement/index/list"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36",
    }

    try:
        # NOTE(review): verify=False disables TLS certificate validation —
        # confirm this is intentional for this endpoint.
        response = requests.post(url, headers=headers, cookies=cookies, json=data, verify=False,
                                 timeout=(5, 15))
    except requests.RequestException as err:
        # A timeout / connection error previously propagated and killed the
        # interactive retry loop in __main__; treat it as "invalid" instead.
        print(f"请求失败：{err}，请检查网络后重试")
        return False

    if response.status_code != 200:
        print(f"状态码：401 为cookies无效 请重新获取 |当前状态码：{response.status_code}")
        return False
    return True


def get_file_path():
    """
    Prompt the user for the input xlsx workbook path until an existing
    file is given.

    :return: the validated file path (whitespace/quote-trimmed)
    """
    while True:
        # Strip surrounding whitespace and quotes once up front: the original
        # code checked `file_path.strip()` for emptiness but then tested the
        # untrimmed string with os.path.exists, so a pasted/dragged path with
        # trailing spaces or quotes would always be rejected.
        file_path = input("请输入采集主体xlsx文件路径：").strip().strip('"').strip("'")
        if not file_path:
            print("路径不能为空，请重新输入。")
        elif not os.path.exists(file_path):
            print(f"文件 '{file_path}' 不存在，请检查路径。")
        else:
            return file_path


if __name__ == '__main__':
    # Required for multiprocessing in frozen (e.g. PyInstaller) executables.
    multiprocessing.freeze_support()
    que = Queue()
    file = get_file_path()
    # Work on a copy so each crawler process reads its own workbook file.
    new_file = f"{file}_2.xlsx"
    shutil.copy(file, new_file)
    flag = False
    # Keep prompting until the supplied cookie passes the validation probe.
    while not flag:
        jgbsessid = input("请输入：cookies:")
        # jgbsessid = "6f4d391a0073b70741017930abdcc7f6"
        cookies = {
            "jgbsessid": f"{jgbsessid}",
        }
        flag = verify_cookies(cookies)
    print("cookies 验证通过，开始采集数据")

    print(file)
    print(new_file)
    print(jgbsessid)

    # Run both crawlers in parallel; each worker puts its result file path
    # onto `que` when finished.
    # NOTE(review): crawl_tonghuashun receives the raw `jgbsessid` string here,
    # while verify_cookies was given the `cookies` dict — confirm
    # CrawlTongHuaShun accepts the bare session id.
    p1 = multiprocessing.Process(target=crawl_jc, args=(file, que))
    p2 = multiprocessing.Process(target=crawl_tonghuashun, args=(new_file, que, jgbsessid))
    p2.start()
    p1.start()
    process = [p2, p1]

    for p in process:
        p.join()

    print("采集程序运行完成，即将开启整合结果")

    # Collect the result file paths pushed by the two workers.
    results = []
    # NOTE(review): Queue.qsize() is approximate and raises NotImplementedError
    # on some platforms (e.g. macOS) — diagnostic only.
    print(f"队列中元素数量: {que.qsize()}")

    while not que.empty():
        results.append(que.get())

    # results = ['result_tmp_巨潮网_2025-04-02-13-09-00.jsonl', 'result_tmp_同花顺_2025-04-02-13-09-00.jsonl']

    print("results:", results)
    # NOTE(review): assumes both workers succeeded and each pushed exactly one
    # path; if a worker died, results has < 2 entries and this raises IndexError.
    merge_result(results[0], results[1])
