# -*- coding: utf-8 -*-
import time
from loguru import logger
import threading
import os
import json
from queue import Queue
from fake_useragent import UserAgent
###
from src.crawler.one.one_crawler import zh_hans_transport
from src.crawler.maersk.maersk_crawler import maersk_spider_run
from src.crawler.cosco.cosco_crawler import cosco_transport
from src.crawler.emc.EMC_crwaler import emc_transport
from src.crawler.hapag.hapag_crawler import hapag_spider_run
from src.crawler.maston.matson_crawler import matson_transport
from src.crawler.oocl.oocl_crawler import oocl_transport
from src.crawler.yml.yml_crawler import yangming_transport
from src.crawler.zim.zim_crawler import zim_transport
from src.crawler.msc.msc_crawler import msc_transport
##The following crawlers require a running VPN
from src.crawler.hmm.hmm_crawler import hmm21_transport
from src.crawler.cma.cma_crawler import cma_transport  ##需要国外ip代理池

# Module-level shared state for the crawler threads.
# NOTE(review): ip_queue, random_user_agent and stop_threading_event are not
# referenced in this file — presumably consumed by the imported crawler
# modules; confirm before removing.
ip_queue = Queue()
current_working_directory = os.getcwd()
# One random User-Agent string sampled at import time.
ua = UserAgent()
random_user_agent = ua.random
# Event presumably used to signal crawler threads to stop — verify in crawlers.
stop_threading_event = threading.Event()

# Global request counters (total vs. lost) — not updated in this file.
total_requests = 0
loss_requests = 0

# current_directory = os.getcwd()
# NOTE(review): machine-specific absolute path to the directory holding the
# "*shipping_counter*.json" bookkeeping files — must be adjusted per deploy.
current_directory = r'D:\tools\XshellPlus-7.0.0033r-Modified-Green\新建文件夹\src\crawler\log_json'

def load_shipping_counter_files(directory=None):
    """Load every JSON file whose name contains 'shipping_counter'.

    Args:
        directory: Directory to scan; defaults to the module-level
            ``current_directory`` (backward compatible with the old
            zero-argument call).

    Returns:
        dict: Mapping of file name -> parsed JSON content. Files that
        cannot be parsed are logged and skipped so one corrupt file does
        not abort the whole load.
    """
    if directory is None:
        directory = current_directory

    # Candidate bookkeeping files, e.g. 'ONE_shipping_counter.json'.
    shipping_counter_files = [
        file_name
        for file_name in os.listdir(directory)
        if "shipping_counter" in file_name and file_name.endswith('.json')
    ]

    loaded_data = {}
    for file_name in shipping_counter_files:
        file_path = os.path.join(directory, file_name)
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                loaded_data[file_name] = json.load(file)
        except json.JSONDecodeError as e:
            # Use the project logger instead of print (file already uses loguru).
            logger.warning(f"无法读取文件 {file_name}: {e}")

    return loaded_data


def replenish_spider(site, values):
    """Re-run the crawler matching *site* for every record in *values*.

    Args:
        site: File name of the shipping-counter JSON (selects the carrier).
        values: Iterable of dicts with 'origin_city' and 'des_city' keys.
    """
    # Select the carrier-specific transport function; unknown sites are a no-op.
    if site == 'ONE_shipping_counter.json':
        crawler = zh_hans_transport
    elif site == 'CMA_shipping_counter.json':
        crawler = cma_transport
    elif site == 'HMM_shipping_counter.json':
        crawler = hmm21_transport
    elif site == 'OOCL_shipping_counter.json':
        crawler = oocl_transport
    elif site == 'YML_shipping_counter.json':
        crawler = yangming_transport
    elif site == 'ZIM_shipping_counter.json':
        crawler = zim_transport
    else:
        return

    for record in values:
        crawler(record['origin_city'], record['des_city'])


def spider_run():
    """Start one replenish thread per loaded shipping-counter file.

    Crawlers that need a VPN / overseas proxy pool (CMA, HMM) are collected
    into a separate list and are only started when that infrastructure is
    available (their start loop is commented out below).
    """
    # Sites that must NOT run from a mainland-China IP.
    vpn_sites = ('CMA_shipping_counter.json', 'HMM_shipping_counter.json')

    t_list = []   # domestic IP proxy-pool crawlers
    t2_list = []  # VPN / overseas proxy-pool crawlers

    loaded_data = load_shipping_counter_files()
    for site, values in loaded_data.items():
        t = threading.Thread(target=replenish_spider, args=(site, values))
        # BUG FIX: the original condition used `!= A or != B`, which is
        # always true, so CMA/HMM threads were also placed in the domestic
        # list and started without a VPN. Membership test fixes that.
        if site in vpn_sites:
            t2_list.append(t)
        else:
            t_list.append(t)

    for t in t_list:
        t.start()

    # Uncomment when a VPN is enabled or when deployed outside mainland China:
    # for t in t2_list:
    #     t.start()

    # NOTE(review): join() below already blocks until completion, so this
    # delay only matters when threads finish in under 20 s — confirm whether
    # it is an intentional head start before removing.
    time.sleep(20)

    for t in t_list:
        t.join()

    logger.info(f"主线程1完成")

    # VPN threads were never started above, so these joins return immediately.
    for t in t2_list:
        t.join()
    logger.info(f"VPN主线程2完成")


if __name__ == '__main__':
    # Script entry point: launch all crawler threads and wait for them.
    spider_run()
