# -*- coding:utf-8 -*-

import asyncio
import datetime
import os
import random
import threading
import time
import tkinter as tk
import traceback
import urllib.parse
from tkinter import messagebox

import aiohttp
import requests
from lxml import etree
from openpyxl import load_workbook
from selenium.webdriver import Chrome, ActionChains
from selenium.webdriver.chrome.options import Options
# from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.wait import WebDriverWait
from seleniumwire import webdriver

from conf import TEXT, url_tb_home, STEP, url_search_shop, \
    COPYRIGHT, SHEET_NAME, log, R_ROOT_PATH, \
    JD_RESULT_PATH, TB_RESULT_PATH, ON_MESSAGE, ERROR_PATH, APP_NAME, MINE_IP, update_jd_cookies, url_jd_detail, \
    jd_headers, jd_cookies, RESOURCE_PATH
from excel_helper import ExcelHelper
from exceptions import RunException
from model import Result, ResultEnum
from util import convert_path, match_str_similarity, max_index, remove_suff, filter_shop_name, \
    get_longest_name, get_search_keys, deleteInvalidWord
from xpath_address import JdXpath, TbXpath


def send_dingding(message):
    """Push *message* to the DingTalk alert robot (best effort).

    Only fires when the ON_MESSAGE switch is enabled.  Delivery failures are
    logged and swallowed so that alerting can never crash the crawler thread.

    :param message: text appended to the fixed alert template.
    """
    if not ON_MESSAGE:
        return
    headers = {
        'Content-Type': 'application/json',
    }
    # NOTE(review): the access token is hard-coded; consider moving it to conf.
    params = {
        'access_token': '5e3d8f6bcae87e59210e2dc3b8f7aaa8cddd5593f43092b5f3483e1e9a55882a',
    }
    json_data = {
        'msgtype': 'text',
        'text': {
            'content': f'服务器ip ：1.1.1.1 @18208186123 {MINE_IP}====> {message}',
        },
        'at': {
            'atMobiles': [
                '18208186123'
            ],
            'isAtAll': False
        }
    }
    try:
        # BUGFIX: timeout added - without it a stalled DingTalk endpoint
        # would hang the calling thread indefinitely.
        requests.post('https://oapi.dingtalk.com/robot/send', params=params,
                      headers=headers, json=json_data, timeout=10)
    except requests.RequestException:
        # An unreachable webhook must not abort the crawl.
        log.error(traceback.format_exc())


def init_default_driver(is_wire=False, proxies=None):
    """Create a configured Chrome WebDriver with basic anti-bot hardening.

    :param is_wire: when True use selenium-wire's Chrome (request capture).
    :param proxies: optional "host:port" string passed to --proxy-server.
    :return: the ready driver, or the error message as a str on failure
             (callers detect failure with ``isinstance(driver, str)``).
    """
    try:
        option = Options()
        # Headless mode (kept disabled - a visible window is needed for login)
        # option.add_argument('--headless')
        # Fixed desktop UA.  BUGFIX: the value used to start with a stray
        # double quote, which was sent to servers as part of the UA string.
        option.add_argument(
            'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36')
        # Chinese locale
        option.add_argument('lang=zh_CN.UTF-8')
        # Disable the popup blocker.
        # BUGFIX: was '–disable-popup-blocking' (en dash), which Chrome ignores.
        option.add_argument('--disable-popup-blocking')
        # Hide the "Chrome is being controlled by automated software" banner
        option.add_experimental_option('excludeSwitches', ['enable-automation'])
        # option.add_experimental_option('excludeSwitches', ['enable-logging'])  # conflicts with the line above
        # Fallback for hiding the automation banner on older Chrome versions
        option.add_argument('disable-infobars')
        # Incognito mode.  BUGFIX: was '–incognito' (en dash) and never took effect.
        option.add_argument('--incognito')
        # Disable GPU rendering
        option.add_argument("--disable-gpu")
        # Disable extensions
        option.add_argument("--disable-extensions")
        # Disable the cache
        option.add_argument("disable-cache")
        # Skip image loading for speed (disabled)
        # option.add_argument('blink-settings=imagesEnabled=false')
        # Hide the navigator.webdriver automation hint
        option.add_argument("disable-blink-features=AutomationControlled")
        # Ignore certificate errors (normalized from '-ignore-certificate-errors')
        option.add_argument('--ignore-certificate-errors')
        # Drop the developer-mode warning
        option.add_experimental_option('useAutomationExtension', False)

        if proxies:
            # e.g. option.add_argument("--proxy-server=http://ip:port")
            option.add_argument(f"--proxy-server={proxies}")

        serve = Service(executable_path=os.path.join(RESOURCE_PATH, 'chromedriver_v109.exe'))
        sw_options = {
            'verify_ssl': False
        }
        if is_wire:
            driver = webdriver.Chrome(service=serve, options=option, seleniumwire_options=sw_options)
        else:
            driver = Chrome(service=serve, options=option)
        driver.implicitly_wait(5)
        driver.maximize_window()

        # Anti-bot: report window.navigator.webdriver as undefined
        driver.execute_cdp_cmd('Page.addScriptToEvaluateOnNewDocument',
                               {'source': 'Object.defineProperty(navigator, "webdriver", {get: () => undefined})'})

        with open(os.path.join(RESOURCE_PATH, 'stealth.min.js'), 'r', encoding='utf-8') as f:
            js = f.read()

        # Anti-bot: stealth.min.js wipes most browser automation fingerprints
        driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
            "source": js
        })
        return driver
    except Exception as e:
        log.error(traceback.format_exc())
        return str(e)

def page_dom_complete_load():
    """Return a WebDriverWait predicate that is truthy once the DOM is fully loaded.

    The predicate asks the browser for ``document.readyState`` and succeeds
    when it equals ``"complete"``.
    """

    def _predicate(driver):
        try:
            return driver.execute_script("return document.readyState") == "complete"
        except Exception:
            # Narrowed from a bare except; any script failure (navigation in
            # progress, driver gone) simply counts as "not loaded yet".
            return False

    return _predicate


def get_column_index(file_path, sheet_name):
    """Map Result field names to their 1-based column numbers in the header row.

    Reads row 1 of *sheet_name* and returns ``{field_name: column_index}`` for
    every header cell whose text matches one of ``Result.__match_args__``.

    :param file_path: path of the .xlsx workbook.
    :param sheet_name: worksheet whose first row holds the header.
    :return: dict mapping matched field names to 1-based column indices.
    """
    wb = load_workbook(file_path)
    try:
        ws = wb[sheet_name]
        # Set for O(1) membership instead of the original nested loop.
        args = set(Result.__match_args__)
        resp = {}
        for i in range(1, ws.max_column + 1):
            cell_value = ws.cell(row=1, column=i).value
            if cell_value in args:
                resp[cell_value] = i
        return resp
    finally:
        # load_workbook keeps the file handle open until explicitly closed.
        wb.close()


def _parse_html(text, sku_id):
    """Parse a JD product detail page.

    :param text: raw HTML of the detail page.
    :param sku_id: JD SKU id, used to build the short-name xpath.
    :return: tuple (shop_name, sku_name, img_url, on_sale, short_name); when
             the response is the JD home page (SKU not found) every field is
             None and on_sale is True.
    :raises: re-raises any parsing exception after logging it.
    """
    try:
        html = etree.HTML(text)
        title = html.xpath("//title")

        # Redirected to the JD home page -> the SKU does not exist.
        if len(title) > 0 and '京东(JD.COM)-正品低价、品质保障、配送及时、轻松购物' in title[0].text.strip():
            # BUGFIX: previously returned a 4-tuple (inconsistent with the
            # success branch), and its falsy 4th element made the caller
            # mislabel a missing product as "sold off".
            return None, None, None, True, None

        # Product found - extract the fields
        sku_name = html.xpath(JdXpath.x_sku_name)
        img = html.xpath(JdXpath.x_img)
        shop_name = html.xpath(JdXpath.x_shop_name)
        sold_off = html.xpath(JdXpath.x_xiajia)
        # The spec/variant name doubles as the short name
        short_name = html.xpath(JdXpath.x_short_name % sku_id)
        jd_img = None
        jd_shop_name = None
        jd_sku_name = None
        jd_short_name = None

        if len(shop_name) > 0:
            jd_shop_name = shop_name[0].text.strip()
        else:
            shop_name = html.xpath(JdXpath.x_shop_name_b)
            if len(shop_name) > 0:
                jd_shop_name = shop_name[0].text.strip()

        if len(sku_name) > 0:
            jd_sku_name = sku_name[0].text.strip()
        else:
            # Product name fallback 1
            sku_name = html.xpath(JdXpath.x_sku_name_b)
            if len(sku_name) > 0:
                jd_sku_name = sku_name[0].text.strip()

        # Product name fallback 2: stitch the name together from fragments
        if not jd_sku_name:
            # BUGFIX: jd_sku_name could still be None here, so the '+='
            # below used to raise TypeError.
            jd_sku_name = ''
            for n in html.xpath(JdXpath.x_sku_name_c):
                jd_sku_name += n.replace('\n', '').strip()

        # Product name fallback 3: use the page title
        if not jd_sku_name:
            jd_sku_name = title[0].text.strip()

        if len(img) > 0:
            jd_img = 'https:' + img[0].strip().removesuffix('.avif')

        if len(short_name) > 0:
            jd_short_name = short_name[0].text.strip()

        if not jd_short_name:
            # Short name fallback 1
            short_name = html.xpath(JdXpath.x_short_name_1)
            if len(short_name) > 0:
                jd_short_name = short_name[0].text.strip()

        if not jd_short_name:
            # Short name fallback 2
            short_name = html.xpath(JdXpath.x_short_name_2)
            if len(short_name) > 0:
                jd_short_name = short_name[0].text.strip()

        # Short name fallback 3.  BUGFIX: this used to run whenever fallback 2
        # ran, clobbering fallback 2's result; now only when still empty.
        if not jd_short_name and jd_sku_name:
            jd_short_name = get_longest_name(jd_sku_name)

        # on-sale status: False when a "商品已下架" banner is present
        status = True
        if len(sold_off) > 0:
            for i in sold_off:
                if '商品已下' in i.text.strip():
                    status = False
                    break
        return jd_shop_name, jd_sku_name, jd_img, status, jd_short_name
    except Exception:
        log.error(traceback.format_exc())
        raise


async def _get_page_text(sku_id):
    """Fetch the raw JD detail-page HTML for *sku_id* via aiohttp.

    Sleeps a random 1-4 s first so concurrent tasks do not hit JD in lockstep.
    """
    await asyncio.sleep(random.randint(1, 4))
    timeout = aiohttp.ClientTimeout(total=20)
    connector = aiohttp.TCPConnector(ssl=False)
    async with aiohttp.ClientSession(timeout=timeout, connector=connector) as session:
        async with session.get(url=url_jd_detail % sku_id, headers=jd_headers,
                               cookies=jd_cookies) as resp:
            return await resp.text()


ww, wh = 0, 0
# 锁-继续
continue_flag = True
lock = threading.Lock()

# 锁-IO 错误
error_flag = False
# excel 标题头 目前固定
head_column_index = {'sort': 1, 'jd_sku_id': 2, 'excel_shop_name': 3, 'jd_shop_name': 4, 'tb_shop_name': 5,
                     'shop_similar': 6, 'jd_short_name': 7, 'jd_sku_name': 8, 'tb_sku_name': 9, 'jd_search_result': 10,
                     'tb_search_result': 11, 'product_similar': 12, 'tb_product_link': 13, 'jd_product_link': 14,
                     'tb_shop_link_1': 15, 'tb_shop_link_2': 16, 'jd_img': 17}
# 防数据丢失
lost_product_jd = []
lost_product_tb = []


class Application(tk.Frame):
    def __init__(self, master=None, *args, **kwargs):
        """Initialise widget state, build the UI and pack the frame.

        :param master: parent Tk widget (usually the root window).
        """
        super().__init__(master, *args, **kwargs)
        self._proxy_path = None  # tk.StringVar for the proxy input (set in create_widget)
        self.l_status = None  # status label; turns red while paused / waiting on a human
        self.btn_star = None  # "start JD crawl" button
        self.btn_cont_b = None  # "start TB crawl" button
        self.btn_canc_s = None  # "close software" button
        self.btn_canc_b = None  # "close browser (stop)" button
        self.btn_cont_v = None  # "continue/resume crawl" button
        self._source_path = None  # tk.StringVar for the source Excel path
        self._now_text = tk.StringVar()
        self._now_text.set("正常状态")
        self.thread = None  # worker thread handle - TODO confirm where it is assigned
        # self._product_name = None
        # self._shop_name = None
        self._text = None  # log output Text widget
        self.driver: WebDriver | None = None
        self.wait: WebDriverWait | None = None
        self.master = master
        self.pack()
        self.create_widget()

    def create_widget(self):
        """
        Build the client layout: Excel path row, proxy row, the five action
        buttons, the log text area and the copyright footer.
        """
        global ww, wh
        wwb = int(ww * 0.01 * 0.8)  # width of the invisible spacer buttons
        wwbb = int(ww * 0.02)  # standard action-button width
        f_1 = tk.Frame(self.master, height=40, width=int(ww * 0.95))
        f_1.pack(side=tk.TOP)

        self._source_path = tk.StringVar()
        tk.Label(f_1, text='   请输入Excel源文件绝对路径:   ').grid(row=0, column=0, pady=2)
        tk.Entry(f_1, width=int(ww * 0.08), textvariable=self._source_path, fg='black').grid(row=0, column=1, padx=2,
                                                                                             pady=2)
        self.l_status = tk.Label(f_1, textvariable=self._now_text, borderwidth=0, highlightthickness=0, width=wwbb)
        self.l_status.grid(row=0, column=2)

        # Proxy input row
        f_p = tk.Frame(self.master, width=int(ww * 0.9), height=40)
        f_p.pack(side=tk.TOP)
        self._proxy_path = tk.StringVar()
        tk.Label(f_p, text='   请输入代理地址及端口   ').grid(row=0, column=0, pady=2)
        tk.Entry(f_p, width=int(ww * 0.08), textvariable=self._proxy_path, fg='black').grid(row=0, column=1, padx=2,
                                                                                            pady=2)
        # NOTE(review): the "test proxy" button has no command bound yet
        tk.Button(f_p, text=" 测试代理 ", width=wwbb).grid(row=0, column=3)
        tk.Button(f_p, relief=tk.FLAT, borderwidth=0, width=wwb).grid(row=0, column=2)

        f_2 = tk.Frame(self.master, width=int(ww * 0.9), height=40)
        f_2.pack(side=tk.TOP)

        self.btn_star = tk.Button(f_2,
                                  width=wwbb,
                                  text='  开始(京东)采集  ',
                                  command=lambda: self.async_func(self._run_jd_spider))
        self.btn_star.grid(row=1, column=0, padx=2, pady=2)
        # Flat zero-border buttons act as spacers between the action buttons
        for i in range(1, 5):
            tk.Button(f_2, relief=tk.FLAT, borderwidth=0, width=wwb).grid(row=1, column=i * 2 - 1)

        self.btn_cont_b = tk.Button(f_2, width=wwbb, text='  开始(淘宝)采集  ',
                                    command=lambda: self.async_func(self._start_tb_spider))
        self.btn_cont_b.grid(row=1, column=2, padx=2, pady=2)

        self.btn_cont_v = tk.Button(f_2, width=wwbb, text='  继续/恢复采集  ',
                                    command=lambda: asyncio.run(self._update_continue()))
        self.btn_cont_v.grid(row=1, column=4, padx=2, pady=2)

        self.btn_canc_b = tk.Button(f_2, width=wwbb, text='  关闭浏览器（停止）  ',
                                    command=lambda: self.async_func(self.quit_browser))
        self.btn_canc_b.grid(row=1, column=6, padx=2, pady=2)

        self.btn_canc_s = tk.Button(f_2, width=wwbb, text='  关闭软件  ',
                                    command=lambda: self.async_func(self.shut_down))
        self.btn_canc_s.grid(row=1, column=8, padx=2, pady=2)

        l_bottom = tk.Label(self.master, text=COPYRIGHT, justify=tk.LEFT)
        l_bottom.pack(side=tk.BOTTOM)

        # Log output area; new text is appended at the 'end' mark
        f_3 = tk.Frame(self.master, height=int(wh * 0.1), width=int(ww * 0.9))
        f_3.pack(side=tk.TOP, fill=tk.BOTH)
        self._text = tk.Text(f_3)
        self._text.pack(side=tk.TOP, fill=tk.BOTH, pady=2, expand=True)
        self._text.mark_set('end', "3.0")
        self._text.mark_gravity('end', 'right')
        self._text.insert('1.0', TEXT)

    def _run_jd_spider(self):
        """Button entry point: run the async JD crawl to completion."""
        asyncio.run(self._start_jd_spider())

    async def _update_continue(self):
        """Handler for the continue/resume button.

        If the captcha page is still showing, keep (or re-enter) the paused
        state; otherwise clear the alert colour and let the crawl resume.
        """
        asyncio.create_task(self.keep_update())  # keep the GUI responsive
        if not self.driver or isinstance(self.driver, str):
            Application.toast_info(message="未开启浏览器")
        else:
            ele = self._flu_ele(locator=TbXpath.x_validate)
            if ele and '通过验证以确保正常访问' == ele.text.strip():
                global continue_flag
                with lock:
                    continue_flag = False
                    self._now_text.set("待校验")
                    self.l_status.config(background='red')
                self._log_and_sending("====> 仍在校验界面，请通过后重试")
            else:
                self.l_status.config(background='grey')
                self._log_and_sending("====> 异常解除 将自动恢复采集 请勿人工操作")
        update_flag(_now_text=self._now_text)

    def _check_in_process(self):
        """Detect anti-bot / VPN interception pages and block until cleared.

        Sets the paused flag and alerts the operator when either page is
        found, then polls `continue_flag` every 5 s until it is True again.
        """
        # VPN-detection page?
        try:
            ele = self.driver.find_element(By.XPATH, TbXpath.x_vpn)
            if '请检查是否使用了代理软件' in ele.text:
                update_flag(to_false=True, _now_text=self._now_text)
                self.l_status.config(background='red')
                self._log_and_sending("====> 检测到VPN检测页面 请注意!!")
        except:
            pass

        # Captcha / anti-crawl page?
        if '验证码' in self.driver.title:
            update_flag(to_false=True, _now_text=self._now_text)
            self.l_status.config(background='red')
            self._log_and_sending("====> 检测到反采集页面，请注意!!")

        global continue_flag
        start = time.time()
        while True:
            time.sleep(5)
            with lock:
                may_continue = continue_flag
            if may_continue:
                break
            waited = time.time() - start
            self._log_and_sending(f"====> 出现异常 等待修复(通过校验后点击[继续/恢复采集]) 已等待[{waited:.2f}]秒")

    def _open_url(self, url, is_login=False, is_shop=False):
        """Navigate to *url*, wait for the DOM, scroll like a human, then run
        the anti-bot / VPN checks.

        :param url: page to open.
        :param is_login: skip the scrolling on login pages.
        :param is_shop: shop pages get the full scroll, others a shorter one.
        """
        self.driver.get(url)
        if self.wait.until(page_dom_complete_load()):
            self._log_info(f"====> 打开网址[{url}] 页面加载完毕")

        # Random scrolling on non-login pages only
        if not is_login:
            if is_shop:
                self._random_scroll_page()
            else:
                self._random_scroll_page(1, 3)
            self._scroll_to_top()

        # Pause here if an anti-crawl or VPN page showed up
        self._check_in_process()

    async def _start_jd_spider(self):
        """Validate inputs, load the source Excel, then crawl JD in STEP-sized
        batches and log a summary.
        """
        asyncio.create_task(self.keep_update())  # keep the GUI responsive

        # Guard clauses replace the original nested if/else pyramid.
        if not self._source_path.get():
            Application.toast_info(message="请填写excel文件路径后开启采集")
            return

        self._source_path.set(convert_path(self._source_path.get()).removeprefix('\u202a'))
        file_path = self._source_path.get()

        if not os.path.exists(file_path):
            Application.toast_info(message="excel文件不存在,请检查再试")
            return

        if not os.path.exists(R_ROOT_PATH):
            os.mkdir(R_ROOT_PATH)

        # Refresh the JD cookies from the workbook before crawling
        update_jd_cookies(self._source_path.get())

        product_list = []
        try:
            # Keep only rows Taobao has not processed yet (rows already
            # crawled on JD are skipped later by _do_jd_spider).
            product_list = list(filter(lambda prod: not prod.tb_search_result,
                                       ExcelHelper.convert_2_cls(file_path=file_path, clazz=Result,
                                                                 sheet_name=SHEET_NAME)))
            self._log_info(f'====> 成功读取文件 [{file_path}] 开始采集jd商品信息...  ')
        except RunException as e:
            self._log_error(str(e))

        # BUGFIX: an empty list (all rows done, or the read failed) used to
        # crash on product_list[0] in the summary log below.
        if not product_list:
            self._log_info('====> jd： 没有待采集的数据')
            return

        start_time = time.time()
        length = len(product_list)
        if length > STEP:
            # ceil(length / STEP) batches, via integer arithmetic instead of
            # the original float round-trip.
            count = (length + STEP - 1) // STEP
            for i in range(count):
                await self._do_jd_spider(product_list[i * STEP: (i + 1) * STEP])
        else:
            # No slicing needed below one batch
            await self._do_jd_spider(product_list)
        end_time = time.time()
        self._log_info(
            f'====> jd： 京东采集完毕，序号[{product_list[0].sort}]至[{product_list[-1].sort}] 共计耗时{end_time - start_time:.2f}秒')

    async def _do_jd_spider(self, product_list):
        """Crawl one batch of products concurrently and persist the results
        to both the source workbook and the JD result workbook.
        """
        # BUGFIX: an empty batch used to crash on product_list[0] below.
        if not product_list:
            return

        start_time = time.time()
        tasks = []
        for product in product_list:
            if product.jd_search_result:
                self._log_info(f'====> jd： 序号:{product.sort} 已采集 本次跳过 ')
                continue
            tasks.append(asyncio.create_task(self._get_jd_info(product)))

        resp = await asyncio.gather(*tasks)
        # _get_jd_info returns None when a product failed hard; keep only the
        # rows that actually carry data so the Excel writer is not fed None.
        rows = [r for r in resp if r is not None]

        await save_update_excel(self._source_path.get(), rows, SHEET_NAME)
        await save_update_excel(JD_RESULT_PATH, rows, SHEET_NAME)

        end_time = time.time()
        self._log_info(
            f"====> jd: 本轮采集成功-序号[{product_list[0].sort}]至[{product_list[-1].sort}] 共计耗时{end_time - start_time:.2f}秒")

    async def _get_jd_info(self, product):
        """Fetch and parse one JD product page, filling *product* in place.

        :param product: the Result row being crawled.
        :return: the updated product, or None when the request/parsing raised.
        """
        self._log_info(
            f'====> jd：首次采集 序号:{product.sort} 商铺[{product.excel_shop_name}] 商品[{product.jd_sku_id}]')
        try:
            page = await _get_page_text(product.jd_sku_id)
            jd_info = _parse_html(page, product.jd_sku_id)
            if jd_info:
                if jd_info[1]:
                    # Found: copy shop / name / image / short name
                    product.jd_shop_name = jd_info[0]
                    product.jd_sku_name = jd_info[1]
                    product.jd_img = jd_info[2]
                    product.jd_short_name = jd_info[4]
                else:
                    product.tb_search_result = ResultEnum.NONE_JP.desc

                # Hoisted: both original branches marked the row as crawled
                product.jd_search_result = True
                if not jd_info[3]:
                    product.tb_search_result = ResultEnum.XIA_JIA.desc

                self._log_info(f'====> jd：采集成功: 序号:{product.sort} 商品名[{product.jd_sku_name}]')

            return product
        except Exception as e:
            log.error(traceback.format_exc())
            self._log_error(e)

    def _start_tb_spider(self):
        """
        Taobao crawl entry: open the TB login page, wait for a manual QR-code
        login, then hand over to _continue_tb_spider.

        Requirement: for each JD-crawled row, find the Taobao shop first and
        then the matching product inside that shop.
        """
        if self.driver and not isinstance(self.driver, str):
            Application.toast_info(message="浏览器已开启，请勿重复开启")
            return

        if not os.path.exists(JD_RESULT_PATH):
            Application.toast_info(message=f"未发现京东采集结果文件，如需直接开启，请将excel放入路径{JD_RESULT_PATH}")
            raise RunException('未发现京东采集结果文件')

        self.driver = init_default_driver()
        if isinstance(self.driver, str):
            # BUGFIX: on init failure execution used to fall through, call
            # find_element on a str (swallowed) and hang in the wait loop.
            self._log_error(f"====> 初始化Chrome浏览器失败! 错误原因[{self.driver}] 请检查配置 ")
            return

        self.wait = WebDriverWait(self.driver, 3, 0.5)
        self._open_url(url_tb_home, is_login=True)

        try:
            # Switch to QR-code login and flag the "waiting for login" state
            self.driver.find_element(By.XPATH, TbXpath.x_login_code).click()
            update_flag(to_false=True, _now_text=self._now_text)
            self._now_text.set("待登录")
            self.l_status.config(background='red')
        except:
            pass

        global continue_flag
        start = time.time()
        while True:
            self._log_info(
                f'====> tb： 进入登录页面， 等待人工登录中 已等待{time.time() - start:.2f}秒。。。登录后 请点击【继续/恢复采集】')
            time.sleep(5)
            with lock:
                logged_in = continue_flag
            if logged_in:
                break

        self._continue_tb_spider()

    def _continue_tb_spider(self):
        """
        Resume the Taobao crawl after a manual login: verify the session,
        reload the JD result workbook and crawl the pending rows in batches.
        """
        # Verify we are actually logged in
        user = self._flu_ele(TbXpath.x_user)
        if not (user and '亲，请登录' != user.text.strip()):
            self._log_info(f'====> tb： 登录失败! 请登录重试 ')
            return

        # Rows Taobao has not processed yet
        product_list = list(filter(lambda prod: not prod.tb_search_result,
                                   ExcelHelper.convert_2_cls(file_path=JD_RESULT_PATH, clazz=Result,
                                                             sheet_name=SHEET_NAME)))
        self._log_info(
            f'====> tb：继续采集 扫码登录成功! 账号:[{user.text}] 开始采集数据 注意请勿编辑文件 [{JD_RESULT_PATH}]及[{TB_RESULT_PATH}] 否则将导致失败')

        # BUGFIX: an empty list used to crash on product_list[0] below.
        if not product_list:
            self._log_info('====> tb： 没有待采集的数据')
            return

        start_time = time.time()
        length = len(product_list)
        if length > STEP:
            # ceil(length / STEP) batches.
            # BUGFIX: the loop previously ran range(0, count + 1); the extra
            # iteration left part_list=None and passed it to _do_tb_spider.
            count = (length + STEP - 1) // STEP
            for i in range(count):
                self._do_tb_spider(product_list[i * STEP: (i + 1) * STEP])
        else:
            # Below one batch - no slicing needed
            self._do_tb_spider(product_list)
        end_time = time.time()
        self._log_info(
            f'====> tb： 淘宝采集完毕，序号[{product_list[0].sort}]至[{product_list[-1].sort}] 共计耗时{end_time - start_time:.2f}秒')

    def _do_tb_spider(self, product_list):
        """
        Crawl one batch on Taobao: match the shop, then search the product,
        then persist results (falling back to an error_*.xlsx on write failure).
        """
        # BUGFIX: guards against empty/None batches (crashed on [0] below).
        if not product_list:
            return

        start_time = time.time()
        for product in product_list:
            # Only rows not yet collected (or previously failed)
            if product.tb_search_result:
                continue
            # Match the shop
            self._match_tb_shop(product)

            # Shop matching may already have recorded a final result
            if product.tb_search_result:
                continue
            # Match the product
            self._search_tb_product(product)

        end_time = time.time()
        self._log_info(
            f"====> tb: 本轮采集成功-序号[{product_list[0].sort}]至[{product_list[-1].sort}] 共计耗时{end_time - start_time:.2f}秒")

        try:
            # Write the results back
            if self._source_path.get():
                asyncio.run(save_update_excel(self._source_path.get(), product_list, SHEET_NAME))

            asyncio.run(save_update_excel(JD_RESULT_PATH, product_list, SHEET_NAME))
            asyncio.run(save_update_excel(TB_RESULT_PATH, product_list, SHEET_NAME))
        except Exception:
            # Narrowed from a bare except and logged: the write failed, so
            # dump to a timestamped error workbook to keep the scraped data
            # for manual repair.
            log.error(traceback.format_exc())
            path = ERROR_PATH
            if os.path.exists(ERROR_PATH):
                path = os.path.join(R_ROOT_PATH, 'error_' + datetime.datetime.now().strftime(
                    '%Y_%m_%d_%H_%M_%S') + '.xlsx')
            asyncio.run(save_update_excel(path, product_list, SHEET_NAME))

    def _match_tb_shop(self, product: Result):
        """
        Find *product*'s shop on Taobao and record link / name / similarity.

        Strategy: when the Excel and JD shop names agree, try exact matching
        (with '京东' and franchise suffixes stripped as fallbacks); otherwise
        probe both names.  If no exact hit, fall back to a best-similarity
        search; records NONE_S when nothing matched.
        """
        self._log_info(f'====> tb: 开始查询店铺 序号[{product.sort}]店铺[{product.excel_shop_name}]:')
        shop = None
        jd_name = product.jd_shop_name
        # Prefer an exact match when both sources agree on the shop name.
        # BUGFIX: jd_name may be None (JD crawl found no shop); the original
        # "'京东' in None" checks raised TypeError.
        if jd_name and product.excel_shop_name == jd_name:
            if '京东' in jd_name:
                # First drop everything after '京东'
                shop = self._perfect_match_shop(remove_suff(jd_name, '京东'))
                # Then try removing just the literal '京东'
                if not shop:
                    shop = self._perfect_match_shop(jd_name.replace('京东', ''))
            else:
                shop = self._perfect_match_shop(jd_name)

            # Fallbacks: strip franchise wording and retry the exact match
            if not shop:
                if '官方旗舰店' in jd_name:
                    shop = self._perfect_match_shop(jd_name.replace('官方', ''))
                elif '旗舰店' in jd_name:
                    shop = self._perfect_match_shop(jd_name.replace('旗舰', ''))
                elif '专卖店' in jd_name:
                    shop = self._perfect_match_shop(jd_name.replace('专卖', ''))
                elif '专营店' in jd_name:
                    shop = self._perfect_match_shop(jd_name.replace('专营', ''))
        else:
            for shop_name in (product.excel_shop_name, jd_name):
                if shop_name and jd_name and '京东' in shop_name:
                    # NOTE(review): the search runs against the JD-sourced
                    # name even when the Excel name triggered the keyword -
                    # looks intentional (prefer the JD name); confirm.
                    shop = self._perfect_match_shop(remove_suff(jd_name, '京东'))
                    if not shop:
                        shop = self._perfect_match_shop(jd_name.replace('京东', ''))
                    else:
                        break

        # No exact match anywhere: settle for the most similar candidate
        if not shop and product.excel_shop_name:
            if '京东' in product.excel_shop_name:
                shop = self._similar_match_shop(remove_suff(product.excel_shop_name, '京东'))
            else:
                # filter_shop_name strips suffixes like 官方旗舰店/旗舰店/专卖店/专营店/自营店
                shop = self._similar_match_shop(filter_shop_name(product.excel_shop_name))

        # Record the outcome
        if not shop:
            product.tb_search_result = ResultEnum.NONE_S.desc
            product.tb_shop_link_1 = None
            self._log_info(f'====> tb: 查询结果: [{ResultEnum.NONE_S.desc}]')
        else:
            product.tb_shop_link_1 = shop[0]
            product.tb_shop_name = shop[1]
            product.shop_similar = str(shop[2] * 100) + '%'
            self._log_info(
                f'====> tb: 查询结果: 序号[{product.sort}] 淘宝店铺名[{product.tb_shop_name}] 相似度[{product.shop_similar}]')

    def _perfect_match_shop(self, shop_name):
        """Search Taobao shops by *shop_name* and return an exact/near-exact hit.

        :return: (href, name, ratio) for a perfect match, or for a >=0.80
                 match whose name shares the first two characters; else None.
        """
        self._log_info(f'====> tb: 开始[全匹配]查询 店铺名称：[{shop_name}]:')
        self._open_url(url_search_shop % urllib.parse.quote(shop_name))
        shops = self._flu_eles(TbXpath.x_tb_shops)
        if not shops:
            return None
        # Only the first few results are worth comparing - anything further
        # down the list is unlikely to be the right shop.
        for candidate in shops[:8]:
            name = candidate.text.strip()
            ratio = match_str_similarity(shop_name, name)
            if ratio == 1.0:
                return candidate.get_attribute('href').strip(), name, 1.0
            if ratio >= 0.80 and name.startswith(shop_name[:2]):
                return candidate.get_attribute('href').strip(), name, ratio
        return None

    def _similar_match_shop(self, shop_name):
        """Search Taobao shops by *shop_name* and return the single most
        similar candidate.

        :return: (href, name, ratio) for the best-scoring shop whose name
                 contains the first two characters of *shop_name*, else None.
        """
        self._log_info(f'====> tb: 开始[相似匹配]查询: [{shop_name}]:')
        self._open_url(url_search_shop % urllib.parse.quote(shop_name))
        keyword = shop_name[:2]
        shops = self._flu_eles(TbXpath.x_tb_shops)
        if not shops:
            return None

        ratios = [0.0] * len(shops)
        for idx, candidate in enumerate(shops):
            name = candidate.text.strip()
            # Only candidates containing the keyword are worth scoring
            if name.startswith(keyword) or keyword in name:
                ratios[idx] = match_str_similarity(shop_name, name)

        # Pick the highest-scoring candidate
        best = max_index(ratios)
        if ratios[best] == 0.0:
            return None
        return shops[best].get_attribute('href').strip(), shops[best].text.strip(), ratios[best]

    def _search_tb_product(self, product: Result):
        """
        Search for the product inside the already-matched Taobao shop.

        To maximise the hit rate, the JD product name is split into several
        short search keys; each key is searched separately and the most
        similar result overall is recorded on *product*.
        """
        self._open_url(product.tb_shop_link_1)
        self._log_info(f"====> tb: 开始查询商品 序号:[{product.sort}] 商品名:[{product.jd_short_name}][{product.jd_sku_name}]")

        search_list = get_search_keys(product.jd_sku_name, range_int=6)
        products_resp = [self._do_product_search(key, product.jd_sku_name) for key in search_list]

        # Keep only the searches that produced a candidate
        candidates = [res for res in products_resp if res is not None]

        if not candidates:
            product.tb_search_result = ResultEnum.NONE_P.desc
        else:
            # BUGFIX: the original manual scan used a strict '>' against 0.0
            # and left max_t = () when every ratio was 0.0, crashing on
            # max_t[0].  max() always yields a real candidate and keeps the
            # same first-wins tie-breaking.
            best = max(candidates, key=lambda item: item[2])
            product.tb_search_result = best[0]
            product.tb_product_link = best[1]
            product.product_similar = str(best[2] * 100) + '%'
        self._log_info(f"====> tb: 序号:[{product.sort}] 查询结果[{product.tb_search_result}] 相似度[{product.product_similar}]")

    def _do_product_search(self, search_name, jd_sku_name):
        """
        Search *search_name* inside the currently opened shop and return the
        most similar product found.

        Detects whether the shop is a Tmall or a classic Taobao storefront
        and drives the matching search UI like a human; returns None
        (implicitly) when neither storefront type is recognised.

        :param search_name: (short) keyword typed into the shop's search box.
        :param jd_sku_name: full JD product name used for similarity scoring.
        :return: whatever _get_most_similar yields, or None.
        """
        self._log_info(f"====> tb: 搜索关键词:[{search_name}] 对比商品名称[{jd_sku_name}]")
        product_name = urllib.parse.quote(search_name)  # NOTE(review): unused since the direct-URL search was commented out
        url = self.driver.current_url
        # Tmall storefront?
        if self._flu_ele(TbXpath.x_mall_logo) or '.tmall.com' in url:
            # Direct URL search (kept for reference):
            # search_url = remove_suff_af(url, '.tmall.com') + url_mall_search_product % product_name
            # self._open_tb_url(search_url)

            # Drive the search box like a human instead
            self._flu_ele(TbXpath.x_mall_input).send_keys(search_name)
            self._flu_ele(TbXpath.x_mall_submit).click()
            self._random_scroll_page()
            self._check_in_process()
            self.wait.until(page_dom_complete_load())
            self._scroll_to_top()
            return self._get_most_similar(jd_sku_name, TbXpath.x_mall_no_product, TbXpath.x_mall_products, 'tm')

        # Classic Taobao storefront?
        elif self._flu_ele(TbXpath.x_tb_logo) or url.startswith("https://shop"):
            # Direct URL search (kept for reference):
            # if '?' in url:
            #     urls = url.split('?')
            #     search_url = urls[0] + url_tb_search_product % product_name + '&' + urls[1]
            # else:
            #     search_url = url + '/' + url_tb_search_product % product_name
            # self._open_tb_url(search_url)

            # Drive the search box like a human instead:
            # hover the search-type selector and pick "本店" (this shop only)
            actions = ActionChains(self.driver)
            e1 = self._flu_ele(TbXpath.x_search_type)
            actions.move_to_element(e1).perform()
            e2 = self._flu_ele("//a[text()='本店']")
            actions.click(e2).perform()
            self._flu_ele(TbXpath.x_tb_input).send_keys(search_name)
            self._flu_ele(TbXpath.x_tb_submit).click()
            self._random_scroll_page()
            self._check_in_process()
            self.wait.until(page_dom_complete_load())
            self._scroll_to_top()
            return self._get_most_similar(jd_sku_name, TbXpath.x_tb_no_product, TbXpath.x_tb_products)

    def _get_most_similar(self, jd_sku_name, xpath_no_prod, xpath_products, _type='tb'):
        """
        Parse the in-shop search result page and return the most similar
        product as ``(result_desc, href, ratio)``.

        :param jd_sku_name: JD product name to compare against
        :param xpath_no_prod: xpath of the "no result" indicator element
        :param xpath_products: primary xpath of the product link elements
        :param _type: 'tm' (Tmall) or 'tb' (Taobao); selects the fallback
            product xpath used when the primary one matches nothing
        :return: the tuple above, or ``None`` when no product reaches the
            30% similarity threshold
        """
        no_prod = self._flu_ele(xpath_no_prod)

        # Search produced no results at all
        if no_prod:
            tt = no_prod.text.strip()
            if '没找到符合条件' in tt or '“0”个宝贝' in tt:
                self._log_info(f"====> tb: 查询结果:[{ResultEnum.NONE_P.desc}]")
                return None

        elements: list[WebElement] = self._flu_eles(xpath_products)
        if not elements:
            # Page layout variant — fall back to the alternate product xpath.
            if 'tm' == _type:
                elements = self._flu_eles(TbXpath.x_mall_products_1)
            elif 'tb' == _type:
                elements = self._flu_eles(TbXpath.x_tb_products_1)

        if not elements:
            self._log_info(f"====> tb: 查询结果:[{ResultEnum.NONE_P.desc}]")
            return None

        ratios = [0.0] * len(elements)
        for i, element in enumerate(elements):
            ratio = match_str_similarity(deleteInvalidWord(jd_sku_name), element.text.strip())
            # Only similarities above 30% count as candidates.
            if ratio > 0.3:
                ratios[i] = ratio
            if ratio == 1.0:
                # Exact match — stop scanning immediately.
                self._log_info(f"====> tb: 查询结果:[{ResultEnum.PERFECT_MATCH.desc}] 相似率[{ratio}]")
                return ResultEnum.PERFECT_MATCH.desc, element.get_attribute("href"), ratio

        # Take the most similar candidate.
        idx = max_index(ratios)
        if ratios[idx] == 0.0:
            self._log_info(f"====> tb: 查询结果:[{ResultEnum.NONE_P.desc}]")
            return None
        # BUG FIX: this log previously reported PERFECT_MATCH even though a
        # SIMILAR_MATCH is returned — log the result that is actually used.
        self._log_info(f"====> tb: 查询结果:[{ResultEnum.SIMILAR_MATCH.desc}] 相似率[{ratios[idx]}]")
        return ResultEnum.SIMILAR_MATCH.desc, elements[idx].get_attribute("href"), ratios[idx]

    def _random_scroll_page(self, mi=2, ma=5):
        """Scroll to a random vertical offset a random number (mi..ma) of times,
        pausing half a second before each scroll, to look less bot-like."""
        times = random.randint(mi, ma)
        for _ in range(times):
            time.sleep(0.5)
            self.driver.execute_script("window.scrollTo(0,Math.round(Math.random()*800))")

    def _scroll_to_top(self):
        """Jump the browser page back to the very top."""
        script = "window.scrollTo(0,0)"
        self.driver.execute_script(script)

    def _get_tb_shop_search_link_2(self):
        """
        Read the shop search URL out of the page's ``window.g_config`` object.

        :return: the URL string, or ``None`` when the config is missing or
            does not have the expected nested structure.
        """
        try:
            dd = self.driver.execute_script("return window.g_config")
            return str(dd['headerVO']['buttons'][1]['events'][0]['fields']['url'])
        except Exception:
            # was a bare except: that would also swallow KeyboardInterrupt/SystemExit
            return None

    def _flu_ele(self, locator) -> WebElement | None:
        """
        Wait until the element located by *locator* (xpath) is visible.

        :param locator: xpath expression of the element
        :return: the element, or ``None`` on timeout / any lookup failure
        """
        try:
            return self.wait.until(ec.visibility_of_element_located((By.XPATH, locator)))
        except Exception:
            # was a bare except: narrowed so Ctrl-C can still interrupt the run
            return None

    def _flu_eles(self, locator) -> list[WebElement] | None:
        """
        Wait until all elements located by *locator* (xpath) are visible.

        :param locator: xpath expression of the elements
        :return: the element list, or ``None`` on timeout / any lookup failure
        """
        try:
            return self.wait.until(ec.visibility_of_all_elements_located((By.XPATH, locator)))
        except Exception:
            # was a bare except: narrowed so Ctrl-C can still interrupt the run
            return None

    def _log_info(self, str_text):
        """Replace the UI text widget's content with *str_text* and log it at INFO."""
        widget = self._text
        widget.delete('1.0', tk.END)
        widget.insert('1.0', str_text)
        log.info(str_text)

    def _log_and_sending(self, str_text: str):
        """Log *str_text* and push it to the DingTalk robot (prefix stripped)."""
        self._log_info(str_text)
        message = str_text.replace("====> ", '')
        send_dingding(message)

    def _log_error(self, str_text):
        """Replace the UI text widget's content with *str_text* and log it at ERROR."""
        widget = self._text
        widget.delete('1.0', tk.END)
        widget.insert('1.0', str_text)
        log.error(str_text)

    def main_loop(self):
        """Enter the Tk event loop; blocks until the window is closed."""
        self.master.mainloop()

    def shut_down(self):
        """
        Best-effort teardown: quit the webdriver, then quit the Tk app.
        Failures in either step are ignored so shutdown always completes.
        """
        try:
            if self.driver:
                self.driver.quit()
        except Exception:
            # was a bare except: don't swallow KeyboardInterrupt/SystemExit
            pass
        try:
            if self.master:
                self.master.quit()
        except Exception:
            pass

    def async_func(self, func):
        """
        Run *func* on a daemon worker thread so the UI stays responsive
        (the thread handle is kept on ``self.thread``).
        """
        worker = threading.Thread(target=func, daemon=True)
        self.thread = worker
        worker.start()

    def quit_browser(self):
        """
        Quit the webdriver (if one is open), flag the run loop to continue
        (``continue_flag`` under ``lock``) and update the status label.
        Shows an info dialog when no browser is open; errors are ignored.
        """
        try:
            if self.driver:
                self.driver.quit()
                self.driver = None
                global continue_flag
                # lock.acquire()/release() replaced with the context manager
                # so the lock is released even if assignment ever raises.
                with lock:
                    continue_flag = True
                self._now_text.set("退出采集")
            else:
                Application.toast_info(message="未开启浏览器")
        except Exception:
            # was a bare except: teardown errors are ignored on purpose
            pass

    @staticmethod
    def toast_info(message):
        """Pop a modal information dialog carrying *message*."""
        messagebox.showinfo(message=message, title='温馨提示')

    async def keep_update(self):
        """Yield to the event loop and force-refresh the Tk window a few times."""
        # Bounded to 6 refreshes (same count as the original manual counter)
        # so this coroutine can never spin forever.
        for _ in range(6):
            await asyncio.sleep(0)  # give other coroutines CPU time
            self.master.update()  # force a UI repaint


async def save_update_excel(file_path, products_list: list[Result], sheet_name=None, _type='jd'):
    """
    Write the collected results into the .xlsx file at *file_path*.

    Creates the file on first use; otherwise updates each product's row in
    place (row = ``product.sort + 1``, columns taken from the module-level
    ``head_column_index`` mapping of attribute name -> column index).

    On save failure the products are queued into the matching module-level
    "lost" list (selected by *_type*: 'jd' or 'tb') for a later retry by
    ``update_flag``, and ``error_flag`` is set.

    :raises RunException: when *file_path* does not end with ``.xlsx``
    """
    if not file_path.endswith(".xlsx"):
        raise RunException("文件必须为.xlsx结尾的Excel文件")

    if not os.path.exists(file_path):
        ExcelHelper.save(file_path, [products_list], sheet_name)
        return

    work_book = load_workbook(file_path)
    ws = work_book[sheet_name]

    for product in products_list:
        for key, column in head_column_index.items():
            ws.cell(row=int(product.sort) + 1, column=column).value = getattr(product, key)
    try:
        work_book.save(file_path)
    except Exception as e:
        log.error(traceback.format_exc())
        global error_flag
        error_flag = True
        # BUG FIX: append() nested the whole list inside the lost list, so
        # the retry in update_flag iterated lists instead of Result objects
        # and could never succeed; extend() keeps the lost list flat.
        if _type == 'jd':
            lost_product_jd.extend(products_list)
        elif _type == 'tb':
            lost_product_tb.extend(products_list)
        log.error(f'====> 采集结果数据写入Excel失败 \n失败原因:{str(e)}')


def save_update_excel_no_async(file_path, products_list: list[Result], sheet_name=None, _type='jd'):
    """
    Synchronous twin of ``save_update_excel``: write the collected results
    into the .xlsx file at *file_path*.

    Creates the file on first use; otherwise updates each product's row in
    place (row = ``product.sort + 1``, columns taken from the module-level
    ``head_column_index`` mapping of attribute name -> column index).

    On save failure the products are queued into the matching module-level
    "lost" list (selected by *_type*: 'jd' or 'tb') for a later retry, and
    ``error_flag`` is set.

    :raises RunException: when *file_path* does not end with ``.xlsx``
    """
    if not file_path.endswith(".xlsx"):
        raise RunException("文件必须为.xlsx结尾的Excel文件")

    if not os.path.exists(file_path):
        ExcelHelper.save(file_path, [products_list], sheet_name)
        return

    work_book = load_workbook(file_path)
    ws = work_book[sheet_name]

    for product in products_list:
        for key, column in head_column_index.items():
            ws.cell(row=int(product.sort) + 1, column=column).value = getattr(product, key)
    try:
        work_book.save(file_path)
    except Exception as e:
        log.error(traceback.format_exc())
        global error_flag
        error_flag = True
        # BUG FIX: append() nested the whole list inside the lost list, so
        # the retry path iterated lists instead of Result objects and could
        # never succeed; extend() keeps the lost list flat.
        if _type == 'jd':
            lost_product_jd.extend(products_list)
        elif _type == 'tb':
            lost_product_tb.extend(products_list)
        # Consistency fix: the async twin logs this failure at ERROR level,
        # while this copy used log.info — a write failure is an error.
        log.error(f'====> 采集结果数据写入Excel失败 \n失败原因:{str(e)}')


def update_flag(to_false=False, _now_text=None):
    """
    Flip the module-level ``continue_flag`` (guarded by ``lock``) and update
    the status label *_now_text*.

    :param to_false: ``True`` marks the abnormal state (stop the run loop);
        ``False`` restores the normal state and retries writing any
        previously failed ("lost") products into the jd/tb result files.
        ``error_flag`` is cleared only when both retries succeed.
    :param _now_text: tk StringVar-like object used as the status label
    """
    global error_flag, continue_flag, lost_product_jd, lost_product_tb

    if to_false:
        # Something went wrong: stop the run loop.
        with lock:
            continue_flag = False
        _now_text.set("异常状态")
        return

    with lock:
        continue_flag = True
    _now_text.set("正常状态")

    if not error_flag:
        return

    # Compensation: retry the excel writes that failed earlier.
    succeeded = 0
    try:
        if len(lost_product_jd) > 0:
            asyncio.run(save_update_excel(JD_RESULT_PATH, lost_product_jd, SHEET_NAME))
            lost_product_jd = []
        succeeded += 1
    except Exception:
        # was a bare except followed by a dead `pass`
        log.error("异步处理写入数据(备份数据)异常 ====> " + traceback.format_exc())
    try:
        if len(lost_product_tb) > 0:
            asyncio.run(save_update_excel(TB_RESULT_PATH, lost_product_tb, SHEET_NAME, 'tb'))
            lost_product_tb = []
        succeeded += 1
    except Exception:
        log.error("异步处理写入数据(备份数据)异常 ====> " + traceback.format_exc())
    if succeeded == 2:
        error_flag = False


def create_app(app_name=APP_NAME):
    """
    Build the root Tk window, sized to half the screen and centred on it,
    and return it. The chosen width/height are published through the
    module-level ``ww``/``wh`` globals.
    """
    root = tk.Tk()
    root.title(app_name)
    global ww, wh
    screen_w, screen_h = root.winfo_screenwidth(), root.winfo_screenheight()
    ww = int(screen_w / 2)
    wh = int(screen_h / 2)
    offset_x = (screen_w - ww) // 2
    offset_y = (screen_h - wh) // 2
    root.geometry(f"{ww}x{wh}+{offset_x}+{offset_y}")
    # root.resizable(False, False)
    return root


def main():
    """Program entry: create the root window, build the app, run the event loop."""
    window = create_app()
    Application(window).main_loop()


# Script entry point: only run the GUI when executed directly, not on import.
if __name__ == '__main__':
    main()
