from pyppeteer.launcher import connect
from pyppeteer.page import Page
from pyppeteer.browser import Browser
from pyppeteer import launch
from email.mime.text import MIMEText
from email.header import Header
from threading import Thread
from dateutil import parser
from xpinyin import Pinyin
from dateutil.relativedelta import relativedelta
from openpyxl.styles import Alignment
from openpyxl.utils import get_column_letter
from openpyxl.cell.cell import MergedCell
from io import BytesIO
from email.utils import formataddr
from sql import sqlseverDB
from lxml import etree
from collections import defaultdict
from collections import Counter
from functools import wraps
from config import *
from county_config import *
import pandas as pd
import requests,subprocess,asyncio,os,re,json,openpyxl,time,datetime
import concurrent.futures,csv,copy,math,logging,traceback,difflib
import random,socket,psutil,urllib.parse,string,xlrd,smtplib

# Module-wide SQL Server handle shared by every helper below; arguments appear
# to be (driver, server, database, user, password) — confirm against the
# signature of sql.sqlseverDB.
tms = sqlseverDB('{SQL Server}', SQL_SERVER, 'SuMaiTongPol', SQL_USER, SQL_PWD)

def contains_word(title, words):
    """
    Check whether the title contains any of the target phrases (case-insensitive).

    Each entry in *words* may be a multi-word phrase; a phrase matches when
    every one of its (whitespace-split) words appears among the title's words.

    :param title: str, the title to check
    :param words: iterable of target words/phrases
    :return: a single-element list holding the first matching phrase
             (callers feed it straight into SQL parameters), or False
             when nothing matches
    """
    title_words = set(title.lower().split())
    for word in words:
        # str.split() already discards empty fragments, so no extra strip
        # filtering is needed.
        phrase_words = {w.lower() for w in word.split()}
        if phrase_words and phrase_words <= title_words:
            return [word]
    return False

def is_valid_ad(kw, qq_kws):
    """Return True when *kw* contains none of the blocked words as a whole
    word (case-insensitive); otherwise False."""
    blocked = any(
        re.search(r'\b{}\b'.format(re.escape(w)), kw, flags=re.IGNORECASE)
        for w in qq_kws
    )
    return not blocked

def glproqqc(pdatas,qqkws):
    """
    Filter product rows against the infringement-word list.

    For each (pean, pname) pair the product name is checked against *qqkws*
    (Polish infringement phrases) via contains_word().  Matching products are
    blacklisted in the database (ALLGoodPro.pw = 0) and the matched words'
    usage counters are incremented; clean products are collected and returned.
    Work is fanned out across a 20-worker thread pool.

    :param pdatas: iterable of (pean, pname) tuples
    :param qqkws: iterable of infringement words/phrases
    :return: list of (pean, pname) tuples that triggered no infringement word
    """
    def update_pw(pdata):
        # Worker: returns pdata when the name is clean, None when blacklisted.
        pean,pname=pdata
        jiaojikws=contains_word(pname,qqkws)
        if jiaojikws:
            # Bump usecount for every matched word, then blacklist the product.
            kpara=list(jiaojikws)
            wstr=','.join(['?' for _ in kpara])
            aff1=tms.ExecNoQuery(f'update qqc set usecount=usecount+1 where qtype=? and qcontent in ({wstr})',['波兰语']+kpara)
            aff2=tms.ExecNoQuery('update ALLGoodPro set pw=0 where pean=?',(pean,))
            print(f'《ALLGoodPro》,pean:{pean},触发侵权词{jiaojikws}:{aff1},加入黑名单:{aff2}')
            return
        else:
            # Clean product (whitelist) — keep it.
            return pdata
                
        
    pcan_use=[]

    task_count=len(pdatas)
    with concurrent.futures.ThreadPoolExecutor(max_workers=20) as t:

        tasks=[t.submit(update_pw,pdata) for pdata in pdatas]

        for t0 in concurrent.futures.as_completed(tasks):
            try:
                res=t0.result()
                if res:
                    pcan_use.append(res)
                
            except Exception as e:
                print(f'处理侵权词线程错误 => {e}')
                traceback.print_exc()
            # task_count tracks remaining work for optional progress logging.
            task_count-=1
                
    
    return pcan_use

def getTimeStr():
    """Return the current local time formatted as 'YYYY-MM-DD_HH-MM-SS'
    (filesystem-safe, suitable for filenames)."""
    return time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time()))

def get_yn_0timestamp():
    """Return the Unix timestamp of today's midnight (00:00:00) in the
    fixed-offset UTC+7 timezone."""
    tz7 = datetime.timezone(datetime.timedelta(hours=7))
    midnight = datetime.datetime.now(tz7).replace(
        hour=0, minute=0, second=0, microsecond=0)
    return int(midnight.timestamp())

def is_contain000(des, elist):
    """
    Return True when any element of *elist* occurs as a substring of *des*.

    :param des: str, text to search
    :param elist: iterable of substrings to look for
    :return: bool
    """
    # any() short-circuits on the first hit instead of scanning the whole list.
    return any(eee in des for eee in elist)

def timestamp_to_sql_datetime(timestamp, to_utc=False):
    """
    Format a Unix timestamp as a SQL Server DATETIME string.

    :param timestamp: int/float timestamp in seconds
    :param to_utc: interpret the timestamp in UTC when True; otherwise use
        the machine's local timezone
    :return: str in 'YYYY-MM-DD HH:MM:SS' form
    """
    zone = datetime.timezone.utc if to_utc else None
    moment = datetime.datetime.fromtimestamp(timestamp, tz=zone)
    return moment.strftime('%Y-%m-%d %H:%M:%S')


def timestamp_to_datetime(timestamp, tz_offset_hours=8):
    """
    Format a Unix timestamp in a fixed-offset timezone (default UTC+8).

    :param timestamp: int/float timestamp in seconds
    :param tz_offset_hours: int hours east of UTC (default +8)
    :return: str like '2025-10-28 15:30:00'
    """
    target_tz = datetime.timezone(datetime.timedelta(hours=tz_offset_hours))
    localized = datetime.datetime.fromtimestamp(timestamp, target_tz)
    return localized.strftime('%Y-%m-%d %H:%M:%S')


def timestamp_to_date(timestamp, tz_offset_hours=8):
    """
    Convert a Unix timestamp to a timezone-aware datetime (default UTC+8).

    NOTE(review): despite the name and the old docstring, this returns a
    ``datetime.datetime`` object, NOT a 'YYYY-MM-DD' string — callers may
    rely on the object form, so behavior is documented rather than changed.

    :param timestamp: int/float timestamp in seconds
    :param tz_offset_hours: int hours east of UTC (default +8)
    :return: timezone-aware ``datetime.datetime``
    """
    tz = datetime.timezone(datetime.timedelta(hours=tz_offset_hours))
    dt = datetime.datetime.fromtimestamp(timestamp, tz)
    return dt

def get_yesterday_date_tz(tz_offset_hours=7):
    """Return yesterday's date as 'YYYY-MM-DD' in a fixed-offset timezone
    (default UTC+7)."""
    zone = datetime.timezone(datetime.timedelta(hours=tz_offset_hours))
    target_day = datetime.datetime.now(zone) - datetime.timedelta(days=1)
    return target_day.strftime('%Y-%m-%d')

def human_wait(min_seconds=1.0, max_seconds=5.0, do_sleep=False):
    """
    Produce a human-like random wait duration in [min_seconds, max_seconds],
    optionally sleeping for that long.

    :param min_seconds: lower bound, inclusive (default 1.0)
    :param max_seconds: upper bound, inclusive (default 5.0)
    :param do_sleep: when True, actually call time.sleep() for the duration
    :return: float number of seconds, rounded to 2 decimals
    """
    base = round(random.uniform(min_seconds, max_seconds), 2)
    # Add a small jitter, then clamp back into range; values near the edges
    # pile up slightly, which reads as more "human" than pure uniform.
    jittered = round(base + random.uniform(-0.2, 0.2), 2)
    wait_time = min(max_seconds, max(min_seconds, jittered))

    if do_sleep:
        time.sleep(wait_time)
    return wait_time

async def openChrome(dlip=None):
    """
    Attach pyppeteer to a local Chrome with remote debugging on port 9222.

    If no debugger is already listening, Chrome is launched (optionally
    behind an HTTP proxy) and then connected to over the DevTools websocket.

    :param dlip: optional 'host:port' proxy address passed via --proxy-server
    :return: connected Browser instance, or None when Chrome cannot be found
             (errors during connect are printed and None is returned implicitly)
    """
    flag=False
    try:
        # Probe for an already-running debug-enabled Chrome.
        session=requests.session()
        res=session.get('http://localhost:9222/json/version')
        if res.status_code==200 and res.json():
            flag=True
    except Exception as e:
        print('未启动浏览器等待启动')

    try:
        if not flag:
            exepath=find_chrome_path()
            print(exepath)
            if not exepath:
                return None
            dlstr='' if not dlip else f' --proxy-server=http://{dlip}'
            subprocess.Popen(f'"{exepath}" --remote-debugging-port=9222{dlstr}',shell=True, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # NOTE(review): there is no wait/retry between launching Chrome and
        # querying the debugger endpoint — this GET may race a slow start.
        res=session.get('http://localhost:9222/json/version')
        bobj=res.json()
        browser=await connect({
            'browserWSEndpoint':bobj['webSocketDebuggerUrl'],
            'args':['--disable-infobars','--start-maximized'
                    ],    
            'defaultViewport':None
        })
        
        return browser

    except Exception as e:
        print(e)

async def openSunBrowser(basic_info,task_id) -> Browser: 
    """
    Start (or attach to) an AdsPower fingerprint browser for one shop profile.

    On success the task is registered in the taskStatus table and, when no
    other task currently owns the browser, all tabs except the first are
    closed.  API rate-limit responses trigger a short randomized back-off and
    retry; hard failures are recorded in ErrBid/taskStatus and the browser is
    released via close_SunBrowser().

    :param basic_info: dict with BrowserID / DpName / GroupName / PingTai
    :param task_id: numeric task id (key into Task_Name_dict)
    :return: connected pyppeteer Browser, or None on failure
    """
    rw_name=Task_Name_dict[task_id]
    bid=basic_info['BrowserID']
    bname=basic_info['DpName']
    gname=basic_info['GroupName']
    ping_tai=basic_info['PingTai']
    params={
        'k':ADSPower_API_KEY,
        'user_id':bid,
        'open_tabs':0,
        'ip_tab':0
    }
    while True:
        now_time=datetime.datetime.now()
        # ErrBid columns/values used when logging a startup failure
        # ('des' is appended later, once the error message is known).
        err_zds=['iscl','task_id','task_name','BID','loginTime','pintai','bname','gname','des']
        err_params=[0,task_id,rw_name,bid,now_time,ping_tai,bname,gname]

        try:    
            res = requests.get(f'{ADSPower_API_URL}/api/v1/browser/start',params=params,timeout=180)
            res_data = res.json()
            print(res_data)
            if res_data['code']==0:
                browser=None
                try:
                    ws_url = res_data['data']['ws']['puppeteer']
                    browser = await asyncio.wait_for(
                        connect({
                            'browserWSEndpoint': ws_url,
                            'args': [            
                                '--no-sandbox',
                                '--disable-setuid-sandbox',
                                '--disable-blink-features=AutomationControlled',
                                '--start-maximized'
                            ],
                            'defaultViewport': None
                        }),
                        timeout=180  # connect timeout in seconds (value is 180, not the 120 the old comment claimed)
                    )
                    aff=0

                    # Task ids 7-11 are exempt from taskStatus registration.
                    if task_id not in range(7,12):
                        start_zds=['bid','task_id','task_name','bname','gname','is_on','state']
                        start_params=[bid,task_id,rw_name,bname,gname,1,0]
                        aff=tms.merge_into('taskStatus',start_zds,start_params,['bid','task_id'],True)
                    logging.info(f'店铺:《{bname}》,{bid},启动指纹浏览器成功,登记任务:{aff}')
                    # If no other task is using this browser, close every tab
                    # except the first so the task starts from a clean slate.
                    rs= tms.ExecQuery('select count(*) from taskstatus where bid=? and task_id!=? and is_on=1',(bid,task_id))
                    if rs[0][0]==0:
                        ps=await browser.pages()
                        logging.info(f'店铺:《{bname}》,{bid},无其他任务执行,关闭无关页面{len(ps)}个')
                        for pp in ps[1:]:
                            await pp.close()
                    return browser
                except Exception as e:
                    traceback.print_exc()
                    logging.info(f'店铺:《{bname}》,{bid},接管浏览器错误 => {e}')
                    close_SunBrowser(bid,task_id)
                    return              

            else:
                err_msg=res_data.get('msg')
                if  err_msg=='Too many request per second, please check':
                    # Rate-limited: back off a few seconds and retry the loop.
                    wait_sleep_second=random.randint(3,8)
                    logging.info(f'{bid},启动浏览器过多,等待 {wait_sleep_second} 秒,重试...')
                    time.sleep(wait_sleep_second)
                else:
                    # Hard failure: record it in ErrBid and mark the task off.
                    err_params.append(err_msg)
                    aff1=tms.merge_into('ErrBid',err_zds,err_params,['bid'])
                    start_zds=['bid','task_id','task_name','bname','gname','is_on','state']
                    start_params=[bid,task_id,rw_name,bname,gname,0,0]
                    aff2=tms.merge_into('taskStatus',start_zds,start_params,['bid','task_id'],True)
                    logging.error(f"店铺:《{bname}》,{bid},启动指纹浏览器错误:{err_msg},错误状态:{aff1},任务状态:{aff2}")
                    close_SunBrowser(bid,task_id)
                    return
            time.sleep(1)
        except Exception as e:
            
            logging.error(f'店铺:《{bname}》,{bid},接口超时错误 => {e}')
            close_SunBrowser(bid,task_id)
            return

def kill_process_using_port(port):
    """
    Kill whichever process is listening on *port* (localhost).

    :param port: int TCP port to free up
    :return: 1 when a process was killed, -1 when the port is not in use,
             None when the port is in use but no owning process was found
             (or an unexpected error occurred)
    """
    try:
        # Probe the port: connect_ex returns 0 when something accepts the connection.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            if s.connect_ex(("127.0.0.1", port)) == 0:
                logging.info(f"Port {port} is in use.")
                
                # Walk all processes looking for one bound to the port.
                for proc in psutil.process_iter(attrs=["pid", "name"]):
                    try:
                        for conn in proc.net_connections(kind="inet"):
                            if conn.laddr.port == port:
                                logging.info(f"Killing process {proc.info['name']} (PID: {proc.info['pid']}) using port {port}")
                                proc.kill()  # force-terminate the owner
                                logging.info(f"Process on port {port} terminated.")
                                return 1
                    except (psutil.AccessDenied, psutil.NoSuchProcess):
                        # Skip processes we cannot inspect or that died mid-scan.
                        continue
            else:
                logging.info(f"Port {port} is not in use.")
                return -1

    except Exception as e:
        print(f"An error occurred: {e}")

def close_SunBrowser(bid,task_id):
    """
    Release this task's claim on a fingerprint browser and stop it when idle.

    Marks the task off in taskStatus; if no other task still uses the
    browser, asks the AdsPower v2 API to stop the profile.  Retries up to 5
    times (10 s apart) on errors, with extra randomized back-off when the
    API rate-limits.

    :param bid: AdsPower profile id
    :param task_id: numeric task id (key into Task_Name_dict)
    """
    rw_name=Task_Name_dict[task_id]
    params={
        'profile_id':bid
    }
    for jjj in range(5):
        try:
            aff_close=tms.ExecNoQuery('update taskstatus set is_on=0 where bid=? and task_id=?',(bid,task_id))
            rs= tms.ExecQuery('select count(*) from taskstatus where bid=? and task_id!=? and is_on=1',(bid,task_id))
            if rs[0][0]==0:
                res = requests.post(f'{ADSPower_API_URL}/api/v2/browser-profile/stop',json=params,timeout=120)
                json_data=res.json()
                code=json_data['code']
                msg=json_data['msg']
                if code==0 and msg=='success':
                    logging.info(f'任务:《{rw_name}》,成功关闭指纹浏览器:{bid},更改状态:{aff_close}')
                    return
                else:
                    # NOTE(review): 'code' is compared to int 0 above but to the
                    # string '-1' here — confirm which type the v2 API returns.
                    if code=='-1' and msg=='Too many request per second, please check':
                        wait_sleep_second=random.randint(3,8)
                        logging.error(f'任务:《{rw_name}》,bid:{bid}，关闭指纹浏览器接口频繁,等待 {wait_sleep_second} 秒重试...')
                        time.sleep(wait_sleep_second)
                        continue

                    logging.error(f'任务:《{rw_name}》,bid:{bid}，关闭指纹浏览器失败:{json_data}')
                    return
            else:
                # Another task still owns this browser — leave it running.
                logging.info(f'任务:《{rw_name}》,未关闭指纹浏览器:{bid},当前其他任务正在使用该浏览器,更改状态:{aff_close}')
                return
        except Exception as e:
            logging.error(f'关闭浏览器错误 => {e},正在重试...')
            
        time.sleep(10)

def close_bro_byapi(bid):
    """
    Stop an AdsPower browser profile via the v1 API.

    Retries up to 3 times on network errors, sleeping 10 s between attempts.

    :param bid: AdsPower profile/user id
    :return: 1 on success, 0 when the API reports a failure, None when all
             attempts raised
    """
    for _attempt in range(3):
        try:
            reply = requests.get(f'{ADSPower_API_URL}/api/v1/browser/stop?user_id={bid}')
            payload = reply.json()
            if payload['code'] == 0 and payload['msg'] == 'success':
                return 1
            logging.info(payload)
            return 0
        except Exception as e:
            logging.error(f'关闭浏览器错误 => {e},正在重试...')
        time.sleep(10)

def get_all_active_sunbrowser():
    """
    Fetch the first page (50 entries) of AdsPower browser profiles.

    :return: parsed JSON dict when the API answers with code == 0,
             None after 3 failed attempts
    """
    endpoint = f'{ADSPower_API_URL}/api/v2/browser-profile/list'
    payload = {'page': '1', 'limit': '50'}
    for _attempt in range(3):
        try:
            reply = requests.post(endpoint, json=payload, timeout=120).json()
            if reply['code'] == 0:
                return reply
        except Exception as e:
            print(e)


def get_SunBrowser_info(bid):
    """
    Fetch profile metadata for one AdsPower browser, retrying forever.

    :param bid: AdsPower user/profile id
    :return: dict describing the profile (first item of data.list)
    """
    endpoint = f'{ADSPower_API_URL}/api/v1/user/list'
    query = {'user_id': bid}
    while True:
        try:
            payload = requests.get(endpoint, params=query, timeout=20).json()
            if payload['code'] == 0:
                return payload['data']['list'][0]
        except Exception as e:
            print(e)
        # Randomized back-off to avoid hammering the API.
        time.sleep(random.randint(2, 8))


def update_SunBrowser_info(binfo):
    """
    Push updated profile settings to AdsPower, retrying until accepted.

    :param binfo: dict of profile fields to update (must identify the profile)
    :return: 1 once the API reports success (code == 0)
    """
    endpoint = f'{ADSPower_API_URL}/api/v1/user/update'
    while True:
        try:
            reply = requests.post(endpoint, json=binfo, timeout=20).json()
            if reply['code'] == 0:
                print(f'更改信息成功:{binfo}')
                return 1
        except Exception as e:
            print(e)
        # Randomized back-off between attempts.
        time.sleep(random.randint(2, 8))

async def get_pbrands():
    """
    Scrape Allegro's official protected-brands appendix and sync the brand
    names into the qqc table as Polish ('波兰语') infringement words.

    Side effects: opens a local Chrome via openChrome(), upserts each brand
    into qqc via tms.merge_into, and prints progress/summary counts.
    """
    browser= await openChrome()
    page=await browser.newPage()

    await repeat_goto(page,'https://allegro.pl/regulamin/zalacznik/1')

    # The brand list is one big comma-separated text node; anchor on a known
    # prefix of the list to wait for it to render.
    pbrand_txt_xpath='//p[contains(text(),"& Other Stories, 111Skin,")]/text()'
    await wait_one_xpath(page,pbrand_txt_xpath,max_wait=60)

    cot= await page.content()
    pbrnad_txt=etree.HTML(cot).xpath(pbrand_txt_xpath)[0]
    pbrands=pbrnad_txt.split(',')
    pbrands=[ppp.strip() for ppp in pbrands]
    # Drop the trailing character of the final entry (list ends with punctuation).
    pbrands[-1]=pbrands[-1][:-1]
    rs_yqqc=tms.ExecQuery('select count(*) from qqc where qtype=?',('波兰语',))
    yqq_count=rs_yqqc[0][0]
    print(f'本次从官方共采集 {len(pbrands)} 个侵权词')
    print(f'数据库原有波兰语侵权词数量共 {yqq_count} 个,开始更新到数据库...')
    suc=0
    fail=0
    j=1

    # Upsert each brand, keyed on qcontent.
    for pbrand in pbrands:
        zds=['qcontent','qtype']
        params=[pbrand,'波兰语']

        aff=tms.merge_into('qqc',zds,params,['qcontent'],True)
        if aff:
            print(f'词《{pbrand}》,更新到词库成功,剩余需更新{len(pbrands)-j}')
            suc+=1
        else:
            print(f'词《{pbrand}》,更新到词库失败,剩余需更新{len(pbrands)-j}')
            fail+=1
        j+=1
    rs_now_qqc=tms.ExecQuery('select count(*) from qqc where qtype=?',('波兰语',))
    print(f'本次从官方共采集 {len(pbrands)} 个侵权词,更新成功 {suc} 个,更新失败 {fail} 个')
    print(f'更新前波兰侵权词库:{yqq_count} 个,更新后: {rs_now_qqc[0][0]} 个')

def round_to_nearest_5(num):
    """Round *num* to the nearest multiple of 5 (ties follow Python round()'s
    banker's rounding)."""
    return 5 * round(num / 5)

def random_delay(min_delay=100, max_delay=500):
    """Build a pyppeteer-style options dict with a random 'delay' value in
    milliseconds, to mimic human input timing."""
    return {'delay': random.randint(min_delay, max_delay)}

def get_proxies(user_id):
    """
    Query AdsPower for a single profile's configuration (includes proxy info).

    :param user_id: AdsPower profile id
    :return: the API's 'data' payload on success (code == 0), None otherwise
    """
    query = {'k': ADSPower_API_KEY, 'user_id': user_id}
    reply = requests.get(f'{ADSPower_API_URL}/api/v1/user/list', params=query)
    payload = reply.json()
    if payload['code'] == 0:
        return payload['data']

async def del_hj(user_ids):
    """
    Delete the given AdsPower profiles and print the API response.

    :param user_ids: list of profile ids to delete
    """
    reply = requests.post(f'{ADSPower_API_URL}/api/v1/user/delete',
                          json={'user_ids': user_ids})
    print(reply.json())

async def wait_one_xpath(page:Page,xpath,max_wait=None,host='seller.shopee.co.id'):
    """
    Poll the page once per second until *xpath* matches, working around
    captcha redirects and the Shopee "Create ads manually" interstitial.

    :param page: pyppeteer Page
    :param xpath: xpath expression to wait for
    :param max_wait: seconds per attempt before reloading the page; after
        more than 3 reloads a bare ``raise`` aborts the wait
        (NOTE(review): no exception is active at that point, so this
        surfaces as a RuntimeError — confirm that is intended)
    :param host: Shopee seller host used to rebuild URLs after a captcha
    :return: non-empty list of matching element handles
    """
    wait_count=0
    raise_count=0
    while True:
        try:
            ss=await page.xpath(xpath)
            if ss:
                return ss
            
            # Special case while waiting for the ads export button: click
            # through the "Create ads manually" interstitial if it appears.
            if xpath=='//div[@class="export-button"]':
                npxpath='//span[text()=" Create ads manually "]'
                el_np=await page.xpath(npxpath)
                if el_np:
                    try:
                        await el_np[0].click({'delay':200})
                        await asyncio.sleep(3)
                    except:
                        pass
                # Kicked to a captcha: navigate back to the ads page.
                if 'verify/captcha' in page.url:
                    url=f'https://{host}/portal/marketing/pas/index'
                    await repeat_goto(page,url)
            else:
                # Kicked to a captcha: navigate back to the host's home page.
                if 'verify/captcha' in page.url:
                        url=f'https://{host}/'
                        await repeat_goto(page,url) 

                    
        except Exception as e:
            # Polling is best-effort; swallow transient page errors and retry.
            pass
        await asyncio.sleep(1)
        wait_count+=1
        if max_wait and wait_count>max_wait:
            wait_count=0
            raise_count+=1
            if raise_count>3:
                raise
            await page.reload()
        
async def wait_tc_xpath(page:Page):
    """
    Dismiss the known Shopee seller-center popups when present: the
    "Got it" tip, the ShopAds new-seller modal, the large content modal,
    and finally a "Maybe Later" prompt via check_tc().

    :param page: pyppeteer Page
    """
    el_tc0=await page.xpath('//span[text()=" Got it "]')
    el_tc = await page.xpath('//div[@class="eds-modal__box ShopAdsNewSellerModal"]')
    el_tc2 = await page.xpath('//div[@class="eds-modal__content eds-modal__content--large"]')
    
    if el_tc0:
        logging.info(f'出现GOIT弹窗')
        try:
            await el_tc0[0].click({'delay':200})
            await asyncio.sleep(1)
        except Exception as e:
            logging.error('弹窗错误10')

    if el_tc:
        logging.info(f'出现弹窗')
        try:
            # Close via the modal's X icon.
            el_tc_close = await el_tc[0].xpath('.//i[@class="eds-icon eds-modal__close"]')
            await el_tc_close[0].click({'delay': 200})
            await asyncio.sleep(1)
        except Exception as e:
            logging.error('弹窗错误1')

    if el_tc2:
        logging.info(f'出现弹窗2')
        try:
            # Click the first (cancel) button inside the modal's popover.
            el_tc_cancel = await el_tc2[0].xpath('.//div[@class="eds-popover__ref"]/button[1]')
            await el_tc_cancel[0].click({'delay': 200})
            await asyncio.sleep(1)
        except Exception as e:
            logging.error(f'弹窗错误2')
    await check_tc(page,'//div[@class="FULMadiY5u"]//div[text()="Maybe Later"]')

def getDictDatasByEecel(fpath):
    """
    Read the first worksheet of an Excel file into a list of row dicts.

    Row 1 is treated as the header; each subsequent row becomes one dict
    keyed by the (stripped) header text.

    :param fpath: path to the .xlsx file
    :return: list of dicts, one per data row
    """
    wb = openpyxl.load_workbook(fpath)
    ws = wb[wb.sheetnames[0]]
    # Guard against empty or non-string header cells instead of crashing
    # on None.strip(); non-string headers are kept as-is.
    headers = []
    for col in range(1, ws.max_column + 1):
        value = ws.cell(1, col).value
        headers.append(value.strip() if isinstance(value, str) else value)

    datas = []
    for row in range(2, ws.max_row + 1):
        currow = {headers[col - 1]: ws.cell(row, col).value
                  for col in range(1, ws.max_column + 1)}
        datas.append(currow)

    return datas

def read_csv_to_dict(file_path, header_ind=0, encoding='utf-8'):
    """
    Load a CSV file into a list of row dicts.

    The first *header_ind* lines are skipped; the next line becomes the
    header row.

    :param file_path: path to the CSV file
    :param header_ind: number of leading lines to skip (default 0)
    :param encoding: file encoding (default 'utf-8')
    :return: list of dicts (one per row), or None when reading fails
    """
    try:
        frame = pd.read_csv(file_path, skiprows=header_ind, encoding=encoding)
        return frame.to_dict(orient='records')
    except Exception as e:
        print(f"读取 CSV 文件时出错: {e}")
        return None

async def scroll_to_bottom(page:Page):
    """Repeatedly scroll the page to its bottom until the document height
    stops growing (i.e. lazy-loaded content is exhausted)."""
    previous_height = await page.evaluate('document.body.scrollHeight')

    while True:
        await page.evaluate('window.scrollTo(0, document.body.scrollHeight);')
        # Give lazy-loaded content time to arrive before re-measuring.
        await asyncio.sleep(3)

        current_height = await page.evaluate('document.body.scrollHeight')
        if current_height == previous_height:
            return
        previous_height = current_height

def is_decimal(s: str) -> bool:
    """Return True when *s* is a signed decimal fraction such as '-3.14' or
    '.5' (a decimal point followed by digits is required)."""
    pattern = r"^-?\d*\.\d+$"
    return re.match(pattern, s) is not None

def find_chrome_path():
    """Locate chrome.exe on Windows: check the two standard install folders
    first, then fall back to the App Paths registry key.

    :return: path string, or None when Chrome cannot be found (including on
        non-Windows systems, where the winreg import fails)
    """
    candidates = (
        r'C:\Program Files\Google\Chrome\Application\chrome.exe',
        r'C:\Program Files (x86)\Google\Chrome\Application\chrome.exe',
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate

    try:
        from winreg import ConnectRegistry, OpenKey, HKEY_LOCAL_MACHINE, QueryValue
        key_path = r'SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe'
        with ConnectRegistry(None, HKEY_LOCAL_MACHINE) as root:
            with OpenKey(root, key_path) as key:
                return QueryValue(key, None)
    except:
        # Registry lookup is best-effort; also covers non-Windows ImportError.
        pass

    return None

async def repeat_goto(page, url, basic_info=None):
    """
    Navigate *page* to *url*, retrying on failure (1 s apart).

    After 3 failed attempts, the browser profile is optionally recorded in
    the tempDISBID table (treated as an IP ban) and the loop aborts with a
    bare ``raise``.

    :param page: pyppeteer Page
    :param url: destination URL
    :param basic_info: optional dict with BrowserID / DpName / task_id used
        when logging the ban
    """
    failures = 0
    while True:
        try:
            await page.goto(url)
            return
        except Exception:
            failures += 1
        await asyncio.sleep(1)
        if failures >= 3:
            if basic_info:
                bid = basic_info['BrowserID']
                dpName = basic_info['DpName']
                tid = basic_info['task_id']
                zds = ['bid', 'task_id', 'des', 'bname', 'state']
                paras = [bid, tid, 'ip被禁止,临时禁用浏览器', dpName, 1]
                aff = tms.merge_into('tempDISBID', zds, paras, ['bid'], True)
                logging.info(f'店铺:《{dpName}》,bid:{bid},ip被禁止,记录到临时禁用表《tempDISBID》:{aff}')
            raise

async def repeat_reload(page):
    """Reload *page*, retrying on failure; after 3 consecutive failures the
    loop aborts with a bare ``raise`` (note: no delay between attempts)."""
    failures = 0
    while True:
        try:
            await page.reload()
            return
        except Exception:
            failures += 1
        if failures >= 3:
            raise


async def bring_to_front_if_needed(page:Page):
    """Background watchdog: every 30-60 seconds, bring *page* to the
    foreground if it has lost focus. Runs forever; intended to be scheduled
    as a long-lived asyncio task."""
    while True:
        try:
            has_focus = await page.evaluate("document.hasFocus()")
            if not has_focus:
                await page.bringToFront()
        except Exception:
            # Focus probing is best-effort; ignore transient page errors.
            pass
        # Randomized interval keeps CPU usage low.
        await asyncio.sleep(random.randint(30, 60))

def try_to_float(num_str):
    """Parse a comma-grouped number string (e.g. '1,234.56') into a float
    rounded to 2 decimals; logs and returns None when parsing fails."""
    try:
        cleaned = num_str.replace(',', '')
        return round(float(cleaned), 2)
    except Exception as e:
        logging.error(f'尝试将 {num_str} 转换成float错误 => {e}')

def try_to_int(num_str):
    """Parse a comma-grouped number string into an int, truncating any
    fractional part; logs and returns None when parsing fails."""
    try:
        cleaned = num_str.replace(',', '')
        return int(float(cleaned))
    except Exception as e:
        logging.error(f'尝试将 {num_str} 转换成整数错误 => {e}')

def getpy(mes):
    """Transliterate *mes* to pinyin with capitalized syllables and no
    separator between them (xpinyin's 'capitalize' conversion mode)."""
    converter = Pinyin()
    return converter.get_pinyin(mes, splitter='', convert='capitalize')

def get_last_log_time(file_path):
    """
    Return the timestamp of the last parseable line in a log file.

    Lines are expected to start with 'YYYY-MM-DD HH:MM:SS,mmm - ...';
    the file is scanned from the end.

    :param file_path: path to the log file
    :return: datetime of the newest parseable line, or None when no line
             carries a valid timestamp
    """
    with open(file_path, "r", encoding="utf-8") as fh:
        all_lines = fh.readlines()

    fmt = "%Y-%m-%d %H:%M:%S,%f"
    for line in reversed(all_lines):
        pieces = line.split(" - ")
        if len(pieces) <= 1:
            continue  # no ' - ' separator, cannot hold a timestamp
        try:
            return datetime.datetime.strptime(pieces[0], fmt)
        except ValueError:
            continue  # unparseable prefix; keep scanning backwards

    return None

def is_log_outdated(file_path, minutes=10):
    """
    Check whether the last timestamp in a log file is older than *minutes*.

    :param file_path: path to the log file; a falsy path is treated as
        "not outdated"
    :param minutes: staleness threshold in minutes (default 10)
    :return: True when the newest log timestamp is older than the threshold;
             False when the path is falsy or no timestamp could be parsed
             (the original comment claimed the opposite for the no-timestamp
             case; the code returns False there)
    """
    if not file_path:
        return False
    last_time = get_last_log_time(file_path)
    if last_time is None:
        return False  # no parseable timestamp -> treated as NOT outdated
    
    diff = datetime.datetime.now() - last_time
    return diff.total_seconds() > minutes * 60  # older than the threshold?

async def get_cookie_string(page):
    """Collect the page's cookies into a single 'name=value; ...' string
    suitable for a Cookie: request header."""
    jar = await page.cookies()
    pairs = [f"{c['name']}={c['value']}" for c in jar]
    joined = "; ".join(pairs)
    print('获取cookie成功')
    return joined

async def check_tc(page:Page,xpstr):
    """
    Dismiss a popup if present: click the first element matching *xpstr*.

    :param page: pyppeteer Page
    :param xpstr: xpath of the popup's click target
    :return: 1 when a popup was found and clicked, None otherwise
    """
    matches = await page.xpath(xpstr)
    if not matches:
        return
    logging.info(f'出现弹窗...')
    try:
        await matches[0].click({'delay':200})
        return 1
    except Exception as e:
        print(f'弹窗错误 => {e}')

def kchrome():
    """Force-kill every running chrome.exe via taskkill (Windows only).

    Scans the process table; whenever a chrome.exe process is seen, runs
    'taskkill /F /IM chrome.exe' (which terminates all Chrome processes).
    Per-process errors are ignored.
    """
    import psutil
    for pid in psutil.pids():
        try:
            proc = psutil.Process(pid)
            if proc.name() == 'chrome.exe':
                os.system('taskkill /F /IM chrome.exe')
        except Exception:
            # Process may have exited mid-scan, or access may be denied.
            pass

def trydosql(cms, sqlstr, params=None, type=1):
    """
    Execute a SQL statement, retrying forever on database errors.

    :param cms: database helper exposing ExecQuery/ExecNoQuery/ExecQuerydict
    :param sqlstr: SQL text with '?' placeholders
    :param params: statement parameters (or None)
    :param type: 1 = ExecQuery (rows as tuples), 2 = ExecNoQuery (affected
        row count), 3 = ExecQuerydict (rows as dicts).  Kept named 'type'
        (shadowing the builtin) for backward compatibility with callers.
    :return: whatever the chosen Exec* method returns
    :raises ValueError: when *type* is not 1, 2 or 3 (previously an invalid
        type caused an unbound-variable error that the blanket except
        swallowed, spinning forever)
    """
    if type not in (1, 2, 3):
        raise ValueError(f'trydosql: unsupported type {type!r} (expected 1, 2 or 3)')
    print('正在链接数据库...')
    while True:
        try:
            if type == 1:
                return cms.ExecQuery(sqlstr, params)
            if type == 2:
                return cms.ExecNoQuery(sqlstr, params)
            return cms.ExecQuerydict(sqlstr, params)
        except Exception as e:
            # Retry on (assumed transient) DB errors, but log instead of
            # silently hiding them as the original did.
            logging.error(f'数据库操作失败,正在重试 => {e}')

def sqlToExcel(fnameqz, cms, sqlstr, params=None):
    """
    Run a query (rows as dicts) and dump the result set to a timestamped
    .xlsx file in the working directory.

    :param fnameqz: filename prefix for the workbook
    :param cms: database helper passed through to trydosql
    :param sqlstr: SQL query text
    :param params: optional query parameters
    :return: the saved filename, or 0 when the query returned no rows
    """
    rows = trydosql(cms, sqlstr, params, 3)
    if not rows:
        return 0

    wb = openpyxl.Workbook()
    ws = wb[wb.sheetnames[0]]
    # Header row from the first record's keys, then one row per record.
    ws.append(list(rows[0].keys()))
    for record in rows:
        ws.append(list(record.values()))

    # Left-align every cell for readability.
    left = Alignment(horizontal='left', vertical='center')
    for row in ws.iter_rows(min_row=1, max_col=ws.max_column, max_row=ws.max_row):
        for cell in row:
            cell.alignment = left

    stamp = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time()))
    fname = f'{fnameqz}_{stamp}.xlsx'
    wb.save(fname)
    return fname

def change_task_status(taskid, status):
    """
    Persist a task's running state to task.json and stamp the start/end
    time on its row in the task table.

    :param taskid: task id (key into Task_Name_dict and the task table)
    :param status: 1 = started (updates LastRunTime),
                   0 = finished (updates LastEndTime)
    """
    with open('task.json', 'r', encoding='utf-8') as fh:
        state = json.load(fh)
    cur_task_name = Task_Name_dict[taskid]
    state[cur_task_name] = status

    with open('task.json', 'w', encoding='utf-8') as fh:
        json.dump(state, fh, ensure_ascii=False)

    cur_time = datetime.datetime.now()

    if status == 0:
        aff = tms.ExecNoQuery('update task set LastEndTime=? where id=?', (cur_time, taskid))
        print(f'任务:《{cur_task_name}》,登记结束运行时间:{cur_time},状态:{aff}')
    elif status == 1:
        aff = tms.ExecNoQuery('update task set LastRunTime=? where id=?', (cur_time, taskid))
        print(f'任务:《{cur_task_name}》,登记开始运行时间:{cur_time},状态:{aff}')

def update_task_current_state(HouTai_ID, task_id, col_name, pt='Houtai'):
    """Recount rows with *col_name* = 1 and id >= *HouTai_ID* in table *pt*,
    then store the count and checkpoint id on the task row.

    NOTE(review): *pt* and *col_name* are interpolated into the SQL text and
    must only ever come from trusted internal configuration.
    """
    count_sql = f'select count(*) from {pt} where {col_name}=1 and id>=?'
    tcount = tms.ExecQuery(count_sql, (HouTai_ID,))[0][0]
    tms.ExecNoQuery('update [task] set [tcount]=?,LastHouTaiID=? where [id]=?', (tcount, HouTai_ID, task_id))

def boldate_chinese_date_range(date_str):
    """
    Convert an English date range like '1 - January 14, 2025' or
    'January 1, 2024 - February 2, 2025' into a Chinese range string,
    e.g. '2025年1月1日至2025年1月14日'.

    When the start side omits its year and/or month, they are borrowed from
    the end side.  Returns the input unchanged when it cannot be parsed
    (wrong number of '-' parts, unknown month name, non-numeric day, ...).

    :param date_str: English date-range text with a single '-' separator
    :return: Chinese date-range string, or the original input on failure
    """
    months = {
        "January": 1, "February": 2, "March": 3, "April": 4, "May": 5,
        "June": 6, "July": 7, "August": 8, "September": 9,
        "October": 10, "November": 11, "December": 12
    }
    parts = date_str.split("-")
    try:
        if len(parts) != 2:
            return date_str

        def _parse(piece):
            # -> (year, month, day); year/month may be None when absent.
            segs = piece.strip().split(',')
            year = segs[-1].strip() if len(segs) == 2 else None
            tokens = [t for t in segs[0].split() if t.strip()]
            if len(tokens) == 2:
                # "Month DD" — unknown month names raise KeyError -> fallback.
                return year, months[tokens[0]], tokens[1]
            return year, None, tokens[0]

        start_year, start_month, start_day = _parse(parts[0])
        end_year, end_month, end_day = _parse(parts[1])
        # Inherit year/month from the range end when the start omits them.
        start_year = start_year or end_year
        start_month = start_month or end_month

        return f"{start_year}年{start_month}月{start_day}日至{end_year}年{end_month}月{int(end_day)}日"
    except Exception:
        return date_str

def get_merged_cell_value(ws, merged_ranges, row, col):
    """
    Read a cell's value, resolving merged cells to the value stored in the
    merge region's top-left (anchor) cell.

    :param ws: openpyxl worksheet
    :param merged_ranges: iterable of the sheet's merged CellRange objects
    :param row: 1-based row index
    :param col: 1-based column index
    :return: the effective cell value
    """
    coord = f"{get_column_letter(col)}{row}"
    for rng in merged_ranges:
        if coord in rng:
            # Inside a merge region only the anchor cell holds the value.
            return ws.cell(rng.min_row, rng.min_col).value
    return ws.cell(row, col).value

def get_shopee_timestamp(group='last_three_month', sq=7):
    """
    Build a (start, end) Unix-timestamp pair for a Shopee reporting window.

    Boundaries are midnight-aligned in the fixed-offset UTC+*sq* timezone;
    *end* is always the last second of today (23:59:59).

    :param group: 'today', 'yesterday', 'last_week' (7 days incl. today),
        'last_month', or 'last_three_month'
    :param sq: timezone offset in hours east of UTC (default +7)
    :return: (start_timestamp, end_timestamp) as ints
    :raises ValueError: for an unknown *group* (previously this crashed with
        an UnboundLocalError on date_start)
    """
    tz = datetime.timezone(datetime.timedelta(hours=sq))
    today = datetime.datetime.now(tz).replace(hour=0, minute=0, second=0, microsecond=0)
    end_of_today = today.replace(hour=23, minute=59, second=59, microsecond=0)

    if group == 'today':
        date_start, date_end = today, end_of_today
    elif group == 'yesterday':
        yesterday = today - datetime.timedelta(days=1)
        date_start = yesterday
        date_end = yesterday.replace(hour=23, minute=59, second=59, microsecond=0)
    elif group == 'last_week':
        date_start = today - datetime.timedelta(days=6)
        date_end = end_of_today
    elif group == 'last_month':
        date_start = today - relativedelta(months=1)
        date_end = end_of_today
    elif group == 'last_three_month':
        date_start = today - relativedelta(months=3)
        date_end = end_of_today
    else:
        raise ValueError(f'get_shopee_timestamp: unknown group {group!r}')

    return int(date_start.timestamp()), int(date_end.timestamp())


def get_shopee_timestamp_day_range(day_range=10):
    """Return (start, end) Unix timestamps: midnight UTC+7 *day_range* days
    ago through midnight UTC+7 today."""
    tz7 = datetime.timezone(datetime.timedelta(hours=7))
    today0 = datetime.datetime.now(tz7).replace(
        hour=0, minute=0, second=0, microsecond=0)
    start0 = today0 - datetime.timedelta(days=day_range)
    return int(start0.timestamp()), int(today0.timestamp())


def calculate_timestamps(daysjg=30):
    """Return (past_ts, today_ts): local-time Unix timestamps for 01:00
    `daysjg` days ago and 01:00 today."""

    def _one_am_ts(d):
        # 01:00 local time on calendar date d, converted to an epoch second.
        dt = datetime.datetime(d.year, d.month, d.day, 1, 0, 0)
        return int(time.mktime(dt.timetuple()))

    today = datetime.datetime.now().date()
    past = today - datetime.timedelta(days=daysjg)
    return _one_am_ts(past), _one_am_ts(today)

def calculate_timestamps2(daysjg=90):
    """Return (start_ts, end_ts) in UTC+7: 00:00:00 `daysjg` days ago and
    23:59:59 today, as Unix timestamps."""
    tz7 = datetime.timezone(datetime.timedelta(hours=7))
    today = datetime.datetime.now(tz7).date()
    start_date = today - datetime.timedelta(days=daysjg)

    end_dt = datetime.datetime.combine(today, datetime.time(23, 59, 59), tzinfo=tz7)
    start_dt = datetime.datetime.combine(start_date, datetime.time(0, 0, 0), tzinfo=tz7)

    return int(start_dt.timestamp()), int(end_dt.timestamp())

def convert_to_timestamp(time_str):
    """Parse a free-form date/time string with dateutil and return its
    local-time Unix timestamp (int), or -1 on any parsing/conversion failure."""
    try:
        # Both parsing and mktime conversion stay inside the try: either may fail.
        return int(time.mktime(parser.parse(time_str).timetuple()))
    except Exception:
        return -1

def get_timestamps():
    """Return (month_ago_ts, today_end_ts): local-time Unix timestamps for
    00:00 one calendar month ago and 23:59:59.999999 today."""
    now = datetime.datetime.today()
    end_of_today = datetime.datetime.combine(now, datetime.datetime.max.time())
    start_month_ago = datetime.datetime.combine(now - relativedelta(months=1),
                                                datetime.datetime.min.time())
    return (int(time.mktime(start_month_ago.timetuple())),
            int(time.mktime(end_of_today.timetuple())))

def get_utc7_timestamps(dayyy=30):
    """Return (past_ts, today_ts): Unix timestamps for midnight UTC+7
    `dayyy` days ago and midnight UTC+7 today.

    :param dayyy: how many days back the window starts (default 30)
    """
    tz7 = datetime.timezone(datetime.timedelta(hours=7))
    midnight_today = datetime.datetime.now(tz7).replace(hour=0, minute=0, second=0, microsecond=0)
    # UTC+7 is a fixed offset, so plain day subtraction preserves midnight.
    midnight_past = midnight_today - datetime.timedelta(days=dayyy)
    return int(midnight_past.timestamp()), int(midnight_today.timestamp())

def get_three_time():
    """Return (start_ts, end_ts, start_str, end_str) for the window from
    00:00 three calendar months ago to 23:59:59.999999 today (local time).
    Date strings use the '%d_%m_%Y' format expected by Shopee report names."""
    end_dt = datetime.datetime.combine(datetime.datetime.today(), datetime.datetime.max.time())
    start_dt = datetime.datetime.combine(datetime.datetime.today() - relativedelta(months=3),
                                         datetime.datetime.min.time())

    def _ts(dt):
        # Local-time epoch seconds for dt.
        return int(time.mktime(dt.timetuple()))

    return (_ts(start_dt), _ts(end_dt),
            start_dt.strftime('%d_%m_%Y'), end_dt.strftime('%d_%m_%Y'))

def get_days_diff_from_timestamps(timestamp1, timestamp2):
    """Return the whole-day difference (timestamp2 - timestamp1).

    :param timestamp1: earlier Unix timestamp in seconds
    :param timestamp2: later Unix timestamp in seconds
    :return: int day count; follows timedelta.days semantics, so partial or
             negative spans floor toward negative infinity
    """
    start = datetime.datetime.fromtimestamp(timestamp1)
    end = datetime.datetime.fromtimestamp(timestamp2)
    return (end - start).days

def replace_first_occurrence(lst, old_value, new_value):
    """Replace the first occurrence of old_value in lst (in place) and return lst.

    If old_value is absent, print a notice and return lst unchanged.
    """
    for pos, item in enumerate(lst):
        if item == old_value:
            lst[pos] = new_value
            break
    else:
        print(f"Value '{old_value}' not found in the list.")
    return lst

def isContain(kw, tit):
    """Return True when every whitespace token of `kw` occurs in `tit` as a
    whole word (case-insensitive, \\b-delimited).

    Returns False when `tit` is empty or not a string.
    An empty `kw` trivially matches any valid title.
    """
    if not (tit and isinstance(tit, str)):
        return False
    return all(
        re.search(r'\b{}\b'.format(re.escape(token)), tit, re.I)
        for token in kw.split()
    )

def read_xpllbb_excel(file_path):
    """Load a Shopee traffic report (.xlsx) and return it as a list of row dicts.

    ID-like columns are forced to str so leading zeros / long ids survive,
    whitespace in column names is replaced with '_', the 'Product' column is
    renamed to 'Product_Name', and NaN cells become None.

    :param file_path: path to the .xlsx report
    :return: list[dict] — one dict per sheet row
    """
    frame = pd.read_excel(
        file_path,
        engine='openpyxl',
        dtype={'Item ID': str, 'Variation ID': str, 'Parent SKU': str},
    )

    # Normalize every column name: any run of whitespace becomes one underscore.
    frame.columns = [re.sub(r'\s+', '_', name) for name in frame.columns]
    frame = frame.rename(columns={'Product': 'Product_Name'})

    # NaN is awkward downstream (SQL params); map it to None cell-by-cell.
    frame = frame.map(lambda cell: None if pd.isna(cell) else cell)

    return frame.to_dict(orient='records')


def recover_podpro_daochu(binfo, ynpros):
    """Reset exported POD products back to the "not exported" state.

    For each (yn_itemid, ph_itemid, price) in `ynpros`:
      * resolve a missing ph_itemid from YN_LLBB.Parent_SKU (count the row as
        failed and skip it when no mapping exists),
      * mark the matching YNPodPrice row daochu=0 and stamp update_time,
      * strip this shop's '|<shopid>' marker out of proshopeeph.dc_shopids so
        the traffic flag is reset for this shop.

    Logs a per-item reset line and a final per-shop summary.

    :param binfo: dict with at least 'BrowserID', 'DpName', 'shopid'
    :param ynpros: list of (yn_itemid, ph_itemid, price) tuples
    """
    bid = binfo['BrowserID']
    bname = binfo['DpName']
    shopid = binfo['shopid']
    suc = 0
    fail = 0
    for yn_itemid, ph_itemid, price in ynpros:

        if not ph_itemid:
            rs = tms.ExecQuery('select Parent_SKU from YN_LLBB where Item_ID=?', (yn_itemid,))
            if len(rs) > 0:
                ph_itemid = rs[0][0]
            else:
                fail += 1
                continue
        cur_time = datetime.datetime.now()
        aff = tms.ExecNoQuery('update YNPodPrice set daochu=0,update_time=? where ph_itemid=? and price=?',
                              (cur_time, ph_itemid, price))
        # Fixed: this statement used to interpolate shopid/ph_itemid directly into
        # the SQL text (injection-prone and inconsistent with the rest of the file).
        # Now parameterized; semantics unchanged: remove '|<shopid>' from dc_shopids.
        sql_str = '''UPDATE proshopeeph
                    SET dc_shopids = REPLACE(dc_shopids, ?, '')
                    WHERE itemid=? and dc_shopids LIKE ?
                '''
        aff2 = tms.ExecNoQuery(sql_str, (f'|{shopid}', ph_itemid, f'%|{shopid}%'))
        logging.info(f'店铺:《{bname}》,{bid},itemid:{ph_itemid},有流量数据重置:{aff2}')

        if aff:
            suc += aff
        else:
            fail += 1

    logging.info(f'店铺:《{bname}》,{bid},共 {len(ynpros) } 个产品恢复为未导出状态,其中成功:{suc},失败:{fail} 个')

def upad(row,pid,kw,imp,params,tits):
    """Upsert one ad-report row into the [ADPro] table, keyed on (ProductID, PlacementKeyword).

    :param row: 1-based CSV row number; used only in the returned log message
    :param pid: ProductID value
    :param kw: PlacementKeyword value
    :param imp: impression count parsed from the report (0 when blank)
    :param params: column values aligned with `tits` minus BiddingMethod/Placement
    :param tits: normalized report column titles
    :return: (message, row, pid, kw, affected_rows); affected_rows is -1 when the row is ignored
    """
    rs1=tms.ExecQuery('select StartDate,Impression from adpro where ProductID=? and PlacementKeyword=?',(pid,kw))
    
    # Bracket titles for SQL Server identifiers. BiddingMethod/Placement were
    # excluded from `params` by the caller, so drop them from the column list too.
    titles=[f'[{tt}]'for tt in tits]
    titles.remove('[BiddingMethod]')
    titles.remove('[Placement]')
    wstr=','.join(['?' for _ in params])
    if len(rs1)==0:
        # First time this (pid, kw) pair is seen: plain insert.
        zdstr=','.join(titles)
        sqlstr=f'insert into [ADPro]({zdstr}) values ({wstr})'
        
        aff=tms.ExecNoQuery(sqlstr,params)

        return ('增加数据',row,pid,kw,aff)

    else:
        start_date,old_imp=rs1[0]
        # Age of the stored record in days decides how strictly the new
        # impression count must beat the stored one before a full update.
        days_diff=get_days_diff_from_timestamps(start_date,time.time())
        old_imp=old_imp if old_imp else 0
        # sqlstr1: only clear the IsImpressionAdd flag; sqlstr2: full column update + set flag.
        sqlstr1='update [ADPro] set IsImpressionAdd=0 where ProductID=? and PlacementKeyword=?'
        zdstr=','.join([f'{tt}=?' for tt in titles])
        sqlstr2=f'update [ADPro] set {zdstr},IsImpressionAdd=1 where ProductID=? and PlacementKeyword=?'
        if days_diff>30:
            # Record older than 30 days: an EQUAL impression count still allows a
            # full update (imp>=old_imp below) — presumably stale campaigns may
            # legitimately repeat the same totals; TODO confirm with report owner.
            if imp<old_imp and imp>0:
                sqlstr=sqlstr1
                cur_para=(pid,kw)
                msg='只更新IsImpressionAdd为0'
            elif imp<=old_imp and imp==0:
                sqlstr=sqlstr1
                cur_para=(pid,kw)
                msg='只更新IsImpressionAdd为0'
            elif imp>=old_imp and imp>0:
                sqlstr=sqlstr2
                cur_para=params+[pid,kw]
                msg='更新所有参数'
            else:
                return ('忽略本行',row,pid,kw,-1)

        else:
            # Recent record: impressions must STRICTLY increase for a full update.
            if imp<old_imp and imp>0:
                sqlstr=sqlstr1
                cur_para=(pid,kw)
                msg='只更新IsImpressionAdd为0'
            elif imp<=old_imp and imp==0:
                sqlstr=sqlstr1
                cur_para=(pid,kw)
                msg='只更新IsImpressionAdd为0'
            elif imp>old_imp:
                sqlstr=sqlstr2
                cur_para=params+[pid,kw]
                msg='更新所有参数'

            else:
                return ('忽略本行',row,pid,kw,-1)

        aff=tms.ExecNoQuery(sqlstr,cur_para)
        return (msg,row,pid,kw,aff)

def dr_adbb(select_path):
    """Import a Shopee keyword-ads CSV export into the [ADPro] table.

    The export starts with a metadata preamble; we scan for the 'Shop ID'
    line and the 'Sequence' header line, normalize the column titles, keep
    only Manual-Bidding + Search-placement rows, convert date/percentage
    cells, and upsert each row concurrently via `upad`.

    :param select_path: path to the downloaded CSV report
    :return: number of rows upserted successfully
    """
    with open(select_path, newline='',encoding='utf-8-sig') as csvfile:
        reader = csv.reader(csvfile)
        adDatas = list(reader)

    # Locate the 'Shop ID' metadata line and the 'Sequence' header line.
    line_row=0
    for line in adDatas:
        if len(line)==0:
            line_row+=1
            continue
        if line[0]=='Shop ID':
            shopid_ind=line_row
        if line[0]=='Sequence':
            title_ind=line_row
            break
        line_row+=1

    shopid=adDatas[shopid_ind][1]
    titles=adDatas[title_ind]
    # Strip whitespace and '/' from titles so they are usable as column names.
    titles =[re.sub(r'[\s/]+','',t) for t in titles]
    titles=titles[1:]+['ShopID']
    titles=replace_first_occurrence(titles,'AdName','ProductNameAdName')
    titles=replace_first_occurrence(titles,'KeywordLocation','PlacementKeyword')
    # Drop the leading 'Sequence' cell of each data row and append the shop id.
    pdatas=[dict(zip(titles,pdata[1:]+[shopid])) for pdata in adDatas[title_ind+1:]]
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        futures=[]
        # `row` tracks the 1-based CSV line number for logging (+2: header + 1-based).
        row=title_ind+2
        for pdata in pdatas:
            cparams=[]
            cur_BiddingMethod=pdata['BiddingMethod']
            cur_Placement=pdata['Placement']
            # Only manual-bid, search-placement rows are imported.
            if not (cur_BiddingMethod.lower()=='Manual Bidding'.lower() and cur_Placement.lower()=='Search'.lower()):
                row+=1
                continue
            cur_pid=pdata['ProductID']
            cur_kw=pdata['PlacementKeyword']
            cur_imp=pdata['Impression']
            cur_imp =int(cur_imp) if cur_imp else 0
            for k,v in pdata.items():
                if k=='BiddingMethod' or k=='Placement':
                    continue
                if v=='-':
                    v=None
                cpara=v
                # Date columns become Unix timestamps; '12.34%'-style cells become floats.
                if 'Date' in k:
                    cpara=convert_to_timestamp(v)
                elif v and '%' in v and '.' in v:
                    cpara=float(v.replace('%',''))
                
                cparams.append(cpara)
            futures.append(executor.submit(upad,row,cur_pid,cur_kw,cur_imp,cparams,titles))
            row+=1
        suc=0
        for future in concurrent.futures.as_completed(futures):
            try:
                res_mes,res_row,res_pid,res_kw,res_aff = future.result()
                if res_aff!=0:
                    suc+=1
                logging.info(f'第 {res_row} 行,ProductID:{res_pid},kw:《{res_kw}》,{res_mes},状态:{res_aff}')
            except Exception as exc:
                logging.error(f"线程出错: {exc}")

    logging.info(f'广告数据上传至数据表ADPro完毕,共成功{suc}行')
    return suc

def dr_BBAdID(select_path,basic_info):
    """Import ad identifiers from a Shopee CSV export into the YN_AD_ID table.

    Scans the report preamble for User Name / Shop Name / Shop ID / Report
    Creation Time, finds the 'Sequence' header row, then upserts one row per
    distinct Product ID (first occurrence wins).

    :param select_path: path to the downloaded CSV report
    :param basic_info: dict with 'BrowserID', 'DpName', 'GroupName', 'SJBM'
    :return: number of rows inserted or updated
    """
    bid=basic_info['BrowserID']
    bname=basic_info['DpName']
    gname=basic_info['GroupName']
    sjbm=basic_info['SJBM']

    with open(select_path, newline='',encoding='utf-8-sig') as csvfile:
        reader = csv.reader(csvfile)
        adDatas = list(reader)

    # Locate the metadata rows and the 'Sequence' header row in the preamble.
    line_row=0
    for line in adDatas:
        if len(line)==0:
            line_row+=1
            continue

        if line[0]=='User Name':
            userName_ind=line_row
        if line[0]=='Shop Name':
            shopName_ind=line_row
        if line[0]=='Shop ID':
            shopid_ind=line_row
        if line[0]=='Report Creation Time':
            bbcreTime_ind=line_row
        if line[0]=='Sequence':
            title_ind=line_row
            break

        line_row+=1
    userName=adDatas[userName_ind][1]
    shopName=adDatas[shopName_ind][1]
    shopid=adDatas[shopid_ind][1]
    bbcreTime=adDatas[bbcreTime_ind][1]

    titles=adDatas[title_ind]
    # Strip whitespace and '/' so titles can be used as dict keys.
    titles =[re.sub(r'[\s/]+','',t) for t in titles]
    pdatas=[dict(zip(titles[1:],pdata[1:])) for pdata in adDatas[title_ind+1:]]
    need_dr={}
    
    # Deduplicate by ProductID: keep only the first row seen for each id.
    for pdata in pdatas:
        itemid=pdata['ProductID']
        adname=pdata['AdName']
        Status=pdata['Status']
        if not need_dr.get(itemid):
            need_dr[itemid]=[bid,bname,gname,sjbm,userName,shopName,shopid,bbcreTime,adname,Status,itemid]
    
    suc=0
    zds=['BrowserID','DpName','GroupName','SJBM','User_Name','Shop_Name','Shop_ID','Report_Creation_Time','Ad_Name','Status','Product_ID']
    for pid,para in need_dr.items():

        rs=tms.ExecQuery('select count(*) from YN_AD_ID where Product_ID=?',(pid,))
        if rs[0][0]==0:
            zdstr=','.join(zds)
            wstr=','.join(['?' for _ in para])
            sqlstr=f'insert into YN_AD_ID({zdstr}) values({wstr})'
            suc+=tms.ExecNoQuery(sqlstr,para)

        else:
            # Update path: Product_ID (last element of `para`) becomes the WHERE value,
            # so only zds[:-1] appear in the SET clause.
            zdstr=','.join([f'{zd}=?' for zd in zds[:-1]])
            sqlstr=f'update YN_AD_ID set {zdstr} where Product_ID=?'
            suc+=tms.ExecNoQuery(sqlstr,para)

    print(f'浏览器ID{bid},《{bname}》,广告数据上传至数据表YN_AD_ID完毕,共成功{suc}行')
    return suc

def dr_BBInfo(select_path,basic_info):
    """Import and aggregate a Shopee search-query ad report into YN_AD_Info.

    Reads the CSV preamble (user/shop/product metadata), groups data rows by
    Search_Query, sums the count/amount columns per query, recomputes the
    derived ratio columns (CTR, conversion rates, ROAS, ACOS, ...), and
    upserts one row per query keyed on (Product_ID, Search_Query).

    :param select_path: path to the downloaded CSV report
    :param basic_info: dict with 'BrowserID', 'DpName', 'GroupName', 'SJBM'
    :return: total number of affected rows
    """
    bid=basic_info['BrowserID']
    bname=basic_info['DpName']
    gname=basic_info['GroupName']
    sjbm=basic_info['SJBM']

    with open(select_path, newline='',encoding='utf-8-sig') as csvfile:
        reader = csv.reader(csvfile)
        adDatas = list(reader)

    # Scan the preamble for metadata rows until the 'Sequence' header row.
    line_row=0
    for line in adDatas:
        if len(line)==0:
            line_row+=1
            continue

        if line[0]=='User Name':
            userName=adDatas[line_row][1]
        if line[0]=='Shop Name':
            shopName=adDatas[line_row][1]
        if line[0]=='Shop ID':
            shopid=adDatas[line_row][1]
        if line[0]=='Product Name/Ad Name':
            Product_Name_Ad_Name=adDatas[line_row][1]
        if line[0]=='Product ID':
            Product_ID=adDatas[line_row][1]
        if line[0]=='Report Creation Time':
            bbcreTime=adDatas[line_row][1]
        if line[0]=='Date Period':
            Date_Period=adDatas[line_row][1]
        if line[0]=='Sequence':
            title_ind=line_row
            break
        line_row+=1

    titles=adDatas[title_ind]
    # Whitespace and '/' in titles become '_' so they match the SQL column names.
    titles =[re.sub(r'[\s/]+','_',t) for t in titles]
    # Data starts two rows below the header; rows with a wrong cell count are dropped.
    pdatas=[dict(zip(titles[1:],pdata[1:])) for pdata in adDatas[title_ind+2:] if len(pdata)==len(titles)]

    need_dr_dict={}
    
    # Group rows by Search_Query. 'semua' is Indonesian for "all" — presumably a
    # placeholder meaning the query equals the bid keyword; TODO confirm.
    for pdata in pdatas:
        Search_Query=pdata['Search_Query']
        if Search_Query=='semua':
            pdata['Search_Query']=pdata['Keywords']
            Search_Query=pdata['Search_Query']
        if pdata['Search_Query']!=pdata['Keywords']:
            pdata['Match_Type']='Broad Match'
        if not need_dr_dict.get(Search_Query):
            need_dr_dict[Search_Query]=[pdata]
        else:
            need_dr_dict[Search_Query].append(pdata)

    need_dr=[]
    for cSearch_Query,pdata_list in need_dr_dict.items():
        # Base the merged record on the group's first row; per-keyword fields
        # are blanked because they no longer apply to the aggregate.
        cpdata=copy.copy(pdata_list[0])
        cpdata['Keywords']=''
        cpdata['Bidding_Method']=''
        cpdata['Average_Ranking']=''
        cur_Match_Type='Broad Match'
        cur_Impression=0
        cur_Clicks=0
        cur_Conversions=0
        cur_Direct_Conversions=0
        cur_Items_Sold=0
        cur_Direct_Items_Sold=0
        cur_GMV=0
        cur_Direct_GMV=0
        cur_Expense=0
        # Sum the additive columns; any exact-match row makes the group exact.
        for ppdd in pdata_list:
            if ppdd['Match_Type']=='Exact Match':
                cur_Match_Type='Exact Match'
            cur_Impression+=int(ppdd['Impression'])
            cur_Clicks+=int(ppdd['Clicks'])
            cur_Conversions+=int(ppdd['Conversions'])
            cur_Direct_Conversions+=int(ppdd['Direct_Conversions'])
            cur_Items_Sold+=int(ppdd['Items_Sold'])
            cur_Direct_Items_Sold+=int(ppdd['Direct_Items_Sold'])
            cur_GMV+=int(ppdd['GMV'])
            cur_Direct_GMV+=int(ppdd['Direct_GMV'])
            cur_Expense+=int(ppdd['Expense'])
        
        # Queries that never got a click are not worth storing.
        if cur_Clicks==0:
            continue

        # Summed columns
        cpdata['Match_Type']=cur_Match_Type
        cpdata['Impression']=cur_Impression
        cpdata['Clicks']=cur_Clicks
        cpdata['Conversions']=cur_Conversions
        cpdata['Direct_Conversions']=cur_Direct_Conversions
        cpdata['Items_Sold']=cur_Items_Sold
        cpdata['Direct_Items_Sold']=cur_Direct_Items_Sold
        cpdata['GMV']=cur_GMV
        cpdata['Direct_GMV']=cur_Direct_GMV
        cpdata['Expense']=cur_Expense

        # Derived ratio columns (recomputed; guarded against division by zero)
        if cur_Impression!=0:
            cpdata['CTR']=round(cur_Clicks/cur_Impression,4)
        if cur_Clicks!=0:
            cpdata['Conversion_Rate']=round(cur_Conversions/cur_Clicks,4)
            cpdata['Direct_Conversion_Rate']=round(cur_Direct_Conversions/cur_Clicks,4)
        if cur_Conversions!=0:
            cpdata['Cost_per_Conversion']=round(cur_Expense/cur_Conversions,4)
        if cur_Direct_Conversions!=0:
            cpdata['Cost_per_Direct_Conversion']=round(cur_Expense/cur_Direct_Conversions,4)
        if cur_Expense!=0:
            cpdata['ROAS']=round(cur_GMV/cur_Expense,4)
            cpdata['Direct_ROAS']=round(cur_Direct_GMV/cur_Expense,4)
        if cur_GMV!=0:
            cpdata['ACOS']=round(cur_Expense/cur_GMV,4)
        
        if cur_Direct_GMV!=0:
            cpdata['Direct_ACOS']=round(cur_Expense/cur_Direct_GMV,4)
        
        # When CTR was not recomputed it still holds the raw report string like '1.2%'.
        if isinstance(cpdata['CTR'],str):
            cpdata['CTR']=cpdata['CTR'].replace('%','')
        need_dr.append(cpdata)

    suc=0
    cur_time=int(time.time())
    # Upsert each aggregated query row, keyed on (Product_ID, Search_Query).
    for drdata in need_dr:
        cur_Search_Query=drdata['Search_Query']
        zds=list(drdata.keys())
        para=list(drdata.values())
        zds=['BrowserID','DpName','GroupName','User_Name','Shop_Name','Shop_ID','Product_Name_Ad_Name','Product_ID',
              'Report_Creation_Time','Date_Period']+zds
        para=[bid,bname,gname,userName,shopName,shopid,Product_Name_Ad_Name,Product_ID,bbcreTime,Date_Period]+para
        rs=tms.ExecQuery('select count(*) from YN_AD_Info where Product_ID=? and Search_Query=?',(Product_ID,cur_Search_Query))
        if rs[0][0]==0:
            zds.append('uploadTime')
            zds.append('updateTime')
            para.append(cur_time)
            para.append(cur_time)
            zdstr=','.join(zds)
            wstr=','.join(['?' for _ in para])
            sqlstr=f'insert into YN_AD_Info({zdstr}) values({wstr})'
            aff=tms.ExecNoQuery(sqlstr,para)
            #print(f'{bid},{bname},ProductID:{Product_ID},新增Search_Query《{cur_Search_Query}》:{aff}')

        else:
            zds.append('updateTime')
            para.append(cur_time)
            zdstr=','.join([f'{zd}=?' for zd in zds])
            para+=[Product_ID,cur_Search_Query]
            sqlstr=f'update YN_AD_Info set {zdstr} where Product_ID=? and Search_Query=?'
            aff=tms.ExecNoQuery(sqlstr,para)
            #print(f'{bid},{bname},ProductID:{Product_ID},更新Search_Query《{cur_Search_Query}》:{aff}')        
        suc+=aff

    logging.info(f'浏览器ID{bid},《{bname}》,广告数据上传至数据表YN_AD_INFO完毕,共成功{suc}行')
    return suc

def dr_LLbb(select_path,basic_info):
    """Import a Shopee product-traffic Excel report into the YN_LLBB table.

    Rows with Variation_ID == '-' that share an Item_ID with other rows are
    treated as parent summaries: their values are cached in `all_tj` and used
    to backfill '-'/None cells on the variation rows. Every other row is
    upserted keyed on (item_id, Variation_ID) — Variation_ID NULL for
    single-variant products.

    :param select_path: path to the .xlsx report
    :param basic_info: dict with 'BrowserID', 'DpName', 'GroupName', 'UserName', 'Password'
    :return: per-shop summary string with row/insert/update/failure counts
    """
    bid=basic_info['BrowserID']
    bname=basic_info['DpName']
    gname=basic_info['GroupName']
    uname=basic_info['UserName']
    pwd=basic_info['Password']

    pdatas=read_xpllbb_excel(select_path)
    tttstr=datetime.datetime.now()
    all_tj={}          # parent-row values per Item_ID, used to backfill variations
    added=0
    upted=0
    fail=0
    brow=0
    itemid_list= [pdata['Item_ID'] for pdata in pdatas]
    for pdata in pdatas:
        brow+=1
        item_id=pdata['Item_ID']
        sku_id=pdata['Variation_ID']
        
        if sku_id=='-' and itemid_list.count(item_id)>1:
            # Parent summary row of a multi-variation product: cache, don't insert.
            if not all_tj.get(item_id):
                all_tj[item_id]={zd:get_xpllbb_value(vvv) for zd,vvv in pdata.items()}
        else:
            zds=list(pdata.keys())
            zds.extend(['BrowserID','BrowserName','User_Name','User_Pwd','GroupName'])
            if sku_id=='-' :
                params=[get_xpllbb_value(vvv)  for zd,vvv in pdata.items()]
            else:
                # Variation row: fall back to the cached parent value for empty cells.
                params=[all_tj[item_id].get(zd) if vvv=='-' or vvv is None  else get_xpllbb_value(vvv) for zd,vvv in pdata.items()]
            params=[para if para!='-' else None for para in params]
            params.extend([bid,bname,uname,pwd,gname])
            if sku_id and sku_id!='-':
                
                rs=tms.ExecQuery('select count(*) from YN_LLBB where item_id=? and Variation_ID=?',(item_id,sku_id))
            else:
                rs=tms.ExecQuery('select count(*) from YN_LLBB where item_id=? and Variation_ID is null',(item_id,))

            if rs[0][0]==0:
                # Insert path: stamp DownTime once, on first sight.
                zds.append('DownTime')
                params.append(tttstr)
                zdstr=','.join([f'[{zd}]' for zd in zds])
                wstr=','.join(['?' for _ in params])
                sqlstr=f'insert into YN_LLBB({zdstr}) values({wstr})'
                aff=tms.ExecNoQuery(sqlstr,params)
                #logging.info(f'{item_id},插入状态:{aff}')
                    
                if aff:
                    added+=aff
                else:
                    fail+=1
                    
            else:
                # Update path: never overwrite a stored Parent_SKU with an empty value.
                zds.append('updateTime')
                params.append(tttstr)
                parent_sku_ind=zds.index('Parent_SKU')
                if not params[parent_sku_ind] or params[parent_sku_ind]=='-':
                    zds.pop(parent_sku_ind)
                    params.pop(parent_sku_ind)
                if sku_id!='-':
                    params.extend([item_id,sku_id])
                    zdstr=','.join([f'[{zd}]=?' for zd in zds])
                    sqlstr=f'update YN_LLBB set {zdstr} where item_id=? and Variation_ID=?'
                    aff=tms.ExecNoQuery(sqlstr,params)
                else:
                    params.extend([item_id])
                    zdstr=','.join([f'[{zd}]=?' for zd in zds])
                    sqlstr=f'update YN_LLBB set {zdstr} where item_id=? and Variation_ID is null'
                    aff=tms.ExecNoQuery(sqlstr,params)
                    
                #logging.info(f'{item_id},更新:{aff}')
                if aff:
                    upted+=aff
                else:
                    fail+=1
                    print([item_id,sku_id])
    return f'店铺:《{bname}》,{bid},报表行数:{brow},新增行数:{added},更新行数:{upted},失败行数:{fail}'

def upload_adData(campaign_id,basic_info,adDatas):
    """Upsert campaign keyword pricing into YN_AD_HasKW, keyed on (Product_ID, Keyword).

    A row is updated only when bid price, match type, or state actually changed;
    unchanged keywords are counted as ignored. Prices arrive in micro-units and
    are divided by 100000.

    :param campaign_id: Shopee campaign id
    :param basic_info: dict with 'BrowserID', 'DpName', 'GroupName', 'UserName', 'Password', 'pid'
    :param adDatas: list of keyword records from the Shopee list_keyword API
    """
    bid=basic_info['BrowserID']
    bname=basic_info['DpName']
    gname=basic_info['GroupName']
    userName = basic_info['UserName']
    password = basic_info['Password']
    pid=basic_info['pid']
    zds=['Product_ID','Campaign_ID','Keyword','bid_price','recommended_price','Match_Type',
         'KW_State','BrowserID','BrowserName','User_Name','User_Pwd','GroupName','uploadTime']

    utime=datetime.datetime.now()
    added=0
    upt=0
    ign=0
    for addata in adDatas:
        cur_kw=addata['keyword']['keyword']
        # API prices are in micro-units; convert to the currency value.
        bid_price=round(addata['keyword']['bid_price']/100000,4)
        recommended_price=round(addata['keyword']['recommended_price']/100000,4)
        state=addata['keyword']['state']
        adtype=addata['keyword']['match_type']
        
        rs=tms.ExecQuery(f'select bid_price,Match_Type,KW_State from YN_AD_HasKW where Product_ID=? and Keyword=?',(pid,cur_kw))
        if len(rs)==0:
            params=[pid,campaign_id,cur_kw,bid_price,recommended_price,adtype,state,bid,bname,userName,password,gname,utime]
            zdstr=','.join(zds)
            wstr=','.join(['?' for _ in params])
            sqlstr=f'insert into YN_AD_HasKW({zdstr}) values({wstr})'
            added+=tms.ExecNoQuery(sqlstr,params)
        else:
            # Skip the UPDATE when nothing the report tracks has changed.
            old_bid_price,old_Match_Type,old_KW_State=rs[0]
            if (old_bid_price,old_Match_Type,old_KW_State)!=(bid_price,adtype,state):
                zdstr=','.join([f'{zd}=?' for zd in zds])
                params=[pid,campaign_id,cur_kw,bid_price,recommended_price,adtype,state,
                        bid,bname,userName,password,gname,utime,pid,cur_kw]
                sqlstr=f'update YN_AD_HasKW set {zdstr} where Product_ID=? and Keyword=?'
                upt+=tms.ExecNoQuery(sqlstr,params)
            else:
                ign+=1
    
    logging.info(f'店铺:《{bname}》,bid:{bid},pid:{pid},一共有{len(adDatas)}个广告词,《YN_AD_HasKW》,新增:{added},更新:{upt},忽略:{ign}')
    
def get_xpllbb_value(input_str):
    """Normalize one cell value from an Indonesian-locale Shopee traffic report.

    Conversion rules, in order:
      * non-strings and strings longer than 50 chars pass through unchanged
      * '-' means "no data" and becomes None
      * '41,96%'    -> 0.4196   (comma-decimal percentage; '|' excludes combined cells)
      * '41.96%'    -> 0.4196   (dot-decimal percentage)
      * '2.140.549' -> 2140549  (dot as thousands separator)
      * '428'       -> 428
      * anything else passes through unchanged
    """
    if not isinstance(input_str, str):
        return input_str
    if len(input_str) > 50:
        return input_str
    if input_str == '-':
        return None

    has_pct = '%' in input_str
    if has_pct and ',' in input_str and '|' not in input_str:
        # Comma is the decimal mark: '41,96%' -> 0.4196
        return round(float(input_str.replace(',', '.').replace('%', '')) / 100, 4)
    if has_pct and '.' in input_str:
        # Dot is the decimal mark: '41.96%' -> 0.4196
        return round(float(input_str.replace('%', '')) / 100, 4)

    compact = input_str.replace('.', '')
    if '.' in input_str and compact.isdigit():
        # Dots are thousands separators: '2.140.549' -> 2140549
        return int(compact)
    if input_str.isdigit():
        return int(input_str)
    return input_str

def upload_camp(bid,bname,camps,shopid):
    """Upsert campaign records into the `campaign` table, keyed on product_id.

    Resets the workflow flags (is_download_bb / is_del_kw / is_add_kw /
    is_edit_kw) to 0 and marks the campaign active on every refresh, then
    flags the shop in `houtai` as ready for the BB-AdID task.

    :param bid: browser id
    :param bname: shop display name
    :param camps: campaign payloads from the Shopee ads API
    :param shopid: Shopee shop id
    """
    cur_time=datetime.datetime.now()
    cur_sql_datetime = cur_time.strftime('%Y-%m-%d %H:%M:%S')
    up_count=0
    add_count=0
    fail_count=0
    
    for camp in camps:
        campaign_id=str(camp['campaign']['campaign_id'])
        # Budget arrives in micro-units; convert to the currency value.
        daily_budget=int(camp['campaign']['daily_budget']/100000)
        end_time=camp['campaign']['end_time']
        start_time=camp['campaign']['start_time']
        item_id=str(camp['manual_product_ads']['item_id'])
        product_placement=camp['manual_product_ads']['product_placement']
        state=camp['state']
        image=f"https://down-id.img.susercontent.com/file/{camp['image']}"
        title=camp['title']
        zds=['bid','shopid','bname','campaign_id','product_id','product_placement','title','daily_budget','start_time','end_time',
        'image','state','is_download_bb','is_del_kw','is_add_kw','is_edit_kw','update_time','is_on']
        params=[bid,shopid,bname,campaign_id,item_id,product_placement,title,daily_budget,start_time,end_time,
                image,state,0,0,0,0,cur_sql_datetime,1]
        rs=tms.ExecQuery('select count(*) from campaign where product_id=?',(item_id,))
        if rs[0][0]==0:
            # Insert path: stamp upload_time once, on first sight.
            zds.append('upload_time')
            params.append(cur_sql_datetime)
            zdstr=','.join(zds)
            wstr=','.join(['?' for _ in params])
            aff=tms.ExecNoQuery(f'insert into campaign({zdstr}) values({wstr})',params)
            if aff:
                add_count+=aff
            else:
                fail_count+=aff
        else:
            zdstr=','.join([f'{zd}=?' for zd in zds])
            params.append(item_id)
            aff=tms.ExecNoQuery(f'update campaign set {zdstr} where product_id=?',params)
            if aff:
                up_count+=aff
            else:
                fail_count+=aff
        
    # Mark the shop so the downstream BB-AdID download task picks it up.
    aff_bb_dd=tms.ExecNoQuery('update houtai set isBBAdID=1 where BrowserID=?',(bid,))

    logging.info(f'店铺:《{bname}》,{bid},共{len(camps)} 个广告,新增:{add_count},更新:{up_count},失败:{fail_count},标记已添加任务:{aff_bb_dd}')

def upload_adkw_action(pid, camp_id, kw, action_type, action_des, basic_info):
    """Record one ad-keyword action in YN_AD_GL, keyed on (pid, kw, action_type).

    Inserts a new row (stamping upload_time) on first sight, otherwise updates
    the existing row and refreshes update_time.

    :param pid: product id
    :param camp_id: campaign id
    :param kw: keyword the action applies to
    :param action_type: action category used in the upsert key
    :param action_des: human-readable action description
    :param basic_info: dict with 'BrowserID', 'DpName', 'GroupName'
    """
    bid = basic_info['BrowserID']
    bname = basic_info['DpName']
    gname = basic_info['GroupName']
    cur_time = datetime.datetime.now()
    zds = ['pid', 'camp_id', 'kw', 'action_type', 'action_des', 'bid', 'bname', 'gname', 'update_time']
    params = [pid, camp_id, kw, action_type, action_des, bid, bname, gname, cur_time]

    count_rs = tms.ExecQuery('select count(*) from YN_AD_GL where pid=? and kw=? and action_type=?',
                             (pid, kw, action_type))
    if count_rs[0][0] == 0:
        zds.append('upload_time')
        params.append(cur_time)
        col_clause = ','.join(zds)
        placeholders = ','.join('?' for _ in params)
        aff = tms.ExecNoQuery(f'insert into YN_AD_GL({col_clause}) values({placeholders})', params)
        logging.info(f'店铺:《{bname}》,{bid},操作类型:{action_type},详细:{action_des},新增:{aff}')
    else:
        set_clause = ','.join(f'{zd}=?' for zd in zds)
        params.extend([pid, kw, action_type])
        aff = tms.ExecNoQuery(f'update YN_AD_GL set {set_clause} where pid=? and kw=? and action_type=?', params)
        logging.info(f'店铺:《{bname}》,{bid},操作类型:{action_type},详细:{action_des},更新:{aff}')

def get_list_kw(cookie, campaign_id, SPC_CDS, binfo):
    """Fetch a campaign's keyword list from the Shopee seller API.

    When `binfo` is given, the raw response rows are uploaded to the DB on a
    background thread so the caller is not blocked.

    :return: {keyword: {'bid_price': float, 'match_type': 'Exact match'|'Broad Match'}}
             for keywords in the 'active' state only
    """
    sess = requests.session()
    url = 'https://seller.shopee.co.id/api/pas/v1/product/manual/list_keyword_with_recommended_price/'
    headers = {
        'cookie': cookie,
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36'
    }
    query = {
        'SPC_CDS': SPC_CDS,
        'SPC_CDS_VER': 2
    }
    payload = {"campaign_id": int(campaign_id),
               "need_recommended_price": True,
               "header": {}}

    json_data = sess.post(url, params=query, json=payload, headers=headers).json()
    if binfo is not None:
        Thread(target=upload_adData, args=(campaign_id, binfo, json_data['data'])).start()

    kw_price_info = {}
    for entry in json_data['data']:
        kw_rec = entry['keyword']
        if kw_rec['state'] != 'active':
            continue
        info = kw_price_info.setdefault(kw_rec['keyword'], {})
        # API price is in micro-units.
        info['bid_price'] = kw_rec['bid_price'] / 100000
        info['match_type'] = 'Exact match' if kw_rec['match_type'] == 'exact' else 'Broad Match'

    return kw_price_info

def get_list_kw2(cookie, campaign_id, SPC_CDS, binfo=None):
    """Fetch a campaign's keywords from the Shopee seller API and split them by state.

    :param binfo: accepted for signature compatibility with get_list_kw; unused here
    :return: (active_keywords, deleted_keywords, {active_kw: match_type})
    """
    sess = requests.session()
    url = 'https://seller.shopee.co.id/api/pas/v1/product/manual/list_keyword_with_recommended_price/'
    headers = {
        'cookie': cookie,
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36'
    }
    query = {
        'SPC_CDS': SPC_CDS,
        'SPC_CDS_VER': 2
    }
    payload = {"campaign_id": int(campaign_id),
               "need_recommended_price": True,
               "header": {}}

    json_data = sess.post(url, params=query, json=payload, headers=headers).json()

    kws_acitve = []
    kws_deleted = []
    kws_active_match = {}
    for entry in json_data['data']:
        kw_rec = entry['keyword']
        word = kw_rec['keyword']
        if kw_rec['state'] == 'active':
            kws_acitve.append(word)
            kws_active_match[word] = kw_rec['match_type']
        elif kw_rec['state'] == 'deleted':
            kws_deleted.append(word)
    return kws_acitve, kws_deleted, kws_active_match

def get_all_xppro(ck,spc_cds):
    """Page through a seller's full Shopee product list and collect "aged" products.

    Products created more than 20 days ago are returned as
    {itemid: [itemid, parent_sku, min_price]}. Errors on a page or a product
    are logged via traceback and the scan continues.

    :param ck: seller-center cookie string
    :param spc_cds: SPC_CDS token for the API
    :return: (pages_scanned, aged_products_dict); pages_scanned is 0 when the
             shop has no products
    """
    session=requests.session()
    stay_time=86400*20   # 20 days in seconds: minimum product age to collect
    cur_time=int(time.time())
    headers={
        'cookie':ck,
        'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36'
    }
    page=1
    max_p=5
    url=f'https://seller.shopee.co.id/api/v3/mpsku/list/v2/get_product_list'
    cs_pros={}
    while True:
        params={
            'SPC_CDS': spc_cds,
            'SPC_CDS_VER': 2,
            'page_number': page,
            'page_size': 48,
            'list_type': 'all',
            'need_ads': True
        }
        try:
            res=session.get(url,headers=headers,params=params)
            json_data=res.json()
            page_info=json_data['data']['page_info']
            if page_info['total']==0:
                return (0,cs_pros)
            pros=json_data['data']['products']
            # The first page reveals the real page count; recompute the loop bound.
            if page==1:
                pz=page_info['page_size']
                ptoal=page_info['total']
                max_p=math.ceil(ptoal/pz)

            for pro in pros:
                try:
                    pro_itemid=str(pro['id'])
                    pro_create_time=pro['create_time']
                    pro_min_price=int(float(pro['price_detail']['price_min']))
                    pro_ph_itemid=pro['parent_sku']
                    # Keep only products older than stay_time (20 days).
                    if cur_time-pro_create_time>stay_time:
                        cs_pros[pro_itemid]=[pro_itemid,pro_ph_itemid,pro_min_price]
                except Exception as e:
                    # Best-effort: a malformed product entry must not stop the scan.
                    traceback.print_exc()
        except Exception as e:
            # Best-effort: a failed page fetch must not stop the scan.
            traceback.print_exc()

        page+=1
        if page>max_p:
            break

    return (max_p,cs_pros)

def delte_ad_kw(cookie,campaign_id,SPC_CDS,delkws):
    """Delete ad keywords from a Shopee campaign, one small batch per request.

    :param cookie: seller session cookie
    :param campaign_id: target ad campaign id
    :param SPC_CDS: Shopee SPC_CDS token
    :param delkws: list of keyword strings to delete
    :return: '无需删除' when delkws is empty, else {'删除成功': n, '删除失败': m}
    """
    session=requests.session()

    url='https://seller.shopee.co.id/api/pas/v1/product/manual/mass_edit_keyword/'
    headers={
        'cookie':cookie,
        'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36'
    }

    params={
        'SPC_CDS':SPC_CDS,
        'SPC_CDS_VER': 2
    }
    if not delkws:
        return '无需删除'
    suc=0
    fail=0
    qiepian=1  # batch size per request
    for j in range(math.ceil(len(delkws)/qiepian)):
        batch=delkws[j*qiepian:(j+1)*qiepian]
        keyword_edit_list=[{"keyword": kw, "type": "delete"} for kw in batch]
        data= {
                "campaign_id": int(campaign_id), 
                "keyword_edit_list": keyword_edit_list, 
                "header": {}
                }

        try:
            # timeout added so one stuck request cannot block the whole run
            res=session.post(url,params=params,json=data,headers=headers,timeout=30)
            json_data=res.json()
            if json_data['code']==0 and json_data['msg']=='OK':
                # count the actual batch size, not qiepian: the last batch may
                # be shorter when qiepian > 1, which previously inflated counts
                suc+=len(batch)

            else:
                fail+=len(batch)
                time.sleep(1)
        except Exception as e:
            print(f'删除错误 =>{e}')
            fail+=len(batch)
        time.sleep(0.1)

    return {'删除成功':suc,'删除失败':fail}

def edit_kw(cookie,campaign_id,SPC_CDS,keyword_edit_list):
    """Apply keyword edits (add / change_match_type / change_bid_price) to a campaign.

    Each edit is sent as its own request and retried until it succeeds, the
    keyword turns out to already exist / be blacklisted (both counted as
    success), or 10 consecutive failures occur — the failing edit is then
    recorded in YN_AD_INFO.failEdit and skipped.

    :param cookie: seller session cookie
    :param campaign_id: target ad campaign id
    :param SPC_CDS: Shopee SPC_CDS token
    :param keyword_edit_list: list of edit dicts ({"keyword":..., "type":..., ...})
    :return: '无需执行' when the list is empty, else {'操作成功': n, '操作失败': m}
    """
    session=requests.session()

    if not keyword_edit_list:
        return '无需执行'
    
    url='https://seller.shopee.co.id/api/pas/v1/product/manual/mass_edit_keyword/'
    headers={
        'cookie':cookie,
        'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36'
    }

    params={
        'SPC_CDS':SPC_CDS,
        'SPC_CDS_VER': 2
    }
    suc=0
    fail=0
    qp=1
    for keyword_edit_item in keyword_edit_list:
        data={
                "campaign_id":int(campaign_id),
                "keyword_edit_list":[keyword_edit_item],
                "header":{}
                }
        err_count=0

        while True:
            try:
                # timeout added so a stalled request cannot hang the retry loop
                res=session.post(url,params=params,json=data,headers=headers,timeout=30)
                json_data=res.json()
                if json_data['code']==0 and json_data['msg']=='OK':
                    suc+=qp
                    kw_suc=keyword_edit_item['keyword']
                    edit_type=keyword_edit_item['type']
                    action_info=''
                    if edit_type=='add':
                        action_info=f'添加广告词《{kw_suc}》,价格:{keyword_edit_item[edit_type]["bid_price"]/100000},类型:{keyword_edit_item[edit_type]["match_type"]}'
                    elif edit_type=='change_match_type':
                        action_info=f'修改广告词类型《{kw_suc}》:{keyword_edit_item[edit_type]["match_type"]}'
                    elif edit_type=='change_bid_price':
                        action_info=f'修改广告词价格《{kw_suc}》:{keyword_edit_item[edit_type]["price"]/100000}'

                    print(f'成功操作+1,{action_info},剩余:{len(keyword_edit_list)-suc}')
                    break
                else:
                    # "already exists" / "blacklisted" are terminal, not retryable
                    if json_data['code'] == 1 and json_data['msg'] == 'update fail':
                        if json_data.get('data') and json_data.get('data').get('fail_list'):
                            if json_data['data']['fail_list'][0]['msg']=='keyword already exists':
                                print(f'关键词已存在跳过操作+1,剩余:{len(keyword_edit_list)-suc}')
                                suc+=1
                                break
                            elif json_data['data']['fail_list'][0]['msg']=='keyword is blacklisted':
                                black_kw=json_data['data']['fail_list'][0]['keyword']
                                aff_lh=tms.ExecNoQuery('update YN_AD_INFO set isBlacklist=1 where Search_Query=?',(black_kw,))
                                print(f'关键词《{black_kw}》拉黑操作+1,数据表拉黑+{aff_lh},剩余:{len(keyword_edit_list)-suc}')
                                suc+=1
                                break
                    
                    fail+=qp
                    err_count+=1
                    print(f'失败的操作+1:{res.json()}')
                    if err_count>=10:
                        kw_err=keyword_edit_item['keyword']
                        aff_err=tms.ExecNoQuery('update YN_AD_INFO set failEdit=? where Search_Query=?',(json.dumps(keyword_edit_item),kw_err))
                        print(f'失败操作:{keyword_edit_item},超过10次,记录到数据库:{aff_err}')
                        err_count=0
                        break
                    time.sleep(1)
            except Exception as e:
                print(f'edit_kw错误 => {e}')
                fail+=qp
                # bug fix: exceptions previously never advanced err_count nor slept,
                # so a dead connection caused a tight infinite retry loop; apply the
                # same 10-strike bailout and pacing here too
                err_count+=1
                if err_count>=10:
                    kw_err=keyword_edit_item['keyword']
                    aff_err=tms.ExecNoQuery('update YN_AD_INFO set failEdit=? where Search_Query=?',(json.dumps(keyword_edit_item),kw_err))
                    print(f'失败操作:{keyword_edit_item},超过10次,记录到数据库:{aff_err}')
                    break
                time.sleep(1)
        time.sleep(0.5)
    
    return {'操作成功':suc,'操作失败':fail}

def get_similarity(str1, str2):
    """Return a fast upper-bound similarity ratio in [0, 1] between two strings."""
    matcher = difflib.SequenceMatcher(None, str1, str2)
    return matcher.quick_ratio()

def buCongYNCD():
    """Backfill missing PH random codes in YNBigData.

    For each distinct itemID whose code column is empty (excluding rows marked
    '手动添加'), tries to recover the random code from proshopeeph by:
      1. exact title (yntitle) match;
      2. if no title match: sku equality, then best >50% title similarity;
      3. if several title matches: the row whose (sku1, sku2) pair equals
         the YNBigData row's skus.
    Writes code/title/PHItemid back into YNBigData on success.
    """

    rs= tms.ExecQuery('''select itemid,sku1,sku2,title from YNBigData
                        where id in (select min(id) from YNBigData 
                        where (code is null or code='') and adsid!='手动添加' group by itemID)
                     ''')
    
    print(f'共 {len(rs)} 个出单itemid,需找随机码')
    for cd_itemid,cd_sku1,cd_sku2,cd_title in rs:
        # treat empty-string sku2 as missing so it compares equal to NULL skus
        cd_sku2=cd_sku2 if cd_sku2 else None
        rs_ph=tms.ExecQuery('''select itemid,sku1,sku2,yntitle,yntitleRcode from proshopeeph where id in
                                (select min(id) from proshopeeph where yntitle=? group by itemid)
                           ''',(cd_title,))
        print(f'{cd_itemid} 从原始数据匹配到 {len(rs_ph)} 个数据')

        if len(rs_ph)==0:
            # no exact-title hit: narrow by sku, then pick the most similar title
            print(f'{cd_itemid} 标题匹配不到,先用sku缩减范围,取标题相似度最高作为数据链')
            print(cd_sku1,cd_sku2)
            if not cd_sku2:
                rs_phmgs=tms.ExecQuery("select itemid,yntitle,yntitleRcode from proshopeeph where sku1=? and (sku2='' or sku2 is null)",(cd_sku1,))

            else:
                rs_phmgs=tms.ExecQuery('select itemid,yntitle,yntitleRcode from proshopeeph where sku1=? and sku2=?',(cd_sku1,cd_sku2))
            

            # keep only candidates whose title similarity exceeds 50%
            ph_pros=[[ph_itemid,yntitle,yntitleRcode,get_similarity(yntitle,cd_title)] for ph_itemid,yntitle,yntitleRcode in rs_phmgs if yntitle and get_similarity(yntitle,cd_title)>0.5]
            print(f'共有{len(rs_phmgs)} 条数据待比对标题,标题相似度大于50%的有 {len(ph_pros)} 条')

            if len(ph_pros)==0:
                print(f'{cd_itemid},匹配失败')
            
            else:
                # best candidate = highest similarity score
                ph_pros.sort(key= lambda x :x[-1],reverse=True)

                print(f'匹配到最大相似度的标题数据位:{ph_pros[0][-1]}')
                ppph_itemid,ppyntitle,ppyntitleRcode,ppxsd=ph_pros[0]
                # assumes yntitleRcode always ends with '|<8-char code>|';
                # a non-matching value would raise AttributeError here — TODO confirm
                rcode= re.search(r'.*\|(.{8})\|$',ppyntitleRcode).group(1)
                aff=tms.ExecNoQuery('update YNBigData set code=?,title=?,PHItemid=? where itemid=?',(rcode,ppyntitleRcode,ppph_itemid,cd_itemid))
                print(f'cd_itemid:{cd_itemid} => ph_itemid:{ppph_itemid},成功匹配随机码:{rcode},更新行数:{aff}')


        elif len(rs_ph)==1:
            # single exact-title hit: take it directly
            ph_itemid,sku1,sku2,yntitle,yntitleRcode=rs_ph[0]
            rcode= re.search(r'.*\|(.{8})\|$',yntitleRcode).group(1)
            aff=tms.ExecNoQuery('update YNBigData set code=?,title=?,PHItemid=? where itemid=?',(rcode,yntitleRcode,ph_itemid,cd_itemid))
            print(f'cd_itemid:{cd_itemid} => ph_itemid:{ph_itemid},成功匹配随机码:{rcode},更新行数:{aff}')
        else:
            # multiple title hits: disambiguate by comparing every sku pair
            print(f'{cd_itemid} 多行匹配,通过sku进一步匹配')
            
            for ph_itemid,ph_sku1,ph_sku2,yntitle,yntitleRcode in rs_ph:
                rs_skus=tms.ExecQuery('select sku1,sku2 from proshopeeph where itemid=?',(ph_itemid,))
                flag=False
                for sku1,sku2 in rs_skus:
                    sku2= sku2 if sku2 else None
                    rcode= re.search(r'.*\|(.{8})\|$',yntitleRcode).group(1)
                    if sku1==cd_sku1 and sku2==cd_sku2:
                        
                        aff=tms.ExecNoQuery('update YNBigData set code=?,title=?,PHItemid=? where itemid=?',(rcode,yntitleRcode,ph_itemid,cd_itemid))

                        print(f'cd_itemid:{cd_itemid} => ph_itemid:{ph_itemid},成功匹配随机码:{rcode},更新行数:{aff}')
                        flag=True
                        break
                if flag:
                    break

def update_sjmddd(ppp):
    """Persist a title -> [code, itemid] mapping into sjm.json, if not already present.

    :param ppp: (title, itemid, code) triple
    """
    title, item_id, rcode = ppp
    with open('sjm.json','r',encoding='utf-8') as fh:
        mapping = json.load(fh)

    if mapping.get(title):
        return  # an entry already exists; keep it untouched

    mapping[title] = [rcode, item_id]
    with open('sjm.json','w',encoding='utf-8') as fh:
        json.dump(mapping, fh)

# Module-level cache: (itemid, title) pairs loaded once from shopeePro.json;
# used by find_code() as the last-resort title-similarity candidate pool.
with open('shopeePro.json','r',encoding='utf-8') as f:
    all_tits=json.load(f)

def find_code(pdata):
    """Resolve the PH random code and PH itemid for an Indonesian product row.

    Lookup order:
      1. local sjm.json title -> [code, itemid] cache;
      2. YN_LLBB traffic report (Item_ID -> Parent_SKU -> RandomCodes);
      3. exact yntitle match in proshopeeph;
      4. sku equality + best >50% title similarity in proshopeeph;
      5. best >50% title similarity against the full shopeePro.json pool.

    :param pdata: (itemid, sku1, sku2, title)
    :return: (random_code, ph_itemid), or ('','') when nothing matched
    """
    with open('sjm.json','r',encoding='utf-8') as f:
        has_pp=json.load(f)

    cd_itemid,cd_sku1,cd_sku2,cd_title=pdata
    # treat empty-string sku2 as missing
    cd_sku2=cd_sku2 if cd_sku2 else None
    # 1) existing mapping cache
    if has_pp.get(cd_title):
        print(f'{cd_itemid},从已有映射表找到')
        return has_pp.get(cd_title)

    # 2) traffic report lookup
    rs_llbb=tms.ExecQuery('select Parent_SKU from YN_LLBB where Item_ID=?',(str(cd_itemid),))
    if len(rs_llbb)>0 and rs_llbb[0][0]:
        bb_ph_itemid=rs_llbb[0][0]
        # parameterized (was an f-string query — SQL injection risk via itemid)
        rs_rcode=tms.ExecQuery('select RandomCode from RandomCodes where itemid=?',(bb_ph_itemid,))
        bb_rcode=rs_rcode[0][0]
        print(f'{cd_itemid} 从流量报表匹配到phitemid:{bb_ph_itemid},随机码:{bb_rcode}')
        update_sjmddd([cd_title,bb_ph_itemid,bb_rcode])
        return (bb_rcode,rs_llbb[0][0])

    # 3) exact title match
    rs_ph=tms.ExecQuery('''select itemid,sku1,sku2,yntitle,yntitleRcode from proshopeeph where id in
                            (select min(id) from proshopeeph where yntitle=? group by itemid)
                        ''',(cd_title,))
    print(f'{cd_itemid},标题方式从原始标题匹配到 {len(rs_ph)} 个数据')

    if len(rs_ph)==0:
        # 4) narrow by whitespace-insensitive sku match, then by title similarity
        print(f'{cd_itemid} 标题原始标题匹配不到,先用sku:{[cd_sku1,cd_sku2]},缩减范围')

        if not cd_sku2:
            rs_phmgs=tms.ExecQuery("select itemid,yntitle,yntitleRcode from proshopeeph where REPLACE(sku1, ' ', '')=? and (sku2='' or sku2 is null)",(cd_sku1.replace(' ',''),))

        else:
            rs_phmgs=tms.ExecQuery("select itemid,yntitle,yntitleRcode from proshopeeph where REPLACE(sku1, ' ', '')=? and REPLACE(sku2, ' ', '')=?",(cd_sku1.replace(' ',''),cd_sku2.replace(' ','')))


        ph_pros=[[ph_itemid,yntitle,yntitleRcode,get_similarity(yntitle,cd_title)] for ph_itemid,yntitle,yntitleRcode in rs_phmgs if yntitle and get_similarity(yntitle,cd_title)>0.5]
        print(f'{cd_itemid},sku方式共有{len(rs_phmgs)} 条数据待比对标题,标题相似度大于50%的有 {len(ph_pros)} 条')

        if len(ph_pros)==0:
            # 5) last resort: search every known title for a similar one
            print(f'{cd_itemid},sku没匹配到数据,开始从所有标题匹配...')

            all_pp_tits=[[ph_itemid,yntitle,get_similarity(yntitle,cd_title)] for ph_itemid,yntitle in all_tits if yntitle and get_similarity(yntitle,cd_title)>0.5]
            print(f'{cd_itemid},所有数据标题相似度大于50%的有 {len(all_pp_tits)} 条')
            if len(all_pp_tits)>0:
                all_pp_tits.sort(key= lambda x :x[-1],reverse=True)
                allpp_itemid,all_yntitle,ppxsd=all_pp_tits[0]
                print(f'{cd_itemid},匹配到最大相似度的标题数据位:{ppxsd}')
                # parameterized (was an f-string query — SQL injection risk via itemid)
                rs_sjm=tms.ExecQuery('select RandomCode from RandomCodes where itemid=?',(allpp_itemid,))
                sjm=rs_sjm[0][0]
                update_sjmddd([cd_title,allpp_itemid,sjm])
                return (sjm,allpp_itemid)
            else:
                return ('','')


        else:
            ph_pros.sort(key= lambda x :x[-1],reverse=True)
            print(f'{cd_itemid},匹配到最大相似度的标题数据位:{ph_pros[0][-1]}')
            ppph_itemid,ppyntitle,ppyntitleRcode,ppxsd=ph_pros[0]
            # assumes yntitleRcode ends with '|<8-char code>|' — TODO confirm
            rcode= re.search(r'.*\|(.{8})\|$',ppyntitleRcode).group(1)
            update_sjmddd([cd_title,ppph_itemid,rcode])
            return (rcode,ppph_itemid)

    elif len(rs_ph)==1:
        ph_itemid,sku1,sku2,yntitle,yntitleRcode=rs_ph[0]
        rcode= re.search(r'.*\|(.{8})\|$',yntitleRcode).group(1)
        update_sjmddd([cd_title,ph_itemid,rcode])
        return (rcode,ph_itemid)

    else:
        # several exact-title matches: prefer the one whose sku pair matches
        print(f'{cd_itemid} 多行匹配,通过sku进一步匹配')
        for ph_itemid,ph_sku1,ph_sku2,yntitle,yntitleRcode in rs_ph:
            rs_skus=tms.ExecQuery('select sku1,sku2 from proshopeeph where itemid=?',(ph_itemid,))

            for sku1,sku2 in rs_skus:
                sku2= sku2 if sku2 else None
                rcode= re.search(r'.*\|(.{8})\|$',yntitleRcode).group(1)
                if sku1==cd_sku1 and sku2==cd_sku2:
                    update_sjmddd([cd_title,ph_itemid,rcode])
                    return (rcode,ph_itemid)

        # no sku pair matched: fall back to the first title match
        ph_itemid,ph_sku1,ph_sku2,yntitle,yntitleRcode=rs_ph[0]
        rcode= re.search(r'.*\|(.{8})\|$',yntitleRcode).group(1)
        update_sjmddd([cd_title,ph_itemid,rcode])
        return (rcode,ph_itemid)

def find_rcode(cd_title,cd_sku1,cd_sku2=None):
    """Resolve (random_code, ph_itemid) for a product by title, falling back to sku.

    Matching strategy (no sjm.json cache, unlike find_code):
      1. exact yntitle match in proshopeeph;
      2. no match -> sku equality, then best >50% title similarity;
      3. several matches -> first row whose sku pair equals the given skus,
         else the first matched row.

    :param cd_title: product title to match
    :param cd_sku1: first sku option name
    :param cd_sku2: second sku option name, optional
    :return: (random_code, ph_itemid), or ('','') when nothing matched
    """


    # treat empty-string sku2 as missing so it compares equal to NULL skus
    cd_sku2=cd_sku2 if cd_sku2 else None
    rs_ph=tms.ExecQuery('''select itemid,sku1,sku2,yntitle,yntitleRcode from proshopeeph where id in
                            (select min(id) from proshopeeph where yntitle=? group by itemid)
                        ''',(cd_title,))
    print(f' 从原始数据匹配到 {len(rs_ph)} 个数据')

    if len(rs_ph)==0:
        # no exact-title hit: narrow by sku, then pick the most similar title
        print(f' 标题匹配不到,先用sku缩减范围,取标题相似度最高作为数据链')
        print(cd_sku1,cd_sku2)
        if not cd_sku2:
            rs_phmgs=tms.ExecQuery("select itemid,yntitle,yntitleRcode from proshopeeph where sku1=? and (sku2='' or sku2 is null)",(cd_sku1,))

        else:
            rs_phmgs=tms.ExecQuery('select itemid,yntitle,yntitleRcode from proshopeeph where sku1=? and sku2=?',(cd_sku1,cd_sku2))
        

        # keep only candidates whose title similarity exceeds 50%
        ph_pros=[[ph_itemid,yntitle,yntitleRcode,get_similarity(yntitle,cd_title)] for ph_itemid,yntitle,yntitleRcode in rs_phmgs if yntitle and get_similarity(yntitle,cd_title)>0.5]
        print(f'共有{len(rs_phmgs)} 条数据待比对标题,标题相似度大于50%的有 {len(ph_pros)} 条')

        if len(ph_pros)==0:
            return ('','')
        
        else:
            ph_pros.sort(key= lambda x :x[-1],reverse=True)
            print(f'匹配到最大相似度的标题数据位:{ph_pros[0][-1]}')
            ppph_itemid,ppyntitle,ppyntitleRcode,ppxsd=ph_pros[0]
            # assumes yntitleRcode always ends with '|<8-char code>|';
            # a non-matching value raises AttributeError here — TODO confirm
            rcode= re.search(r'.*\|(.{8})\|$',ppyntitleRcode).group(1)
            return (rcode,ppph_itemid)

    elif len(rs_ph)==1:
        ph_itemid,sku1,sku2,yntitle,yntitleRcode=rs_ph[0]
        rcode= re.search(r'.*\|(.{8})\|$',yntitleRcode).group(1)
        return (rcode,ph_itemid)
    
    else:
        # several exact-title matches: prefer the row whose sku pair matches
        print(f' 多行匹配,通过sku进一步匹配')
        for ph_itemid,ph_sku1,ph_sku2,yntitle,yntitleRcode in rs_ph:
            rs_skus=tms.ExecQuery('select sku1,sku2 from proshopeeph where itemid=?',(ph_itemid,))

            for sku1,sku2 in rs_skus:
                sku2= sku2 if sku2 else None
                rcode= re.search(r'.*\|(.{8})\|$',yntitleRcode).group(1)
                if sku1==cd_sku1 and sku2==cd_sku2:
                    return (rcode,ph_itemid)
        
        # no sku pair matched: fall back to the first title match
        ph_itemid,ph_sku1,ph_sku2,yntitle,yntitleRcode=rs_ph[0]
        rcode= re.search(r'.*\|(.{8})\|$',yntitleRcode).group(1)
        return (rcode,ph_itemid)

def getsmtpro(rp):
    """Normalize raw proshopeeph row dicts and group them per itemid for export.

    Steps, in order:
      1. clamp sku name lengths (>20 chars -> first 18), default/limit the
         description, and enforce a 169000 minimum on both price fields;
      2. de-duplicate repeated sku names within the same item by suffixing
         a counter;
      3. group rows by itemid;
      4. flatten price spreads wider than 7x (raise the floor to max/7);
      5. per item, keep only the sku1 groups whose sku2 option set equals the
         item's most common sku2 set, capped at 80 rows per item.

    :param rp: list of row dicts (mutated in place) with keys incl. itemid,
               sku1, sku2, pdec, yntitle, sku_price, skuPirce
    :return: list of per-item row lists
    """
    # 1) normalize sku1, sku2 and pdec; enforce price floors
    for item in rp:

        for skuitem in ['sku1', 'sku2']:
            if item[skuitem]:
                if len(item[skuitem]) > 20:
                    item[skuitem] = item[skuitem][:18]
        if not item['pdec']:
            item['pdec'] = item['yntitle']

        if item['pdec'] and len(item['pdec']) > 2500:
            item['pdec'] = item['pdec'][:2500]
        if item.get('sku_price'):
            if item['sku_price']<169000:
                item['sku_price']=169000
        if item.get('skuPirce'):
            if item['skuPirce']<169000:
                item['skuPirce']=169000

    # 2) rename duplicated sku1/sku2 values within an item
    count_dict = {}
    for item in rp:
        item_id = item['itemid']
        sku1 = item['sku1']
        sku2 = item['sku2']
        if sku1 and not sku2:
            if (item_id, sku1) in count_dict:
                count_dict[(item_id, sku1)] += 1
                item['sku1'] = f"{sku1}_{count_dict[(item_id, sku1)]}"
            else:
                count_dict[(item_id, sku1)] = 1

        if sku2:
            # NOTE(review): this branch suffixes with the pre-increment count
            # ("_1" for the first duplicate) while the sku1 branch above uses
            # the post-increment value ("_2") — confirm the asymmetry is intentional
            if (item_id, sku1, sku2) in count_dict:
                item['sku2'] = f"{sku2}_{count_dict[(item_id, sku1, sku2)]}"
                count_dict[(item_id, sku1, sku2)] += 1
            else:
                count_dict[(item_id, sku1, sku2)] = 1
    

    # 3) group rows by itemid, preserving input order
    rp_dict = {}
    for r in rp:
        item_id = r['itemid']
        if rp_dict.get(item_id):
            rp_dict[item_id].append(r)
        else:
            rp_dict[item_id] = [r]

    final_result = []
    for item_id, records in rp_dict.items():
        
        # 4) cap the min/max price spread at 7x by raising the floor
        min_p=min(records, key=lambda x: x["sku_price"])['sku_price']
        max_p=max(records, key=lambda x: x["sku_price"])['sku_price']
        if max_p/min_p>7:
            min_p=math.ceil(max_p/7)
        for pitem in records:
            if pitem['sku_price']<min_p:
                pitem['sku_price']=min_p
        # 5) collect each sku1's set of sku2 options
        grouped_by_sku1 = defaultdict(list)
        cur_sku2s=defaultdict(set)
        for record in records:
            grouped_by_sku1[record['sku1']].append(record)
            if not record['sku2']:
                record['sku2']=''
            cur_sku2s[record['sku1']].add(record['sku2'])
        current_count = 0
        item_group = []
        #print(cur_sku2s.values())
        # find the sku2 option set shared by the most sku1 groups
        set_counts = Counter(tuple(sorted(s)) for s in cur_sku2s.values())
        most_common_set, count = set_counts.most_common(1)[0]
        # keep only sku1 groups with that exact sku2 set, up to 80 rows total
        for sku1, sku_records in grouped_by_sku1.items():
            if cur_sku2s[sku1]==set(most_common_set):
                if current_count + len(sku_records) <= 80:
                    item_group.extend(sku_records)
                    current_count += len(sku_records)
                else:
                    break
        final_result.append(item_group)
    #final_result=list(rp_dict.values())
    return final_result

def get_gdppkw(session,ques,kj,kw):
    """Ask deepseek whether keyword `kw` highly matches; keep it only on '高度匹配'.

    :return: (kj, kw) when the answer is exactly '高度匹配', otherwise (kj, None)
    """
    answer = ask_deepseek(session, ques)
    if answer == '高度匹配':
        return kj, kw
    return kj, None


def get_ppd_kj(session,ques,kj,kw):
    """Return (kj, kw, answer) where answer is deepseek's response to `ques`."""
    return kj, kw, ask_deepseek(session, ques)


def ask_deepseek(session,ques,model='deepseek-chat'):
    """Send a single-turn question to the deepseek chat API and return the answer text.

    Retries up to 5 times on non-200 responses or request errors, pausing 1s
    between attempts.

    :param session: requests session pre-configured with auth headers
    :param ques: user question text
    :param model: deepseek model name
    :return: the answer string, or None when all 5 attempts fail
    """
    payload = {
        "messages": [
            {
                "content": ques,
                "role": "user"
            }
        ],
        "model": model,
        "frequency_penalty": 0,
        "max_tokens": 8192,
        "presence_penalty": 0,
        "response_format": {
            "type": "text"
        },
        "stop": None,
        "stream": False,
        "stream_options": None,
        "temperature": 1,
        "top_p": 1,
        "tools": None,
        "tool_choice": "none",
        "logprobs": False,
        "top_logprobs": None
    }
    for _ in range(5):
        try:

            res = session.post('https://api.deepseek.com/chat/completions',json=payload,timeout=120)
            if res.status_code==200:
                js_data=res.json()
                ppd=js_data['choices'][0]['message']['content']
                return ppd
            else:
                print(f'询问状态码出错')

        except Exception as e:
            print(f'询问出错 => {e}')
        # bug fix: back off briefly instead of hammering the API when a
        # request fails fast (e.g. connection refused)
        time.sleep(1)

def deepseek_session_singleton(func):
    """Decorator that lazily builds ONE shared requests session (with DS_Headers)
    and injects it into the wrapped function as the `session` keyword argument."""
    _cached = []

    @wraps(func)
    def wrapper(*args, **kwargs):
        if not _cached:
            shared = requests.session()
            shared.headers = DS_Headers
            _cached.append(shared)
        return func(session=_cached[0], *args, **kwargs)

    return wrapper

@deepseek_session_singleton
def ask_deepseek_json(ques,model='deepseek-chat',frequency_penalty=0,session=None):
    """Ask deepseek for a JSON-formatted answer (shared session via decorator).

    Retries up to 5 times on non-200 responses, empty answers, or request
    errors, pausing 1s between attempts.

    :param ques: user question text
    :param model: deepseek model name
    :param frequency_penalty: frequency_penalty forwarded to the API
    :param session: injected by deepseek_session_singleton
    :return: non-empty JSON answer string, or None when all 5 attempts fail
    """
    payload = {
        "messages": [
            {
                "role": "system", 
                "content": "你总是以JSON格式返回数据"
            },
            {
                "content": ques,
                "role": "user"
            }
        ],
        "model": model,
        "frequency_penalty": frequency_penalty,
        "max_tokens": 8192,
        "presence_penalty": 0,
        "response_format": {
            "type": "json_object"
        },
        "stop": None,
        "stream": False,
        "stream_options": None,
        "temperature": 1,
        "top_p": 1,
        "tools": None,
        "tool_choice": "none",
        "logprobs": False,
        "top_logprobs": None
    }
    for _ in range(5):
        try:

            res = session.post('https://api.deepseek.com/chat/completions',json=payload,timeout=120)
            if res.status_code==200:
                js_data=res.json()
                ppd=js_data['choices'][0]['message']['content']
                # blank answers are treated as failures and retried
                if ppd.strip():
                    return ppd
            else:
                print(f'询问状态码出错')

        except Exception as e:
            print(f'询问出错 => {e}')
        # bug fix: back off briefly instead of hammering the API when a
        # request fails fast or returns an empty answer
        time.sleep(1)



def update_ppd(basic_info):
    """Collect deepseek keyword/title match ratings for a shop's pending ad keywords.

    Repeatedly selects ShopeeADKWInfo rows still missing a deepseek rating
    (conversion>0 or CTR>=0.10, updated today, for this browser), rates each
    keyword/title pair via the deepseek chat API on 10 threads, and writes
    the rating back. Loops until the query returns no rows.

    NOTE(review): rows whose API call keeps failing are re-selected every
    round, so a persistent API outage makes this loop run indefinitely —
    confirm whether an upper bound on rounds is wanted.

    :param basic_info: dict with at least 'BrowserID' and 'DpName'
    :return: total number of rows successfully updated
    """
    bid=basic_info['BrowserID']
    dpName=basic_info['DpName']

    session=requests.session()
    # SECURITY NOTE: hard-coded API key in source; consider moving it into config
    session.headers={
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        'Authorization': 'Bearer sk-3c8ae8738d91498bbdcffc76bd0345b1'
    }
    def req_ppd0(kitem,j):
        # Ask deepseek how well keyword `kw` matches title `tit`;
        # returns [row_id, seq_no, product_id, keyword, answer] or None on failure.
        kid,pid,kw,tit=kitem
        url = "https://api.deepseek.com/chat/completions"
        question=f'关键词 {kw} 产品标题 {tit} 关键词跟标题的匹配度高不高，适不适合用来做广告关键词？只需要回答：高度匹配或者比较匹配或者不匹配'

        payload = {
            "messages": [
                {
                    "content": "你是个印尼Shopee广告功能助手",
                    "role": "system"
                },
                {
                    "content": question,
                    "role": "user"
                }
            ],
            "model": "deepseek-chat",
            "frequency_penalty": 0,
            "max_tokens": 2048,
            "presence_penalty": 0,
            "response_format": {
                "type": "text"
            },
            "stop": None,
            "stream": False,
            "stream_options": None,
            "temperature": 1,
            "top_p": 1,
            "tools": None,
            "tool_choice": "none",
            "logprobs": False,
            "top_logprobs": None
        }
        
        try:
            res = session.post(url,json=payload,timeout=60)
            if res.status_code==200:
                js_data=res.json()
                ppd=js_data['choices'][0]['message']['content']
                return [kid,j,pid,kw,ppd]
            else:
                # bug fix: this branch previously referenced an undefined name `e`
                # (the NameError was swallowed by the except below, masking the
                # real HTTP status); report the status code instead
                print(f'第{j}个关键词:《{kw}》,状态码出错 => {res.status_code}')

        except Exception as e:
            print(f'第{j}个关键词:《{kw}》,匹配度请求出错 => {e}')

    sql_str=f''' select id,Product_ID,KW,ptitle from ShopeeADKWInfo where (conversion>0 or CTR>=0.10) 
                and update_time>= CAST(GETDATE() AS DATE) and deepseek is null
                and bid='{bid}' and is_tz=0
        '''
    lcc=1
    suc=0
    while True:
        rs=tms.ExecQuery(sql_str)
        task_count=len(rs)
        logging.info(f'店铺:《{dpName}》,{bid},第一轮循环,需采集{task_count} 个匹配度...')
        if len(rs)==0:
            logging.info(f'店铺:《{dpName}》,{bid},所有匹配度采集完毕')
            break
        with concurrent.futures.ThreadPoolExecutor(max_workers=10) as t:

            tasks=[]
            j=1
            for kitem in rs:
                tasks.append(t.submit(req_ppd0,kitem,j))
                j+=1
            
            for t0 in concurrent.futures.as_completed(tasks):
                res=t0.result()
                cur_time=datetime.datetime.now()
                if res is not None:
                    res_kid,res_j,res_pid,res_kw,res_ppd=res
                    aff=tms.ExecNoQuery('update ShopeeADKWInfo set update_time=?,deepseek=? where [id]=?',(cur_time,res_ppd,res_kid))
                    logging.info(f'店铺:《{dpName}》,{bid},第 {res_j} 个广告词:《{res_kw}》,采集匹配度:<{res_ppd}>,保存状态1:{aff}')
                    suc+=aff
                task_count-=1
                logging.info(f'店铺:《{dpName}》,{bid},剩余采集匹配任务:{task_count}/{len(rs)}')



        logging.info(f'店铺:《{dpName}》,{bid},第{lcc}轮采集匹配度完毕')
        # bug fix: the round counter was never advanced, so the log always said round 1
        lcc+=1
    
    return suc


def dc_ty_pro(itemids):
        """Export the given proshopeeph itemids into a generic-template xlsx.

        Loads template #11105 (path from table mb, column mapping from table lb),
        converts both price fields by rate `phl`, fills one worksheet row per sku
        record starting at row 7, and saves the workbook into a freshly created
        timestamped directory.

        :param itemids: iterable of itemid strings to export
        :return: number of exported product groups
        """
        # template column label -> proshopeeph column for single-valued fields
        smtys={
            '标题':'yntitleRcode',
            '产品ID':'itemid',
            '详细参数':'attributes',
            'SKU图片网址':'skuImg',
            '价格':'skuPirce',
            'SKUID':'skuid',
            '描述':'pdec'
        }

        # template variation columns -> proshopeeph sku option columns
        zjys={
            'Option for Variation 1':'sku1',
            'Option for Variation 2':'sku2',
        }

 

        # phl: price conversion rate, pbs: extra multiplier — units not shown
        # in SOURCE; presumably currency conversion, confirm against template
        phl=420
        pbs=1
        mbid=11105
        rs_mb=tms.ExecQuery('select dfile from mb where did=?',(mbid,))
        mbpath=rs_mb[0][0]
        # lb rows define the template's column order: (..., template_col, source_col, _, fixed_value)
        rys=tms.ExecQuery('select * from lb where ldid=? order by lrow',(mbid,))
        print(f'正在获取模板:{mbid},{len(itemids)} 个数据...')
        wstr=','.join([f"'{_}'" for _ in itemids])
        sqlstr=f'select * from proshopeeph where itemid in ({wstr})'
        rp=tms.ExecQuerydict(sqlstr)
        dcmr=f'导出结果(通用模版)_{getTimeStr()}'
        os.mkdir(dcmr)
        # normalize + group rows per itemid (see getsmtpro)
        smt_rp=getsmtpro(rp)
        wb=openpyxl.load_workbook(mbpath)
        ws=wb['Template']
        row=7  # template data rows start at worksheet row 7
        pskudatas=[]
        for pii_lsit in smt_rp:
            pskudatas.extend(pii_lsit)

        for pro in pskudatas:
            # convert both price fields to the target currency
            pro['price']=int(phl*pro['price']*pbs)
            pro['skuPirce']=int(phl*pro['skuPirce']*pbs)
            # main image first, then any extra images stored as a JSON list
            allimgs=[pro['image']]
            if pro['images']:
                try:
                    allimgs.extend(json.loads(pro['images']))
                except Exception as e:
                    pass

            rol=1
            try:
                for ry in rys:
                    mbcol=ry[1]
                    smtcol=ry[2]
                    gd=ry[4]

                    # sku image column is intentionally left empty
                    if smtcol=='SKU图片网址':
                        rol+=1
                        continue
                    
                    if '无' not in smtcol and smtcol:
                        if smtcol=='SKU名称':
                            if zjys.get(mbcol):
                                ws.cell(row=row,column=rol).value=pro[zjys.get(mbcol)]
                                # variation 1 must never be blank; use a placeholder
                                if zjys.get(mbcol)=='sku1' and not pro['sku1']:
                                    ws.cell(row=row,column=rol).value='style'

                        elif smtcol=='主图网址':
                            # consume the image list one url per image column
                            if len(allimgs)>0:
                                ws.cell(row=row,column=rol).value=allimgs.pop(0)

                        else:
                            ws.cell(row=row,column=rol).value=str(pro[smtys.get(smtcol)])
                    
                    else:
                        # unmapped column: write the fixed value from the template config
                        ws.cell(row=row,column=rol).value=gd
                        # 'size' placeholder only applies when the product has a sku2
                        if gd=='size' and not pro['sku2']:
                            ws.cell(row=row,column=rol).value=''

                    rol+=1
                row+=1
            except Exception as e:
                logging.info(f'第{row}行,第{rol}列填写错误 =>{e}')
        ex_path=f'{dcmr}/广告数据.xlsx'
        wb.save(ex_path)
        print(f'模版:{mbid},成功导出{len(smt_rp)}个产品,到:《{ex_path}》')

        return len(smt_rp)

def add_query_parameters(base_url, params):
    """Merge *params* into base_url's query string, keeping existing parameters.

    Existing parameters with the same name are overridden. List values produce
    one query pair per element. Keys and values are percent-encoded with no
    safe characters.

    Args:
        base_url (str): the original URL.
        params (dict): parameter name -> value (or list of values) to merge in.

    Returns:
        str: the rebuilt URL with the combined query string.
    """
    parsed = urllib.parse.urlparse(base_url)

    # current query params as name -> list-of-values
    query = urllib.parse.parse_qs(parsed.query)

    # overlay the caller's params (values may be scalars or lists)
    query.update(params)

    pieces = []
    for name, raw in query.items():
        values = raw if isinstance(raw, list) else [raw]
        for item in values:
            encoded_name = urllib.parse.quote(str(name), safe="")
            encoded_value = urllib.parse.quote(str(item), safe="")
            pieces.append(f"{encoded_name}={encoded_value}")

    rebuilt = parsed._replace(query="&".join(pieces))
    return urllib.parse.urlunparse(rebuilt)

def get_shopee_rcode():
    """Generate an 8-char alphanumeric random code not yet present in RandomCodes.

    :return: a unique random code string
    """
    characters = string.ascii_letters + string.digits
    while True:
        # bug fix: the code was previously generated once OUTSIDE the loop,
        # so a collision re-queried the same code forever (infinite loop);
        # regenerate a fresh candidate on every iteration
        random_code = ''.join(random.choice(characters) for _ in range(8))
        rs=tms.ExecQuery('select count(*) from RandomCodes where RandomCode=?',(random_code,))
        if rs[0][0]==0:
            return random_code


def read_xls_with_merged_cells(file_path) -> list[dict[str, str]]:
    """Read the first sheet of an .xls file into row dicts keyed by the header row.

    Cells that are blank because they sit inside a merged region inherit the
    merged region's top-left value.

    :param file_path: a filesystem path string, or the file's raw bytes
    :return: one dict per data row (the header row itself is skipped)
    """
    if isinstance(file_path, str):
        workbook = xlrd.open_workbook(file_path, formatting_info=True)
    else:
        workbook = xlrd.open_workbook(file_contents=file_path, formatting_info=True)
    sheet = workbook.sheet_by_index(0)

    # merged regions: [(row_start, row_end, col_start, col_end), ...] (end-exclusive)
    merged = sheet.merged_cells

    rows = []
    for r in range(sheet.nrows):
        record = {}
        for c in range(sheet.ncols):
            value = sheet.cell_value(r, c)

            # blank cell: inherit the top-left value of any merged span covering it
            if value == '':
                for (r0, r1, c0, c1) in merged:
                    if r0 <= r < r1 and c0 <= c < c1:
                        value = sheet.cell_value(r0, c0)
                        break

            # key each value by its header (first-row) label
            record[sheet.cell_value(0, c)] = value
        if r != 0:  # the first row is the header, not data
            rows.append(record)

    return rows

def send_email_qq( recipient_emails, subject, body, sender_name='虾皮项目组'):
    """Send a plain-text email via QQ SMTP over SSL.

    :param recipient_emails: one address string or a list of addresses
    :param subject: email subject
    :param body: plain-text body
    :param sender_name: display name shown in the From header
    :return: True when the mail was handed to the server, False otherwise
    """
    smtp_server='smtp.qq.com'
    port=465
    sender_email=MY_EMAIL_INFO['sender_email']
    sender_password=MY_EMAIL_INFO['sender_password']

    if isinstance(recipient_emails, str):
        recipient_emails = [recipient_emails]

    msg = MIMEText(body, 'plain', 'utf-8')
    # custom display name for the sender
    msg['From'] = formataddr((str(Header(sender_name, 'utf-8')), sender_email))
    msg['To'] = Header(', '.join(recipient_emails), 'utf-8')
    msg['Subject'] = Header(subject, 'utf-8')

    is_send=False
    # bug fix: predefine server so the finally block cannot hit a NameError
    # when SMTP_SSL itself fails to connect
    server=None
    try:
        server = smtplib.SMTP_SSL(smtp_server, port)
        server.login(sender_email, sender_password)
        server.sendmail(sender_email, recipient_emails, msg.as_string())
        print("✅ 邮件发送成功")
        is_send=True
    except Exception as e:
        traceback.print_exc()
        print("❌ 发送失败：", e)
    finally:
        if server is not None:
            try:
                server.quit()
            except Exception:
                # best-effort close; the send result is already decided
                pass
    return is_send

def get_kol(basic_info,knum):
    """Claim up to *knum* unclaimed KOL rows for this browser, guarded by a JSON file lock.

    Spins on 状态锁.json until the '取达人数据' flag is 0, sets it to 1, marks the
    top-N KOLs (>=1000 followers, not yet claimed) with this browser id, then
    resets the flag and returns the claimed rows.

    :param basic_info: dict with at least 'BrowserID' and 'DpName'
    :param knum: maximum number of KOL rows to claim
    :return: list of (id, affiliate_id, username) rows
    """
    bid=basic_info['BrowserID']
    dpName=basic_info['DpName']
    while True:
        with open('状态锁.json','r',encoding='utf-8') as f:

            zt=json.load(f)
        
        kol_zt=zt['取达人数据']

        if kol_zt==0:
            # acquire the file lock
            zt['取达人数据']=1
            with open('状态锁.json','w',encoding='utf-8') as f:
               json.dump(zt,f,ensure_ascii=False,indent=4)
            try:
                # NOTE(review): knum/bid are interpolated into SQL — presumed to
                # come from trusted config; confirm they are never user-supplied
                rs=tms.ExecQuery(f'select top {knum} id,affiliate_id,username from ShopeeKOL where total_follower >=1000 and yao_qing_bid is null order by total_follower desc')
                if len(rs)>0:
                    kidstr=','.join([str(r[0]) for r in rs])
                    aff=tms.ExecNoQuery(f"update ShopeeKOL set yao_qing_bid='{bid}' where id in ({kidstr})")
                    logging.info(f'店铺:《{dpName}》,{bid},成功标记:{aff} 个达人')
            finally:
                # bug fix: always release the lock, even if the DB work raised —
                # previously an exception left the flag stuck at 1 forever
                zt['取达人数据']=0
                with open('状态锁.json','w',encoding='utf-8') as f:
                   json.dump(zt,f,ensure_ascii=False,indent=4)
            
            return rs
        # bug fix: back off while another worker holds the lock instead of
        # busy-spinning on file reads at 100% CPU
        time.sleep(0.5)

async def scroll_down(page, distance=1000, step=100, delay=0.05):
    """Scroll the page down by ~`distance` px in `step`-px increments,
    pausing `delay` seconds after each increment (at least one increment)."""
    remaining = max(1, distance // step)
    while remaining:
        await page.evaluate(f'window.scrollBy(0, {step})')
        await asyncio.sleep(delay)
        remaining -= 1


def remove_chinese(text: str):
    """Strip CJK (U+4E00..U+9FFF) characters from *text*.

    :return: (1, stripped_text) when any such character was present,
             otherwise (0, text) unchanged
    """
    if not re.search(r'[\u4e00-\u9fff]', text):
        return (0, text)
    return (1, re.sub(r'[\u4e00-\u9fff]+', '', text))



def excel_date_to_datetime(excel_date: float) -> datetime.datetime:
    """Convert an Excel serial date (1900 date system) to a datetime."""
    epoch = datetime.datetime(1899, 12, 30)
    return epoch + datetime.timedelta(days=excel_date)


async def clear_input(page:Page, el_ipt):
    """Clear the content of an input element.

    Focuses *el_ipt*, selects everything with Ctrl+A, then deletes it
    with Backspace and waits briefly for the page to settle.

    :raises ValueError: if *el_ipt* is falsy (element not found).
    """
    if not el_ipt:
        raise ValueError(f"未找到 input")

    await el_ipt.click()          # focus the input first

    keyboard = page.keyboard
    # Select-all (Ctrl+A), then delete the selection.
    await keyboard.down('Control')
    await keyboard.press('A')
    await keyboard.up('Control')
    await keyboard.press('Backspace')

    await asyncio.sleep(0.5)

def read_excel_with_merged_from_bytes(content_bytes, sheet_name=None):
    """Parse an .xlsx file given as raw bytes into a list of row dicts.

    Row 1 supplies the column headers. A cell that is blank or belongs to a
    merged range inherits the value of that range's top-left cell (None when
    the cell is simply empty and unmerged).

    :param content_bytes: raw .xlsx file content
    :param sheet_name: sheet to read; defaults to the workbook's first sheet
    :return: list of {header: value} dicts, one per data row
    """
    workbook = openpyxl.load_workbook(BytesIO(content_bytes), data_only=True)
    sheet = workbook[sheet_name or workbook.sheetnames[0]]

    header_cells = next(sheet.iter_rows(min_row=1, max_row=1))
    headers = [c.value for c in header_cells]

    def resolve_merged(cell):
        # Value of the top-left cell of the merged range containing `cell`,
        # or None when the cell is not inside any merged range.
        for rng in sheet.merged_cells.ranges:
            if (rng.min_row <= cell.row <= rng.max_row
                    and rng.min_col <= cell.column <= rng.max_col):
                return sheet.cell(rng.min_row, rng.min_col).value
        return None

    records = []
    for row_cells in sheet.iter_rows(min_row=2):
        record = {}
        for header, cell in zip(headers, row_cells):
            if isinstance(cell, MergedCell) or cell.value is None:
                record[header] = resolve_merged(cell)
            else:
                record[header] = cell.value
        records.append(record)

    return records

def mark_1688_thright(basic_info,offerids,copyright_th,th_live_status):
    """Mark a batch of 1688 offers in S1688Pro as claimed by this browser/shop.

    Sets is_th_big_dc/th_del_count flags and records the claiming browser id,
    shop id, copyright status and live status, then logs requested vs actual
    affected counts.

    :param basic_info: dict with 'BrowserID', 'DpName', 'shopid'
    :param offerids: sized iterable of offer-id strings; blank entries skipped
    :param copyright_th: copyright-check result string to store
    :param th_live_status: live-status string to store
    """
    bid=basic_info['BrowserID']
    dpName=basic_info['DpName']
    shopid=basic_info['shopid']
    aff=0
    # Skip blank ids; keep the original (unstripped) values for the query.
    valid_ids=[offerid for offerid in offerids if offerid.strip()]
    if valid_ids:
        # All values bound as parameters instead of f-string-built SQL,
        # matching the ExecNoQuery(sql, params) convention used elsewhere
        # in this file (injection-safe).
        placeholders=','.join('?' for _ in valid_ids)
        sql_str=f''' update S1688Pro set
                        is_th_big_dc=1,
                        th_dc_bid=?,
                        th_dc_shopid=?,
                        th_del_count=1,
                        copyright_th=?,
                        th_live_status=?
                    where itemid in ({placeholders})
                    '''
        aff=tms.ExecNoQuery(sql_str,[bid,shopid,copyright_th,th_live_status]+valid_ids)

    logging.info(f'店铺:《{dpName}》,{bid},《{th_live_status}-{copyright_th}》,需标记:{len(offerids)},实际标记:{aff}')

def remove_all_punct(text: str) -> str:
    """Replace CJK and ASCII punctuation with spaces, keeping words and digits.

    Runs of resulting whitespace collapse to a single space; leading and
    trailing spaces are removed.
    """
    # One alternation covering both symbol classes:
    #   \u3000-\u303F / \uFF00-\uFFEF -> CJK symbols and full-width forms
    #   the second branch            -> ASCII punctuation
    punct = re.compile(
        r'[\u3000-\u303F\uFF00-\uFFEF]'
        r'|[!"#$%&\'()*+,\-./:;<=>?@[\\\]^_`{|}~]'
    )
    spaced = punct.sub(' ', text)
    # split()/join collapses any whitespace runs and trims the ends.
    return ' '.join(spaced.split())

def is_pname_contain_klist(pname:str,kw_list:list) -> bool:
    """Return True iff every keyword in *kw_list* occurs in *pname*.

    Matching is case-insensitive substring containment; an empty keyword
    list vacuously matches any name.

    :param pname: product name to test
    :param kw_list: list of keyword strings that must all be present
    """
    # Lowercase the name once instead of once per keyword (the original
    # recomputed pname.lower() on every loop iteration).
    pname_lower = pname.lower()
    return all(kw.lower() in pname_lower for kw in kw_list)

# Compiled once at import time: matches any <...> tag-like span.
pattern_html = re.compile(r"<[^>]+>")

def clean_html(text):
    """Return *text* with every HTML/XML tag removed."""
    without_tags = pattern_html.sub("", text)
    return without_tags

def get_big_data_rcode_list(country,tid):
    """Return the random_code list of S1688Pro products eligible for task *tid*.

    tid == 40 selects fresh candidates (translated title/sku, gender filter,
    price band 20-100, factory/company shops, not yet used, blacklist-screened
    in Chinese/Indonesian/English; Malaysia additionally excludes codes already
    moved there). Any other tid selects products whose ShopeeProPerformance row
    matches the task's shop_usage for *country*.

    :param country: target country name, e.g. '马来西亚' (must be a key of
        Country_Sql_Zd_Map)
    :param tid: task id; 40 = big-data pull, otherwise 1-based index into
        Task_Name_dict
    :return: list of random_code values
    """
    # NOTE(review): the original version also queried exchange rates from
    # Houtai and built a price expression (`zd_price`) — reading the rate
    # through the leaked loop variable `gj` (last row of the query) rather
    # than the requested country — but never used any of it in the final
    # SQL. That dead code, and the other unused zd_* lookups, are removed.
    cur_zd_cs=Country_Sql_Zd_Map[country]
    zd_is_fy_sku=cur_zd_cs['zd_is_fy_sku']
    zd_is_fy_tit=cur_zd_cs['zd_is_fy_tit']
    zd_dc_bid=cur_zd_cs['zd_dc_bid']

    # Malaysia also excludes codes already migrated via ShopeeAdsMove.
    country_wystr=''
    if country in ['马来西亚']:

        country_wystr=f'''AND NOT EXISTS (
                SELECT 1 FROM ShopeeAdsMove m
                WHERE m.to_country='{country}' and m.random_code=p.random_code
            ) '''

    if tid==40:
        sql_str=f'''
                SELECT 
                        random_code
                    FROM S1688Pro p
                        WHERE {zd_dc_bid} is null
                        AND {zd_is_fy_sku} = 1 
                        AND {zd_is_fy_tit} = 1
                        AND keword_gender < 4
                        AND p.price between 20 and 100
                        AND (shop_name like '%公司%' or shop_name like '%厂%')
                        AND NOT EXISTS (
                            select 1 from ShopeeProPerformance l
                            where l.shop_usage is not null
                            and l.random_code= p.random_code
                        )
                        AND NOT EXISTS (
                            SELECT 1 FROM Blacklist b
                            WHERE b.word_language = '中文'
                            AND p.pname LIKE '%' + b.word_blacklist + '%'
                        )
                        AND NOT EXISTS (
                            SELECT 1 FROM Blacklist b
                            WHERE b.word_language = '印尼语'
                            AND p.yntitle LIKE '%' + b.word_blacklist + '%'
                        )
                        AND NOT EXISTS (
                            SELECT 1 FROM Blacklist b
                            WHERE b.word_language = '英语'
                            AND p.mltitle LIKE '%' + b.word_blacklist + '%'
                        )
                        {country_wystr}
            '''
    else:
        shop_usage=Task_Name_dict[tid-1]

        sql_str =f''' SELECT  random_code
                    FROM S1688Pro p
                        WHERE {zd_dc_bid} is null
                        AND EXISTS (
                            select 1 from ShopeeProPerformance l where l.shop_usage='{shop_usage}'
                            and l.country='{country}'
                            and p.random_code=l.random_code
                        )
                        '''

    rs_codes=tms.ExecQuery(sql_str)

    return [r[0] for r in rs_codes]

