import sys
import time
from bs4 import BeautifulSoup
import mysql.connector
import threading
from PyQt5.QtWidgets import QWidget, QVBoxLayout, QPushButton, QApplication,QHBoxLayout,QLineEdit,QTextEdit,QDesktopWidget
from PyQt5.QtGui import QTextCursor
from PyQt5.QtCore import pyqtSignal, QObject
import pymysql
from contextlib import contextmanager
import mysql.connector
from mysql.connector import Error as MySQLError
import socket
from proxy_zly import getproxy_zly
# Proxy bootstrap at import time: fetch an initial proxy from the pool and
# reset its usage counter (module-level mutable state).
lastproxy=getproxy_zly()
proxyusetimes=0
from amzxue import amazon_scraper_images, captureamz, parse_dimensions, parse_product_details, parse_product_features, parse_product_rank, parse_sku_variant_details
from amazon_crawler_optimized import AmazonCrawler
from WalmartContentSpinner import process_product_content
print(mysql.connector.__version__)
import queue
from MySQLHelper_xue import MySQLHelper_xue
from followth_node import get_disk_serial_simple,getnodeorder,getnodescount
import threading
from datetime import datetime, timedelta, time as datetime_time
import requests
from lxml import etree
import time
import os
import random
from random import randint
from datetime import datetime
import re
import sys
from queue import Queue
import multiprocessing


import mysql.connector
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QPushButton
from PyQt5.QtCore import QPoint
from xrates import getrate
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtCore import Qt  # 正确导入Qt
from datetime import datetime, timedelta, time as datetime_time
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QPushButton
import sys
import queue
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QTextEdit, QPushButton
from PyQt5.QtCore import pyqtSignal, QObject
import threading
from mysql.connector import Error as MySQLError  # 正确的导入方式
from followth_node import heartnode,getnodeorder,getnodescount   ,get_disk_serial_simple 
from PyQt5.QtCore import pyqtSignal, QObject
from PyQt5.QtWidgets import QApplication, QWidget, QVBoxLayout, QTextEdit, QPushButton
import json
from followth_node import get_disk_serial_simple,getnodeorder,getnodescount
from uuid import uuid4
a=234+23
import requests
from lxml import etree
import time
import os
import random
from random import randint
import threading
import mysql.connector
from datetime import datetime
from PyQt5.QtCore import pyqtSignal, QObject
from MySQLHelper_xue import MySQLHelper_xue
from public  import except_to_db
import logging
import traceback
import MySQLdb
from public import getexcept
from MySQLdb.cursors import DictCursor
# Error log is written next to the executable (Windows-style relative path).
logging.basicConfig(filename=r'.\error.log', level=logging.ERROR)
from queue import Queue
# Module-level scratch holder for the last captured traceback text.
error_details=''
print(f"a = {a}")
# Separator used when joining hierarchical category names.
splitchar = '->'
# Browser-like default headers for outgoing HTTP requests.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Language': 'en-US,en;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1'
}
# Column lists used to build INSERT statements against app_product_xue.
# "_complex" carries the full detail-page field set; "_simple" only the
# fields available from a search-results page.
INSERT_FIELDS_complex = [
        'username', 'productid', 'shop_type', 'storename', 'keywords',
        'title', 'link', 'main_image', 'price', 'currency',  
        'rating', 'reviews_count', 'sales_info',   'delivery_info',
        'brand', 'category',   'in_stock',  
        'product_dimensions',       'length' ,'width','height',
        'lengthunit','weight','weightunit','main_rank','best_sellers_rank',   
        'maincategory','sub_rank', 'sun_subcategory', 'item_model_number',  
        'detail1', 'detail2', 'detail3', 'detail4', 'detail5',
        'image1','image2','image3','image4','image5','image6','image7',
        'Department', 'publishdate', 'Manufacturer', 'last_detaildownload_date',
        'downloaddetailatonce', 'timestamp', 'downloadtimes', 'opdate' ,'newtitle'
    ]
INSERT_FIELDS_simple = [
        'username', 'productid', 'shop_type', 'storename', 'keywords',
        'title', 'link', 'main_image', 'price', 'currency',  
        'rating', 'reviews_count', 'sales_info',   'delivery_info',
        'brand', 'category',   'in_stock',          
        'downloaddetailatonce', 'timestamp', 'downloadtimes', 'opdate' ,
        'maincategory',  'sun_subcategory','newtitle'
    ]


# Column lists used to build UPDATE statements (same complex/simple split).
UPDATE_FIELDS_complex = [
        'title',  'pic', 'price',   'rating',
        'reviews', 'sales_info', 
          'detail1', 'detail2', 'detail3', 'detail4', 'detail5',
        'main_rank', 'maincategory', 'sub_rank', 'sun_subcategory', 
        'opdate'  ,'newtitle'
    ]

UPDATE_FIELDS_simple = [
        'title',  'pic', 'price',   'rating',
        'reviews', 'sales_info', 'delivery_info','brand','category',
        'keywords',  'opdate'  ,'newtitle'
       
    ]
INSERT_FIELDS_simple_category = [
        'category', 'parent_level', 'level',  'url', 'image_url', 'alt_text', 'currentpage','parent_category','leaf_category','need_download'
    ]     
# Named-parameter placeholders (':field'), padded with spaces — this is the
# placeholder style consumed by MySQLHelper_xue.
named_params_category = [f' :{field} ' for field in INSERT_FIELDS_simple_category]

# Upsert for the category tree. need_download is absent from the UPDATE list,
# so re-inserting a category keeps its existing download flag.
insert_sql_category = f"""
        INSERT INTO app_category_xue 
        ({', '.join(INSERT_FIELDS_simple_category)})
        VALUES ({', '.join(named_params_category)})
        ON DUPLICATE KEY UPDATE 
            category=VALUES(category),
            parent_level=VALUES(parent_level),
            level=VALUES(level),
            url=VALUES(url),
            image_url=VALUES(image_url),
            alt_text=VALUES(alt_text),
            currentpage=VALUES(currentpage),
            parent_category=VALUES(parent_category),
            leaf_category=VALUES(leaf_category),
            opdate=now() 
    """
  
def except_to_db(self, dbhelper, error_details):
    """Best-effort error reporter: sanitize the message, surface it in the
    UI, and persist it to the crawlexcept table. Never raises.

    self: the Window instance — this is a module-level helper, so callers
    pass the widget explicitly to reach its signal sender.
    dbhelper: MySQLHelper_xue-style helper with executewithparams().
    error_details: raw traceback/exception text.
    """
    try:
        sanitized = getexcept(f'{error_details} {datetime.now()}')
        self.sender.textUpdateSignal.emit(sanitized)

        # Named-parameter insert; the helper binds the dict below.
        sql = "INSERT INTO crawlexcept(opdate, exceptcon) VALUES(NOW(), :error_msg)"
        dbhelper.executewithparams(sql, {"error_msg": sanitized})
    except Exception as e:
        # Error logging must never take the crawler down.
        print(f"处理异常出错: {str(e)}")
        
  
class MessageSender(QObject):
    """Qt signal carrier: marshals log text from worker threads to the UI
    thread (connected to the QTextEdit's append slot in Window.__init__)."""
    textUpdateSignal = pyqtSignal(str)
 
class Window(QWidget):
    def __init__(self):
        """Build the keyword-download RPA window and start the crawler thread.

        Fixes over the original:
        - super().__init__() runs before any attribute access (required for
          QWidget subclasses in PyQt5);
        - the IP-discovery fallback now assigns diskid (the original assigned
          an unused local `ip`, leaving diskid empty on failure);
        - the socket is only closed if it was created (avoids NameError);
        - the crawler thread is a daemon and is no longer join()ed here —
          downloaddata loops forever, so joining blocked the constructor and
          froze the GUI before the event loop ever ran.
        """
        super().__init__()
        self.session = requests.Session()

        layout_h = QHBoxLayout()
        self.dbhelper = MySQLHelper_xue()
        self.resume_button = QPushButton("启动任务")
        self.resume_button.clicked.connect(self.resume_thread)
        self.pause_button = QPushButton("停止任务")
        self.pause_button.clicked.connect(self.pause_thread)
        self._pause_event = threading.Event()
        self._pause_event.clear()
        self.setGeometry(300, 300, 800, 500)
        self.setWindowTitle('关键词下载产品-RPA')

        text_layout = QHBoxLayout()
        self.textmemo = QTextEdit()
        text_layout.addWidget(self.textmemo)

        # Thread-safe UI logging: worker threads emit, the connected slot
        # appends on the GUI thread.
        self.sender = MessageSender()
        self.sender.textUpdateSignal.connect(self.textmemo.append)
        layout_h.addWidget(self.resume_button)

        self.textmemo.append(
            '启动就可以下载关键词的对应数据')
        main_layout = QVBoxLayout()
        main_layout.addLayout(layout_h)
        main_layout.addLayout(text_layout)
        self.setLayout(main_layout)  # attach the layout before showing
        self.show()
        self.thread = []

        # Best-effort discovery of this node's LAN IP, used as the node id.
        diskid = ''
        s = None
        try:
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("8.8.8.8", 80))  # no packets sent; just picks a route
            diskid = s.getsockname()[0]
        except Exception:
            # Fallback: hostname resolution.
            diskid = socket.gethostbyname(socket.gethostname())
        finally:
            if s is not None:
                s.close()

        # Daemon thread so the Qt event loop stays responsive and the process
        # can exit cleanly; downloaddata never returns on its own.
        tis = threading.Thread(target=self.downloaddata, args=(diskid,), daemon=True)
        self.thread.append(tis)
        tis.start()

        self.resume_thread()

    def pause_thread(self):
        """Stub: pausing is not implemented; kept so the stop-button signal
        hookup stays valid. Intentionally does nothing observable."""
        _placeholder = 2

    def resume_thread(self):
        # for i in range(2):
        #     self.thread[i].start()
        self.resume_button.setEnabled(False)
        # self._pause_event.set()
    
    def getitemidfromurl(self, url):
        """Extract a numeric item id from an eBay-style URL or digit string.

        If *url* is already all digits it is returned as-is; otherwise the
        digits after '/itm/<slug>/' are returned. Returns None when neither
        pattern matches. (The original ran the /itm/ search once up front and
        immediately discarded the result — that dead code is removed.)
        """
        # Bare numeric string: the id itself was passed in.
        match = re.search(r'^\d+$', url)
        if match:
            print(url, "是纯数字")
            return match.group(0)
        # Otherwise look for the /itm/<slug>/<digits> pattern.
        match = re.search(r'/itm/.*?/(\d+)', url)
        if match:
            itemid = match.group(1)
            print(url, "不是纯数字,提取后为", itemid)
            return itemid
        return None
    def downloaddata(self,diskid):   
        """Main crawler loop: run forever, invoking one processing pass every
        1-2 seconds; per-pass errors are persisted to the DB and the loop
        keeps going. Only an error outside the inner try ends the loop.

        diskid: node identity string passed through to process_1 for
        work-sharding across nodes.
        """
        # while True:
        #     total_category_noprocessed=self.parse_grid_category_2_db()
        #     if total_category_noprocessed==0:
        #         break

        # while True:
        #     self.parse_category_leaf()  
        
        # Lock is local, so it only serializes passes within this thread.
        lock = threading.Lock()
        inat = 1
        try:
            AmazonCrawler_instance = AmazonCrawler()
            waitseconds=5
            while True:
                self.sender.textUpdateSignal.emit(f'爬虫程序几秒钟执行一次')

                # Trace each pass with the current local time.
                print('爬虫程序5分钟执行一次')
                print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
                lock.acquire()

                try:
                    self.downloadrate2()
                    self.process_1(diskid,AmazonCrawler_instance)
                except Exception as e:
                    # A failed pass is recorded, then the loop continues.
                    error_details = traceback.format_exc()
                    traceback.print_exc()
                    except_to_db(self,self.dbhelper,error_details)                   
                finally:
                    lock.release()

                time.sleep(random.randint(1,2))

        except Exception as e:
            print(e)
            error_details = traceback.format_exc()
            except_to_db(self,self.dbhelper,error_details)
            inat = inat + 1
    def downloadrate2(self):
        """Placeholder for the exchange-rate refresh step; currently a
        no-op that always returns None."""
        return None



    def getwhitelist(self, shoptype):
        """Return the store whitelist for *shoptype*.

        The shopwhite_list DB lookup is currently disabled; this always
        returns an empty list.
        """
        return []

    
    def dictexists(self, my_list, target_str):
        """Return True when any entry of *my_list* occurs inside *target_str*.

        Matching is case-insensitive and ignores all spaces on both sides;
        an empty list yields False.
        """
        haystack = target_str.lower().replace(' ', '')
        return any(
            entry.lower().replace(' ', '') in haystack
            for entry in my_list
        )
 

    def getexcept(self, emsg):
        """Strip characters that would break naive SQL/log embedding from
        *emsg* and return the trimmed result.

        Fix: the original list was missing a comma after "'", which made
        Python concatenate it with '<' into the single element "'<" — so
        lone quotes (and lone '<') were never removed. Duplicate entries
        and the redundant membership check / second '"' replace are gone.
        """
        spec = [
            "'", '<', '>', '$', '*', '&', '%', '.', '"', '+', '-', '@', '\\',
            '(', ')', ' ', '#', '//',
        ]
        for sp in spec:
            emsg = emsg.replace(sp, '')
        return emsg.strip()

    def getuser_bykeywords(self, keywords,shoptype):
        """Return distinct usernames subscribed to *keywords* for *shoptype*.

        Returns [] on any database error (the error is also persisted via
        except_to_db).

        NOTE(review): despite the comment below, this interpolates the values
        straight into the SQL string — injection-prone if keywords can contain
        quotes; consider parameterizing via the dbhelper's params API.
        """
        try:
            
            # String-interpolated query (NOT parameterized — see NOTE above).
            query = f"SELECT DISTINCT username FROM app_keywords_xue WHERE keywords = '{keywords}' and shop_type='{shoptype}' "
            # Fetch all matching rows.
            results = self.dbhelper.query(query)

            # Collect usernames, skipping NULL/empty values.
            usernames = [row['username'] for row in results if row['username']]
            return usernames

        except Exception as e:
            print(f"Error in getuser_bykeywords: {str(e)}")
            error_details = traceback.format_exc()
            except_to_db(self,self.dbhelper,error_details)
            return []
            

    def get_configure( self):
            
        result = self.dbhelper.query("SELECT * FROM configure")
        return result

    def process_1(self,diskid,AmazonCrawler_instance):
        """One scheduling pass for this node.

        Order of work:
        1. refresh this node's heartbeat and mod-based shard position;
        2. pick up to 3 never-downloaded keywords (else up to 3 stale ones,
           older than the configurable hoursdiff);
        3. if no keywords are pending, instead download detail pages for up
           to 8 products lacking details and feed them to process_2;
        4. otherwise crawl each picked keyword in a worker thread.

        diskid: node identity (LAN IP) used for mod(id, node_count) sharding.
        AmazonCrawler_instance: shared page fetcher reused across requests.
        """
        current_time_stamp = time.time()
        # Convert the timestamp to a local-time struct.
        local_time_struct = time.localtime(current_time_stamp)
        # Extract hour / minute / second.
        hour = local_time_struct.tm_hour
        minute = local_time_struct.tm_min
        intnewkeywords=1
        # Flags whether this pass handles keywords newly added today.
        second = local_time_struct.tm_sec
        
        hrandom = random.randint(1, 30)
        print(f"当前时间是：{hour}时{minute}分{second}秒")
        current_date = datetime.now().date()
        target_date = current_date + timedelta(days=0)
        # Time-window anchors (t20/t23 are actually 13:00/16:00 despite names).
        t8=datetime.combine(target_date, datetime_time(8, 0, 0))
        t11=datetime.combine(target_date, datetime_time(11, 0, 0))
        t20=datetime.combine(target_date, datetime_time(13, 0, 0))
        t23=datetime.combine(target_date, datetime_time(16, 0, 0))
        t1=None
        t2=None
        if hour <=11 and hour >=8:
                t1=t8
                t2=t11
        elif hour <=23 and hour >=17:
                t1=t20
                t2=t23     
        current_time = datetime.now()
        print(current_time) 
        configure=self.get_configure()
        # Defaults, overridable from the configure table below.
        intnewkeywordssleep=1
        intoldkeywrodssleep=29
        inthoursdiff=20
        for row in configure:
            print(row)
            if row['cfgcode']=='newkeywordssleep':
                intnewkeywordssleep=int(row['cfgvalue'])
            if row['cfgcode']=='oldkeywordssleep':
                intoldkeywrodssleep=int(row['cfgvalue'])
            if row['cfgcode']=='hoursdiff':
                inthoursdiff=int(row['cfgvalue'])
        count=0      
        nodeorder=1  
        # Heartbeat + shard position for multi-node operation.
        heartnode(self.dbhelper,diskid)
        nodeorder=getnodeorder(self.dbhelper,diskid)
        print('节点顺序',nodeorder)
        count=getnodescount(self.dbhelper)   
        print('节点数量',count)
        query = f"""SELECT distinct * FROM app_keywords_xue where   download_date is null    """
        if count>1:
            query=query + f" and  mod(id,{count} )={nodeorder}"
        query+=' limit 3'
        print(query)

        # Fetch all pending-keyword rows.
        results_a = self.dbhelper.query(query)
        print(results_a)
        if len(results_a) ==0:
            intnewkeywords=0
            # No fresh keywords: fall back to ones stale by hoursdiff hours.
            query = f"""SELECT DISTINCT  *
                        FROM app_keywords_xue  
                        WHERE 
                              TIMESTAMPDIFF(HOUR, download_date, NOW()) >={inthoursdiff}    # inthoursdiff
                    """
            if count>1:
                query=query + f" and  mod(id,{count} )={nodeorder}"
            query+=' limit 3'
            print(query)
          
            intsleeptime=30
            results_a = self.dbhelper.query(query)
        data_to_insert = []

        capkeywords_amz = []
        links = []
        exceptitems = []
        link_withkey = []
        # Load currency conversion rates once per pass.
        sql = 'select currency_simplename,rate from rates'
        rates = self.dbhelper.query(sql)
        ratedict = []
        for rowr in rates:
            ratedict.append({"currency_simplename": rowr['currency_simplename'], "rate": rowr['rate']})

        amzkeyworddicts=[]
        if len(results_a)==0:
            # Still nothing to crawl by keyword: download detail pages for
            # products that lack them (limit 8 per pass).
            sql=f'select * from app_product_xue where   last_detaildownload_date is null and detail1 is null '
            if count>1:
                sql=sql + f" and  mod(id,{count} )={nodeorder}"
            sql+=' limit 8'
            results_b=self.dbhelper.query(sql)
            productdetails=[]
            for row in results_b:

                if row['link']=='':
                    productdata=None
                else:
                    productdetails.append({'productid':row['productid'],'username':row['username'],'link':row['link']})
            newproductdatas=[]
            for index,row in enumerate(results_b):
                linkx=row['link']
                asin=row['productid']
                if row['link']=='':
                    # No link stored: mark the row processed and skip it.
                    sql='update app_product_xue set last_detaildownload_date=now() where productid="'+row['productid']+'"'
                    self.dbhelper.execute(sql)
                    linkx=f'https://www.amazon.com/dp/{asin}'

                    continue

                # Seed the detail record from the existing DB row.
                iasindata={'username':row['username']}
                iasindata['last_detaildownload_date']=datetime.now()
                iasindata['productid']=row['productid']
                iasindata['in_stock']=row['in_stock']

                iasindata['shop_type']='amazon' 
                iasindata['keywords']=row['keywords']
                iasindata['downloaddetailatonce']=0
                iasindata['currency']=row['currency']
                iasindata['downloadtimes']=0
                iasindata['opdate']=datetime.now()
                iasindata['sales_info']=row['sales_info']
                iasindata['price']=row['price']
                iasindata['reviews_count']=row['reviews_count']
                iasindata['rating']=row['rating']
                iasindata['main_image']=row['main_image']
                iasindata['link']=row['link']
                iasindata['title']=row['title']
                iasindata['main_image']=row['main_image']
                iasindata['brand']=row['brand']
                iasindata['price']=row['price']
                iasindata['category']=row['category']
                iasindata['storename']=row['storename']
                print(index,time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),linkx) 
                newproductdata=AmazonCrawler_instance.request_page(linkx)
                if newproductdata=='-1':
                    continue
                soup = BeautifulSoup(newproductdata.text,'html.parser')
                a=None
                b=None
                c=None
                d=None
 

                # Each parser runs best-effort; failures are printed and the
                # corresponding section falls back to defaults below.
                try:
                    a=parse_product_details(soup)
                    if   'main_ranking' not in a:
                        a=parse_product_details(soup)
                except Exception as e:
                    print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                    traceback.print_exc()
                try:
                    a=parse_product_details(soup)
                except Exception as e:
                    print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                    traceback.print_exc()
                try:
                    # Five-bullet feature description.
                    b=parse_product_features(soup)
                except Exception as e:
                    print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                    traceback.print_exc()
                try:
                    c=parse_sku_variant_details(soup) # variants
                except Exception as e:
                    print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                    traceback.print_exc()
                try:
                    d=amazon_scraper_images(soup) # images
                except Exception as e:
                    print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                    traceback.print_exc()
                mainrank= {
                            'main_rank': 0,
                            'maincategory': '',
                            'sub_rank': 0,
                            'sun_subcategory':''
                        }
                try:
                    if a and 'main_ranking' in a:
                        mainrank=a['main_ranking']
                    else:
                        if a==None:
                            a={}
                            mainrank = {
                                    'main_rank': 0,
                                    'maincategory': '',
                                    'sub_rank': 0,
                                    'sun_subcategory':''
                                }
                    a.update(mainrank)    
                except Exception as e:
                    if 'main_ranking' not in a:
                        result_ranking = {
                            'main_rank': 0,
                            'maincategory': '',
                            'sub_rank': 0,
                            'sun_subcategory':''
                        }
                        a.update(result_ranking)

                    print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                    traceback.print_exc()
                subrank={}
                try:
                    subrank={}
                    if 'sub_rankings' in a:
                        subrank=a['sub_rankings']
                except Exception as e:
                    print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                    traceback.print_exc()
                # a.update(mainrank)
                # a.update(subrank)
                ranks=parse_product_rank(a['best_sellers_rank'])
                a.update(ranks)
                dimensions={
                        'length': 0,
                        'width': 0,
                        'height': 0,
                        'weight': 0,
                        'lengthunit': 'cm',
                        'weightunit': 'kg'
                    }
                try:
                    dimensions=parse_dimensions(a['product_dimensions'])
                except Exception as e:
                    print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                    traceback.print_exc()
                    try:
                        dimensions=parse_dimensions(a['product_dimensions'])
                    except Exception as e:
                        print(f"解析产品 {iasindata['link']} 时出错: {str(e)}")
                        traceback.print_exc()
                if 'product_dimensions' not in a:
                    a['product_dimensions']=''
                a.update(dimensions)
                iasindata.update(a)

                # Map feature bullets into detail1..detail5, padding with ''.
                dictb={}
                for iindex,  itemx in enumerate(b,start=1):
                    dictb[f'detail{iindex}'] = itemx
                for iindex in range ( 5  ):
                   if f'detail{iindex+1}' not in   dictb:
                       dictb[f'detail{iindex+1}'] = ''
                iasindata.update(dictb)
                iasindata.update(d)


                newproductdatas.append(iasindata)  
            
            # Feed the freshly downloaded details through process_2 under a
            # synthetic "downloadasin" key.
            key2='downloadasin'
            shop_type='amazon'

            usernames=self.getuser_bykeywords(key2 ,shop_type)
            linkkey={
                            "key": key2,
                            "shoptype": shop_type,
                            "links": newproductdatas,
                            'username':['nousername']
                        }
            
            self.process_2(ratedict, linkkey, 'amazon', 0)
      
            
                    
                
        # Keyword path: collect the picked keywords for the capture thread.
        for index,results in enumerate( results_a):

            key2 = results['keywords']
            shop_type = results['shop_type']
            downloaddetailatonce=results['downloaddetailatonce'] if results['downloaddetailatonce'] else False
         
            amzkeyworddicts.append({'keywords':key2,'downloaddetailatonce':downloaddetailatonce})
            current_time_stamp = time.time()
            # Convert the timestamp to a local-time struct.
            local_time_struct = time.localtime(current_time_stamp)
            # Extract hour / minute / second.
            hour = local_time_struct.tm_hour
            minute = local_time_struct.tm_min
            second = local_time_struct.tm_sec
            hrandom = random.randint(1, 30)
            print(f"当前：{hour}时{minute}分{second}秒->",f'{index}/{len(results_a)} 个关键词记录{key2}')
            self.sender.textUpdateSignal.emit(f'当前：{hour}时{minute}分{second}秒-> {index+1}/{len(results_a)} 个关键词记录{key2}')
            # print(f'{index}/{len(results_a)} 个关键词记录{key2}')
            a = 1
            b = 0
        shop_type='amazon'
        # Crawl all picked keywords in a worker thread, waiting for it here.
        tis4 = threading.Thread(target=self.capturedata,args=(amzkeyworddicts, ratedict,shop_type,current_time_stamp,AmazonCrawler_instance,))
 
        tis4.start()
 
        tis4.join()
          
                
                
    def capture_category(self):
        """Stub kept for interface compatibility; category capture is
        disabled and this always reports success (1)."""
        return 1

    def capturedata(self,amzkeyworddicts, ratedict,shop_type ,current_time_stamp,AmazonCrawler_instance,  ):   
        """Crawl up to 10 search-result pages per keyword and persist them.

        Each page's links go through process_2; a page yielding fewer than
        40 links is treated as the last page for that keyword. Afterwards
        the keyword row is stamped with download_date so process_1 does not
        re-pick it. Errors are recorded per keyword and the loop continues.

        NOTE(review): the UPDATE below interpolates the keyword straight
        into the SQL string — injection-prone if keywords contain quotes.
        """
        waitseconds=5
        for key3 in amzkeyworddicts:
            try:
                for ix in range(10):
                    key2=key3['keywords']
                    downloaddetailatonce=key3['downloaddetailatonce']
                    
                    link1 = captureamz(key3,ix+1,AmazonCrawler_instance)
                    if len(link1)<40:
                        break
                    usernames=self.getuser_bykeywords(key2 ,shop_type)

                    linkkey={
                            "key": key2,
                            "shoptype": shop_type,
                            "links": link1,
                            'username':usernames
                        }

                    self.process_2(  ratedict, linkkey,shop_type,downloaddetailatonce)

                sql=f'update app_keywords_xue set download_date=now() where keywords="{key2}" and shop_type="{shop_type}"'
                print(sql)
                self.dbhelper.execute(sql)
            except Exception as e:
                print(f"抓取amazon.com,出错：{e}")
                error_details = traceback.format_exc()      
                except_to_db(self,self.dbhelper,error_details)
                print(error_details)
        time.sleep(1)      
        
        
    def extract_integer(self, s):
        """Best-effort conversion of a sales/number string to an int.

        Accepts None (-> 0), int/float (truncated), or any string: the first
        number is extracted, thousands separators are honored, and a 'K'
        anywhere in the string multiplies by 1000.

        Fixes over the original: '1,234' now yields 1234 (was 1 — the comma
        cut the match short) and '1.5K' yields 1500 (was 1000 — the decimal
        part was dropped before scaling).
        """
        if s is None:
            return 0
        if isinstance(s, (int, float)):
            return int(s)

        str_s = str(s)
        # Number with optional ,###-groups and optional decimal part.
        match = re.search(r'\d+(?:,\d{3})*(?:\.\d+)?', str_s)
        if match:
            num = float(match.group().replace(',', ''))
            if 'K' in str_s.upper():  # 'K' suffix means thousands
                num *= 1000
            return int(num)
        return 0
            
    def process_2(self, ratedict, link, shop_type, downloaddetailatonce):
        keywords=link['key']
        try:
            # 初始化数据库连接
            """处理并存储产品数据"""
            
            # 获取当前时间
            now = datetime.now()
            # 定义时间戳的格式
            timestamp_format = '%Y%m%d%H%M%S'
            # 将当前时间按照指定格式转换为时间戳字符串
            timestamp = now.strftime(timestamp_format)

            print(timestamp)
    
            current_time_stamp = time.time()
            # 将时间戳转换为本地时间的结构体
            local_time_struct = time.localtime(current_time_stamp)
            # 提取小时、分钟、秒
            hour = local_time_struct.tm_hour
            minute = local_time_struct.tm_min
            intnewkeywords=1
            #当前处理的是否当日新增关键词
            second = local_time_struct.tm_sec
            
            hrandom = random.randint(1, 30)
            print(f"当前时间是：{hour}时{minute}分{second}秒")
            current_date = datetime.now().date()
            target_date = current_date + timedelta(days=0)
            t8=datetime.combine(target_date, datetime_time(8, 0, 0))
            t11=datetime.combine(target_date, datetime_time(11, 0, 0))
            t20=datetime.combine(target_date, datetime_time(13, 0, 0))
            t23=datetime.combine(target_date, datetime_time(16, 0, 0))
            # 获取待处理数据（假设linksinner是原始数据列表）
             
            # 配置预期字段数（根据模型字段数调整）
            expected_fields = 45  # 示例值，需按实际模型字段数修改
            valid_batch = []
            error_log = []

            # 数据验证和清洗
            raw_products=link['links']
            usernames=    link['username']
            raw_batches = raw_products
            total_processed = 0
            price_updates = []
            update_data=[]
            insert_data=[]


            # 预加载现有ID集合
             
            downloadtimes=0
            opdate=datetime.now()
            batch_list=[]
            new_prices_list=[]
            new_rank_list=[]
            for batch in raw_batches:
                # 快速分拣插入/更新数据
                try:
                    if len(usernames)>0:
                        batch['username']=usernames[0]
                    else:
                        batch['username']='admin'
                    batch['keywords']=keywords
                    pattern = r'^[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?$'
                    if 'price' in batch and  bool(re.fullmatch(pattern, str(batch['price']))):
                        batch['price']=float(str(batch['price']).replace(',',''))
                    else:
                        batch['price']=0
                    batch['downloaddetailatonce']=downloaddetailatonce
                    batch['timestamp']=timestamp
                    batch['downloadtimes']=downloadtimes
                    batch['opdate']=datetime.now()
                    batch['shop_type']=shop_type
                    if 'main_rank' not in batch:
                        batch['main_rank']=0
                    if 'sub_rank' not in batch:
                        batch['sub_rank']=0 
                    if 'sales_info' not in batch:
                        batch['sales_info']=0 
                    # batch['sales_info']=0 #这里逻辑不对
                    if 'sales_info' in batch and batch['sales_info']:
                        batch['sales_info']=self.extract_integer(batch['sales_info']) 
                    if 'image_list' in batch and  batch['image_list']:
                        imgage_list=batch['image_list']
                        for ix in range(7):
                            batch[f'image{ix+1}']=''
                            if    ix<len(imgage_list):  
                                batch[f'image{ix+1}']=imgage_list[ix]
                    # 批量插入新记录
                        del batch['image_list']
                    if batch['productid']=='' and 'ASIN' in batch and batch['ASIN']!=None and  batch['ASIN']!='':
                        batch['productid']=batch['ASIN']
                    batch_list.append(batch)
                    new_prices_list.append({'productid':batch['productid'],'price':batch['price']})
                    rank_info={
                        'productid':batch['productid'],
                        'new_main_rank':batch['main_rank'] if batch['main_rank'] else 0,
                        'new_sub_rank':batch['sub_rank'] if batch['sub_rank'] else 0,
                        'new_sales_info':batch['sales_info'] if batch['sales_info'] else 0,
                        'new_rating':batch['rating'] if batch['rating'] else 0,
                        'new_reviews_count':batch['reviews_count'] if batch['reviews_count'] else 0,
                        'opdate':datetime.now()
                    }
                     
                    new_rank_list.append(rank_info)
                except Exception as e:
                    print(f"处理亚马逊产品数据时出错: {e}")
                    traceback.print_exc()
            self._bulk_insert( batch_list)
            product_ids = [p['productid'] for p in new_prices_list]
            rank_changelist=[]
            asins=''
            for productid in product_ids:
                asins=asins+f"'{productid}',"
            asins=asins[:-1]
            if new_rank_list:
                sql = f"SELECT productid, main_rank, sub_rank, sales_info,rating,reviews_count FROM app_product_xue WHERE productid IN ({ asins})"
                old_rank_list ={row['productid']: {'main_rank':row['main_rank'],'sub_rank':row['sub_rank'],'sales_info':row['sales_info'],'rating':row['rating'],'reviews_count':row['reviews_count']} for row in self.dbhelper.query(sql)}  
                for newrank_info in new_rank_list:
                    if newrank_info['productid'] in old_rank_list:
                        old_rank=old_rank_list[newrank_info['productid']]
                        old_rank['main_rank']=0 if old_rank['main_rank']==None else old_rank['main_rank']
                        old_rank['sub_rank']=0 if old_rank['sub_rank']==None else old_rank['sub_rank']   
                        old_rank['rating']=0 if old_rank['rating']==None else old_rank['rating']   
                        newrank_info['new_rating']=0 if newrank_info['new_rating']==None else newrank_info['new_rating']                 
                        if  int(newrank_info['new_main_rank']) != int(old_rank['main_rank']) or int(newrank_info['new_sub_rank']) != int(old_rank['sub_rank'])  or int(newrank_info['new_sales_info']) != int(old_rank['sales_info'])   or float(newrank_info['new_rating']) != float(old_rank['rating']) or int(newrank_info['new_reviews_count']) != int(old_rank['reviews_count']):  
                            print(f"产品{newrank_info['productid']} 有变化")
                            rank_change={
                                'productid':newrank_info['productid'],
                                'old_main_rank':old_rank['main_rank'] if old_rank['main_rank'] else 0,
                                'new_main_rank':newrank_info['new_main_rank'] if newrank_info['new_main_rank'] else 0,
                                'old_sub_rank':old_rank['sub_rank'] if old_rank['sub_rank'] else 0,
                                'new_sub_rank':newrank_info['new_sub_rank'] if newrank_info['new_sub_rank'] else 0,
                                'old_sales_info':old_rank['sales_info'] if old_rank['sales_info'] else 0,
                                'new_sales_info':newrank_info['new_sales_info'],
                                'old_rating':old_rank['rating'] if  old_rank['rating'] else 0,
                                'new_rating':newrank_info['new_rating'] if newrank_info['new_rating'] else 0,
                                'old_reviews_count':old_rank['reviews_count'] if old_rank['reviews_count'] else 0,
                                'new_reviews_count':newrank_info['new_reviews_count'] if newrank_info['new_reviews_count'] else 0,
                                'opdate':datetime.now()
                            }
                            rank_changelist.append(rank_change)
            

            if rank_changelist:
                # self._log_rank_changes(rank_changelist)
                print(f"产品{newrank_info['productid']} 有变化")
            if new_prices_list:
                # 批量查询旧价格
                asins=''
                for productid in product_ids:
                    asins=asins+f"'{productid}',"
                asins=asins[:-1]
                sql = f"SELECT productid, price FROM app_product_xue WHERE productid IN ({ asins})"
                old_prices = {row['productid']: row['price'] for row in self.dbhelper.query(sql)}               
                for p in new_prices_list:
                    if not p['price'] or p['price']=='':
                        p['price']=0
                        continue
                price_changes = [ ]

                for p in new_prices_list:
                    if p['productid'] in old_prices :
                        print(f"产品{p['productid']} ,asin找到了,在核对新旧价格，",old_prices[p['productid']],p['price'])
                        if  float(old_prices[p['productid']]) != float(p['price']):
                            print(f"产品{p['productid']} ,价格有变化")
                            price_changes.append({'productid': p['productid'], 'old_price':old_prices[p['productid']], 'new_price': p['price']})
                 
                if price_changes:
                    self._log_price_changes(price_changes)
                self.sender.textUpdateSignal.emit(f"处理完成，共处理{total_processed}条记录，其中{len(price_updates)}条价格更新")
        except Exception as e:
            self.sender.textUpdateSignal.emit(f"处理失败: {str(e)}")
            traceback.print_exc()

        for username in usernames:
            update_keyword_sql = f"""
                INSERT INTO sku_keywords_history (username, keywords, opdate)
                VALUES ('{username}','{keywords}', NOW())
            """
            self.dbhelper.execute(update_keyword_sql)
            print(f"{keywords}->{shop_type} 批量处理完成，时间：{hour}:{minute}:{second}")
    def _log_rank_changes(self,  rank_changelist):
        if len(rank_changelist)>0:
            """批量记录价格变动"""
            insert_sql = """
                INSERT INTO app_rankchange_xue     
                (productid, old_main_rank, new_main_rank, old_sub_rank, new_sub_rank, old_sales_info, new_sales_info, old_rating, new_rating, old_reviews_count, new_reviews_count, opdate)
                values(:productid, :old_main_rank, :new_main_rank, :old_sub_rank, :new_sub_rank, :old_sales_info, :new_sales_info, :old_rating, :new_rating, :old_reviews_count, :new_reviews_count, now())
                """
            self.dbhelper.execute(insert_sql, rank_changelist)


    def _bulk_insert(self,  datas):
        """Bulk upsert scraped product dicts into app_product_xue.

        Normalizes every row in place (key renames, truncation, defaults for
        empty numeric strings), then runs one INSERT ... ON DUPLICATE KEY
        UPDATE with named parameters for the whole batch.

        Args:
            datas: list of scraped product dicts; MUTATED in place.

        NOTE(review): INSERT_FIELDS_simple / INSERT_FIELDS_complex appear to be
        module-level field lists — confirm they match the SQL columns below.
        """


        if not datas:
            return 
        # Column set is chosen from the FIRST row only: a batch is assumed to
        # be homogeneous; rows with 'detail1' came from a full detail scrape.
        if 'detail1' not  in datas[0]:
            named_params = [f' :{field} ' for field in INSERT_FIELDS_simple]
            INSERT_FIELDS=INSERT_FIELDS_simple
        else:
            named_params = [f' :{field} ' for field in INSERT_FIELDS_complex]
            INSERT_FIELDS=INSERT_FIELDS_complex
        # del data['image_list']
        for data in datas:
            # Backfill optional text columns so every row binds all parameters.
            if 'newtitle' not in data:
                data['newtitle']=''
            if 'maincategory' not in data:
                data['maincategory']=''
            if 'sun_subcategory' not in data:
                data['sun_subcategory']=''
            # Drop scrape-only keys that have no matching table column.
            if 'main_ranking' in data:
               del data['main_ranking']
            if 'sub_rankings' in data:
                del data['sub_rankings']
            if 'hires_image' in data:
                del data['hires_image']
            if 'ASIN' in data:
                del data['ASIN']
            # Order matters: copy seller -> storename BEFORE truncating the
            # storename and BEFORE deleting 'seller' further down.
            if 'seller'  in data:
                data['storename']=data['seller']
            if 'storename' in data and  len(data['storename'])>60:
                data['storename']=data['storename'][:60]
            if 'image_url' in data :
                data['main_image']=data['image_url']
                del data['image_url']
            
            if 'Customer Reviews' in data :
                del data['Customer Reviews']
            if 'seller' in data :
                del  data['seller']
            # Empty numeric strings would break the numeric columns — default
            # them to zero-valued strings.
            if 'price' in data and data['price']=='':
                print(f"价格为空：{data['productid']}")
                data['price']='0.0'  
            if 'reviews_count' in data and data['reviews_count']=='':
                data['reviews_count']='0'

            if 'rating' in data and data['rating']=='':
                data['rating']='0'

            if 'sales_info' in data and data['sales_info']=='':
                data['sales_info']='0'
            if 'delivery_info' not in data:
                data['delivery_info']=''
            # NOTE(review): this second delivery_info branch is a no-op
            # (rewrites '' to '') — presumably left over from an edit.
            if 'delivery_info' in data and data['delivery_info']=='':
                data['delivery_info']=''   
          

        insert_sql = f"""
        INSERT INTO app_product_xue 
        ({', '.join(INSERT_FIELDS)})
        VALUES ({', '.join(named_params)})
        ON DUPLICATE KEY UPDATE 
            title=VALUES(title),
            price=VALUES(price),
            opdate=VALUES(opdate),
            main_rank=VALUES(main_rank),
            maincategory=VALUES(maincategory),
            sub_rank=VALUES(sub_rank),
            sun_subcategory=VALUES(sun_subcategory),
            detail1=VALUES(detail1),
            detail2=VALUES(detail2),
            detail3=VALUES(detail3),
            detail4=VALUES(detail4),
            detail5=VALUES(detail5),
            download_date=VALUES(download_date),
            rating=VALUES(rating),
            reviews_count=VALUES(reviews_count),
            sales_info=VALUES(sales_info),
            delivery_info=VALUES(delivery_info),
            last_detaildownload_date=VALUES(last_detaildownload_date)
    """
        

        self.dbhelper.execute(insert_sql, datas)

     

    def _is_price_changed(self,  product_id, new_price):
        """快速价格变动检测"""
         
        result = self.dbhelper.query(f"SELECT price FROM app_product_xue WHERE productid = '{product_id}'")                
        return result and float(result[0]['price']) != float(new_price)
        

    def _log_price_changes(self,  changes):
        """批量记录价格变动"""
        insert_sql = """
            INSERT INTO app_pricechange_xue 
            (productid, old_price, new_price,opdate)
            SELECT :productid, price, :new_price ,now()
            FROM app_product_xue 
            WHERE productid =:productid
        """
        
        self.dbhelper.execute(insert_sql, changes)

 
     
    def parse_grid_category_1(self):
        """Scrape one pending level-0 category grid and persist its level-1 children.

        Selects root categories (level 0, need_download=1, currentpage!=999999),
        scrapes the grid tiles of the first usable one, stores the children via
        insert_sql_category, marks the parent finished (currentpage=999999) and
        returns the parent's children as re-read from the database.

        NOTE(review): the ``return`` sits inside the for loop, so only the
        FIRST pending root category is handled per call; the caller
        (parse_grid_category_2_db) merges the result and runs repeatedly.

        Returns:
            list[dict] | None: child categories of the processed parent, or
            None when no root category is pending.
        """
        html = ''
        select_sql = f"""SELECT * FROM app_category_xue 
                        WHERE level  = 0 and need_download=1
                        AND   currentpage != 999999   order by opdate desc  
                    """
        search_items_result = self.dbhelper.query(select_sql)
        for s in search_items_result:
            if s['url'] == '' or s['url'] == None:
                continue
            url = s['url']
            sid = s['id']
            # BUGFIX: initialize before the try so a failed fetch/parse leaves
            # an empty result set instead of raising NameError (or reusing a
            # previous iteration's rows) at the sqllist build below.
            results = []
            try:
                html = self.get_page(url)
                soup = BeautifulSoup(html, 'html.parser')
                grid_elements = soup.find_all('div', class_='_Y29ud_bxcGridColumn_J5gfU _Y29ud_bxcGridColumn2Of12_2NLCp')
                for element in grid_elements:
                    # One tile per sub-category: anchor (url + label) and image.
                    item_data = {
                        'category': '',
                        'alt_text': '',
                        'url': '',
                        'image_url': '',
                        'parent_level': '0',
                        'level': '1',
                        'parent_category': s['id'],
                        'currentpage': 1,
                        'leaf_category': False,
                        'need_download': 1
                    }
                    link = element.find('a')
                    if link:
                        item_data.update({
                            'url': 'https://www.amazon.com' + link.get('href', ''),
                            'category': link.get('aria-label', '')
                        })
                        img = link.find('img')
                        if img:
                            item_data.update({
                                'image_url': img.get('src', ''),
                                'alt_text': img.get('alt', '')
                            })
                    results.append(item_data)
            except Exception as e:
                print(f"Error fetching page: {e}")
                traceback.print_exc()
            # Persist children and mark the parent as fully expanded in one
            # multi-statement call.
            sqllist = []
            sqllist.append({
                'query': insert_sql_category,
                'params': results
            })
            sqllist.append({
                'query': f"UPDATE app_category_xue SET currentpage=999999,opdate=now(),need_download=1 WHERE id={sid}",
                'params': {}
            })
            self.dbhelper.execute_multi(sqllist)
            sql = f"SELECT * FROM app_category_xue WHERE parent_category={sid} "
            results_2 = self.dbhelper.query(sql)
            parsed_items_1 = [{
                'id': p['id'],
                'alt_text': p['alt_text'],
                'url': p['url'],
                'category': p['category'],
                'parent_level': p['parent_level'],
                'parent_category': p['parent_category'],
                'level': p['level'],
                'currentpage': p['currentpage'],
                'image_url': p['image_url'],
                'leaf_category': p['leaf_category']
            } for p in results_2]
            return parsed_items_1

    def parse_grid_category_2_db(self):
        """Expand non-leaf categories one level deeper and persist the results.

        Best run in a loop until all categories have been traversed. Reads
        pending non-leaf categories, merges in the level-1 items produced by
        parse_grid_category_1(), filters blacklisted branches, scrapes each
        remaining category page for sub-category cards, and bulk-inserts both
        the discovered children and the (updated) parents.

        Returns:
            int: number of categories still pending when the scan started
            (0 means nothing left to process).
        """
        total_category_noprocessed = 0
        try:
            all_results = []
            search_items = []
            blacklist_level1 = ['Furniture', 'bed', 'vacuums', 'premium']
            # blacklist_level1=[]
            select_sql = f"""
                        SELECT * FROM app_category_xue WHERE level  !=0 and leaf_category !=1 and     
                        need_download=1 AND    currentpage != 999999 
                        """
            category_1_result = self.dbhelper.query(select_sql)
            total_category_noprocessed = len(category_1_result)
            category_1_ids = [{
                'id': p['id'],
                'alt_text': p['alt_text'],
                'url': p['url'],
                'category': p['category'],
                'parent_level': p['parent_level'],
                'parent_category': p['parent_category'],
                'level': p['level'],
                'currentpage': p['currentpage'],
                'image_url': p['image_url'],
                'leaf_category': p['leaf_category'],
                'need_download': p['need_download']
            } for p in category_1_result]
            parsed_items_1 = self.parse_grid_category_1()
            if parsed_items_1 == None:
                parsed_items_1 = []
            # Merge DB-pending rows with the freshly scraped level-1 items.
            for cs in category_1_ids:
                if cs not in parsed_items_1:
                    parsed_items_1.append(cs)
            intparentid = 0
            # Filter in reverse so deletions do not disturb earlier indices.
            for index in range(len(parsed_items_1) - 1, -1, -1):
                item = parsed_items_1[index]
                inblack = False
                if item['category'] == '':
                    del parsed_items_1[index]
                    continue
                if 'Living room' in item['category']:
                    del parsed_items_1[index]
                    # NOTE(review): this `break` aborts the entire filtering
                    # loop at the first 'Living room' entry; preserved as-is,
                    # but a `continue` may have been intended.
                    break
                for bs in blacklist_level1:
                    if bs.lower() in item['category'].lower():
                        inblack = True
                        del parsed_items_1[index]
                        break
                if inblack:
                    continue
            for index, parent_item in enumerate(parsed_items_1):
                intparentid = parent_item['parent_category']
                print("Category:", parent_item['category'])
                print("URL:", parent_item['url'][:90])
                print("Image URL:", parent_item['image_url'])
                print("Alt Text:", parent_item['alt_text'])
                print("-" * 50)
                # BUGFIX: the original tested the stale loop variable `item`
                # (left over from the filtering loop above) instead of the
                # current parent.
                if parent_item['url'] == '':
                    continue
                url = parent_item['url']
                html = self.get_page(url)
                category_1 = parent_item['category']
                results = self.parse_category_cards(html) if html != None else []
                # No child cards means this parent is itself a leaf.
                if len(results) == 0:
                    parent_item['leaf_category'] = 1
                    parsed_items_1[index]['leaf_category'] = 1
                else:
                    parsed_items_1[index]['leaf_category'] = 0
                    parsed_items_1[index]['currentpage'] = 999999
                # BUGFIX: filter first instead of deleting while enumerating
                # (the original skipped the element following each deletion).
                # Renaming the inner index also stops it shadowing `index`.
                results = [r for r in results if 'url' in r and r['url'] != None]
                for ridx, result in enumerate(results):
                    result['parent_level'] = parent_item['level']
                    result['parent_category'] = parent_item['id']
                    result['level'] = str(int(parent_item['level']) + 1)
                    if 'currentpage' not in result or result['currentpage'] == '':
                        result['currentpage'] = 1
                    result['currentpage'] = 1 if not result['currentpage'] or result['currentpage'] == '' else result['currentpage']
                    print(ridx, '上级的类目编号', parent_item['id'], f"本级Category: {result.get('name')}")
                    category_2 = result.get('name')
                    result['category'] = category_2
                    result['url'] = result.get('url')
                    result['image_url'] = result.get('image_url')
                    result['alt_text'] = result.get('alt_text')
                    result['need_download'] = 1
                    result['leaf_category'] = 0
                    if 'name' in result:
                        del result['name']
                    if 'dynamic_images' in result:
                        del result['dynamic_images']
                    print(f"URL: {result.get('url')}")
                if len(results) > 0:
                    all_results.extend(results)
            # Parents are re-inserted without their DB id (insert_sql_category
            # has no id column).
            for item in parsed_items_1:
                del item['id']
            all_results.extend(parsed_items_1)

            # Drop malformed records before the bulk insert.
            for index in range(len(all_results) - 1, -1, -1):
                result = all_results[index]
                if result == {} or 'category' not in result or 'url' not in result or result['url'] is None:
                    print(f"删除的记录：{index} {result}")
                    del all_results[index]

            self.dbhelper.execute(insert_sql_category, all_results)
        except Exception as e:
            print(f"Error fetching page: {e}")
            traceback.print_exc()
        return total_category_noprocessed
    def parse_category_leaf(self,default_category_2={}):
        """Crawl product listings for ONE pending leaf category and persist them.

        Picks the single oldest leaf category still marked need_download, walks
        its paginated search-result pages, parses every product tile, and hands
        batches to self.process_2. Progress (currentpage) is checkpointed to
        app_category_xue every second page so a crash can resume.
        """
        # default_category_2 defaults to empty so the caller may omit it.
        # NOTE(review): mutable default argument ({}) is shared between calls;
        # it is unused in the visible body — confirm before changing.
        select_sql=f"""
                    SELECT *
                    FROM app_category_xue  
                    WHERE  currentpage != 999999 and need_download=1  and leaf_category=1
                     order by opdate asc limit 1
                    """
        search_items_result = self.dbhelper.query(select_sql)
        search_items=[]
        sid=0
        slevel=0
        # whether this is a leaf category
        intleafcategory=1
        for item in search_items_result:
            sid=item['id']
            slevel=item['level']
            print(item)
            sitem=dict(item)
            # "Virtual" categories (Deals / Best Sellers / ... landing lists)
            # are not crawled: mark them done and stop this run.
            if  sitem['category'] in ['New Arrivals','Deals','Best Sellers','Movers & Shakers','New Releases','Best Sellers in Toys & Games','Best Sellers in Sports & Outdoors','Best Sellers in Automotive','Best Sellers in Baby','Best Sellers in Beauty & Personal Care','Best Sellers in Electronics','Best Sellers in Home & Kitchen','Best Sellers in Industrial & Scientific','Best Sellers in Pet Supplies','Best Sellers in Software','Best Sellers in Video Games','Best Sellers in Women\'s Fashion','Best Sellers in Men\'s Fashion','Best Sellers in Health & Household','Best Sellers in Grocery & Gourmet Food','Best Sellers in Tools & Home Improvement','Best Sellers in Sports & Fitness','Best Sellers in Musical Instruments','Best Sellers in Pet Supplies','Best Sellers in Office Products','Best Sellers in Arts, Crafts & Sewing','Best Sellers in Baby','Best Sellers in Beauty & Personal Care','Best Sellers in Electronics','Best Sellers in Home & Kitchen','Best Sellers in Industrial & Scientific','Best Sellers in Pet Supplies','Best Sellers in Software','Best Sellers in Video Games','Best Sellers in Women\'s Fashion','Best Sellers in Men\'s Fashion','Best Sellers in Health & Household','Best Sellers in Grocery & Gourmet Food','Best Sellers in Tools & Home Improvement','Best Sellers in Sports & Fitness','Best Sellers in Musical Instruments','Best Sellers in Pet Supplies','Best Sellers in Office Products','Best Sellers in Arts, Crafts & Sewing']:
                sql=f'update app_category_xue set need_download=0 where id={sid}'
                self.dbhelper.execute(sql)
                return 0
            search_items.append(sitem)
         
        for index,result in enumerate(search_items):
            if result['url']=='':
                continue
            url=result['url']
            resultid=result['id']
            result['currentpage']=1  if 'currentpage' not in result or result['currentpage']=='' else result['currentpage']
            category_namex  =result.get('category') if result.get('category') else ''

            if int(result['currentpage'])>1:
                # Resuming: rewrite the "next page" link to point at the saved
                # page number instead of page 2.
                html=self.get_page(url)
                soup = BeautifulSoup(html, 'html.parser')
                page=int(result['currentpage'])

                next_page = soup.select_one('a.s-pagination-next')
                nextpage_url='https://www.amazon.com'+ next_page['href'] if next_page else ''
                nextpage_url=nextpage_url.replace('page=2','page='+str(page)).replace('ref=sr_pg_1',f'ref=sr_pg_{page-1}')
            else:
                html=self.get_page(url)
                soup = BeautifulSoup(html, 'html.parser')
                page=int(result['currentpage'])
                next_page = soup.select_one('a.s-pagination-next')
                nextpage_url='https://www.amazon.com'+ next_page['href'] if next_page else ''
            next_page_link = soup.find('a', class_="s-pagination-next")

                # extract the href attribute
            href_value = 'https://www.amazon.com'+ next_page_link['href'] if next_page_link else 'No next page link found'
            # When products exist but no next-page link is found, forcibly append ref=sr_pg_1
            product_divs = soup.find_all('div', attrs={'role': 'listitem'})
            if len(product_divs)==0:  
                product_divs=soup.find_all('div', class_="_octopus-search-result-card_style_apbSearchResultItem__2-mx4")
            if page==1 and     nextpage_url=='':
                # Landing page without pagination: follow the
                # "see all results" link first.
                link_tag = soup.find('a', id="apb-desktop-browse-search-see-all")

                # extract the href attribute
                href_value ='https://www.amazon.com'+ link_tag['href'] if link_tag else 'No link found'

                # print the href value
                print(f"Extracted href: {href_value}")
                html=self.get_page(href_value)
                soup = BeautifulSoup(html, 'html.parser')
                next_page = soup.select_one('a.s-pagination-next')
                nextpage_url='https://www.amazon.com'+ next_page['href'] if next_page else ''
            product_divs = soup.find_all('div', attrs={'role': 'listitem'})
            products=[]

            if  len(product_divs)>0:
                category_2=result.get('category')
                print(f"Category: {category_2}")
                print(f"URL: {nextpage_url}")
                page_count = 1       
                while True:
                    # NOTE(review): when next_page is None (last page) the
                    # ['href'] index below raises TypeError before the later
                    # `if not next_page: break` can run — confirm intended.
                    next_page = soup.select_one('a.s-pagination-next')
                    next_page_url = 'https://www.amazon.com' + next_page['href']
                    print(page_count,next_page_url)
                    product_divs = soup.find_all('div', attrs={'role': 'listitem'})
                    for asinindex, item in enumerate(product_divs):
                        # print(asinindex,item)
                        # Primary ASIN source: the /dp/ segment of the tile's link.
                        product_link_tag = item.select_one('a', class_='a-link-normal s-no-hover s-underline-text s-underline-link-text s-link-style a-text-normal')
                        product_link ='https://www.amazon.com/' + product_link_tag['href'] if product_link_tag else None
                        asin = product_link.split('/dp/')[1].split('/')[0] if product_link and '/dp/' in product_link else ''
                        if asin=='':
                            # Fallback ASIN source: data-csa-c-item-id attribute.
                            # NOTE(review): this rebinds product_divs (used by
                            # the "< 10 products" pagination check below) to
                            # the tile's inner divs — confirm intended.
                            product_divs = item.find_all('div', attrs={'data-csa-c-item-id': True})
                            if product_divs and 'data-csa-c-item-id' in product_divs[0]:
                                asin = product_divs[0]['data-csa-c-item-id'].replace('amzn1.asin.1.', '')
                            else:
                                print("No ASIN found.")
                        number_text = item.find('span', class_='a-size-base s-underline-text').text.replace(',','') if item.find('span', class_='a-size-base s-underline-text') else '0'
                        if product_link and len(product_link) > 750:
                            product_link = product_link[:750]

                        print(asinindex+1,"Product Link:", asin)
                        main_price = item.find('span', class_='a-price-whole').text +  item.find('span', class_='a-price-fraction').text if item.find('span', class_='a-price-whole') and item.find('span', class_='a-price-fraction') else ''
                        main_price = main_price.replace('$','').strip() if main_price else ''
                        sales_span = item.find('span', class_='a-size-base a-color-secondary')

                        sales_text = sales_span.text if sales_span else "0"
                        sales_text=sales_text.replace(',','').strip()
                        # Parse "2K+ bought in past month"-style counters.
                        match = re.search(r'(\d+\.?\d*)([KMB])?\+', sales_text)
                        number=0
                        if match:
                            number = float(match.group(1))
                            # Check the suffix and adjust the number accordingly
                            if match.group(2) == 'K':
                                number *= 1_000
                            elif match.group(2) == 'M':
                                number *= 1_000_000
                            elif match.group(2) == 'B':
                                number *= 1_000_000_000

                            print(f"Approximately {int(number)} items were bought in the past month.")
                        else:
                            print("No matching pattern found.")
                        delivery_span = item.find('span', {'aria-label': lambda x: x and 'FREE delivery' in x and 'shipped by Amazon' in x})
                        delivery_text = delivery_span.text if delivery_span else "文本未找到"
                        part1 = 'FREE delivery'
                        part2 = 'shipped by Amazon'
                        if part1.lower() in delivery_text.lower() and part2.lower() in delivery_text.lower():
                            delivery_text = 'free delivery shipped by Amazon' 
                        image_links = [img['src'] for img in item.find_all('img') if 'src' in img.attrs]
                        imagelink=image_links[0] if image_links else ''
                        print(image_links)
                        title=item.select_one('h2.a-size-base-plus span').get_text(strip=True) if item.select_one('h2.a-size-base-plus span') else ''
                        # Derive brand and a shuffled title from the raw title.
                        result_ai=process_product_content(title, '')
                        brand=result_ai['origin_brand']
                        newtitle=result_ai['shuffled_title']
                        # NOTE(review): 'price' appears twice in this literal;
                        # the later duplicate silently wins (same value here).
                        product_info ={
                            'productid':asin,
                            'title': title,
                            'newtitle': newtitle,
                            'price': main_price,
                            'currency': 'US',  # currency unit
                            'image_url': imagelink,
                            'link': product_link,
                            'rating': item.select_one('i.a-icon-star-small').get_text(strip=True).split()[0] if item.select_one('i.a-icon-star-small') else None,
                            'reviews_count': number_text,
                            'delivery_info': delivery_text,
                            'price': main_price,
                            'sales_info': number,
                            'seller': '',
                            'maincategory': result['parent_category'],  # category field
                            'category': category_2,
                            'sun_subcategory': category_2,
                            'in_stock': True,
                            'brand': brand  # brand field
                        }
                        print(f"已解析产品：{product_info['title']}")
                        products.append(product_info)
                    # finished reading one page
                    if len(product_divs) < 10:
                        break
                    if not next_page:
                        break
                    print(datetime.now(),len(products),'当前有这么多记录了')
                    
                    page_count += 1
                    # Checkpoint every second page: flush the batch and save
                    # the current page number so a crash can resume here.
                    if page_count%2 ==0:
                        linkkey={
                            "key": f'category_{category_namex}',
                            "shoptype": 'amazon',
                            "links": products,
                            'username':'admin'
                            }
                        self.process_2(  [], linkkey,'amazon',0)
                        resultid=result['id']
                        curpage=result['currentpage']
                        
                        sql2=f"UPDATE app_category_xue SET currentpage={page_count},leaf_category=1,opdate=now() WHERE id={resultid}"
                        self.dbhelper.execute(sql2)
                        # persist result to database
                        products=[]
                    html=self.get_page(nextpage_url)
                    soup = BeautifulSoup(html, 'html.parser')
                    match = re.search(r'page=(\d+)', next_page_url)
                    page =int( match.group(1)) if match else None
                    print(f"提取的页面号: {page}")
                    result['currentpage']=int(page)-1
                    # nextpage_url=nextpage_url.replace('page=2','page='+str(page+1)).replace('ref=sr_pg_1',f'ref=sr_pg_{page}')
                    # html = self.get_page(next_page_url)
                    # soup = BeautifulSoup(html, 'html.parser')
                    
                    print("***"*30)
                # finished reading this category: mark it done and flush the
                # remaining batch.
                sql2=f"UPDATE app_category_xue SET currentpage=99999,leaf_category=1,opdate=now() WHERE id={resultid}"
                self.dbhelper.execute(sql2)
                if len(products)>0:
                    linkkey={
                            "key": f'category_{category_namex}',
                            "shoptype": 'amazon',
                            "links": products,
                            'username':'admin'
                            }
                    self.process_2(  [], linkkey,'amazon',0)
                    resultid=result['id']
                    curpage=result['currentpage']
                    page_count += 1
                    
                    # persist result to database
                    products=[]

                result['currentpage']=str(int(result['currentpage']) +1)
                # NOTE(review): next_page['href'] raises TypeError when
                # next_page is None — same hazard as at the loop top.
                next_page = soup.select_one('a.s-pagination-next')
                next_page_url = 'https://www.amazon.com' + next_page['href']
                html=self.get_page(nextpage_url)
                soup = BeautifulSoup(html, 'html.parser')
                print(page_count,next_page)
                print(f"Image URL: {result.get('image_url')}")
                print("---"*30)
    
    def parse_category_cards(self, html):
        """Extract category entries from an Amazon carousel HTML fragment.

        Each returned dict may carry 'url', 'image_url', 'dynamic_images' and
        always carries 'name'; cards without a visible name span are dropped.
        """
        soup = BeautifulSoup(html, 'html.parser')

        parsed = []
        # Carousel cards are <li> elements whose class attribute is exactly
        # this two-token string (bs4 matches the full attribute value here).
        for card in soup.find_all('li', class_='a-carousel-card dcl-carousel-element'):
            # The name span is mandatory — a card without one is skipped entirely.
            name_tag = card.find('span', class_='a-size-base-plus')
            if name_tag is None:
                continue

            entry = {}

            # Category link (relative href, made absolute).
            anchor = card.find('a', class_='a-color-base')
            if anchor:
                entry['url'] = 'https://www.amazon.com' + anchor.get('href', '')

            # Thumbnail; some cards also embed responsive sources in
            # the data-a-dynamic-image attribute.
            image = card.find('img')
            if image:
                entry['image_url'] = image.get('src', '')
                dyn = image.get('data-a-dynamic-image', '')
                if dyn:
                    entry['dynamic_images'] = dyn

            entry['name'] = name_tag.text.strip()
            parsed.append(entry)

        return parsed
    def get_page(self, url):
        """Fetch *url* via the shared session and return the body on HTTP 200.

        Any non-200 status or network/timeout error yields None; errors are
        reported to stdout so callers can treat None as "page unavailable".
        """
        try:
            resp = self.session.get(url, headers=headers, timeout=30)
            # Only a clean 200 counts as success; everything else is dropped.
            return resp.text if resp.status_code == 200 else None
        except Exception as e:
            # Best-effort fetch: log and swallow, caller handles the None.
            print(f"Error fetching page: {e}")
            return None
             

if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = Window()
    win.show()
    # heartnode(win.dbhelper,get_disk_serial_simple())
    sys.exit(app.exec_())

