import json
import os
import time
import tkinter as tk
from threading import Thread
from tkinter import scrolledtext, ttk
from urllib.parse import quote

import openpyxl
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

class PDDPriceCrawlerApp:
    """Tkinter GUI that scrapes a Pinduoduo mobile search-result page with
    Selenium (titles, prices, descriptions) and exports the rows to Excel.

    The scrape runs on a daemon worker thread; UI updates from that thread
    are marshalled back to the Tk main loop via ``root.after``.
    """

    def __init__(self, root):
        """Build the window and pre-configure Selenium Chrome options.

        Args:
            root: the ``tk.Tk`` root window to attach the UI to.
        """
        self.root = root
        self.root.title("拼多多价格和描述提取工具")
        self.root.geometry("900x700")
        self.root.configure(bg="#f5f5f5")

        # Chrome flags: disable GPU/WebGL paths that are flaky under
        # automation, and mask the automation fingerprint so the mobile
        # site serves normal pages.
        self.chrome_options = Options()
        self.chrome_options.add_argument("--disable-gpu")
        self.chrome_options.add_argument("--no-sandbox")
        self.chrome_options.add_argument("--disable-dev-shm-usage")
        self.chrome_options.add_argument("--disable-software-rasterizer")
        self.chrome_options.add_argument("--disable-webgl")
        self.chrome_options.add_argument("--disable-features=VizDisplayCompositor")
        self.chrome_options.add_argument("--ignore-certificate-errors")
        self.chrome_options.add_argument("--disable-blink-features=AutomationControlled")
        self.chrome_options.add_argument("user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36")
        # self.chrome_options.add_argument("--headless")  # headless mode: hide the browser window

        # Live WebDriver instance while a crawl is running, else None.
        self.driver = None
        # JSON dump of cookies for a logged-in Pinduoduo session.
        self.cookie_file = r"G:\py\美团\pdd_cookies.json"
        # Example search URL shown (read-only) in the UI; the actual crawl
        # URL is rebuilt from the keyword entry in crawl_data().
        self.url = "https://mobile.pinduoduo.com/search_result.html?search_key=%E6%84%9F%E5%86%92%E7%81%B5&search_met_track=history&search_type=goods&source=index&options=3&refer_search_met_pos=0&refer_page_el_sn=99887&refer_page_name=login&refer_page_id=10169_1757122553288_h7sdfs7qdz&refer_page_sn=10169"
        # Default Excel output path; the editable UI entry can override it.
        self.output_file = "pdd_data122.xlsx"
        # Search keyword, read from the input entry at crawl time.
        self.keyword = ""
        self.setup_ui()

    def setup_ui(self):
        """Lay out all widgets: title, URL/cookie/output entries, keyword
        input, action buttons, progress bar, result text area, status bar."""
        title_label = tk.Label(self.root, text="拼多多价格和描述提取工具", font=("Arial", 18, "bold"),
                              fg="#ff453a", bg="#f5f5f5")
        title_label.pack(pady=20)

        desc_label = tk.Label(self.root, text="提取首页class='_1AsY9c6Q'的价格和描述，及class='_3ANzdjkc _2kQI8OPk'的标题，并导出到Excel",
                             font=("Arial", 12), bg="#f5f5f5", fg="#666")
        desc_label.pack(pady=5)

        url_frame = tk.Frame(self.root, bg="#f5f5f5")
        url_frame.pack(pady=10, padx=20, fill="x")

        tk.Label(url_frame, text="拼多多首页URL:", font=("Arial", 10), bg="#f5f5f5").pack(anchor="w")
        self.url_var = tk.StringVar(value=self.url)
        url_entry = tk.Entry(url_frame, textvariable=self.url_var, font=("Arial", 10), width=70, state="readonly")
        url_entry.pack(pady=5, fill="x")

        cookie_frame = tk.Frame(self.root, bg="#f5f5f5")
        cookie_frame.pack(pady=10, padx=20, fill="x")

        tk.Label(cookie_frame, text="Cookies文件路径:", font=("Arial", 10), bg="#f5f5f5").pack(anchor="w")
        self.cookie_var = tk.StringVar(value=self.cookie_file)
        cookie_entry = tk.Entry(cookie_frame, textvariable=self.cookie_var, font=("Arial", 10), width=70, state="readonly")
        cookie_entry.pack(pady=5, fill="x")

        output_frame = tk.Frame(self.root, bg="#f5f5f5")
        output_frame.pack(pady=10, padx=20, fill="x")

        tk.Label(output_frame, text="输出Excel文件:", font=("Arial", 10), bg="#f5f5f5").pack(anchor="w")
        # Editable: update_keyword() reads this back into self.output_file
        # before saving, so user edits actually take effect.
        self.output_var = tk.StringVar(value=self.output_file)
        output_entry = tk.Entry(output_frame, textvariable=self.output_var, font=("Arial", 10), width=70)
        output_entry.pack(pady=5, fill="x")

        # Keyword input area.
        # NOTE(review): this LabelFrame is parented to output_frame, not
        # self.root — looks unintentional, but kept to preserve the layout.
        input_frame = ttk.LabelFrame(output_frame, text="输入区域", padding=10)
        input_frame.pack(fill=tk.X, pady=(0, 10))

        ttk.Label(input_frame, text="请输入关键词:").pack(anchor="w")

        self.input_var = tk.StringVar(value=self.keyword)
        input_entry = ttk.Entry(input_frame, textvariable=self.input_var, width=70)
        input_entry.pack(pady=5, fill="x")

        btn_frame = tk.Frame(self.root, bg="#f5f5f5")
        btn_frame.pack(pady=15)

        self.start_btn = tk.Button(btn_frame, text="开始提取", command=self.start_crawling,
                                  bg="#ff453a", fg="white", font=("Arial", 12), width=15)
        self.start_btn.pack(side="left", padx=10)

        self.clear_btn = tk.Button(btn_frame, text="清除结果", command=self.clear_results,
                                  bg="#666", fg="white", font=("Arial", 12), width=15)
        self.clear_btn.pack(side="left", padx=10)

        self.progress = ttk.Progressbar(self.root, mode="indeterminate")
        self.progress.pack(pady=10, padx=20, fill="x")

        result_frame = tk.Frame(self.root, bg="#f5f5f5")
        result_frame.pack(pady=10, padx=20, fill="both", expand=True)

        tk.Label(result_frame, text="提取结果:", font=("Arial", 12, "bold"), bg="#f5f5f5").pack(anchor="w")
        self.result_text = scrolledtext.ScrolledText(result_frame, height=20, width=100, font=("Arial", 10))
        self.result_text.pack(pady=10, fill="both", expand=True)

        self.status_var = tk.StringVar(value="就绪")
        status_bar = tk.Label(self.root, textvariable=self.status_var, relief="sunken", anchor="w",
                             font=("Arial", 10), bg="white", fg="#333")
        status_bar.pack(side="bottom", fill="x")

    def _set_status(self, text):
        """Set the status bar text, safely from any thread.

        Tkinter widgets/variables must only be touched from the main loop;
        ``root.after`` queues the update there.
        """
        self.root.after(0, self.status_var.set, text)

    def update_keyword(self):
        """Sync the keyword and output filename from the UI entries.

        Bug fix: this previously overwrote the output-filename entry with the
        keyword, destroying the Excel path shown to the user, and the editable
        output entry was never read back — user edits were silently ignored.
        Now the keyword comes from the input entry and the output path from
        the output entry.
        """
        self.keyword = self.input_var.get()
        output = self.output_var.get().strip()
        if output:
            self.output_file = output
        self._set_status(f"关键词已更新: {self.keyword}")

    def load_cookies(self):
        """Load the cookie list from ``self.cookie_file``.

        Returns:
            The parsed JSON content (expected: list of cookie dicts), or
            None on any failure — the error is shown in the result pane.
        """
        try:
            if os.path.exists(self.cookie_file):
                with open(self.cookie_file, 'r', encoding='utf-8') as f:
                    return json.load(f)
            raise FileNotFoundError(f"Cookies文件 {self.cookie_file} 不存在")
        except Exception as e:
            # Marshal the error display back to the Tk main loop.
            self.root.after(0, self.show_error, f"加载Cookies失败: {str(e)}")
            return None

    def save_to_excel(self, data):
        """Write scraped rows to ``self.output_file`` as a fresh workbook.

        Args:
            data: list of dicts with keys index/title/price/description.
        """
        wb = openpyxl.Workbook()
        ws = wb.active
        ws.title = "Pinduoduo Data"
        ws.append(["商品编号", "标题", "价格", "描述"])

        for item in data:
            ws.append([item["index"], item["title"], item["price"], item["description"]])

        wb.save(self.output_file)

    def start_crawling(self):
        """Button handler: disable the button, start the spinner, and run
        the crawl on a daemon thread so the UI stays responsive."""
        self.start_btn.config(state="disabled")
        self.progress.start(10)
        self.status_var.set("正在初始化浏览器...")

        thread = Thread(target=self.crawl_data)
        thread.daemon = True
        thread.start()

    def crawl_data(self):
        """Worker-thread entry: drive Chrome, scrape titles/prices/
        descriptions, export to Excel, retrying up to 3 times on failure."""
        max_retries = 3
        retry_delay = 5  # seconds between attempts

        for attempt in range(max_retries):
            try:
                self.driver = webdriver.Chrome(options=self.chrome_options)

                self._set_status("正在加载Cookies...")
                cookies = self.load_cookies()
                if not cookies:
                    raise Exception("无法加载Cookies")

                # Cookies can only be added for the currently loaded domain,
                # so open the site first.
                self.driver.get("https://mobile.pinduoduo.com")
                for cookie in cookies:
                    try:
                        self.driver.add_cookie(cookie)
                    except Exception as e:
                        # Bug fix: a single malformed cookie used to abort the
                        # whole run with `return`, skipping crawling_finished()
                        # and leaving the UI stuck (spinner running, button
                        # disabled). Report it and keep going instead.
                        self.root.after(0, self.show_error, f"设置Cookie失败: {str(e)}")

                self.update_keyword()
                self._set_status("正在加载首页...")
                # Bug fix: URL-encode the keyword — raw Chinese characters in
                # a query string do not form a valid URL.
                url = (
                    "https://mobile.pinduoduo.com/search_result.html"
                    f"?search_key={quote(self.keyword)}"
                    "&search_met_track=history&search_type=goods&source=index"
                    "&options=3&refer_search_met_pos=0&refer_page_el_sn=99887"
                    "&refer_page_name=login"
                    "&refer_page_id=10169_1757122553288_h7sdfs7qdz"
                    "&refer_page_sn=10169"
                )
                print("爬取url:" + url)
                self.driver.get(url)

                WebDriverWait(self.driver, 10).until(
                    EC.presence_of_element_located((By.CLASS_NAME, "_1AsY9c6Q"))
                )

                self._set_status("正在滚动页面...")
                # Scroll to the bottom to trigger lazy-loaded items.
                self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
                time.sleep(3)

                self._set_status("正在提取标题、价格和描述...")
                containers = self.driver.find_elements(By.CLASS_NAME, "_1AsY9c6Q")
                title_elements = self.driver.find_elements(By.CSS_SELECTOR, "._3ANzdjkc._2kQI8OPk")

                results = []
                data = []
                if not containers or not title_elements:
                    results.append("未找到class='_1AsY9c6Q'或class='_3ANzdjkc _2kQI8OPk'的元素，可能是Cookies无效或页面未加载正确\n")
                else:
                    # Titles and price/description containers come from
                    # separate queries; pair them positionally up to the
                    # shorter list and warn below if the counts differ.
                    max_items = min(len(containers), len(title_elements))
                    for i in range(max_items):
                        try:
                            title = title_elements[i].text.strip() or "标题不可用"
                            price_elem = containers[i].find_element(By.CLASS_NAME, "_3_U04GgA")
                            price = price_elem.text.strip() or "价格不可用"
                            desc_elem = containers[i].find_element(By.CLASS_NAME, "_2u4gEhMf")
                            description = desc_elem.text.strip() or "描述不可用"

                            results.append(f"商品 {i+1}:\n标题: {title}\n价格: {price}\n描述: {description}\n{'-'*50}\n")
                            data.append({"index": i+1, "title": title, "price": price, "description": description})
                        except Exception as e:
                            results.append(f"商品 {i+1} 提取失败: {str(e)}\n")
                            data.append({"index": i+1, "title": "提取失败", "price": "提取失败", "description": str(e)})

                    if len(containers) != len(title_elements):
                        results.append(f"警告: 找到 {len(containers)} 个价格/描述容器，但找到 {len(title_elements)} 个标题，可能存在数据不匹配\n")

                if data:
                    self._set_status("正在导出到Excel...")
                    self.save_to_excel(data)
                    results.append(f"数据已导出到 {self.output_file}\n")

                self.root.after(0, self.update_results, results)
                break

            except Exception as e:
                error_msg = f"尝试 {attempt + 1}/{max_retries} 失败: {str(e)}"
                self.root.after(0, self.show_error, error_msg)
                if attempt < max_retries - 1:
                    self._set_status(f"等待 {retry_delay} 秒后重试...")
                    time.sleep(retry_delay)
                else:
                    self.root.after(0, self.show_error, f"提取失败，已达到最大重试次数: {str(e)}")

            finally:
                # Always tear down the browser between attempts.
                if self.driver:
                    self.driver.quit()
                    self.driver = None

        # Re-enable the UI regardless of success or failure.
        self.root.after(0, self.crawling_finished)

    def update_results(self, results):
        """Replace the result pane content with the scraped entries (runs
        on the main loop via root.after)."""
        self.result_text.delete(1.0, tk.END)
        for result in results:
            self.result_text.insert(tk.END, result)
        self.status_var.set(f"提取完成，共找到 {len(results)} 个记录")

    def show_error(self, error_msg):
        """Replace the result pane content with an error message (runs on
        the main loop via root.after)."""
        self.result_text.delete(1.0, tk.END)
        self.result_text.insert(tk.END, error_msg)
        self.status_var.set("提取失败")

    def crawling_finished(self):
        """Stop the spinner and re-enable the start button."""
        self.progress.stop()
        self.start_btn.config(state="normal")

    def clear_results(self):
        """Button handler: wipe the result pane."""
        self.result_text.delete(1.0, tk.END)
        self.status_var.set("已清除结果")

    def __del__(self):
        # Best-effort cleanup. getattr guards against __init__ having
        # failed before self.driver was assigned.
        if getattr(self, "driver", None):
            self.driver.quit()

if __name__ == "__main__":
    # Create the root window, attach the crawler UI, and hand control
    # to the Tk event loop.
    window = tk.Tk()
    application = PDDPriceCrawlerApp(window)
    window.mainloop()