import os
import time
import random
import csv
import requests
import pandas as pd
import threading
from tkinter import *
from tkinter import ttk, messagebox, filedialog
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException, WebDriverException

class AmazonScraperApp:
    """Tkinter GUI that scrapes Amazon product pages by ASIN with Selenium.

    The user supplies an Excel/CSV file containing an ASIN column; the app
    visits https://www.amazon.com/dp/<ASIN> for each entry in a background
    thread and appends one row per product to a CSV result file.
    """

    def __init__(self, root):
        """Build the window and initialise scraper state.

        Args:
            root: the Tk root window hosting the application.
        """
        self.root = root
        self.root.title("亚马逊商品数据爬取工具")
        self.root.geometry("700x550")  # tall enough for all setting fields
        self.root.resizable(True, True)

        # Use the 'clam' ttk theme for a consistent look across platforms.
        self.style = ttk.Style()
        self.style.theme_use('clam')

        # Scraper state.
        self.input_file = ""        # path of the ASIN list file
        self.output_file = ""       # path of the CSV result file
        self.driver = None          # Selenium WebDriver (created lazily)
        self.is_running = False     # cross-thread stop flag
        self.current_asin = ""
        self.scraped_count = 0      # rows successfully written to the CSV
        self.total_count = 0
        self.browser_position_set = False  # position the window only once

        self.create_widgets()

        # Make sure the scraper is shut down when the window is closed.
        self.root.protocol("WM_DELETE_WINDOW", self.on_closing)

    def create_widgets(self):
        """Create and lay out all widgets (file pickers, settings, progress, log)."""
        # --- File selection -------------------------------------------------
        file_frame = ttk.LabelFrame(self.root, text="文件设置")
        file_frame.pack(fill="x", padx=10, pady=5)

        # Input file.
        ttk.Label(file_frame, text="输入文件:").grid(row=0, column=0, padx=5, pady=5, sticky="w")
        self.input_entry = ttk.Entry(file_frame, width=40)
        self.input_entry.grid(row=0, column=1, padx=5, pady=5, sticky="ew")
        ttk.Button(file_frame, text="浏览...", command=self.browse_input_file).grid(row=0, column=2, padx=5, pady=5)

        # Output file (auto-filled from the input file name; no browse button).
        ttk.Label(file_frame, text="输出文件:").grid(row=1, column=0, padx=5, pady=5, sticky="w")
        self.output_entry = ttk.Entry(file_frame, width=40)
        self.output_entry.grid(row=1, column=1, padx=5, pady=5, sticky="ew")

        # --- Scraper settings ----------------------------------------------
        settings_frame = ttk.LabelFrame(self.root, text="爬虫设置")
        settings_frame.pack(fill="x", padx=10, pady=5)

        # Delay range between requests, "min-max" in seconds.
        ttk.Label(settings_frame, text="请求延迟 (秒):").grid(row=0, column=0, padx=5, pady=5, sticky="w")
        self.delay_var = StringVar(value="3-8")
        ttk.Entry(settings_frame, textvariable=self.delay_var, width=10).grid(row=0, column=1, padx=5, pady=5, sticky="w")

        # Retry count per ASIN.
        ttk.Label(settings_frame, text="重试次数:").grid(row=0, column=2, padx=5, pady=5, sticky="w")
        self.retry_var = StringVar(value="3")
        ttk.Entry(settings_frame, textvariable=self.retry_var, width=5).grid(row=0, column=3, padx=5, pady=5, sticky="w")

        # Browser window size, "WIDTHxHEIGHT".
        ttk.Label(settings_frame, text="浏览器大小:").grid(row=1, column=0, padx=5, pady=5, sticky="w")
        self.size_var = StringVar(value="1024x768")
        ttk.Entry(settings_frame, textvariable=self.size_var, width=10).grid(row=1, column=1, padx=5, pady=5, sticky="w")

        # Browser window position (screen corner).
        ttk.Label(settings_frame, text="浏览器位置:").grid(row=1, column=2, padx=5, pady=5, sticky="w")
        self.position_var = StringVar(value="右下角")
        position_combo = ttk.Combobox(settings_frame, textvariable=self.position_var, width=8)
        position_combo['values'] = ('左上角', '右上角', '左下角', '右下角')
        position_combo.grid(row=1, column=3, padx=5, pady=5, sticky="w")

        # --- Action buttons -------------------------------------------------
        button_frame = ttk.Frame(self.root)
        button_frame.pack(fill="x", padx=10, pady=5)

        self.start_button = ttk.Button(button_frame, text="开始爬取", command=self.start_scraping)
        self.start_button.pack(side="left", padx=5, pady=5)

        self.stop_button = ttk.Button(button_frame, text="停止爬取", command=self.stop_scraping, state=DISABLED)
        self.stop_button.pack(side="left", padx=5, pady=5)

        self.test_button = ttk.Button(button_frame, text="测试浏览器", command=self.test_browser)
        self.test_button.pack(side="left", padx=5, pady=5)

        # --- Progress area --------------------------------------------------
        progress_frame = ttk.LabelFrame(self.root, text="进度信息")
        progress_frame.pack(fill="x", padx=10, pady=5)

        self.progress = ttk.Progressbar(progress_frame, orient="horizontal", mode="determinate")
        self.progress.pack(fill="x", padx=10, pady=5)

        self.status_var = StringVar(value="准备就绪")
        ttk.Label(progress_frame, textvariable=self.status_var).pack(fill="x", padx=10, pady=5)

        self.current_asin_var = StringVar(value="当前ASIN: 无")
        ttk.Label(progress_frame, textvariable=self.current_asin_var).pack(fill="x", padx=10, pady=5)

        # --- Log area -------------------------------------------------------
        log_frame = ttk.LabelFrame(self.root, text="操作日志")
        log_frame.pack(fill="both", expand=True, padx=10, pady=5)

        self.log_text = Text(log_frame, height=8, state=DISABLED)
        self.log_text.pack(fill="both", expand=True, padx=5, pady=5)

        scrollbar = ttk.Scrollbar(log_frame, command=self.log_text.yview)
        scrollbar.pack(side="right", fill="y")
        self.log_text.config(yscrollcommand=scrollbar.set)

    def _ui(self, func):
        """Schedule *func* on the Tk main loop.

        Tkinter widgets must only be touched from the thread running
        mainloop(); the scraping worker funnels every UI change through
        after() instead of mutating widgets directly.
        """
        try:
            self.root.after(0, func)
        except TclError:
            # Window already destroyed (user closed it mid-run); drop the update.
            pass

    def browse_input_file(self):
        """Pick the input file and derive a default output file name from it."""
        file_path = filedialog.askopenfilename(
            title="选择输入文件",
            filetypes=[("Excel文件", "*.xlsx *.xls"), ("CSV文件", "*.csv"), ("所有文件", "*.*")]
        )
        if file_path:
            self.input_entry.delete(0, END)
            self.input_entry.insert(0, file_path)

            # Auto-generate "<name>_result<yyyyMMddHHmmss>.csv" next to the input.
            if not self.output_entry.get():
                dir_name, file_name = os.path.split(file_path)
                name, ext = os.path.splitext(file_name)
                time_str = time.strftime("%Y%m%d%H%M%S")
                output_path = os.path.join(dir_name, f"{name}_result{time_str}.csv")
                self.output_entry.delete(0, END)
                self.output_entry.insert(0, output_path)

    def browse_output_file(self):
        """Pick the output CSV path (currently not wired to a button)."""
        file_path = filedialog.asksaveasfilename(
            title="保存结果文件",
            defaultextension=".csv",
            filetypes=[("CSV文件", "*.csv"), ("所有文件", "*.*")]
        )
        if file_path:
            self.output_entry.delete(0, END)
            self.output_entry.insert(0, file_path)

    def log_message(self, message):
        """Append a timestamped line to the log panel; safe from any thread."""
        timestamp = time.strftime("%H:%M:%S")
        line = f"[{timestamp}] {message}\n"

        def append():
            self.log_text.config(state=NORMAL)
            self.log_text.insert(END, line)
            self.log_text.see(END)  # keep the newest line visible
            self.log_text.config(state=DISABLED)

        if threading.current_thread() is threading.main_thread():
            # Show immediately even while the main thread is busy (e.g. test_browser).
            append()
            self.root.update_idletasks()
        else:
            self._ui(append)

    def _update_progress(self, value, status=None, asin_text=None):
        """Thread-safely set the progress bar (and optionally the status labels)."""
        def apply():
            self.progress["value"] = value
            if status is not None:
                self.status_var.set(status)
            if asin_text is not None:
                self.current_asin_var.set(asin_text)
        self._ui(apply)

    def start_scraping(self):
        """Validate the inputs and launch the scraping worker thread."""
        self.input_file = self.input_entry.get()
        self.output_file = self.output_entry.get()

        if not self.input_file:
            messagebox.showerror("错误", "请选择输入文件")
            return

        if not self.output_file:
            messagebox.showerror("错误", "请选择输出文件")
            return

        # Read the ASIN list up front so file errors surface before the thread starts.
        try:
            self.asin_list = self.get_asin_list(self.input_file)
            self.total_count = len(self.asin_list)
            self.scraped_count = 0

            if self.total_count == 0:
                messagebox.showerror("错误", "输入文件中没有找到ASIN")
                return
        except Exception as e:
            messagebox.showerror("错误", f"读取输入文件失败: {str(e)}")
            return

        # Reset progress display.
        self.progress["value"] = 0
        self.progress["maximum"] = self.total_count
        self.status_var.set(f"准备开始爬取 {self.total_count} 个商品")
        self.current_asin_var.set("当前ASIN: 无")

        # Flip button states for a running session.
        self.start_button.config(state=DISABLED)
        self.stop_button.config(state=NORMAL)
        self.test_button.config(state=DISABLED)
        self.is_running = True
        self.browser_position_set = False

        # Run the scraper off the UI thread so the window stays responsive.
        self.log_message(f"开始爬取 {self.total_count} 个商品...")
        threading.Thread(target=self.run_scraping, daemon=True).start()

    def test_browser(self):
        """Smoke-test that the ChromeDriver can start and load a page."""
        self.log_message("正在测试浏览器...")
        try:
            driver = self.configure_driver()
            if driver:
                driver.get("https://www.baidu.com")
                # Message matches the URL actually loaded (was wrongly "Google").
                self.log_message("浏览器测试成功！已打开百度首页")
                driver.quit()
            else:
                self.log_message("浏览器启动失败")
        except Exception as e:
            self.log_message(f"浏览器测试失败: {str(e)}")

    def stop_scraping(self):
        """Request the worker thread to stop after the current ASIN."""
        self.is_running = False
        self.status_var.set("正在停止...")
        self.log_message("用户请求停止爬取")

    def on_closing(self):
        """Window-close handler: confirm if a run is active, then tear down."""
        if self.is_running:
            if messagebox.askokcancel("退出", "爬取正在进行中，确定要退出吗？"):
                self.is_running = False
                if self.driver:
                    try:
                        self.driver.quit()
                    except Exception:
                        pass  # best-effort shutdown; the driver may already be gone
                self.root.destroy()
        else:
            self.root.destroy()

    def get_asin_list(self, file_path):
        """Read the ASIN column from an Excel or CSV file.

        Args:
            file_path: path to a .csv/.xlsx/.xls file with a column whose
                name contains "asin" (case-insensitive).

        Returns:
            Unique ASINs as strings, in first-seen order.

        Raises:
            ValueError: if no column name contains "ASIN".
        """
        if file_path.lower().endswith('.csv'):
            df = pd.read_csv(file_path)
        else:  # treat anything else as an Excel workbook
            df = pd.read_excel(file_path)

        asin_columns = [col for col in df.columns if 'asin' in col.lower()]
        if not asin_columns:
            raise ValueError("文件中未找到包含'ASIN'的列")

        return df[asin_columns[0]].dropna().astype(str).unique().tolist()

    def set_browser_position(self, driver):
        """Place the browser window in the screen corner chosen by the user."""
        if self.browser_position_set:
            return

        screen_width = self.root.winfo_screenwidth()
        screen_height = self.root.winfo_screenheight()

        # Parse "WIDTHxHEIGHT"; a malformed value must not kill the worker thread.
        try:
            width, height = map(int, self.size_var.get().lower().split('x'))
        except (ValueError, AttributeError):
            width, height = 1024, 768
            self.log_message("浏览器大小格式无效，使用默认 1024x768")

        position = self.position_var.get()
        if position == "左上角":
            x, y = 0, 0
        elif position == "右上角":
            x, y = screen_width - width, 0
        elif position == "左下角":
            x, y = 0, screen_height - height
        else:  # 右下角 (default)
            x, y = screen_width - width, screen_height - height

        driver.set_window_position(x, y)
        driver.set_window_size(width, height)

        self.browser_position_set = True
        self.log_message(f"浏览器窗口位置设置为: {position} ({x},{y})")

    def configure_driver(self):
        """Create a Chrome WebDriver with basic anti-automation-detection tweaks.

        Returns:
            A configured webdriver.Chrome, or None if startup failed.
        """
        options = webdriver.ChromeOptions()

        # Hide the usual automation fingerprints.
        options.add_argument("--disable-blink-features=AutomationControlled")
        options.add_experimental_option("excludeSwitches", ["enable-automation"])
        options.add_experimental_option('useAutomationExtension', False)
        options.add_argument("--disable-extensions")
        options.add_argument("--disable-popup-blocking")

        # Be tolerant of network/TLS quirks.
        options.add_argument('--ignore-certificate-errors')
        options.add_argument('--allow-running-insecure-content')

        # 'eager' returns on DOMContentLoaded instead of waiting for all resources.
        options.page_load_strategy = 'eager'

        # Rotate a user agent per session.
        user_agents = [
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.3 Safari/605.1.15",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0"
        ]
        options.add_argument(f"user-agent={random.choice(user_agents)}")

        try:
            # chromedriver must be on PATH (or managed by Selenium Manager).
            driver = webdriver.Chrome(options=options)

            # Mask navigator.webdriver before any page script runs.
            driver.execute_cdp_cmd(
                "Page.addScriptToEvaluateOnNewDocument", {
                    "source": """
                    Object.defineProperty(navigator, 'webdriver', {
                        get: () => undefined
                    })
                    window.chrome = { runtime: {} };
                    """
                }
            )

            self.set_browser_position(driver)
            return driver
        except WebDriverException as e:
            self.log_message(f"WebDriver启动失败: {str(e)}")
            self.log_message("请确保chromedriver正确安装且版本匹配")
            return None

    @staticmethod
    def _table_pairs(table):
        """Collect {header: value} from every <tr> of a th/td details table."""
        pairs = {}
        for row in table.find_elements(By.TAG_NAME, "tr"):
            try:
                th = row.find_element(By.TAG_NAME, "th").text.strip()
                td = row.find_element(By.TAG_NAME, "td").text.strip()
                pairs[th] = td
            except Exception:
                continue  # skip rows that don't follow the th/td layout
        return pairs

    def extract_product_data(self, driver, asin):
        """Scrape one loaded Amazon product page.

        Args:
            driver: WebDriver currently showing the product page.
            asin: the product's ASIN, copied into the result.

        Returns:
            dict mapping CSV column names to values; sections that are absent
            on the page become "N/A", and '状态' records the overall outcome
            ("成功", "超时" or "错误"). On timeout/error the dict may be partial.
        """
        data = {'ASIN': asin}
        try:
            # 1. Title — the one mandatory element; wait for it explicitly.
            data['标题'] = WebDriverWait(driver, 15).until(
                EC.visibility_of_element_located((By.ID, "productTitle"))
            ).text.strip()

            # 2. Main image (books use a different element id).
            try:
                data['图片URL'] = driver.find_element(By.ID, "landingImage").get_attribute("src")
            except Exception:
                try:
                    data['图片URL'] = driver.find_element(By.CSS_SELECTOR, "img#imgBlkFront").get_attribute("src")
                except Exception:
                    data['图片URL'] = "N/A"

            # 3. Product overview: spec table plus the "glance icons" features.
            try:
                spec_data = {}
                overview_div = driver.find_element(By.ID, "productOverview_feature_div")
                spec_rows = overview_div.find_elements(By.CSS_SELECTOR, "table.a-normal.a-spacing-micro tr")
                for row in spec_rows:
                    name = row.find_element(By.CSS_SELECTOR, "td.a-span3 span").text.strip()
                    value = row.find_element(By.CSS_SELECTOR, "td.a-span9 span").text.strip()
                    spec_data[name] = value

                feature_rows = overview_div.find_elements(By.CSS_SELECTOR, "#glance_icons_div table tr.a-spacing-base")
                for row in feature_rows:
                    for cell in row.find_elements(By.CSS_SELECTOR, "td.a-span6"):
                        name = cell.find_element(By.CSS_SELECTOR, "span.a-text-bold").text.strip()
                        value = cell.find_elements(By.CSS_SELECTOR, "span.a-size-base")[1].text.strip()
                        spec_data[name] = value

                # Join as "key: value" lines so the CSV cell matches the other
                # columns (previously a raw dict repr was written).
                data['描述'] = "\n".join(f"{k}: {v}" for k, v in spec_data.items()) if spec_data else "N/A"
            except Exception:
                data['描述'] = "N/A"

            # 4. "About this item" bullet list.
            try:
                about_items = driver.find_elements(By.CSS_SELECTOR, "#feature-bullets .a-list-item")
                data['关于该商品'] = "\n".join(item.text.strip() for item in about_items if item.text.strip())
            except Exception:
                data['关于该商品'] = "N/A"

            # 5. Technical details: primary spec table, then bullet-list fallback.
            try:
                table = driver.find_element(By.ID, "productDetails_techSpec_section_1")
                tech_details = self._table_pairs(table)
                data['技术细节'] = "\n".join(f"{k}: {v}" for k, v in tech_details.items())
            except Exception:
                try:
                    tech_details = {}
                    container = driver.find_element(By.ID, "detailBullets_feature_div")
                    for item in container.find_elements(By.TAG_NAME, "li"):
                        try:
                            spans = item.find_elements(By.TAG_NAME, "span")
                            if len(spans) >= 2:
                                key = spans[0].text.strip().replace(":", "")
                                tech_details[key] = spans[1].text.strip()
                        except Exception:
                            continue
                    data['技术细节'] = "\n".join(f"{k}: {v}" for k, v in tech_details.items())
                except Exception:
                    data['技术细节'] = "N/A"

            # 6. Additional information tables.
            try:
                additional_info = {}
                section = driver.find_element(By.ID, "productDetails_db_sections")
                for table in section.find_elements(By.TAG_NAME, "table"):
                    try:
                        additional_info.update(self._table_pairs(table))
                    except Exception:
                        continue
                data['更多信息'] = "\n".join(f"{k}: {v}" for k, v in additional_info.items())
            except Exception:
                data['更多信息'] = "N/A"

        except TimeoutException:
            self.log_message(f"警告: ASIN {asin} 关键元素加载超时")
            data['状态'] = "超时"
        except Exception as e:
            self.log_message(f"警告: ASIN {asin} 数据提取异常: {str(e)}")
            data['状态'] = "错误"
        else:
            data['状态'] = "成功"

        return data

    def _get_delay(self):
        """Parse the "min-max" delay setting; fall back to 3-8 s on bad input."""
        try:
            low, high = map(float, self.delay_var.get().split('-'))
            return random.uniform(low, high)
        except (ValueError, AttributeError):
            return random.uniform(3, 8)

    def _get_max_retries(self):
        """Parse the retry-count setting; fall back to 3 on bad input."""
        try:
            return int(self.retry_var.get())
        except (ValueError, TypeError):
            return 3

    def run_scraping(self):
        """Worker-thread entry point: visit every ASIN and append rows to the CSV.

        All widget updates are marshalled to the main loop via _ui()/after().
        """
        self.driver = self.configure_driver()
        if not self.driver:
            self.is_running = False
            self.log_message("浏览器初始化失败，爬取终止")
            self.reset_ui()
            return

        # Create the result file with the header row.
        fieldnames = ['ASIN', '标题', '图片URL', '描述', '关于该商品',
                      '技术细节', '更多信息', '状态']
        try:
            with open(self.output_file, 'w', newline='', encoding='utf-8') as f:
                csv.writer(f).writerow(fieldnames)
        except Exception as e:
            self.log_message(f"创建输出文件失败: {str(e)}")
            self.is_running = False
            self.reset_ui()
            return

        for index, asin in enumerate(self.asin_list, start=1):
            if not self.is_running:
                break

            self.current_asin = asin
            # Use the loop index (not the success count) so progress advances
            # even when an ASIN fails.
            self._update_progress(index - 1,
                                  f"正在处理: {asin} ({index}/{self.total_count})",
                                  f"当前ASIN: {asin}")

            url = f"https://www.amazon.com/dp/{asin}"
            self.log_message(f"开始爬取: {url}")

            delay = self._get_delay()
            max_retries = self._get_max_retries()

            product_data = None
            retries = 0

            while retries < max_retries and self.is_running:
                try:
                    self.driver.set_page_load_timeout(30)
                    self.driver.set_script_timeout(20)

                    self.log_message(f"尝试加载页面 (重试 {retries+1}/{max_retries})")
                    self.driver.get(url)

                    # Confirm the product container rendered before scraping.
                    WebDriverWait(self.driver, 15).until(
                        EC.presence_of_element_located((By.ID, "dp-container"))
                    )

                    # Scroll a few random steps so lazy content loads.
                    for _ in range(random.randint(2, 4)):
                        scroll_height = random.randint(300, 600)
                        self.driver.execute_script(f"window.scrollBy(0, {scroll_height});")
                        time.sleep(random.uniform(0.5, 1.5))

                    # Bring the details section into view if present.
                    try:
                        details_section = self.driver.find_element(By.ID, "detailBullets_feature_div")
                        self.driver.execute_script("arguments[0].scrollIntoView();", details_section)
                        time.sleep(1)
                    except Exception:
                        pass

                    product_data = self.extract_product_data(self.driver, asin)
                    break

                except TimeoutException:
                    retries += 1
                    self.log_message(f"页面加载超时，将在 {delay:.1f} 秒后重试")
                    time.sleep(delay)
                except Exception as e:
                    retries += 1
                    self.log_message(f"爬取异常: {str(e)}，将在 {delay:.1f} 秒后重试")
                    time.sleep(delay)

            # Append the row. .get() keeps partially-scraped products (e.g. a
            # title timeout) writable instead of raising KeyError and silently
            # dropping the row.
            if product_data:
                try:
                    with open(self.output_file, 'a', newline='', encoding='utf-8') as f:
                        csv.writer(f).writerow(
                            [product_data.get(field, "N/A") for field in fieldnames])
                    self.scraped_count += 1
                    self.log_message(f"ASIN {asin} 数据保存成功")
                except Exception as e:
                    self.log_message(f"保存数据失败: {str(e)}")

            self._update_progress(index)

            # Random delay between products (skip after the last one).
            if self.is_running and index < self.total_count:
                self.log_message(f"等待 {delay:.1f} 秒后继续...")
                time.sleep(delay)

        # Run finished (or was stopped).
        self.log_message(f"爬取完成! 成功爬取 {self.scraped_count}/{self.total_count} 个商品")
        done = f"完成: {self.scraped_count}/{self.total_count}"
        self._ui(lambda: self.status_var.set(done))

        try:
            self.driver.quit()
        except Exception:
            pass  # browser may already be closed

        self.is_running = False
        self.reset_ui()

    def reset_ui(self):
        """Restore the idle button states; safe to call from any thread."""
        def apply():
            self.start_button.config(state=NORMAL)
            self.stop_button.config(state=DISABLED)
            self.test_button.config(state=NORMAL)
        self._ui(apply)

if __name__ == "__main__":
    # Create the root window, attach the application, and enter the event loop.
    main_window = Tk()
    scraper_app = AmazonScraperApp(main_window)  # keep a reference alive
    main_window.mainloop()