#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
下载链接收集脚本
负责收集搜鞋网站的产品下载链接，支持多进程并发收集
将收集到的链接保存到 link.txt 文件中，避免重复下载
"""

import multiprocessing
import time
import os
import sys
import argparse
from datetime import datetime
from concurrent.futures import ProcessPoolExecutor, as_completed
import logging
import pandas as pd
import json
from urllib.parse import urlparse, parse_qs

# 导入selenium相关
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.edge.service import Service as EdgeService
from selenium.webdriver.edge.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException, StaleElementReferenceException
from driver_manager import get_edge_service

# Global configuration
processed_count = None  # NOTE(review): shadowed by a local of the same name in collect_links_from_page_range and never read at module level — confirm before removing
test_mode = False  # set from the --test flag in main()
max_products = 1000  # NOTE(review): not referenced anywhere in the visible code — possibly legacy; confirm

def setup_logging():
    """Configure logging to both a timestamped file and the console.

    Creates a ``logs`` directory when missing and writes to a file named
    after the current timestamp, so each run gets its own log.

    Returns:
        logging.Logger: the module-level logger.
    """
    log_dir = "logs"
    os.makedirs(log_dir, exist_ok=True)

    timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    log_file = os.path.join(log_dir, f"link_collection_{timestamp}.log")

    handlers = [
        logging.FileHandler(log_file, encoding='utf-8'),
        logging.StreamHandler(),
    ]
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
        handlers=handlers,
    )

    return logging.getLogger(__name__)

def create_browser_instance(process_id):
    """Spin up a configured Edge WebDriver for one worker process.

    Args:
        process_id: numeric id used only for log messages.

    Returns:
        A ready Edge driver sized to 1200x800, or None on any failure
        (callers treat None as "skip this worker").
    """
    browser_flags = (
        '--disable-gpu',
        '--no-sandbox',
        '--disable-dev-shm-usage',
        '--disable-web-security',
        '--allow-running-insecure-content',
    )
    try:
        options = Options()
        for flag in browser_flags:
            options.add_argument(flag)

        driver = webdriver.Edge(
            service=get_edge_service("msedgedriver1"),
            options=options,
        )
        driver.set_window_size(1200, 800)

        print(f"✅ 进程 {process_id} 浏览器实例创建成功")
        return driver

    except Exception as e:
        print(f"❌ 进程 {process_id} 创建浏览器实例失败: {e}")
        return None

def login_to_website(driver, process_id, username, password):
    """Log in to sooxie.com with the given credentials.

    Args:
        driver: Selenium WebDriver already created for this worker.
        process_id: numeric id used only for log messages.
        username / password: account credentials.

    Returns:
        True when the post-login URL no longer contains "login",
        False on failed login or any exception.
    """
    try:
        print(f"🔐 进程 {process_id} 正在使用账号 {username} 登录...")

        driver.get("https://sooxie.com/User/login")

        user_xpath = "//label[contains(text(), '账号名称')]/following-sibling::input"
        pass_xpath = "//label[contains(text(), '密码')]/following-sibling::input"

        # Wait for the login form to render before touching its fields.
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.XPATH, user_xpath))
        )

        for xpath, value in ((user_xpath, username), (pass_xpath, password)):
            field = driver.find_element(By.XPATH, xpath)
            field.clear()
            field.send_keys(value)

        driver.find_element(By.XPATH, "//button[contains(text(), '登录')]").click()

        # Give the site a moment to process the submit and redirect.
        time.sleep(3)

        if "login" in driver.current_url.lower():
            print(f"❌ 进程 {process_id} 登录失败")
            return False

        print(f"✅ 进程 {process_id} 登录成功")
        return True

    except Exception as e:
        print(f"❌ 进程 {process_id} 登录过程出错: {e}")
        return False

def get_product_info_from_detail_page(driver):
    """Scrape factory, product-code and title fields from a product detail page.

    Args:
        driver: Selenium WebDriver currently on a product detail page.

    Returns:
        Dict with keys '厂家', '货号', '标题' (a missing field becomes an
        empty string), or an empty dict when the page cannot be read at all.
    """
    try:
        product_info = {}

        # Factory name — value sits in the sibling <span> of the label.
        # BUG FIX: the original bare `except:` clauses swallowed everything,
        # including KeyboardInterrupt/SystemExit; narrowed to the exception
        # find_element actually raises for a missing element.
        try:
            factory_element = driver.find_element(By.XPATH, "//span[contains(text(), '厂家：')]/following-sibling::span")
            product_info['厂家'] = factory_element.text.strip()
        except NoSuchElementException:
            product_info['厂家'] = ""

        # Product code, same label/sibling layout.
        try:
            product_code_element = driver.find_element(By.XPATH, "//span[contains(text(), '货号：')]/following-sibling::span")
            product_info['货号'] = product_code_element.text.strip()
        except NoSuchElementException:
            product_info['货号'] = ""

        # Product title — try several likely selectors in one CSS query.
        try:
            title_element = driver.find_element(By.CSS_SELECTOR, "h1, .product-title, .title")
            product_info['标题'] = title_element.text.strip()
        except NoSuchElementException:
            product_info['标题'] = ""

        return product_info

    except Exception as e:
        print(f"❌ 获取产品信息失败: {e}")
        return {}

def collect_links_from_li_element(driver, li_element, process_id):
    """Extract the first usable absolute http(s) URL from a product <li>.

    Logs every anchor found (for selector debugging), then returns a dict
    holding the URL, a collection timestamp and the process id — or None
    when no usable link exists or an error occurs.  ``driver`` is unused
    here but kept for interface compatibility with callers.
    """
    try:
        anchors = li_element.find_elements(By.TAG_NAME, "a")

        if not anchors:
            print(f"⚠️ 进程 {process_id} 未找到可点击的链接")
            return None

        # Debug dump of every anchor so selector issues show up in the logs.
        print(f"🔗 进程 {process_id} 找到 {len(anchors)} 个a标签:")
        for i, link in enumerate(anchors):
            href = link.get_attribute('href')
            text = link.text.strip()
            print(f"   a标签{i+1}: href='{href}', text='{text}'")

        # Pick the first anchor whose href is a real absolute URL
        # (skip javascript pseudo-links and bare fragments).
        detail_url = None
        for link in anchors:
            href = link.get_attribute('href')
            if href and href != 'javascript:void(0)' and href != '#' and href.startswith('http'):
                detail_url = href
                print(f"✅ 进程 {process_id} 找到有效链接: {detail_url}")
                break

        if not detail_url:
            print(f"⚠️ 进程 {process_id} 未找到有效的href链接")
            return None

        # Collect the URL directly — no need to navigate to the detail page.
        print(f"✅ 进程 {process_id} 成功收集到链接: {detail_url}")

        link_info = {
            'detail_url': detail_url,
            'collected_time': datetime.now().isoformat(),
            'process_id': process_id,
        }

        print(f"✅ 进程 {process_id} 链接收集完成")
        return link_info

    except Exception as e:
        print(f"❌ 进程 {process_id} 收集链接失败: {e}")
        return None

def collect_links_from_page_range(args):
    """Worker entry point: collect product detail links for a page range.

    Args:
        args: tuple of (process_id, start_page, end_page, accounts,
            max_products_per_page, min_price, max_price) — packed into one
            value so the function can be submitted to ProcessPoolExecutor.

    Returns:
        List of link-info dicts (see collect_links_from_li_element);
        empty list when browser creation or login fails.
    """
    process_id, start_page, end_page, accounts, max_products_per_page, min_price, max_price = args
    
    print(f"🚀 进程 {process_id} 开始收集页面 {start_page}-{end_page} 的链接")
    
    driver = None
    collected_links = []
    
    try:
        # Each worker process owns its own browser instance
        driver = create_browser_instance(process_id)
        if not driver:
            return []

        # Round-robin account assignment by process id
        account = accounts[process_id % len(accounts)]
        if not login_to_website(driver, process_id, account['username'], account['password']):
            return []

        # Walk each page in this worker's assigned range
        for page_num in range(start_page, end_page + 1):
            try:
                print(f"📄 进程 {process_id} 正在处理第 {page_num} 页")
                
                # Paginated listing URL filtered by state and price range
                list_url = f"https://sooxie.com/?r=all&page={page_num}&state=1&min={min_price}&max={max_price}"
                print(f"🌐 进程 {process_id} 访问第{page_num}页: {list_url}")
                driver.get(list_url)
                time.sleep(5)  # wait for the page to finish loading

                # Debug: log page title and current URL
                print(f"📄 进程 {process_id} 页面标题: {driver.title}")
                print(f"🔗 进程 {process_id} 当前URL: {driver.current_url}")

                # Process each product on the page.  NOTE: this local
                # `processed_count` shadows the module-level global of the
                # same name; the global is never used here.
                processed_count = 0
                max_attempts = max_products_per_page

                while processed_count < max_attempts:
                    try:
                        # Re-query the product <li> elements on every iteration
                        # so references don't go stale after page interactions.
                        # Path: .layui-form > .main.small-screen > .asx-content > form > .asx-layout > .asx-main.bg-white > ul > li
                        correct_selector = ".layui-form .main.small-screen .asx-content form .asx-layout .asx-main.bg-white ul li"
                        li_elements = driver.find_elements(By.CSS_SELECTOR, correct_selector)

                        # Debug: if the full selector matched nothing on the first
                        # attempt, probe it one layer at a time to find the break
                        if not li_elements and processed_count == 0:
                            print(f"⚠️ 进程 {process_id} 使用完整选择器未找到元素，尝试分步查找...")

                            # Verify each layer of the selector chain in turn
                            step_selectors = [
                                (".layui-form", "layui-form"),
                                (".layui-form .main.small-screen", "main small-screen"),
                                (".layui-form .main.small-screen .asx-content", "asx-content"),
                                (".layui-form .main.small-screen .asx-content form", "form"),
                                (".layui-form .main.small-screen .asx-content form .asx-layout", "asx-layout"),
                                (".layui-form .main.small-screen .asx-content form .asx-layout .asx-main.bg-white", "asx-main bg-white"),
                                (".layui-form .main.small-screen .asx-content form .asx-layout .asx-main.bg-white ul", "ul"),
                                (".layui-form .main.small-screen .asx-content form .asx-layout .asx-main.bg-white ul li", "li")
                            ]

                            for selector, name in step_selectors:
                                try:
                                    elements = driver.find_elements(By.CSS_SELECTOR, selector)
                                    print(f"🔍 进程 {process_id} 找到 {len(elements)} 个 '{name}' 元素")
                                    if name == "li" and elements:
                                        li_elements = elements
                                        break
                                except Exception as e:
                                    print(f"❌ 进程 {process_id} 查找 '{name}' 失败: {e}")
                                    continue

                        if processed_count >= len(li_elements):
                            print(f"📦 进程 {process_id} 第 {page_num} 页所有产品已处理完成")
                            break

                        if processed_count == 0:
                            print(f"📦 进程 {process_id} 第 {page_num} 页找到 {len(li_elements)} 个产品")

                        # Grab the <li> at the current position in this page's list
                        current_li = li_elements[processed_count]

                        print(f"🔄 进程 {process_id} 第 {page_num} 页处理第 {processed_count + 1} 个产品")

                        link_info = collect_links_from_li_element(driver, current_li, process_id)
                        if link_info:
                            collected_links.append(link_info)
                            print(f"✅ 进程 {process_id} 第 {page_num} 页第 {processed_count + 1} 个产品链接收集成功")

                        processed_count += 1

                        # Brief pause to avoid hammering the site
                        time.sleep(1)

                    except Exception as e:
                        print(f"❌ 进程 {process_id} 第 {page_num} 页第 {processed_count + 1} 个产品处理失败: {e}")
                        processed_count += 1
                        continue

            except Exception as e:
                print(f"❌ 进程 {process_id} 处理第 {page_num} 页失败: {e}")
                continue

    except Exception as e:
        print(f"❌ 进程 {process_id} 执行过程出错: {e}")
    
    finally:
        if driver:
            driver.quit()
            print(f"🔚 进程 {process_id} 浏览器已关闭")

    print(f"✅ 进程 {process_id} 完成，共收集 {len(collected_links)} 个链接")
    return collected_links

def save_links_to_file(all_links, filename="links.txt"):
    """Append newly collected link URLs to *filename*, skipping duplicates.

    Existing lines in the file are treated as already-known URLs, so the
    file accumulates a unique URL set across multiple runs.

    Args:
        all_links: iterable of dicts each holding a 'detail_url' key.
        filename: target text file, one URL per line.

    Returns:
        Number of new (previously unseen) URLs written; 0 on error.
    """
    try:
        # Seed the dedup set with URLs already present in the file.
        existing_urls = set()
        if os.path.exists(filename):
            with open(filename, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if line:
                        existing_urls.add(line)

        # Keep only URLs we have not seen before (preserving arrival order).
        new_urls = []
        for link in all_links:
            url = link['detail_url']
            if url not in existing_urls:
                new_urls.append(url)
                existing_urls.add(url)

        # Append rather than overwrite so earlier runs are preserved.
        with open(filename, 'a', encoding='utf-8') as f:
            for url in new_urls:
                f.write(url + '\n')

        # BUG FIX: this message previously printed the literal "(unknown)"
        # instead of the actual output filename.
        print(f"💾 保存了 {len(new_urls)} 个新链接到 {filename}")
        print(f"📊 文件中总共有 {len(existing_urls)} 个唯一链接")

        return len(new_urls)

    except Exception as e:
        print(f"❌ 保存链接失败: {e}")
        return 0

def load_accounts():
    """Load scraping accounts from the 账号密码.xlsx spreadsheet.

    Expects columns 账号 (username) and 密码 (password).

    Returns:
        List of {'username', 'password'} dicts; empty list when the file
        is missing or cannot be parsed.
    """
    accounts_file = "账号密码.xlsx"
    try:
        if not os.path.exists(accounts_file):
            print(f"❌ 账号文件不存在: {accounts_file}")
            return []

        sheet = pd.read_excel(accounts_file)
        accounts = [
            {'username': str(row['账号']), 'password': str(row['密码'])}
            for _, row in sheet.iterrows()
        ]

        print(f"✅ 加载了 {len(accounts)} 个账号")
        return accounts

    except Exception as e:
        print(f"❌ 加载账号失败: {e}")
        return []

def parse_arguments(argv=None):
    """Parse and validate command-line options.

    Args:
        argv: optional explicit argument list (defaults to sys.argv[1:]).
            Added for testability/programmatic use; existing callers that
            pass nothing are unaffected.

    Returns:
        argparse.Namespace with workers, max_pages, max_products_per_page,
        min_price, max_price, test and output attributes.

    Exits:
        Via parser.error (SystemExit) when the price range is invalid.
    """
    parser = argparse.ArgumentParser(description='下载链接收集脚本')
    parser.add_argument('--workers', type=int, default=4, help='并发进程数 (默认: 4)')
    parser.add_argument('--max-pages', type=int, default=20, help='最大页数 (默认: 20)')
    parser.add_argument('--max-products-per-page', type=int, default=50, help='每页最大产品数 (默认: 50)')
    parser.add_argument('--min-price', type=int, default=55, help='最小价格 (默认: 55)')
    parser.add_argument('--max-price', type=int, default=100, help='最大价格 (默认: 100)')
    parser.add_argument('--test', action='store_true', help='测试模式，只处理少量数据')
    parser.add_argument('--output', type=str, default='links.txt', help='输出文件名 (默认: links.txt)')

    args = parser.parse_args(argv)

    # Validate the price range before any scraping work starts.
    if args.min_price > args.max_price:
        parser.error(f"最小价格 ({args.min_price}) 不能大于最大价格 ({args.max_price})")

    if args.min_price < 0:
        parser.error(f"最小价格 ({args.min_price}) 不能小于0")

    return args

def main():
    """Entry point: parse options, fan page ranges out to worker processes,
    gather their collected links, and persist them with deduplication."""
    global test_mode

    # Parse command-line options
    args = parse_arguments()
    test_mode = args.test

    print(f"🚀 开始收集下载链接 - {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"⚙️ 并发进程数: {args.workers}")
    print(f"📄 最大页数: {args.max_pages}")
    print(f"📦 每页最大产品数: {args.max_products_per_page}")
    print(f"💰 价格范围: {args.min_price}-{args.max_price}元")
    print(f"💾 输出文件: {args.output}")

    # Test mode caps the workload to a quick smoke-run
    if test_mode:
        print("🧪 测试模式已启用")
        args.max_pages = min(args.max_pages, 2)
        args.max_products_per_page = min(args.max_products_per_page, 5)

    # Load worker accounts; abort when none are available
    accounts = load_accounts()
    if not accounts:
        print("❌ 没有可用账号，退出程序")
        return

    # Configure logging (NOTE(review): the returned logger is never used —
    # the rest of main logs via print; confirm whether this is intentional)
    logger = setup_logging()

    # Split max_pages evenly across workers; leftovers are handed out below
    pages_per_process = args.max_pages // args.workers
    remaining_pages = args.max_pages % args.workers

    # Build one argument tuple per worker process
    process_args = []
    current_page = 1

    for i in range(args.workers):
        start_page = current_page
        end_page = current_page + pages_per_process - 1

        # Give one extra page to each of the first `remaining_pages` workers
        if i < remaining_pages:
            end_page += 1

        process_args.append((
            i,  # process_id
            start_page,
            end_page,
            accounts,
            args.max_products_per_page,
            args.min_price,
            args.max_price
        ))

        current_page = end_page + 1
        print(f"📋 进程 {i}: 页面 {start_page}-{end_page}")

    # Run the collection across a process pool
    all_collected_links = []
    start_time = time.time()

    print(f"\n🚀 开始多进程链接收集...")

    with ProcessPoolExecutor(max_workers=args.workers) as executor:
        # Submit one task per page range, keyed back to its process id
        future_to_process = {
            executor.submit(collect_links_from_page_range, arg): arg[0]
            for arg in process_args
        }

        # Gather results in completion order (not submission order)
        for future in as_completed(future_to_process):
            process_id = future_to_process[future]
            try:
                links = future.result()
                all_collected_links.extend(links)
                print(f"✅ 进程 {process_id} 完成，收集了 {len(links)} 个链接")
            except Exception as e:
                print(f"❌ 进程 {process_id} 执行失败: {e}")

    # Persist everything collected (save_links_to_file deduplicates)
    if all_collected_links:
        new_links_count = save_links_to_file(all_collected_links, args.output)

        end_time = time.time()
        elapsed_time = end_time - start_time

        print(f"\n🎉 链接收集完成!")
        print(f"📊 总收集链接数: {len(all_collected_links)}")
        print(f"📊 新增链接数: {new_links_count}")
        print(f"⏱️ 总耗时: {elapsed_time:.2f} 秒")
        print(f"💾 链接已保存到: {args.output}")

    else:
        print("❌ 没有收集到任何链接")

if __name__ == "__main__":
    main()
