import concurrent.futures
import os
import time
import random
from datetime import datetime

import requests
import schedule
from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

from db import insert_item

import logging

# Logging configuration: INFO level, every record mirrored to both the
# "scrape.log" file (UTF-8, created in the working directory) and stdout.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler("scrape.log", encoding='utf-8'),
        logging.StreamHandler()
    ]
)

# Module-level logger shared by the helpers below.
logger = logging.getLogger(__name__)


def get_ua():
    """Return one randomly chosen desktop/mobile browser User-Agent string."""
    user_agents = (
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.81 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36 Edge/14.14393',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.106 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
        'Mozilla/5.0 (iPad; CPU OS 12_4_5 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.2 Mobile/15E148 Safari/604.1',
        'Mozilla/5.0 (iPhone; CPU iPhone OS 13_5_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Mobile/15E148 Safari/604.1',
    )
    return random.choice(user_agents)

def random_delay(min_delay=1, max_delay=3):
    """Sleep for a uniformly random interval in [min_delay, max_delay] seconds."""
    pause = random.uniform(min_delay, max_delay)
    time.sleep(pause)

def get_chrome_options():
    """Build Chrome Options tuned for headless, low-detection scraping.

    Returns:
        Options: selenium Chrome options with a random User-Agent and
        automation-hiding flags applied.
    """
    chrome_options = Options()
    chrome_options.add_argument("--headless=old")  # headless mode
    chrome_options.add_argument("--hide-scrollbars")
    chrome_options.add_argument('--start-maximized')
    chrome_options.add_argument(f'--user-agent={get_ua()}')
    chrome_options.add_argument('--disable-blink-features=AutomationControlled')

    # Hide the "Chrome is being controlled by automated software" banner.
    chrome_options.add_experimental_option("excludeSwitches", ["enable-automation"])
    chrome_options.add_experimental_option('useAutomationExtension', False)

    # Fix: Chrome switches need the leading "--" and no space in the value;
    # the original "window-size=3280, 2800" was not parsed as a switch.
    chrome_options.add_argument("--window-size=3280,2800")

    return chrome_options


def create_driver():
    """Create a stealth-configured headless Chrome WebDriver.

    Returns:
        webdriver.Chrome: driver using the local "chromedriver.exe" binary.
    """
    chrome_options = get_chrome_options()
    # Fix: Service() takes `executable_path`, not `chrome_driver_path`;
    # the original keyword raised TypeError at runtime.
    service = Service(executable_path="chromedriver.exe")
    driver = webdriver.Chrome(service=service, options=chrome_options)
    # Mask navigator.webdriver so basic bot-detection scripts read `undefined`.
    driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
        "source": """
            Object.defineProperty(navigator, 'webdriver', {
                get: () => undefined
            })
        """
    })

    return driver

def hover_over_element(browser, element):
    """Simulate hovering the mouse pointer over *element* in *browser*."""
    ActionChains(browser).move_to_element(element).perform()


def save_image_from_url(url, img_dir='img'):
    """Download the image at *url* into *img_dir* and return its local path.

    Args:
        url: direct image URL; the final path segment becomes the file name.
        img_dir: directory for downloaded images (created if missing).

    Returns:
        str | None: local file path (an already-downloaded file is reused
        without hitting the network), or None when the download fails.
    """
    print('保存图片')
    # Make sure the image directory exists.
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)

    # File name = last URL path segment.
    image_name = url.split('/')[-1]
    # Full destination path.
    file_path = os.path.join(img_dir, image_name)

    # Cache hit: skip the download entirely.
    if os.path.exists(file_path):
        return file_path

    try:
        # Spoof a browser UA and the goofish referer so the CDN serves us.
        headers = {
            'User-Agent': get_ua(),
            'Referer': 'https://www.goofish.com'
        }
        # Fix: add a timeout so a stalled CDN cannot hang the scrape forever
        # (requests has no default timeout).
        response = requests.get(url, headers=headers, timeout=10)

        # Raise on 4xx/5xx responses so they land in the except branch.
        response.raise_for_status()

        # Write the image bytes to disk.
        with open(file_path, 'wb') as file:
            file.write(response.content)

        return file_path
    except requests.exceptions.RequestException as e:
        logger.info(f"无法从URL获取图片: {url}")
        print(f"错误: {e}")
        return None

def extract_items(products):
    """Parse product-card WebElements into DB-ready tuples.

    Args:
        products: iterable of selenium WebElements, one per feed card.

    Returns:
        list[tuple]: (url, title, description, price, image_path,
        fetch_time, 1) per card; cards that fail to parse are skipped.
    """
    print('解析内容')
    all_product_data = []
    for product in products:
        try:
            # Product detail URL.
            product_url = product.get_attribute('href')

            # Product image URL.
            image = product.find_element(By.TAG_NAME, 'img').get_attribute('src')

            # Product title (row 1) and the attribute row (row 2).
            title = product.find_element(By.XPATH, ".//*[contains(@class, 'row1-wrap-')]").text
            row2_element = product.find_element(By.XPATH, ".//*[contains(@class, 'row2-wrap-')]")
            # Publish time (optional).
            try:
                pub_time = row2_element.find_element(By.XPATH,
                                                     ".//*[contains(@class, 'gradient-image-text')]").get_attribute(
                    'title')
            except Exception:
                pub_time = ''
            # Extra attribute tags ("cpv") — optional.
            try:
                # Fix: the locator is an XPath expression, so it must use
                # By.XPATH; the original By.TAG_NAME raised an invalid-selector
                # error, making cpv_info always empty.
                cpv = row2_element.find_elements(By.XPATH, ".//*[contains(@class, 'cpv--')]")
                cpv_info = ' '.join([c.text for c in cpv])
            except Exception:
                cpv_info = ''

            # Service badge, identified by the badge image's file name:
            #   6000000003488-2-tps-174-42.png -> ships within 24h
            #   6000000003181-2-tps-228-42.png -> free-return guarantee
            try:
                img_src = row2_element.find_element(By.TAG_NAME, "img").get_attribute("src")
                if '3488-2-tps-174-42' in img_src:
                    service = '24小时发货'
                elif '3181-2-tps-228-42' in img_src:
                    service = '描述不符包邮退'
                else:
                    service = ''
            except Exception:
                service = ''

            # Price parts (row 3): currency sign, integer, decimals, plus an
            # optional "万" (10k) abbreviation and discount text.
            price_element = product.find_element(By.XPATH, ".//*[contains(@class, 'row3-wrap-')]")
            price_unit = price_element.find_element(By.XPATH, ".//*[contains(@class, 'sign')]").text
            price_number = price_element.find_element(By.XPATH, ".//*[contains(@class, 'number')]").text
            decimal = price_element.find_element(By.XPATH, ".//*[contains(@class, 'decimal')]").text
            try:
                # Optional "万" abbreviation suffix.
                abbreviation = price_element.find_element(By.XPATH, ".//*[contains(@class, 'abbreviation')]").text
            except Exception:
                abbreviation = ''
            try:
                # Discount text; "想要" counts (how many want it) stay as-is,
                # anything else is labelled as the original price.
                discount = price_element.find_element(By.XPATH, ".//*[contains(@class, 'text')]").text
                if '想要' not in discount:
                    discount = '原价：' + discount
            except Exception:
                discount = ''

            # "可小刀" (price negotiable) badge, file name ...0093-2-tps-150-48.
            # NOTE(review): this re-reads the badge image from row2_element,
            # same as the service badge above — confirm it shouldn't inspect
            # price_element instead.
            try:
                img_src = row2_element.find_element(By.TAG_NAME, "img").get_attribute("src")
                if '0093-2-tps-150-48' in img_src:
                    discount += ' 可小刀'
            except Exception:
                pass

            # Assemble the full price string.
            price = f"{price_unit}{price_number}{decimal}{abbreviation} {discount}"

            # Seller location (row 4).
            seller_element = product.find_element(By.XPATH, ".//*[contains(@class, 'row4-wrap')]")
            location = seller_element.find_element(By.XPATH, ".//*[contains(@class, 'seller-text')]").text

            # Combined human-readable description.
            desc = ' '.join([location, pub_time, cpv_info, service])

            # Cache the product image locally.
            image_path = save_image_from_url(image)

            # Record with the current timestamp as fetch time; trailing 1 is
            # a flag column consumed by insert_item (schema not visible here).
            fetch_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            product_data = (product_url, title, desc, price, image_path, fetch_time, 1)
            all_product_data.append(product_data)
        except Exception as e:
            print(f"解析信息发生错误但继续: {e}")  # skip this card, keep going
            continue

    return all_product_data


def scrape_data(keyword, time_filter, filter_keywords=None):
    """Search goofish.com for *keyword*, apply the publish-time filter,
    parse the result cards and persist them via insert_item.

    Args:
        keyword: search term typed into the site's search box.
        time_filter: visible label of the publish-time dropdown option.
        filter_keywords: optional exclusion words forwarded to insert_item.

    Returns:
        None. Errors are printed and swallowed (best-effort scraping).
    """
    print(f"抓取数据，关键字：{keyword}, 抓取范围：{time_filter}, 过滤：{filter_keywords}")
    driver = create_driver()

    # Fix: run the whole session inside try/finally so the browser always
    # quits; the original only quit on the final insert step, leaking a
    # Chrome process on every early return or earlier exception.
    try:
        driver.get(f"https://www.goofish.com/search?q={keyword}&spm=a21ybx.home.searchInput.0")

        # Best effort: dismiss the promo/login popup if it appears.
        try:
            close_button = WebDriverWait(driver, 5).until(
                EC.element_to_be_clickable((
                    By.XPATH, "//img[@src='https://gw.alicdn.com/tfs/TB1QZN.CYj1gK0jSZFuXXcrHpXa-200-200.png']"))
            )
            close_button.click()
        except Exception:
            pass

        # Wait for the search box, then submit the query.
        search_box = WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.XPATH, "//*[contains(@class, 'search-input--')]"))
        )
        search_box.clear()
        search_box.send_keys(keyword)
        search_box.send_keys(Keys.RETURN)

        # Sort by "newly published".
        new_pub = WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.XPATH, '//span[text()="新发布"]'))
        )
        new_pub.click()

        time.sleep(1)

        # Pick the requested publish-time range; clicked up to 3 times
        # because the dropdown can re-render between wait and click.
        filter_option = WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.XPATH, f"//div[contains(@class, 'search-select-item')]//div[text()='{time_filter}']"))
        )
        for i in range(3):
            if filter_option.text == time_filter:
                filter_option.click()
            time.sleep(0.5)

        # Bail out early when the site shows its "nothing found" placeholder.
        try:
            empty_text_elements = WebDriverWait(driver, 5).until(
                EC.presence_of_all_elements_located((By.XPATH, "//*[contains(@class, 'empty-text-notfound')]"))
            )
            if empty_text_elements:
                print(f"关键字：{keyword} 未找到相关内容")
                return
        except Exception:
            pass

        try:
            # Collect every product card wrapper on the result page.
            product_elements = WebDriverWait(driver, 5).until(
                EC.presence_of_all_elements_located((By.XPATH, "//*[contains(@class, 'feeds-item-wrap--')]"))
            )
            product_data = extract_items(product_elements)
        except Exception as e:
            print(e)
            return

        try:
            insert_item(item=product_data, filter_keywords=filter_keywords)
        except Exception as e:
            print(e)
            return
    finally:
        driver.quit()


def parallel_scrape(tasks):
    """Run scrape_data for every task on a thread pool and report timing.

    Args:
        tasks: list of dicts with 'keyword', 'filter_text' and
            'exclude_keywords' keys (see prepare_tasks).
    """
    t0 = time.time()  # wall-clock start of the whole batch

    with concurrent.futures.ThreadPoolExecutor() as pool:
        pending = []
        for task in tasks:
            fut = pool.submit(scrape_data, task['keyword'], task['filter_text'], task['exclude_keywords'])
            pending.append(fut)
        for done in concurrent.futures.as_completed(pending):
            try:
                outcome = done.result()
            except Exception as exc:
                print(f"抓取失败: {exc}")
            else:
                if not outcome:
                    print(f"抓取成功")

    t1 = time.time()  # wall-clock end of the batch
    now_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    # Report elapsed time plus the current timestamp.
    print(f"本次任务消耗时间：{t1 - t0:.2f} 秒，当前时间：{now_str}")


def load_keywords(file_path):
    """Read keywords from *file_path*, one per line; blank lines are skipped."""
    keywords = []
    with open(file_path, 'r', encoding='utf-8') as fh:
        for raw_line in fh:
            word = raw_line.strip()
            if word:
                keywords.append(word)
    return keywords


def load_filter_words(file_path):
    """Read filter words from *file_path*, one per line; blank lines are skipped."""
    with open(file_path, 'r', encoding='utf-8') as fh:
        stripped = (line.strip() for line in fh)
        return [word for word in stripped if word]


def prepare_tasks(keywords, filter_words):
    """Build the task list for parallel_scrape: one dict per keyword.

    Every task uses the fixed '新发布' (newly published) sort and shares the
    same filter_words list as its exclusion set.
    """
    tasks = []
    for kw in keywords:
        tasks.append({'keyword': kw, 'filter_text': '新发布', 'exclude_keywords': filter_words})
    return tasks


def monitor_file_changes(keyword_file, filter_file):
    """Detect changes to the keyword and filter files via their mtimes.

    Args:
        keyword_file: path of the keyword list file.
        filter_file: path of the filter-word list file.

    Returns:
        tuple: (new_keywords, new_filters) — each entry is the freshly
        loaded list when its file changed since the last call, otherwise
        None. On any error both entries are None.
    """
    global last_modified_keywords, last_modified_filters

    # Fix: these globals are only bound inside the __main__ guard, so the
    # original raised NameError (silently swallowed below) when this module
    # was imported. Default them to 0 so a first call always reloads.
    if 'last_modified_keywords' not in globals():
        last_modified_keywords = 0
    if 'last_modified_filters' not in globals():
        last_modified_filters = 0

    try:
        # Reload keywords when the file's mtime moved.
        current_keywords_time = os.path.getmtime(keyword_file)
        if current_keywords_time != last_modified_keywords:
            last_modified_keywords = current_keywords_time
            print("检测到关键词文件更新，重新加载...")
            new_keywords = load_keywords(keyword_file)
        else:
            new_keywords = None

        # Reload filter words when the file's mtime moved.
        current_filters_time = os.path.getmtime(filter_file)
        if current_filters_time != last_modified_filters:
            last_modified_filters = current_filters_time
            print("检测到过滤词文件更新，重新加载...")
            new_filters = load_filter_words(filter_file)
        else:
            new_filters = None

        return new_keywords, new_filters
    except Exception as e:
        print(f"文件监控时出错: {e}")
        return None, None


def schedule_scraping(tasks, interval, keyword_file, filter_file):
    """Run parallel_scrape every *interval* seconds and hot-reload task
    definitions when the keyword/filter files change.

    Args:
        tasks: initial task list from prepare_tasks().
        interval: scrape period in seconds.
        keyword_file: path watched for keyword changes.
        filter_file: path watched for filter-word changes.

    Note:
        Blocks forever in the schedule loop; never returns.
    """
    def job():
        # Periodic scrape. `tasks` is a free variable resolved at call time,
        # so rebinds done by check_for_updates() are picked up here.
        print(f"开始新的抓取任务...")
        parallel_scrape(tasks)

    def check_for_updates():
        """Re-read the watched files and rebuild the task list on change."""
        nonlocal tasks  # rebind the enclosing `tasks` so job() sees updates
        new_keywords, new_filters = monitor_file_changes(keyword_file, filter_file)

        if new_keywords or new_filters:
            # Rebuild tasks from whichever inputs changed, keeping the
            # previous keywords / filter list for the unchanged side.
            print("更新抓取任务...")
            tasks = prepare_tasks(
                new_keywords or [task['keyword'] for task in tasks],
                new_filters or [task['exclude_keywords'] for task in tasks][0]  # keep previous filter list
            )

    # Run one scrape immediately at startup.
    parallel_scrape(tasks)

    # Register the periodic scrape job and the file-watch job.
    schedule.every(interval).seconds.do(job)
    schedule.every(10).seconds.do(check_for_updates)  # check file updates every 10s

    print(f"启动抓取任务，每 {interval} 秒执行一次")
    while True:
        schedule.run_pending()
        time.sleep(1)  # avoid busy-waiting


if __name__ == '__main__':
    # Watched input file paths.
    keyword_file = 'keywords.txt'
    filter_file = 'filter_words.txt'

    # Last-seen mtimes; read and updated as globals by monitor_file_changes().
    last_modified_keywords = 0  # last modification time of the keyword file
    last_modified_filters = 0  # last modification time of the filter file

    # Load the initial keywords and filter words from disk.
    keywords = load_keywords(keyword_file)
    filter_words = load_filter_words(filter_file)

    # Assemble the initial scrape tasks.
    tasks = prepare_tasks(keywords, filter_words)

    # Scrape period in seconds.
    interval = 300

    # Run the scraper periodically and watch the files (never returns).
    schedule_scraping(tasks, interval, keyword_file, filter_file)


    pass
