# Path-related constants, grouped here to make environment switching easy.
# NOTE(review): absolute user-specific path — presumably meant to be overridden
# per deployment; confirm before running on another machine.
LOG_DIR = "/Users/caohongjun/workspace/crawler/logs"

import os
import json
import traceback
from datetime import datetime, timedelta
from typing import List, Dict
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
import random
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.tmt.v20180321 import tmt_client, models

class Logger:
    """Minimal per-day file logger.

    Call set_date() to open (in append mode) the log file for a given day;
    until then, log() falls back to printing to stdout.
    """

    def __init__(self, log_dir=None):
        """Create the logger and make sure the log directory exists.

        Args:
            log_dir: directory for per-day log files; defaults to LOG_DIR.
                (Resolved lazily instead of at def-time, so an explicit
                log_dir never touches the module constant.)
        """
        self.log_dir = log_dir if log_dir is not None else LOG_DIR
        os.makedirs(self.log_dir, exist_ok=True)
        self.log_file = None  # open file handle; None until set_date() is called
        self.current_date = None

    def set_date(self, date_str):
        """Switch logging to the file for date_str (YYYY-MM-DD).

        Closes any previously opened log file first, so repeated calls
        (e.g. from a date-range crawl) do not leak file handles.
        """
        self.close()  # BUGFIX: previously the old handle leaked on re-call
        self.current_date = date_str
        log_path = os.path.join(self.log_dir, f"producthunt_{date_str}.log")
        self.log_file = open(log_path, "a", encoding="utf-8")

    def log(self, msg):
        """Write a timestamped message to the log file, or stdout when no file is open."""
        now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        if self.log_file:
            self.log_file.write(f"[{now}] {msg}\n")
            self.log_file.flush()  # flush per message so tail -f shows progress
        else:
            print(f"[{now}] {msg}")

    def close(self):
        """Close the current log file if one is open (safe to call repeatedly)."""
        if self.log_file:
            self.log_file.close()
            self.log_file = None

class ProductHunterCrawler:
    """Product Hunt crawler service: orchestrates fetch -> translate -> persist."""

    def __init__(self, secret_id: str = None, secret_key: str = None, logger: 'Logger' = None):
        """Build the crawler.

        Args:
            secret_id: Tencent Cloud secret id; falls back to TENCENT_SECRET_ID env var.
            secret_key: Tencent Cloud secret key; falls back to TENCENT_SECRET_KEY env var.
            logger: shared Logger; a fresh one is created when omitted.
        """
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }
        self.secret_id = secret_id or os.getenv('TENCENT_SECRET_ID')
        self.secret_key = secret_key or os.getenv('TENCENT_SECRET_KEY')
        self.logger = logger or Logger()
        self.translator = TranslationService(self.secret_id, self.secret_key, self.logger)

    def run_range(self, start_date: str, end_date: str) -> List[str]:
        """Run the daily pipeline for every day in [start_date, end_date], inclusive.

        Args:
            start_date: first day, YYYY-MM-DD.
            end_date: last day, YYYY-MM-DD.

        Returns:
            The non-empty result paths returned by run(), one per successful day.
        """
        start = datetime.strptime(start_date, "%Y-%m-%d")
        end = datetime.strptime(end_date, "%Y-%m-%d")
        filepaths = []
        current = start
        while current <= end:
            date_str = current.strftime("%Y-%m-%d")
            filepath = self.run(date_str)
            if filepath:
                filepaths.append(filepath)
            current += timedelta(days=1)
        return filepaths

    def run(self, date: str = None) -> str:
        """Run the full pipeline (fetch -> translate -> persist) for one day.

        Args:
            date: day to process, YYYY-MM-DD; defaults to today.

        Returns:
            The result location string from the exporter, or "" when no
            products were fetched.
        """
        date = date or datetime.now().strftime("%Y-%m-%d")
        self.logger.set_date(date)
        self.logger.log(f"=== 开始处理 {date} 的数据 ===")
        try:
            # Stage 1: scrape
            self.logger.log("[流程] 开始爬取数据")
            products = DataFetcher.fetch_daily_products(date, self.logger)
            if not products:
                self.logger.log("!!! 未获取到有效产品数据，流程终止")
                return ""
            self.logger.log(f"[流程] 数据爬取完成，日期: {date}，共{len(products)}条，状态: 成功")
            for p in products:
                p["date"] = date  # stamp the crawl date; consumed by the MySQL exporter
            # Stage 2: translate descriptions
            self.logger.log("[流程] 开始翻译数据")
            products = self.translator.batch_translate(products)
            self.logger.log("[流程] 数据翻译完成")
            # Stage 3: persist to MySQL
            self.logger.log("[流程] 开始写入数据库")
            filepath = DataExporter.save_to_mysql(products, self.logger)
            self.logger.log("[流程] 数据写入数据库完成")
            self.logger.log(f"=== 处理完成，结果文件: {filepath} ===")
            return filepath
        finally:
            # BUGFIX: previously the log file stayed open whenever any stage
            # raised (translator/exporter exceptions propagated past close()).
            self.logger.close()

class DataFetcher:
    """Scraping module: loads the Product Hunt daily leaderboard and parses products."""

    @staticmethod
    def fetch_daily_products(date: str, logger: Logger = None) -> List[Dict]:
        """Fetch all products for one day.

        Args:
            date: day to crawl, formatted YYYY-MM-DD.
            logger: optional Logger for progress messages.

        Returns:
            A list of product dicts (see _parse_product), or [] on any error.
        """
        if logger:
            logger.log(f"[数据爬取] 开始爬取 {date} 的产品数据")
        try:
            year, month, day = date.split('-')
            url = f"https://www.producthunt.com/leaderboard/daily/{year}/{month}/{day}"
            if logger:
                logger.log(f"目标URL: {url}")
            driver = DataFetcher._setup_driver()
            try:
                driver.get(url)
                time.sleep(3)  # let the initial render settle before scrolling
                page_source = DataFetcher._scroll_page(driver)
            finally:
                # BUGFIX: quit in finally so the headless Chrome process is not
                # leaked when get()/scrolling raises (e.g. page-load timeout).
                driver.quit()
            soup = BeautifulSoup(page_source, 'html.parser')
            items = soup.select('section')
            products = []
            current_rank = 1
            for item in items:
                # Promoted/unparseable sections return None and are skipped
                # without consuming a rank.
                if product := DataFetcher._parse_product(item, current_rank):
                    products.append(product)
                    current_rank += 1
            if logger:
                logger.log(f"[数据爬取] 完成, 共获取 {len(products)} 个产品")
            return products
        except Exception as e:
            if logger:
                logger.log(f"获取产品列表时发生错误: {str(e)}")
            return []

    @staticmethod
    def _setup_driver() -> webdriver.Chrome:
        """Build a headless Chrome driver with 30s page-load/script timeouts."""
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("--window-size=1920,1080")
        chrome_options.add_argument("--disable-gpu")
        chrome_options.add_argument("--no-sandbox")
        driver = webdriver.Chrome(options=chrome_options)
        driver.set_page_load_timeout(30)
        driver.set_script_timeout(30)
        return driver

    @staticmethod
    def _scroll_page(driver: webdriver.Chrome) -> str:
        """Scroll to the bottom until the page height stops growing.

        Stops after 5 consecutive scrolls with no height change; the counter
        resets whenever new content loads. Returns the final page source.
        NOTE(review): an endlessly-growing feed would keep this loop alive —
        consider a hard cap on total scrolls.
        """
        scroll_attempts, max_attempts = 0, 5
        last_height = driver.execute_script("return document.body.scrollHeight")

        while scroll_attempts < max_attempts:
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(random.uniform(2, 4))  # random delay to look less bot-like
            new_height = driver.execute_script("return document.body.scrollHeight")
            scroll_attempts = 0 if new_height != last_height else scroll_attempts + 1
            last_height = new_height

        time.sleep(2)
        return driver.page_source

    @staticmethod
    def _parse_product(item, idx: int) -> Dict:
        """Parse one leaderboard <section> into a product dict.

        Args:
            item: a BeautifulSoup <section> element.
            idx: rank to assign (used for both product_id and product_rank).

        Returns:
            A product dict, or None for promoted entries / parse failures.
        NOTE(review): selectors are position-based (order of a[class] and
        button elements) and will silently degrade if the markup changes.
        """
        try:
            data_test = item.get('data-test', '')
            product_url = f"https://www.producthunt.com/r/p/{data_test.split('-')[2]}" if data_test and len(data_test.split('-')) > 2 else ""

            first_a = item.select_one('a[href]')
            media_src = DataFetcher._extract_media_src(first_a) if first_a else ""

            a_tags = item.select('a[class]')
            name = a_tags[0].text.strip() if len(a_tags) > 0 else ""
            if name == "Promoted":
                return None  # skip sponsored placements

            buttons = item.select('button')
            chat_count = buttons[0].select_one('div div').text.strip() if len(buttons) > 0 else ""
            upvote_count = buttons[1].select_one('div div').text.strip() if len(buttons) > 1 else ""

            return {
                "product_id": idx,
                "product_name_logo_src": media_src,
                "product_name": name,
                "product_desc": a_tags[1].text.strip() if len(a_tags) > 1 else "",
                "product_tag1": a_tags[2].text.strip() if len(a_tags) > 2 else "",
                "product_tag2": a_tags[3].text.strip() if len(a_tags) > 3 else "",
                "product_tag3": a_tags[4].text.strip() if len(a_tags) > 4 else "",
                "product_url": product_url,
                "product_chat_count": chat_count,
                "product_upvote_count": upvote_count,
                "product_rank": idx
            }
        except Exception:
            # Any unexpected markup yields None so one bad card cannot abort the page.
            return None

    @staticmethod
    def _extract_media_src(element) -> str:
        """Return the first img src under element, else the video source src, else ""."""
        img = element.select_one('img')
        if img and 'src' in img.attrs:
            return img['src']
        video = element.select_one('video source')
        return video['src'] if video and 'src' in video.attrs else ""

class TranslationService:
    """Translation module backed by the Tencent Cloud TMT batch API."""

    def __init__(self, secret_id: str, secret_key: str, logger: 'Logger' = None):
        """Store credentials and an optional logger for progress/error output."""
        self.secret_id = secret_id
        self.secret_key = secret_key
        self.logger = logger

    def _log(self, message):
        # Emit only when a logger was supplied; otherwise stay silent.
        if self.logger:
            self.logger.log(message)

    def batch_translate(self, products: List[Dict]) -> List[Dict]:
        """Translate each non-empty product_desc (en -> zh) into product_cn_desc.

        Mutates and returns the same list. On any API failure, every product
        gets product_cn_desc = "" instead of raising.
        """
        if not products:
            self._log("[翻译服务] 跳过: 无产品数据需要翻译")
            return products

        self._log(f"[翻译服务] 开始翻译 {len(products)} 条产品描述")

        source_texts = [item["product_desc"] for item in products if item.get("product_desc")]
        if not source_texts:
            self._log("[翻译服务] 跳过: 所有产品描述均为空")
            return products

        try:
            http_profile = HttpProfile()
            http_profile.endpoint = "tmt.tencentcloudapi.com"
            cred = credential.Credential(self.secret_id, self.secret_key)
            client = tmt_client.TmtClient(cred, "ap-beijing", ClientProfile(httpProfile=http_profile))

            request = models.TextTranslateBatchRequest()
            request.Source = "en"
            request.Target = "zh"
            request.ProjectId = 0
            request.SourceTextList = source_texts

            response = client.TextTranslateBatch(request)
            targets = response.TargetTextList

            # Walk products in order, pairing each non-empty description with
            # the next translated text; missing tails become "".
            cursor = 0
            for item in products:
                if item.get("product_desc"):
                    item["product_cn_desc"] = targets[cursor] if cursor < len(targets) else ""
                    cursor += 1

            self._log(f"[翻译服务] 完成, 成功翻译 {len(targets)}/{len(source_texts)} 条")
            return products
        except Exception as exc:
            self._log(f"[翻译服务] 异常: {str(exc)}")
            self._log(traceback.format_exc())
            # Degrade gracefully: blank translations rather than aborting the pipeline.
            for item in products:
                item["product_cn_desc"] = ""
            return products

class DataExporter:
    """负责数据导出模块"""

    @staticmethod
    def save_to_json(data: List[Dict], date: str, output_dir: str, logger: 'Logger' = None) -> str:
        """保存数据到JSON文件"""
        filename = f"producthunt_{date}.json"
        filepath = os.path.join(output_dir, filename)
        if logger:
            logger.log(f"[数据导出] 正在保存数据到 {filepath}")
        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(data, f, indent=2, ensure_ascii=False)
        if logger:
            logger.log("[数据导出] 完成")
        return filepath

    @staticmethod
    def save_to_mysql(data: List[Dict], logger: Logger = None):
        import pymysql
        conn = pymysql.connect(
            host='localhost',
            user='root',
            password='root@200',
            database='database_ai_product',
            charset='utf8mb4'
        )
        cursor = conn.cursor()
        insert_sql = """
        INSERT INTO ai_products (
            product_source, product_name_logo_src, product_name, product_desc, product_tag1,
            product_tag2, product_tag3, product_url, product_chat_count, product_upvote_count, product_rank, product_cn_desc, product_rank_date,product_rank_type
        ) VALUES (
            'ProductHunt', %(product_name_logo_src)s, %(product_name)s, %(product_desc)s, %(product_tag1)s,
            %(product_tag2)s, %(product_tag3)s, %(product_url)s, %(product_chat_count)s, %(product_upvote_count)s, %(product_rank)s, %(product_cn_desc)s, %(date)s,'DAY'
        )
        """
        for item in data:
            required_keys = [
                "product_name_logo_src", "product_name", "product_desc", "product_tag1",
                "product_tag2", "product_tag3", "product_url", "product_chat_count",
                "product_upvote_count", "product_rank", "product_cn_desc", "date"
            ]
            int_keys = ["product_chat_count", "product_upvote_count", "product_rank"]
            for key in required_keys:
                if key not in item:
                    item[key] = None if key in int_keys else ""
                if key in int_keys:
                    if item.get(key) not in (None, ""):
                        try:
                            item[key] = int(item[key])
                        except Exception:
                            item[key] = None
        cursor.executemany(insert_sql, data)
        conn.commit()
        cursor.close()
        conn.close()
        if logger:
            logger.log(f"[数据导出] 数据已写入MySQL数据库, 共{len(data)}条")
        return "MySQL数据库"


def main():
    """Entry point for standalone runs.

    Prompts for a single date (YYYY-MM-DD), a range (YYYY-MM-DD:YYYY-MM-DD),
    or empty input (today), then runs the crawler accordingly. Any exception
    is logged rather than propagated; the logger is always closed.
    """
    logger = Logger()
    try:
        # Fall back to placeholder values when the env vars are unset.
        secret_id = os.getenv('TENCENT_SECRET_ID', 'REDACTED')
        secret_key = os.getenv('TENCENT_SECRET_KEY', 'REDACTED')
        crawler = ProductHunterCrawler(secret_id, secret_key, logger)
        logger.log("=== 爬虫主流程启动 ===")
        date_input = input("请输入要爬取的日期(格式: YYYY-MM-DD，留空使用当天)或日期范围(格式: YYYY-MM-DD:YYYY-MM-DD): ")
        # FIX: initialize filepath instead of the fragile "'filepath' in
        # locals()" check the original relied on.
        filepath = ""
        if not date_input:
            date = datetime.now().strftime("%Y-%m-%d")
            filepath = crawler.run(date)
        elif ":" in date_input:
            start_date, end_date = date_input.split(":")
            filepaths = crawler.run_range(start_date.strip(), end_date.strip())
            if filepaths:
                logger.log("=== 爬取完成 ===")
                logger.log(f"数据已保存到以下文件: {filepaths}")
        else:
            filepath = crawler.run(date_input)
        if filepath:
            logger.log("=== 爬取完成 ===")
            logger.log(f"数据已保存到: {filepath}")
    except Exception as e:
        logger.log(f"[主流程异常] {str(e)}")
        logger.log(traceback.format_exc())
    finally:
        logger.close()

if __name__ == "__main__":
    main()