from playwright.sync_api import sync_playwright
from tech_article_service import TechArticleService


from dotenv import load_dotenv

# Load environment variables from a local .env file before constructing the
# service below — presumably TechArticleService reads its config (DB/API
# credentials) from the environment; verify in tech_article_service.
load_dotenv()
# Module-level singleton shared by scrape_techcrunch_ai() and the __main__ block.
techArticleService = TechArticleService()

def scrape_techcrunch_ai():
    """Scrape the TechCrunch AI category listing and return new articles.

    Returns:
        list[dict]: one dict per article not already stored (according to
        techArticleService), with keys: title, link, meta_time,
        is_trending, content.
    """
    with sync_playwright() as p:
        # headless=False plus a local proxy, realistic UA/viewport/locale and
        # the init script below are all anti-bot-detection measures.
        browser = p.chromium.launch(headless=False,
                                    proxy={"server": "http://127.0.0.1:10802"}
                                    )
        try:
            page = browser.new_page(
                user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
                viewport={"width": 1280, "height": 800},
                locale="en-US"
            )
            # Hide the navigator.webdriver automation flag from the page.
            page.add_init_script("""
                Object.defineProperty(navigator, 'webdriver', {
                    get: () => undefined
                })
                """)
            url = "https://techcrunch.com/category/artificial-intelligence/"
            page.goto(url, wait_until="domcontentloaded")

            page.wait_for_selector('li.wp-block-post', timeout=60000)

            articles = page.query_selector_all('li.wp-block-post')
            print(f"共找到 {len(articles)} 篇文章。")

            results = []
            seen_titles = set()  # O(1) duplicate check instead of rescanning results

            for article in articles:
                meta_time_ele = article.query_selector('time.loop-card__time')
                meta_time = meta_time_ele.get_attribute('datetime') if meta_time_ele else ""
                title = article.query_selector('h3.loop-card__title')
                if title is None:
                    # Some cards (promos/ads) carry no title — skip instead of crashing.
                    continue
                link_ele = title.query_selector('a')
                link = link_ele.get_attribute('href') if link_ele else ""
                text = title.inner_text().strip()
                if text in seen_titles:
                    continue
                seen_titles.add(text)
                results.append({
                    "title": text,
                    "link": link,
                    "meta_time": meta_time,
                    # Trending cards have no timestamp on the listing page.
                    "is_trending": meta_time == ""
                })

            titles = [result['title'] for result in results]
            non_exist_titles = techArticleService.get_articles_not_titles(titles)
            print(f"共找到 {len(non_exist_titles)} 篇新文章。")

            new_titles = set(non_exist_titles)  # fast membership below
            content_result = []
            for result in results:
                if result['title'] not in new_titles:
                    continue
                content = get_content(page, result['link'])
                result['content'] = content
                content_result.append(result)

            return content_result
        finally:
            # Always release the browser, even when scraping raises mid-way.
            browser.close()

def get_content(page, url):
    """Navigate *page* to *url* and return the article body text.

    Args:
        page: a Playwright page (reused across articles by the caller).
        url: absolute URL of the article.

    Returns:
        str: inner text of the post-content container.

    Raises:
        playwright's TimeoutError if the content container never appears.
    """
    page.goto(url, wait_until="domcontentloaded")
    # wait_for_selector already returns the matched handle; reuse it instead
    # of issuing a second query_selector, which could race and return None.
    content_ele = page.wait_for_selector('div.wp-block-post-content')
    content = content_ele.inner_text()
    print(f"获取到文章内容：{len(content)} from: {url}")
    return content

if __name__ == "__main__":
    # Fetch every TechCrunch AI article not yet stored, then persist them.
    article_list = scrape_techcrunch_ai()
    # article_list = [{"title": "test", "link": "https://techcrunch.com/", "content": "test123", "meta_time": "2021-09-01T14:00:00Z"}]    
    # NOTE(review): each scraped dict (title/link/content/meta_time) gets
    # wrapped under a single "content" key — presumably the envelope shape
    # upsert_article expects; verify against TechArticleService before
    # changing, as it buries the title/link fields one level down.
    article_list = [{"content": item} for item in article_list]
    techArticleService.upsert_article(article_list)
    print("done")
