import json
import os
import time

from selenium import webdriver
from selenium.common.exceptions import WebDriverException
from selenium.webdriver import ChromeOptions
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait

def obtain_news(driver: webdriver.Chrome, type="news"):
    """Scrape topics from Twitter's Explore page and their tweets.

    For every topic cell on the chosen Explore tab, opens the topic, clicks
    into up to ``num_article_per_news`` articles, records the main tweet plus
    its visible replies/retweets, and incrementally persists:

    * ``trending.json``            -- {topic name: [tweet dicts]}
    * ``visited_articles_ID.json`` -- tweet IDs already scraped (resume support)

    Args:
        driver: a Chrome WebDriver already logged in to Twitter.
        type: ``"news"`` for the News tab, anything else (e.g. ``"trending"``)
            for the For-You tab. Name kept for backward compatibility even
            though it shadows the builtin.
    """
    # Resolve the tab URL once: the in-loop reloads must return to the SAME
    # tab that was requested. (Bug fix: the original always reloaded the
    # for_you tab, even when type == "news".)
    if type == "news":
        tab_url = "https://twitter.com/explore/tabs/news"
    else:
        tab_url = "https://twitter.com/explore/tabs/for_you"
    driver.get(tab_url)
    wait = WebDriverWait(driver, 10)
    time.sleep(4)  # let the dynamic timeline finish rendering
    action = webdriver.ActionChains(driver)
    timeline_selector = 'div[aria-label="Timeline: Explore"]'
    cell_selector = timeline_selector + ' div[data-testid="cellInnerDiv"]'
    wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, timeline_selector)))
    news = driver.find_elements(By.CSS_SELECTOR, cell_selector)
    num_of_news = len(news)
    num_article_per_news = 50  # per-topic article cap
    # Resume from a previous run when the output file exists and parses.
    try:
        with open('trending.json', 'r', encoding="utf-8") as f:
            output = json.load(f)
    except (OSError, json.JSONDecodeError):
        output = {}
    already_visited_news_topic = list(output.keys())
    for i in range(1, num_of_news - 1):
        # Reload the tab: the previously found cell elements go stale after
        # navigating into an article and back.
        driver.get(tab_url)
        time.sleep(4)
        news = driver.find_elements(By.CSS_SELECTOR, cell_selector)
        try:
            news_name = news[i].find_element(By.XPATH, './div/div/div/div/div[2]').text
        except (IndexError, WebDriverException):
            continue  # cell vanished or has an unexpected layout
        if news_name in already_visited_news_topic:
            continue
        already_visited_news_topic.append(news_name)
        driver.execute_script(
            "arguments[0].scrollIntoView({behavior: 'smooth', block: 'center'});", news[i])
        news[i].click()
        time.sleep(3)
        wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'article')))
        count = 0  # articles actually scraped for this topic
        index = 0  # position in the topic's article list
        try:
            with open('visited_articles_ID.json', 'r', encoding="utf-8") as f:
                visited_articles_ID = json.load(f)
        except (OSError, json.JSONDecodeError):
            visited_articles_ID = []
        output[news_name] = []
        while count < num_article_per_news:
            articles = driver.find_elements(By.CSS_SELECTOR, 'article')
            try:
                parent_div = articles[index].find_element(By.XPATH, "./div/div/div[2]/div[1]")
            except (IndexError, WebDriverException):
                # Ran out of loaded articles for this topic.
                driver.back()
                break
            # Enter the article page.
            driver.execute_script(
                "arguments[0].scrollIntoView({behavior: 'smooth', block: 'center'});", parent_div)
            action.move_to_element(parent_div).perform()
            time.sleep(3)
            parent_div.click()
            # The article URL ends with the tweet's numeric status ID.
            article_id = driver.current_url.split('/')[-1]
            if article_id in visited_articles_ID:
                driver.back()
                index += 1
                continue
            visited_articles_ID.append(article_id)
            wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'article')))
            time.sleep(3)
            # On the article page the first <article> is the tweet itself;
            # the rest are replies/retweets.
            retweets = driver.find_elements(By.CSS_SELECTOR, 'article')
            tweets_username = retweets[0].find_element(
                By.CSS_SELECTOR, 'div[data-testid="User-Name"]').text
            tweet_text = retweets[0].find_element(
                By.CSS_SELECTOR, 'div[data-testid="tweetText"]').text
            output[news_name].append({'username': tweets_username, 'text': tweet_text, "retweets": []})
            for retweet in retweets[1:]:
                try:
                    retweet_username = retweet.find_element(
                        By.CSS_SELECTOR, 'div[data-testid="User-Name"]').text
                    retweet_text = retweet.find_element(
                        By.CSS_SELECTOR, 'div[data-testid="tweetText"]').text
                    output[news_name][-1]["retweets"].append(
                        {'username': retweet_username, 'text': retweet_text})
                    print(f'Username: {retweet_username}\nText: {retweet_text}\n')
                except WebDriverException:
                    break  # a cell without the expected nodes (e.g. promoted/deleted)
            count += 1
            index += 1
            driver.back()
            time.sleep(4)
            # Persist after every article so an interrupted run can resume.
            with open('trending.json', 'w', encoding="utf-8") as f:
                json.dump(output, f, indent=4, ensure_ascii=False)
            with open('visited_articles_ID.json', 'w', encoding="utf-8") as f:
                json.dump(visited_articles_ID, f, indent=4, ensure_ascii=False)
        driver.back()
        
if __name__ == "__main__":
    # Headless Chrome configured for containerized environments, routed
    # through a local proxy on 127.0.0.1:7890.
    chrome_options = ChromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument('--window-size=1920,1080')
    chrome_options.add_argument('--proxy-server=http://127.0.0.1:7890')
    # chrome_options.add_argument('start-maximized')
    chrome_options.add_argument("--disable-extensions")
    chrome_options.add_argument('--disable-browser-side-navigation')
    chrome_options.add_argument('enable-automation')
    chrome_options.add_argument('--disable-infobars')
    # chrome_options.add_argument('enable-features=NetworkServiceInProcess')
    driver = webdriver.Chrome(options=chrome_options)
    try:
        # Authenticate by replaying cookies exported from a logged-in browser.
        with open('exported-cookies.json', 'r', encoding="utf-8") as f:
            cookies = json.load(f)
        # Must be on the twitter.com domain before add_cookie() will accept
        # cookies for it.
        driver.get("https://twitter.com")
        time.sleep(4)
        driver.save_screenshot("test.png")
        for cookie in cookies:
            # Selenium requires an int expiry; exported values may be floats.
            if 'expiry' in cookie:
                cookie['expiry'] = int(cookie['expiry'])
            # Coerce missing/unsupported sameSite values (e.g. the export's
            # "no_restriction") to one Selenium accepts. .get() avoids a
            # KeyError when the exported cookie lacks the field entirely.
            if cookie.get('sameSite') not in ['None', 'Lax']:
                cookie['sameSite'] = 'None'
            driver.add_cookie(cookie)
        time.sleep(2)
        driver.refresh()  # reload so the injected session cookies take effect
        obtain_news(driver, "trending")
    finally:
        # quit() (not close()) shuts down the whole browser session and the
        # chromedriver process, even when scraping raised an exception.
        driver.quit()



