from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions as EC
import json
import time

def obtain_news(driver: webdriver.Chrome, url: str) -> None:
    """Scrape up to 100 recent tweets (plus top replies) from an X.com profile.

    Opens *url* in the given driver, reads the profile header (description,
    following/follower counts) the first time this blogger is seen, then clicks
    through the timeline tweet-by-tweet collecting each tweet's text, timestamp
    and the first ~14 replies.  Results are persisted to ``Follow_blogs.json``
    after every tweet; visited tweet IDs are tracked in
    ``visited_persona_ID.json`` so reruns skip already-scraped tweets.

    Args:
        driver: an already logged-in Chrome webdriver session.
        url: profile URL, e.g. ``https://x.com/OpenAI``.
    """
    driver.get(url)
    wait = WebDriverWait(driver, 10)
    time.sleep(6)  # let the timeline finish its initial JS render
    action = ActionChains(driver)

    # Total post count from the profile header, e.g. "1,234 posts" or "5.2K posts".
    num_text = driver.find_element(
        By.XPATH,
        '/html/body/div[1]/div/div/div[2]/main/div/div/div/div[1]/div/div[1]/div[1]/div/div/div/div/div/div[2]/div/div',
    ).text.split(' ')[0]
    if ',' in num_text:
        num_articles = int(num_text.replace(',', ''))
    elif 'K' in num_text:
        num_articles = int(float(num_text.replace('K', '')) * 1000)
    else:
        num_articles = int(num_text)

    # Never try to scrape more tweets than the profile actually has.
    aim_num = min(100, num_articles)

    # Resume state: previously visited tweet IDs and previously scraped output.
    # Missing or corrupt state files simply mean "start fresh".
    try:
        with open('visited_persona_ID.json', 'r', encoding='utf-8') as f:
            visited_articles_id = json.load(f)
    except (OSError, json.JSONDecodeError):
        visited_articles_id = []
    try:
        with open('Follow_blogs.json', 'r', encoding='utf-8') as f:
            output = json.load(f)
    except (OSError, json.JSONDecodeError):
        output = {}

    blogger_name = url.split('/')[-1]

    if blogger_name not in output:
        # First time we see this profile: capture the header metadata.
        # (Fixes original bug: the dict was seeded with a misspelled "followes"
        # key that was never written again; the real key is "followers".)
        output[blogger_name] = {"UserDescription": "", "following": 0, "followers": 0, "articles": []}
        try:
            user_desp = driver.find_element(By.CSS_SELECTOR, 'div[data-testid="UserDescription"]').text
        except Exception:  # some accounts have no bio
            user_desp = None
        output[blogger_name]["UserDescription"] = user_desp
        following = driver.find_element(By.CSS_SELECTOR, f'a[href="/{blogger_name}/following"]').text
        followers = driver.find_element(By.CSS_SELECTOR, f'a[href="/{blogger_name}/verified_followers"]').text
        output[blogger_name]["following"] = following
        output[blogger_name]["followers"] = followers
        time.sleep(7)

    count = 0
    index = 0
    while count < aim_num:
        articles = driver.find_elements(By.CSS_SELECTOR, 'article')
        # The timeline is virtualised, so `index` can run past the currently
        # rendered articles; fall back to the last one on screen.
        article = articles[index] if index < len(articles) else articles[-1]
        element = article.find_element(By.XPATH, './div/div/div[2]/div[1]')
        driver.execute_script("arguments[0].scrollIntoView({behavior: 'smooth', block: 'center'});", element)
        action.move_to_element(element).perform()
        element.click()  # open the tweet's detail page
        time.sleep(4)

        article_id = driver.current_url.split('/')[-1]
        if article_id in visited_articles_id:
            # Already scraped on a previous run: skip to the next tweet.
            driver.back()
            index += 1
            continue

        visited_articles_id.append(article_id)
        wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, 'article')))
        time.sleep(5)
        retweets = driver.find_elements(By.CSS_SELECTOR, 'article')
        try:
            tweet_text = retweets[0].find_element(By.CSS_SELECTOR, 'div[data-testid="tweetText"]').text
        except Exception:  # media-only tweets have no text node
            tweet_text = None
        try:
            time_line = retweets[0].find_element(By.XPATH, "./div/div/div[3]/div[4]/div/div[1]/div").text
        except Exception:
            time_line = None

        # Stop once tweets dated in March are reached (scrape cut-off).
        if time_line is not None and 'Mar' in time_line:
            driver.back()
            break

        output[blogger_name]['articles'].append({'text': tweet_text, "retweets": [], "time": time_line})
        # The first <article> on the detail page is the tweet itself; the
        # following ones (capped at 14) are replies.
        for retweet in retweets[1:15]:
            try:
                retweet_username = retweet.find_element(By.CSS_SELECTOR, 'div[data-testid="User-Name"]').text
                retweet_text = retweet.find_element(By.CSS_SELECTOR, 'div[data-testid="tweetText"]').text
                output[blogger_name]['articles'][-1]["retweets"].append(
                    {'username': retweet_username, 'text': retweet_text})
            except Exception:
                break  # reply without text/user node: stop collecting replies

        count += 1
        driver.back()

        time.sleep(5)
        # Persist after every tweet so a crash loses at most one tweet's work.
        with open('Follow_blogs.json', 'w', encoding='utf-8') as f:
            json.dump(output, f, indent=4, ensure_ascii=False)
        with open('visited_persona_ID.json', 'w', encoding='utf-8') as f:
            json.dump(visited_articles_id, f, indent=4, ensure_ascii=False)
        
        
    
    
    
if __name__ == "__main__":
    # Headless Chrome routed through a local proxy (127.0.0.1:7890), sized so
    # X.com serves the desktop layout the XPaths in obtain_news expect.
    chrome_options = ChromeOptions()
    chrome_options.add_argument('--headless')
    chrome_options.add_argument('--no-sandbox')
    chrome_options.add_argument('--disable-dev-shm-usage')
    chrome_options.add_argument('--window-size=1920,1080')
    chrome_options.add_argument('--proxy-server=http://127.0.0.1:7890')
    chrome_options.add_argument("--disable-extensions")
    chrome_options.add_argument('--disable-browser-side-navigation')
    chrome_options.add_argument('enable-automation')
    chrome_options.add_argument('--disable-infobars')
    driver = webdriver.Chrome(options=chrome_options)
    try:
        # Authenticate by replaying previously exported session cookies.
        with open('exported-cookies.json', 'r', encoding='utf-8') as f:
            cookies = json.load(f)
        driver.get("https://x.com")
        time.sleep(5)
        for cookie in cookies:
            if 'expiry' in cookie:
                cookie['expiry'] = int(cookie['expiry'])  # Selenium requires an int expiry
            # Normalise sameSite: Selenium only accepts valid values, and some
            # exported cookies omit the field entirely (original code crashed
            # with KeyError on those).
            if cookie.get('sameSite') not in ['None', 'Lax']:
                cookie['sameSite'] = 'None'
            driver.add_cookie(cookie)
        time.sleep(1)
        driver.refresh()  # reload with the session cookies applied

        url_list = [
            "https://x.com/OpenAI",
            "https://x.com/sama",
            "https://x.com/elonmusk",
            # Other candidate accounts: POTUS, ScienceNews, NBA, SecBlinken, esrtweet
        ]

        for u in url_list:
            obtain_news(driver, u)
    finally:
        driver.quit()  # always release the browser process, even on error