import os
from time import sleep

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from tool.debug_util import debug_add_html_in_page
from lxml import etree
from tool.exception import DyExecption
import json


# Practice exercise for Selenium: open a page, wait for elements, locate them,
# and execute JavaScript.
# Goal: drive the browser automatically to read a value; because of how the
# page is built, the target element never appears even when scrolled to, so we
# execute JS instead — and we also inject an element into the page so the user
# can see progress.
# browser = webdriver.Chrome('F:\packages\chrome-driver\chromedriver')
driver = webdriver.Chrome()  # NOTE(review): assumes chromedriver is resolvable (PATH / Selenium Manager) — confirm
driver.get('https://www.douyin.com/?recommend=1')
driver.maximize_window()

# Seconds the user gets to scan the login QR code before the script gives up.
wait_time = 600

# JSON data files are written under E:\dy_spider_dir
def write_comment_to_files():
    r"""Dump the comment nodes of the current Douyin page to local files.

    The comment nodes are read via injected JavaScript (on this page they are
    not reliably reachable through normal Selenium element lookup), then:

    * ``e:\dy_spider_dir\t1.json`` — all comment ``innerHTML`` strings as one
      JSON list.
    * ``e:\dy_spider_dir\resultN.txt`` — one file per comment, N starting at 1.

    Relies on the module-level ``driver``; assumes ``e:\dy_spider_dir`` already
    exists (``open`` raises ``FileNotFoundError`` otherwise).
    """
    comments = driver.execute_script('''
        comments=[]
        document.querySelectorAll('#merge-all-comment-container > div > div.comment-mainContent > div').forEach(node => comments.push(node.innerHTML))
        return comments
    ''')
    print(comments)
    out_dir = "e:\\dy_spider_dir"
    with open(os.path.join(out_dir, "t1.json"), 'w', encoding="utf-8") as f:
        # ensure_ascii=False keeps the Chinese comment text human-readable in
        # the file instead of \uXXXX escapes (the JSON stays equivalent).
        json.dump(comments, f, ensure_ascii=False)
    # enumerate() replaces the manual counter (Python has no ++ operator;
    # ++i / --i are just double unary signs and leave i unchanged).
    for i, comment in enumerate(comments, start=1):
        with open(os.path.join(out_dir, f"result{i}.txt"), 'w', encoding='utf-8') as f:
            f.write(json.dumps(comment, ensure_ascii=False))


try:
    # NOTE: driver.page_source returns the DOM after AJAX rendering, not the
    # raw "view source" HTML — that is what makes scraping this page feasible.
    print(f"请在{wait_time}秒内扫描二维码，进入账号！")

    # Wait for the login panel to exist, then inject a reminder into it so the
    # user can see how long they have to scan the QR code.
    login_pannel_selector = '.login-pannel__header-title'
    WebDriverWait(driver=driver, timeout=3).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, login_pannel_selector))
    )

    login_tip = f'''
            <div style="font-size: 18px;color: red;">请在{wait_time}秒内扫描二维码，进入账号！</div>
    '''
    # The tip is embedded in a single-quoted JS string literal below, so it
    # must not contain any newline characters.
    login_tip = login_tip.replace('\n', '').replace('\r', '')
    js1 = f'''
        login_pannel = document.querySelector('.login-pannel__header-title')
        originHtml = login_pannel.innerHTML
        login_pannel.innerHTML = originHtml + '{login_tip}'
        login_pannel.style.height = '80px'
    '''
    driver.execute_script(js1)

    # The sso_uid_tt cookie only appears after a successful login, so poll it
    # once per second until it shows up or the timeout elapses.
    print('first get sso_uid_tt,', driver.get_cookie("sso_uid_tt"))

    login_flag = False
    for _ in range(wait_time):
        sso_uid_tt = driver.get_cookie("sso_uid_tt")
        if sso_uid_tt is not None:
            print(f'sso_uid_tt={sso_uid_tt}')
            login_flag = True
            break
        sleep(1)

    if not login_flag:
        raise DyExecption(f"未在{wait_time}秒内登录，请重新运行程序")

    write_comment_to_files()

    sleep(2)
    print(driver.current_url)

    debug_add_html_in_page(driver, '执行到这里来了')

    # Presence check only: find_element raises NoSuchElementException when the
    # comment container is missing.  The find_element_by_* helpers were
    # removed in Selenium 4.3 — use the By-based API instead.
    driver.find_element(
        By.XPATH,
        '//*[@id="merge-all-comment-container"]/div/div[contains(@class,"comment-mainContent")]/div',
    )

    # TODO: exchange data between JS and Selenium; XPath/XML parsing skills
    # still need improving.
    write_comment_to_files()

    # Alternative extraction path: parse the rendered page with lxml.
    # (These results are currently exploratory and not written anywhere.)
    source = driver.page_source
    html = etree.HTML(source)
    comments1 = html.cssselect('#merge-all-comment-container > div > div.comment-mainContent')
    comment1 = comments1[0].cssselect('div')

    comments2 = html.xpath('//*[@id="merge-all-comment-container"]/div/div[contains(@class,"comment-mainContent")]')
    comment2 = comments2[0].xpath('//div')

    texts = html.xpath('//*[@id="merge-all-comment-container"]/div/div[contains(@class,"comment-mainContent")]/div/text()')

    print("end")
    # Keep the browser open for manual inspection.
    sleep(200000)

except Exception as e:
    print(f"exception 1 = {e}")
    # Bare `raise` re-raises the active exception without rewriting the
    # traceback ("raise e" adds a redundant frame).
    raise

finally:
    print('finally')
    # Always release the browser/chromedriver processes, even on failure.
    driver.quit()


# input.send_keys(Keys.ENTER)
# print(driver.current_url)
# print(browser.get_cookies())
# print(browser.page_source)


