# -*- coding: utf-8 -*-

import tkinter as tk
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from openpyxl import workbook
from lxml import etree
import time
from bs4 import BeautifulSoup


class LingYing_Spider(object):
    """LinkedIn ("LingYing") automation spider.

    Drives a Chrome browser via Selenium: logs in to LinkedIn, searches for
    a company (currently hard-coded to 'White Lodging'), opens its Posts
    tab, scrolls the feed until no new content loads, then parses the page
    for comment data.

    NOTE(review): the Tkinter GUI (__init__) and the openpyxl export
    (save_excel) are commented out, so the class currently holds no state
    and ``login_html()`` must be called directly.
    """

    # def __init__(self):
    #     # Instantiate the GUI: create the root window
    #     self.window = tk.Tk()
    #     # Set the root window title
    #     self.window.title('领英自动化')
    #     # Give the root window a fixed size
    #     self.window.geometry('800x600')
    #     # Embed a prompt label
    #     self.text_hint_1 = tk.Label(self.window, text='请输入查询的公司', bg='green', fg='white', font=12)
    #     self.text_hint_1.pack()
    #     # Embed an input box
    #     self.text_input = tk.Entry(self.window, font=12, show=None)
    #     self.text_input.pack()
    #     # Embed a button that runs the scraper
    #     self.button = tk.Button(self.window, text='点击运行', font=12, bg='red', fg='white', command=self.login_html)
    #     self.button.pack()
    #     # Run-status display
    #     # self.text_hint_2 = tk.Text(self.window, width=85, height=20, font=12)
    #     # self.text_hint_2.pack()
    #     # Enter the root window main loop
    #     self.window.mainloop()
    #     # Create a workbook
    #     self.excel_data = workbook.Workbook()
    #     # Grab the active worksheet
    #     self.Excel_Save = self.excel_data.active
    #     # Write the header row
    #     self.Excel_Save.append(['发布内容', '内容图片url', '点赞数', '转发数', '评论内容'])

    def login_html(self):
        """Log in to LinkedIn, open the company's Posts feed, scroll it to
        the bottom, then hand the driver over to ``parse_html()``.

        Side effects: launches a real Chrome session, performs a live
        login/search, and blocks for many seconds on ``time.sleep()`` calls.
        Returns nothing.

        NOTE(review): SECURITY — the account name and password are
        hard-coded in plain text below; move them to environment variables
        or a config file before sharing or committing this script.
        """
        # Would read the company name typed into the GUI (disabled with the GUI)
        # str_company = self.text_input.get()
        # Start a Chrome browser session
        driver = webdriver.Chrome()
        driver.get('https://www.linkedin.com/authwall?trk=ripf&trkInfo=AQED7qDsuPIMsgAAAX-5l4-AxlGf3Dw5x9hSclATswrbbkcwivzM6S2zWl1_IPtflN3Kiig76mHqmspc1Yv4y56TvYxw37LLA3VzFKg3hGMRIUn4oYXeAxBzy-JRuLwt3C40DXk=&originalReferer=&sessionRedirect=https%3A%2F%2Fwww.linkedin.com%2Fcompany%2Fwhite-lodging%2Fposts%2F%3FfeedView%3Dall')
        # NOTE(review): created but never used below — presumably intended
        # for explicit waits instead of the fixed sleeps; confirm and wire up.
        wait = WebDriverWait(driver, 100)
        # Click the sign-in button on the auth wall
        driver.find_element(By.XPATH, '//*[@id="main-content"]/div/form/p/button').click()
        # Enter the account name (hard-coded credentials — see docstring)
        session_key = driver.find_element(By.XPATH, '//*[@id="session_key"]')
        session_key.send_keys('wphdsb@163.com')
        # Enter the password
        session_password = driver.find_element(By.XPATH, '//*[@id="session_password"]')
        session_password.send_keys('19940820WPH')
        time.sleep(10)
        # Submit the login form
        driver.find_element(By.XPATH, '//*[@id="main-content"]/div/div/div/form/button').click()
        time.sleep(10)
        # Focus the global search box, type the company name, press Enter
        driver.find_element(By.XPATH, '//*[@id="global-nav-typeahead"]/input').click()
        driver.find_element(By.XPATH, '//*[@id="global-nav-typeahead"]/input').send_keys('White Lodging')
        driver.find_element(By.XPATH, '//*[@id="global-nav-typeahead"]/input').send_keys(Keys.ENTER)
        print('点击搜索了进入搜索界面')
        time.sleep(4)
        # Open the company's home page from the first search result
        driver.find_element(By.XPATH, '//*[@id="main"]/div/div/div[1]/div/a/div/div[2]/div[2]/a/div/span').click()
        print('点击进入公司主页')
        time.sleep(6)
        print('开始准备点击posts')
        time.sleep(5)
        # Click the "Posts" tab (third item of the org page navigation)
        driver.find_element(By.XPATH, '//div[@class="mb4"]//ul[contains(@class, "org-page-navigation__items")]/li[3]').click()
        print('点击完成')
        # Document height recorded on the previous scroll pass; 0 guarantees
        # at least one scroll before the loop can terminate
        check_height = 0
        # Infinite-scroll loop: keep scrolling until the page stops growing
        while True:
            # JS snippet that scrolls the window to the bottom of the document
            js = 'window.scrollTo(0,document.body.scrollHeight)'
            # Execute the scroll in the page
            driver.execute_script(js)
            # Give lazy-loaded content time to arrive
            time.sleep(3)
            # Measure the document height after this scroll pass
            finally_height = driver.execute_script("return document.documentElement.scrollHeight || document.body.scrollHeight;")
            # Compare against the height seen last pass
            if check_height == finally_height:
                # Height unchanged: no more content is loading, stop scrolling
                break
            else:
                # Height grew: remember it and scroll again
                check_height = finally_height
        self.parse_html(driver)

    def parse_html(self, driver):
        """Extract comment data from the fully-scrolled Posts page.

        :param driver: the logged-in Selenium WebDriver handed over by
            ``login_html()``; its current page is the company's Posts feed.

        NOTE(review): ``page_source`` is captured BEFORE the comment button
        is clicked, so the lxml parse below sees the pre-click DOM — confirm
        that is intended (the post-click HTML is never re-read).
        NOTE(review): the '//*[@id="ember886"]' XPath relies on an
        auto-generated Ember id and will likely break between sessions.
        """
        # (earlier per-post iteration attempt, kept for reference)
        # for small_div in driver.find_elements(By.XPATH, '//div[@class="scaffold-finite-scroll__content"]/div'):
            # WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, '//div[@class="social-details-social-activity update-v2-social-activity"]//ul/li[contains(@class, "social-details-social-counts__item social-details-social-counts__comments")]/button/span')))
            # print('开始准备点击')
            # element = small_div.find_element(By.XPATH, '//div[@class="social-details-social-activity update-v2-social-activity"]//ul/li[contains(@class, "social-details-social-counts__item social-details-social-counts__comments")]')
            # # driver.execute_script("arguments[0].click();", element)
            # print(small_div)
            # print(element)
            # print('点击完成' + f'{i}')
            # text_1 = small_div.find_element(By.XPATH, '//div[@class="feed-shared-update-v2__description-wrapper"]//span[@dir="ltr"]')
            # print(text_1.text)
            # print(small_div)
            # html_data = small_div.get_attribute("innerHTML")
            # xml_data = etree.HTML(html_data)
            # text_data = xml_data.xpath('//div[@class="feed-shared-update-v2__description-wrapper"]//span[@dir="ltr"]/text()')
            # print(text_data)
            # time.sleep(0.5)
        # Long pause to let the feed finish rendering before snapshotting it
        time.sleep(60)
        # Snapshot of the page HTML (taken before the click below — see NOTE)
        html_data = driver.page_source
        # Locate and click the comment control of a post
        commment_html = driver.find_element(By.XPATH,'//span[@class="comment feed-shared-social-action-bar__action-button"]/span[@class="artdeco-hoverable-trigger artdeco-hoverable-trigger--content-placed-top artdeco-hoverable-trigger--is-hoverable ember-view"]/div[1]')
        commment_html.click()
        time.sleep(60)
        # print(html_data)
        # All post divs: //*[@id="main"]/div[2]/div/div[2]/div[3]/div/div[1]/div[84]
        # soup = BeautifulSoup(html_data, 'lxml')
        # Comment count, parsed offline from the snapshot with lxml
        xml_data = etree.HTML(html_data)
        comment = xml_data.xpath('//*[@id="ember886"]/div[3]/div/div[2]/button/span[1]/text()')
        print(comment)
        # print(soup)
        # Post title/body text (earlier BeautifulSoup attempt, kept for reference)
        # title = soup.find_all('span', {'dir': "ltr"})
        # print(title)
        # for i in title:
        #     print(i.get_text())
        #     print(len(i))

        # text_data = xml_data.xpath('//div[@class="feed-shared-update-v2__description-wrapper"]//span[@dir="ltr"]')
        # print(text_data)

    # def save_excel(self,):
    #     self.Excel_Save.append([])
    #     self.excel_data.save('数据.xlsx')
    #     self.excel_data.close()




if __name__ == '__main__':
    # Entry point: build the spider and kick off the automated login/scrape run.
    spider = LingYing_Spider()
    spider.login_html()
