from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
from lxml import etree
# Launch a Chrome browser session (requires chromedriver on PATH).
browser = webdriver.Chrome()
# Set the browser window size so the page layout is consistent.
browser.set_window_size(1400,700)
# Explicit wait helper: poll for up to 5 seconds for a condition.
# NOTE(review): the original comment called this an "implicit wait",
# but WebDriverWait is Selenium's *explicit* wait mechanism.
wait = WebDriverWait(browser,5)

def get_page(page):
    """Load a JD search-results page for the query '高达', scroll so
    lazy-loaded images are fetched, queue up the next page, and return
    the captured HTML.

    :param page: 1-based page number. On page 1 the search is performed
        from the JD home page; on later pages the browser is assumed to
        already show results from the previous call's "next" click.
    :return: the page source (str), captured before clicking "next".
    """
    if page == 1:
        url = 'https://www.jd.com/'
        browser.get(url)
        # Wait until the search box is locatable (bounded by the 5s wait).
        # Renamed from `input`, which shadowed the builtin of that name.
        search_input = wait.until(
            EC.presence_of_element_located((By.CSS_SELECTOR, '#key')))
        # Clear any pre-filled text before typing the query.
        search_input.clear()
        search_input.send_keys('高达')

        # Use element_to_be_clickable for elements we intend to click.
        search_button = wait.until(
            EC.element_to_be_clickable((By.CSS_SELECTOR, 'div#search button.button')))
        search_button.click()
    time.sleep(3)

    # Simulate scrolling so lazily loaded images are actually fetched.
    # window.scrollTo(x, y): first argument horizontal, second vertical.

    # Jump straight to the bottom of the page first.
    str_js = 'var scrollHeight = document.body.scrollHeight;window.scrollTo(0,scrollHeight);'
    browser.execute_script(str_js)

    # Then step back through the page in 16 increments (bottom to top),
    # pausing at each step so images in view can load.
    for i in range(16, 0, -1):
        str_js = 'var scrollHeight = document.body.scrollHeight/16;window.scrollTo(0,scrollHeight * %d)' % i
        browser.execute_script(str_js)
        time.sleep(2)
    # Capture the page content now, before navigating to the next page.
    html = browser.page_source
    # Locate the page-number input at the page bottom so we can scroll
    # the pagination controls into view (sample location: {'x': 1169, 'y': 4518}).
    page_input = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#J_bottomPage input.input-txt')))
    str_js = 'window.scrollTo(0, %d);' % page_input.location['y']
    browser.execute_script(str_js)

    # Click "next page" so the following call scrapes the next results.
    next_button = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#J_bottomPage a.pn-next')))
    next_button.click()

    time.sleep(3)

    # Return the HTML captured before the page changed.
    return html

# 解析网页
def parse_page(html):
    """Parse a JD search-results page: print image URLs and product
    titles, and return the collected titles.

    :param html: page source string, as returned by get_page().
    :return: list of product-title strings (empty if nothing matched).
    """
    etree_html = etree.HTML(html)
    # Each product is an <li> inside the goods-list container.
    li_list = etree_html.xpath('//div[@id="J_goodsList"]/ul/li')
    titles = []
    for li in li_list:
        # Title text can be split across nested tags inside <em>;
        # join the fragments back into one string.
        title = li.xpath('.//div[@class="p-name p-name-type-2"]/a/em//text()')
        title = ''.join(title)
        titles.append(title)
        # Product image URL; may be empty when the real URL lives in a
        # lazy-load attribute rather than @src — TODO confirm on site.
        img_url = li.xpath('.//div[@class="p-img"]/a/img/@src')
        print(img_url)

    # Print the summary once, after the loop. The original printed the
    # whole (growing) titles list on every iteration — quadratic output.
    print(titles)
    print(len(titles))
    # Returning the titles is new but backward-compatible: existing
    # callers ignore the (previously None) return value.
    return titles






def main():
    """Crawl and parse JD search-result pages 1 through 100."""
    for page_no in range(1, 101):
        print(page_no)
        print('*' * 20)
        # Fetch the page HTML, then extract titles/images from it.
        parse_page(get_page(page_no))

# Run the crawl only when executed as a script, not when imported.
if __name__ == '__main__':
    main()