from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import time

# Launch Firefox via geckodriver and open the dianping mobile food-shop list.
# NOTE: the path MUST be a raw string — '\P', '\M', '\g' are invalid escape
# sequences in a plain string literal and a future real escape (e.g. '\t')
# would silently corrupt the path.
# NOTE(review): `executable_path` is deprecated in Selenium 4; migrate to
# webdriver.firefox.service.Service when upgrading.
driver = webdriver.Firefox(executable_path=r'C:\Program Files (x86)\Mozilla Firefox\geckodriver.exe')
# City 846, category 10 (food) shop list; other city ids (e.g. 5) use the same URL shape.
driver.get('https://m.dianping.com/shoplist/846/d/1/c/10/s/s_-1?from=m_nav_1_meishi')

page_count = 0  # number of scroll/scrape iterations performed (diagnostic)

# Scroll to the bottom repeatedly: each scroll triggers the page's JS to load
# the next batch of shops (the newly rendered batch replaces the previous
# JS-generated list). Loop is intentionally endless — stop with Ctrl-C.
while True:
    page_count += 1

    soup = BeautifulSoup(driver.page_source, 'lxml')
    shop_list = soup.find(class_="list-search")
    # The container may not be rendered yet (JS still loading) — skip this
    # pass instead of crashing with AttributeError on None.
    if shop_list is None:
        time.sleep(3)
        continue

    for item in shop_list.find_all('li', class_='list-item border-bottom-new'):
        anchor = item.find('a')
        if anchor is None:          # defensive: <li> without a link
            continue
        href = anchor.get('href')
        if href:
            print(f'http://m.dianping.com{href}')

    time.sleep(3)  # give the page time to settle before the next scroll
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

