# -*- coding: utf-8 -*-
"""
Created on Thu Nov 25 08:05:59 2021

@author: lenovo
"""
# Import the selenium webdriver and its explicit-wait helpers
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
# sleep() is used to pace the browser between page actions
from time import sleep
#import requests
from lxml import etree
import xlwt
from flask import Flask#,render_template#,request
import datetime
# Flask application that exposes the scraping endpoints below.
app = Flask(__name__)

# Today's date formatted as 'YYYY-MM-DD' (currently unused by the routes).
today = datetime.datetime.now().strftime('%Y-%m-%d')

# Single shared Chrome session reused by every request handler.
driver = webdriver.Chrome(r'D:\b\chromedriver')
@app.route('/',methods=['POST'])
def keywords():
    """Open JD.com, search for a fixed keyword, wait for the result page,
    and return the keyword that was searched.

    Uses the module-level ``driver`` session.
    """
    # Navigate to the JD home page.
    target = 'https://www.jd.com/'
    driver.get(target)

    # Explicit-wait helper: poll up to 10 seconds for each condition.
    waiter = WebDriverWait(driver, 10)

    # Fixed search term (originally meant to come from user input).
    keywords = '华为手机'

    # Type the term into the search box as soon as it appears in the DOM.
    box = waiter.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#key')))
    box.send_keys(keywords)

    # Press the search button once it is clickable.
    button_css = '#search > div > div.form > button'
    waiter.until(EC.element_to_be_clickable((By.CSS_SELECTOR, button_css))).click()
    sleep(2)

    # The bottom pager showing page '1' signals that results have loaded.
    pager_css = '#J_bottomPage > span.p-num > a.curr'
    waiter.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, pager_css), '1'))
    sleep(2)
    return keywords

@app.route('/')
def index():
    """Scrape two pages of JD search results and save them to '商品爬取.xls'.

    Assumes the shared ``driver`` is already on a JD search-results page
    (i.e. ``keywords()`` ran first — TODO confirm intended call order).

    Returns a short status string.  (The original returned the xlwt
    Workbook object, which Flask cannot render as a response.)
    """
    # Workbook with one sheet and a header row.
    file = xlwt.Workbook()
    sheet = file.add_sheet('京东商品信息')
    sheet.write(0, 0, '商品')
    sheet.write(0, 1, '店铺')
    sheet.write(0, 2, '价格')
    sheet.write(0, 3, '评论数')
    # Widen the name/shop columns so the text is readable.
    sheet.col(0).width = 45000
    sheet.col(1).width = 5000

    b = []      # collected product dicts
    page = 2    # number of result pages to scrape
    i = 1

    # BUG FIX: in the original only the `js = ...` line was indented inside
    # the while body, so `i` never advanced and the loop never terminated.
    while i <= page:
        # Scroll far down so lazily-loaded items render before parsing.
        js = 'document.documentElement.scrollTop=10100'
        driver.execute_script(js)
        sleep(4)

        # Parse the rendered page source.
        tree = etree.HTML(driver.page_source)
        li_list = tree.xpath('//*[@id="J_goodsList"]/ul/li')
        for li in li_list:
            price = float(li.xpath('./div/div[@class="p-price"]/strong/i/text()')[0])
            name = li.xpath('./div/div[@class="p-name p-name-type-2"]/a/em//text()')
            shop = li.xpath('./div/div[@class="p-shop"]/span/a/text()')
            comment = li.xpath('./div/div[@class="p-commit"]/strong/a/text()')
            # Flatten the xpath text fragments; strip stray tab/newline pairs.
            # (shop/comment were raw lists before, which xlwt cannot write.)
            name = ''.join(name).replace('\t\n', '')
            b.append({
                '价格': price,
                '商品': name,
                '店铺': ''.join(shop),
                '评论数': ''.join(comment),
            })

        # Advance to the next results page.
        # (find_element_by_class_name was removed in Selenium 4.)
        driver.find_element(By.CLASS_NAME, 'pn-next').click()
        sleep(2)
        i += 1

    # All pages scraped — close the browser session.
    driver.quit()

    # Write the rows.  BUG FIXES vs. original: the shop column received the
    # product name again, and save/return were inside the loop so only the
    # first row was ever written before the function returned.
    for row, item in enumerate(b, start=1):
        sheet.write(row, 0, item['商品'])
        sheet.write(row, 1, item['店铺'])
        sheet.write(row, 2, item['价格'])
        sheet.write(row, 3, item['评论数'])
    file.save('商品爬取.xls')
    return '已保存 {} 条商品信息'.format(len(b))
if __name__ == "__main__":
    # Launch the Flask development server.
    app.run()






