#!/usr/bin/env python3
#coding=utf-8

import hashlib
import json
import os
import time
import urllib
import urllib.request

import requests
from bs4 import BeautifulSoup
from lxml import etree
from selenium import webdriver
import selenium.webdriver.support.ui as ui

# Listing page fetched through selenium (PhantomJS); lat/lng select the area.
url = "https://www.ele.me/place/ws101hcw982?latitude=22.52721&longitude=113.95232"
#  url = "https://www.ele.me/place/ws10ftdd2bwh?latitude=22.662438&longitude=114.019587"
#  url = "https://www.ele.me/place/ws11rb0p1xqm?latitude=22.720968&longitude=114.246899"
#  Longgang bus station
#  url = "https://www.ele.me/place/ws11rb0p1xqm?latitude=22.720968&longitude=114.246899"
#  Bao'an international airport
#  url = "https://www.ele.me/place/ws0bvbm0zhng?latitude=22.63336&longitude=113.814549"
#  Bantian
#  url = "https://www.ele.me/place/ws10g8wc61k3?latitude=22.634804&longitude=114.06942"
#  Nanshan
#  url = "https://www.ele.me/place/ws100w18kd04?latitude=22.533013&longitude=113.930476"


# Headless browser used for all page fetches.
phantomjs = "/home/cat/Downloads/phantomjs"
p = webdriver.PhantomJS(executable_path=phantomjs)
#  firefox = "/home/cat/Downloads/geckop"
#  f = webdriver.Firefox(executable_path=firefox)

# Root directory where scraped JSON and downloaded images are stored.
RES = "/home/cat/wks/workProduct/naonao_db/res/"

# Running id counters for shop categories / product categories.
STYPE_ID = 1
PTYPE_ID = 1
# BUG FIX: the original `SHOP_TYPES = PRODUCT_TYPES = []` bound both names
# to the SAME list object, so appending a product type also "added" it to
# the shop types (and vice versa). Give each name its own list.
SHOP_TYPES = []
PRODUCT_TYPES = []

def reqsetting():
    """
    Build a urllib Request preconfigured for the ele.me API.

    Only the base URL (https://www.ele.me/place/) and browser-like headers
    are set here; callers are expected to extend the path with a geohash
    and coordinates, e.g. ws11rb0p1xqm?latitude=...&longitude=...

    Returns:
        urllib.request.Request: the prepared request object.
    """
    weburl = "https://www.ele.me/place/"
    webheaders = {
        "Accept": "application/json, text/plain, */*",
        "Accept-Language": "zh-CN,zh;q = 0.8",
        "Connection": "keep-alive",
        "Cookie": "ubt_ssid=71dtj5kdlh7zi6k6e2hvfjwns05p1w18_2018-04-12; _utrace=cc72a49871b2886a50b0548cf885883d_2018-04-12",
        "Host": "mainsite-restapi.ele.me",
        "Origin": "https://www.ele.me",
        "Referer": "https://www.ele.me/home/",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.75 Safari/537.36"
    }
    # BUG FIX: this relied on `import urllib` alone, which does not load the
    # `urllib.request` submodule in Python 3; the file now imports it
    # explicitly at the top.
    return urllib.request.Request(url=weburl, headers=webheaders)

def getUrl():
    """
    Open the ele.me home page, type a place name into the search box,
    submit, and return the resulting (redirected) listing URL.
    """
    # BUG FIX: the original used `f`, a Firefox driver that is commented out
    # at module level -> NameError. Use the active PhantomJS driver `p`.
    p.get("https://www.ele.me/home/")
    # BUG FIX: find_element_by_class_name rejects compound class names
    # ('ng-pristine ng-valid'); use a CSS selector instead.
    p.find_element_by_css_selector('.ng-pristine.ng-valid').send_keys('龙华')
    p.find_element_by_class_name('btn-stress').click()
    return p.current_url


def write2File(filename, strData):
    """
    Append *strData* to *filename* as a single JSON line (JSON-lines style).

    Non-ASCII characters are written as-is rather than \\u escapes.
    """
    line = json.dumps(strData, ensure_ascii=False)
    with open(filename, 'a+') as out:
        out.write(line + '\n')
        out.flush()

def getHash(img):
    """
    Turn a (possibly Chinese) name into a stable, filesystem-safe
    identifier: the hex SHA-1 digest of its UTF-8 encoding.
    """
    return hashlib.sha1(img.encode("utf-8")).hexdigest()

def downloadImg(filePath, url):
    """
    Download an image from *url* and save it to *filePath*.

    Raises requests.HTTPError on a non-2xx response instead of silently
    saving the error page as an image. Sleeps 1s afterwards to throttle
    the crawl.
    """
    # BUG FIX: the original had no timeout (could hang forever), never
    # checked the status code (error pages got written as .jpg files),
    # and slept BEFORE writing, which throttles nothing.
    r = requests.get(url, stream=True, timeout=30)
    try:
        r.raise_for_status()
        with open(filePath, 'wb') as fp:
            for chunk in r.iter_content(chunk_size=8192):
                fp.write(chunk)
    finally:
        r.close()
    time.sleep(1)

def getShopDetails(sName, url):
    """
    Scrape one shop's detail page: products (name, image, price, category),
    street address, and the running product-category list.

    Args:
        sName: shop name, used (hashed) to build image paths.
        url: relative shop path like /shop/1071594; "https://www.ele.me"
             is prepended.

    Returns:
        (products, address, PRODUCT_TYPES) — products is a list of dicts,
        address a string, PRODUCT_TYPES the module-level category list.
    """
    # BUG FIX: the original referenced an undefined TYPE_ID (NameError) and
    # rebound it without a `global` declaration (UnboundLocalError); it also
    # appended the raw xpath() result (a list) into PRODUCT_TYPES, so the
    # `not in` membership test never behaved.
    global PTYPE_ID
    p.get("https://www.ele.me" + url)
    tree = etree.HTML(p.page_source)
    products = []
    same_type_products = tree.xpath("//div[@class='shopmenu-list clearfix ng-scope']")
    for same_product in same_type_products:
        # Join the xpath text nodes into a plain string for membership tests.
        product_type = ''.join(same_product.xpath("./h3/text()"))
        # Register a newly seen product category under the next id.
        if product_type not in PRODUCT_TYPES:
            PTYPE_ID += 1
            PRODUCT_TYPES.append(product_type)

        productDatas = same_product.xpath("./div[@class='shopmenu-food ng-isolate-scope']")
        for product in productDatas:
            pName = product.xpath(".//div[@class='col-2 shopmenu-food-main']/h3/text()")[0]
            # ng-src looks like //fuss10.elemecdn.com/...jpeg?imageMogr2/...;
            # the "https:" scheme is prepended before downloading.
            pImg = product.xpath(".//img/@ng-src")
            pAppImg = RES + "static/images/shop/%s/product/%s" % (getHash(sName), getHash(pName))
            # Create the image directory on first use.
            if not os.path.exists(os.path.dirname(pAppImg)):
                os.makedirs(os.path.dirname(pAppImg))
            # Skip products whose image was already downloaded.
            if os.path.exists(pAppImg):
                print("    %s exists" % pName)
                continue
            downloadImg(pAppImg, "https:" + ''.join(pImg))
            pPrice = product.xpath(".//span[@class='col-3 shopmenu-food-price color-stress ng-binding']/text()")[0]
            item = dict(product=dict(name=pName, img=pAppImg, price=pPrice, ptype=PTYPE_ID))
            # BUG FIX: pPrice is a string, so the original "%d" raised
            # TypeError on the very first product.
            print("%s: %s" % (item['product']['name'], item['product']['price']))
            products.append(item)

    pAddress = tree.xpath("//p[@itemprop='streetAddress']/span[@class='ng-binding']/text()")

    return products, ''.join(pAddress), PRODUCT_TYPES

def parse():
    """
    Parse the area listing page: for each shop, save its logo, scrape its
    detail page, and collect the shop-category names.

    Returns:
        (shops, SHOP_TYPES) — shops is a list of packed shop dicts,
        SHOP_TYPES the module-level category list.
    """
    # BUG FIX: rebinding the module-level counters requires `global`; the
    # original raised UnboundLocalError at L179/L181.
    global STYPE_ID, SHOP_TYPES
    p.get(url)
    time.sleep(5)  # let the Angular page finish rendering
    tree = etree.HTML(p.page_source)
    shops = []
    # BUG FIX: the original iterated an undefined `shopPage`; query the
    # parsed tree directly.
    shopDatas = tree.xpath('//div[@class="place-rstbox clearfix"]/a')
    # First layer: the shop list.
    for shop in shopDatas:
        sName = shop.xpath(".//div[@class='rstblock-title']/text()")[0]
        sUrl = shop.get('href')
        print("sUrl: %s, shopname: %s" % (''.join(sUrl), sName))
        # Logo src looks like //fuss10.elemecdn.com/...; "https:" is prepended.
        sLogo = shop.xpath(".//img/@src")
        shopLogoPath = RES + "static/images/shop/%s/%s" % (getHash(sName), getHash(sName))
        # Create the logo directory on first use.
        if not os.path.exists(os.path.dirname(shopLogoPath)):
            os.makedirs(os.path.dirname(shopLogoPath))
        # Skip shops whose logo was already downloaded.
        if os.path.exists(shopLogoPath):
            print("%s exists" % sName)
            continue
        downloadImg(shopLogoPath, "https:" + ''.join(sLogo))
        # Second layer: the shop's detail page.
        # BUG FIX: the original appended getShopDetails' third return value
        # (the module-level PRODUCT_TYPES list itself) back into
        # PRODUCT_TYPES, creating a self-referential list; it is global
        # state and needs no re-collection here.
        products, address, _ = getShopDetails(sName, sUrl)
        item = dict(shop=dict(name=sName, logoImg=shopLogoPath, products=products, address=address))
        shops.append(item)
        time.sleep(1)
        break  # original behavior: only the first shop is scraped

    new_shoptypes = tree.xpath('//div[@class="excavator-filter ng-scope"]/a/text()')
    # Merge newly seen shop categories into the module-level list.
    # BUG FIX: the original comparison was inverted (`==` where a difference
    # should trigger the merge) and it returned an undefined `shopTypes`.
    if not SHOP_TYPES:
        SHOP_TYPES = list(new_shoptypes)
    elif new_shoptypes != SHOP_TYPES:
        STYPE_ID += 1
        SHOP_TYPES = list(set(new_shoptypes + SHOP_TYPES))

    return shops, SHOP_TYPES

def main():
    """
    Run the crawler and persist shops, shop categories and product
    categories as JSON-lines files under RES.
    """
    shops, shopTypes = parse()
    outputs = (
        ('shops.json', shops),
        ('shopTypes.json', shopTypes),
        ('productTypes.json', PRODUCT_TYPES),
    )
    for basename, data in outputs:
        write2File(RES + basename, data)
    print(PRODUCT_TYPES)


# Kick off the crawler only when executed as a script, not on import.
if __name__ == "__main__":
    main()


