#!D:/python

import json
from urllib.parse import urljoin

import pymysql
import qiniu
import requests
from bs4 import BeautifulSoup

# Module-level scrape state: written by get_data() for each recipe page and
# read (via `global`) by get_step()/get_thumb() when naming image files.
# NOTE(review): `id` shadows the builtin id(); a rename (e.g. recipe_id)
# would be cleaner, but the sibling functions reference this exact name.
urls = ""
id = ""

# Fetch and parse a web page #
def get_web_data(url):
    """Download *url* and return the page parsed as a BeautifulSoup tree.

    A request timeout is set so a stalled connection cannot hang the
    spider forever (the original call had no timeout).
    """
    web_data = requests.get(url, timeout=10)
    soup = BeautifulSoup(web_data.text, 'lxml')
    return soup

# Extract the cooking steps #
def get_step(soup):
    """Collect the recipe steps from *soup* as a JSON string.

    Every step image is downloaded locally, uploaded to Qiniu, and stored
    alongside the step text under a running 1-based counter. Relies on the
    module global `id` for file naming.
    """
    result = {}
    counter = 1
    for step in soup.select(".steps > ol > li"):
        step_text = step.get_text().strip()
        for img in step.select('img'):
            key = '%sstep%d.jpg' % (id, counter)
            local_path = './img/%s' % key
            down_img(img['src'], local_path)
            uploaded_url = update_qiniu(key, local_path)
            result[counter] = {"name": step_text, "img_url": uploaded_url}
            counter += 1
    return json.dumps(result, ensure_ascii=False)
# Extract the cover image #
def get_thumb(soup):
    """Download the recipe cover image, upload it to Qiniu, and return its URL.

    Raises IndexError if the page has no `.expandable > img` element.
    Relies on the module global `id` for file naming.
    """
    cover = soup.select(".expandable > img ")[0]
    key = '%s.jpg' % id
    local_path = './img/%s' % key
    down_img(cover['src'], local_path)
    return update_qiniu(key, local_path)

# Download an image #
def down_img(url, path):
    """Download the image at *url* and write its raw bytes to *path*.

    A timeout guards against a stalled download hanging the spider
    (the original call had no timeout).
    """
    resp = requests.get(url, timeout=30)
    with open(path, "wb") as file:
        file.write(resp.content)

# Upload a local file to Qiniu cloud storage #
def update_qiniu(key,path):
    """Upload the file at *path* to the Qiniu 'test' bucket under *key*
    and return its public CDN URL.

    NOTE(review): the returned URL has no scheme ("http://"); confirm
    downstream consumers expect a bare host/path string.
    """
    # SECURITY: hard-coded Qiniu credentials in source — move to environment
    # variables or a config file before sharing/publishing this code.
    access_key = 'KwF72jAqjXT_W3l0hqe4kgsPQAlbt9MHqV8vj2M4'
    secret_key = 'ROW7Y-e3ffXyBqnqlkBhdqLbKzUjqeQjuCHD9Jhe'

    q = qiniu.Auth(access_key, secret_key)
    bucket_name = 'test'
    # Upload token valid for one hour (3600 seconds).
    token = q.upload_token(bucket_name, key, 3600)
    ret, info = qiniu.put_file(token, key, path)
    # Return the image address #
    # NOTE(review): if the upload fails `ret` is presumably None, which
    # would raise TypeError on the subscript below — confirm and handle.
    return "pd84bjg4z.bkt.clouddn.com/%s" % ret['key']

# Extract the ingredient list #
def get_ings(soup):
    """Collect the recipe ingredients (name + unit) as a JSON string,
    keyed by a 1-based counter that advances once per ingredient element.
    """
    result = {}
    idx = 1
    for ing in soup.findAll(itemprop="recipeIngredient"):
        for name_tag in ing.select(".name"):
            # The unit lookup stays inside the loop so pages whose
            # ingredient has no .name element never touch .unit at all.
            unit = ing.select(".unit")[0].get_text().strip()
            # Prefer the link text when the name is wrapped in an <a>.
            label = name_tag if name_tag.a is None else name_tag.a
            result[idx] = {"name": label.get_text().strip(), "unit": unit}
        idx = idx + 1
    return json.dumps(result, ensure_ascii=False)

def get_title(soup):
    """Return the recipe title: the text of the first .page-title element."""
    return soup.select(".page-title")[0].get_text().strip()

def get_tip(soup):
    """Return the recipe tip text, or "" when the page has no .tip element."""
    matches = soup.select(".tip")
    return matches[0].get_text().strip() if matches else ""




def get_data(url):
    """Scrape one recipe page and return its database row tuple.

    Returns (title, thumb_url, ings_json, steps_json, tip, url).

    Side effects: sets the module globals `urls` and `id` (the recipe id is
    the second-to-last path segment of *url*), which get_thumb()/get_step()
    read when naming image files.
    """
    global urls, id
    urls = url
    id = urls.split("/")[-2]
    soup = get_web_data(url)
    # Tuple elements evaluate left-to-right, preserving the scrape order:
    # title, cover image, ingredients, steps, tip.
    return (
        get_title(soup),
        get_thumb(soup),
        get_ings(soup),
        get_step(soup),
        get_tip(soup),
        url,
    )

#print(get_title(soup))
#步骤#
#print(get_step(soup))
#用料#
#print(get_ings(soup))
#封面图#
#img_url = get_thumb(soup)
#get_data("http://www.xiachufang.com//recipe/101790129/")

def get_soup(url):
    """Download *url* and return its parsed BeautifulSoup tree.

    NOTE: duplicates get_web_data(); kept as-is for compatibility.
    A request timeout prevents a stalled connection from hanging the spider.
    """
    web_data = requests.get(url, timeout=10)
    soup = BeautifulSoup(web_data.text, 'lxml')
    return soup
def get_cate_urls(soup):
    """Return the category URLs found in the homepage navigation menu.

    Skips anchors with no visible text. Uses urljoin so hrefs that already
    start with "/" do not produce double-slash URLs — the previous
    "base/%s" % href concatenation yielded e.g. ".com//recipe/...".
    """
    data = []
    for anchor in soup.select('.homepage-cat-has-submenu > a'):
        if anchor.get_text(strip=True):
            data.append(urljoin("http://www.xiachufang.com/", anchor['href']))
    return data

def get_recipe_urls(url):
    """Fetch a category listing page and return its absolute recipe URLs.

    Adds a request timeout (the original call had none) and uses urljoin so
    hrefs that already start with "/" do not produce double-slash URLs.
    """
    web_data = requests.get(url, timeout=10)
    soup = BeautifulSoup(web_data.text, 'lxml')
    return [
        urljoin("http://www.xiachufang.com/", anchor["href"])
        for anchor in soup.select(".recipe > a ")
    ]


def insert_mysql(data):
    """Bulk-insert scraped recipe rows into test.recipes.

    BUG FIX: pymysql does not autocommit by default, so without the explicit
    db.commit() every insert was silently discarded when the connection
    closed. The connection is now always closed (even on error) and the
    cursor is closed via its context manager.
    """
    db = pymysql.connect(host="127.0.0.1", user="root", passwd='root',
                         port=3306, charset="utf8")
    try:
        with db.cursor() as cur:
            cur.execute('use test')
            cur.executemany(
                "insert into recipes(recipe_name,recipe_thum,recipe_ings,recipe_steps,recipe_tip,source_url)values(%s,%s,%s,%s,%s,%s)",
                data)
        db.commit()
    finally:
        db.close()
    print("  保存到数据库")

def spider_run():
    """Crawl xiachufang: walk category listing pages, scrape every recipe,
    and save each page's batch of rows to MySQL.

    BUG FIX: the insert batch is now reset per *page*. It used to be reset
    only per category, so each per-page insert_mysql() call re-inserted all
    earlier pages' rows of that category, producing duplicates. Also renames
    the local `list` variable, which shadowed the builtin.
    """
    # Category URLs from the homepage #
    soup = get_soup("http://www.xiachufang.com/")
    cate_urls = get_cate_urls(soup)
    visited_list_urls = []    # listing pages already crawled #
    visited_recipe_urls = []  # recipe pages already crawled #
    print(cate_urls)
    # Walk each category's first 10 listing pages #
    for url in cate_urls:
        if url is None:
            print("url为空")
            continue
        for page in range(1, 11):
            list_url = "%s?page=%d" % (url, page)     # paginated listing URL #
            if list_url in visited_list_urls:
                print("已复制URL：%s" % list_url)
                continue

            print("正在复制url:%s" % list_url)
            batch = []  # rows scraped from this page only
            for recipe_url in get_recipe_urls(list_url):
                if recipe_url in visited_recipe_urls:
                    print("已复制url:%s" % recipe_url)
                    continue

                print("  当前url：%s" % recipe_url)
                batch.append(get_data(recipe_url))
                visited_recipe_urls.append(recipe_url)
            visited_list_urls.append(list_url)
            insert_mysql(batch)


spider_run()
