import requests
from bs4 import BeautifulSoup

def seach(page):
    """Fetch one page of recipe search results.

    Scrapes the meishij.net DIY-recipe listing and collects, for each
    recipe card, its name, thumbnail URL and detail-page URL.

    Args:
        page: page number of the listing (int or str).

    Returns:
        list[dict]: one dict per recipe with keys
        'food_name', 'food_img', 'food_href'.
    """
    url = "https://www.meishij.net/chufang/diy/?&page=" + str(page)
    session = requests.session()
    response = session.get(url=url).text
    soup = BeautifulSoup(response, "html.parser")
    container = soup.find('div', class_='listtyle1_list clearfix')
    data = []
    # Guard against a layout change or an empty result page instead of
    # raising AttributeError on None.
    if container is None:
        return data
    for card in container.find_all('div', class_='listtyle1'):
        link = card.find('a')
        img = card.find('img')
        data.append({
            'food_name': link.get('title') if link else None,
            'food_img': img.get('src') if img else None,
            'food_href': link.get('href') if link else None,
        })
    return data


def food_desc_tile(url):
    """Scrape the header section of a recipe detail page.

    Args:
        url: absolute URL of a meishij.net recipe page.

    Returns:
        dict with keys 'food_img' (image URL), 'food_name',
        'effect_data' (list of health-effect tag texts, possibly empty),
        'technology' (工艺), 'difficulty' (难度), plus the placeholders
        'flavor' and 'cooking_time' which are always "" — the page no
        longer exposes those fields reliably.
    """
    session = requests.session()
    response = session.get(url=url).text
    soup = BeautifulSoup(response, "html.parser")
    par = soup.find('div', class_='main clearfix')
    title_par = par.find('div', class_='cp_header clearfix')

    food_img = title_par.find("div", class_='cp_headerimg_w').find('img').get('src')
    food_name = title_par.find('h1', class_='title').find('a').text

    # Health-effect tags are optional on some recipes.
    effect_block = title_par.find('dl', class_='yj_tags clearfix')
    effect_data = [a.text for a in effect_block.find_all('a')] if effect_block else []

    technology = title_par.find('li', class_='w127').find('a').text
    difficulty = title_par.find('li', class_='w270').find('div', class_='processing_w').text

    return {
        'food_img': food_img,
        'food_name': food_name,
        'effect_data': effect_data,
        'technology': technology,
        'difficulty': difficulty,
        'flavor': "",
        'cooking_time': "",
    }


def food_science_desc(url):
    """Scrape the ingredients section of a recipe detail page.

    Args:
        url: absolute URL of a meishij.net recipe page.

    Returns:
        dict with keys 'main_material_data' (主料), 'accessories_data'
        (辅料) and 'seasoning_data' (调料). Each value is a list of dicts
        mapping '<prefix>_name' to the ingredient name and
        '<prefix>_number' to its amount. A section that is missing or
        malformed yields an empty list.
    """
    session = requests.session()
    response = session.get(url=url).text
    soup = BeautifulSoup(response, "html.parser")
    part_science = soup.find('div', class_='materials')

    def section(prefix, get_tags):
        # Build [{'<prefix>_name': ..., '<prefix>_number': ...}, ...].
        # Any missing element (absent section, missing <a>/<span>) makes
        # the whole section fall back to [] — same best-effort behavior
        # as before, but without a bare `except:` swallowing everything.
        try:
            return [
                {prefix + "_name": tag.find('a').text,
                 prefix + "_number": tag.find('span').text}
                for tag in get_tags()
            ]
        except (AttributeError, IndexError):
            return []

    return {
        "main_material_data": section(
            "main_material",
            lambda: part_science.find('div', class_='yl zl clearfix').find_all('h4')),
        "accessories_data": section(
            "accessories",
            lambda: part_science.find_all('div', class_="yl fuliao clearfix")[0].find_all('li')),
        "seasoning_data": section(
            "seasoning",
            lambda: part_science.find_all('div', class_="yl fuliao clearfix")[1].find_all('li')),
    }


def food_operation(url):
    """Scrape the cooking-steps section of a recipe detail page.

    Args:
        url: absolute URL of a meishij.net recipe page.

    Returns:
        The bs4 Tag for the steps container (<div class="edit">) with all
        <script>/<style> descendants removed.
    """
    session = requests.session()
    response = session.get(url=url).text
    soup = BeautifulSoup(response, "html.parser")
    steps = soup.find('div', class_='edit')
    # Strip embedded scripts/styles so only the visible step content
    # remains (plain loop, not a side-effect list comprehension).
    for junk in steps(['script', 'style']):
        junk.extract()
    return steps




if __name__ == '__main__':
    # Manual smoke test: fetch the cooking steps for one recipe and dump
    # them to stdout. The other scrapers (seach, food_desc_tile,
    # food_science_desc) can be exercised the same way.
    steps = food_operation("http://www.meishij.net/zuofa/shuiguopinpan_8.html")
    print(steps)