#!/usr/bin/env python
# coding: utf-8

# In[ ]:



# -*- coding: utf-8 -*-
#!/usr/bin/env python
 
import base64
import json
import urllib
import urllib.parse
import urllib.request
# client_id is the AK and client_secret is the SK obtained from the Baidu
# AI open platform console.
# NOTE(review): credentials are hardcoded — move them to environment
# variables or a config file before sharing/committing this script.
client_id = 'z0pmPkifUs3MxMLRLOIFCA75'
client_secret = 'klAXTwvE5dW45jlS7fpRORTx0txFkKfz'
 
 
# Obtain an OAuth access token.
def get_token():
    """Request an OAuth2 access token from the Baidu AI open platform.

    Uses the module-level ``client_id`` / ``client_secret`` credentials.

    Returns:
        str: the ``access_token`` value from the token endpoint.

    Raises:
        RuntimeError: if the endpoint returns an empty body.
        KeyError: if the JSON response has no ``access_token`` field
            (e.g. invalid credentials).
    """
    host = ('https://aip.baidubce.com/oauth/2.0/token'
            '?grant_type=client_credentials'
            '&client_id=' + client_id + '&client_secret=' + client_secret)
    request = urllib.request.Request(host)
    request.add_header('Content-Type', 'application/json; charset=UTF-8')
    # ``with`` closes the HTTP response; the original leaked it.
    with urllib.request.urlopen(request) as response:
        token_content = response.read()
    if not token_content:
        # The original fell through and returned an unbound ``token_key``
        # (NameError); fail with an explicit message instead.
        raise RuntimeError('empty response from Baidu token endpoint')
    token_info = json.loads(token_content)
    return token_info['access_token']
 
def ingredient(filename):
    """Classify a food image with Baidu's ingredient-recognition API.

    Args:
        filename: path to an image file, opened in binary mode.

    Returns:
        str: the top-ranked ingredient name (also printed), or '' when
        the API returns an empty body or an error payload without a
        ``result`` field. The original could raise KeyError or return
        None in those cases.
    """
    # The original URL had a stray leading space (urllib happened to
    # strip it); keep it clean.
    request_url = 'https://aip.baidubce.com/rest/2.0/image-classify/v1/classify/ingredient'

    # Base64-encode the image; ``with`` guarantees the file is closed
    # (the original leaked the handle).
    with open(filename, 'rb') as f:
        img = base64.b64encode(f.read())

    params = urllib.parse.urlencode({'image': img, 'show': 'true'}).encode('utf-8')

    access_token = get_token()
    request = urllib.request.Request(
        url=request_url + '?access_token=' + access_token,
        data=params,
    )
    request.add_header('Content-Type', 'application/x-www-form-urlencoded')
    with urllib.request.urlopen(request) as response:
        content = response.read()

    name = ''
    if content:
        data = json.loads(content.decode('utf-8'))
        # On API errors (bad token, bad image) the payload carries
        # ``error_code`` instead of ``result`` — return '' instead of
        # raising KeyError.
        results = data.get('result')
        if results:
            name = results[0]['name']
            print(name)
    return name
    
from bs4 import BeautifulSoup
# Fetch a detailed description of the ingredient from Baidu Baike (encyclopedia).
def get_baike_summary(itemname):
    """Print the Baidu Baike (encyclopedia) summary for an item.

    Args:
        itemname: the item to look up; URL-quoted automatically.

    Returns:
        None. Prints the summary text, or nothing when no summary
        element is found (the original crashed with AttributeError
        on a missing element).
    """
    url = 'https://baike.baidu.com/item/' + urllib.parse.quote(itemname)
    # Browser-like User-Agent — presumably required to avoid the site
    # rejecting the default urllib agent (TODO confirm).
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(request) as response:
        content = response.read().decode('utf8')
    soup = BeautifulSoup(content, "lxml")
    # ``find`` returns None when the item does not exist or the page
    # layout changed; guard before dereferencing.
    node = soup.find("div", attrs={"class": "lemma-summary"})
    if node is not None:
        print(node.get_text())
 
# Fetch calorie information for the fruit/vegetable from food.hiyd.com.
def get_food_cal(itemname):
    """Print calorie info for up to 5 foods matching *itemname*.

    Searches food.hiyd.com and prints the text of the first five
    result entries (div.cont elements).

    Args:
        itemname: search keyword; URL-quoted automatically.
    """
    url = 'https://food.hiyd.com/food/search?kw=' + urllib.parse.quote(itemname)
    # Browser-like User-Agent — presumably required to avoid the site
    # rejecting the default urllib agent (TODO confirm).
    headers = {
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36"
    }
    request = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(request) as response:
        content = response.read().decode('utf8')
    soup = BeautifulSoup(content, "lxml")
    results = soup.find_all("div", attrs={"class": "cont"})
    # Slice instead of the original manual counter, which kept looping
    # over every result after the first five.
    for info in results[:5]:
        print(info.get_text())
 
def get_ingredient_more(filename):
    """Recognize the ingredient in an image and print extra details.

    Runs ingredient recognition on *filename*; if the result is a
    fruit/vegetable, prints its encyclopedia summary and calorie info.

    Args:
        filename: path to the image file to classify.
    """
    result = ingredient(filename)
    # Also guard against empty/None results from a failed API call —
    # the original passed them straight into the lookups below.
    if result and result != '非果蔬食材':
        get_baike_summary(result)
        get_food_cal(result)
        
if __name__ == '__main__':
    # Demo invocation; the path below is machine-specific. The guard
    # prevents a network call when this module is imported.
    get_ingredient_more('C:/Users/13724/Desktop/API/API期末项目/水果.png')


# In[ ]:




