#!/usr/bin/env python
# coding: utf-8

# In[46]:


import requests
import json

# NOTE(review): hard-coded subscription key — move to an env var before sharing.
KEY = 'f2fe9d6f60f94d17a57ca228b4ab3f0e'
# Fixed: removed the doubled slash ("//face") from the endpoint path.
BASE_URL = 'https://apifacialreco2020.cognitiveservices.azure.com/face/v1.0/detect'
HEADERS = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': KEY,
}

# Public test image to run face detection on.
img_url = 'https://upload.wikimedia.org/wikipedia/commons/3/37/Dagestani_man_and_woman.jpg'
data = {
    'url': img_url,
}
# Query-string parameters for the Detect call.
payload = {
    'returnFaceId': 'true',
    # Fixed typo: was 'flase', which the API does not accept as a boolean.
    'returnFaceLandmarks': 'false',
    'returnFaceAttributes': 'age,gender,glasses,smile,emotion,hair,makeup,accessories,blur,exposure,noise',
}
# POST the image URL as a JSON body; attributes come back as a JSON list.
r = requests.post(BASE_URL, data=json.dumps(data), params=payload, headers=HEADERS)
r.status_code
r.content
print(type(r.json()))
r.json()


# In[13]:


import requests
import json

# NOTE(review): hard-coded subscription key — move to an env var before sharing.
KEY = 'f2fe9d6f60f94d17a57ca228b4ab3f0e'
# Fixed: removed the doubled slash ("//face") from the endpoint path.
BASE_URL = 'https://apifacialreco2020.cognitiveservices.azure.com/face/v1.0/detect'
HEADERS = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': KEY,
}

# Public test image to run face detection on.
img_url = 'https://upload.wikimedia.org/wikipedia/commons/3/37/Dagestani_man_and_woman.jpg'
data = {
    'url': img_url,
}
# Query-string parameters for the Detect call.
payload = {
    'returnFaceId': 'true',
    # Fixed typo: was 'flase', which the API does not accept as a boolean.
    'returnFaceLandmarks': 'false',
    'returnFaceAttributes': 'age,gender,glasses,smile,emotion,hair,makeup,accessories,blur,exposure,noise',
}
r = requests.post(BASE_URL, data=json.dumps(data), params=payload, headers=HEADERS)
r.status_code
r.content


# In[ ]:





# In[ ]:





# In[ ]:





# In[ ]:





# In[47]:


import requests

# Identifier of the face list to create (lowercase letters/digits).
FaceListId = "internet01"
# Fixed: Create Face List uses the /facelists/{id} path — the original pointed
# at /findsimilars/{id}, which is a different API.
create_facelists_url = "https://apifacialreco2020.cognitiveservices.azure.com/face/v1.0/facelists/{}"
subscription_key = "f2fe9d6f60f94d17a57ca228b4ab3f0e"
assert subscription_key

HEADERS = {
    # Request headers
    'Content-Type': 'application/json',
    # Fixed: use subscription_key defined in this cell instead of KEY from a
    # different cell (a NameError if this cell runs first).
    'Ocp-Apim-Subscription-Key': subscription_key,
}
data = {
    'name': 'api_facelist',
    # Fixed key typo: the Face API field is 'userData', not 'userDate'.
    'userData': '同学簿',
    'recognitionModel': 'recognition_03'
}
# PUT creates (or overwrites) the face list.
r = requests.put(create_facelists_url.format(FaceListId), headers=HEADERS, json=data)
# Fixed NameError: the response above is bound to `r`, not `r_create`.
r.content


# In[48]:


import requests

# 1. Create a face list
# faceListId: lowercase letters/digits only.
faceListId = "internet02"  # student fills in
create_facelists_url = "https://apifacialreco2020.cognitiveservices.azure.com/face/v1.0/facelists/{}"  # student fills in
subscription_key = "f2fe9d6f60f94d17a57ca228b4ab3f0e"  # student fills in
assert subscription_key

headers = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': subscription_key,
}
data = {
    'name': 'api_facelist',
    # Fixed key typo: the Face API field is 'userData', not 'userDate'.
    'userData': '同学簿',
    'recognitionModel': 'recognition_03'
    # student fills in
}

# PUT creates (or overwrites) the face list.
r_create = requests.put(create_facelists_url.format(faceListId), headers=headers, json=data)  # student fills in
r_create.content


# In[49]:


import requests

# 1. Create a face list
# faceListId: lowercase letters/digits only.
faceListId = "internet03"  # student fills in
create_facelists_url = "https://apifacialreco2020.cognitiveservices.azure.com/face/v1.0/facelists/{}"  # student fills in
subscription_key = "f2fe9d6f60f94d17a57ca228b4ab3f0e"  # student fills in
assert subscription_key

headers = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': subscription_key,
}
data = {
    'name': '19网新2班',
    # Fixed key typo: the Face API field is 'userData', not 'userDate'.
    'userData': '同学簿',
    'recognitionModel': 'recognition_03'
    # student fills in
}

# PUT creates (or overwrites) the face list.
r_create = requests.put(create_facelists_url.format(faceListId), headers=headers, json=data)  # student fills in
r_create
r_create.content


# In[5]:


import requests
import json

# NOTE(review): hard-coded subscription key — move to an env var before sharing.
KEY = 'f2fe9d6f60f94d17a57ca228b4ab3f0e'
BASE_URL = 'https://apifacialreco2020.cognitiveservices.azure.com/face/v1.0/detect'
HEADERS = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': KEY,
}

# Public test image to run face detection on.
img_url = 'https://upload.wikimedia.org/wikipedia/commons/3/37/Dagestani_man_and_woman.jpg'
data = {
    'url': img_url,
}
# Query-string parameters for the Detect call.
payload = {
    'returnFaceId': 'true',
    # Fixed typo: was 'flase', which the API does not accept as a boolean.
    'returnFaceLandmarks': 'false',
    'returnFaceAttributes': 'age,gender,glasses,smile,emotion,hair,makeup,accessories,blur,exposure,noise',
}
r = requests.post(BASE_URL, data=json.dumps(data), params=payload, headers=HEADERS)
r.status_code
r.content
print(type(r.json()))
r.json()


# In[7]:


import requests

# 1. Create a face list
# faceListId: must be lowercase letters/digits.
faceListId = "irene_test"  # student fills in (no uppercase allowed)
create_facelists_url = "https://apifacialreco2020.cognitiveservices.azure.com/face/v1.0/facelists/{}"  # student fills in
subscription_key = "f2fe9d6f60f94d17a57ca228b4ab3f0e"  # student fills in
assert subscription_key

headers = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': subscription_key,
}
data = {
    'name': '19网新2班',
    # Fixed key typo: the Face API field is 'userData', not 'userDate'.
    'userData': '同学簿',
    'recognitionModel': 'recognition_03'
    # student fills in
}

# PUT creates (or overwrites) the face list.
r_create = requests.put(create_facelists_url.format(faceListId), headers=headers, json=data)  # student fills in
r_create
r_create.content


# In[15]:


# Try adding a single face first.
# 2. Add Face: attach one persisted face to the face list created above.
add_face_url = "https://apifacialreco2020.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedFaces"

assert subscription_key
headers = {
    # Request headers
    'Content-Type': 'application/json',
    'Ocp-Apim-Subscription-Key': subscription_key,
}
# Public URL of the face photo to register.
img_url = "http://huangjieqi.gitee.io/picture_storage/Autumnhui.jpg"

# Label stored alongside the persisted face (here, the student's name).
params_add_face = {
    "userData": "丘天惠"
}

r_add_face = requests.post(
    add_face_url.format(faceListId),
    headers=headers,
    params=params_add_face,
    json={"url": img_url},
)
r_add_face.status_code


# Inspect the persistedFaceId returned by the service.
r_add_face.json()


# In[17]:


def AddFace(img_url, userData):
    """Register one face image in the module-level face list.

    :param img_url: public URL of the face photo
    :param userData: label stored with the persisted face (e.g. a student name)
    :return: HTTP status code of the Add Face call
    Relies on module globals `requests`, `subscription_key` and `faceListId`.
    """
    # Fixed syntax error: the endpoint string literal was truncated in the
    # original (`add_face_url =/facelists/{}/persistedFaces"`).
    # Also dropped the bogus `=str` defaults, which were a botched attempt
    # at type annotations.
    add_face_url = "https://apifacialreco2020.cognitiveservices.azure.com/face/v1.0/facelists/{}/persistedFaces"
    assert subscription_key
    headers = {
        # Request headers
        'Content-Type': 'application/json',
        'Ocp-Apim-Subscription-Key': subscription_key,
    }
    # userData travels as a query-string parameter, the image URL as JSON body.
    params_add_face = {
        "userData": userData
    }
    r_add_face = requests.post(add_face_url.format(faceListId),
                               headers=headers,
                               params=params_add_face,
                               json={"url": img_url})
    return r_add_face.status_code  # return the HTTP status code


# In[18]:


# Batch-register the class roster: one (image URL, student name) pair per call.
_roster = [
    ("http://huangjieqi.gitee.io/picture_storage/L-Tony-info.jpg", "林嘉茵"),
    ("http://huangjieqi.gitee.io/picture_storage/TLINGP.jpg", "汤玲萍"),
    ("http://huangjieqi.gitee.io/picture_storage/WenYanZeng.jpg", "曾雯燕"),
    ("http://huangjieqi.gitee.io/picture_storage/XIEIC.jpg", "谢依希"),
    ("http://huangjieqi.gitee.io/picture_storage/YuecongYang.png", "杨悦聪"),
    ("http://huangjieqi.gitee.io/picture_storage/Zoezhouyu.jpg", "周雨"),
    ("http://huangjieqi.gitee.io/picture_storage/crayon-heimi.jpg", "刘瑜鹏"),
    ("http://huangjieqi.gitee.io/picture_storage/jiayichen.jpg", "陈嘉仪"),
    ("http://huangjieqi.gitee.io/picture_storage/kg2000.jpg", "徐旖芊"),
    ("http://huangjieqi.gitee.io/picture_storage/liuxinrujiayou.jpg", "刘心如"),
    ("http://huangjieqi.gitee.io/picture_storage/liuyu19.png", "刘宇"),
    ("http://huangjieqi.gitee.io/picture_storage/ltco.jpg", "李婷"),
    ("http://huangjieqi.gitee.io/picture_storage/lucaszy.jpg", "黄智毅"),
    ("http://huangjieqi.gitee.io/picture_storage/pingzi0211.jpg", "黄慧文"),
    ("http://huangjieqi.gitee.io/picture_storage/shmimy-cn.jpg", "张铭睿"),
    ("http://huangjieqi.gitee.io/picture_storage/yichenting.jpg", "陈婷"),
    ("http://huangjieqi.gitee.io/picture_storage/coco022.jpg", "洪可凡"),
    ("http://huangjieqi.gitee.io/picture_storage/lujizhi.png", "卢继志"),
    ("http://huangjieqi.gitee.io/picture_storage/zzlhyy.jpg", "张梓乐"),
]
for _img, _name in _roster:
    AddFace(_img, _name)


# In[19]:


# Get Face List: fetch the list's metadata and all persisted faces.
get_facelist_url = "https://apifacialreco2020.cognitiveservices.azure.com/face/v1.0/facelists/irene_test"
r_get_facelist = requests.get(get_facelist_url,headers=headers)# reuses `headers` defined in an earlier cell
r_get_facelist.json()


# In[21]:


# 3. Detect faces in a photo to obtain a faceId
# replace <My Endpoint String> with the string from your endpoint URL
face_api_url = 'https://apifacialreco2020.cognitiveservices.azure.com/face/v1.0/detect'

# Request body: public URL of the image to analyze
image_url = 'https://upload.wikimedia.org/wikipedia/commons/3/37/Dagestani_man_and_woman.jpg'

headers = {'Ocp-Apim-Subscription-Key': subscription_key}

# Request parameters
params = {
    'returnFaceId': 'true',
    'returnFaceLandmarks': 'false',
    # model selection
    'recognitionModel':'recognition_03',# must match the recognitionModel the face list was created with
    'detectionModel':'detection_01',
    # optional parameters — read the API docs carefully
    'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
}

response = requests.post(face_api_url, params=params,
                         headers=headers, json={"url": image_url})
# (json.dumps turns JSON into a string; .json() here parses the response body instead)
response.json()


# In[22]:


# Find Similar: search a face list for faces similar to a detected faceId.
# NOTE(review): this cell targets a different resource (api-hjq) and face list
# ("zhichao01") than the ones created above — confirm this is intentional.
findsimilars_url = "https://api-hjq.cognitiveservices.azure.com/face/v1.0/findsimilars"

# Request body: faceId must come from a prior Detect call on a photo
data_findsimilars = {
    "faceId":"68548402-46f3-437b-9123-d51bbbe33af8",# faceId taken from the Detect cell above
    "faceListId": "zhichao01",
    "maxNumOfCandidatesReturned": 10,
    "mode": "matchFace"# or matchPerson — one is a verification mode, the other a similarity-ranking mode
    }

r_findsimilars = requests.post(findsimilars_url,headers=headers,json=data_findsimilars)


# Inspect the candidate faces with their confidence scores.
r_findsimilars.json()


# In[27]:


# The data stored inside the face list.
import pandas as pd
adf = pd.json_normalize(r_get_facelist.json()["persistedFaces"])# requires a recent pandas (top-level json_normalize)
adf


# Flatten the Find Similar response the same way.
import pandas as pd
bdf = pd.json_normalize(r_findsimilars.json())# requires a recent pandas
bdf


# Join face-list metadata with similarity results and rank by confidence.
pd.merge(adf, bdf,how='inner', on='persistedFaceId').sort_values(by="confidence",ascending = False)


# In[31]:


# 1. Import the modules we need
import requests


api_secret = "Tn7VD-BE_lfjxdJphHdeOH9wmd6LNts_"
# 2. Enter our API key
api_key = 'RQSRPGzY39dc4byfbCFx1F3wkBzkPkQm'  # Replace with a valid Subscription Key here.


# 3. Target URL
# A local image could also be used here, e.g. filepath = "image/tupian.jpg"
BASE_URL = 'https://api-cn.faceplusplus.com/facepp/v3/detect' 
img_url = 'http://newmedia.nfu.edu.cn/wcy/wp-content/uploads/2018/04/post_20180424__NFU_DoraHacks_imoji%E5%9B%A2%E9%98%9F.jpg'

# 4. Following the API docs' sample code, prepare our headers and image (data)

headers = {
    'Content-Type': 'application/json',
}

# 5. Prepare the data that goes after the "?" symbol (query string).
# NOTE(review): this puts api_key/api_secret in the URL query string, where
# they can end up in server logs — Face++ also accepts them as POST form data.

payload = {
    "image_url":img_url,
    'api_key': api_key,
    'api_secret': api_secret,
    'return_attributes':'gender,age,smiling,emotion', 
}


# 6. Send our request with requests
r = requests.post(BASE_URL, params=payload, headers=headers)
r.status_code


# Raw response bytes.
r.content


# requests' convenient method (r = response): parse the JSON body.
results = r.json() # 
results


# In[36]:


import pandas as pd
# from pandas.io.json import json_normalize
# results['faces']
# Flatten the 'faces' records — one row per detected face, nested keys
# becoming dotted column names (e.g. 'face_rectangle.top').
df = pd.json_normalize(results,record_path='faces')
df


# Keep only the face token and its bounding-box columns.
df_face_rectangle = df[['face_token','face_rectangle.top','face_rectangle.left','face_rectangle.width','face_rectangle.height']]


df


# In[39]:


# Now try pd.DataFrame directly and observe the difference:
# nested dicts stay as single object columns instead of being flattened.
faces_data = results['faces']
df = pd.DataFrame(faces_data)
df

# columns: attributes, face_rectangle, face_token
# check the API docs for their meaning


# Select one column (see the pandas cheat sheet).
df["face_rectangle"]


# Column of dicts -> plain dict keyed by row index.
df["face_rectangle"].to_dict()


# Rebuild a frame from that dict (row indices become columns).
pd.DataFrame(df["face_rectangle"].to_dict())


# Transpose so each face is a row again.
pd.DataFrame(df["face_rectangle"].to_dict()).T


# In[1]:


# Baidu face recognition (requires the baidu-aip package).
from aip import AipFace

# Bare string below reads "your APPID / AK / SK" — the Baidu AIP credentials.
""" 你的 APPID AK SK """
APP_ID = '22831596'
API_KEY = '4ox0zwUDCVSRO1mvTP24aL1M'
SECRET_KEY = 'KLx13PaGuGGkM9deK3FjF1ZKoLZnmbTg'

# Authenticated client used by the detection cells below.
client = AipFace(APP_ID, API_KEY, SECRET_KEY)


# In[3]:


""" 读取图片 """
def get_file_content(filePath):
    with open(filePath, 'rb') as fp:
        return fp.read()

image = get_file_content('http://newmedia.nfu.edu.cn/wcy/wp-content/uploads/2018/04/post_20180424__NFU_DoraHacks_imoji%E5%9B%A2%E9%98%9F.jpg')

""" 调用人脸检测 """
client.detect(image);

""" 如果有可选参数 """
options = {}
options["max_face_num"] = 2
options["face_fields"] = "age"

""" 带参数调用人脸检测 """
client.detect(image, options)


# In[12]:


# 首先pip install baidu-aip
# SDK文档链接http://ai.baidu.com/docs#/Face-Python-SDK/top
import base64
from aip import AipFace


APP_ID = '22831596'
API_KEY = '4ox0zwUDCVSRO1mvTP24aL1M'
SECRET_KEY = 'KLx13PaGuGGkM9deK3FjF1ZKoLZnmbTg'

client = AipFace(APP_ID, API_KEY, SECRET_KEY)

def face_check(img_data):
    """
    人脸识别demo
    :param img_data: 二进制的图片数据
    :return:
    """

    data = base64.b64encode(img_data)

    image = data.decode()

    imageType = "BASE64"

    """ 调用人脸检测 """
    client.detect(image, imageType)

    """ 如果有可选参数 """
    options = {}
    options["face_field"] = "beauty,age,faceshape,expression,gender,glasses"
    options["max_face_num"] = 10

    """ 带参数调用人脸检测 """
    res = client.detect(image, imageType, options)
    print(res)
    try:
        res_list = res['result']
    except Exception as e:
        res_list = None

    return res_list

if __name__ == "__main__":
    with open("remember.png", "rb") as f:
        data = f.read()

    res = face_check(data)
    print(res)


# In[ ]:


## Computer Vision


# In[2]:


import requests
# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
import json
from PIL import Image
from io import BytesIO

# Azure Computer Vision v3.1 Analyze endpoint and key.
subscription_key = 'dd748cf10bf9404399e5416d9399e218'
analyze_url = 'https://api-computervvsion-cyl.cognitiveservices.azure.com/vision/v3.1/analyze'


# Set image_url to the URL of an image that you want to analyze.
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/1/12/" +     "Broadway_and_Times_Square_by_night.jpg/450px-Broadway_and_Times_Square_by_night.jpg"

headers = {'Ocp-Apim-Subscription-Key': subscription_key}
# Ask for category, caption and color analysis.
params = {'visualFeatures': 'Categories,Description,Color'}
data = {'url': image_url}
response = requests.post(analyze_url, headers=headers,
                         params=params, json=data)
response.raise_for_status()

# The 'analysis' object contains various fields that describe the image. The most
# relevant caption for the image is obtained from the 'description' property.
analysis = response.json()
print(json.dumps(response.json()))
image_caption = analysis["description"]["captions"][0]["text"].capitalize()

# Display the image and overlay it with the caption.
image = Image.open(BytesIO(requests.get(image_url).content))
plt.imshow(image)
plt.axis("off")
_ = plt.title(image_caption, size="x-large", y=-0.1)
plt.show()


# In[6]:


import os
import sys
import requests
# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO

# Azure Computer Vision v3.1 Analyze endpoint and key.
subscription_key = 'dd748cf10bf9404399e5416d9399e218'
analyze_url = 'https://api-computervvsion-cyl.cognitiveservices.azure.com/vision/v3.1/analyze'


# Set image_path to the local path of an image that you want to analyze.
# Sample images are here, if needed:
# https://github.com/Azure-Samples/cognitive-services-sample-data-files/tree/master/ComputerVision/Images
image_path = "remember.png"

# Read the image into a byte array.
# Fixed: use a context manager so the file handle is closed promptly — the
# original `open(...).read()` leaked the handle.
with open(image_path, "rb") as image_file:
    image_data = image_file.read()

headers = {'Ocp-Apim-Subscription-Key': subscription_key,
           'Content-Type': 'application/octet-stream'}
params = {'visualFeatures': 'Categories,Description,Color'}
response = requests.post(
    analyze_url, headers=headers, params=params, data=image_data)
response.raise_for_status()

# The 'analysis' object contains various fields that describe the image. The most
# relevant caption for the image is obtained from the 'description' property.
analysis = response.json()
print(analysis)
image_caption = analysis["description"]["captions"][0]["text"].capitalize()

# Display the image and overlay it with the caption.
image = Image.open(BytesIO(image_data))
plt.imshow(image)
plt.axis("off")
_ = plt.title(image_caption, size="x-large", y=-0.1)
plt.show()


# In[7]:


import os
import sys
import requests
# If you are using a Jupyter notebook, uncomment the following lines.
# %matplotlib inline
# import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO

# Azure Computer Vision v3.1 endpoints and key.
subscription_key = 'dd748cf10bf9404399e5416d9399e218'
analyze_url = 'https://api-computervvsion-cyl.cognitiveservices.azure.com/vision/v3.1/analyze'


thumbnail_url = "https://api-computervvsion-cyl.cognitiveservices.azure.com/vision/v3.1/generateThumbnail"

# Set image_url to the URL of an image that you want to analyze.
image_url = "https://upload.wikimedia.org/wikipedia/commons/9/94/Bloodhound_Puppy.jpg"

# Construct URL: 50x50 thumbnail with smart cropping enabled.
headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {'width': '50', 'height': '50', 'smartCropping': 'true'}
data = {'url': image_url}
# Call API — the response body is the thumbnail image bytes, not JSON.
response = requests.post(thumbnail_url, headers=headers, params=params, json=data)
response.raise_for_status()

# Open the image from bytes
thumbnail = Image.open(BytesIO(response.content))

# Verify the thumbnail size.
print("Thumbnail is {0}-by-{1}".format(*thumbnail.size))

# Save thumbnail to file
thumbnail.save('thumbnail.png')

# Display image
thumbnail.show()

# Optional. Display the thumbnail from Jupyter.
# plt.imshow(thumbnail)
# plt.axis("off")


# In[8]:


import json
import os
import sys
import requests
import time
# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from PIL import Image
from io import BytesIO

missing_env = False

# Azure Computer Vision v3.1 Read (async OCR) endpoint and key.
subscription_key = 'dd748cf10bf9404399e5416d9399e218'
analyze_url = 'https://api-computervvsion-cyl.cognitiveservices.azure.com/vision/v3.1/analyze'
text_recognition_url = "https://api-computervvsion-cyl.cognitiveservices.azure.com/vision/v3.1/read/analyze"

# Set image_url to the URL of an image that you want to recognize.
image_url = "https://raw.githubusercontent.com/MicrosoftDocs/azure-docs/master/articles/cognitive-services/Computer-vision/Images/readsample.jpg"

headers = {'Ocp-Apim-Subscription-Key': subscription_key}
data = {'url': image_url}
response = requests.post(
    text_recognition_url, headers=headers, json=data)
response.raise_for_status()

# Extracting text requires two API calls: One call to submit the
# image for processing, the other to retrieve the text found in the image.

# Holds the URI used to retrieve the recognized text.
operation_url = response.headers["Operation-Location"]

# The recognized text isn't immediately available, so poll to wait for completion.
# NOTE(review): this loop has no retry cap — it spins forever if the service
# never reports "analyzeResult" or a 'failed' status. Consider a max-attempts limit.
analysis = {}
poll = True
while (poll):
    response_final = requests.get(
        response.headers["Operation-Location"], headers=headers)
    analysis = response_final.json()
    
    print(json.dumps(analysis, indent=4))

    time.sleep(1)
    if ("analyzeResult" in analysis):
        poll = False
    if ("status" in analysis and analysis['status'] == 'failed'):
        poll = False

polygons = []
if ("analyzeResult" in analysis):
    # Extract the recognized text, with bounding boxes.
    polygons = [(line["boundingBox"], line["text"])
                for line in analysis["analyzeResult"]["readResults"][0]["lines"]]

# Display the image and overlay it with the extracted text.
image = Image.open(BytesIO(requests.get(image_url).content))
ax = plt.imshow(image)
for polygon in polygons:
    # boundingBox is a flat [x0, y0, x1, y1, ...] list; pair up the coordinates.
    vertices = [(polygon[0][i], polygon[0][i+1])
                for i in range(0, len(polygon[0]), 2)]
    text = polygon[1]
    patch = Polygon(vertices, closed=True, fill=False, linewidth=2, color='y')
    ax.axes.add_patch(patch)
    plt.text(vertices[0][0], vertices[0][1], text, fontsize=20, va="top")
plt.show()


# In[9]:


import os
import sys
import requests
# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from PIL import Image
from io import BytesIO

# Azure Computer Vision v3.1 synchronous OCR endpoint and key.
subscription_key = 'dd748cf10bf9404399e5416d9399e218'
analyze_url = 'https://api-computervvsion-cyl.cognitiveservices.azure.com/vision/v3.1/analyze'
ocr_url = "https://api-computervvsion-cyl.cognitiveservices.azure.com/vision/v3.1/ocr"

# Set image_url to the URL of an image that you want to analyze.
image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/a/af/" +     "Atomist_quote_from_Democritus.png/338px-Atomist_quote_from_Democritus.png"

headers = {'Ocp-Apim-Subscription-Key': subscription_key}
# 'unk' lets the service auto-detect the language.
params = {'language': 'unk', 'detectOrientation': 'true'}
data = {'url': image_url}
response = requests.post(ocr_url, headers=headers, params=params, json=data)
response.raise_for_status()

analysis = response.json()

# Extract the word bounding boxes and text.
line_infos = [region["lines"] for region in analysis["regions"]]
word_infos = []
for line in line_infos:
    for word_metadata in line:
        for word_info in word_metadata["words"]:
            word_infos.append(word_info)
word_infos

# Display the image and overlay it with the extracted text.
plt.figure(figsize=(5, 5))
image = Image.open(BytesIO(requests.get(image_url).content))
ax = plt.imshow(image, alpha=0.5)
for word in word_infos:
    # boundingBox is "left,top,width,height" as a comma-separated string.
    bbox = [int(num) for num in word["boundingBox"].split(",")]
    text = word["text"]
    origin = (bbox[0], bbox[1])
    patch = Rectangle(origin, bbox[2], bbox[3],
                      fill=False, linewidth=2, color='y')
    ax.axes.add_patch(patch)
    plt.text(origin[0], origin[1], text, fontsize=20, weight="bold", va="top")
# Fixed: axis("off") must run before show() to take effect — the original
# called it after show(), where it did nothing.
plt.axis("off")
plt.show()


# In[10]:


import os
import sys
import requests
# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO

# Azure Computer Vision v3.1 domain-specific (landmarks) endpoint and key.
subscription_key = 'dd748cf10bf9404399e5416d9399e218'
analyze_url = 'https://api-computervvsion-cyl.cognitiveservices.azure.com/vision/v3.1/analyze'
landmark_analyze_url = "https://api-computervvsion-cyl.cognitiveservices.azure.com/vision/v3.1/models/landmarks/analyze"

# Set image_url to the URL of an image that you want to analyze.
image_url = "https://upload.wikimedia.org/wikipedia/commons/f/f6/" +     "Bunker_Hill_Monument_2005.jpg"

headers = {'Ocp-Apim-Subscription-Key': subscription_key}
params = {'model': 'landmarks'}
data = {'url': image_url}
response = requests.post(
    landmark_analyze_url, headers=headers, params=params, json=data)
response.raise_for_status()

# The 'analysis' object contains various fields that describe the image. The
# most relevant landmark for the image is obtained from the 'result' property.
analysis = response.json()
# Fixed: `x is not []` is always True (identity vs a fresh list literal);
# a truthiness check actually verifies that at least one landmark was found.
assert analysis["result"]["landmarks"], "no landmarks detected"
print(analysis)
landmark_name = analysis["result"]["landmarks"][0]["name"].capitalize()

# Display the image and overlay it with the landmark name.
image = Image.open(BytesIO(requests.get(image_url).content))
plt.imshow(image)
plt.axis("off")
_ = plt.title(landmark_name, size="x-large", y=-0.1)
plt.show()


# In[ ]:




