import os
import sys
import requests
# If you are using a Jupyter notebook, uncomment the following line.
# %matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from PIL import Image
from io import BytesIO


# Azure Computer Vision OCR configuration.
# NOTE(security): avoid hard-coding API keys in source. Read the endpoint
# and key from the environment, falling back to the original literals so
# existing behavior is preserved when the variables are unset.
endpoint = os.environ.get(
    "COMPUTER_VISION_ENDPOINT",
    "https://computervisiontest01.cognitiveservices.azure.cn/")
subscription_key = os.environ.get(
    "COMPUTER_VISION_SUBSCRIPTION_KEY",
    "a73dc788ea304658a86d305400576ae5")
ocr_url = endpoint + "vision/v2.1/ocr"

# Publicly reachable image to run OCR on.
image_url = "https://exp-picture.cdn.bcebos.com/b955ead0b503c8d239d0b1fa498333bf3aef2187.jpg?x-bce-process=image%2Fresize%2Cm_lfit%2Cw_500%2Climit_1"

headers = {'Ocp-Apim-Subscription-Key': subscription_key}
# 'unk' asks the service to auto-detect the language; detectOrientation
# lets it correct rotated text before recognition.
params = {'language': 'unk', 'detectOrientation': 'true'}
data = {'url': image_url}
# timeout prevents the script from hanging forever on a stalled connection.
response = requests.post(
    ocr_url, headers=headers, params=params, json=data, timeout=30)
response.raise_for_status()  # surface HTTP errors (bad key, quota, etc.)
analysis = response.json()

# Extract the word bounding boxes and text.
# The OCR payload nests words three levels deep:
# analysis["regions"] -> each region's "lines" -> each line's "words".
line_infos = [region["lines"] for region in analysis["regions"]]
word_infos = [
    word
    for region_lines in line_infos
    for line in region_lines
    for word in line["words"]
]
print(word_infos)

# Display the image and overlay it with the extracted text.
plt.figure(figsize=(5, 5))
# timeout prevents a hang if the image host stalls.
image = Image.open(BytesIO(requests.get(image_url, timeout=30).content))
ax = plt.imshow(image, alpha=0.5)
for word in word_infos:
    # "boundingBox" is a comma-separated string: "left,top,width,height".
    bbox = [int(num) for num in word["boundingBox"].split(",")]
    text = word["text"]
    origin = (bbox[0], bbox[1])
    patch = Rectangle(origin, bbox[2], bbox[3],
                      fill=False, linewidth=2, color='y')
    ax.axes.add_patch(patch)
    plt.text(origin[0], origin[1], text, fontsize=20, weight="bold", va="top")
# BUGFIX: axis("off") must be applied BEFORE show(); the original called it
# after show(), where it had no effect on the displayed figure (and could
# spawn an empty new figure in non-interactive backends).
plt.axis("off")
plt.show()

