def location(f):
    """Detect the main objects in an image (Baidu multi-object-detect API).

    Args:
        f: binary file-like object containing the image.

    Returns:
        dict: the parsed JSON response from the API.
    """
    request_url = "https://aip.baidubce.com/rest/2.0/image-classify/v1/multi_object_detect"
    # The API expects the image payload base64-encoded.
    img = base64.b64encode(f.read())
    params = {"image": img}
    # BUG FIX: original had `access_token = access_token`, a self-assignment
    # that raises UnboundLocalError. Use the module-level token directly.
    request_url = request_url + "?access_token=" + access_token
    headers = {'content-type': 'application/x-www-form-urlencoded'}
    response = requests.post(request_url, data=params, headers=headers)
    # BUG FIX: original computed response.json() but discarded it; return it.
    return response.json()


def anaylse(f):
    """OCR-based document analysis of an exam paper (Baidu doc_analysis API).

    Args:
        f: binary file-like object containing the page image.

    Returns:
        dict: the parsed JSON response from the API.
    """
    encoded = base64.b64encode(f.read())
    payload = {
        "image": encoded,
        "language_type": "CHN_ENG",
        "result_type": "big",
    }
    url = (
        "https://aip.baidubce.com/rest/2.0/ocr/v1/doc_analysis"
        "?access_token=" + access_token
    )
    resp = requests.post(
        url,
        data=payload,
        headers={'content-type': 'application/x-www-form-urlencoded'},
    )
    return resp.json()

 def txt_correction(content):
 	# 文本纠错
    print ('原文：',content)
    url = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/ecnet'
    params = dict()
    params['text'] = content
    params = json.dumps(params).encode('utf-8')
    access_token = token
    url = url + "?access_token=" + access_token
    request = urllib.request.Request(url=url, data=params)
    request.add_header('Content-Type', 'application/json')
    response = urllib.request.urlopen(request)
    content = response.read()
    if content:
        content=content.decode('GB2312')
        data = json.loads(content)

        item=data['item']
        print('纠错后：',item['correct_query'])
        print('Score：',item['score'])

 def DNN(content):
 	# DNN语言模式
    url = ' https://aip.baidubce.com/rpc/2.0/nlp/v2/dnnlm_cn'
    params = dict()
    params['text'] = content
    params = json.dumps(params).encode('utf-8')
    access_token = token
    url = url + "?access_token=" + access_token
    request = urllib.request.Request(url=url, data=params)
    request.add_header('Content-Type', 'application/json')
    response = urllib.request.urlopen(request)
    retrun response.json()

def category(title, content):
    """Classify an article into Baidu topic categories.

    Args:
        title (str): the article title.
        content (str): the article body.

    Returns:
        The tag value(s) under item.lv1_tag_list in the API response.
    """
    # BUG FIX: original pointed at the ecnet (text-correction) endpoint;
    # article topic classification lives at /topic, which takes
    # 'title' + 'content' fields.
    url = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/topic'
    payload = json.dumps({'title': title, 'content': content}).encode('utf-8')
    # NOTE(review): hard-coded access token has a 30-day TTL and is expired;
    # it should come from configuration like the other functions' `token`.
    access_token = '24.fc60dac90f6118f1dae66bc01608da6c.2592000.1613797053.282335-23570619'
    url = url + "?access_token=" + access_token
    request = urllib.request.Request(url=url, data=payload)
    request.add_header('Content-Type', 'application/json')
    response = urllib.request.urlopen(request)
    # BUG FIX: original subscripted the HTTPResponse object directly and the
    # 'item' key literal was missing its closing quote (SyntaxError).
    # Parse the JSON body first.
    data = json.loads(response.read().decode('GB2312'))
    # NOTE(review): lv1_tag_list may be a *list* of {'score', 'tag'} dicts —
    # confirm the exact shape against a live response before relying on this.
    return data['item']['lv1_tag_list']['tag']

def compare(word1, word2):
    """Word-embedding similarity between two words (Baidu word_emb_sim API).

    Args:
        word1 (str): first word.
        word2 (str): second word.

    Returns:
        The 'score' field of the parsed JSON response.
    """
    url = 'https://aip.baidubce.com/rpc/2.0/nlp/v2/word_emb_sim'
    payload = json.dumps({'word1': word1, 'word2': word2}).encode('utf-8')
    # NOTE(review): 'access_token' is a placeholder literal, not a real
    # credential — the call will fail until a valid token is supplied.
    access_token = 'access_token'
    url = url + "?access_token=" + access_token
    request = urllib.request.Request(url=url, data=payload)
    request.add_header('Content-Type', 'application/json')
    response = urllib.request.urlopen(request)
    # BUG FIX: original referenced undefined name `respnse` and called
    # .json() on an http.client.HTTPResponse (no such method). Parse the
    # body and return the similarity score, as the dangling
    # `respnse['score']` line intended.
    data = json.loads(response.read().decode('GB2312'))
    return data['score']

 def writing(img):
    request_url = "https://aip.baidubce.com/rest/2.0/ocr/v1/doc_analysis"
    img = base64.b64encode(f.read())
    params = {"image":img,"language_type":"CHN_ENG","result_type":"big"}
    request_url = request_url + "?access_token=" + access_token
    headers = {'content-type': 'application/x-www-form-urlencoded'}
    response = requests.post(request_url, data=params, headers=headers)
    response=response.json()
    for sentence in  response:
        sentence=sentence.split('。') #以句号分开文章，单独拿出句子分析
    txt_correction=txt_correction(content1)   #调用文字纠错api
    sentence=content1
    DNN=DNN(content2)        #引入DNN语言模型
    content2=content1
    category=category(title,content3)    #引入文章分类
    title=sentence[0]                   #文章题目
    content3=sentence[1:]                # 文章内容
    compare=compare(word1,word2)
    word1=''  # 主题                 # 词义相似度
    word2='title' # 文章主题
    return response





