import sys
import json
import time

import pandas as pd
from pandas import DataFrame

from requests import Request
from urllib.request import urlopen
from urllib.request import Request
from urllib.error import URLError

# Access token appended to the request URL
token = "24.9d5c7a256d0a7e83ef48e7c83f5b1d8d.2592000.1622727923.282335-24104019"

# Header required by the API documentation
Header = {
    'Content-Type': 'application/json'
}

# Request endpoint + personal access token
ACCESS_URL = 'https://aip.baidubce.com/rpc/2.0/nlp/v1/address?charset=UTF-8&access_token=' + token

# Global path-configuration dict
PATHDIR = {'file_path': './file/'}


# Hand-rolled request helper so errors are easy to inspect
def request(url, data):
    """
    POST `data` to `url` with the JSON content-type header.

    :param url: request URL
    :param data: request payload as a str; UTF-8 encoded before sending
    :return: the decoded response body on success, otherwise None
             (the error is printed instead of raised)
    """
    req = Request(url, data.encode('utf-8'), headers=Header)
    try:
        # Context manager guarantees the connection is closed even if
        # read()/decode() fails — the original leaked the handle.
        with urlopen(req) as f:
            return f.read().decode()
    except URLError as err:
        print(err)
        return None  # explicit: callers must handle a failed request


def file_to_dataframe(fileName: str, start: int = 0):
    """
    Call the Baidu address API for every address in
    `<file_path>/<fileName>.csv` and append the structured results to
    `<file_path>/<fileName>baidu.csv`.

    :param fileName: base name (no extension) of the input CSV; only the
                     first column is read as address text
    :param start: index of the first address to process, so an interrupted
                  run can be resumed (the original hard-coded 17149 here
                  as a one-off checkpoint)
    :return: None; the side effect is rows appended to the output CSV
    """
    # Collect all non-empty values of the first column as address strings.
    df = pd.read_csv(PATHDIR['file_path'] + fileName + '.csv', usecols=[0])
    address_sum = [str(v[0]) for v in df.values if str(v[0]) != "nan"]

    address = []  # one row per parsed address, later written as a DataFrame
    try:
        for addr in address_sum[start:]:
            response = request(ACCESS_URL, json.dumps(
                {
                    "text": addr,
                    "confidence": 50
                }))
            data = json.loads(response)
            # .get() returns None instead of raising KeyError when the
            # API response omits a field (e.g. no lng/lat resolved).
            address.append([
                data.get('text'), data.get('province'), data.get('city'),
                data.get('county'), data.get('town'), data.get('detail'),
                data.get('lng'), data.get('lat'),
            ])
            print(data)
            time.sleep(0.1)  # crude rate limiting for the API quota
    except Exception as err:
        # Report why processing stopped but keep the partial results.
        # (The original bare `except:` hid the error AND wrote the CSV
        # twice — once here and once after the try — duplicating rows
        # because of mode='a+'.)
        print(err)
    finally:
        # Single write path for both the success and the failure case.
        if address:
            dfSum = DataFrame(address)
            dfSum.columns = ['原地址', '省', '市', '区（县）', '街道（乡/镇）', '详细地址', '经度', '纬度']
            dfSum.to_csv(PATHDIR['file_path'] + fileName + "baidu.csv",
                         index=False, sep=',', encoding='utf_8_sig', mode='a+')


if __name__ == '__main__':
    # file_to_dataframe writes its output CSV as a side effect and
    # returns None, so wrapping the call in print() only printed "None".
    file_to_dataframe('addr')
