import requests
import pandas as pd
from urllib.parse import quote

# AMap (Gaode Maps) Web-service API key, supplied interactively at startup.
api_key = input("请输入你的密钥：")


# Longitude/latitude of the three reference points to search around.
locations = dict(
    location1=(114.277158, 30.592578),
    location2=(114.273801, 30.59283),
    location3=(114.273599, 30.596733),
)

# One result bucket per reference point; filled by the API loop below.
pois_by_location = {name: [] for name in locations}

# Query the AMap "place/around" endpoint once per reference point and store
# a trimmed record (name, coordinates, distance) for every POI returned.
for loc_name, (lng, lat) in locations.items():
    # Let `requests` build and URL-encode the query string instead of
    # hand-assembling it with f-strings and quote().
    params = {
        'key': api_key,
        'location': f'{lng},{lat}',
        'radius': 10000,    # search radius in metres
        'types': '070500',  # AMap POI category code used by the original query
        'offset': 1000,     # requested page size (the server may cap this)
    }
    try:
        # A timeout keeps the script from hanging forever on a dead connection.
        response = requests.get(
            'https://restapi.amap.com/v3/place/around',
            params=params,
            timeout=10,
        )
    except requests.RequestException as exc:
        # Network failure for this point: report it, leave its bucket empty
        # (which makes the later intersection empty), and continue.
        print(f"请求失败：{exc}")
        continue
    if response.status_code == 200:
        data = response.json()
        # NOTE(review): AMap reports API-level errors (bad key, quota) with
        # HTTP 200 and status == '0'; such responses just yield no POIs here.
        pois = data.get('pois', [])
        pois_by_location[loc_name] = [
            {
                'name': poi['name'],
                'location': poi['location'],
                'distance': poi.get('distance', 0),
            }
            for poi in pois
        ]
    else:
        print(f"请求失败，状态码：{response.status_code}")

# A POI counts as "common" when its name shows up in the results of every
# reference point.
per_location_names = [
    {poi['name'] for poi in poi_list}
    for poi_list in pois_by_location.values()
]
common_pois_names = set.intersection(*per_location_names)

# For each common name, add up its distance to all reference points, then
# emit one row per matching POI occurrence (one per reference point; the
# post-processing step de-duplicates these identical rows).
common_pois_data = []
for name in common_pois_names:
    hits = [
        poi
        for poi_list in pois_by_location.values()
        for poi in poi_list
        if poi['name'] == name
    ]
    combined_distance = sum(int(poi['distance']) for poi in hits)
    for poi in hits:
        common_pois_data.append(
            {
                'name': name,
                'total_distance': combined_distance,
                'location': poi['location'],
            }
        )

# Turn the aggregated rows into a DataFrame, order them so the networks with
# the smallest combined distance come first, and keep just name + location.
# NOTE: every sorted row is written here — the top-25 cut and the row
# de-duplication happen in the post-processing step further down.
df_distances = pd.DataFrame(common_pois_data)
df_final = df_distances.sort_values(by='total_distance')[['name', 'location']]

# Persist to an intermediate CSV consumed (and later deleted) below.
df_final.to_csv('top_25_common_express_networks.csv', index=False)












# ---- Post-processing: keep the 25 closest unique networks ----

# Path of the intermediate CSV produced above.
input_csv_file = 'top_25_common_express_networks.csv'
# Columns to carry over into the final file.
columns_to_extract = ['name', 'location']
# Destination file name, chosen interactively by the user.
output_csv_file = input("请输入表格名称(别忘了加.csv):")

# Fix: read via the variable declared for exactly this purpose instead of
# repeating the path literal (the original hardcoded the string here).
df = pd.read_csv(input_csv_file)

# Guard against a malformed intermediate file before selecting columns.
if set(columns_to_extract).issubset(df.columns):
    # Keep only the requested columns, drop duplicate rows (the aggregation
    # step emits one identical row per reference point), and take the first
    # 25 — or all rows when fewer than 25 remain.
    df_top25 = df[columns_to_extract].drop_duplicates().head(25)

    # Save the final result where the user asked for it.
    df_top25.to_csv(output_csv_file, index=False)
    print(f'提取的列的前25个不重复的数据已保存到 {output_csv_file}')
else:
    print(f'在 {input_csv_file} 中找不到指定的列')

import os

# The intermediate CSV has served its purpose; remove it.
csv_file_path = 'top_25_common_express_networks.csv'

# EAFP: attempt the removal and tolerate a missing file. This avoids the
# check-then-act race of the original exists()/remove() pair and behaves
# identically when the file was never created.
try:
    os.remove(csv_file_path)
except FileNotFoundError:
    pass
