{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  },
  "orig_nbformat": 4,
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.8.8 64-bit ('base': conda)"
  },
  "interpreter": {
   "hash": "0420c7ec21f97dee79321d356e99f2857a7b9f3bc9adae6d2bbba6d2122e09ae"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "#导入模块\n",
    "import requests\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import json\n",
    "import sklearn\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sn\n",
    "import random\n",
    "from sklearn.cluster import DBSCAN\n",
    "import folium\n",
    "from folium import plugins\n",
    "from folium import FeatureGroup, Marker, LayerControl\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Configuration cell: all tunable parameters for the pipeline live here.\n",
     "\n",
     "# Input directory; every file inside is processed (may become an explicit list later).\n",
     "work_dir = './data/'\n",
     "# name = '桐庐.xlsx'\n",
     "names = os.listdir(work_dir)\n",
     "out_dir = './html/'  # output directory for the generated folium HTML maps\n",
     "\n",
     "\n",
     "\n",
     "# Hotel used as the start point of every route (geocoded via get_hotel_point).\n",
     "hotel_adress = '汉庭酒店(杭州临安万华店)'\n",
     "hotel_city= '杭州市'\n",
     "# hotel_point = [0,0]\n",
     "\n",
     "# DBSCAN clustering parameters (units are coordinate degrees, same as 经度/纬度).\n",
     "eps=0.03\n",
     "min_samples=3\n",
     "\n",
     "# Genetic-algorithm (TSP) hyper-parameters.\n",
     "CROSS_RATE = 0.1\n",
     "MUTATE_RATE = 0.1\n",
     "POP_SIZE = 500\n",
     "N_GENERATIONS = 500\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "<ipython-input-10-8cbe46bd79ae>:10: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  df_clean['distance'] = np.sqrt(np.square(df_clean['经度'] - hotel_point[0]) +np.square(df_clean['纬度'] - hotel_point[1])) #计算各点到旅馆的距离\n",
      "<ipython-input-13-fd794f0eb0c1>:42: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.\n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  cross_points = np.random.randint(0, 2, self.DNA_size).astype(np.bool)   # choose crossover points\n",
      "<ipython-input-10-8cbe46bd79ae>:10: SettingWithCopyWarning: \n",
      "A value is trying to be set on a copy of a slice from a DataFrame.\n",
      "Try using .loc[row_indexer,col_indexer] = value instead\n",
      "\n",
      "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n",
      "  df_clean['distance'] = np.sqrt(np.square(df_clean['经度'] - hotel_point[0]) +np.square(df_clean['纬度'] - hotel_point[1])) #计算各点到旅馆的距离\n",
      "<ipython-input-13-fd794f0eb0c1>:42: DeprecationWarning: `np.bool` is a deprecated alias for the builtin `bool`. To silence this warning, use `bool` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.bool_` here.\n",
      "Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations\n",
      "  cross_points = np.random.randint(0, 2, self.DNA_size).astype(np.bool)   # choose crossover points\n"
     ]
    }
   ],
   "source": [
     "# Main driver: builds one HTML route map per input spreadsheet.\n",
     "# NOTE(review): this cell calls functions defined in LATER cells\n",
     "# (get_hotel_point, get_data, ...), so Restart & Run All fails; the\n",
     "# definition cells should be moved above this one.\n",
     "for name in names: # iterate over every file in the data folder\n",
     "    hotel_point = get_hotel_point(hotel_adress,hotel_city)\n",
     "    df = get_data(work_dir,name)\n",
     "    groups_dict = get_groups_dict(df)\n",
     "    html = My_html(hotel_point) # initialise the map/plotting object\n",
     "    colors = ['red','green','blue','yellow']\n",
     "\n",
     "    # main loop: build 3 routes per file, one colour each\n",
     "    for i in range(3):\n",
     "        dis_dict = get_min_close(groups_dict) # label -> min distance to hotel. NOTE(review): unused; recomputed on the next line\n",
     "        closest_label = sorted(get_min_close(groups_dict).items(), key=lambda obj: obj[1])[0][0] # pick the cluster closest to the hotel\n",
     "        # random_label = random.choice(list(groups_dict.keys())) # (alternative: pick a random group)\n",
     "        uesd_df = get_group_and_upgrade_groups_dict(closest_label) # the batch of points to route in this pass\n",
     "\n",
     "        # GA initialisation\n",
     "        N_CITIES = len(uesd_df)# DNA size\n",
     "        ga = GA(DNA_size=N_CITIES, cross_rate=CROSS_RATE, mutation_rate=MUTATE_RATE, pop_size=POP_SIZE)\n",
     "        city_position = uesd_df[['经度','纬度']].values\n",
     "        # GA training loop\n",
     "        for generation in range(N_GENERATIONS):\n",
     "            lx, ly = ga.translateDNA(ga.pop, city_position)\n",
     "            fitness, total_distance = ga.get_fitness(lx, ly)\n",
     "            ga.evolve(fitness)\n",
     "            best_idx = np.argmax(fitness)\n",
     "            # print('Gen:', generation, '| best fit: %.2f' % total_distance[best_idx],)\n",
     "        # Export the GA result: result_df is uesd_df reordered along the best route\n",
     "        # result_df = uesd_df.iloc[ga.pop[best_idx],:]\n",
     "\n",
     "        result_df = uesd_df.copy()\n",
     "        result_df = result_df.set_index('经度')\n",
     "        result_df = result_df.loc[lx[best_idx],:].drop_duplicates('公司名称').reset_index()\n",
     "\n",
     "        # draw this route\n",
     "        html.plot_point_and_line(result_df['纬度'],result_df['经度'],result_df['公司名称'],color=colors[i])\n",
     "    html.plot_point_and_line([hotel_point[1]],[hotel_point[0]],['旅馆'],color='black',icon=folium.Icon(color='black', icon='home',))\n",
     "    html_name = name.split('.')[0]+'.html' # file name for the saved map\n",
     "    html.save(out_dir+html_name) # write the map to disk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "[119.704713, 30.219998]"
      ]
     },
     "metadata": {},
     "execution_count": 3
    }
   ],
   "source": [
    "def get_hotel_point(hotel_adress,hotel_city):\n",
    "    '''\n",
    "    获取旅馆经纬度\n",
    "    '''\n",
    "    key = '676288229ff9fa23f7bcebf6b1dc510a'\n",
    "    url = r'https://restapi.amap.com/v3/place/text?key=%s&keywords=%s&citylimit=%s'%(key,hotel_adress,hotel_city)\n",
    "\n",
    "    result = requests.get(url)\n",
    "    a = json.loads(result.text)\n",
    "    tmp = a['pois']\n",
    "    if len(tmp)>0:\n",
    "\n",
    "        hotel_point = list(map(float,a['pois'][0]['location'].split(',')))\n",
    "    return hotel_point\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_data(work_dir,name):\n",
    "    #读取临安组的最新数据\n",
    "    df = pd.read_excel(work_dir+name,index_col='index')  #这里从腾讯文档下载数据，要将数据打开查看，将多余的列处理，不然后面dropna会带来很大麻烦\n",
    "    df = df[df['是否调研']!=1] #去掉已经调研过得点\n",
    "    df =df[['公司名称','经度','纬度','分组']] #留下有用的字段\n",
    "    df.dropna(inplace=True)\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_groups_dict(df):\n",
    "    '''\n",
    "    使用密度聚类，获得label。返回一个label为建，group_df为值得dict\n",
    "    '''\n",
    "    X = df[['经度','纬度']].astype('float').values #X是传入聚类算法的预测数据\n",
    "    clf = DBSCAN(eps = eps, min_samples = min_samples) #初始化聚类算法\n",
    "    y_pred = clf.fit_predict(X) #使用密度聚类进行预测。y_pred是一维向量，返回了各个数据对应的组别\n",
    "    df['label'] = y_pred #将得到的预测结果保存到初始的df中\n",
    "    df_clean = df[(df['label'] != -1)] #干掉孤立点\n",
    "    df_clean['distance'] = np.sqrt(np.square(df_clean['经度'] - hotel_point[0]) +np.square(df_clean['纬度'] - hotel_point[1])) #计算各点到旅馆的距离\n",
    "    groups = df_clean.groupby('label') #按照label对df_clean进行分组。df_clean可以理解为df.copy()\n",
    "    groups_dict = {} #因为要对group进行删改，构建一个group_dict存放df\n",
    "    for label,group in groups:\n",
    "        if len(group)>5: #剔除长度小于5的类别\n",
    "            groups_dict[label] = group\n",
    "    return groups_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_min_close(dict_):\n",
    "    '''\n",
    "    制作各类与group的最近距离\n",
    "    '''\n",
    "    dis_dict = {}\n",
    "    for label,group in dict_.items():\n",
    "        dis_dict[label] = group['distance'].min()\n",
    "    return dis_dict\n",
    "def get_num(df):\n",
    "    '''\n",
    "    get_group_and_upgrade_groups_dict的辅助函数\n",
    "    根据df剩余的长度，决定是全部取完，还是先取10个，如果全部取完，则flag返回1\n",
    "    '''\n",
    "    length = len(df)\n",
    "    flag = 0 #这个组是否被取完\n",
    "    if length <= 20:\n",
    "        number = length\n",
    "        flag=1\n",
    "    else:\n",
    "        number = 10\n",
    "    return  flag, number\n",
    "\n",
    "def get_top_10_df(df):\n",
    "    '''\n",
    "    get_group_and_upgrade_groups_dict的辅助函数\n",
    "    如果一个df不取完，只取10个，那么先找离宾馆最近的点A，再找离A点最近的10个点（包括A本身）\n",
    "    '''\n",
    "    df.sort_values('distance',inplace=True)\n",
    "    a_index = df.index[0]\n",
    "    jingdu,weidu = df.loc[a_index,['经度','纬度']].values\n",
    "    df['distance2'] = ((df['经度']-jingdu)**2+(df['纬度']-weidu)**2)**0.5\n",
    "    df.sort_values('distance2',inplace=True)\n",
    "    return df.head(10)\n",
    "\n",
    "def get_group_and_upgrade_groups_dict(random_label):\n",
    "    '''\n",
    "    获取要进行tsp规划的df，同时更新groups_dict\n",
    "    '''\n",
    "    flag, number = get_num(groups_dict[random_label])\n",
    "    if flag==1:\n",
    "        group = groups_dict[random_label] \n",
    "        del groups_dict[random_label] \n",
    "        return group\n",
    "    elif flag==0:\n",
    "        top10_df = get_top_10_df(groups_dict[random_label])\n",
    "        groups_dict[random_label] = groups_dict[random_label][~groups_dict[random_label].index.isin(top10_df.index)]\n",
    "        return top10_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "#TSP辅助函数与类\n",
    "\n",
    "\n",
    "class GA(object):\n",
    "    #遗传算法主类\n",
    "    def __init__(self, DNA_size, cross_rate, mutation_rate, pop_size, ):\n",
    "        self.DNA_size = DNA_size\n",
    "        self.cross_rate = cross_rate\n",
    "        self.mutate_rate = mutation_rate\n",
    "        self.pop_size = pop_size\n",
    "        self.pop = np.vstack([np.random.permutation(DNA_size) for _ in range(pop_size)])\n",
    "\n",
    "    def translateDNA(self, DNA, city_position):     # get cities' coord in order\n",
    "        line_x = np.empty_like(DNA, dtype=np.float64)\n",
    "        line_y = np.empty_like(DNA, dtype=np.float64)\n",
    "        for i, d in enumerate(DNA):\n",
    "            city_coord = city_position[d]\n",
    "            line_x[i, :] = city_coord[:, 0]\n",
    "            line_y[i, :] = city_coord[:, 1]\n",
    "        return line_x, line_y\n",
    "\n",
    "    def get_fitness(self, line_x, line_y):\n",
    "        total_distance = np.empty((line_x.shape[0],), dtype=np.float64)\n",
    "        for i, (xs, ys) in enumerate(zip(line_x, line_y)):\n",
    "            # total_distance[i] = np.sum(np.sqrt(np.square(np.append(np.diff(xs),xs[-1]-xs[0])) + np.square(np.append(np.diff(ys),ys[-1]-ys[0]))))\n",
    "            total_distance[i] = np.sum(np.sqrt(np.square(np.diff(xs)) + np.square(np.diff(ys))))\n",
    "        total_distance*=10\n",
    "        fitness = np.exp(self.DNA_size * 2 / total_distance)\n",
    "        self.total_distance = total_distance\n",
    "        return fitness, total_distance\n",
    "\n",
    "    def select(self, fitness):\n",
    "        try:\n",
    "            idx = np.random.choice(np.arange(self.pop_size), size=self.pop_size, replace=True, p=(fitness) / (fitness).sum())\n",
    "        except:\n",
    "            print((fitness))\n",
    "        return self.pop[idx]\n",
    "\n",
    "    def crossover(self, parent, pop):\n",
    "        if np.random.rand() < self.cross_rate:\n",
    "            i_ = np.random.randint(0, self.pop_size, size=1)                        # select another individual from pop\n",
    "            cross_points = np.random.randint(0, 2, self.DNA_size).astype(np.bool)   # choose crossover points\n",
    "            keep_city = parent[~cross_points]                                       # find the city number\n",
    "            swap_city = pop[i_, np.isin(pop[i_].ravel(), keep_city, invert=True)]\n",
    "            parent[:] = np.concatenate((keep_city, swap_city))\n",
    "        return parent\n",
    "\n",
    "    def mutate(self, child):\n",
    "        for point in range(self.DNA_size):\n",
    "            if np.random.rand() < self.mutate_rate:\n",
    "                swap_point = np.random.randint(0, self.DNA_size)\n",
    "                swapA, swapB = child[point], child[swap_point]\n",
    "                child[point], child[swap_point] = swapB, swapA\n",
    "        return child\n",
    "\n",
    "    def evolve(self, fitness):\n",
    "        pop = self.select(fitness)\n",
    "        pop_copy = pop.copy()\n",
    "        for i,parent in enumerate(pop_copy):  # for every parent\n",
    "            child = self.crossover(parent, pop_copy)\n",
    "            child = self.mutate(child)\n",
    "            pop_copy[i,:] = child\n",
    "        self.pop = pop_copy\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "#画图模块\n",
    "class My_html():\n",
    "    def __init__(self,location):\n",
    "        self.san_map = folium.Map(\n",
    "                            location=[location[1],location[0]], #杭州为中心，不改了\n",
    "                            zoom_start=16,\n",
    "                            # 高德街道图\n",
    "                            tiles='http://webrd02.is.autonavi.com/appmaptile?lang=zh_cn&size=1&scale=1&style=7&x={x}&y={y}&z={z}',\n",
    "                            attr='default')\n",
    "        self.marker_cluster = plugins.MarkerCluster().add_to(self.san_map)\n",
    "\n",
    "    def plot_point_and_line(self,X_lat,X_lon,address_names,color,**kargs): \n",
    "        '''\n",
    "        天坑，要先传入纬度，后传入经度，神经病\n",
    "        '''\n",
    "\n",
    "        folium.PolyLine(np.array(list(zip(X_lat, X_lon))), color=color).add_to(self.san_map)\n",
    "        for x, y, name in zip(X_lat,X_lon,address_names):\n",
    "            folium.Marker([x, y], color='balck',popup=name,**kargs).add_to(self.marker_cluster)\n",
    "\n",
    "    def save(self,name):\n",
    "        self.san_map.save(name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ]
}