#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: xjx
@time: 2023/9/25 15:59 
@file: working_place_model.py
@project: nanchangproject
@describe: 检测工作地点位置并进行逆地理编码分析得出工作地点名称
"""
import os
import sys
import time
import django
import pandas as pd
from pandas import DataFrame
import numpy as np
import requests
import json
import jieba
import copy
from django_pandas.io import read_frame
from sklearn.cluster import DBSCAN, KMeans

# Limit native thread pools to one thread — presumably to avoid the
# sklearn KMeans/MKL threading warning on Windows; TODO confirm.
os.environ["OMP_NUM_THREADS"] = '1'
sys.path.append('../../')  # make the project root importable when run as a script
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nanchangproject.settings')
django.setup()  # critical: setup() must run BEFORE importing any models classes

from drivinginfo.models import Drivinginfo, BasicInfo, WorkingPlace
from django.core.exceptions import ObjectDoesNotExist


def data_to_dataframe(vin: str):
    """
    Load every driving record of one vehicle as a DataFrame.

    @param vin: vehicle identification number used to filter the records
    @return: pandas.DataFrame built from the matching queryset
    """
    queryset = Drivinginfo.objects.filter(vin=vin)
    frame = read_frame(qs=queryset)
    print(f'车辆{vin}数据库数据已转化为dataframe')
    # print(frame)
    return frame


def find_interesting_site(dataframe: DataFrame):
    """
    Detect candidate work-place locations from one vehicle's driving records.

    Pipeline:
      1. keep parking points (engine off);
      2. DBSCAN clustering (haversine metric) to drop outliers;
      3. per cluster, count how many distinct days it covers — clusters seen
         on only a day or two are noise, clusters seen daily matter;
      4. keep only daytime (06:00-20:00) parking points: 20:00-06:00 points
         indicate a residence, and we are after the WORK place;
      5. compute the geographic centre of each surviving cluster.

    @param dataframe: DataFrame with columns data_time / longitude /
                      latitude / engine_status
    @return: list of [longitude, latitude] cluster centres rounded to six
             decimals; empty list when no valid cluster can be produced
    """
    start_time = time.perf_counter()
    print(f'聚类算法正在执行')
    df1 = dataframe[['data_time', 'longitude', 'latitude', 'engine_status']].copy()
    df1 = df1.dropna(axis=0, how='all')  # drop rows that are entirely NaN
    df1['data_time'] = pd.PeriodIndex(df1['data_time'], freq="S")  # second-resolution periods
    # Forward-fill engine status with the previous non-missing value.
    # (Avoids the deprecated fillna(method=...) and chained inplace mutation.)
    df1['engine_status'] = df1['engine_status'].ffill()
    df1 = df1[(df1['longitude'] > 0) & (df1['latitude'] > 0)]  # drop invalid coordinates
    level_map = {"启动": 1, "关闭": 0}  # encode engine status
    df1['engine_status'] = df1['engine_status'].map(level_map)
    stop_p = df1[df1['engine_status'] == 0].copy()  # parking points only
    data_p = stop_p[['longitude', 'latitude']].values
    kms_per_radian = 6371.0088
    epsilon = 0.5 / kms_per_radian  # 0.5 km clustering radius expressed in radians
    # BUGFIX: sklearn's haversine metric expects (lat, lon) in RADIANS.
    # The original passed raw (lon, lat) degrees, so eps was not really 0.5 km.
    coords_rad = np.radians(data_p[:, [1, 0]])
    dbscan1 = DBSCAN(eps=epsilon, min_samples=200, algorithm='ball_tree', metric='haversine')
    cluster_label = dbscan1.fit_predict(coords_rad)  # label -1 marks outliers
    # BUGFIX: the original assumed label -1 is always present (cluster_num - 1),
    # which dropped the last cluster (or all clusters) when there were no outliers.
    valid_labels = sorted(set(cluster_label) - {-1})
    if not valid_labels:
        # too little data: everything was classified as outliers
        print('该车数据过少，无法生成有效聚类')
        return []

    stop_p['label'] = cluster_label  # write cluster labels back into the frame
    stop_p['data_time_hour'] = stop_p['data_time'].map(lambda x: x.hour)
    # Work-place analysis: keep the documented 06:00-20:00 daytime window
    # (original used `> 6`, wrongly excluding 06:00-06:59).
    stop_p = stop_p[(stop_p['data_time_hour'] < 20) & (stop_p['data_time_hour'] >= 6)]
    cluster_list = [stop_p[stop_p['label'] == lab] for lab in valid_labels]
    # number of distinct (month, day) pairs each cluster covers
    cluster_list_every_num = [
        len(clu['data_time'].groupby(
            [clu['data_time'].dt.month, clu['data_time'].dt.day]).agg('count'))
        for clu in cluster_list]

    max_tl = max(cluster_list_every_num)
    # Threshold 0.3: keep clusters covering more than 30% of the best cluster's
    # day coverage. BUGFIX: use enumerate — the original `tl.index(i)` always
    # returned the FIRST position for duplicate counts.
    result_tl = [valid_labels[idx]
                 for idx, day_count in enumerate(cluster_list_every_num)
                 if day_count > 0.3 * max_tl]
    max_day = stop_p['data_time'].max()
    min_day = stop_p['data_time'].min()
    max_day_len = (max_day - min_day) / pd.Timedelta(1, 'D')  # observation span in days
    # if even the best cluster covers under 5% of the span's days, give up
    if max_tl < 0.05 * max_day_len:
        print('目前数据的聚类无法产生有效的判断')
        return []
    else:
        print(f'有效的标签为：{result_tl}')
    print(f'最近日期：{max_day},最远日期：{min_day}，日期差值：{round(max_day_len)}')

    interesting_site_list = []
    for num in result_tl:
        # num is a valid cluster label
        interest_clu = stop_p[stop_p['label'] == num]
        clu_data = interest_clu[['longitude', 'latitude']].values
        print(f'标签{num}对应的停车点数据有{len(clu_data)}个')
        kmeans1 = KMeans(n_clusters=1, n_init='auto')  # single cluster => geometric centre
        kmeans1.fit(clu_data)
        # only one centre exists, order is [longitude, latitude]
        centre = kmeans1.cluster_centers_[0].tolist()
        interesting_site_list.append([round(coord, 6) for coord in centre])

    print(f'聚类算法执行完毕，耗时{time.perf_counter() - start_time}')

    return interesting_site_list


def poi_result(latitude, longitude, ak="GVbEaqlIMpwjYmXhvaamzieXhYIqBLB3", randius=1000):
    """
    Reverse-geocode one coordinate through the Baidu map API.

    @param latitude: latitude (wgs84)
    @param longitude: longitude (wgs84)
    @param ak: Baidu map API key
               # NOTE(review): hard-coded credential — move to settings/env.
    @param randius: POI recall radius in metres (misspelled name kept for callers)
    @return: (status, edz, business, province, city, district, town, poi);
             status 0 means success — on any other status the remaining
             fields are returned empty so callers can inspect `status`.
    """
    baidu_url = "https://api.map.baidu.com/reverse_geocoding/v3/"
    params = {
        "ak": ak,
        "extensions_poi": 1,
        "radius": randius,
        "output": "json",
        "coordtype": "wgs84ll",
        "location": f"{latitude},{longitude}",
    }
    # timeout so a stuck API call cannot hang the whole analysis run
    response = requests.get(baidu_url, params=params, timeout=10)
    baidu_addr = response.json()
    status = baidu_addr["status"]  # 0 = ok; 200 = bad ak; 202 = banned by platform, etc.
    if status != 0:
        # BUGFIX: the original indexed baidu_addr["result"] unconditionally,
        # raising KeyError on failed requests before callers could check status.
        return status, '', '', '', '', '', '', []
    result = baidu_addr["result"]
    component = result.get("addressComponent", {})
    edz = result.get("edz", {}).get("name", '')  # development-zone name (may be absent)
    business = result.get("business", '')        # business-district name
    province = component.get("province", '')
    city = component.get("city", '')
    district = component.get("district", '')
    town = component.get("town", '')
    poi = result.get("pois", [])                 # nearby POI list
    return status, edz, business, province, city, district, town, poi


# POI tags that carry no useful business information for work-place detection
_INVALID_TAGS = ('出入口', '行政地标', '交通设施', '电子眼', '门址', '房地产')


def _failed_site(index, item):
    """Result entry for a coordinate that could not be resolved to a place."""
    return {'index': index,
            'status': 0,
            'longitude': item[0],
            'latitude': item[1]}


def _split_tag(tag):
    """Split a POI tag into (first_tag, second_tag); (None, None) when invalid."""
    first, _, second = tag.partition(';')
    if first in _INVALID_TAGS:
        return None, None
    return first, (second or None)


def find_site_name(site_list: list):
    """
    Resolve each [longitude, latitude] pair to a likely work-place description.

    For every coordinate: reverse-geocode it, tokenise the recalled POI names
    with jieba, and report the most frequent token, the most frequent POI tag,
    and the nearest POI of that tag.

    @param site_list: list of [longitude, latitude] pairs
    @return: list of dicts — status 1 entries carry max_word / max_word_num /
             max_tag / max_tag_num / nearest_site / nearest_site_distance;
             status 0 entries mark coordinates that could not be resolved.
    """
    site_name_list = []
    for index, item in enumerate(site_list):
        status, edz, business, province, city, district, town, poi = poi_result(
            longitude=item[0], latitude=item[1], randius=1000)

        if status != 0 or not poi:
            # request failed, or no POI could be recalled around this point
            site_name_list.append(_failed_site(index, item))
            continue

        # Stop-words: company boilerplate plus the local administrative names,
        # which show up in almost every POI name and carry no signal.
        remove_words = ['中国', '有限', '有限责任', '责任有限', '有限公司', '公司', '责任', '(', ')']
        for region_name in (province, city, district, town):
            remove_words.extend(jieba.lcut(region_name, cut_all=True))

        jieba_list = []       # all surviving tokens from POI names
        first_tag_list = []   # primary tag of each valid POI
        second_tag_list = []  # secondary tag of each valid POI (collected for parity)
        for line in poi:
            first_tag, second_tag = _split_tag(line['tag'])
            if first_tag is None:
                continue  # POIs with these tags carry no location value
            first_tag_list.append(first_tag)
            if second_tag is not None:
                second_tag_list.append(second_tag)
            tokens = jieba.lcut(line['name'], cut_all=True)  # full-cut tokenisation
            jieba_list.extend(t for t in tokens if t not in remove_words)

        new_jieba_list = list(set(jieba_list))  # deduplicate
        if not new_jieba_list:
            # after filtering there may be nothing left to judge a work place by
            site_name_list.append(_failed_site(index, item))
            continue
        print(f'初始分词结果为：{new_jieba_list}')
        # token frequencies, ignoring single characters produced by full-cut mode
        jieba_num2 = {word: jieba_list.count(word)
                      for word in new_jieba_list if len(word) >= 2}
        if not jieba_num2:
            # BUGFIX: only single-character tokens remained — the original
            # crashed with ValueError on max() over an empty dict here
            site_name_list.append(_failed_site(index, item))
            continue
        print(f'分词结果统计为：{jieba_num2}')

        max_word = max(jieba_num2, key=lambda x: jieba_num2[x])  # most frequent token
        max_word_num = jieba_num2[max_word]

        tag_num = {tag: first_tag_list.count(tag) for tag in set(first_tag_list)}
        if not tag_num:
            # BUGFIX: every POI had an invalid tag — same empty-max() crash
            site_name_list.append(_failed_site(index, item))
            continue
        print(f'标签结果统计为：{tag_num}')

        max_tag = max(tag_num, key=lambda x: tag_num[x])  # most frequent primary tag
        max_tag_num = tag_num[max_tag]

        # nearest POI whose primary tag equals the dominant tag
        max_site = ''
        max_site_distance = 10000  # assumes recall radius keeps distances below this
        for line in poi:
            line_tag, _ = _split_tag(line['tag'])
            if line_tag == max_tag and int(line['distance']) < max_site_distance:
                max_site_distance = int(line['distance'])
                max_site = line['name']

        site_name_list.append(
            {'index': index,  # position of this coordinate in site_list
             'status': 1,     # 1 = resolved, 0 = failed
             'longitude': item[0],
             'latitude': item[1],
             'max_word': max_word,            # most frequent token
             'max_word_num': max_word_num,    # its occurrence count
             'max_tag': max_tag,              # most frequent primary tag
             'max_tag_num': max_tag_num,      # its occurrence count
             'nearest_site': max_site,        # nearest POI of the dominant tag
             'nearest_site_distance': max_site_distance}  # its distance in metres
        )

    return site_name_list


def working_place_to_database(vin: str):
    """
    Analyse one vehicle's driving data and persist the detected work places.

    Loads the records, clusters daytime parking points, reverse-geocodes the
    cluster centres, and upserts each confident result into WorkingPlace.

    @param vin: vehicle identification number
    """
    df = data_to_dataframe(vin=vin)
    interesting_site = find_interesting_site(dataframe=df)  # [[lon, lat], ...]
    print(f'聚类结果为：{interesting_site}')
    if interesting_site:
        # list of dicts: frequent word / frequent tag / nearest site / distance
        site_name_results = find_site_name(interesting_site)
        print(f'工作地点分析为：{site_name_results}')
    else:
        site_name_results = []
        print('无法产生有效聚类')
    if not site_name_results:
        return

    try:
        basic_info = BasicInfo.objects.get(vin=vin)  # foreign-key row
    except ObjectDoesNotExist:
        # first run for this VIN: create the parent row.
        # BUGFIX: the original put the write loop in the try's `else`, so when
        # BasicInfo had to be created, no WorkingPlace rows were ever written.
        basic_info = BasicInfo.objects.create(vin=vin, car_number='')

    for site_name_item in site_name_results:
        if site_name_item['status'] == 0:
            continue  # coordinate could not be resolved to a place
        if site_name_item['max_word_num'] <= 1 or site_name_item['max_tag_num'] <= 2:
            continue  # business type around this area is too inconsistent
        try:
            WorkingPlace.objects.update_or_create(
                basic_info=basic_info,
                longitude=site_name_item['longitude'],
                latitude=site_name_item['latitude'],
                defaults={
                    'max_word': site_name_item['max_word'],
                    'max_word_num': site_name_item['max_word_num'],
                    'max_tag': site_name_item['max_tag'],
                    'max_tag_num': site_name_item['max_tag_num'],
                    'nearest_site': site_name_item['nearest_site'],
                    'nearest_site_distance': site_name_item['nearest_site_distance']
                }
            )
        except Exception as e:
            print(f'错误：{e} 工作地点无法写入数据库，请检查工作地点识别结果')
        else:
            print('工作地点写入数据库成功')


if __name__ == '__main__':
    # example run against a single hard-coded VIN
    working_place_to_database(vin='LETYBEG27LH039366')
