# coding=utf8
import time
import re
import requests
import json
import pandas as pd
import numpy as np
import math
from shapely.geometry import Polygon, Point
import os
from pyltp import Segmentor
from copy import deepcopy

# Location of the pre-trained LTP v3.4.0 model files (hard-coded local path).
LTP_DATA_DIR = "/home/mm/Downloads/ltp_data_v3.4.0/"
cws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')
segmentor = Segmentor()  # create the pyltp word-segmentor instance
segmentor.load(cws_model_path)  # load the CWS (word segmentation) model from disk
# appkey = "fc254b20e2ebec432cb90276a0af37ea" # personal developer key
appkey = "5cad9ebf61ffa46e2eccecf37376c14b"  # company key (AMap web-service API)


def seg_word_set(strin):
    """Segment *strin* with the pyltp segmentor and return its unique tokens.

    :param strin: text to segment
    :return: set of distinct word tokens
    """
    return set(segmentor.segment(strin))


def jacaard_similarity(y_true, y_pred):
    """Jaccard similarity of two sets: |A ∩ B| / |A ∪ B|.

    Bug fix: the original divided by ``len(y_pred and y_true)`` — the ``and``
    operator returns ``y_true`` whenever ``y_pred`` is non-empty, so this was
    an overlap ratio relative to ``y_true``, not the Jaccard index.  Also
    guards against a ZeroDivisionError when both sets are empty.

    :param y_true: first set
    :param y_pred: second set
    :return: similarity in [0.0, 1.0]; 0.0 when both sets are empty
    """
    union = y_true | y_pred
    if not union:
        return 0.0
    return len(y_pred & y_true) / len(union)


x_pi = 3.14159265358979324 * 3000.0 / 180.0
pi = 3.1415926535897932384626  # π
a = 6378245.0  # 长半轴
ee = 0.00669342162296594323  # 偏心率平方


def bd09_to_gcj02(coordinates):
    """
    Convert Baidu BD-09 coordinates to GCJ-02 ("Mars") coordinates.

    Baidu -> Google CN / AMap.
    :param coordinates: iterable of (longitude, latitude) in BD-09
    :return: [longitude, latitude] list in GCJ-02
    """
    lon_bd, lat_bd = list(coordinates)
    x = lon_bd - 0.0065
    y = lat_bd - 0.006
    radius = math.sqrt(x * x + y * y) - 0.00002 * math.sin(y * x_pi)
    angle = math.atan2(y, x) - 0.000003 * math.cos(x * x_pi)
    return [radius * math.cos(angle), radius * math.sin(angle)]


# Scraped listing-detail tables for the five booking sites.
_DATA_DIR = '/home/mm/Documents/bnb_data_wrangler/'
tujia = pd.read_pickle(_DATA_DIR + 'tujia_details.pkl')
airbnb = pd.read_pickle(_DATA_DIR + 'airbnb_details.pkl')
muniao = pd.read_pickle(_DATA_DIR + 'muniao_details.pkl')
xiaozhu = pd.read_pickle(_DATA_DIR + 'pig2_details.pkl')
mayi = pd.read_pickle(_DATA_DIR + 'mayi_details.pkl')


def polygon_init(filepath):
    """Build a shapely Polygon from the "lon,lat" pairs found in *filepath*.

    :param filepath: path to a border-polyline JSON file
    :return: shapely.geometry.Polygon over the parsed coordinates
    """
    vertices = []
    for pair in parse_lat_lon(filepath):
        lon_str, lat_str = pair.split(",")
        vertices.append((float(lon_str), float(lat_str)))
    return Polygon(vertices)


def parse_lat_lon(filepath):
    """Extract "lon,lat" coordinate strings from a border-polyline JSON file.

    Matches longitudes 110.x–119.x paired with latitudes 20.x–29.x (the
    Guangzhou area).  Bug fix: the original pattern ``r'11\d.\d+,2\d.\d+'``
    left the dots unescaped, so ``.`` matched *any* character and could pick
    up garbage tokens; the decimal points are now escaped.

    :param filepath: path of the JSON file to scan
    :return: list of "longitude,latitude" strings in file order
    """
    pattern = re.compile(r'11\d\.\d+,2\d\.\d+')
    with open(filepath, 'r') as rf:
        return pattern.findall(rf.read())


# Build {district-name: Polygon} from every *_border_polyline.json file.
_GPS_JSON_DIR = "/home/mm/PycharmProjects/travel_beau_guangzhou/json_gps_lon_lat_files"
listdir_index_json = {
    fname.split("_")[0]: polygon_init(_GPS_JSON_DIR + "/" + fname)
    for fname in os.listdir(_GPS_JSON_DIR)
    if fname.endswith("_border_polyline.json")
}


# TODO: revisit fallback handling for points outside every polygon
def find_polygon(lon, lat):
    """Return the district key whose polygon contains (lon, lat).

    :param lon: longitude (string or number)
    :param lat: latitude (string or number)
    :return: matching district name, or "其他" ("other") if none contains it
    """
    target = Point((float(lon), float(lat)))
    hit = next(
        (name for name, poly in listdir_index_json.items() if poly.contains(target)),
        None,
    )
    return hit if hit is not None else "其他"


def get_coord_gcj(k):
    """Look up the coordinates for address *k* in the global ``xiaozhugps`` table.

    Bug fix: the original unpacked ``np.ndenumerate(...)`` into exactly two
    ``(index, value)`` pairs, which raises ValueError unless *exactly* one row
    matches the address.  We now read the first matching row directly, so
    multiple matches no longer crash (the first one wins).

    :param k: address string to look up
    :return: [address, longitude, latitude]
    :raises IndexError: if no row matches the address
    """
    global xiaozhugps
    matches = xiaozhugps.loc[xiaozhugps['address'] == k, ['longitude', 'latitude']].values
    lon, lat = matches[0]
    return [k, lon, lat]


def get_towncode(longitude, latitude):
    """Reverse-geocode a point to an AMap towncode via the regeo web service.

    Bug fix: the original performed the HTTP request and the JSON key lookups
    *before* the try block, so network errors or an unexpected response shape
    raised instead of returning the "api error" sentinel; it also validated
    with ``assert`` (stripped under ``python -O``).

    :param longitude: longitude (GCJ-02, as AMap expects)
    :param latitude: latitude (GCJ-02)
    :return: the towncode string on success, otherwise "api error"
    """
    location = str(longitude) + "," + str(latitude)
    url = "https://restapi.amap.com/v3/geocode/regeo?output=json&extensions=all&key={}&location={}".format(appkey, location)
    try:
        r = requests.get(url, timeout=10)  # don't hang forever on a dead API
        towncode = json.loads(r.text)["regeocode"]["addressComponent"]["towncode"]
        if not isinstance(towncode, str):
            return "api error"
        return towncode
    except Exception:
        # Best-effort by design: any request/parse failure maps to the sentinel.
        return "api error"


def jacaard_similarity(y_true, y_pred):
    """Jaccard similarity of two sets: |A ∩ B| / |A ∪ B|.

    NOTE(review): this is a duplicate definition that shadows the identical
    function defined earlier in this file — consider deleting one of them.
    Bug fix: the original divided by ``len(y_pred and y_true)``; ``and``
    returns ``y_true`` whenever ``y_pred`` is non-empty, so the denominator
    was |y_true| rather than the size of the union.

    :param y_true: first set
    :param y_pred: second set
    :return: similarity in [0.0, 1.0]; 0.0 when both sets are empty
    """
    union = y_true | y_pred
    if not union:
        return 0.0
    return len(y_pred & y_true) / len(union)


def _jaccard(set_a, set_b):
    """Jaccard similarity |A ∩ B| / |A ∪ B|; 0.0 when both sets are empty."""
    union = set_a | set_b
    return len(set_a & set_b) / len(union) if union else 0.0


def drop_dup_house_info(df_group):
    """Drop near-duplicate listings in a group by description-set similarity.

    Listings whose 'house_info_set' word sets have Jaccard similarity > 0.8
    are treated as duplicates.  Within each similar pair, the listing ranked
    later by cumulative similarity score is removed, always keeping at least
    one listing overall.

    Fixes over the original implementation:
    - rows of ``.values`` were indexed with column *names* (raises on ndarray);
    - the score dict was keyed on one row's cell values instead of the links;
    - ``score_sorted`` held single-entry dicts — unhashable as sort keys and
      never found by ``.index(link)``;
    - the final ``df.drop(df.isin({...}).index)`` used the *full* index and
      would have dropped every row.

    :param df_group: DataFrame with 'hyper_link' and 'house_info_set' columns
    :return: a new DataFrame with the duplicate rows removed
    """
    rows = df_group[['hyper_link', 'house_info_set']].values
    n = len(rows)
    scores = {link: 0.0 for link, _ in rows}
    similar_pairs = []
    for i in range(n - 1):
        for j in range(i + 1, n):
            sim = _jaccard(rows[i][1], rows[j][1])
            scores[rows[i][0]] += sim
            scores[rows[j][0]] += sim
            if sim > 0.8:
                similar_pairs.append((rows[i][0], rows[j][0]))
    # Links ordered by ascending cumulative similarity (ties keep row order).
    ranked = sorted(scores, key=scores.get)
    flagged = {link for pair in similar_pairs for link in pair}
    keep = set(flagged)
    for link1, link2 in similar_pairs:
        if len(keep) == 1:
            break  # never delete the last member of a similarity cluster
        if ranked.index(link1) > ranked.index(link2) and link1 in keep:
            keep.discard(link1)
        elif ranked.index(link1) < ranked.index(link2) and link2 in keep:
            keep.discard(link2)
    drop_links = flagged - keep
    return df_group.drop(df_group[df_group['hyper_link'].isin(drop_links)].index)


# GPS table scraped separately for xiaozhu addresses.
xiaozhugps = pd.read_csv("/home/mm/Documents/bnb_data_wrangler/xiaozhu_gps.csv")
# Join the GPS information onto the xiaozhu listings by address.
xiaozhu = xiaozhu.merge(xiaozhugps, on='address', how='outer')

# 数据源里面含有噪音： 增城市 萝岗区 这些都是僵尸账户好吧

# for inn in [tujia, muniao, xiaozhu]:
#     inn.drop(inn[inn['address'].str.match(r".+萝岗.+|.+增城市.+|.+从化市.+|[^路]+路$")].index, inplace=True)

# tujia[['longitude_gcj', 'latitude_gcj']] = tujia[['longitude', 'latitude']].apply(lambda k: bd09_to_gcj02(k), axis=1, result_type='broadcast')
# muniao[['longitude_gcj', 'latitude_gcj']] = muniao[['longitude', 'latitude']].apply(lambda k: bd09_to_gcj02(k), axis=1, result_type='broadcast')
# xiaozhu[['longitude_gcj', 'latitude_gcj']] = xiaozhu[['longitude', 'latitude']].apply(lambda k: bd09_to_gcj02(k), axis=1, result_type='broadcast')
# mayi[['longitude_gcj', 'latitude_gcj']] = mayi[['longitude', 'latitude']].apply(lambda k: bd09_to_gcj02(k), axis=1, result_type='broadcast')
# airbnb[['longitude_gcj', 'latitude_gcj']] = airbnb[['latitude', 'longitude']]

# Build the canonical detail-page URL for every listing on each site.
_LINK_TEMPLATES = (
    (tujia, "https://www.tujia.com/detail/{}.htm?ssr=off"),
    (airbnb, "https://zh.airbnb.com/rooms/{}"),
    (muniao, "https://www.muniao.com/room/{}.html"),
    (xiaozhu, "https://gz.xiaozhu.com/fangzi/{}.html"),
    (mayi, "http://www.mayi.com/room/{}"),
)
for _df, _template in _LINK_TEMPLATES:
    _df['hyper_link'] = _df['id'].apply(_template.format)

# Tag each frame with its source site before concatenation.
for _df, _site in ((tujia, "tujia"), (airbnb, "airbnb"), (muniao, "muniao"),
                   (xiaozhu, "xiaozhu"), (mayi, "mayi")):
    _df['website'] = _site

# Earlier per-site BD-09 -> GCJ-02 conversion, kept for reference:
# for i in [tujia, muniao, xiaozhu, mayi]:
#     i[['longitude_gcj', 'latitude_gcj']] = i[['longitude', 'latitude']].apply(lambda k: bd09_to_gcj02(k), axis=1, result_type='broadcast')
# Stack all five sources into one frame (original index labels are kept).
homestay = pd.concat((airbnb, muniao, tujia, xiaozhu, mayi))
# Optional cross-site shuffle:
# homestay = homestay.sample(frac=1).reset_index(drop=True)

# (Dropped experiments: house_info text cleanup and facility-set extraction.)
# Cap GPS precision with a regex (roughly 10m / 100m / 1km) so near-identical
# coordinates collapse to the same key for deduplication.
for _suffix, _keep in (('ten', 4), ('hundred', 3), ('thousand', 2)):
    _trunc = re.compile(r"(?<=\.\d{" + str(_keep) + r"})\d+")
    for _axis in ('latitude', 'longitude'):
        _col = _axis + '_gcj'
        homestay[_col + '_' + _suffix] = homestay[_col].apply(
            lambda k, _rx=_trunc: _rx.sub("", str(k)))

# Cross-site dedup on truncated GPS + description (~34k -> ~23k rows), based on
# the assumption that free-text house descriptions rarely repeat by accident;
# then on landlord name + description (-1800 rows) and landlord name +
# facility list (-1200 rows, ~20k remain).  Order of passes is significant.
for _subset in (['latitude_gcj_ten', 'longitude_gcj_ten', 'house_info'],
                ['latitude_gcj_hundred', 'longitude_gcj_hundred', 'house_info'],
                ['latitude_gcj_thousand', 'longitude_gcj_thousand', 'house_info'],
                ['landlord_name', 'house_info'],
                ['landlord_name', 'house_facility']):
    homestay.drop_duplicates(_subset, inplace=True)

# (Original note: dedup same-named landlords within 1km — never implemented.)
# Map each listing into a district polygon; "其他" means outside every polygon.
homestay['district_gcj'] = homestay.apply(
    lambda row: find_polygon(row['longitude_gcj'], row['latitude_gcj']), axis=1)
# Drop listings outside all known districts as out-of-scope noise.
# (drop-by-index is kept deliberately: after concat the index labels repeat,
# so this removes every row sharing a label with a matching row.)
homestay.drop(homestay[homestay['district_gcj'] == '其他'].index, inplace=True)
# Cross-site dedup on landlord name + house name.
homestay.drop_duplicates(['landlord_name', 'house_name'], inplace=True)
# Fix: the original repeated drop_duplicates(['landlord_name', 'house_facility'])
# twice more here; both calls were no-ops because that subset was already
# deduplicated earlier (removing rows cannot create new duplicates).
# Listings with more than 14 rooms go into a separate table; drop them here.
homestay.drop(homestay[homestay['room_count'] > 14].index, inplace=True)
homestay.drop_duplicates('hyper_link', inplace=True)

# 对房屋介绍进行分词 用jaccard相似度计算，对一个房主的多个招挂房屋进行去重， 如果某房主招挂五个房源，其中三个相似度很高，直接合并成一个
# homestay['house_info_set'] = homestay.apply(lambda row: seg_word_set(row['house_info']), axis=1)

# homestay_towncode = pd.read_csv("/home/mm/Documents/bnb_data_wrangler/towncode_all.csv", dtype=str)

# homestay = homestay.merge(homestay_towncode, left_on='hyper_link', right_on='hyper_link', how='outer')


# homestay.drop(homestay[homestay['town_code'].isnull()].index, inplace=True)

# homestay_2add = homestay[homestay['town_code'].isnull()]
#
#
# homestay_2add['town_code'] = homestay_2add.apply(lambda row: get_towncode(row['longitude_gcj'], row['latitude_gcj']), axis=1)
# homestay_2add = pd.read_csv("/home/mm/Documents/bnb_data_wrangler/homestay_2add.csv", dtype=str)
# homestay_2add.astype(str)
# homestay = homestay.merge(homestay_2add[['hyper_link', 'town_code']], left_on='hyper_link', right_on='hyper_link', how='outer')
# homestay_2add_dict = {i[0]: str(i)[1][:9] for i in homestay_2add[['hyper_link', 'town_code']].values}

# towncode_all = {i[0]: i[1][:9] for i in homestay_towncode[['hyper_link', 'town_code']].values}
#
# for key, va in homestay_2add_dict.items():
#     towncode_all[key] = va
#
# homestay['town_code'] = homestay['hyper_link'].apply(lambda row: towncode_all[row] if row in towncode_all.keys() else "")



# homestay.drop(['address_filter', 'district', 'latitude_gcj', 'longitude_gcj', 'website', 'zoning_code', 'house_info', 'house_facility_set', 'latitude_gcj_ten', 'longitude_gcj_ten', 'latitude_gcj_hundred', 'longitude_gcj_hundred', 'latitude_gcj_thousand', 'longitude_gcj_thousand', 'house_info_set'], inplace=True)
#
# homestay.to_csv("/home/mm/Documents/bnb_data_wrangler/homestay_refined_notowncode.csv", dtype={'town_code': str})
#
# homestay.to_pickle("/home/mm/Documents/bnb_data_wrangler/homestay_refined_notowncode.pickle")

# homestay_refined = homestay.apply(lambda row: drop_dup_house_info(row), axis=1)
#
# homestay_refined.to_pickle("/home/mm/Documents/bnb_data_wrangler/homestay_refined_notowncode.pickle")

#
# homestay['town_code'] = homestay.apply(lambda row: get_towncode(row['longitude_gcj'], row['latitude_gcj']), axis=1)
#
# homestay.to_csv("/home/mm/Documents/bnb_data_wrangler/homestay.csv",
#                 dtype={'town_code': str, 'address': str, 'address_filter': str, 'bathroom_count': str, 'bed_count': str, 'city': str, 'discount_condition': str, 'discount_price': str, 'district': str, 'end_time': str, 'highest_score': str, 'house_facility': str, 'house_info': str, 'house_name': str, 'house_type': str, 'hyper_link': str, 'id': str, 'landlord_name': str, 'latitude': str,
#                        'latitude_gcj': str, 'live_count': str, 'longitude': str, 'longitude_gcj': str, 'order_or_not': str, 'original_price': str, 'other_price': str, 'province': str, 'registered_time': str, 'release_time': str, 'room_count': str, 'score': str, 'start_time': str, 'website': str, 'zoning_code': str, 'house_info': str, 'house_facility_set': str, 'latitude_gcj_ten': str,
#                        'longitude_gcj_ten': str, 'latitude_gcj_hundred': str, 'longitude_gcj_hundred': str, 'latitude_gcj_thousand': str, 'longitude_gcj_thousand': str, 'district_gcj': str, 'house_info_set': str})
# homestay.to_pickle("/home/mm/Documents/bnb_data_wrangler/homestay.pickle")
