import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
import os
from tqdm import tqdm

# Output locations for the pre-processed CSV files written by pretreated().
TRAIN_DATA_PATH = os.path.join('.', 'dataset', 'train.csv')
TEST_DATA_PATH = os.path.join('.', 'dataset', 'test.csv')
SUBMISSION_PATH = os.path.join('.', 'dataset', 'submissionrmle.txt')
# Columns dropped by del_useless_attribute() (too many missing values; see the
# analysis notes further down in this file).
USELESS_ATTRIBUTE = ['Index', 'max_floor', 'material', 'build_year', 'num_room', 'kitch_sq']
# Object-dtype columns that change_type() converts to numeric values.
D_TYPE_OBJECT = ['timestamp', 'product_type', 'water_1line', 'big_road1_1line', 'railroad_1line', 'ecology']
# Upper price bound for del_price_out_range(); 2e8 is above the observed
# maximum, so no rows are filtered (the final decision per the notes below).
MAX_PRICE = round(2e8)
# Categorical value orderings: a value is encoded as its index in the tuple.
PRODUCT_TYPE = ('Investment', 'OwnerOccupier')
RODE_WATER_LINE = ('no', 'yes')
ECOLOGY = ('good', 'excellent', 'poor', 'satisfactory', 'no data')


def get_origin_pd(is_train_data=True):
    """
    Load one of the raw public data files.

    :param is_train_data: True -> Train.csv, False -> Test.csv
    :return: the file contents as a DataFrame
    """
    # os.path.join instead of hard-coded '\\' separators so the path also
    # works on POSIX systems (consistent with the path constants above).
    file_name = 'Train.csv' if is_train_data else 'Test.csv'
    return pd.read_csv(os.path.join('.', 'public_data', file_name))


#####################################
# 训练数据价格筛取
# 总体数据中最小值为1e5 最大值为1.11111112e8
# 当价格在1e5~1e7区间覆盖了0.850的数据
# 1e5~1.25e7区间覆盖了0.920的数据
# 1e5~1.5e7区间覆盖了0.957的数据
# 1e5~1.75e7区间覆盖了0.975的数据
# 1e5~2e7区间覆盖了0.983的数据
# 选择为1e5~6e7区间作为有效数据 但效果不佳
# 最终不再根据价格筛取训练集
#####################################
def price_distribution():
    """
    Plot a histogram of the training-set 'price_doc' values in [1e5, 6e7].

    Exploratory helper used to decide on price filtering; see the coverage
    statistics in the comment block above.
    """
    # Removed the dead debug code that was parked in a throwaway string
    # literal here (max/min/coverage prints) — it was never executed.
    prices = get_origin_pd()["price_doc"].tolist()
    plt.figure()
    plt.hist(prices, range=(1e5, 6e7))
    plt.show()


def del_price_out_range(data_frame: pd.DataFrame, max_price: int):
    """Return only the rows whose 'price_doc' is strictly below *max_price*."""
    in_range = data_frame['price_doc'] < max_price
    return data_frame.loc[in_range]


#####################################
# 训练数据属性缺失
# life_sq                                   5588
# floor                                      167
# max_floor                                 9572
# material                                  9572
# build_year                               12918
# num_room                                  9572
# kitch_sq                                  9572
# state                                    13097
# metro_min_walk                              16
# metro_km_walk                               16
# railroad_station_walk_km                    16
# railroad_station_walk_min                   16
# prom_part_5000                             111
# cafe_sum_5000_min_price_avg                215
# cafe_sum_5000_max_price_avg                215
# cafe_avg_price_5000                        215
# 其中max_floor,material,build_year,num_room,kitch_sq,state
# 缺失数据过多且缺失数据的标签与未缺失数据的标签分布基本一致，在训练时可以忽略该属性
# state 可考虑将未知值置为 -1
# 对于floor采取随机值法，范围为0~15 该区间覆盖了约80%的已有数据
# 对于prom_part_5000也采用随机值法，范围为 3~15
# 对于metro_min_walk,metro_km_walk,railroad_station_walk_km,railroad_station_walk_min
# 根据对应的 avto 成比例调整
# 比例如下
# 同时比例应增加适当噪声
# metro_min_walk = metro_min_avto * 6.5469
# metro_km_walk = metro_km_avto * 0.9459
# railroad_station_walk_km = railroad_station_avto_km * 0.9656
# railroad_station_walk_min = railroad_station_avto_min * 8.2755
#
# 对于 cafe_sum_5000_min_price_avg,cafe_sum_5000_max_price_avg,cafe_avg_price_5000
# 直接置0或-1即可，因为对应数据中咖啡厅数量为0
#
# 对于交易时间，拆出年份和月份拼成新数据 减去201100
#
# 由于本人计划采取随机森林回归器进行回归运算，故在此不考虑降维，数据归一化等问题
#####################################

def fill_null_val(data_frame: pd.DataFrame):
    """
    Fill the missing values, in place, using the rules from the notes above.

    * metro_min_walk / metro_km_walk / railroad_station_walk_km /
      railroad_station_walk_min: estimated from the matching ``*_avto``
      column, scaled by the documented ratio plus a small random noise term.
    * floor: random integer in [1, 15] (covers ~80% of the known values).
    * prom_part_5000: random value in [3.00, 15.00].
    * cafe_sum_5000_min_price_avg / cafe_sum_5000_max_price_avg /
      cafe_avg_price_5000: set to 0 (the cafe count within 5000m is 0).
    * state / life_sq: set to -1 (unknown marker).

    Vectorised with boolean masks; the previous per-row ``itertuples`` +
    ``.loc`` loop was extremely slow on ~30k rows.

    :param data_frame: raw data
    :return: the same DataFrame, modified in place
    """

    def _fill_from_avto(walk_col: str, avto_col: str, ratio: float, noise_div: float):
        # Missing walk value -> avto value * (ratio + small uniform noise).
        mask = data_frame[walk_col].isnull()
        if mask.any():
            noise = np.random.randint(0, 51, size=int(mask.sum())) / noise_div
            data_frame.loc[mask, walk_col] = data_frame.loc[mask, avto_col] * (ratio + noise)

    _fill_from_avto('metro_min_walk', 'metro_min_avto', 6.5469, 1e4)
    _fill_from_avto('metro_km_walk', 'metro_km_avto', 0.9459, 1e4)
    # BUGFIX: the original used 6.5469 (the metro_min ratio, copy-pasted) for
    # both railroad columns; the documented ratios are 0.9656 (km) and
    # 8.2755 (min) — see the comment block above this function.
    _fill_from_avto('railroad_station_walk_km', 'railroad_station_avto_km', 0.9656, 1e3)
    _fill_from_avto('railroad_station_walk_min', 'railroad_station_avto_min', 8.2755, 1e3)

    floor_mask = data_frame['floor'].isnull()
    if floor_mask.any():
        # Random floor in [1, 15] (np.random.randint's upper bound is exclusive).
        data_frame.loc[floor_mask, 'floor'] = np.random.randint(1, 16, size=int(floor_mask.sum()))

    prom_mask = data_frame['prom_part_5000'].isnull()
    if prom_mask.any():
        # Random value in [3.00, 15.00] with two decimals, as before.
        data_frame.loc[prom_mask, 'prom_part_5000'] = \
            np.random.randint(300, 1501, size=int(prom_mask.sum())) / 100

    # Cafe price averages are missing exactly when there are no cafes -> 0.
    for col in ('cafe_sum_5000_min_price_avg', 'cafe_sum_5000_max_price_avg', 'cafe_avg_price_5000'):
        data_frame[col] = data_frame[col].fillna(0)
    # Unknown state / life_sq are marked with -1.
    for col in ('state', 'life_sq'):
        data_frame[col] = data_frame[col].fillna(-1)
    return data_frame


def change_type(data_frame: pd.DataFrame):
    """
    Convert the object-dtype columns (D_TYPE_OBJECT) to numeric values.

    * timestamp 'YYYY-MM-DD' -> YYYY * 100 + MM - 201100
    * product_type / water_1line / big_road1_1line / railroad_1line / ecology
      -> the value's index in the matching constant tuple (raises ValueError
      for an unknown category, as before)

    Vectorised column-wise; the previous per-row ``itertuples`` + ``.loc``
    loop was very slow.  The needless ``global`` statement was also removed
    (reading module globals does not require it).

    :param data_frame: raw data
    :return: the same DataFrame, modified in place
    """
    parts = data_frame['timestamp'].str.split('-', expand=True)
    data_frame['timestamp'] = parts[0].astype(int) * 100 + parts[1].astype(int) - 201100
    # tuple.index keeps the original ValueError-on-unknown-value behavior.
    data_frame['product_type'] = data_frame['product_type'].map(PRODUCT_TYPE.index)
    for col in ('water_1line', 'big_road1_1line', 'railroad_1line'):
        data_frame[col] = data_frame[col].map(RODE_WATER_LINE.index)
    data_frame['ecology'] = data_frame['ecology'].map(ECOLOGY.index)
    # Explicit to_numeric pass so every D_TYPE_OBJECT column ends up with a
    # numeric dtype, exactly as in the original implementation.
    for name in D_TYPE_OBJECT:
        data_frame[name] = pd.to_numeric(data_frame[name])
    return data_frame


def del_useless_attribute(data_frame: pd.DataFrame, columns=None):
    """
    Drop the ignored attribute columns, in place.

    Generalised with a backward-compatible *columns* parameter; existing
    callers that pass only the DataFrame keep the old behavior.

    :param data_frame: data to prune
    :param columns: iterable of column names to drop; defaults to
        USELESS_ATTRIBUTE.  Raises KeyError if a column is missing (as the
        original per-name drop loop did).
    :return: the same DataFrame, without the dropped columns
    """
    if columns is None:
        columns = USELESS_ATTRIBUTE
    # One vectorised drop call instead of a per-name loop.
    data_frame.drop(columns=list(columns), inplace=True)
    return data_frame


def pretreated(data_frame: pd.DataFrame, is_train_data=True):
    """
    Run the full preprocessing pipeline and write the result to disk.

    Renames the first column to 'Index', optionally filters training rows by
    MAX_PRICE, converts column types, fills missing values, drops the ignored
    columns, re-indexes on 'id' and saves to TRAIN_DATA_PATH / TEST_DATA_PATH.
    Finally prints the remaining null counts and the column dtypes as a check.

    :param data_frame: raw data as returned by get_origin_pd()
    :param is_train_data: True for the training set, False for the test set
    """
    first_col = data_frame.keys()[0]
    data_frame.rename(columns={first_col: 'Index'}, inplace=True)
    if is_train_data:
        data_frame = del_price_out_range(data_frame, MAX_PRICE)
    # Apply the remaining preprocessing steps in order.
    for step in (change_type, fill_null_val, del_useless_attribute):
        data_frame = step(data_frame)
    data_frame.set_index('id', drop=True, append=False, inplace=True)
    target_path = TRAIN_DATA_PATH if is_train_data else TEST_DATA_PATH
    data_frame.to_csv(target_path)
    print(data_frame.isnull().sum())
    print(data_frame.dtypes)


if __name__ == '__main__':
    # Preprocess the test set first, then the training set.
    for is_train in (False, True):
        pretreated(get_origin_pd(is_train), is_train)
