import numpy as np
import pandas as pd
import datetime


def read_data(dir_path, save_path, num, min_longitude, min_latitude, longitude_unit, latitude_unit, interval):
    """Read the November-2016 Chengdu ride orders, bucket pickups by
    (day, time chunk, grid cell) and save the per-group counts as CSV.

    Parameters
    ----------
    dir_path : str
        Root directory containing the daily ``chengdu_201611DD/order_201611DD`` files.
    save_path : str
        Directory (with trailing separator) the ``<interval>_counts_data_part1``
        CSV is written to.
    num : int
        Grid cells per row along the longitude axis (row stride of the grid id).
    min_longitude, min_latitude : float
        Origin (south-west corner) of the spatial grid.
    longitude_unit, latitude_unit : float
        Width / height of one grid cell in degrees.
    interval : int
        Length of one time chunk in minutes.
    """
    columns_name = ['id', 'in_time', 'out_time', 'in_longitude',
                    'in_latitude', 'out_longitude', 'out_latitude']
    # DataFrame.append was removed in pandas 2.0 and is O(n^2) when called in a
    # loop — collect the daily frames and concatenate once instead.
    daily_frames = []
    for i in range(1, 31):  # November 2016 has 30 days
        day = str(i).zfill(2)
        daily_frames.append(pd.read_csv(
            dir_path + 'chengdu_201611' + day + '/order_201611' + day,
            names=columns_name))
    data = pd.concat(daily_frames, ignore_index=True)

    # NOTE(review): fromtimestamp converts epoch seconds to *local* time —
    # confirm the machine timezone matches the data's (Asia/Shanghai).
    data['in_time'] = data['in_time'].apply(lambda x: datetime.datetime.fromtimestamp(x))
    data.drop(['id', 'out_time', 'out_longitude', 'out_latitude'], axis=1, inplace=True)
    # Time-derived attributes; time_chunk is 1-based within the day.
    data['time_chunk'] = data['in_time'].apply(lambda x: (x.hour * 60 + x.minute) // interval + 1)
    data['month_day'] = data['in_time'].apply(lambda x: x.day)
    data['hour'] = data['in_time'].apply(lambda x: x.hour)
    data['week_day'] = data['in_time'].apply(lambda x: x.weekday())
    # Grid cell id = column index + row index * num (floor division on degrees).
    # NOTE(review): ids produced here start at 0, while clean_data fills the
    # candidate range 1..num*num — confirm the intended grid-origin convention.
    data['grid_id'] = (data['in_longitude'] - min_longitude) // longitude_unit + \
                      (data['in_latitude'] - min_latitude) // latitude_unit * num
    # One row per (day, weekday, hour, chunk, grid) with the pickup count.
    data_counts = data.groupby(['month_day', 'week_day', 'hour', 'time_chunk', 'grid_id']).size().\
        rename('counts').reset_index()
    data_counts['grid_id'] = data_counts['grid_id'].astype(int)
    data_counts.to_csv(save_path + str(interval) + '_counts_data_part1', index=False)


def clean_data(save_path, interval, num, month_length, time_chunk_size, scala):
    """Densify the per-(day, chunk, grid) pickup counts and build model features.

    Reads ``<save_path><interval>_counts_data_part1`` (written by ``read_data``),
    inserts zero-count rows for (day, chunk, grid) combinations with no pickups,
    adds a running historical mean and the counts of the previous
    ``time_chunk_size`` chunks as lag features, and writes
    ``<save_path><interval>_counts_data_final``.

    Parameters
    ----------
    save_path : str
        Directory (with trailing separator) holding the part1 file; the final
        file is written there too.
    interval : int
        Time-chunk length in minutes; assumed to divide 60 evenly.
    num : int
        Grid side length; candidate grid ids are 1..num*num.
    month_length : int
        Number of days to densify (days 1..month_length).
    time_chunk_size : int
        Number of previous chunks attached as ``last1..lastN`` columns.
    scala : bool
        If True, min-max scale the counts before building features.
    """
    # BUG FIX: part1 is saved with index=False so it has no index column;
    # reading it with index_col=0 swallowed 'month_day' and broke the merge below.
    data = pd.read_csv(save_path + str(interval) + '_counts_data_part1')
    # Grids that saw at least one pickup during the month.
    target_grid_id = data['grid_id'].unique()

    # Cartesian product day x chunk x active-grid so silent periods become rows.
    month_day = pd.DataFrame([[i + 1, 0] for i in range(month_length)], columns=['month_day', 'key'])
    time_chunk = pd.DataFrame([[i + 1, 0] for i in range(24 * 60 // interval)], columns=['time_chunk', 'key'])
    grid_id = pd.DataFrame([[i + 1, 0] for i in range(num * num) if i + 1 in target_grid_id],
                           columns=['grid_id', 'key'])
    all_data = pd.merge(month_day, time_chunk, on='key', how='outer')
    all_data = pd.merge(all_data, grid_id, on='key', how='outer')
    all_data = all_data.drop('key', axis=1)
    all_data = pd.merge(all_data, data, on=['grid_id', 'month_day', 'time_chunk'], how='left')
    all_data = all_data.fillna(0)  # unmatched (day, chunk, grid) rows -> 0 pickups
    # NOTE(review): this 1..7 encoding differs from weekday()'s 0..6 used in
    # read_data; it stays self-consistent here because every row is overwritten.
    all_data['week_day'] = all_data['month_day'].apply(lambda x: x % 7 + 1)

    # Min-max scaling of the raw counts.
    # NOTE(review): divides by zero if all counts are equal — confirm callers
    # only enable scala on data with varying counts.
    if scala:
        scalar = max(all_data['counts']) - min(all_data['counts'])
        all_data['counts'] = all_data['counts'] / scalar

    # Running historical mean per grid: cumulative counts / elapsed chunks.
    chunks_per_day = 24 * (60 // interval)  # integer division keeps chunk ids int
    all_data['total_time_chunk'] = (all_data['month_day'] - 1) * chunks_per_day + all_data['time_chunk']
    temp = all_data[['grid_id', 'total_time_chunk', 'counts']]
    temp_avg = temp.groupby(['grid_id', 'total_time_chunk']).sum().groupby(['grid_id']).cumsum().reset_index()
    temp_avg['avg_counts'] = temp_avg['counts'] / temp_avg['total_time_chunk']
    temp_avg.drop(['counts'], axis=1, inplace=True)
    all_data = pd.merge(all_data, temp_avg, on=['grid_id', 'total_time_chunk'])
    all_data.drop(['total_time_chunk'], axis=1, inplace=True)

    # Lag features: counts of the previous time_chunk_size chunks, per grid.
    # (Loop variable renamed: it used to shadow the grid_id DataFrame above.)
    final_data = []
    for gid in target_grid_id:
        temp = all_data[all_data['grid_id'] == gid]
        target = temp[time_chunk_size:].copy()
        for i in range(1, time_chunk_size + 1):
            target['last' + str(i)] = list(temp[time_chunk_size - i:-i]['counts'])
        final_data.append(target)

    final_data = pd.concat(final_data)
    # BUG FIX: the hour of a 1-based chunk is (time_chunk - 1) // (60 // interval).
    # The old 'time_chunk // (60 / interval)' was off by one for half of the
    # sub-hour chunks and produced floats; this formula also covers interval == 60,
    # where it reduces to the previous special case time_chunk - 1.
    final_data['hour'] = (final_data['time_chunk'] - 1) // (60 // interval)

    final_data.to_csv(save_path + str(interval) + '_counts_data_final', index=False)
