import pandas as pd
from datetime import datetime, timedelta

# Load raw PWV observations; path is relative — assumes the script is
# run from the repository root. Expected columns (from usage below):
# device_id, week_number, week_milliseconds, std, pwv, temp, hum,
# press, wind_speed, rain.
df = pd.read_csv('./dataset/pwv/pwv_origin_data.csv')

# Origin of the GPS time scale: GPS week numbers and milliseconds-of-week
# count forward from 1980-01-06 00:00:00.
GPS_EPOCH = datetime(1980, 1, 6, 0, 0, 0)


def gps_to_datetime(week_number, week_milliseconds, leap_seconds=0):
    """Convert a GPS week number plus milliseconds-of-week to a datetime.

    Parameters
    ----------
    week_number : int
        Whole GPS weeks elapsed since ``GPS_EPOCH``.
    week_milliseconds : int or float
        Milliseconds elapsed within that GPS week.
    leap_seconds : int or float, optional
        GPS−UTC leap-second offset to subtract. Defaults to 0, which
        reproduces the original behaviour and therefore yields GPS time,
        not true UTC (GPS is currently 18 s ahead of UTC). Pass the
        applicable offset to get a real UTC timestamp.

    Returns
    -------
    datetime
        Naive timestamp corresponding to the GPS epoch-relative input.
    """
    days = week_number * 7
    seconds = week_milliseconds / 1000 - leap_seconds
    return GPS_EPOCH + timedelta(days=days, seconds=seconds)


# Quality filter: keep rows from the target device that fall exactly on
# 5-minute (300 000 ms) marks and whose std is within tolerance.
is_target_device = df['device_id'] == 'GW_PWV_001'
on_five_minute_mark = df['week_milliseconds'] % 300000 == 0
within_std_limit = df['std'] <= 0.005

filtered = df[is_target_device & on_five_minute_mark & within_std_limit].copy()


def _row_timestamp(row):
    # Map one raw row's GPS week/millisecond pair to a datetime.
    return gps_to_datetime(row['week_number'], row['week_milliseconds'])


# Derive the timestamp column from the GPS time fields, row by row.
filtered['utc_time'] = filtered.apply(_row_timestamp, axis=1)

# Sort chronologically so the per-window gap computation below sees
# consecutive samples in order.
filtered = filtered.sort_values('utc_time')

# Bucket every sample into its 12-hour window and compute the gap to the
# previous sample within the same window. Use the lowercase 'h' alias:
# uppercase 'H' is deprecated since pandas 2.2.
filtered['date_12h'] = filtered['utc_time'].dt.floor('12h')
filtered['time_diff'] = filtered.groupby('date_12h')['utc_time'].diff()

# Flag 12-hour windows containing any gap larger than the 5-minute
# sampling cadence. The first sample of each window has a NaT diff,
# which never compares greater than 5 minutes, so it cannot flag a
# window by itself.
bad_windows = filtered[filtered['time_diff'] > timedelta(minutes=5)]['date_12h'].unique()

# Discard every sample that belongs to a discontinuous 12-hour window.
continuous_mask = ~filtered['date_12h'].isin(bad_windows)
result = filtered[continuous_mask]

# Project onto the meteorological columns of interest and collapse any
# duplicate timestamps, keeping the first occurrence.
keep_columns = ['utc_time', 'pwv', 'temp', 'hum', 'press', 'wind_speed', 'rain']
result = result[keep_columns]
result = result.drop_duplicates(subset='utc_time', keep='first')

result.to_csv('./dataset/pwv/pwv_prepare_data.csv', index=False)

# Reduce to the minimal PWV/rain table for the second export. `result`
# is already unique on utc_time (deduplicated before the prepare-data
# export above), so the previous second drop_duplicates pass was a
# no-op and has been removed.
result = result[['utc_time', 'pwv', 'rain']]

result.to_csv('./dataset/pwv/pwv.csv', index=False)

# User-facing message intentionally left in Chinese ("found N records").
print(f"找到 {len(result)} 条数据")
