import numpy as np
import os
import datetime
import time
import pandas as pd
import matplotlib.pyplot as plt
import h5py
import gc

# Match the test dates among all timestamps
def find_idx_with_dates(all_times, test_dates):
    """Return indices of timestamps that fall on any of the given test days.

    Parameters:
        all_times: np.ndarray of datetime.datetime.
        test_dates: iterable of datetime.datetime (midnight of each test day).

    Returns:
        list[int]: indices i with test_day <= all_times[i] < test_day + 1 day.
    """
    idx = []
    for test_day in test_dates:
        test_day_end = test_day + datetime.timedelta(days=1)
        # Half-open interval [day, day+1): the original strict '>' on the
        # lower bound silently dropped samples stamped exactly at midnight.
        in_day = (all_times >= test_day) & (all_times < test_day_end)
        idx += np.nonzero(in_day)[0].tolist()
    return idx

def find_time_within_nparray(time_array, time_point):
    """Locate time_point in a sorted numpy array.

    Returns the index of the exact match, or None when time_point is
    absent (including when it is larger than every element).
    """
    # Binary search for the insertion position that keeps the array sorted.
    pos = np.searchsorted(time_array, time_point)
    # A valid hit requires the position to be in range AND hold the exact value.
    if pos < len(time_array) and time_array[pos] == time_point:
        return pos
    return None

def find_time_within_pdseries(time_array, time_point):
    """Locate time_point in a sorted pandas Series.

    Returns the positional index of the exact match, or None when
    time_point is absent (including past-the-end).
    """
    # Binary search for the sorted insertion position.
    pos = np.searchsorted(time_array, time_point)
    # Past the end: time_point exceeds every element in the series.
    if pos == len(time_array):
        return None
    # Positional access (iloc) distinguishes this from the ndarray variant.
    return pos if time_array.iloc[pos] == time_point else None
    
# Save the processed train/validation and test splits to disk
def store_trainval_test(all_times,image_log,pv_log,pv_pred,pred_folder,classification_folder,test_dates):
    """Split samples into train/val and test sets by date and save as .npy.

    Samples whose timestamp falls on one of `test_dates` become the test
    set; all remaining samples form the train/validation set.  Sample
    arrays go to `pred_folder`; the timestamp arrays go to
    ./data/data_forecast/<classification_folder>/.

    Parameters:
        all_times: np.ndarray of datetime.datetime, first-axis aligned
            with the three sample arrays.
        image_log, pv_log, pv_pred: sample arrays (images, PV history,
            PV target), first axis aligned with all_times.
        pred_folder: output directory for the sample arrays.
        classification_folder: subfolder name ('sunny'/'cloudy'/'all')
            for the timestamp arrays.
        test_dates: list of datetime.datetime (midnight of each test day).
    """
    times_folder = f'./data/data_forecast/{classification_folder}'
    # Create the output directories up front so np.save cannot fail on a
    # missing path (the original crashed on a fresh checkout).
    os.makedirs(pred_folder, exist_ok=True)
    os.makedirs(times_folder, exist_ok=True)

    idx_test = find_idx_with_dates(all_times,test_dates)
    image_log_test = image_log[idx_test]
    pv_log_test = pv_log[idx_test]
    pv_pred_test = pv_pred[idx_test]
    times_test = all_times[idx_test]

    # The train/val set is the complement of the test indices.
    mask_trainval = np.ones_like(pv_pred,dtype = bool)
    mask_trainval[idx_test] = False
    image_log_trainval = image_log[mask_trainval]
    pv_log_trainval = pv_log[mask_trainval]
    pv_pred_trainval = pv_pred[mask_trainval]
    times_trainval = all_times[mask_trainval]

    print("times_trainval.shape",times_trainval.shape)
    print("image_log_trainval.shape",image_log_trainval.shape)
    print("pv_log_trainval.shape",pv_log_trainval.shape)
    print("pv_pred_trainval.shape",pv_pred_trainval.shape)

    print("times_test.shape",times_test.shape)
    print("image_log_test.shape",image_log_test.shape)
    print("pv_log_test.shape",pv_log_test.shape)
    print("pv_pred_test.shape",pv_pred_test.shape)

    np.save(os.path.join(pred_folder,'image_log_trainval.npy'), image_log_trainval)
    np.save(os.path.join(pred_folder,'pv_log_trainval.npy'), pv_log_trainval)
    np.save(os.path.join(pred_folder,'pv_pred_trainval.npy'),pv_pred_trainval)
    np.save(os.path.join(times_folder,'times_trainval.npy'),times_trainval)

    np.save(os.path.join(pred_folder,'image_log_test.npy'), image_log_test)
    np.save(os.path.join(pred_folder,'pv_log_test.npy'), pv_log_test)
    np.save(os.path.join(pred_folder,'pv_pred_test.npy'),pv_pred_test)
    np.save(os.path.join(times_folder,'times_test.npy'),times_test)

# Process the time-series data: build per-sample history stacks and weed out
# timestamps whose history window or forecast target is incomplete.
def data_process(all_times, all_images, pv_data):
    """Assemble supervised forecasting samples from sky images and PV data.

    For each timestamp t = all_times[i], collects the images and PV
    readings at t, t-1min, ..., t-stack_height min into image_log[i] /
    pv_log[i], and the PV reading forecast_horizon minutes ahead into
    pv_pred[i].  A sample is kept only if every lookup finds an exact
    timestamp match and the sample lies at least sampling_interval minutes
    after the previous valid sample.

    Parameters:
        all_times: np.ndarray of datetime.datetime — assumed sorted
            ascending (required by the searchsorted-based lookups in
            find_time_within_nparray); TODO confirm.
        all_images: array-like, positionally aligned with all_times.
        pv_data: pandas Series of PV output indexed by datetime.

    Returns:
        (all_times, image_log, pv_log, pv_pred), each filtered down to the
        valid samples only.

    NOTE(review): the return statement sits INSIDE the
    `for sampling_interval` loop, so only the first entry of
    sampling_interval_all is ever processed.  Harmless while the list has
    a single element, but worth confirming before adding more intervals.
    """
    n_images = all_times.shape[0]
    sampling_interval_all = [2]
    stack_height = 15 
    output_img_shape = [64, 64, 3]
    forecast_horizon = 15
    for sampling_interval in sampling_interval_all:
        image_log = np.zeros([n_images,stack_height+1]+output_img_shape,dtype = 'uint8')
        pv_log = np.zeros((n_images,stack_height+1))
        pv_pred = np.zeros(n_images)
        validity_mask = np.ones(n_images,dtype = bool)
        tic = time.process_time()
        last_valid_index = 0

        # Minimum spacing between kept samples; the 1-second slack makes the
        # strict '>' comparison behave like ">= sampling_interval minutes"
        # for whole-second timestamps.
        sampling_interval_td = datetime.timedelta(minutes = sampling_interval) - datetime.timedelta(seconds=1)
        for i in range(0,n_images):

            if all_times[i] - all_times[last_valid_index] > sampling_interval_td:

                pred_time = all_times[i]+datetime.timedelta(minutes=forecast_horizon)

                # Forecast target: PV reading forecast_horizon minutes ahead.
                pv_pred_idx = find_time_within_nparray(pv_data.index,pred_time)
                if pv_pred_idx is None:
                    validity_mask[i] = False
                else: 
                    pv_pred[i] = pv_data.iloc[pv_pred_idx] 
                # History stack: one image + one PV reading per minute,
                # walking backwards from the current timestamp.
                for j in range(stack_height+1):
                    log_time = all_times[i] - datetime.timedelta(minutes = j)
                    log_time_idx = find_time_within_nparray(all_times,log_time)
                    if log_time_idx is not None:
                        image_log[i,j] = all_images[log_time_idx]
                    else:
                        # Missing image in the window — invalidate the sample.
                        validity_mask[i] = False
                        break
                    pv_log_idx = find_time_within_nparray(pv_data.index,log_time)
                    if pv_log_idx is None:
                        # Missing PV reading in the window — invalidate.
                        validity_mask[i] = False
                        break
                    else: 
                        pv_log[i,j] = pv_data.iloc[pv_log_idx]    

            else:
                # Too close to the previous valid sample — skip.
                validity_mask[i] = False

            if validity_mask[i]:
                last_valid_index = i

            # Report progress of the current run, with an ETA every 10k rows.
            if i%5000 == 0:
                print('processed {0}/{1} images'.format(i,len(all_times)))
                if i%10000 == 0 and i>0:
                    print('For sampling frequency: ',sampling_interval,' minutes')
                    print('Expected finishing time:', datetime.datetime.now()+
                               datetime.timedelta(seconds = (time.process_time() - tic)*(len(all_times)/i-1)))

        # Keep only the rows that passed every check.
        all_times = all_times[validity_mask]
        image_log = image_log[validity_mask]
        pv_log = pv_log[validity_mask]
        pv_pred = pv_pred[validity_mask]
        return all_times, image_log, pv_log, pv_pred  

# Bundle the saved .npy splits into one HDF5 dataset for model training
def create_h5py(base_path, weather):
    """Collect the per-split .npy files under base_path into a single
    HDF5 file with 'trainval' and 'test' groups; each dataset is named
    after its source file (without extension)."""
    split_files = {
        'trainval': ['pv_log_trainval.npy', 'pv_pred_trainval.npy', 'image_log_trainval.npy'],
        'test': ['image_log_test.npy', 'pv_log_test.npy', 'pv_pred_test.npy']
    }
    out_path = f'./data/data_forecast/{weather}/forecast_dataset.hdf5'
    with h5py.File(out_path, 'w') as h5f:
        for split_name, filenames in split_files.items():
            grp = h5f.create_group(split_name)
            for fname in filenames:
                arr = np.load(base_path + fname)
                # Dataset name is the bare filename without its extension.
                grp.create_dataset(os.path.splitext(fname)[0], data=arr)

# Generate the datasets (called from main.py)
def prepare_data():               
    """End-to-end preparation of sunny / cloudy / all forecasting datasets.

    Loads timestamps, sky images and PV output, restricts them to 2017,
    splits them by per-day weather cluster (0 = sunny, 1 = cloudy per the
    classification CSV), builds forecasting samples with data_process,
    splits train/val vs. test by date with store_trainval_test, and
    bundles each split into an HDF5 file with create_h5py.

    NOTE(review): pv_data is loaded with np.load on a .pkl path — this
    only works if the file was written via np.save with pickling; confirm
    it was not produced by pandas.to_pickle.
    NOTE(review): after data_process, the names sunny_pv_data /
    cloudy_pv_data actually hold the stacked pv_log arrays, not the
    original PV series — the positional calls into store_trainval_test
    are consistent, but the names are misleading.
    """
    project_path = os.getcwd()
    data_folder = os.path.join(project_path,'processing','data_expanded')
    pred_folder = os.path.join(data_folder,'data_forecast')
    pv_data_path = os.path.join(project_path,'processing','pv_data','pv_output_valid.pkl')

    # Timestamp format of image filenames (currently unused here).
    image_name_format = '%Y%m%d%H%M%S'

    start_date = datetime.datetime(2017,1,1)
    end_date = datetime.datetime(2018,1,1)

    # Hand-picked held-out test days for each weather type.
    sunny_day = [(2017,9,15),(2017,10,6),(2017,10,22)]
    cloudy_day = [(2017,6,8),(2017,9,20),(2017,10,11)]

    sunny_datetime = [datetime.datetime(day[0],day[1],day[2]) for day in sunny_day]
    cloudy_datetime = [datetime.datetime(day[0],day[1],day[2]) for day in cloudy_day]
    test_dates = sunny_datetime + cloudy_datetime

    all_times = np.load(os.path.join(data_folder,'all_times_highfreq.npy'), allow_pickle=True)
    all_images = np.load(os.path.join(data_folder,'all_images_highfreq.npy'), allow_pickle=True)
    pv_data = np.load(pv_data_path, allow_pickle=True)

    # Restrict everything to the study period [start_date, end_date).
    relevant_mask = (all_times>=start_date)&(all_times<end_date)
    all_times = all_times[relevant_mask]
    all_images = all_images[relevant_mask]

    # NOTE(review): pandas label-based slicing is inclusive of end_date,
    # so a reading stamped exactly 2018-01-01 00:00 would be kept here
    # while being excluded from all_times above — confirm acceptable.
    pv_data = pv_data[start_date:end_date]


    # Per-day weather cluster labels (0 = sunny, 1 = cloudy).
    cluster_df = pd.read_csv('./data/solar_data/classified_pv_output.csv')
    cluster_df['Date'] = pd.to_datetime(cluster_df['Date']).dt.date  # convert to date objects

    date_cluster_map = cluster_df.set_index('Date')['Cluster'].to_dict()

    all_dates = np.array([t.date() for t in all_times])

    sunny_mask = np.array([date_cluster_map.get(d, -1) == 0 for d in all_dates])  # -1 handles dates missing from the map
    cloudy_mask = np.array([date_cluster_map.get(d, -1) == 1 for d in all_dates])

    sunny_times = all_times[sunny_mask]
    cloudy_times = all_times[cloudy_mask]
    sunny_images = all_images[sunny_mask]
    cloudy_images = all_images[cloudy_mask]

    pv_dates = pv_data.index.date

    pv_sunny_mask = np.array([date_cluster_map.get(d, -1) == 0 for d in pv_dates])
    pv_cloudy_mask = np.array([date_cluster_map.get(d, -1) == 1 for d in pv_dates])

    sunny_pv_data = pv_data[pv_sunny_mask]
    cloudy_pv_data = pv_data[pv_cloudy_mask]

    # Sunny subset: build samples, store splits, bundle into HDF5.
    sunny_times, sunny_images, sunny_pv_data, sunny_pv_pred = data_process(sunny_times, sunny_images, sunny_pv_data)
    pred_folder_child_sunny = os.path.join(pred_folder,'frequency_'+str(2),'sunny')
    store_trainval_test(sunny_times, sunny_images, sunny_pv_data, sunny_pv_pred, pred_folder_child_sunny,'sunny',test_dates)
    create_h5py('./processing/data_expanded/data_forecast/frequency_2/sunny/', "sunny")
    print(f"晴天数据量: {len(sunny_times)} 条时间点，{len(sunny_pv_data)} 条pv记录")
    # Free the large sunny arrays before processing the next subset.
    del sunny_times, sunny_images, sunny_pv_data, sunny_pv_pred
    gc.collect()

    # Cloudy subset: same pipeline.
    cloudy_times, cloudy_images, cloudy_pv_data, cloudy_pv_pred = data_process(cloudy_times, cloudy_images, cloudy_pv_data)
    pred_folder_child_cloudy = os.path.join(pred_folder,'frequency_'+str(2),'cloudy')
    store_trainval_test(cloudy_times, cloudy_images, cloudy_pv_data, cloudy_pv_pred, pred_folder_child_cloudy,'cloudy',test_dates)
    create_h5py('./processing/data_expanded/data_forecast/frequency_2/cloudy/', "cloudy")
    print(f"阴天数据量: {len(cloudy_times)} 条时间点，{len(cloudy_pv_data)} 条pv记录")
    del cloudy_times, cloudy_images, cloudy_pv_data, cloudy_pv_pred
    gc.collect()

    # Full dataset (all weather types): same pipeline.
    all_times, all_images, pv_data, pv_pred = data_process(all_times, all_images, pv_data)
    pred_folder_child_all = os.path.join(pred_folder,'frequency_'+str(2),'all')
    store_trainval_test(all_times, all_images, pv_data, pv_pred, pred_folder_child_all,'all',test_dates)  
    create_h5py('./processing/data_expanded/data_forecast/frequency_2/all/', "all")

    print("光伏预测数据集已处理完毕")