#!/usr/bin/env python
# coding: utf-8

# In[ ]:
import os
import csv

import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

# In[158]:

def preplot(y_test_pre, y_test_rel, figpath):
    """Plot predicted vs. actual series and save the figure to *figpath*.

    Parameters
    ----------
    y_test_pre : array-like
        Predicted values (plotted in red).
    y_test_rel : array-like
        Ground-truth values (plotted in blue).
    figpath : str
        Destination path for the saved image (written at 300 dpi).
    """
    # Start an explicit new figure so repeated calls never draw on top of
    # a previous plot (the original relied on the implicit current figure).
    fig = plt.figure(figsize=(15, 5))
    plt.plot(y_test_pre, color='red', label='Y_prediction')
    plt.plot(y_test_rel, color='blue', label='y_test')
    plt.xlabel('No. of Trading Hours')
    # Fixed label typo: "concentraint" -> "concentration".
    plt.ylabel('PM2.5 concentration Value')
    plt.legend(loc='upper left')
    fig.savefig(figpath, dpi=300)
    plt.show()
    # Release the figure to avoid memory build-up when called in a loop.
    plt.close(fig)
    
# In[ ]:
def generpath(path):
    """Create directory *path* (including parents) if it does not exist.

    ``exist_ok=True`` makes the call idempotent and removes the
    check-then-create race of the original ``exists()``/``makedirs()``
    pair, which could raise FileExistsError if the directory appeared
    between the two calls.
    """
    os.makedirs(path, exist_ok=True)
# In[ ]:
def generfile(path, filename, m):
    """Create ``path + filename`` as a CSV holding one "order" column 1..m.

    The file is only written if it does not already exist; an existing
    file is left untouched.
    """
    target = path + filename
    if os.path.exists(target):
        return
    frame = pd.DataFrame({"order": list(range(1, m + 1))})
    frame.to_csv(target, sep=',')

# In[ ]:
def rdata(datapath):
    """Read a CSV (first column as index) and forward-fill missing values.

    Forward fill copies the previous row's value into each NaN, so leading
    NaNs (with no earlier value to copy) remain NaN — same caveat the
    original Chinese comment noted.
    """
    data1 = pd.read_csv(datapath, index_col=0)
    # ffill() replaces fillna(method='pad'), which is deprecated and
    # removed in pandas 3.0; returning the result also avoids the
    # discouraged inplace mutation.
    return data1.ffill()
# In[ ]:
def splitdata(data, sequence_length, horizon):
    """Build sliding-window samples and split them 80/20 into train/test.

    Each sample is ``sequence_length`` consecutive points of *data*; its
    target is the point ``horizon`` steps after the window's last element.
    The final ``round(0.2 * len(data))`` samples become the test set.

    Returns (x_train, x_test, y_train, y_test) as numpy arrays, where the
    x arrays have shape (samples, sequence_length).
    """
    n_windows = len(data) - sequence_length - horizon + 1
    windows = np.array(
        [data[start:start + sequence_length] for start in range(n_windows)]
    )

    # Targets: the value `horizon` steps past each window's last element;
    # there are exactly n_windows of them.
    targets = list(data[horizon + sequence_length - 1:])

    # Attach the targets as a final column so one slice splits x and y.
    table = pd.DataFrame(windows)
    table["y"] = targets
    table = np.array(table)

    # NOTE(review): the test-set size is based on the ORIGINAL series
    # length rather than the number of windows — kept as-is to preserve
    # the existing split.
    test_rows = int(round(0.2 * len(data)))
    x_train_initial = table[:-test_rows, :-1]
    x_test_initial = table[-test_rows:, :-1]
    y_train_initial = table[:-test_rows, -1]
    y_test_initial = table[-test_rows:, -1]

    return x_train_initial, x_test_initial, y_train_initial, y_test_initial
# In[156]:
# Standardization / scaling helpers
def standata(x_train_initial, x_test_initial, y_train_initial, y_test_initial):
    """Standardize x/y (fit on train data only) and reshape x to 3-D.

    Returns
    -------
    x_train, x_test : ndarray, shape (samples, timesteps, 1)
    y_train, y_test : ndarray, shape (samples, 1)
    """
    x_scaler = StandardScaler()
    # fit_transform fits on the training data and scales it in one step.
    # The original fitted, stored an unused `x_scaled`, then transformed
    # the training set a second time — that dead work is removed here.
    x_train = x_scaler.fit_transform(x_train_initial)
    x_test = x_scaler.transform(x_test_initial)

    # y is a 1-D vector; StandardScaler needs a (n, 1) column.
    y_scaler = StandardScaler().fit(y_train_initial.reshape(-1, 1))
    y_train = y_scaler.transform(y_train_initial.reshape(-1, 1))
    y_test = y_scaler.transform(y_test_initial.reshape(-1, 1))

    # Add a trailing feature axis: (samples, timesteps) -> (samples,
    # timesteps, 1) — presumably the layout a downstream recurrent model
    # expects; confirm against the caller.
    amount_of_features = 1
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], amount_of_features))
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], amount_of_features))

    return x_train, x_test, y_train, y_test

def iverse_data(y_train_initial, pre, y_test):
    """Map scaled predictions and targets back to the original value range.

    Re-fits a StandardScaler on *y_train_initial* — the same data the
    forward scaling was fitted on — and inverse-transforms both arrays.

    Returns
    -------
    (y_test_rel, y_test_pre) : de-scaled actual values and predictions.
    """
    scaler = StandardScaler().fit(y_train_initial.reshape(-1, 1))
    y_test_rel = scaler.inverse_transform(y_test)
    y_test_pre = scaler.inverse_transform(pre)
    return y_test_rel, y_test_pre


# In[ ]:
def datasave(savepath, saveindex, y_test_pre):
    """Append *y_test_pre* as column *saveindex* to the CSV at *savepath*.

    The file is read, the new column attached (its length must match the
    existing row count), and the file rewritten without the pandas index.
    """
    sdata = pd.read_csv(savepath)
    # read_csv already returns a DataFrame — the original's redundant
    # pd.DataFrame(sdata) re-wrap is dropped; np.asarray avoids an extra
    # copy when the input is already an ndarray.
    sdata[saveindex] = np.asarray(y_test_pre)
    sdata.to_csv(savepath, index=False)

# In[ ]:
def read_data(datapath, index, sequence_length, horizon):
    """End-to-end loader: read a CSV, window it, split, and standardize.

    *index* selects which column of the CSV to model.  Returns the raw
    split arrays followed by their standardized/reshaped counterparts:
    (x_train_initial, x_test_initial, y_train_initial, y_test_initial,
     x_train, x_test, y_train, y_test).
    """
    series = rdata(datapath)[index]
    raw = splitdata(series, sequence_length, horizon)
    x_train_initial, x_test_initial, y_train_initial, y_test_initial = raw
    x_train, x_test, y_train, y_test = standata(*raw)
    return (x_train_initial, x_test_initial, y_train_initial, y_test_initial,
            x_train, x_test, y_train, y_test)