# -*- coding: utf-8 -*-
"""
Created on Mon Dec 21 21:29:20 2020

@author: 59567
"""
import pandas as pd
import torch.nn.functional as F
# from hyperopt import fmin, hp, tpe, STATUS_OK, Trials  ###### hyperparameter tuning
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.nn.utils import weight_norm


class Chomp1d(nn.Module):
    """Trim trailing padding from a causal Conv1d output.

    A causal convolution pads with ``(kernel_size - 1) * dilation`` zeros on
    both sides; chopping that many steps off the right end restores the
    original sequence length and keeps the convolution causal.
    """

    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        # Number of trailing time steps to remove.
        self.chomp_size = chomp_size

    def forward(self, x):
        # x: (batch, channels, time).
        # Guard the zero case: `x[:, :, :-0]` is an EMPTY slice, not a no-op,
        # which would silently destroy the time dimension.
        if self.chomp_size == 0:
            return x
        return x[:, :, :-self.chomp_size].contiguous()


class TemporalBlock(nn.Module):
    """One residual block of the TCN.

    Two weight-normalized dilated causal Conv1d layers, each followed by
    Chomp1d (to trim the causal padding), ReLU and dropout.  The input is
    added back through a 1x1 projection when channel counts differ, then
    passed through a final ReLU (standard residual connection).
    """

    def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
        super(TemporalBlock, self).__init__()
        # weight_norm re-parameterizes the conv weight into magnitude/direction
        # parts; NOTE init_weights below mutates .weight.data after wrapping,
        # so construction order matters here.
        self.conv1 = weight_norm(nn.Conv1d(n_inputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        # Trim `padding` trailing steps so the output length matches the input
        # and no future time step leaks into the current one.
        self.chomp1 = Chomp1d(padding)
        self.relu1 = nn.ReLU()
        self.dropout1 = nn.Dropout(dropout)

        self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
                                           stride=stride, padding=padding, dilation=dilation))
        self.chomp2 = Chomp1d(padding)
        self.relu2 = nn.ReLU()
        self.dropout2 = nn.Dropout(dropout)

        self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
                                 self.conv2, self.chomp2, self.relu2, self.dropout2)
        # 1x1 conv aligns channel counts for the residual sum; identity otherwise.
        self.downsample = nn.Conv1d(n_inputs, n_outputs, 1) if n_inputs != n_outputs else None
        self.relu = nn.ReLU()
        self.init_weights()

    def init_weights(self):
        # Small Gaussian init (std 0.01) for all conv weights, including the
        # residual projection when present.
        self.conv1.weight.data.normal_(0, 0.01)
        self.conv2.weight.data.normal_(0, 0.01)
        if self.downsample is not None:
            self.downsample.weight.data.normal_(0, 0.01)

    def forward(self, x):
        # x: (batch, n_inputs, time) -> (batch, n_outputs, time); the chomps
        # keep the time length unchanged through both convolutions.
        out = self.net(x)
        res = x if self.downsample is None else self.downsample(x)
        return self.relu(out + res)


class TemporalConvNet(nn.Module):
    """Stack of TemporalBlocks with exponentially growing dilation (1, 2, 4, ...).

    `num_channels[i]` gives the output channel count of level `i`; the receptive
    field grows geometrically with depth while the sequence length is preserved.
    """

    def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
        super(TemporalConvNet, self).__init__()
        blocks = []
        prev_channels = num_inputs
        for level, channels in enumerate(num_channels):
            dilation = 2 ** level
            # Causal padding sized so each block leaves the sequence length unchanged.
            blocks.append(TemporalBlock(prev_channels, channels, kernel_size,
                                        stride=1, dilation=dilation,
                                        padding=(kernel_size - 1) * dilation,
                                        dropout=dropout))
            prev_channels = channels

        self.network = nn.Sequential(*blocks)

    def forward(self, x):
        # x: (batch, num_inputs, time) -> (batch, num_channels[-1], time)
        return self.network(x)
    
    
class TCN(nn.Module):
    """TCN regressor: temporal conv stack plus a linear read-out on the last time step."""

    def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
        super(TCN, self).__init__()
        # Feature extractor over the time axis.
        self.tcn = TemporalConvNet(input_size, num_channels, kernel_size=kernel_size, dropout=dropout)
        # Maps the final hidden channels to the prediction size.
        self.linear = nn.Linear(num_channels[-1], output_size)
        self.init_weights()

    def init_weights(self):
        # Small Gaussian init for the read-out, matching the conv init scale.
        self.linear.weight.data.normal_(0, 0.01)

    def forward(self, x):
        # Incoming x is (batch, time, features); Conv1d expects (batch, channels, time).
        features = self.tcn(x.permute(0, 2, 1))
        # Predict from the representation at the final time step only.
        last_step = features[:, :, -1]
        return self.linear(last_step)

# if __name__ == "__main__":

# def train(units, num_classes, x_train, y_train, x_valid, y_valid, x_test, y_test, batch_size, epochs, patience, save_path):
#     """
#     units : lstm层神经元个数
#     x_train : 训练集x
#     y_train : 训练集y
#     x_valid : 验证集x
#     y_valid : 验证集y
#     batch_size : 每次放入的数量
#     epochs : 迭代次数
#     save_path : 模型保存的路径
#
#     return 训练结果
#     """
#     args = {}
#     args['cuda'] = False
#     args['nhid'] = 30 # 隐藏层神经元
#     args['levels'] = 8 # 隐藏层层数
#     args['lr'] = 0.0001
#     args['batch_size'] = batch_size
#     args['optim'] = 'Adam'
#     args['log_interval']= 1
#     args['clip'] = -1
#     args['seq_len'] = 6
#     time_step = 8
#     forcast_len = 6
#     input_channels = x_train.shape[1]
#     n_classes = 1
#
#     channel_sizes = [args['nhid']] * args['levels']
#     kernel_size = 2
#     dropout = 0.0
#
#     model = TCN(input_channels, n_classes, channel_sizes, kernel_size=kernel_size, dropout=dropout)
#     optimizer = getattr(optim, args['optim'])(model.parameters(), lr=args['lr'])
#
#     train_loss = []
#     valid_loss = []
#     for ep in range(1, epochs + 1):
#         print('x_train.shape:',x_train.shape)
#         print('y_train.shape:',y_train.shape)
#         train_loss.append(basic_train(ep, x_train, y_train, optimizer, args, model))
#         tloss,output = evaluate(model, x_valid, y_valid)
#         if ep == epochs:
#             print("llllllllllllllllllll")
#             _, output = evaluate(model, x_test, y_test)
#         valid_loss.append(tloss)
#     loss = {'train_loss':train_loss,'valid_loss':valid_loss}
#     return loss, model(x_train), model(x_valid), model(x_test)
#
#
# def basic_train(epoch, x_train, y_train, optimizer, args, model):
#     lr = args['lr']
#     model.train()
#     batch_idx = 1
#     total_loss = 0
#     batch_size = args['batch_size']
#     for i in range(0, x_train.size(0), batch_size):
#         if i + batch_size > x_train.size(0):
#             x, y = x_train[i:], y_train[i:]
#         else:
#             x, y = x_train[i:(i + batch_size)], y_train[i:(i + batch_size)]
#         optimizer.zero_grad()
#         output = model(x)
#         loss = F.mse_loss(output, y)
#         loss.backward()
#         if args['clip'] > 0:
#             torch.nn.utils.clip_grad_norm_(model.parameters(), args['clip'])
#         optimizer.step()
#         batch_idx += 1
#         total_loss += loss.item()
#
#         if batch_idx % args['log_interval'] == 0:
#             cur_loss = total_loss / args['log_interval']
#             processed = min(i + batch_size, x_train.size(0))
#             print('Train Epoch: {:2d} [{:6d}/{:6d} ({:.0f}%)]\tLearning rate: {:.4f}\tLoss: {:.6f}'.format(
#                 epoch, processed, x_train.size(0), 100. * processed / x_train.size(0), lr, cur_loss))
#             total_loss = 0
#     return cur_loss
#
#
# def evaluate(model, x, y):
#     model.eval()
#     with torch.no_grad():
#         output = model(x)
#         test_loss = F.mse_loss(output, y)
#         print('\nTest set: Average loss: {:.6f}\n'.format(test_loss.item()))
#         return test_loss.item(),output
#
#
# def to_tensor(x,y):
#     x_tensor = torch.tensor(np.array(x), dtype=torch.float32)
#     y_tensor = torch.tensor(np.array(y), dtype=torch.float32)
#     return x_tensor.permute(0,2,1), y_tensor
#
#
# def get_train_val_test(time_step,seq_len):
#     val_pctg = 0.15
#
#     data = pd.read_excel('y集合.xlsx',index_col=0)
#     all_data_x = data.iloc[1:53,2:8]
#     all_data_y = data.iloc[1:53,9]
#     n = all_data_x.shape[0] - time_step - seq_len + 1 # 高频有1,预测没有1
#
#     x_expend = [all_data_x.iloc[i:i+time_step,:].values for i in range(n)]
#     y_expend = [all_data_y.iloc[i+time_step-1:i+time_step-1+seq_len].tolist() for i in range(n)]
#
#     val_num = int(val_pctg * (len(x_expend) - 1))
#     x_test, y_test = [x_expend[-1]], [y_expend[-1]]
#     x_val, y_val = x_expend[-1 - val_num:-1], y_expend[-1 - val_num:-1]
#     x_train, y_train = x_expend[:-1 - val_num], y_expend[:-1 - val_num]
#
#     x_test, y_test = to_tensor(x_test, y_test)
#     x_val, y_val = to_tensor(x_val, y_val)
#     x_train, y_train = to_tensor(x_train, y_train)
#     return x_train, y_train, x_val, y_val, x_test, y_test
#
#
#
#     args = {}
#     args['batch_size'] = 16
#     args['cuda'] = False
#     args['dropout'] =0.0
#     args['epochs'] = 500
#     args['nhid'] = 30 # 隐藏层神经元
#     args['levels'] = 8 # 隐藏层层数
#     args['lr'] = 0.0001
#     args['ksize'] = 2
#     args['optim'] = 'Adam'
#     args['log_interval']= 1
#     args['clip'] = -1
#     args['seq_len'] = 6
#     time_step = 8
#     forcast_len = 6
#
#     x_train, y_train, x_val, y_val, x_test, y_test = get_train_val_test(time_step, forcast_len)
#
#     input_channels = x_train.shape[1]
#     n_classes = 6
#     batch_size = args['batch_size']
#     epochs = args['epochs']
#     channel_sizes = [args['nhid']] * args['levels']
#     kernel_size = args['ksize']
#     dropout = args['dropout']
#
#     model = TCN(input_channels, n_classes, channel_sizes, kernel_size=kernel_size, dropout=dropout)
#     optimizer = getattr(optim, args['optim'])(model.parameters(), lr=args['lr'])
#
#     train_loss = []
#     valid_loss = []
#     for ep in range(1, epochs + 1):
#         print('x_train.shape:',x_train.shape)
#         print('y_train.shape:',y_train.shape)
#         train_loss.append(train(ep, x_train, y_train))
#         tloss,output = evaluate(x_val,y_val)
#         if ep == epochs:
#             print("llllllllllllllllllll")
#             _, output = evaluate(x_test, y_test)
#         valid_loss.append(tloss)
#
#
#
#
