import torch
import torch.nn as nn
from math import exp

from fds import FDS

torch.manual_seed(7)

def activation(y):
    """Apply a piecewise activation in place to column 0 of ``y``.

    For each row value ``v = y[i][0]``:
      * 0.7 < v < 0.9 : replaced by ``exp(v - 0.9) - 0.1``
      * v < 0.7       : replaced by tanh(v), spelled out as
                        ``(e^v - e^-v) / (e^v + e^-v)``
      * otherwise     : unchanged (covers v >= 0.9 and v exactly 0.7)

    NOTE(review): the mapping is discontinuous at v == 0.7 and v == 0.9 —
    confirm this is intended rather than an off-by-boundary mistake.

    Args:
        y: 2-D indexable of shape (N, >=1), e.g. a torch tensor of shape
           (N, 1). Mutated in place.

    Returns:
        The same object ``y``, after mutation.
    """
    for i in range(y.shape[0]):
        v = y[i][0]
        if 0.7 < v < 0.9:
            y[i][0] = exp(v - 0.9) - 0.1
        elif v < 0.7:
            # Hand-rolled tanh; kept in this exact form so results match
            # the original element-by-element.
            y[i][0] = (exp(v) - exp(-v)) / (exp(v) + exp(-v))
        # else: value >= 0.9 (or exactly 0.7) passes through unchanged.
    return y


class Act_fun(nn.Module):
    """Element-wise piecewise activation module.

    ``forward`` replaces each value ``v = y[i][0]`` lying strictly in
    (0.7, 0.85) with ``exp(v - 0.9) - 0.1`` in place; all other values
    pass through unchanged.

    NOTE(review): the 0.85 upper bound differs from the 0.9 used by the
    module-level ``activation`` helper — confirm which is intended.
    """

    def __init__(self):
        super(Act_fun, self).__init__()

    def forward(self, y):
        """Apply the activation in place to column 0 of ``y`` and return ``y``."""
        for i in range(y.shape[0]):
            v = y[i][0]
            if 0.7 < v < 0.85:
                y[i][0] = exp(v - 0.9) - 0.1
        return y

class NerualNet_yieldRate(nn.Module):
    """Small MLP regressor (14 -> 6 -> 1) with optional FDS smoothing.

    When ``fds`` is truthy, an FDS (feature distribution smoothing) module
    operates on the 6-dimensional hidden features during training.

    Args:
        fds: truthy to enable feature distribution smoothing.
        bucket_num, bucket_start, start_update, start_smooth, kernel, ks,
            sigma: forwarded to the ``FDS`` module (see ``fds.FDS``).
        dropout: accepted for interface compatibility; currently unused.
    """

    def __init__(self, fds, bucket_num, bucket_start, start_update, start_smooth,
                 kernel, ks, sigma, dropout=None):
        super(NerualNet_yieldRate, self).__init__()

        self.layer1 = nn.Linear(14, 6)
        # NOTE(review): defined but never applied in forward (the call there
        # is disabled); kept so existing attribute access keeps working.
        self.layer1_activate = nn.Tanh()
        self.layer2 = nn.Linear(6, 1)
        # NOTE(review): also unused in forward; kept for compatibility.
        self.selfAct = Act_fun()

        if fds:
            self.FDS = FDS(
                feature_dim=6, bucket_num=bucket_num, bucket_start=bucket_start,
                start_update=start_update, start_smooth=start_smooth,
                kernel=kernel, ks=ks, sigma=sigma,
            )
        self.fds = fds
        self.start_smooth = start_smooth

    def forward(self, x, targets=None, epoch=None, training=True):
        """Run the network on a batch.

        Args:
            x: input tensor of shape (N, 14).
            targets: regression targets, used only by FDS smoothing.
            epoch: current epoch number; FDS smoothing starts once
                ``epoch >= start_smooth``. NOTE(review): must not be None
                when training with FDS enabled, or the comparison raises.
            training: whether this is a training pass.

        Returns:
            ``(predictions, encodings)`` — shapes (N,) and (N, 6) — when
            training with FDS enabled; otherwise just ``predictions``.
        """
        x = self.layer1(x)
        # Pre-smoothing hidden features, also returned for FDS statistics.
        encodings = x

        # Smooth the hidden feature distribution once training reaches
        # the start_smooth epoch (original nested ifs merged; same logic).
        if training and self.fds and epoch >= self.start_smooth:
            x = self.FDS.smooth(encodings, targets, epoch)

        x = self.layer2(x)
        x = x.squeeze(-1)  # (N, 1) -> (N,)

        if training and self.fds:
            return x, encodings
        return x
