import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import config
from torch.utils.data import Dataset, DataLoader, TensorDataset
from sklearn.model_selection import train_test_split
from sklearn.metrics import log_loss, roc_auc_score
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from collections import OrderedDict, namedtuple,defaultdict

# GPU ordinals this script is meant to use; only the first one is referenced below.
gpus = [0]
# Prefer the first CUDA device; fall back to CPU when CUDA is unavailable.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def orthogonal_init(layer, gain=1.0):
    """Orthogonally initialise ``layer.weight`` and zero its bias.

    Args:
        layer: a module exposing a ``weight`` parameter (and optionally
            a ``bias`` parameter), e.g. ``nn.Linear``.
        gain: scaling factor forwarded to ``nn.init.orthogonal_``.
    """
    nn.init.orthogonal_(layer.weight, gain=gain)
    # Robustness fix: layers created with bias=False have layer.bias is None,
    # which would make nn.init.constant_ raise. Skip the bias in that case.
    if layer.bias is not None:
        nn.init.constant_(layer.bias, 0)
    
class DNN(nn.Module):
    """Plain feed-forward tower: (Linear -> ReLU -> Dropout) per hidden layer.

    NOTE(review): the ``inputs_dim`` argument is ignored — the input width is
    hard-coded to 76 below. Confirm against the feature pipeline before reuse.
    """

    def __init__(self, inputs_dim, hidden_units, dropout_rate):
        super(DNN, self).__init__()
        # Hard-coded input width (the inputs_dim parameter is not used).
        self.inputs_dim = 76
        self.dropout = nn.Dropout(dropout_rate)
        # Layer widths: 76 -> hidden_units[0] -> ... -> hidden_units[-1].
        self.hidden_units = [76] + list(hidden_units)

        dims = list(zip(self.hidden_units[:-1], self.hidden_units[1:]))
        self.linear = nn.ModuleList(
            nn.Linear(fan_in, fan_out) for fan_in, fan_out in dims
        )
        # Re-initialise all weights with small Gaussian noise (biases keep
        # their default nn.Linear initialisation).
        for name, tensor in self.linear.named_parameters():
            if 'weight' in name:
                nn.init.normal_(tensor, mean=0, std=0.0001)

        self.activation = nn.ReLU()

    def forward(self, X):
        """Run X through every (Linear, ReLU, Dropout) stage and return the result."""
        out = X
        for layer in self.linear:
            out = self.dropout(self.activation(layer(out)))
        return out

class CrossNet(nn.Module):
    """Cross network from the Deep & Cross Network (DCN) paper.

    Applies ``layer_num`` explicit feature-crossing layers per batch row:
        vector parameterization:  x_{l+1} = x_0 * (x_l . w_l) + b_l + x_l
        matrix parameterization:  x_{l+1} = x_0 * (W_l x_l + b_l) + x_l

    NOTE(review): the ``in_features`` argument is overridden by a hard-coded 76
    (kept for compatibility with the in-file caller) — the caller's value is
    ignored; confirm against the feature pipeline before reuse.
    """

    def __init__(self, in_features, layer_num=3, parameterization='vector', seed=2022):
        super(CrossNet, self).__init__()
        self.layer_num = layer_num
        in_features = 76  # hard-coded width; see class NOTE above
        self.parameterization = parameterization
        if self.parameterization == 'vector':
            # One weight vector per cross layer.
            self.kernels = nn.Parameter(torch.Tensor(self.layer_num, in_features, 1))
        elif self.parameterization == 'matrix':
            # One full weight matrix per cross layer.
            self.kernels = nn.Parameter(torch.Tensor(self.layer_num, in_features, in_features))
        else:
            # Fail fast instead of leaving self.kernels undefined.
            raise ValueError("parameterization must be 'vector' or 'matrix'")
        self.bias = nn.Parameter(torch.Tensor(self.layer_num, in_features, 1))

        for i in range(self.kernels.shape[0]):
            nn.init.xavier_normal_(self.kernels[i])
        for i in range(self.bias.shape[0]):
            # BUG FIX: previously zeroed self.bias[0] on every iteration,
            # leaving bias[1:] as uninitialised (nondeterministic) memory.
            nn.init.zeros_(self.bias[i])

    def forward(self, inputs):
        """Cross each row of ``inputs`` and return the stacked results.

        Args:
            inputs: assumes shape (batch, rows, 76) — each inputs[item] is a
                (rows, 76) slice; TODO confirm with the caller.

        Returns:
            Tensor of the same shape as ``inputs``.
        """
        result = []
        for item in range(len(inputs)):
            # (rows, features) -> (rows, features, 1) column vectors.
            x_0 = inputs[item].unsqueeze(2).clone()
            x_1 = x_0
            for i in range(self.layer_num):
                if self.parameterization == 'vector':
                    # x_l^T w_l: contract the feature dimension to a scalar per row.
                    x1_w = torch.tensordot(x_1, self.kernels[i], dims=([1], [0]))
                    dot_ = torch.matmul(x_0, x1_w)
                    x_1 = dot_ + self.bias[i] + x_1
                else:
                    # BUG FIX: was torch.tensordot(self.kernels[i], x_1), whose
                    # default dims=2 contracts two dimensions and fails on a
                    # (F, F) x (F, 1) pair; the matrix cross layer needs W_l @ x_l.
                    x1_w = torch.matmul(self.kernels[i], x_1)
                    dot_ = x1_w + self.bias[i]
                    x_1 = x_0 * dot_ + x_1
            x_1 = torch.squeeze(x_1, dim=2)
            result.append(x_1.clone())

        result = torch.stack(result)

        return result

class DCN(nn.Module):
    """Deep & Cross Network scorer.

    Runs three parallel branches over the same input rows and sums two of them
    into the final logit:
      * a plain stack of Linear+Tanh+Dropout layers over the raw input
        (``self.linear``),
      * a DNN branch over [dense features ++ embedded sparse features],
      * a CrossNet branch over the same concatenated features,
    the deep/cross outputs are concatenated, projected twice, and added to the
    plain branch's output.

    NOTE(review): several constructor arguments are silently overridden by
    hard-coded constants below (``cross_num`` -> 6, ``l2_reg`` -> 1e-5), and
    many layer widths (707, 832, 204, ...) are wired to one specific feature
    configuration plus ``config.EDGE_NODE_NUM`` — confirm against the data
    pipeline before reusing this class.
    """
    def __init__(self, feat_size, embedding_size, linear_feature_columns, dnn_feature_columns, cross_num=4,
                 cross_param='vector', dnn_hidden_units=(1024,512,128, 128), init_std=0.0001, seed=2022, l2_reg=0.00001,
                 drop_rate=0.5):
        super(DCN, self).__init__()
        self.feat_size = feat_size
        self.embedding_size = embedding_size
        self.dnn_hidden_units = dnn_hidden_units
        # NOTE(review): the cross_num argument is ignored — 6 cross layers always.
        self.cross_num = 6
        self.cross_param = cross_param
        self.drop_rate = drop_rate
        # NOTE(review): the l2_reg argument is ignored in favour of this constant.
        self.l2_reg = 0.00001

        self.act = nn.ReLU()
        self.dropout = nn.Dropout(drop_rate)
        # Feature columns are (name, kind) pairs; split them by kind.
        self.dense_feature_columns = list(filter(lambda x:x[1]=='dense', dnn_feature_columns))
        self.sparse_feature_columns = list(filter(lambda x:x[1]=='sparse', dnn_feature_columns))
        
        # Embedding tower for the sparse slice of the input:
        # 707 -> 256 -> embedding_size -> embedding_size.
        # NOTE(review): 707 assumes X[:, 12:] has exactly 707 columns — confirm.
        self.Linear1 = nn.Linear(707, 256)
        self.Linear2 = nn.Linear(256, self.embedding_size)
        self.Linear3 = nn.Linear(self.embedding_size, self.embedding_size)
        
        # [nn.ReLU(), nn.Tanh()][True] indexes with True == 1, so Tanh is used.
        self.activate_func = [nn.ReLU(), nn.Tanh()][True]
        # Map feature name -> its column index in X (insertion order of feat_size).
        self.feature_index = defaultdict(int)
        start = 0
        for feat in self.feat_size:
            self.feature_index[feat] = start
            start += 1

        # Must be updated when the number of states (config.EDGE_NODE_NUM) changes.
        inputs_dim = (config.EDGE_NODE_NUM + 1) * (self.embedding_size + 12)

        # NOTE(review): DNN internally hard-codes its input width to 76, so this
        # computed inputs_dim is effectively ignored (see DNN.__init__).
        self.dnn = DNN(inputs_dim,self.dnn_hidden_units, 0.5)
        self.linear_layer = nn.Linear(832, 32)

        self.crossnet = CrossNet(inputs_dim, layer_num=self.cross_num, parameterization=self.cross_param)
        # Output dimension — modify together with the feature configuration.
        self.dnn_linear = nn.Linear(204,(config.EDGE_NODE_NUM+1)*32)
        
        #[731, 512,128,128,1]
        # Widths for the plain branch applied to the raw input in forward():
        # (len(feat_size) - 12) -> dnn_hidden_units -> 32.
        dnn_hidden_units = [len(feat_size)-12] + list(dnn_hidden_units) + [32]
        self.dnn_hidden_units = dnn_hidden_units


        self.linear = nn.ModuleList([
            nn.Linear(dnn_hidden_units[i], dnn_hidden_units[i+1]) for i in range(len(dnn_hidden_units)-1)
            
        ])
        
        orthogonal_init(self.Linear1)
        orthogonal_init(self.Linear2)
        orthogonal_init(self.Linear3)
        orthogonal_init(self.dnn_linear)
        
        # Re-initialise the plain-branch weights with small Gaussian noise.
        for name, tensor in self.linear.named_parameters():
            if 'weight' in name:
                nn.init.normal_(tensor, mean=0, std=init_std)

    def forward(self, X):
        """Score a batch of feature rows.

        Args:
            X: assumes a (batch, num_features) tensor whose first 12 columns
               are dense features and the remaining columns sparse ones —
               TODO confirm with the caller.

        Returns:
            The raw (pre-sigmoid) logit tensor.
        """
        
        logit = X
        # Ensure the same data type for tensors
        logit = logit.to(torch.float32)  # Convert to the desired data type

        #print("Hidden layer dimensions:", self.dnn_hidden_units)
        #print(self.linear)
        # Plain branch: Linear -> Tanh -> Dropout through every layer.
        for i in range(len(self.linear)):
            fc = self.linear[i](logit)
            fc = self.activate_func(fc)
            fc = self.dropout(fc)
            logit = fc
        #[1,1,65]
        logit = torch.flatten(logit, start_dim=1).unsqueeze(1)
        
        # a = []
        # for i in range(len(X)):
        #     a.torch.tensor(X[i][:,12:], dtype=torch.float))
        # a = torch.stack(a)
        #sparse_embedding  = self.activate_func(self.Linear1(X[0:len(X),:,12:]))
        # Embed the sparse slice (columns 12+) through the three-layer tower.
        sparse_embedding = self.activate_func(self.Linear1(X[:, 12:]))
        tmp_sparse_input = self.activate_func(self.Linear2(sparse_embedding))
        sparse_input = self.activate_func(self.Linear3(tmp_sparse_input))
        # print(lyh.shape)
        # sparse_input = []
        # for i in range(len(X)):
        #     item = X[i][:,12:].clone()
        #     sparse_embedding = self.activate_func(self.Linear1(item))
        #     #[1,65, 64]
        #     sparse_value = self.activate_func(self.Linear2(sparse_embedding))
        #     sparse_input.append(sparse_value)
        # sparse_input = torch.stack(sparse_input)
        
        # dense_values = [X[:,:, self.feature_index[feat[0]]].reshape(-1,-1, 1) for feat in self.dense_feature_columns]
        # Gather the dense columns by their registered indices.
        dense_values = [X[:, self.feature_index[feat[0]]].unsqueeze(-1) for feat in self.dense_feature_columns]
        dense_input = torch.cat(dense_values, dim=1)
        
        # dense_input = []
        # for i in range(len(X)):
        #     value = [X[i][:, self.feature_index[feat[0]]].clone().reshape(-1, 1) for feat in self.dense_feature_columns]
        #     dense_values = torch.cat(value, dim=1)
        #     dense_input.append(dense_values)
        #     # dense_values = [X[:, self.feature_index[feat[0]]].reshape(-1, 1) for feat in self.dense_feature_columns]
        # #[1, 65, 12]
        # dense_input = torch.stack(dense_input)
        
        # Shared input for the deep and cross branches: dense ++ embedded sparse.
        dnn_value = torch.cat((dense_input, sparse_input), dim=-1)
        
        # Flatten: [batch_size, num_sparse_feats, embed_dim] -> [batch_size, num_sparse_feats * embed_dim]
        dnn_input = torch.flatten(dnn_value, start_dim=1).unsqueeze(1)
        #print(dnn_input.shape)
        
        
        # print('sparse input size', sparse_input.shape)
        # print('dense input size', dense_input.shape)
        # print('dnn input size', dnn_input.shape)
        
        #[1,1,128]
        deep_out = self.dnn(dnn_input)
        #print(deep_out.shape)
        # print("deep_out.shape is : ",deep_out.shape)
        #[1,1,2860]
        cross_out = self.crossnet(dnn_input)
        # print("cross_out.shape is : ",cross_out.shape)
        #[1,1,2988]
        stack_out = torch.cat((cross_out, deep_out), dim=-1)
    
        # print("logit.shape is : ",logit.shape)
        # print("stack_out.shape is : ",stack_out.shape)
        #[1,1,65]
        # Project the concatenated deep/cross features down to the logit width.
        dnn_output = self.dnn_linear(stack_out)
        #print(dnn_output.shape)
        #print(logit.shape)
        # print('dnn_output size', dnn_output.shape)
        dnn_output = self.linear_layer(dnn_output)
        #[1,1,2080]
        # Residual-style sum of the plain branch and the deep/cross projection.
        logit = logit + dnn_output
        # print('logit size', logit.shape)
        # y_pred = torch.sigmoid(logit)
        return logit
