#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : Paddle第二版本.py
# @Author: Richard Chiming Xu
# @Date  : 2021/12/24
# @Desc  : 参考别人的embedding实现一次
import pandas as pd
import pickle

# Global configuration
config = {
    'EMB_OUT_SIZE': 128,  # embedding output size for each embedded field
    'EMB_LINEAR_SIZE': 64,  # size of the Linear layer following each embedding
    'FM_EMB_OUT_SIZE': 4,  # output dim of the FM model's embedding/linear layers
    'BATCH_SIZE': 4096,
    'LEARNING_RATE': 0.0001,
    'EPOCHS': 500
}
# Network type per field: "emb" = categorical field fed through an embedding,
# "norm" = numeric field fed after standardization, None = field is ignored.
NETWORK_TAGS = {'android_id': None,
                'apptype': "emb",
                'carrier': "emb",
                'dev_height': "emb",
                'dev_ppi': "emb",
                'dev_width': "emb",
                'lan': "emb",
                'media_id': "emb",
                'ntt': "emb",
                'os': None,
                'osv': "emb",
                'package': "emb",
                'sid': None,
                'timestamp': "norm",
                'version': "emb",
                'fea_hash': "norm",
                'location': "emb",
                'fea1_hash': "norm",
                'cus_type': "emb"}
# Vocabulary size per embedded field (filled in after the vocabs are loaded)
EMB_SIZE = {}
# Names of the fields actually used for training
FEATURES = []

# Load the raw CSV data and stack train/test into a single frame so that all
# preprocessing (scaling, vocab mapping) is applied consistently to both.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
# Sentinel label so test rows can be separated back out later.
test['label'] = -1

full_data = pd.concat((train, test))

# Convert the fea_hash column to numeric form.
print('-' * 20, '转换fea_hash', '-' * 20)


def trans_fea_hash(data):
    """Coerce a raw ``fea_hash`` cell value to a float.

    Non-numeric values (long hash-like strings, None, ...) are mapped to the
    sentinel 499997879 so the column stays purely numeric.

    Args:
        data: raw cell value from the ``fea_hash`` column.

    Returns:
        The parsed float, or the int sentinel 499997879 when parsing fails.
    """
    try:
        return float(data)
    # BUGFIX: a bare ``except:`` also swallows KeyboardInterrupt/SystemExit;
    # only the conversion failures float() can actually raise are caught.
    except (TypeError, ValueError):
        return 499997879


full_data['fea_hash'] = full_data['fea_hash'].apply(trans_fea_hash)

# Standardize (zero mean / unit variance) the columns that are fed in raw,
# non-embedded form.
unemb_cols = [col for col, method in NETWORK_TAGS.items() if method == 'norm']
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
full_data[unemb_cols] = scaler.fit_transform(full_data[unemb_cols])

# Map categorical columns to embedding indices.
print('-' * 20, '转换embedding', '-' * 20)


def load_vocab(name):
    """Load the pickled vocabulary (value -> index mapping) for one column."""
    path = './vocab/' + name + '_vocab.pkl'
    with open(path, 'rb') as fh:
        return pickle.load(fh)


for col, method in NETWORK_TAGS.items():
    if method == 'emb':
        # Replace raw category values with integer vocab indices.
        vocab = load_vocab(col)
        EMB_SIZE[col] = len(vocab)
        full_data[col] = full_data[col].astype('str').map(vocab)
    if method is not None:
        FEATURES.append(col)

import math
import paddle
from paddle import nn, tensor
paddle.device.set_device('gpu:0')

'''
    DNN模型
'''


class EmbNet(nn.Layer):
    """Embedding -> LSTM -> Linear feature extractor for one categorical field.

    Args:
        emb_in_size: vocabulary size of the field.
        emb_out_size: embedding dimension.
        out_size: output size of the final Linear layer.
    """

    def __init__(self, emb_in_size, emb_out_size, out_size):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings=emb_in_size, embedding_dim=emb_out_size)
        self.lstm = nn.LSTM(emb_out_size, 128, 1)
        self.lin = nn.Linear(in_features=128, out_features=out_size)
        self.drop = nn.layer.Dropout(p=0.02)
        self.relu = paddle.nn.LeakyReLU()

    def forward(self, input):
        embedded = self.emb(input)
        # LSTM returns (output sequence, (h, c)); the output sequence is used
        # here and the final states are discarded.
        seq_out, _ = self.lstm(embedded)
        hidden = self.drop(self.lin(seq_out))
        return self.relu(hidden)


class UnEmbNet(nn.Layer):
    """Tiny 1 -> 3 -> 1 MLP shared by all standardized scalar features."""

    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(in_features=1, out_features=3)
        self.lin2 = nn.Linear(in_features=3, out_features=1)
        self.relu = paddle.nn.LeakyReLU()

    def forward(self, input):
        hidden = self.relu(self.lin1(input))
        return self.relu(self.lin2(hidden))


class OutNet(nn.Layer):
    """Output head: in_size -> 128 -> 16 -> out_size MLP with LeakyReLU and
    Dropout between the layers."""

    def __init__(self, in_size, out_size):
        super(OutNet, self).__init__()

        self.lin1 = nn.Linear(in_features=in_size, out_features=128)
        self.lin2 = nn.Linear(in_features=128, out_features=16)
        self.lin3 = nn.Linear(in_features=16, out_features=out_size)
        self.relu = nn.LeakyReLU()
        self.drop = nn.layer.Dropout(p=0.2)

    def forward(self, input):
        hidden = self.drop(self.relu(self.lin1(input)))
        hidden = self.drop(self.relu(self.lin2(hidden)))
        # NOTE(review): applying LeakyReLU to the final logits is unusual
        # (it skews the logit distribution); kept to preserve behavior.
        return self.relu(self.lin3(hidden))


class DNNNet(nn.Layer):
    """Deep part of the model: one EmbNet per embedded field plus a shared
    UnEmbNet for scalar fields; all per-field features are concatenated and
    fed through OutNet.

    Args:
        tag_dict: field name -> network type ("emb", "norm" or None).
        size_dict: field name -> vocabulary size (embedded fields only).
        config: hyper-parameter dict (EMB_OUT_SIZE, EMB_LINEAR_SIZE, ...).
    """

    def __init__(self, tag_dict: dict, size_dict: dict, config: dict):
        super().__init__()
        # BUGFIX: use nn.LayerList instead of a plain Python list — sublayers
        # stored in a plain list are NOT registered with the Layer, so their
        # parameters are invisible to model.parameters() and never trained
        # (or saved in the state dict).
        self.hidden_layers_list = nn.LayerList()
        # Ordered network types for the fields actually used; forward() relies
        # on this order matching the order of the input_data sequence.
        self.dict_list = [v for v in tag_dict.values() if v is not None]

        # One EmbNet per embedded field, in tag_dict iteration order.
        for tag, tag_method in tag_dict.items():
            if tag_method == "emb":
                self.hidden_layers_list.append(
                    EmbNet(emb_in_size=size_dict[tag] + 1,
                           emb_out_size=config['EMB_OUT_SIZE'],
                           out_size=config['EMB_LINEAR_SIZE']))
        # A single shared sub-network for all standardized scalar fields.
        self.unemb = UnEmbNet()
        # Output head; 835 is the hand-computed width of the concatenated
        # feature vector for the current field configuration.
        self.outnet = OutNet(835, 2)

    def forward(self, input_data):
        """input_data: sequence of per-field tensors, ordered like dict_list."""
        layer_list = []  # per-field feature tensors
        num_id = 0  # index into hidden_layers_list (embedded fields only)
        for sample_data, tag_method in zip(input_data, self.dict_list):
            tmp = sample_data
            if tag_method == "emb":
                tmp = self.hidden_layers_list[num_id](tmp)
                num_id += 1
            elif tag_method == "norm":
                tmp = self.unemb(tmp)
            elif tag_method is None:
                # dict_list never contains None, but keep the guard for safety.
                continue

            # Flatten [batch, 1, n] -> [batch, n] before concatenation.
            layer_list.append(tensor.flatten(tmp, start_axis=1))
        # Concatenate all field features and run the output head.
        return self.outnet(tensor.concat(layer_list, axis=1))


'''
    FM模型
'''


class FMNet(nn.Layer):
    """Factorization-machine part: per-field embeddings weighted by Xv, the
    classic (sum-square minus square-sum) second-order interaction, then a
    Linear layer producing 2 logits.

    Args:
        tag_dict: field name -> network type ("emb", "norm" or None).
        size_dict: field name -> vocabulary size (embedded fields only).
        config: hyper-parameter dict (FM_EMB_OUT_SIZE, ...).
    """

    def __init__(self, tag_dict: dict, size_dict: dict, config: dict):
        super(FMNet, self).__init__()

        # BUGFIX: use nn.LayerList instead of a plain Python list so the
        # per-field layers are registered and their parameters are trained.
        # (The original also created an fm_layers_list_2 that was never used;
        # it has been removed.)
        self.fm_layers_list_1 = nn.LayerList()
        # Ordered network types for the used fields; must match input order.
        self.dict_list = [v for v in tag_dict.values() if v is not None]

        # First FM layer: an Embedding for categorical fields, a Linear for
        # scalar fields, both projecting to FM_EMB_OUT_SIZE.
        for tag, tag_method in tag_dict.items():
            if tag_method == "emb":
                self.fm_layers_list_1.append(
                    nn.Embedding(num_embeddings=size_dict[tag] + 1,
                                 embedding_dim=config['FM_EMB_OUT_SIZE']))
            elif tag_method == 'norm':
                self.fm_layers_list_1.append(
                    nn.Linear(1, config['FM_EMB_OUT_SIZE']))
        # Final linear layer mapping the interaction vector to 2 logits.
        self.lin = nn.Linear(config['FM_EMB_OUT_SIZE'], 2)

    def forward(self, input_data, Xv):
        """input_data: per-field tensors; Xv: per-sample weights [batch, n_fields]."""
        fm_result_1 = []  # weighted per-field projections

        for i in range(len(input_data)):
            tag_method = self.dict_list[i]
            X_vector = Xv[:, i]
            tmp = input_data[i]
            layer = self.fm_layers_list_1[i]
            # Both branches apply the same weighting; scale each sample's
            # projection by its Xv weight (transpose to broadcast over batch).
            if tag_method in ("emb", "norm"):
                tmp = (paddle.squeeze(layer(tmp), axis=1).t() * X_vector).t()
            elif tag_method is None:
                continue
            fm_result_1.append(tmp)
        fm_result_1 = paddle.to_tensor(fm_result_1)

        # FM second-order term: 0.5 * ((sum v)^2 - sum v^2).
        sum_then_square = paddle.square(paddle.sum(fm_result_1, 0))
        square_then_sum = paddle.sum(paddle.square(fm_result_1), 0)

        return self.lin(0.5 * (sum_then_square - square_then_sum))


class ClassifyNet(nn.Layer):
    """Combined classifier: DNN (deep) logits plus FM logits, softmaxed."""

    def __init__(self, tag_dict: dict, size_dict: dict, config: dict):
        super(ClassifyNet, self).__init__()

        self.lin = DNNNet(tag_dict, size_dict, config)  # deep part
        self.fm = FMNet(tag_dict, size_dict, config)  # FM part

    def forward(self, inputs, Xv):
        combined = self.lin(inputs) + self.fm(inputs, Xv)
        # NOTE(review): softmax here followed by CrossEntropyLoss downstream
        # applies softmax twice; kept as-is to preserve original behavior.
        return paddle.nn.functional.softmax(combined)


# 加载数据

from sklearn.model_selection import train_test_split
from paddle.io import Dataset, DataLoader
import numpy as np
import paddlenlp.transformers

class MineDataset(Dataset):
    """Wraps a DataFrame as a paddle Dataset yielding [features, Xv, label].

    Args:
        data: source frame (must contain a 'label' column; -1 marks test rows).
        features: ordered list of feature column names.
        tags: field name -> network type ("emb", "norm" or None).
    """

    def __init__(self, data: pd.DataFrame, features: list, tags: dict):
        super(MineDataset, self).__init__()
        self.len = len(data)
        self.features = features
        self.tags = tags
        self.X = data[features]
        self.y = data['label']

    def __getitem__(self, idx):
        row = self.X.iloc[idx]
        # Random per-sample FM weight vector, one weight per feature.
        Xv = paddle.normal(shape=[len(row)])

        # Cast each used field to the dtype its network expects.
        X = []
        for field, method in self.tags.items():
            if method == 'emb':
                X.append(np.array(row[field], dtype='int64').reshape([1]))
            elif method == 'norm':
                X.append(np.array(row[field], dtype='float32').reshape([1]))

        # Only real labels are cast to int64; -1 (test sentinel) is passed through.
        label = self.y.iloc[idx]
        if label != -1:
            label = np.array(label).astype("int64")

        return [X, Xv, label]

    def __len__(self):
        return self.len


from sklearn.utils import shuffle

# Labeled rows only, shuffled, then split 90/10 into train/validation.
train_data = shuffle(full_data[full_data['label'] != -1])
l = int(len(train_data) * 0.9)

train_dataset = MineDataset(train_data[:l], FEATURES, NETWORK_TAGS)
val_dataset = MineDataset(train_data[l:], FEATURES, NETWORK_TAGS)
test_data = MineDataset(full_data[full_data['label'] == -1], FEATURES, NETWORK_TAGS)

train_dataloader = DataLoader(train_dataset, batch_size=config['BATCH_SIZE'],
                              shuffle=True, drop_last=True)
val_dataloader = DataLoader(val_dataset, batch_size=config['BATCH_SIZE'],
                            shuffle=True, drop_last=True)
# No shuffling/dropping for prediction: every test row must keep its order.
test_dataloader = DataLoader(test_data, batch_size=config['BATCH_SIZE'],
                             shuffle=False, drop_last=False)

# Model, optimizer and loss.
model = ClassifyNet(NETWORK_TAGS, EMB_SIZE, config)
model.train()
opt = paddle.optimizer.AdamW(learning_rate=config['LEARNING_RATE'],
                             parameters=model.parameters())
loss_fn = nn.CrossEntropyLoss()

# Training loop: one optimization pass per mini-batch, validation after each
# epoch on the held-out 10% split.
for epoch in range(config['EPOCHS']):
    for iter_id, mini_batch in enumerate(train_dataloader):
        x_train = mini_batch[0]
        Xv = mini_batch[1]
        y_train = mini_batch[2]
        # Forward pass.
        y_pred = model(x_train, Xv)
        # CONSISTENCY FIX: reuse the shared loss_fn (nn.CrossEntropyLoss)
        # instead of calling nn.functional.loss.cross_entropy directly —
        # identical defaults, and validation already uses loss_fn.
        loss = loss_fn(y_pred, y_train)
        # CrossEntropyLoss already reduces to the mean; this is a no-op kept
        # for compatibility with the original logging.
        avg_loss = paddle.mean(loss)
        if iter_id % 10 == 0:
            acc = paddle.metric.accuracy(y_pred, paddle.unsqueeze(y_train, 1))
            print("epoch: {}, iter: {}, loss is: {}, acc is: {}".format(epoch, iter_id, avg_loss.numpy(), acc.numpy()))

        # Backward pass and parameter update.
        avg_loss.backward()
        opt.step()
        opt.clear_grad()

    # Validation after each epoch.
    print("===================================val===========================================")
    model.eval()
    accuracies = []
    losses = []
    for batch_id, data in enumerate(val_dataloader):
        inputs = data[0]
        Xv = data[1]
        labels = data[2]
        vals = model(inputs, Xv)
        loss = loss_fn(vals, labels)
        acc = paddle.metric.accuracy(vals, paddle.unsqueeze(labels, 1))
        losses.append(loss.numpy())
        accuracies.append(acc.numpy())

    avg_acc, avg_loss = np.mean(accuracies), np.mean(losses)
    print("validation: loss is: {}, accuracy is: {}".format(avg_loss, avg_acc))
    model.train()

print("===================================predict===========================================")
model.eval()
result = []
for batch_id, data in enumerate(test_dataloader):
    inputs = data[0]
    Xv = data[1]

    probs = model(inputs, Xv)
    # Two-class softmax output: probability of class 0 above 0.5 -> predict 0.
    for row in probs:
        result.append(0 if row[0] > 0.5 else 1)


# Export predictions keyed by sid.
res = pd.DataFrame(test['sid'])
res['label'] = result
res.to_csv('./result/paddle_fm_20211228_100.csv', index=False)



