#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : Paddle第二版本.py
# @Author: Richard Chiming Xu
# @Date  : 2021/12/24
# @Desc  : 参考别人的embedding实现一次
import pandas as pd
import pickle

# Per-field handling for the network:
#   "emb"  -> field goes through a learned embedding sub-net
#   "norm" -> field is standard-scaled and fed as a float scalar
#   None   -> field is excluded from the model
# Order matters: downstream code zips model inputs/layers against the
# dict's insertion order, so keep these pairs in this exact sequence.
NETWORK_TAGS = dict([
    ('android_id', None),
    ('apptype', 'emb'),
    ('carrier', 'emb'),
    ('dev_height', 'emb'),
    ('dev_ppi', 'emb'),
    ('dev_width', 'emb'),
    ('lan', 'emb'),
    ('media_id', 'emb'),
    ('ntt', 'emb'),
    ('os', None),
    ('osv', 'emb'),
    ('package', 'emb'),
    ('sid', None),
    ('timestamp', 'norm'),
    ('version', 'emb'),
    ('fea_hash', 'norm'),
    ('location', 'emb'),
    ('fea1_hash', 'norm'),
    ('cus_type', 'emb'),
])
# Vocabulary size per embedded field (filled during preprocessing)
EMB_SIZE = {}
# Columns actually fed to the model (filled during preprocessing)
FEATURES = []

# Load raw data. Test rows are tagged with label == -1 so both splits can
# share one preprocessing pass and be separated again later by that tag.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
test['label'] = -1

full_data = pd.concat([train, test])

# Convert fea_hash (non-numeric values are replaced by a sentinel below)
print('-' * 20, '转换fea_hash', '-' * 20)


def trans_fea_hash(data, fallback=499997879):
    """Coerce a raw fea_hash cell to float.

    Some rows carry non-numeric hash strings; those are mapped to a fixed
    sentinel so the column stays numeric for StandardScaler.

    Args:
        data: raw cell value (string or number).
        fallback: value returned when ``data`` cannot be parsed as a float
            (default keeps the original hard-coded sentinel).

    Returns:
        float(data) on success, otherwise ``fallback``.
    """
    try:
        return float(data)
    except (TypeError, ValueError):
        # Only catch parse failures; a bare except would also hide
        # unrelated bugs such as KeyboardInterrupt or typos.
        return fallback


# Pass the converter directly — wrapping it in a lambda was redundant.
full_data['fea_hash'] = full_data['fea_hash'].apply(trans_fea_hash)

# Standard-scale the columns that are NOT embedded ("norm" fields) so they
# enter the network as zero-mean/unit-variance floats.
unemb_cols = [k for k, v in NETWORK_TAGS.items() if v == 'norm']
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
# Fit on the combined train+test frame and write the scaled values back.
full_data[unemb_cols] = scaler.fit_transform(full_data[unemb_cols])

# Convert embedding fields to integer ids
print('-' * 20, '转换embedding', '-' * 20)


def load_vocab(name, vocab_dir='./vocab'):
    """Load the pickled value->index vocabulary for field ``name``.

    Args:
        name: field name; the file read is ``<vocab_dir>/<name>_vocab.pkl``.
        vocab_dir: directory holding the vocab pickles (default matches the
            original hard-coded ``./vocab`` location).

    Returns:
        The unpickled vocabulary mapping.

    NOTE: ``pickle.load`` executes arbitrary code from the file — only use
    with locally generated, trusted vocab files.
    """
    with open(f'{vocab_dir}/{name}_vocab.pkl', 'rb') as f:
        return pickle.load(f)


# Map each embedded field to integer ids via its prebuilt vocabulary, record
# vocab sizes in EMB_SIZE, and collect every column the network will use.
for k, v in NETWORK_TAGS.items():
    if v == 'emb':
        emb_dict = load_vocab(k)
        EMB_SIZE[k] = len(emb_dict)
        # NOTE(review): values missing from the vocab map to NaN here, which
        # would later break the int64 cast in MineDataset.__getitem__ —
        # confirm the vocabs cover every value in train+test.
        full_data[k] = full_data[k].astype('str').map(emb_dict)
    if v is not None:
        FEATURES.append(k)

# Dataset class
import paddle
from paddle.io import Dataset
import numpy as np


class MineDataset(Dataset):
    """Paddle dataset yielding one array per used field, plus the label
    for training rows (test rows are tagged with label == -1 and get no
    label entry)."""

    def __init__(self, data: pd.DataFrame, features: list, tags: dict):
        super(MineDataset, self).__init__()
        self.len = len(data)
        self.features = features
        self.tags = tags
        self.X = data[features]
        self.y = data['label']

    def __getitem__(self, idx):
        row = self.X.iloc[idx]
        sample = []
        # Emit fields in tag order: int64 ids for embedded fields,
        # float32 scalars for normalised ones; shape [1] each.
        for field, method in self.tags.items():
            if method == 'emb':
                sample.append(np.array(row[field], dtype='int64').reshape([1]))
            elif method == 'norm':
                sample.append(np.array(row[field], dtype='float32').reshape([1]))
        # Training rows carry their label; prediction rows (label == -1) do not.
        label = self.y.iloc[idx]
        if label != -1:
            sample.append(np.array(label).astype("int64"))
        return sample

    def __len__(self):
        return self.len


import paddle.nn as nn
import paddle.tensor as tensor
from paddle.static import InputSpec
from paddle.metric import Accuracy

EMB_OUT_SIZE = 128  # embedding output dimension per field
EMB_LINEAR_SIZE = 64  # width of the Linear layer following each embedding


class EmbNet(paddle.nn.Layer):
    """Per-field sub-network: Embedding -> LSTM -> Linear -> Dropout -> LeakyReLU."""

    def __init__(self, emb_in_size, emb_out_size, out_size):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings=emb_in_size, embedding_dim=emb_out_size)
        self.lstm = nn.LSTM(emb_out_size, 128, 1)
        self.lin = nn.Linear(in_features=128, out_features=out_size)
        self.drop = nn.layer.Dropout(p=0.02)
        self.relu = paddle.nn.LeakyReLU()

    def forward(self, input):
        embedded = self.emb(input)
        # LSTM returns (sequence output, final states); the states are
        # discarded and the sequence output is used downstream.
        seq_out, (_, _) = self.lstm(embedded)
        projected = self.drop(self.lin(seq_out))
        return self.relu(projected)


class UnEmbNet(paddle.nn.Layer):
    """Tiny MLP (1 -> 3 -> 1) for scalar, non-embedded features."""

    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(in_features=1, out_features=3)
        self.lin2 = nn.Linear(in_features=3, out_features=1)
        self.relu = paddle.nn.LeakyReLU()

    def forward(self, input):
        hidden = self.relu(self.lin1(input))
        return self.relu(self.lin2(hidden))


class OutNet(paddle.nn.Layer):
    """Output head: in_size -> 128 -> 16 -> out_size with LeakyReLU and dropout."""

    def __init__(self, in_size, out_size):
        super(OutNet, self).__init__()
        self.lin1 = nn.Linear(in_features=in_size, out_features=128)
        self.lin2 = nn.Linear(in_features=128, out_features=16)
        self.lin3 = nn.Linear(in_features=16, out_features=out_size)
        self.relu = nn.LeakyReLU()
        self.drop = nn.layer.Dropout(p=0.2)

    def forward(self, input):
        hidden = self.drop(self.relu(self.lin1(input)))
        hidden = self.drop(self.relu(self.lin2(hidden)))
        # Final activation is LeakyReLU (the caller applies softmax).
        return self.relu(self.lin3(hidden))


class SampleNet(paddle.nn.Layer):
    """Full model: one sub-network per used field, concatenated into OutNet.

    Args:
        tag_dict: field name -> "emb" / "norm" / None (None fields skipped).
        size_dict: field name -> vocabulary size for embedded fields.
    """

    def __init__(self, tag_dict: dict, size_dict: dict):
        super().__init__()
        # Handling methods of the fields fed to the network, in dict order.
        # Kept for backward compatibility with code that inspects it.
        self.dict_list = [v for v in tag_dict.values() if v is not None]

        # BUG FIX: sublayers were stored in a plain Python list, which paddle
        # does not register — their parameters never reached
        # model.parameters() and were never trained. nn.LayerList registers
        # them properly. While building, accumulate the concatenated feature
        # width (was hard-coded as 835).
        sublayers = []
        concat_width = 0
        for tag, tag_method in tag_dict.items():
            if tag_method == "emb":
                # +1 reserves an extra row beyond the vocab size.
                sublayers.append(EmbNet(emb_in_size=size_dict[tag] + 1,
                                        emb_out_size=EMB_OUT_SIZE,
                                        out_size=EMB_LINEAR_SIZE))
                concat_width += EMB_LINEAR_SIZE
            elif tag_method == "norm":
                sublayers.append(UnEmbNet())
                concat_width += 1  # UnEmbNet emits a single scalar
        self.hidden_layers_list = nn.LayerList(sublayers)

        # Output head over the concatenated per-field features (2 classes).
        self.outnet = OutNet(concat_width, 2)

    def forward(self, *input_data):
        """Run each input through its field sub-network and classify.

        ``input_data`` must arrive in the same order as the non-None fields
        of ``tag_dict`` (the order InputSpecs are declared in).
        """
        feature_parts = []
        for sample_data, layer in zip(input_data, self.hidden_layers_list):
            out = layer(sample_data)
            # Original shape is [batch, 1, n]; flatten to [batch, n].
            feature_parts.append(tensor.flatten(out, start_axis=1))
        # Concatenate all per-field features and classify.
        merged = tensor.concat(feature_parts, axis=1)
        return paddle.nn.functional.softmax(self.outnet(merged))


# Training environment
paddle.set_device('gpu:0')
EPOCHS = 500

# Declare one network input per used field, in NETWORK_TAGS order (the same
# order SampleNet.forward expects): int64 ids for embedded fields, float32
# scalars for normalised ones.
inputs = []
for tag_name, tag_m in NETWORK_TAGS.items():
    if tag_m is None:
        continue
    dtype = "int64" if tag_m == "emb" else "float32"
    inputs.append(InputSpec(shape=[-1, 1], dtype=dtype, name=tag_name))

# Label spec
labels = [InputSpec([-1, 1], 'int64', name='label')]

# High-level training wrapper
model = paddle.Model(SampleNet(NETWORK_TAGS, EMB_SIZE), inputs=inputs, labels=labels)

from sklearn.utils import shuffle

# Shuffle the labelled rows, then hold out the last 10% for validation.
# NOTE(review): no random_state is set, so the split is not reproducible
# across runs — confirm whether that is intended.
train_data = shuffle(full_data[full_data['label'] != -1])
split_at = int(len(train_data) * 0.9)
train_dataset = MineDataset(train_data[:split_at], FEATURES, NETWORK_TAGS)
val_dataset = MineDataset(train_data[split_at:], FEATURES, NETWORK_TAGS)

# Optimiser
optimizer = paddle.optimizer.Adam(learning_rate=0.0005, parameters=model.parameters())
# Training configuration

model.prepare(optimizer, paddle.nn.loss.CrossEntropyLoss(), Accuracy())
# Start training
model.fit(train_data=train_dataset,  # training set
          eval_data=val_dataset,  # validation set
          batch_size=2048,  # batch size
          epochs=EPOCHS,  # number of epochs
          log_freq=50,  # logging interval (steps)
          save_dir='./model/')  # checkpoint directory

# Rebuild the test split (rows tagged label == -1) and run inference.
test_data = full_data[full_data['label'] == -1]
test_dataset = MineDataset(test_data, FEATURES, NETWORK_TAGS)
predict_tmp = model.predict(test_dataset, 2048)

# Flatten the batched softmax outputs into hard labels:
# predict class 0 iff P(class 0) > 0.5, otherwise class 1.
result = [0 if probs[0] > 0.5 else 1
          for batch in predict_tmp[0]
          for probs in batch]

# Row order matches the original test frame (concat preserved row order).
res = pd.DataFrame(test['sid'])
res['label'] = result
res.to_csv('./result/paddle_embed_20211224_30.csv', index=False)
