#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : Paddle第一版本.py
# @Author: Richard Chiming Xu
# @Date  : 2021/12/21
# @Desc  :


# ### Predict using a neural network
# ### Score = ? (leaderboard score to be filled in)

# In[1]:


import pandas as pd
import warnings

# Silence pandas chained-assignment and deprecation chatter for notebook use.
warnings.filterwarnings('ignore')

# Load raw data, then drop the first column from each frame
# (presumably a row-id column — confirm against the CSV header).
train = pd.read_csv('train.csv').iloc[:, 1:]
test = pd.read_csv('test.csv').iloc[:, 1:]
train

# ##### Object-typed columns: lan, os, osv, version, fea_hash
# ##### Columns with missing values: lan, osv

# In[2]:


# Candidate feature columns: every column except the target 'label'.
# (Object-typed columns include 'os', 'osv', 'lan', 'sid'.)
features = train.columns.tolist()
features.remove('label')
print(features)

# In[3]:


# Quick cardinality scan: distinct-value count per candidate feature.
for name in features:
    print(name, train[name].nunique())


# In[4]:


# Clean the raw 'osv' (OS version) strings into float version numbers.
def osv_trans(x):
    """Normalize an OS-version string to a float.

    Strips 'Android_' / 'Android ' prefixes and stray 'W' characters, then
    collapses everything after the first '.' into a single fractional part,
    e.g. '8.1.0' -> 8.1 ('8.10') and '4.4.4 KTU84P' -> 4.44.
    Unparseable values are logged and fall back to 0.
    """
    x = str(x).replace('Android_', '').replace('Android ', '').replace('W', '')
    if x.find('.') > 0:
        major_end = x.find('.')
        # Minor part ends at the first space if present, otherwise at the
        # string end; a '-' (e.g. '8.1.0-beta') takes precedence over both.
        if x.find(' ') > 0:
            minor_end = x.find(' ')
        else:
            minor_end = len(x)

        if x.find('-') > 0:
            minor_end = x.find('-')

        # Keep the major digits, then glue the remaining digits together as
        # one fractional part: '8.1.0' -> '8' + '.' + '10'.
        result = x[0:major_end] + '.' + x[major_end + 1:minor_end].replace('.', '')
        try:
            return float(result)
        # Narrowed from a bare `except:`: float() on a str raises only
        # ValueError; a bare except would also swallow KeyboardInterrupt.
        except ValueError:
            print(x + '#########')
            return 0
    try:
        return float(x)
    except ValueError:
        print(x + '#########')
        return 0


# Fill missing 'osv' with the mode ('8.1.0'), then normalize to floats.
# (A LabelEncoder was considered instead — see original notebook note.)
for frame in (train, test):
    frame['osv'] = frame['osv'].fillna('8.1.0').apply(osv_trans)

# In[5]:


# Inspect the language distribution (display-only notebook expressions).
train['lan'].value_counts()
train['lan'].value_counts().index
# Hand-built label encoding for 'lan'; values absent from the map become NaN.
lan_map = {
    'zh-CN': 1, 'zh_CN': 2, 'Zh-CN': 3, 'zh-cn': 4, 'zh_CN_#Hans': 5,
    'zh': 6, 'ZH': 7, 'cn': 8, 'CN': 9, 'zh-HK': 10, 'tw': 11, 'TW': 12,
    'zh-TW': 13, 'zh-MO': 14, 'en': 15, 'en-GB': 16, 'en-US': 17,
    'ko': 18, 'ja': 19, 'it': 20, 'mi': 21,
}
train['lan'] = train['lan'].map(lan_map)
test['lan'] = test['lan'].map(lan_map)
test['lan'].value_counts()

# In[6]:


# Missing / unmapped languages get their own bucket: 22.
for frame in (train, test):
    frame['lan'] = frame['lan'].fillna(22)

# In[7]:


# Columns excluded from modeling: 'os' and 'sid' (presumably near-constant
# and a row id respectively — confirm against the cardinality scan above).
# NOTE(review): `col = features` aliases the same list object, so the
# remove() calls below also shrink `features`; harmless here only because
# `features` is rebound to a DataFrame later in the script.
remove_list = ['os', 'sid']
col = features
for i in remove_list:
    col.remove(i)
col

# In[8]:


from datetime import datetime


def _ms_to_datetime(ms):
    # Raw timestamps are epoch *milliseconds* (e.g. 1559892728241.7212);
    # datetime.fromtimestamp expects seconds, hence the /1000.
    return datetime.fromtimestamp(ms / 1000)


# Convert the raw millisecond timestamps to datetimes on both splits.
train['timestamp'] = train['timestamp'].apply(_ms_to_datetime)
test['timestamp'] = test['timestamp'].apply(_ms_to_datetime)
test['timestamp']


# In[9]:


def version_trans(x):
    """Translate irregular version labels to ints; plain numbers pass through.

    Known odd labels ('V3', 'P_Final_6', 'GA2', ...) map to hand-chosen
    integers; anything else is parsed with int() and may raise ValueError.
    """
    special_cases = (
        ('V3', 3), ('v1', 1), ('P_Final_6', 6), ('V6', 6),
        ('GA3', 3), ('GA2', 2), ('V2', 2), ('50', 5),
    )
    for label, value in special_cases:
        if x == label:
            return value
    return int(x)


# Normalize 'version' on both splits and force an integer dtype.
for frame in (train, test):
    frame['version'] = frame['version'].apply(version_trans).astype('int')

# In[10]:


# Select the modeling columns, then engineer hash-derived features
# identically on both splits.
features = train[col]
test_features = test[col]

for frame in (features, test_features):
    # The digit-length of the hash string is itself used as a feature.
    frame['fea_hash_len'] = frame['fea_hash'].map(lambda v: len(str(v)))
    frame['fea1_hash_len'] = frame['fea1_hash'].map(lambda v: len(str(v)))
    # Overlong hashes (more than 16 digits) are collapsed to 0; the rest
    # are kept as plain integers.  (Why 0? The length feature above still
    # distinguishes them.)
    frame['fea_hash'] = frame['fea_hash'].map(lambda v: 0 if len(str(v)) > 16 else int(v))
    frame['fea1_hash'] = frame['fea1_hash'].map(lambda v: 0 if len(str(v)) > 16 else int(v))

features
test_features

# In[11]:


"""
train_set = set()
for x in train['sid'].value_counts().index.tolist():
    train_set.add(x)
#train_set.add(train['sid'].value_counts().index.tolist())
train_set
test_set = set()
for x in test['sid'].value_counts().index.tolist():
    test_set.add(x)
# test_set.add(test['sid'].value_counts().index.tolist())
# test['sid'].value_counts().index.tolist()
train_set & test_set # 没有重复值
"""

# In[11]:


# Expand 'timestamp' into multi-scale calendar features on both splits.
for frame in (features, test_features):
    stamps = pd.DatetimeIndex(frame['timestamp'])
    frame['year'] = stamps.year
    frame['month'] = stamps.month
    frame['day'] = stamps.day
    frame['week_day'] = stamps.weekday  # day of week
    frame['hour'] = stamps.hour
    frame['minute'] = stamps.minute

# Fractional days elapsed since the earliest *training* timestamp; the
# same origin is deliberately reused for the test split.
start_time = features['timestamp'].min()
for frame in (features, test_features):
    elapsed = frame['timestamp'] - start_time
    frame['time_diff'] = elapsed.dt.days + elapsed.dt.seconds / 3600 / 24

test_features['time_diff']

# In[12]:


# Inspect raw screen-dimension distributions (display-only).
features['dev_height'].value_counts()
features['dev_width'].value_counts()
# Screen area (height x width) as a new feature on both splits.
for frame in (features, test_features):
    frame['dev_area'] = frame['dev_height'] * frame['dev_width']

# In[13]:


"""
Thinking：是否可以利用 dev_ppi 和 dev_area构造新特征
features['dev_ppi'].value_counts()
features['dev_area'].astype('float') / features['dev_ppi'].astype('float')
"""
# features['ntt'].value_counts()
features['carrier'].value_counts()
features['package'].value_counts()
# version - osv APP版本与操作系统版本差
features['osv'].value_counts()
features['version_osv'] = features['osv'] - features['version']
test_features['version_osv'] = test_features['osv'] - test_features['version']

# In[14]:


# The raw datetime column has been fully expanded above; drop it so the
# frames are purely numeric for scaling.
features = features.drop(columns=['timestamp'])
test_features = test_features.drop(columns=['timestamp'])

# In[16]:


# Standardize the features: the scaler is fitted on the training frame
# only, then reused verbatim for the test frame (no leakage).
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler().fit(features)
features1 = scaler.transform(features)
test_features1 = scaler.transform(test_features)

import numpy as np
import paddle
from paddle import nn
from paddle.io import Dataset, DataLoader

# Run everything on the first GPU.
paddle.device.set_device('gpu:0')


class MineDataset(Dataset):
    """Adapts a pandas feature frame + labels to paddle's Dataset protocol.

    X and y must support positional .iloc indexing (DataFrame / Series).
    """

    def __init__(self, X, y):
        super(MineDataset, self).__init__()
        self.X = X
        self.y = y
        self.num_samples = len(X)

    def __getitem__(self, idx):
        # float32 features, int64 labels — the dtypes paddle expects.
        sample = self.X.iloc[idx].values.astype('float32')
        label = np.array(self.y.iloc[idx]).astype('int64')
        return sample, label

    def __len__(self):
        return self.num_samples


class ClassifyModel(nn.Layer):
    """Five-layer MLP for two-class classification.

    Layout: features_len -> 250 -> 100 -> 50 -> 25 -> 2, with ReLU and
    Dropout(p=0.02) after each hidden layer.  forward() returns raw
    2-way logits suitable for cross_entropy.
    """

    def __init__(self, features_len):
        super(ClassifyModel, self).__init__()

        self.fc1 = nn.layer.Linear(in_features=features_len, out_features=250)
        self.ac1 = nn.layer.ReLU()
        self.drop1 = nn.layer.Dropout(p=0.02)

        self.fc2 = nn.layer.Linear(in_features=250, out_features=100)
        self.ac2 = nn.layer.ReLU()
        self.drop2 = nn.layer.Dropout(p=0.02)

        self.fc3 = nn.layer.Linear(in_features=100, out_features=50)
        self.ac3 = nn.layer.ReLU()
        self.drop3 = nn.layer.Dropout(p=0.02)

        self.fc4 = nn.layer.Linear(in_features=50, out_features=25)
        self.ac4 = nn.layer.ReLU()
        self.drop4 = nn.layer.Dropout(p=0.02)

        self.fc5 = nn.layer.Linear(in_features=25, out_features=2)
        # BUG FIX: the original passed the output through nn.layer.Sigmoid()
        # before the loss, but the training loop uses cross_entropy, which
        # expects raw logits (it applies softmax internally).  Feeding it
        # softmax(sigmoid(x)) squashes scores into [0, 1] and flattens
        # gradients.  Sigmoid is monotonic, so dropping it leaves argmax —
        # and therefore paddle.metric.accuracy — unchanged.

    def forward(self, input):
        x = self.fc1(input)
        x = self.ac1(x)
        x = self.drop1(x)

        x = self.fc2(x)
        x = self.ac2(x)
        x = self.drop2(x)

        x = self.fc3(x)
        x = self.ac3(x)
        x = self.drop3(x)

        x = self.fc4(x)
        x = self.ac4(x)
        x = self.drop4(x)

        # Raw 2-way logits (see note in __init__).
        return self.fc5(x)


# Build train / validation splits.

from sklearn.model_selection import train_test_split


# Hold out 20% for validation; fixed seed for reproducibility.
train_x, val_x, train_y, val_y = train_test_split(
    features1, train['label'], test_size=0.2, random_state=42)

# The scaled matrices are numpy arrays, but MineDataset indexes with
# .iloc — wrap everything back into pandas objects.
train_x = pd.DataFrame(train_x, columns=features.columns)
val_x = pd.DataFrame(val_x, columns=features.columns)
train_y = pd.DataFrame(train_y, columns=['label'])
val_y = pd.DataFrame(val_y, columns=['label'])


# Train/val loaders shuffle and drop the ragged final batch.
train_dataloader = DataLoader(MineDataset(train_x, train_y),
                              batch_size=1024,
                              shuffle=True,
                              drop_last=True,
                              num_workers=2)

val_dataloader = DataLoader(MineDataset(val_x, val_y),
                            batch_size=1024,
                            shuffle=True,
                            drop_last=True,
                            num_workers=2)

# BUG FIX (two issues):
# 1. test_features1 is a numpy array (StandardScaler output) and has no
#    .iloc, so MineDataset.__getitem__ would crash on the first batch —
#    wrap it back into a DataFrame like the train/val splits.
# 2. The test loader must not shuffle or drop batches: shuffle=True would
#    misalign predictions with rows, and drop_last=True would silently
#    leave the final partial batch unscored.  Labels are dummy zeros.
test_dataloader = DataLoader(MineDataset(pd.DataFrame(test_features1, columns=test_features.columns),
                                         pd.Series([0 for i in range(len(test_features1))])),
                             batch_size=1024,
                             shuffle=False,
                             drop_last=False,
                             num_workers=2)

# Initialize the model: one input unit per feature column.
model = ClassifyModel(int(len(features.columns)))
# Switch to training mode (enables dropout).
model.train()
# AdamW optimizer over all model parameters.
opt = paddle.optimizer.AdamW(learning_rate=0.001, parameters=model.parameters())
# NOTE(review): loss_fn is defined here but the training loop below calls
# nn.functional.loss.cross_entropy directly; loss_fn is only referenced by
# the commented-out validation code.
loss_fn = nn.CrossEntropyLoss()

EPOCHS = 10   # number of full passes over the training set
for epoch in range(EPOCHS):
    for iter_id, mini_batch in enumerate(train_dataloader):
        # Each mini-batch is (features, labels) from MineDataset.
        x_train = mini_batch[0]
        y_train = mini_batch[1]
        # Forward pass
        y_pred = model(x_train)
        # Compute the loss
        loss = nn.functional.loss.cross_entropy(y_pred, y_train)
        # Reduce to a scalar for logging and backprop
        avg_loss = paddle.mean(loss)
        # Log loss/accuracy every 20 iterations
        if iter_id % 20 == 0:
            acc = paddle.metric.accuracy(y_pred, y_train)
            print("epoch: {}, iter: {}, loss is: {}, acc is: {}".format(epoch, iter_id, avg_loss.numpy(), acc.numpy()))

        # Backward pass
        avg_loss.backward()
        # Apply the gradient update
        opt.step()
        # Reset gradients before the next iteration
        opt.clear_grad()

    # # Per-epoch validation (currently disabled)
    # print("===================================val===========================================")
    # model.eval()
    # accuracies = []
    # losses = []
    # for batch_id, data in enumerate(val_dataloader):
    #     inputs = data[0]
    #     labels = data[1]
    #     predicts = model(inputs)
    #     loss = loss_fn(predicts, labels)
    #     acc = paddle.metric.accuracy(predicts, labels)
    #     losses.append(loss.numpy())
    #     accuracies.append(acc.numpy())
    #
    # avg_acc, avg_loss = np.mean(accuracies), np.mean(losses)
    # print("validation: loss is: {}, accuracy is: {}".format(avg_loss, avg_acc))
    # model.train()
