#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : TF第一版本.py
# @Author: Richard Chiming Xu
# @Date  : 2021/12/16
# @Desc  :
# !/usr/bin/env python
# coding: utf-8

# ### 使用NN进行预测
# ### Score = ？

# In[1]:


import pandas as pd
import warnings

warnings.filterwarnings('ignore')

# 数据加载
# Load the raw data; the first column of each CSV is a row index — drop it.
train = pd.read_csv('train.csv').iloc[:, 1:]
test = pd.read_csv('test.csv').iloc[:, 1:]
train

# ##### Object类型： lan, os, osv, version, fea_hash
# ##### 有缺失值的字段： lan, osv

# In[2]:


# ['os', 'osv', 'lan', 'sid’]
# Candidate feature columns: every column except the target label.
features = [c for c in train.columns if c != 'label']
print(features)

# In[3]:


# Cardinality of each candidate feature (distinct value counts).
for name in features:
    print(name, train[name].nunique())


# In[4]:


# 对osv进行数据清洗
# Clean the raw `osv` (OS version) column.
def osv_trans(x):
    """Normalize a raw osv value to a float version number.

    Strips 'Android_' / 'Android ' prefixes and the letter 'W', then
    collapses multi-part versions by keeping the first dot and deleting
    later ones: '8.1.0' -> 8.1, '4.4.2' -> 4.42.  The minor part is cut
    at the first space or '-' if present ('7.1.2-x' -> 7.12).  Values
    that still cannot be parsed are logged and mapped to 0.

    :param x: raw cell value (any type; coerced via str()).
    :return: float version number, or 0 on parse failure.
    """
    x = str(x).replace('Android_', '').replace('Android ', '').replace('W', '')
    if x.find('.') > 0:
        major_end = x.find('.')
        # Minor part ends at the first space, else at end of string;
        # a '-' (build suffix) overrides both.
        if x.find(' ') > 0:
            minor_end = x.find(' ')
        else:
            minor_end = len(x)
        if x.find('-') > 0:
            minor_end = x.find('-')

        result = x[0:major_end] + '.' + x[major_end + 1:minor_end].replace('.', '')
        try:
            return float(result)
        # Narrowed from a bare except: float() on a str raises ValueError.
        except ValueError:
            print(x + '#########')
            return 0
    try:
        return float(x)
    except ValueError:
        print(x + '#########')
        return 0


# train['osv'] => LabelEncoder ?
# 采用众数，进行缺失值的填充
# Fill missing osv with the mode ('8.1.0'), then normalise to a float
# version number — same treatment for train and test.
for frame in (train, test):
    frame['osv'].fillna('8.1.0', inplace=True)
    frame['osv'] = frame['osv'].apply(osv_trans)

# In[5]:


# Hand-built ordinal encoding for the language column; anything not in
# the map becomes NaN and is bucketed as 22 below.
lan_map = {'zh-CN': 1, 'zh_CN': 2, 'Zh-CN': 3, 'zh-cn': 4, 'zh_CN_#Hans': 5, 'zh': 6, 'ZH': 7, 'cn': 8, 'CN': 9,
           'zh-HK': 10, 'tw': 11, 'TW': 12, 'zh-TW': 13, 'zh-MO': 14, 'en': 15, 'en-GB': 16, 'en-US': 17, 'ko': 18,
           'ja': 19, 'it': 20, 'mi': 21}

# In[6]:


for frame in (train, test):
    frame['lan'] = frame['lan'].map(lan_map)
    # Missing / unmapped languages get their own bucket, 22.
    frame['lan'].fillna(22, inplace=True)

# In[7]:


# Columns excluded from modelling: 'os' is near-constant, 'sid' is an id.
remove_list = ['os', 'sid']
# Build the modelling column list WITHOUT aliasing `features`: the
# original `col = features; col.remove(i)` mutated `features` in place
# (both names referenced the same list) — a latent aliasing bug, even
# though downstream code only reads `col`.
col = [f for f in features if f not in remove_list]
col

# In[8]:


# train['timestamp'].value_counts()
# train['timestamp'] = pd.to_datetime(train['timestamp'])
# train['timestamp']
from datetime import datetime

# Raw timestamps are epoch *milliseconds* — convert both frames to
# datetime objects for calendar-feature extraction later.
for frame in (train, test):
    frame['timestamp'] = frame['timestamp'].apply(lambda ms: datetime.fromtimestamp(ms / 1000))
test['timestamp']


# In[9]:


# Known non-numeric version labels and their integer equivalents.
# ('50' is deliberately mapped to 5, matching the original cleaning rule.)
_VERSION_MAP = {
    'V3': 3,
    'v1': 1,
    'P_Final_6': 6,
    'V6': 6,
    'GA3': 3,
    'GA2': 2,
    'V2': 2,
    '50': 5,
}


def version_trans(x):
    """Map a raw `version` value to an integer version number.

    Known non-numeric labels are resolved via a lookup table; any other
    value is assumed numeric and cast with int() (raising ValueError for
    unseen non-numeric strings, same as the original if-chain).

    :param x: raw cell value (string or number).
    :return: int version number.
    """
    if x in _VERSION_MAP:
        return _VERSION_MAP[x]
    return int(x)


# Normalise the version column on both frames and force integer dtype.
for frame in (train, test):
    frame['version'] = frame['version'].apply(version_trans).astype('int')

# In[10]:


# 特征筛选
# Select modelling columns for both frames.
features = train[col]
test_features = test[col]

# Engineer hash features identically on train and test:
#  - the string length of each hash is itself a feature;
#  - hashes longer than 16 chars don't fit a numeric type, so they are
#    clamped to 0, otherwise kept as their integer value.
for frame in (features, test_features):
    frame['fea_hash_len'] = frame['fea_hash'].map(lambda v: len(str(v)))
    frame['fea1_hash_len'] = frame['fea1_hash'].map(lambda v: len(str(v)))
    frame['fea_hash'] = frame['fea_hash'].map(lambda v: 0 if len(str(v)) > 16 else int(v))
    frame['fea1_hash'] = frame['fea1_hash'].map(lambda v: 0 if len(str(v)) > 16 else int(v))
features

# In[11]:


"""
train_set = set()
for x in train['sid'].value_counts().index.tolist():
    train_set.add(x)
#train_set.add(train['sid'].value_counts().index.tolist())
train_set
test_set = set()
for x in test['sid'].value_counts().index.tolist():
    test_set.add(x)
# test_set.add(test['sid'].value_counts().index.tolist())
# test['sid'].value_counts().index.tolist()
train_set & test_set # 没有重复值
"""

# In[11]:


# Expand the timestamp into multi-scale calendar features, plus a
# fractional-day offset from the earliest *training* timestamp (the same
# origin is reused for the test set, so the two scales agree).
start_time = features['timestamp'].min()

for frame in (features, test_features):
    stamps = pd.DatetimeIndex(frame['timestamp'])
    frame['year'] = stamps.year
    frame['month'] = stamps.month
    frame['day'] = stamps.day
    frame['week_day'] = stamps.weekday  # day of week
    frame['hour'] = stamps.hour
    frame['minute'] = stamps.minute

    # Offset in days (fractional).  Kept as days + seconds-component
    # (`.dt.seconds`, not total_seconds) to match the original formula.
    delta = frame['timestamp'] - start_time
    frame['time_diff'] = delta.dt.days + delta.dt.seconds / 3600 / 24

test_features['time_diff']

# In[12]:


# Composite features, built identically on both frames:
#  - dev_area: screen area from height x width;
#  - version_osv: gap between OS version and app version.
# Thinking: dev_ppi and dev_area could also be combined into a new
# feature (e.g. dev_area / dev_ppi) — not done here.
for frame in (features, test_features):
    frame['dev_area'] = frame['dev_height'] * frame['dev_width']
    frame['version_osv'] = frame['osv'] - frame['version']

# In[14]:


# The raw timestamp column has been fully expanded above — drop it.
features = features.drop(['timestamp'], axis=1)
test_features = test_features.drop(['timestamp'], axis=1)

# In[16]:


# Standardise the features.  The scaler is fit on the training frame
# only and reused on the test frame to avoid leakage.
from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
scaler.fit(features)
features1 = scaler.transform(features)
test_features1 = scaler.transform(test_features)

# In[ ]:


# Optimiser notes: SGD / Adam / tanh were candidates; AdamW refines Adam
# (better results in fewer epochs) but plain Adam is used below.


# In[45]:


# Train a fully-connected network for the binary label.
from tensorflow import keras
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers

# `lr` is a deprecated alias in TF2 Keras; `learning_rate` is the
# supported keyword (same default behavior, 0.001 step size).
adam = Adam(learning_rate=0.001)
# L2 regularisation + dropout to fight overfitting — neural networks
# carry a large parameter count relative to this tabular dataset.
model = keras.Sequential([
    keras.layers.Dense(250, activation='relu', input_shape=[len(features.columns)],
                       kernel_regularizer=regularizers.l2(0.001)),
    keras.layers.Dropout(0.02),
    keras.layers.Dense(100, activation='relu', kernel_regularizer=regularizers.l2(0.001)),
    keras.layers.Dropout(0.02),
    # NOTE(review): the next two Dense layers have no activation
    # (linear) — confirm this is intentional and not a missing 'relu'.
    keras.layers.Dense(50, kernel_regularizer=regularizers.l2(0.001)),
    keras.layers.Dropout(0.02),
    keras.layers.Dense(25, kernel_regularizer=regularizers.l2(0.001)),
    keras.layers.Dropout(0.02),
    # Sigmoid output for binary classification.
    keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy', metrics=['accuracy'], optimizer=adam)

# Dump the scaled features (plus label) for offline inspection.
tmp = pd.DataFrame(features1)
tmp['label'] = train['label']
dev = tmp[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 'label']]
imp = tmp[[14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]]
dev.to_csv('dev神经网络测试.csv', index=False)
imp.to_csv('imp神经网络测试.csv', index=False)
tmp.to_csv('full神经网络测试.csv', index=False)

# batch_size chosen to fit memory; start with fewer epochs when tuning.
model.fit(features1, train['label'], batch_size=10240, epochs=500)

# Predicted probabilities for the test set.
y_pred = model.predict(test_features1)
y_pred