# encoding: utf-8

# Screen out fields unusable for training based on null-value rate and entropy,
# and inspect the concrete data types of the remaining fields.

from collections import Counter

import numpy as np
import pandas as pd
from pyspark import SparkFiles
from pyspark.sql import Row
from pyspark.sql import SparkSession

# Load a class-balanced sample: up to 1M rows each for the negative
# (offer_flag_5g3 = 0) and positive (offer_flag_5g3 = 1) 5G-offer classes.
# NOTE: `spark` pre-exists only in pyspark shells/notebooks; getOrCreate()
# reuses that session when present and builds a Hive-enabled one otherwise,
# so the script also runs standalone via spark-submit.
spark = SparkSession.builder.enableHiveSupport().getOrCreate()

df1 = spark.sql("select * from idealsh.temp_cyy_user_label_base_202105_2 where offer_flag_5g3 =0 limit 1000000")
df2 = spark.sql("select * from idealsh.temp_cyy_user_label_base_202105_2 where offer_flag_5g3 =1 limit 1000000")

# toPandas() collects everything to the driver -- acceptable at <=2M rows,
# but keep an eye on driver memory if the limits grow.
pdf1 = df1.toPandas()
pdf2 = df2.toPandas()
pdf3 = pd.concat([pdf1, pdf2])
print(pdf3.head(10))
print(pdf3.shape)

# Null-value rate per column, computed on the positive-class sample (pdf2).
# NOTE(review): this screens pdf2 only, not the combined pdf3 -- confirm the
# positive class is the intended basis for the null-rate filter.
for col in pdf2.columns:
    # Series.isna().mean() == (# null rows) / (total rows); identical to the
    # original len(df.loc[df[col].isna(), :]) / len(df) but without
    # materializing a row slice per column.
    null_rate = pdf2[col].isna().mean()
    print(col, null_rate)

pdf3.info()

# Numeric summary; `l.columns` is reused further down to identify the
# non-numeric (object) fields by set difference.
l = pdf3.describe()

# Summary of object (string) columns. In a plain script this expression's
# result is discarded -- it only renders output in a notebook/REPL.
pdf3.describe(include=['O'])

# Feature entropy
def calc_shannon_ent(data, ent=0.0):
    """Return the Shannon entropy (in bits) of the labels in *data*.

    Parameters
    ----------
    data : iterable of hashable class labels (e.g. a column's values).
    ent : float, starting accumulator (kept for backward compatibility);
        the computed entropy is added to it.

    Returns
    -------
    float : ``ent - sum(p * log2(p))`` over the empirical label
        probabilities; for empty input, ``ent`` unchanged.
    """
    counts = Counter(data)  # occurrences of each distinct label
    total = sum(counts.values())
    if total == 0:
        return ent  # empty input: nothing to accumulate
    for cnt in counts.values():
        p = cnt / total          # empirical probability of this label
        ent -= p * np.log2(p)    # H = -sum(p * log2 p)
    return ent

# Non-numeric fields: everything describe() (numeric-only summary `l`)
# did not cover. A set makes the membership test O(1) per column.
numeric_cols = set(l.columns.tolist())
objectdtype = [col for col in pdf3.columns.tolist() if col not in numeric_cols]
len(objectdtype)

# Entropy of each categorical field, measured on the positive-class sample.
# NOTE(review): iterates columns derived from pdf3 but reads values from
# pdf2 -- confirm this asymmetry is intentional.
for col in objectdtype:
    ent = calc_shannon_ent(pdf2[col].tolist())
    print(col, ent)