'''
calculate IV & WOE
https://www.listendata.com/2015/03/weight-of-evidence-woe-and-information.html

hdfs dfs -put -f /home/lidh/1w_pn_tmp.txt hdfs://hzxs-ga-hadoop-ds/user/lidh/
hdfs dfs -rm hdfs://getui-bi-ds/user/lidh/1w_pn_tmp.txt
hdfs dfs -cp hdfs://hzxs-ga-hadoop-ds/user/lidh/1w_pn_tmp.txt hdfs://getui-bi-ds/user/lidh


hdfs dfs -head hdfs://getui-bi-ds/user/lidh/1w_pn_tmp.txt

hdfs dfs -cp hdfs://getui-bi-ds/user/yanl/gezhi_arena/sample_day_id/imei_10w_4.txt hdfs://getui-bi-ds/user/lidh/
'''

# sc, spark = create_sc_spark()  # NOTE(review): left disabled -- the `spark` session used below must already exist (e.g. pyspark shell / notebook); confirm before running as a standalone script.

from pyspark import SparkContext, SparkConf
from pyspark.sql.session import SparkSession
from pyspark.sql import Row

import pyspark.sql.functions as F
from gai.v2.spark.transformer import IdMapper
from gai.v2.spark.transformer import FeatureRetriever

# Load the labelled phone-number sample: "|"-separated day|type|pn_md5|label.
source_schema = "day string, type string, pn_md5 string, label int"
source_df = (spark.read
             .schema(source_schema)
             .option('sep', '|')
             .csv('hdfs://getui-bi-ds/user/lidh/1w_pn_tmp.txt'))
print(source_df.head())
print(source_df.take(5))

# Map the md5-hashed phone numbers to gids via the platform IdMapper.
print("supported input id types:", IdMapper.supportedInputIdTypes())

id_mapper = IdMapper(inputIdCol='pn_md5',
                     inputIdType='phone_md5',
                     inputDayCol="day",
                     outputIdCol="matched_gid",
                     outputIdType="gid")
matched_gid_df = id_mapper.transform(source_df)
print(matched_gid_df.take(10))

# Drop rows the mapper could not resolve (empty matched_gid).
matched_gid_df = matched_gid_df.filter(F.col('matched_gid') != '')

# ~7,193 rows survived the id matching on the sample run
print(matched_gid_df.count())

# Pull user-tag and installed-package features for the matched gids,
# with a one-month retrieval span.
feature_retriever = FeatureRetriever(inputIdCol="matched_gid",
                                     inputDayCol="day",
                                     outputFeatureCols=['ft_usertags', 'ft_app_install_pkgs'],
                                     extraParams={'span.in.months': 1})
retrieved_feature_df = feature_retriever.transform(matched_gid_df)
print(retrieved_feature_df.head(3))
print(retrieved_feature_df.count())

# supported retrievable features
print('supported features : ', FeatureRetriever.supportedFeatures())

# Keep only rows carrying at least one non-empty feature column.
has_any_feature = (F.col('ft_usertags') != '') | (F.col('ft_app_install_pkgs') != '')
retrieved_feature_df = retrieved_feature_df.filter(has_any_feature)

# ~5,689 rows had at least one feature on the sample run
print(retrieved_feature_df.count())


from gai.v2.spark.feature import FeatureScoreCalculator, explode_list_plain


# PEP 8 (E731): a named `def` instead of a lambda assignment -- same name,
# same behavior, but with a docstring and a useful traceback name.
def explode_list_by_pound(value):
    """Split a '#'-separated compound feature string into its items."""
    return explode_list_plain(value, '#')


# Score every exploded feature against the binary label (0/1); the calculator
# emits info_value / info_gain / chi_sq columns consumed by the ranker below.
score_calculator = FeatureScoreCalculator(labelCol='label',
                                          labelRange=[0, 1],
                                          compoCols=['ft_usertags', 'ft_app_install_pkgs'],
                                          compoExploders=[explode_list_plain, explode_list_by_pound],
                                          simpleCols=[])
feature_score_df = score_calculator.transform(retrieved_feature_df)
print(feature_score_df.head(20))


# Rank the scores separately
from gai.v2.spark.feature import RowRanker

# Rank each score column independently, largest value first (descending),
# producing <metric>_rank columns.
ranking_metrics = ['info_value', 'info_gain', 'chi_sq']
row_ranker = RowRanker(inputCols=ranking_metrics,
                       outputCols=[metric + '_rank' for metric in ranking_metrics],
                       ascending=[False] * len(ranking_metrics))
ranked_score_df = row_ranker.transform(feature_score_df)
print(ranked_score_df.head(10))


# Select the top `cutoff` features by information-value rank and assign each
# a dense integer index for vectorization.
threshold_feature = 'info_value_rank'
cutoff = 100
selected_rows = (ranked_score_df
                 .filter(ranked_score_df[threshold_feature] <= cutoff)
                 .select('feature')
                 .collect())
features = [row.feature for row in selected_rows]
feature_dict = dict(zip(features, range(len(features))))
print(len(feature_dict))
print(feature_dict)


# Vectorize each record: explode the compound feature columns and emit a
# 'features' column indexed by feature_dict (the selected top features).
from gai.v2.spark.feature import RowVectorizer

row_vectorizer = RowVectorizer(featureDict=feature_dict,
                               labelCol='label',
                               compoCols=['ft_usertags', 'ft_app_install_pkgs'],
                               compoExploders=[explode_list_plain, explode_list_by_pound],
                               simpleCols=[],
                               outputCol='features')
vectorized_df = row_vectorizer.transform(retrieved_feature_df)
print(vectorized_df.head(10))


# Fixed seed so the 80/20 split -- and therefore the fitted model and the
# reported metrics -- is reproducible across reruns of this script.
train_df, validation_df = vectorized_df.randomSplit(weights=[0.8, 0.2], seed=42)

from pyspark.ml.classification import LogisticRegression, LogisticRegressionModel

# elasticNetParam=1.0 selects pure L1 (lasso) regularization, which drives
# weak feature weights to zero; regParam controls its strength.
log_reg = LogisticRegression(regParam=0.06, elasticNetParam=1.0)
lr_model = log_reg.fit(train_df)

train_result_df = lr_model.transform(train_df)
validation_result_df = lr_model.transform(validation_df)
print(train_result_df.head(10))




