
from pyspark.context import SparkContext, SparkConf
from pyspark.sql import HiveContext
import sys
import copy

# Build the Spark entry points.
# fix: the original referenced an `sc` that was never created, so the
# script died with a NameError here and `sparkconf` was silently unused.
sparkconf = SparkConf().setAppName("liz_pred").set("spark.ui.showConsoleProgress", "false")
sc = SparkContext(conf=sparkconf)  # actually create the context from the conf
sc.setLogLevel('ERROR')            # keep driver output readable
hiveCtx = HiveContext(sc)          # Hive-enabled SQL context used throughout

# Source data: labelled gender samples read straight out of Hive.
database_name = 'liz'
source_table = 'bi_ods.xdata_sample_gender'
hiveCtx.sql("use {db}".format(db=database_name))
df_tmp = hiveCtx.sql("select * from {in_tb}".format(in_tb=source_table))


# Round-trip through pandas so the `day` column can be normalised to a
# plain string before going back into Spark.
df_tmp_pd = df_tmp.toPandas()

df_tmp_pd['day'] = df_tmp_pd['day'].apply(str)  # force string dtype for day
df_tmp_pd['matched_gid'] = df_tmp_pd['gid']     # FeatureRetriever keys on matched_gid
# fix: the original called `spark.createDataFrame`, but no SparkSession
# named `spark` exists in this script -- only `hiveCtx` does.
df_tmp_df = hiveCtx.createDataFrame(df_tmp_pd)

# Normalise raw `day` strings into a canonical `rectified_day` column
# for the downstream feature lookup.
from gai.v2.spark.transformer import DayRectifier
day_rectifier = DayRectifier(inputCol='day', outputCol='rectified_day')
rectified_day_df = day_rectifier.transform(df_tmp_df)
print(rectified_day_df.head())  # sanity check one row

# Pull model features (brand, user tags, phone model, installed apps)
# for each (matched_gid, rectified_day) pair, looking back one month.
from gai.v2.spark.transformer import FeatureRetriever

feature_columns = [
    'ft_brand',
    'ft_usertags',
    'ft_phone_model',
    'ft_app_install_pkgs',
]
feature_retriever = FeatureRetriever(inputIdCol="matched_gid",
                                     inputDayCol="rectified_day",
                                     outputFeatureCols=feature_columns,
                                     extraParams={'span.in.months': 1})

retrieved_feature_df = feature_retriever.transform(rectified_day_df)

# Quick look at the retrieved rows and their count.
print(retrieved_feature_df.head(2))
print(retrieved_feature_df.count())

# Optional local export for inspection:
# retrieved_feature_df.toPandas().to_csv('gender_ios.csv', header=True, index=False)

# Same two steps bundled as a single PipelineModel; alt_df is only a
# sanity check that the pipeline form matches the step-by-step result
# (it is not used downstream).
from pyspark.ml import PipelineModel
pipeline_model = PipelineModel(stages=[day_rectifier, feature_retriever])
alt_df = pipeline_model.transform(df_tmp_df)

# Record which gai version produced this run.
import gai.version
print(gai.version.__version__)

# Gender label codes in the source table:
#   012200 male      012202 male (less precise)
#   012100 female    012102 female (less precise)
import pyspark.sql.functions as F

# Keep rows carrying at least one usable feature; derive a binary label
# (0 = precise-male code 012200, 1 = everything else) and a tag count.
synthesized_df1 = retrieved_feature_df.filter(
                                          (F.length(F.col('ft_usertags')) > 0)
                                        | (F.length(F.col('ft_app_install_pkgs')) > 0)) \
                                    .withColumn('label', F.when(F.col('gender')=='012200', 0).otherwise(1))\
                                    .withColumn('usertag_num', F.length(F.col('ft_usertags')))

# fix: the original printed `synthesized_df` here, which is only defined
# below -- NameError. These prints inspect the intermediate frame.
print(synthesized_df1.head(2))
print(synthesized_df1.count())

# Restrict to iOS devices (gid values start with 'IOS').
synthesized_df = synthesized_df1.filter(F.substring(F.col('gid'),1,3)=='IOS')
synthesized_df.show()

synthesized_df.columns  # no-op outside a notebook; kept for interactive use

from gai.v2.spark.transformer import PrefixAdder
from pyspark.sql.functions import udf
from pyspark.sql.types import StringType
from gai.v2.spark.transformer import ColumnMapper

# Prefix every comma-separated user tag with 'utg_' so tag features
# cannot collide with package-name features in the shared feature space.
prefix_adder_1 = PrefixAdder(inputCol='ft_usertags', 
                             outputCol='prefixed_usertags',
                             prefix='utg_',
                             separator=',')

@udf(returnType=StringType())
def add_prefix(cell_text):
    """Prefix every '#'-separated token in *cell_text* with 'pkg_'.

    Empty strings pass through unchanged (note that ``"".split('#')``
    would yield ``['']``, hence the explicit emptiness check). NULL
    cells -- which Spark hands to a UDF as ``None`` -- are returned
    as-is instead of raising ``TypeError`` as the original
    ``len(cell_text)`` call did.
    """
    prefix = "pkg_"
    separator = '#'
    if not cell_text:  # covers both None (NULL cell) and ""
        return cell_text
    return separator.join(prefix + part for part in cell_text.split(separator))


# Apply the package-name prefixer via a generic single-column mapper,
# then chain both prefixing steps into one reusable PipelineModel.
column_mapper = ColumnMapper(fun=add_prefix,
                             inputCols=['ft_app_install_pkgs'],
                             outputCol='prefixed_install_pkgs')
prefix_adder_pipeline = PipelineModel(stages=[prefix_adder_1, column_mapper])
prefixed_df = prefix_adder_pipeline.transform(synthesized_df)
print(prefixed_df.head())  # sanity check one prefixed row

prefixed_df.columns  # no-op outside a notebook; kept for interactive use

from gai.v2.spark.feature import FeatureScoreCalculator, explode_list_plain


# fix: bound lambda replaced with a def (PEP 8 E731); behavior unchanged.
# NOTE(review): despite the name, this splits on ',' -- the PrefixAdder
# separator -- not on '#'; confirm the intended delimiter.
def explode_list_by_pound(x):
    return explode_list_plain(x, ',')


# Score each exploded tag feature (plus the simple tag-count feature)
# against the binary label.
score_calculator = FeatureScoreCalculator(labelCol='label', 
                           labelRange=[0,1], 
                           compoCols=['ft_usertags'],
                           compoExploders=[explode_list_by_pound],
                           simpleCols=['usertag_num'])
feature_score_df = score_calculator.transform(prefixed_df)
print(feature_score_df.head())

from gai.v2.spark.feature import RowRanker

# Eyeball the scores locally; the sorted pandas frame is inspection-only
# and is not used downstream.
feature_score_pd = feature_score_df.toPandas()
sorted_pd = feature_score_pd.sort_values(['info_value', 'info_gain'])
sorted_pd.head()

# Rank every feature by each score, best (largest) first.
row_ranker = RowRanker(inputCols=['info_value', 'info_gain', 'chi_sq'],
                       outputCols=['info_value_rank', 'info_gain_rank', 'chi_sq_rank'],
                       ascending=[False] * 3)
ranked_score_df = row_ranker.transform(feature_score_df)
print(ranked_score_df.head())

# Keep the features with the best information-value ranks and build a
# feature-name -> index mapping for vectorisation.
threshold_feature = 'info_value_rank'
cutoff = 100
selected_rows = ranked_score_df.filter(ranked_score_df[threshold_feature] <= cutoff) \
                               .select('feature').collect()
features = [row['feature'] for row in selected_rows]
feature_dict = {name: idx for idx, name in enumerate(features)}
print(len(feature_dict))

from gai.v2.spark.feature import RowVectorizer
# Turn each row's exploded features into a vector indexed by feature_dict.
# NOTE(review): labelCol='random_label' -- prefixed_df carries a 'label'
# column (built above), not 'random_label'; confirm which name the
# vectorizer expects. Also confirm that passing two exploders for the
# single compoCol is intended, and that scoring used the unprefixed
# 'ft_usertags' while vectorisation uses 'prefixed_usertags'.
row_vectorizer = RowVectorizer(featureDict=feature_dict, labelCol='random_label',
                               compoCols=['prefixed_usertags'],
                               compoExploders=[explode_list_plain, 
                                               explode_list_by_pound],
                               simpleCols=['usertag_num'],
                               outputCol='features')
vectorized_df = row_vectorizer.transform(prefixed_df)
print(vectorized_df.head())

from gai.v2.spark.feature import VanillaFeatureSelector
# Alternative path: let the selector perform the scoring/ranking/cutoff
# itself and fit an equivalent vectorizer -- a cross-check against the
# manual FeatureScoreCalculator -> RowRanker -> RowVectorizer route above.
# NOTE(review): same labelCol='random_label' concern as RowVectorizer.
feature_selector = VanillaFeatureSelector(labelCol='random_label',
                                          labelRange=[0, 1],
                                          compoCols=['prefixed_usertags'],
                                          compoExploders=[explode_list_plain, explode_list_by_pound],
                                          simpleCols=['usertag_num'],
                                          featuresCol='features',
                                          selectionOption='info_value_rank',
                                          cutOff=cutoff)
row_vectorizer_alt = feature_selector.fit(prefixed_df)
vectorized_df_alt = row_vectorizer_alt.transform(prefixed_df)
print(vectorized_df_alt.head())

# 70/30 train/validation split; rename the label column to the name
# pyspark.ml estimators expect. (withColumnRenamed is a no-op when the
# source column is absent.)
splits = vectorized_df.randomSplit(weights=[0.7, 0.3])
train_df = splits[0].withColumnRenamed('random_label', 'label')
validation_df = splits[1].withColumnRenamed('random_label', 'label')

from pyspark.ml.classification import LogisticRegression, LogisticRegressionModel

# L1-regularised (elasticNetParam=1.0) logistic regression.
log_reg = LogisticRegression(regParam=0.06, elasticNetParam=1.0)
lr_model = log_reg.fit(train_df)

# Import only the name we use instead of a wildcard import.
from gai.v2.spark.evaluation import accuracy_of

# fix: `train_result_df` was never defined (NameError); score the
# training split with the fitted model before measuring accuracy.
train_result_df = lr_model.transform(train_df)
print(accuracy_of(train_result_df))
