from pyspark.context import SparkContext, SparkConf
from pyspark.sql import HiveContext
import sys
import copy

# --- Spark setup and raw-sample load ---------------------------------------
# Build the SparkContext explicitly from the conf: the original script used
# `sc` without ever creating it (it only exists pre-bound in a pyspark shell).
sparkconf = SparkConf().setAppName("liz_pred").set("spark.ui.showConsoleProgress", "false")
sc = SparkContext(conf=sparkconf)
sc.setLogLevel('ERROR')
hiveCtx = HiveContext(sc)

db = 'liz'
in_tb = 'bi_ods.xdata_sample_gender'
hiveCtx.sql("use {db}".format(db=db))
df_tmp = hiveCtx.sql("select * from {in_tb}".format(in_tb=in_tb))

# Round-trip through pandas to coerce `day` to str and to duplicate `gid`
# as `matched_gid` (the id column FeatureRetriever reads downstream).
df_tmp_pd = df_tmp.toPandas()
df_tmp_pd['day'] = df_tmp_pd['day'].apply(str)
df_tmp_pd['matched_gid'] = df_tmp_pd['gid']
# `spark` (a SparkSession) was never defined in this script; use the
# HiveContext that actually exists to rebuild the Spark DataFrame.
df_tmp_df = hiveCtx.createDataFrame(df_tmp_pd)

from gai.v2.spark.transformer import DayRectifier

# Normalise the raw `day` strings into a `rectified_day` column and peek at
# the first row as a sanity check.
rectified_day_df = DayRectifier(inputCol='day', outputCol='rectified_day').transform(df_tmp_df)
print(rectified_day_df.head())

# Feature columns to retrieve for the gender model.
# NOTE: the original list contained a duplicated copy-paste run of six names
# (ft_2week_work_less_act_period_ls ... ft_night_stay_wifimac_cnt); the
# duplicates are removed here, preserving the original order.
cols = [
    'ft_2week_rest_active_period_ls',
    'ft_2week_rest_less_act_period_ls',
    'ft_night_stay_detail_ls',
    'ft_month_work_sum_act_times',
    'ft_city_stay_oneday_cnt',
    'ft_2week_work_most_act_times',
    'ft_workplace_stability',
    'ft_month_work_less_act_period_ls',
    'ft_work_cons_ls',
    'ft_month_work_active_period_ls',
    'ft_month_rest_less_act_times',
    'ft_2week_rest_less_act_times',
    'ft_month_work_most_act_times',
    'ft_month_work_less_act_times',
    'ft_dis_label',
    'ft_month_rest_active_period_ls',
    'ft_night_stay_twoday_cnt',
    'ft_2week_work_most_act_period_ls',
    'ft_2week_rest_most_act_times',
    'ft_2week_rest_sum_act_times',
    'ft_2week_work_less_act_period_ls',
    'ft_2week_work_active_period_ls',
    'ft_lbs_ktv_weekly',
    'ft_city_stay_city_ls',
    'ft_month_rest_most_act_period_ls',
    'ft_night_stay_wifimac_cnt',
    'ft_largest_cate_ls',
    'ft_city_stay_twoday_cnt',
    'ft_2week_work_sum_act_times',
    'ft_night_stay_oneday_cnt',
    'ft_pwoi_all_often_consum',
    'ft_pwoi_rest_mostoften',
    'ft_pwoi_all_mostoften',
    'ft_pwoi_hour_mostoften_ls',
    'ft_pwoi_all_often_ls',
    'ft_pwoi_rest_often_ls',
    'ft_pwoi_rest_times_ls',
    'ft_pwoi_hour_often_ls',
    'ft_pwoi_all_times_ls',
    'ft_pwoi_hour_times_ls',
    'ft_pwoi_all_mostoften_consume',
]

from gai.v2.spark.transformer import FeatureRetriever

# Retrieve the feature columns for each (matched_gid, rectified_day) pair,
# looking back over a one-month span.
retriever_params = dict(
    inputIdCol="matched_gid",
    inputDayCol="rectified_day",
    outputFeatureCols=cols,
    extraParams={'span.in.months': 1},
)
feature_retriever = FeatureRetriever(**retriever_params)
retrieved_feature_df = feature_retriever.transform(rectified_day_df)
# Quick sanity checks: a two-row preview and the total row count.
print(retrieved_feature_df.head(2))
print(retrieved_feature_df.count())

# Persist the retrieved features twice: a local CSV snapshot and a Hive table.
retrieved_feature_df.toPandas().to_csv('fs6s/gender_ios.csv', header=True, index=False)

# Fluent equivalent of saveAsTable('gender_tb', None, "overwrite", None):
# format and partitioning stay at their defaults, existing table is replaced.
retrieved_feature_df.write.mode("overwrite").saveAsTable('gender_tb')

import os

# NOTE(review): the return value of os.getcwd() is discarded, so this call has
# no effect -- presumably a leftover from interactive/notebook use where the
# working directory was echoed. Safe to delete once confirmed.
os.getcwd()

# import pyspark.sql.functions as F
# synthesized_df1 = retrieved_feature_df.filter(
#                                           (F.length(F.col('ft_usertags')) > 0)) \
#                                     .withColumn('label', F.when(F.col('gender')=='012200', 0).otherwise(1))\
#                                     .withColumn('usertag_num', F.length(F.col('ft_usertags')))

# synthesized_df = synthesized_df1.filter(F.substring(F.col('gid'),1,3)=='IOS')
# synthesized_df.show()
