import numpy as np
import pandas as pd
import pyspark
import matplotlib.pyplot as plt
import pyspark.sql.functions as F
from pyspark.sql.window import Window
from pyspark.sql.types import IntegerType
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession

# Entry point: reuse an active Spark session if one exists, else create one.
spark = SparkSession.builder.getOrCreate()

# Load the raw interaction log and the question metadata.
# CSV columns all arrive as strings; numeric casts happen in step 1.
reader = spark.read.option("header", True)
train_df = reader.csv('train.csv')
# lectures_df = reader.csv('hdfs://haruna/edu/weichengzhi/lectures.csv')
question_df = reader.csv('questions.csv')
# question_df.show()

# step 1
# drop the user's raw answer, then cast the integer-valued id columns
# (the CSV reader loaded every column as a string)
print("---------------> step 1 <----------------")
train_df = train_df.drop('user_answer')
for id_col in ("row_id", "user_id", "task_container_id"):
    train_df = train_df.withColumn(id_col, F.col(id_col).cast(IntegerType()))

print(train_df.count())
# train_df.show()

# step 2
# remove users who have taken lecture(s): content_type_id is 0 for question
# rows and 1 for lecture rows, so a per-user sum of 0 means "questions only".
print("---------------> step 2 <----------------")
w = Window.partitionBy("user_id")
# Cast explicitly before summing: content_type_id is still a string column
# here (step 1 only cast the id columns), and the original code relied on
# Spark's implicit string->double coercion inside sum().
pure_q_df = (train_df
             .withColumn("flag", F.sum(F.col("content_type_id").cast(IntegerType())).over(w))
             .filter(F.col("flag") == 0)
             .drop("flag")
             .sort("row_id"))
print(pure_q_df.count())
# pure_q_df.show()
# spot-check a small window of rows around a known boundary
pure_q_df.filter(F.col("row_id").between(16638, 16646)).show()

# # step 3 (disabled): drop rows whose question had no explanation.
# # lag by -1 shifts the prior_question_* values back one row — presumably
# # to align each value with the question it describes (TODO confirm against
# # the data spec before re-enabling).
print("---------------> step 3 <----------------")
# w = Window().partitionBy().orderBy(F.col("row_id"))
# df3 = (pure_q_df.withColumn("prior_question_had_explanation", F.lag("prior_question_had_explanation", -1).over(w))
#                 .withColumn("prior_question_elapsed_time", F.lag("prior_question_elapsed_time", -1).over(w)))
# # key each task bundle by (user_id, task_container_id) so the per-bundle
# # window below never mixes rows from different users
# df3 = df3.withColumn("task_container_id", F.concat_ws("_", df3.user_id, df3.task_container_id))
# # restore global row order
# df3 = df3.sort("row_id")
# # broadcast the bundle's last flag/time value to every row of the bundle
# w = Window.partitionBy("task_container_id")
# df3 = (df3.withColumn("flag",F.when(F.col("prior_question_had_explanation")=='True',1).otherwise(0))
#           .withColumn("prior_question_had_explanation",F.last("flag").over(w))
#           .withColumn("time",F.last("prior_question_elapsed_time").over(w))
#           .filter(F.col("prior_question_had_explanation")!=0)
#           .drop("flag")
#           .sort("row_id"))
# # delete (rows without explanation were filtered above)
# print(df3.count())
# # df3.show()


# # step 4
# # drop some useless columns
# df4 = df3.drop("prior_question_had_explanation", "prior_question_elapsed_time", "content_type_id")
# df4.show()


# # step 5
# # connect the train df and question df
# df5 = df4.withColumnRenamed('content_id', 'question_id')
# df5 = df5.join(question_df, on='question_id', how='left')
# # remove
# df5 = df5.drop("bundle_id", "correct_answer")
# df5 = df5.sort('row_id')

# print(df5.count())
# df5.show()


# # step 6
# # drop users with too few (< low) or too many (> huge) interactions
# low = 50
# huge = 500
# w = Window.partitionBy("user_id")
# df6 = (df5.withColumn("count",F.count("user_id").over(w))
#           .filter(F.col("count") >= low)
#           .filter(F.col("count") <= huge)
#           .drop("count", "part")
#           .sort("row_id"))

# print(df6.count())
# df6.show()


# # step 7
# # remove students who answered any question that itself has too little data
# low = 50

# w1 = Window().partitionBy("question_id")
# w2 = Window().partitionBy("user_id")
# df7 = (df6.withColumn("count", F.count("question_id").over(w1))
#           .withColumn("min", F.min("count").over(w2))
#           .filter(F.col("min") > low)
#           .drop("count", "min")).sort("row_id")

# print(df7.count())
# df7.show()


# # step 8
# # remove questions that were answered `low` times or fewer
# low = 50
# w = Window().partitionBy("question_id")
# df8 = (df7.withColumn("count", F.count("question_id").over(w))
#           .filter(F.col("count") > low)
#           .drop("count")).sort("row_id")
# print(df8.count())
# df8.show()