from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark.ml.feature import Word2Vec
from pyspark.ml.feature import HashingTF, IDF, Tokenizer

# Build (or reuse) a local SparkSession for this demo script.
# NOTE(review): "spark.driver.cores" only takes effect in cluster mode, and a
# bare "4096" for "spark.driver.maxResultSize" is parsed as bytes (far below
# the 1g default) -- presumably "4096m" was intended; confirm before relying
# on large collect() results.
spark = (
    SparkSession.builder
    .appName('appName')
    .master('local')
    .config("spark.local.dir", "Y:/SparkTemp/")
    .config("spark.driver.cores", "4")
    .config("spark.driver.maxResultSize", "4096")
    .getOrCreate()
)


# Underlying SparkContext, used below for RDD-level text loading.
sc = spark.sparkContext

# Load a whitespace-delimited text file and convert each line to a Row.
# Field layout per line: fields[0] -> id, fields[1] -> age (kept as strings).
lines = sc.textFile("J:/spark-2.2.0-bin-hadoop2.7/examples/src/main/resources/tset.txt")
tokens = lines.map(lambda line: line.split())
print("-----------------")
user = tokens.map(lambda fields: Row(id = fields[0], age = fields[1]))

# Infer the schema and expose the DataFrame as a temp view so SQL can query it.
userDF = spark.createDataFrame(user)
userDF.createOrReplaceTempView("user")

# Look up a single user by id; the query result is itself a DataFrame.
someone = spark.sql("SELECT id,age FROM user where id = '562284'")

# .rdd yields the rows as pyspark Row objects; format and collect to driver.
formatted = someone.rdd.map(lambda row: "id: " + row.id + " age: " + row.age).collect()
print("data count: %s"%(someone.count()))
for info in formatted:
    print(info)


print("Word2Vec ###############################")
# Word2Vec demo: each row is a bag of words from one sentence/document.
# The bare [] row exercises the empty-document case.
_training_docs = [
    "Hi I heard Spark about".split(" "),
    "Hi I heard about Spark".split(" "),
    "Hi I heard about Spark".split(" "),
    "I Hi heard about Spark".split(" "),
    [],
    "I Hi heard about about Spark".split(" "),
    "I wish Java could use case classes".split(" "),
    "I wish Java could use case classes".split(" "),
    "Logistic regression models are neat".split(" "),
    "Yes Go".split(" "),
]
documentDF = spark.createDataFrame([(words,) for words in _training_docs], ["text"])

# Learn a mapping from words to 3-dimensional vectors; minCount=0 keeps every
# word seen in the training data in the vocabulary.
word2Vec = Word2Vec(vectorSize=3, minCount=0, inputCol="text", outputCol="result")
model = word2Vec.fit(documentDF)

# Transform each training document into its embedding and print both.
result = model.transform(documentDF)
for text, vector in result.collect():
    print("Text: [%s] => \nVector: %s\n" % (", ".join(text), str(vector)))

# Apply the trained model to unseen documents. Per Spark's Word2VecModel,
# a document is embedded as the average of its word vectors; a document made
# entirely of out-of-vocabulary words maps to the zero vector.
# Fix: replaced the unprofessional test sentence with a neutral one; both are
# fully out-of-vocabulary, so the resulting embedding is unchanged (all zeros).
documentDFtest = spark.createDataFrame([
 ("Hi I heard Spark about".split(" "), ),
 ("Yes Go".split(" "), ),
  ("entirely unseen vocabulary".split(" "), )
], ["text"])

result = model.transform(documentDFtest)
for text, vector in result.collect():
    print("Text: [%s] => \nVector: %s\n" % (", ".join(text), str(vector)))


print("HashingTF ###############################")
# TF-IDF pipeline: tokenize -> hashed term frequencies -> IDF rescaling.


sentenceData = spark.createDataFrame([
    (0.0, "Hi I heard about Spark"),
    (0.0, "I wish Java could use case classes"),
    (1.0, "Logistic regression models are neat")
], ["label", "sentence"])

sentenceData.show(truncate=False)

# Split each sentence into word tokens.
wordsData = Tokenizer(inputCol="sentence", outputCol="words").transform(sentenceData)

# Hash tokens into a fixed-size (20 bucket) term-frequency vector.
# CountVectorizer is an alternative for exact term-frequency vectors.
featurizedData = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=20).transform(wordsData)

# Fit IDF on the corpus and down-weight terms common across documents.
idfModel = IDF(inputCol="rawFeatures", outputCol="features").fit(featurizedData)
rescaledData = idfModel.transform(featurizedData)

rescaledData.select("label", "features").show(truncate=False)
rescaledData.show(truncate=False)




print("NGram ###############################")

from pyspark.ml.feature import NGram

# Build bigrams (n=2) from pre-tokenized word lists.
wordDataFrame = spark.createDataFrame([
    (0, ["Hi", "I", "heard", "about", "Spark"]),
    (1, ["I", "wish", "Java", "could", "use", "case", "classes"]),
    (2, ["Logistic", "regression", "models", "are", "neat"])
], ["id", "words"])

bigramFrame = NGram(n=2, inputCol="words", outputCol="ngrams").transform(wordDataFrame)
bigramFrame.select("ngrams").show(truncate=False)



print("Tokenizer ###############################")

from pyspark.ml.feature import Tokenizer, RegexTokenizer
from pyspark.sql.functions import col, udf
from pyspark.sql.types import IntegerType

sentenceDataFrame = spark.createDataFrame([
    (0, "Hi I heard about Spark"),
    (1, "I wish Java could use case classes"),
    (2, "Logistic,regression,models,are,neat")
], ["id", "sentence"])

# Plain Tokenizer splits on whitespace only; RegexTokenizer splits on runs of
# non-word characters, so it also handles the comma-separated third row.
tokenizer = Tokenizer(inputCol="sentence", outputCol="words")
regexTokenizer = RegexTokenizer(inputCol="sentence", outputCol="words", pattern="\\W")
# alternatively, pattern="\\w+" with gaps=False

# UDF reporting how many tokens each tokenizer produced per sentence.
countTokens = udf(lambda words: len(words), IntegerType())

# Show both tokenizations side by side with their token counts.
for splitter in (tokenizer, regexTokenizer):
    splitter.transform(sentenceDataFrame) \
        .select("sentence", "words") \
        .withColumn("tokens", countTokens(col("words"))) \
        .show(truncate=False)