import findspark
findspark.init()
from pyspark import SparkContext
import os
import sys
from pyspark.sql import SparkSession
import scipy.sparse as sps
from pyspark.mllib.linalg import Vectors
import numpy as np

# Bootstrap a local Spark context, derive the vocabulary from the corpus,
# and ship the token->index table to the workers as a broadcast variable.
sc = SparkContext('local')
doc = sc.parallelize([["I Love Beijing"], ["I Love China"]])
words = doc.flatMap(lambda d: d).distinct().collect()
# NOTE(review): flatMap yields whole sentence strings (no .split()), so the
# "vocabulary" is sentence-level — presumably intentional for this demo.
word_dict = {w: i for i, w in enumerate(words)}
word_dict_b = sc.broadcast(word_dict)


def wordCountPerDoc(d):
    """Count token occurrences in a single document.

    Args:
        d: iterable of tokens for one document. In this script each element
           is a whole sentence string, because the driver never splits on
           whitespace — the broadcast vocabulary is keyed the same way.

    Returns:
        dict mapping each token's broadcast vocabulary index to the number
        of times that token appears in ``d``.

    Raises:
        KeyError: if ``d`` contains a token absent from the broadcast
            vocabulary.
    """
    counts = dict()
    wd = word_dict_b.value  # broadcast table: token -> vocabulary index
    for w in d:
        key = wd[w]
        # Single lookup with a default instead of the original truthiness
        # check (`if dic.get(...)`), which did two lookups and would wrongly
        # reset any count that was ever falsy (0).
        counts[key] = counts.get(key, 0) + 1
    return counts


# Run the per-document token count on the cluster and display the result.
print(doc.map(wordCountPerDoc).collect())
print("successful!")

# Demo: the same length-3 vector in four representations.
dv1 = np.array([1.0, 0.0, 3.0])  # dense, NumPy array
dv2 = [1.0, 0.0, 3.0]  # dense, plain Python list
sv1 = Vectors.sparse(3, [0, 2], [1.0, 3.0])  # sparse, MLlib vector
# Sparse SciPy CSC matrix built from (data, row indices, column pointers).
csc_data = np.array([1.0, 3.0])
csc_rows = np.array([0, 2])
csc_colptr = np.array([0, 2])
sv2 = sps.csc_matrix((csc_data, csc_rows, csc_colptr), shape=(3, 1))

print(sv2)
# The following works only after Hive has been configured:
# spark = SparkSession.builder.appName('CheckPyspark').master(master="local").enableHiveSupport().getOrCreate()
# spark.sql("show tables").show()
# print(spark.sparkContext.parallelize(range(6), 3).collect())
