from pyspark.sql.session import SparkSession
from pyspark.sql.functions import *
from pyspark.ml.linalg import Vectors, VectorUDT

from pyspark.ml.classification import LogisticRegression
from pyspark.sql.types import *

# Create the Spark environment (reuses an existing session if one is active).
spark = SparkSession.builder.getOrCreate()

# 1. Read the image data with Spark's "image" data source.
#    Each row carries a single `image` struct column (origin, data, ...).
image_data = spark.read.format("image").load("D:\\data\\手写数字识别data\\train")

image_data.printSchema()
# 2. Pull out each image's path and its raw pixel payload (binary byte array).
path_data_df = image_data.select(col("image.origin").alias("path"), col("image.data").alias("data"))


# 3. UDF body: convert an image's raw byte payload into an ML dense vector.
def image_fun(data):
    """Turn the image's binary pixel data into a ``DenseVector``.

    :param data: raw pixel bytes (``bytes``/``bytearray``) from the
        ``image.data`` column.
    :return: a dense vector with one element per byte (pixel value 0-255).
    """
    # Iterating bytes/bytearray in Python 3 already yields ints, so the
    # original per-element ``int(i)`` conversion was redundant work;
    # ``list(data)`` produces the same list of pixel values directly.
    return Vectors.dense(list(data))


# Register the UDF; it returns a Spark ML vector, hence VectorUDT as the result type.
image_udf = udf(image_fun, VectorUDT())


# UDF body: extract the image file name from its origin path.
def image_name_fun(path):
    """Return the last '/'-separated segment of *path* (the file name)."""
    return path.rsplit("/", 1)[-1]


# Register the file-name UDF with a plain string result type.
image_name_udf = udf(image_name_fun, StringType())

# Apply both UDFs: keep the file name (the join key) and the feature vector.
features_df = path_data_df.select(image_name_udf("path").alias("name"), image_udf("data").alias("features"))

features_df.printSchema()
features_df.show(truncate=False)

# 4. Read the label file: space-separated lines of "<file name> <label>".
label_df = spark.read.format("csv") \
    .option("sep", " ").schema("name STRING,label DOUBLE") \
    .load("D:\\data\\手写数字识别data\\train.txt")

# Join the feature vectors with the labels on the file name. The broadcast
# hint avoids a shuffle, assuming the label table is small — confirm.
data_df = features_df \
    .join(label_df.hint("broadcast"), "name") \
    .select("label", "features")

# Coalesce to 10 partitions to limit the number of small output files, then
# save in libsvm format (which requires exactly the "label" and "features"
# columns, as selected above).
# NOTE(review): the output path is relative while the inputs are absolute
# Windows paths — confirm the intended working directory.
data_df.coalesce(10).write.format("libsvm").save("../../data/image_data")
