package cn.ltgodm.house.cleansing;

import cn.hutool.core.util.StrUtil;
import cn.ltgodm.house.util.SparkUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.hadoop.fs.*;
import org.apache.spark.sql.Column;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.spark_project.guava.io.CharStreams;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import javax.annotation.Resource;
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

import static org.apache.spark.sql.functions.*;

/**
 * @author ltgodm
 * @date 2024-06-24 14:12:28
 * 数据清洗的服务类
 */
@Service
@Slf4j
public class DataCleansingService {
    @Resource
    private SparkUtil sparkUtil;
    @Value("${spark.host}")
    private String SPARK_HOST;
    @Value("${spark.port}")
    private String SPARK_PORT;

    /** Per-district source file names (without the .csv suffix) under the HDFS house/ directory. */
    private final String[] files = {"binjiang", "fuyang", "gongshu", "linpingqu", "qiantangqu", "linan",
            "shangcheng", "xiaoshan", "xihu", "yuhang", "jiande", "chunan", "tonglu"};

    // Columns extracted from every source file; the order here is the output column order:
    // community name, district, total price, unit price, follower count, layout, floor,
    // gross area, usable area, layout structure, building type, orientation, decoration,
    // elevator-to-household ratio, elevator availability.
    private final List<String> columnNames = Arrays.asList("小区名称", "区域位置", "总价", "单价", "关注度",
            "房屋户型", "所在楼层", "建筑面积", "套内面积", "户型结构", "建筑类型", "房屋朝向", "装修情况", "梯户比例", "配备电梯");

    /** Administrative districts — suffixed with "区" during cleansing. */
    private final List<String> cityQU = Arrays.asList("临安", "富阳", "萧山", "余杭", "西湖", "滨江", "上城", "拱墅", "钱塘", "临平");
    /** County-level cities — suffixed with "市". */
    private final List<String> citySHI = Arrays.asList("建德");
    /** Counties — suffixed with "县". */
    private final List<String> cityXIAN = Arrays.asList("淳安", "桐庐");

    /**
     * Entry point of the cleansing pipeline: first extracts the configured columns from every
     * district file and merges them into all.csv, then cleans that file into cleaned.csv.
     */
    public void cleansingData() {
        String hdfsPath = StrUtil.format("hdfs://{}:{}/house/", SPARK_HOST, SPARK_PORT);
        SparkSession spark = sparkUtil.getSparkSession();
        // Extract the configured columns from each source file and merge into all.csv
        mergeFile(hdfsPath, spark);
        // Clean the merged data into cleaned.csv
        executeCleansing(spark, hdfsPath);
    }

    /**
     * Cleans the merged all.csv: drops rows with missing/invalid values, strips the area unit,
     * normalizes the elevator column and rewrites the district column as "XX区/市/县",
     * then merges the Spark output parts into a single cleaned.csv.
     *
     * @param spark    active SparkSession
     * @param hdfsPath HDFS base path ending with '/'
     */
    private void executeCleansing(SparkSession spark, String hdfsPath) {
        Dataset<Row> df = spark.read().option("header", "true").csv(hdfsPath + "all.csv");

        // Every business column (all except community name and district) must be present
        List<String> notNullColumns = Arrays.asList("总价", "单价", "关注度", "房屋户型", "所在楼层",
                "建筑面积", "套内面积", "户型结构", "建筑类型", "房屋朝向", "装修情况", "梯户比例", "配备电梯");
        for (String name : notNullColumns) {
            df = df.filter(col(name).isNotNull());
        }

        df = df.filter(col("单价").gt(0))                    // unit price must be positive
                .filter(col("套内面积").notEqual("暂无数据"))  // usable area must be a real value ("暂无数据" = N/A)
                // "district-neighbourhood" -> keep only the district part
                .withColumn("区域位置", split(col("区域位置"), "-").getItem(0))
                // strip the ㎡ unit so the areas can be used as numbers downstream
                .withColumn("建筑面积", regexp_replace(col("建筑面积"), "㎡", ""))
                .withColumn("套内面积", regexp_replace(col("套内面积"), "㎡", ""))
                // missing elevator info ("暂无数据") is treated as "no elevator" ("无")
                .withColumn("配备电梯", regexp_replace(col("配备电梯"), "暂无数据", "无"));

        // Append the administrative-level suffix to the district name: 区 / 市 / 县
        for (String city : cityQU) {
            df = df.withColumn("区域位置", when(col("区域位置").contains(city), city + "区").otherwise(col("区域位置")));
        }
        for (String city : citySHI) {
            df = df.withColumn("区域位置", when(col("区域位置").equalTo(city), city + "市").otherwise(col("区域位置")));
        }
        for (String city : cityXIAN) {
            df = df.withColumn("区域位置", when(col("区域位置").equalTo(city), city + "县").otherwise(col("区域位置")));
        }

        df.write().mode("overwrite").csv(hdfsPath + "cleaned");
        mergeAndRenameFilesInHdfs(hdfsPath + "cleaned", hdfsPath + "cleaned.csv");
        log.info("数据清洗完成");
    }

    /**
     * Re-encodes every district source file from GBK to UTF-8, extracts the configured columns
     * with Spark and appends them to the "all" directory, then merges the part files into all.csv.
     *
     * @param hdfsPath HDFS base path ending with '/'
     * @param spark    active SparkSession
     * @throws RuntimeException wrapping any IOException from HDFS
     */
    private void mergeFile(String hdfsPath, SparkSession spark) {
        try {
            FileSystem fs = sparkUtil.getHdfsFileSystem();
            Column[] columns = columnNames.stream().map(Column::new).toArray(Column[]::new);
            String tmpFilePath = hdfsPath + "tmp.csv";

            for (String file : files) {
                // The source files are GBK-encoded; decode and rewrite them as UTF-8 first,
                // otherwise Spark (which reads UTF-8 by default) would produce mojibake.
                CharsetDecoder gbkDecoder = Charset.forName("GBK").newDecoder();
                // Skip undecodable bytes instead of failing the whole run on a few dirty characters
                gbkDecoder.onMalformedInput(CodingErrorAction.IGNORE);

                String data;
                // try-with-resources: the original leaked the input stream/reader on every iteration
                try (FSDataInputStream inStream = fs.open(new Path(hdfsPath + file + ".csv"));
                     InputStreamReader reader = new InputStreamReader(inStream, gbkDecoder)) {
                    data = CharStreams.toString(reader);
                }
                try (FSDataOutputStream outStream = fs.create(new Path(tmpFilePath))) {
                    outStream.write(data.getBytes(StandardCharsets.UTF_8));
                }

                // multiline: some fields contain embedded line breaks
                Dataset<Row> select = spark.read().option("header", "true").option("multiline", true)
                        .csv(tmpFilePath)
                        .select(columns);
                select.show();
                select.write().option("header", "true").mode("append").csv(hdfsPath + "all");
            }

            // Merge the Spark part files into a single all.csv
            mergeAndRenameFilesInHdfs(hdfsPath + "all", hdfsPath + "all.csv");
            log.info("数据合并完成");
        } catch (IOException e) {
            throw new RuntimeException("Failed to merge source files under " + hdfsPath, e);
        }
    }

    /**
     * Merges every file in {@code directory} into the single file {@code newName}: writes one
     * unified header line, skips the header lines embedded in the part files, re-joins records
     * that were split across physical lines, and finally deletes the source directory.
     *
     * @param directory HDFS directory holding the part files to merge
     * @param newName   full HDFS path of the merged target file
     * @throws RuntimeException if merging fails (the original silently swallowed the exception)
     */
    public void mergeAndRenameFilesInHdfs(String directory, String newName) {
        try {
            FileSystem hdfsFileSystem = sparkUtil.getHdfsFileSystem();
            // try-with-resources: the original leaked the output stream on failure
            try (FSDataOutputStream out = hdfsFileSystem.create(new Path(newName))) {
                // Single unified header for the merged file
                out.write((String.join(",", columnNames) + "\n").getBytes(StandardCharsets.UTF_8));
                for (FileStatus fileStatus : hdfsFileSystem.listStatus(new Path(directory))) {
                    appendDataLines(hdfsFileSystem, fileStatus.getPath(), out);
                }
            }
            // Remove the now-merged part-file directory
            hdfsFileSystem.delete(new Path(directory), true);
        } catch (Exception e) {
            // The original only called printStackTrace(), leaving callers unaware of the failure
            log.error("Failed to merge HDFS files from {} into {}", directory, newName, e);
            throw new RuntimeException("Failed to merge HDFS files from " + directory, e);
        }
    }

    /**
     * Appends the data lines of one part file to {@code out}, skipping embedded header lines and
     * re-joining records whose column count is short (a field containing a line break was split
     * across physical lines by the CSV writer).
     */
    private void appendDataLines(FileSystem fs, Path path, FSDataOutputStream out) throws IOException {
        // The part files are written by this class as UTF-8; read them with an explicit charset
        // (the original relied on the platform default charset)
        try (FSDataInputStream in = fs.open(path);
             BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String previousLine = null;
            String line;
            while ((line = reader.readLine()) != null) {
                // The unified header is already written; drop every embedded header line.
                // (The original kept the first part file's header, duplicating it in the output.)
                if (line.startsWith("小区名称")) {
                    continue;
                }
                // A previous line was short on columns: glue the current line onto it and re-check
                if (previousLine != null) {
                    line = previousLine + line;
                    previousLine = null;
                }
                // NOTE(review): a plain comma split ignores quoted fields containing commas —
                // kept as-is to match the original behavior; verify against the source data.
                if (line.split(",").length != columnNames.size()) {
                    previousLine = line;
                    continue;
                }
                out.write(line.getBytes(StandardCharsets.UTF_8));
                out.write('\n');
            }
        }
    }
}
