package top.hekun.study.alink;


import com.alibaba.alink.operator.batch.BatchOperator;
import com.alibaba.alink.operator.batch.source.CsvSourceBatchOp;
import com.alibaba.alink.operator.stream.StreamOperator;
import com.alibaba.alink.pipeline.Pipeline;
import com.alibaba.alink.pipeline.clustering.KMeans;
import com.alibaba.alink.pipeline.dataproc.vector.VectorAssembler;

import java.io.File;
import java.nio.file.Paths;


public class AlinkDemo {

    /**
     * KMeans clustering demo on the iris dataset: reads the CSV from
     * {@code <working-dir>/Alink/data/iris.csv}, assembles the four numeric
     * measurement columns into a feature vector, fits a 3-cluster KMeans
     * model, and prints the predictions for the same data.
     *
     * @param args unused
     * @throws Exception if reading the data or running the pipeline fails
     */
    public static void main(String[] args) throws Exception {
        // Resolve the data file relative to the working directory in a
        // platform-independent way (avoids hand-nesting File constructors).
        String filePath =
                Paths.get(System.getProperty("user.dir"), "Alink", "data", "iris.csv").toString();
        String schemaStr =
                "sepal_length double, sepal_width double, petal_length double, "
                        + "petal_width double, category string";

        // Batch source reading the iris CSV with the declared schema.
        // Note: BatchOperator<?> instead of the raw type.
        BatchOperator<?> data = new CsvSourceBatchOp()
                .setFilePath(filePath)
                .setSchemaStr(schemaStr);

        // Combine the four measurement columns into a single vector column
        // that KMeans consumes.
        VectorAssembler assembler = new VectorAssembler()
                .setSelectedCols(new String[]{"sepal_length", "sepal_width", "petal_length", "petal_width"})
                .setOutputCol("features");

        KMeans kMeans = new KMeans()
                .setVectorCol("features")
                // k = 3 matches the three iris species in the dataset.
                .setK(3)
                // Prediction (cluster id) output column.
                .setPredictionCol("预测结果")
                // Prediction-detail output column.
                .setPredictionDetailCol("预测细节")
                // Keep the original label column in the output for comparison.
                .setReservedCols("category")
                .setMaxIter(100);

        // Fit on the batch data and print the transformed result; the
        // returned operator is not needed afterwards, so it is not stored.
        new Pipeline()
                .add(assembler)
                .add(kMeans)
                .fit(data)
                .transform(data)
                .print();
    }

}
