package hadoop.mr08;


import hadoop.unit.GlobalConfiguration;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.db.DBConfiguration;
import org.apache.hadoop.mapreduce.lib.db.DBOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;



/**
 * MapReduce driver: reads a grade CSV from HDFS and writes each record
 * into the MySQL table {@code bigData2331} via {@link DBOutputFormat}.
 *
 * <p>Pipeline wiring (all classes are project-local):
 * mapper = {@code W_mapper}, reducer = {@code W_reduce}; the map phase emits
 * {@code (NullWritable, W_stuTable)} and the reduce phase emits
 * {@code (W_stuTable, NullWritable)} so that the DB output format persists
 * the {@code W_stuTable} key.
 *
 * <p>Exit codes: 0 on successful job completion, 2 on failure.
 */
public class W_Driver {

    /** Number of seconds is irrelevant here — these are process exit codes. */
    private static final int EXIT_OK = 0;
    private static final int EXIT_FAILED = 2;

    /**
     * Configures and submits the job, then exits with the job's status.
     *
     * @param args unused command-line arguments
     * @throws Exception if job setup or submission fails
     */
    public static void main(String[] args) throws Exception {
        // Obtain the shared cluster configuration from the project helper.
        Configuration conf = GlobalConfiguration.getCon();

        // JDBC connection details used by DBOutputFormat on the reduce side.
        // NOTE(review): credentials are hard-coded in source — move them to a
        // configuration file or environment variables before production use.
        // NOTE(review): "com.mysql.jdbc.Driver" is the legacy Connector/J 5.x
        // class name; Connector/J 8.x renamed it to "com.mysql.cj.jdbc.Driver".
        // Left unchanged here because the deployed driver version is unknown.
        DBConfiguration.configureDB(
                conf,
                "com.mysql.jdbc.Driver",
                "jdbc:mysql://192.168.150.101:3306/use_hadoop?useSSL=false&useUnicode=true&characterEncoding=utf8",
                "root",
                "123456"
        );

        Job job = Job.getInstance(conf, "DdRead_Job");
        job.setJarByClass(W_Driver.class);
        job.setMapperClass(W_mapper.class);
        job.setReducerClass(W_reduce.class);

        // Map output: value carries the parsed row, key is a placeholder.
        job.setMapOutputKeyClass(NullWritable.class);
        job.setMapOutputValueClass(W_stuTable.class);

        // Final output: DBOutputFormat writes the key (must implement
        // DBWritable), so the record type goes in the key position.
        job.setOutputKeyClass(W_stuTable.class);
        job.setOutputValueClass(NullWritable.class);

        // Input CSV on HDFS. NOTE(review): port 1920 is unusual for an HDFS
        // NameNode (default 8020/9000) — confirm against the cluster config.
        FileInputFormat.setInputPaths(job,
                new Path("hdfs://master:1920/hadoop/Test/RestaurantDemo/demo3/chengji.csv"));

        // Route output to MySQL: target table name followed by the column
        // names, in the order W_stuTable.write(PreparedStatement) fills them.
        job.setOutputFormatClass(DBOutputFormat.class);
        DBOutputFormat.setOutput(
                job,
                "bigData2331",
                "id",
                "name",
                "sex",
                "java",
                "linux",
                "political",
                "sport",
                "mind",
                "compiter",
                "english",
                "math"
        );

        // Block until the job finishes; propagate success/failure as the
        // process exit status (0 = success, 2 = failure).
        System.exit(job.waitForCompletion(true) ? EXIT_OK : EXIT_FAILED);
    }
}

