package db.writer;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.lib.db.DBConfiguration;
import org.apache.hadoop.mapred.lib.db.DBOutputFormat;

import db.read.DBRecord;

/**
 * MapReduce driver that writes HDFS text data into an Oracle database table.
 */
public class DBInsert {

    /** Default HDFS input directory, used when no path argument is given. */
    private static final String DEFAULT_INPUT = "hdfs://localhost:9000/user/fpbaddress";

    /**
     * Configures and submits a MapReduce job that reads text records from
     * HDFS and writes them into the Oracle table {@code FPB_ADDRESS}
     * through Hadoop's {@code DBOutputFormat}.
     *
     * @param args optional; {@code args[0]} overrides the HDFS input path
     *             (defaults to {@value #DEFAULT_INPUT})
     * @throws Exception if job configuration or execution fails
     */
    public static void main(String[] args) throws Exception {
        // WriteDB supplies the Map/Reduce implementations, so it also
        // anchors the job jar lookup.
        JobConf conf = new JobConf(WriteDB.class);

        // Input is plain text; output is routed to the database.
        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(DBOutputFormat.class);

        // Both map output and final output are (Text, DBRecord) pairs.
        // The explicit map-output declarations are required here; without
        // them the job fails its type check (original author's note).
        conf.setMapOutputKeyClass(Text.class);
        conf.setMapOutputValueClass(DBRecord.class);
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(DBRecord.class);

        // Mapper and reducer live in the companion WriteDB class.
        conf.setMapperClass(WriteDB.Map.class);
        conf.setReducerClass(WriteDB.Reduce.class);

        // Input path: first CLI argument if supplied, otherwise the default.
        String inputPath = args.length > 0 ? args[0] : DEFAULT_INPUT;
        FileInputFormat.setInputPaths(conf, new Path(inputPath));

        // JDBC connection settings for the Oracle target.
        // NOTE(review): credentials are hard-coded; consider moving them to
        // configuration or CLI arguments before production use.
        DBConfiguration.configureDB(conf,
                "oracle.jdbc.driver.OracleDriver",
                "jdbc:oracle:thin:@192.168.0.182:1521/orcl",
                "linshi",
                "linshi");

        // Target table and the columns DBOutputFormat will populate.
        String[] fields = {"id", "name", "intro"};
        DBOutputFormat.setOutput(conf, "FPB_ADDRESS", fields);

        // Submit the job and block until it completes.
        JobClient.runJob(conf);
    }
}