package db.read;
import luculent.HdfsUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.IdentityReducer;
import org.apache.hadoop.mapred.lib.db.DBConfiguration;
import org.apache.hadoop.mapred.lib.db.DBInputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.log4j.BasicConfigurator;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;


/**
 * Reads rows from an Oracle database table ({@code FPB_ADDRESS}) into HDFS
 * via a MapReduce job submitted to a remote YARN cluster.
 *
 * <p>NOTE(review): database credentials, the JDBC URL, the namenode address,
 * and the local jar path are hard-coded below — consider externalizing them
 * to configuration instead of source.
 */
public class RDBAccess {

      /**
       * Configures and submits the DB-to-HDFS MapReduce job, then prints the
       * job ID without waiting for completion.
       *
       * @param args unused command-line arguments
       * @throws Exception if job configuration or submission fails
       */
      public static void main(String[] args) throws Exception{

             // Old-style (mapred) API configuration; DBInputFormat supplies
             // the map input from the JDBC source configured below.
             JobConf conf = new JobConf(RDBAccess.class);
             conf.setOutputKeyClass(LongWritable.class);
             conf.setOutputValueClass(Text.class);
             conf.setInputFormat(DBInputFormat.class);

             // Remove any previous output so the job does not fail on an
             // existing output directory.
             // NOTE(review): deleteDir runs before fs.defaultFS is set on this
             // conf — presumably HdfsUtils carries its own FS config; verify.
             HdfsUtils.deleteDir("/user/fpbaddress");
             Path path = new Path("hdfs://master:9000/user/fpbaddress");
             FileOutputFormat.setOutputPath(conf, path);

             // JDBC connection settings for the input side of the job.
             DBConfiguration.configureDB(conf,"oracle.jdbc.driver.OracleDriver", "jdbc:oracle:thin:@192.168.0.182:1521/orcl","linshi","linshi");
             String [] fields = {"id", "name", "intro"};
             // Read the listed columns from FPB_ADDRESS, ordered by "id";
             // no WHERE condition (null).
             DBInputFormat.setInput(conf, DBRecord.class, "FPB_ADDRESS",
                        null, "id", fields);
             conf.setMapperClass(DBRecordMapper.class);
             conf.setReducerClass(IdentityReducer.class);

             // Paths without a scheme are resolved against this namenode.
             conf.set("fs.defaultFS", "hdfs://master:9000/");
             conf.set("mapreduce.framework.name", "yarn");
             // Allow submission from a platform different from the cluster's
             // (e.g. a Windows development machine).
             conf.set("mapreduce.app-submission.cross-platform", "true");
             // Local job jar that gets shipped to the cluster on submission.
             conf.set("mapreduce.job.jar","E:\\code\\ideawork\\hadoopone\\target\\hadoopone-0.0.1-SNAPSHOT.jar");
             // Submit as the "hadoop" user when running from a non-hadoop account.
             System.setProperty("HADOOP_USER_NAME","hadoop");

             Job job = Job.getInstance(conf);
             job.setNumReduceTasks(2);

             // Submit asynchronously; the current thread does not block
             // waiting for the job to finish.
             job.submit();
             // The printed job ID can be used to track progress in the web UI.
             System.out.println("++++++:"+job.getJobID().toString());
      }
}