
package com.sailing.lianxi.db;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import com.sailing.lianxi.common.Constants;
import com.sailing.lianxi.common.ObjTool;

/**
 * Class:       KafkaToOracle
 * Description: reads rows from an Oracle table using partitioned ROWNUM
 *              range queries and publishes each row (as a serialized map)
 *              to a Kafka topic.
 * Author:      wanggang
 * Created:     2018-04-12 16:23:49
 * Modified by: wanggang, 2018-04-12 16:23:49
 * Notes:
 */
public class KafkaToOracle {

    // NOTE(security): connection details and credentials are hard-coded in source.
    // Move them to external configuration (properties file / environment / vault)
    // before any production use.
    private static String driver = "oracle.jdbc.driver.OracleDriver"; // JDBC driver class

    private static String url = "jdbc:oracle:thin:@//172.20.46.241:1521/orcl"; // connection string

    private static String userName = "RYGK"; // DB user

    private static String passWord = "sailing123"; // DB password

    /**
     * Extracts every row of table TEST1 from Oracle and publishes it to Kafka.
     * <p>
     * The total row count (currently stubbed to 10000) is split into
     * ROWNUM-range-partitioned SELECTs by {@link #convertSql(String, int, long)};
     * each partition is read on its own thread and flushed to Kafka in batches
     * of {@code submitNum} rows via {@link #productorData(List)}.
     */
    public void extractData(){
        try {
            String tableName = "TEST1";
            String execSql = "select * from "+tableName;
            // Row count is stubbed for now; it was previously obtained with a
            // parallel COUNT(1) over execSql. TODO: restore the real count query.
            long totalNum=10000L;
            System.out.println("总记录数："+totalNum);
            // Split the result set into range partitions, one reader thread each.
            int parallelism =1;// Constants.KAFKA_PARTITION_NUM; -- degree of parallelism
            final int submitNum = 10000; // rows buffered before each Kafka flush
            final String[] columns = {"ID","XM","SFZ","CSRQ","XBDM","XBMC",
                                      "MZDM","MZMC","YYCSDM","YYCSMC","SWKSSJ",
                                      "XWSJ","RWSJ","SWZDH","JLRKSJ","ZHGXSJ","RKSJ"
                                     };
            List<String> list = convertSql(execSql,parallelism,totalNum);
            for(int i=0;i<list.size();i++){
                final int index = i;
                final String psql = list.get(i);
                new Thread(){
                    public void run(){
                        long beginDate = System.currentTimeMillis();
                        Connection conn = null;
                        PreparedStatement pstat = null;
                        ResultSet rs = null;
                        // 1-based row counter so the first flush happens at exactly submitNum rows
                        int rowCount = 1;
                        try {
                            conn = DBUtil.getConnect(userName, passWord, driver, url);
                            pstat = conn.prepareStatement(psql);
                            System.out.println("执行sql="+psql);
                            rs = pstat.executeQuery();
                            List<Map<String,Object>> dataList = new ArrayList<Map<String,Object>>();
                            while(rs.next()){
                                Map<String, Object> map = new HashMap<String, Object>();
                                for(int j=0;j<columns.length;j++){
                                    map.put(columns[j], rs.getObject(columns[j]));
                                }
                                dataList.add(map);
                                if(rowCount%submitNum==0){ // flush a full batch
                                    productorData(dataList);
                                    dataList.clear();
                                }
                                rowCount++;
                            }
                            if(dataList.size()>0){ // flush the trailing partial batch
                                System.out.println("线程："+Thread.currentThread()+",插入剩余数据....");
                                productorData(dataList);
                            }
                            long endDate = System.currentTimeMillis();
                            System.out.println("线程="+Thread.currentThread()+",耗时："+(endDate-beginDate)/1000);
                        } catch (Exception e) {
                            System.out.println("执行sql"+(index+1)+"报错了");
                            // Keep the stack trace: a message-only println made failures undiagnosable.
                            e.printStackTrace();
                        }finally{
                            DBUtil.closeDB(conn, pstat, rs);
                        }
                    }
                }.start();
            }

        } catch (Exception e) {
            System.out.println("抽取执行失败。。。");
            e.printStackTrace();
        }
    }

    /**
     * Sends one batch of row maps to Kafka, one record per row.
     * <p>
     * A fresh producer is created and closed per batch; rows are distributed
     * round-robin over {@code Constants.KAFKA_PARTITION_NUM} partitions, and
     * each send blocks until the broker acknowledges ({@code acks=all}).
     *
     * @param dataList rows to publish; each map is serialized to a byte array
     */
    private void productorData(List<Map<String, Object>> dataList){
        Producer<String, byte[]> producer = null;
        try {
            Properties props = new Properties();
            // Kafka broker list
            props.put("bootstrap.servers",Constants.KAFKA_SERVERS);
            /* acks=0   do not wait for any broker acknowledgement
               acks=1   wait for the leader partition only
               acks=all (or -1) wait for leader and in-sync followers */
            props.put("acks", "all");
            // number of retries after a failed send (0 = fail fast)
            props.put("retries", 0);
            // per-partition batch size in bytes (16 KB) -- required for batching
            props.put("batch.size", 16384);
            // delay sends up to this many ms to fill batches (default is send immediately)
            props.put("linger.ms", 10);
            // total producer buffer (32 MB); when exhausted, send() blocks up to
            // max.block.ms and then throws TimeoutException
            props.put("buffer.memory", 33554432);
            // key serializer: when no explicit partition is given, the key hash
            // determines the target partition
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            // value serializer: row map is sent as a serialized byte array
            props.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer");

            producer = new KafkaProducer<String,byte[]>(props);
            for(int i=0;i<dataList.size();i++){
                // If the topic does not exist, it is auto-created with the
                // broker-configured partition/replica counts.
                Map<String, Object> map = dataList.get(i);
                // round-robin across the configured partition count
                int partition = i%Constants.KAFKA_PARTITION_NUM;
                ProducerRecord<String, byte[]> message = new ProducerRecord<String, byte[]>(
                        Constants.TOPIC_NAME, partition, Integer.toString(i), ObjTool.ObjectToByte(map));
                // get() blocks until acknowledged -> effectively a synchronous send
                // (no cast needed: Future<RecordMetadata>.get() already returns RecordMetadata)
                RecordMetadata recordMetadata = producer.send(message).get();
//                System.out.println("分区："+recordMetadata.partition()+"offset:"+recordMetadata.offset());
            }
            System.out.println("消费完成=："+dataList.size());
        } catch (Exception e) {
            e.printStackTrace();
        }finally{
            if(null!=producer){
                producer.close(); // flushes pending records and releases resources
            }
        }
    }

    public static void main(String[] args) {
          new KafkaToOracle().extractData();
    }

    /**
     * Splits a base query into ROWNUM-range-partitioned extraction SQLs.
     * <p>
     * With {@code parallelism > 1} and more than 10000 rows, the range
     * [0, totalCount] is divided evenly; the remainder rows are appended to
     * the last partition. Otherwise a single SQL covering all rows is returned.
     * NOTE: execSql is concatenated directly into the statement -- callers must
     * only pass trusted, internally-built SQL.
     *
     * @param execSql     base query to partition
     * @param parallelism desired number of partitions (reader threads)
     * @param totalCount  total number of rows the base query yields
     * @return one ROWNUM-ranged SQL string per partition
     */
    public static List<String> convertSql(String execSql,int parallelism,long totalCount){
        List<String> extractSqlList=new ArrayList<String>(parallelism);

        if(parallelism>1&&totalCount>10000){ // worth partitioning: parallel and above minimum slice size
            String sql="select * from (select o.*,rownum as rn from ("+execSql+") o) p";
            long partitionCount=totalCount/parallelism;
            long remainder=totalCount%parallelism; // leftover rows go to the last partition
            for(int i=0;i<parallelism;i++){
                String returnSql="";
                if(i==(parallelism-1)){
                    returnSql=sql+" where rn>"+(i*partitionCount)+" and rn<="+((i+1)*partitionCount+remainder);
                }else{
                    returnSql=sql+" where rn>"+(i*partitionCount)+" and rn<="+(i+1)*partitionCount;
                }
                extractSqlList.add(returnSql);
            }
        }else{
            // single partition capped at totalCount rows
            String returnSql="select * from (select o.*,rownum as rn from ("+execSql+") o where rownum<="+totalCount+") where rn>0";
            extractSqlList.add(returnSql);
        }
        return extractSqlList;
    }

}

