package com.ehualu.impda.hadoop;

import java.io.*;

import com.ehualu.impda.kafkasecurity.LoginUtilKafkaHuawei;
import com.ehualu.impda.security.LoginUtil;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;


import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.http.HttpEntity;
import org.apache.http.NameValuePair;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.entity.UrlEncodedFormEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.message.BasicNameValuePair;
import org.apache.http.util.EntityUtils;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;


public class HdfsExample {

    private final static Log LOG = LogFactory.getLog(HdfsExample.class.getName());

    /** Kafka producer used by {@link #main} to publish the fetched JSON result. */
    public static KafkaProducer<String, String> producer;

    /** Kafka topic the fetched JSON result is published to. */
    public static String topic;

    /** Async-send flag (reserved; currently unused by this example). */
    public static Boolean isAsync;

    // Kafka producer configuration property keys.
    public static String bootstrapServers = "bootstrap.servers";

    public static String clientId = "client.id";

    public static String keySerializer = "key.serializer";

    public static String valueSerializer = "value.serializer";

    public static String securityProtocol = "security.protocol";

    public static String saslKerberosServiceName = "sasl.kerberos.service.name";

    public static String kerberosDomainName = "kerberos.domain.name";

    /** Reserved message count for the (disabled) batch-send example. */
    public static int messageNumToSend = 100;

    /**
     * Keytab file name of the machine-machine account applied for by the user.
     */
    private static final String USER_KEYTAB_FILE = "user.keytab";

    /**
     * Principal name of the machine-machine account applied for by the user.
     */
    private static final String USER_PRINCIPAL = "super_rj";

    private static final String STORAGE_POLICY_HOT = "HOT";

    private static String PATH_TO_HDFS_SITE_XML = HdfsExample.class.getClassLoader().getResource("hdfs-site.xml")
            .getPath();
    private static String PATH_TO_CORE_SITE_XML = HdfsExample.class.getClassLoader().getResource("core-site.xml")
            .getPath();

    private static Configuration conf = null;

    private static String PRINCIPAL_NAME = "super_rj";
    private static String PATH_TO_KEYTAB = HdfsExample.class.getClassLoader().getResource("user.keytab").getPath();
    private static String PATH_TO_KRB5_CONF = HdfsExample.class.getClassLoader().getResource("krb5.conf").getPath();

    private FileSystem fSystem; /* HDFS file system */
    private String DEST_PATH;
    private String FILE_NAME;

    /**
     * Creates an example bound to an HDFS directory and file name, and opens the
     * {@link FileSystem} handle.
     *
     * @param path     HDFS directory used by this example
     * @param fileName file name created/read under {@code path}
     * @throws IOException if the file system cannot be obtained
     */
    public HdfsExample(String path, String fileName) throws IOException {
        this.DEST_PATH = path;
        this.FILE_NAME = fileName;
        instanceBuild();
    }

    /**
     * Runs the full HDFS round trip: create directory, write, append, read,
     * delete file, delete directory.
     *
     * @throws IOException on any HDFS failure
     */
    public void test() throws IOException {
        // create directory
        mkdir();

        // write file
        write();

        // append file
        append();

        // read file
        read();

        // delete file
        delete();

        // delete directory
        rmdir();
    }

    /**
     * Builds the HDFS {@link FileSystem} instance.
     * <p>
     * A FileSystem object is normally unique per JVM and thread-safe, so this
     * instance can be reused and does not need to be closed immediately. If a
     * thread needs its own long-lived FileSystem instance, set
     * {@code conf.setBoolean("fs.hdfs.impl.disable.cache", true)} first (so a
     * fresh connection is created instead of the cached one) and manage the
     * resource carefully to avoid leaks.
     */
    private void instanceBuild() throws IOException {
        fSystem = FileSystem.get(conf);
    }

    /**
     * Deletes the example directory {@link #DEST_PATH}.
     *
     * @throws IOException on HDFS failure
     */
    private void rmdir() throws IOException {
        Path destPath = new Path(DEST_PATH);
        if (!deletePath(destPath)) {
            LOG.error("failed to delete destPath " + DEST_PATH);
            return;
        }

        LOG.info("success to delete path " + DEST_PATH);
    }

    /**
     * Prepares Kafka Kerberos security configuration (krb5.conf, JAAS file and
     * ZooKeeper server principal) from files under src/main/resources.
     *
     * @throws IOException if the security configuration cannot be written
     */
    public static void securityPrepare() throws IOException {
        String filePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main"
                + File.separator + "resources" + File.separator;
        String krbFile = filePath + "krb5.conf";
        String userKeyTableFile = filePath + USER_KEYTAB_FILE;

        // Escape backslashes for Windows paths (JAAS config requires "\\").
        userKeyTableFile = userKeyTableFile.replace("\\", "\\\\");
        krbFile = krbFile.replace("\\", "\\\\");

        LoginUtilKafkaHuawei.setKrb5Config(krbFile);
        LoginUtilKafkaHuawei.setZookeeperServerPrincipal("zookeeper/hadoop.hadoop.com");
        LoginUtilKafkaHuawei.setJaasFile(USER_PRINCIPAL, userKeyTableFile);
    }

    /**
     * Checks whether the Kafka client runs in security (Kerberos) mode by
     * reading the optional "kafkaSecurityMode" properties file under
     * src/main/resources.
     *
     * @return {@code true} if {@code kafka.client.security.mode=yes}; {@code false}
     *         if the file is missing, unreadable, or set otherwise
     */
    public static Boolean isSecurityModel() {
        Boolean isSecurity = false;
        String krbFilePath = System.getProperty("user.dir") + File.separator + "src" + File.separator + "main"
                + File.separator + "resources" + File.separator + "kafkaSecurityMode";

        // file does not exist.
        if (!isFileExists(krbFilePath)) {
            return isSecurity;
        }

        Properties securityProps = new Properties();
        // try-with-resources: the original code leaked this FileInputStream.
        try (FileInputStream in = new FileInputStream(krbFilePath)) {
            securityProps.load(in);
            if ("yes".equalsIgnoreCase(securityProps.getProperty("kafka.client.security.mode"))) {
                isSecurity = true;
            }
        } catch (Exception e) {
            // commons-logging has no "{}" placeholders; log the throwable properly.
            LOG.info("The Exception occured.", e);
        }

        return isSecurity;
    }

    /**
     * Checks whether a local file exists.
     *
     * @param fileName absolute or relative local path
     * @return {@code true} if the file exists
     */
    private static boolean isFileExists(String fileName) {
        File file = new File(fileName);

        return file.exists();
    }

    /**
     * Creates the example directory {@link #DEST_PATH}.
     *
     * @throws IOException on HDFS failure
     */
    private void mkdir() throws IOException {
        Path destPath = new Path(DEST_PATH);
        if (!createPath(destPath)) {
            LOG.error("failed to create destPath " + DEST_PATH);
            return;
        }

        LOG.info("success to create path " + DEST_PATH);
    }

    /**
     * Creates the example file and writes a fixed marker string into it.
     * The stream is hsync'ed so the data is durable before returning.
     *
     * @throws IOException on HDFS failure
     */
    private void write() throws IOException {
        final String content = "hi, I am bigdata. It is successful if you can see me.";
        // HDFS paths always use '/'; File.separator would break on Windows clients.
        try (FSDataOutputStream out = fSystem.create(new Path(DEST_PATH + "/" + FILE_NAME))) {
            out.write(content.getBytes());
            out.hsync();
            LOG.info("success to write.");
        }
    }

    /**
     * Creates (or overwrites) the example file with the given JSON string.
     *
     * @param js JSON payload to store
     * @throws IOException on HDFS failure
     */
    private void writejs(String js) throws IOException {
        // HDFS paths always use '/'; File.separator would break on Windows clients.
        try (FSDataOutputStream out = fSystem.create(new Path(DEST_PATH + "/" + FILE_NAME))) {
            out.write(js.getBytes());
            out.hsync();
            LOG.info("success to write.");
        }
    }

    /**
     * Appends a fixed marker string to the example file.
     *
     * @throws IOException on HDFS failure
     */
    private void append() throws IOException {
        final String content = "I append this content.";
        try (FSDataOutputStream out = fSystem.append(new Path(DEST_PATH + "/" + FILE_NAME))) {
            out.write(content.getBytes());
            out.hsync();
            LOG.info("success to append.");
        }
    }

    /**
     * Reads the example file line by line and logs the concatenated content.
     *
     * @throws IOException on HDFS failure
     */
    private void read() throws IOException {
        String strPath = DEST_PATH + "/" + FILE_NAME;
        Path path = new Path(strPath);
        FSDataInputStream in = null;
        BufferedReader reader = null;
        // Local, single-threaded accumulation: StringBuilder suffices.
        StringBuilder strBuilder = new StringBuilder();

        try {
            in = fSystem.open(path);
            reader = new BufferedReader(new InputStreamReader(in));
            String sTempOneLine;

            while ((sTempOneLine = reader.readLine()) != null) {
                strBuilder.append(sTempOneLine);
            }

            LOG.info("result is : " + strBuilder.toString());
            LOG.info("success to read.");

        } finally {
            // make sure the streams are closed finally.
            IOUtils.closeStream(reader);
            IOUtils.closeStream(in);
        }
    }

    /**
     * Deletes the example file.
     *
     * @throws IOException on HDFS failure
     */
    private void delete() throws IOException {
        Path beDeletedPath = new Path(DEST_PATH + "/" + FILE_NAME);
        if (fSystem.delete(beDeletedPath, true)) {
            LOG.info("success to delete the file " + DEST_PATH + File.separator + FILE_NAME);
        } else {
            LOG.warn("failed to delete the file " + DEST_PATH + File.separator + FILE_NAME);
        }
    }

    /**
     * Creates the given HDFS path if it does not exist yet.
     *
     * @param filePath directory to create
     * @return always {@code true} (mkdirs result is not checked, matching the
     *         original contract)
     * @throws IOException on HDFS failure
     */
    private boolean createPath(final Path filePath) throws IOException {
        if (!fSystem.exists(filePath)) {
            fSystem.mkdirs(filePath);
        }
        return true;
    }

    /**
     * Recursively deletes the given HDFS path.
     *
     * @param filePath path to delete
     * @return {@code false} if the path does not exist; otherwise the result of
     *         the recursive delete
     * @throws IOException on HDFS failure
     */
    private boolean deletePath(final Path filePath) throws IOException {
        if (!fSystem.exists(filePath)) {
            return false;
        }
        return fSystem.delete(filePath, true);
    }

    /**
     * Entry point: authenticates against the secured cluster, fetches a page of
     * picture records over HTTP, stores the JSON into HDFS, and publishes it to
     * Kafka.
     *
     * @param args unused
     * @throws Exception on any initialization, HTTP, HDFS or Kafka failure
     */
    public static void main(String[] args) throws Exception {
        // Initialization and Kerberos authentication.
        confLoad();
        authentication();
        securityPrepare();

        HdfsExample hdfsExamples = new HdfsExample("/user/hdfs-examples", "test.txt");

        String isUrl = "http://37.158.40.53:8080/dahuaIS/rest/picrecord/search";

        // Build the GET query string.
        List<NameValuePair> params = new ArrayList<NameValuePair>();
        params.add(new BasicNameValuePair("q",
                "{\"startId\": 1,\"endId\": 100,\"page\":{\"pageNo\": 1,\"pageSize\":20}}"));
        String query = EntityUtils.toString(new UrlEncodedFormEntity(params));
        HttpGet httpMethod = new HttpGet(isUrl + "?" + query);

        // Request / connect / socket timeouts: 60s each.
        RequestConfig requestConfig = RequestConfig.custom()
                .setConnectionRequestTimeout(60000)
                .setConnectTimeout(60000)
                .setSocketTimeout(60000)
                .build();
        httpMethod.setConfig(requestConfig);
        httpMethod.addHeader("Accept", "application/json;charset=UTF-8");
        httpMethod.addHeader("Content-Type", "application/json;charset=UTF-8");
        httpMethod.addHeader("authorization", "YHLTP");

        String result;
        // try-with-resources releases client and response even on failure
        // (the original leaked the CloseableHttpClient).
        try (CloseableHttpClient client = HttpClients.custom().build();
             CloseableHttpResponse response = client.execute(httpMethod)) {
            HttpEntity entity = response.getEntity();
            result = EntityUtils.toString(entity, "UTF-8");
        } finally {
            httpMethod.releaseConnection();
        }

        // Store the fetched JSON into HDFS.
        hdfsExamples.writejs(result);
        System.out.println(result);

        Properties props = new Properties();

        KafkaProperties kafkaProc = KafkaProperties.getInstance();

        // Broker address list.
        props.put(bootstrapServers, kafkaProc.getValues(bootstrapServers, "37.158.97.137:21005,37.158.97.136:21005"));
        // Client ID.
        props.put(clientId, kafkaProc.getValues(clientId, "DemoProducer"));
        // Key serializer: must match KafkaProducer<String, String>. The previous
        // default (IntegerSerializer) would fail for any non-null String key.
        props.put(keySerializer,
                kafkaProc.getValues(keySerializer, "org.apache.kafka.common.serialization.StringSerializer"));
        // Value serializer.
        props.put(valueSerializer,
                kafkaProc.getValues(valueSerializer, "org.apache.kafka.common.serialization.StringSerializer"));
        // Protocol type: SASL_PLAINTEXT or PLAINTEXT.
        props.put(securityProtocol, kafkaProc.getValues(securityProtocol, "SASL_PLAINTEXT"));
        // Service name.
        props.put(saslKerberosServiceName, "kafka");
        // Domain name.
        props.put(kerberosDomainName, kafkaProc.getValues(kerberosDomainName, "hadoop.hadoop.com"));

        producer = new KafkaProducer<String, String>(props);
        topic = "gaoshujson";

        try {
            ProducerRecord<String, String> record = new ProducerRecord<String, String>(topic, result);
            // Synchronous send: block until the broker acknowledges.
            producer.send(record).get();
        } finally {
            // The original never closed the producer, leaking its network threads.
            producer.close();
        }
    }

    /**
     * Adds configuration files. If the application runs on Linux, the paths of
     * core-site.xml and hdfs-site.xml must point to the Linux client files.
     */
    private static void confLoad() throws IOException {
        System.setProperty("java.security.krb5.conf", PATH_TO_KRB5_CONF);
        conf = new Configuration();
        // conf file
        conf.addResource(new Path(PATH_TO_HDFS_SITE_XML));
        conf.addResource(new Path(PATH_TO_CORE_SITE_XML));
    }

    /**
     * Kerberos security authentication. If the application runs on Linux, the
     * krb5.conf and keytab paths must be absolute Linux paths; set the keytab
     * and principal to the current user's keytab file and username.
     */
    private static void authentication() throws IOException {
        // security mode
        if ("kerberos".equalsIgnoreCase(conf.get("hadoop.security.authentication"))) {
            System.setProperty("java.security.krb5.conf", PATH_TO_KRB5_CONF);
            LoginUtil.login(PRINCIPAL_NAME, PATH_TO_KEYTAB, PATH_TO_KRB5_CONF, conf);
        }
    }

}

//class HdfsExampleThread extends Thread {
//	private final static Log LOG = LogFactory.getLog(HdfsExampleThread.class.getName());
//
//	/**
//	 *
//	 * @param threadName
//	 */
//	public HdfsExampleThread(String threadName) {
//		super(threadName);
//	}
//
//	public void run() {
//		HdfsExample example;
//		try {
//			example = new HdfsExample("/user/hdfs-examples/" + getName(), "test.txt");
//			example.test();
//		} catch (IOException e) {
//			LOG.error(e);
//		}
//	}
//}
