package org.elasticsearch;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.log4j.spi.LoggerFactory;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.config.Loggers;
import org.apache.logging.log4j.simple.SimpleLogger;
import org.apache.logging.log4j.simple.SimpleLoggerContext;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.solr.store.hdfs.HdfsDirectory;
import org.elasticsearch.action.DocWriteRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.Murmur3HashFunction;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.env.NodeMetaData;
import org.elasticsearch.gateway.MetaDataStateFormat;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.*;

import java.io.IOException;
import java.nio.file.FileSystem;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.regex.Matcher;

/**
 * Experimental scratch tool that loads an Elasticsearch index's on-disk metadata,
 * computes the target shard for a routing value, and writes a test document
 * directly into that shard's Lucene index.
 *
 * Created by wuliao on 2017/7/20.
 */
public class Main {

    /**
     * Entry point of the experiment.
     *
     * Steps:
     *  1. Load the latest {@link IndexMetaData} state from a hard-coded local
     *     Elasticsearch data directory (one index UUID folder).
     *  2. Compute the shard id for routing value {@code "499"} the same way
     *     Elasticsearch's operation routing would.
     *  3. Open that shard's Lucene index directory and write one test document
     *     carrying the metadata fields ({@code _uid}, {@code _index}, ...) that
     *     Elasticsearch itself stores per document.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        System.out.println("。。。。。。。。");
        Logger logger = new SimpleLoggerContext().getLogger("name");
        try {
            // Hard-coded path to the state files of a single index
            // (data/nodes/0/indices/<index-uuid>/). Windows-style separators are
            // intentional — this scratch tool targets a local Windows checkout.
            String basePath = "G:\\BigData\\WorkSpace\\elasticsearch\\distribution\\src\\main\\resources\\data\\nodes\\0\\indices\\K6JR4TorSpOQiYdSm2ncjw\\";
            IndexMetaData indexMetaData = IndexMetaData.FORMAT2.loadLatestState(logger, NamedXContentRegistry.EMPTY,
                    Paths.get(basePath));

            String name = indexMetaData.getIndex().getName();
            String uuid = indexMetaData.getIndex().getUUID();
            // Routing value "499" with partition offset 0 — mirrors how ES would
            // route a document whose id is 499.
            int shardId = calculateScaledShardId(indexMetaData, String.valueOf(499), 0);
            String shardIndexPath = basePath + shardId + "\\index";
            System.out.println(uuid + "...." + name);

            // try-with-resources guarantees the writer, directory and analyzer are
            // closed (and the Lucene write lock released) even if indexing throws.
            // Resources close in reverse order: writer -> directory -> analyzer.
            try (StandardAnalyzer analyzer = new StandardAnalyzer();
                 Directory directory = FSDirectory.open(Paths.get(shardIndexPath));
                 IndexWriter writer = new IndexWriter(directory, new IndexWriterConfig(analyzer))) {

                // Build a document that mimics the stored metadata fields
                // Elasticsearch writes alongside the user payload.
                Document document = new Document();
                document.add(new Field("_uid", "book#499", TextField.TYPE_STORED));
                document.add(new Field("first_name", "Wuliao", TextField.TYPE_STORED));
                document.add(new Field("_index", "aa", TextField.TYPE_STORED));
                document.add(new Field("_type", "book", TextField.TYPE_STORED));
                document.add(new Field("_version", "1", TextField.TYPE_STORED));

                // addDocument returns the writer's sequence number for the operation.
                System.out.println(writer.addDocument(document));
                writer.commit();
            }
        } catch (Exception e) {
            // Boundary catch for a throwaway experiment: log with full stack trace
            // through the logger instead of printStackTrace().
            logger.error("failed to load index metadata or write the test document", e);
        }
    }

    /**
     * Computes the shard id for a routing value, replicating Elasticsearch's
     * {@code OperationRouting#calculateScaledShardId}.
     *
     * @param indexMetaData    metadata of the target index (supplies routing shard
     *                         count and routing factor)
     * @param effectiveRouting the routing value — typically the document id
     * @param partitionOffset  offset within a routing partition; usually 0
     * @return the shard id the document belongs to
     */
    private static int calculateScaledShardId(IndexMetaData indexMetaData, String effectiveRouting, int partitionOffset) {
        final int hash = Murmur3HashFunction.hash(effectiveRouting) + partitionOffset;
        // Hash against getRoutingNumShards(), not getNumberOfShards(): a shrunken
        // index must keep hashing with the ORIGINAL shard count so existing
        // documents remain addressable; the routing factor scales back down.
        return Math.floorMod(hash, indexMetaData.getRoutingNumShards()) / indexMetaData.getRoutingFactor();
    }
}
