/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package com.waleipt.api.es.Cluster;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.CountDownLatch;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.ClusterModule;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.LocalClusterUpdateTask;
import org.elasticsearch.cluster.NamedDiff;
import org.elasticsearch.cluster.NodeConnectionsService;
import org.elasticsearch.cluster.metadata.IndexGraveyard;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry;
import org.elasticsearch.common.io.stream.Writeable.Reader;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.ingest.IngestMetadata;
import org.elasticsearch.node.Node;
import org.elasticsearch.script.ScriptMetaData;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.TransportSettings;
import org.elasticsearch.transport.netty4.Netty4Transport;
import org.junit.Test;

/**
 * Manual integration test that hand-wires a minimal Elasticsearch node
 * (transport, cluster service, allocation service, zen discovery) without
 * going through {@link Node}, and checks that two such nodes discover each
 * other and form one cluster.
 *
 * <p>NOTE(review): the tests assume 10.100.33.155 is a locally bindable
 * address — confirm before running on another machine.
 */
public class SimpleTest {

	/**
	 * Boots a single non-master data node with a fixed network configuration
	 * and keeps the JVM alive for 50s so the node can be observed externally.
	 */
	@Test
	public void testNodeCluster()
	{
		ThreadPool threadPool = new TestThreadPool(getClass().getName());
		Settings settings = Settings.builder()
				.put("cluster.name", "my-application")
				.put("node.name", "node-155-coding")
				.put("network.host", "10.100.33.155")
				.put("tcp.port", 9301)
				.put("http.port", 9200)
				.put("http.cors.enabled", true)
				.put("http.cors.allow-origin", "*")
				.put("node.master", "false")
				.put("node.data", "true")
				.put("discovery.zen.ping.unicast.hosts", "10.100.33.155")
				.build();

		startNode(settings, threadPool);

		try {
			// Keep the process alive long enough to observe the node.
			Thread.sleep(50000);
		} catch (InterruptedException e) {
			// BUG FIX: restore the interrupt status instead of swallowing
			// it with an auto-generated printStackTrace().
			Thread.currentThread().interrupt();
		}
	}

	/**
	 * Starts two nodes and prints each node's view of the cluster members,
	 * to verify that both ended up in the same cluster named "test".
	 */
	@Test
	public void testNodeStart() {
		ThreadPool threadPool = new TestThreadPool(getClass().getName());

		Settings settings_node_0 = Settings.builder()
				.put(TransportSettings.BIND_HOST.getKey(), "10.100.33.155")
				.put(TransportSettings.PORT.getKey(), 9300)
				.put("node.name", "node-1")
				// BUG FIX: node.master is a boolean setting; the original
				// value "node-1" could never parse as true/false. The intent
				// was clearly to make node-1 master-eligible.
				.put("node.master", true)
				.put("cluster.name", "test")
				.build();
		ClusterService clusterService1 = startNode(settings_node_0, threadPool);

		Settings settings_node_1 = Settings.builder()
				.put(TransportSettings.BIND_HOST.getKey(), "10.100.33.155")
				.put(TransportSettings.PORT.getKey(), 9301)
				.put("node.name", "node-2")
				.put("cluster.name", "test")
				.build();
		ClusterService clusterService2 = startNode(settings_node_1, threadPool);

		try {
			// Give discovery a moment to run before inspecting state.
			Thread.sleep(1000);
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt();
			throw new RuntimeException(e);
		}

		printClusterView("clusterService1", clusterService1);
		printClusterView("clusterService2", clusterService2);
	}

	/** Dumps one cluster service's view of the cluster members to stderr. */
	private static void printClusterView(String label, ClusterService clusterService) {
		DiscoveryNodes clusterNodes = clusterService.state().getNodes();
		System.err.println(String.format("%s->clusterName:%s;", label, clusterService.getClusterName()));
		clusterNodes.forEach(node_item -> {
			System.err.println(String.format("\tNodeName:%s;Address:%s;%s", node_item.getName(), node_item.getAddress(),
					(node_item.isMasterNode() ? "master" : "")));
		});
	}

	/** Registers a {@link MetaData.Custom} named writeable plus its diff reader. */
	private static <T extends MetaData.Custom> void registerMetaDataCustom(List<Entry> entries, String name, Reader<? extends T> reader,
			Reader<NamedDiff> diffReader) {
		registerCustom(entries, MetaData.Custom.class, name, reader, diffReader);
	}

	/** Registers a named writeable under {@code category} together with its diff reader. */
	private static <T extends NamedWriteable> void registerCustom(List<Entry> entries, Class<T> category, String name,
			Reader<? extends T> reader, Reader<NamedDiff> diffReader) {
		entries.add(new Entry(category, name, reader));
		entries.add(new Entry(NamedDiff.class, name, diffReader));
	}

	/**
	 * Wires up and starts a minimal node: Netty transport, transport service,
	 * cluster service (with a no-op state publisher), allocation service and
	 * zen discovery.
	 *
	 * @param settings   node-level settings (name, bind host, port, cluster name)
	 * @param threadPool shared thread pool; callers own its lifecycle
	 * @return the started {@link ClusterService} of the new node
	 */
	private ClusterService startNode(Settings settings, ThreadPool threadPool) {

		CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService();

		// Register only the custom metadata this test actually needs.
		List<Entry> entries = new ArrayList<>();
		registerMetaDataCustom(entries, IndexGraveyard.TYPE, IndexGraveyard::new, IndexGraveyard::readDiffFrom);
		// SIMPLIFIED: the original wrapped 'entries' in
		// Stream.of(entries.stream()).flatMap(identity()).collect(toList())
		// just to copy the list; the registry consumes it directly.
		final NamedWriteableRegistry registry = new NamedWriteableRegistry(entries);

		// Transport layer.
		final Netty4Transport transport = new Netty4Transport(settings, threadPool,
				new NetworkService(settings, Collections.emptyList()), BigArrays.NON_RECYCLING_INSTANCE, registry,
				circuitBreakerService);

		// Transport service; the local-node factory takes the node name from
		// settings, falling back to a random UUID.
		TransportService transportService = new TransportService(settings, transport, threadPool,
				TransportService.NOOP_TRANSPORT_INTERCEPTOR,
				(boundAddress) -> DiscoveryNode.createLocal(settings, boundAddress.publishAddress(),
						settings.get(Node.NODE_NAME_SETTING.getKey(), UUIDs.randomBase64UUID())),
				null);
		transportService.start();

		DiscoveryNode node = transportService.getLocalNode();

		// Cluster service with a no-op state publisher: state changes are
		// applied locally only.
		ClusterService clusterService = new ClusterService(settings,
				new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), threadPool, () -> node);
		final NodeConnectionsService nodeConnectionsService = new NodeConnectionsService(settings, threadPool, transportService);
		nodeConnectionsService.start();
		clusterService.setNodeConnectionsService(nodeConnectionsService);
		clusterService.setClusterStatePublisher((event, ackListener) -> {
		});
		clusterService.setDiscoverySettings(new DiscoverySettings(Settings.EMPTY,
				new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)));
		clusterService.start();

		// Promote the local node to master in the initial cluster state.
		final DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterService.state().nodes());
		nodes.masterNodeId(clusterService.localNode().getId());
		setState(clusterService, ClusterState.builder(clusterService.state()).nodes(nodes).build());

		// Allocation service backed by randomly ordered deciders.
		AllocationService allocationService = new AllocationService(settings,
				randomAllocationDeciders(settings,
						new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS)),
				new GatewayAllocator(Settings.EMPTY, null, null),
				new BalancedShardsAllocator(settings),
				EmptyClusterInfoService.INSTANCE);

		ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, transportService,
				new NamedWriteableRegistry(ClusterModule.getNamedWriteables()), clusterService, Collections::emptyList);
		zenDiscovery.setAllocationService(allocationService);
		// BUG FIX: the original called zenDiscovery.start() twice (once
		// before and once after acceptIncomingRequests()); a lifecycle
		// component must only be started once.
		zenDiscovery.start();
		transportService.acceptIncomingRequests();
		zenDiscovery.startInitialJoin();

		return clusterService;
	}

	/**
	 * Builds the standard allocation deciders in a shuffled order, so nothing
	 * in the test can depend on decider ordering.
	 */
	public static AllocationDeciders randomAllocationDeciders(Settings settings, ClusterSettings clusterSettings) {
		List<AllocationDecider> deciders = new ArrayList<>(
				ClusterModule.createAllocationDeciders(settings, clusterSettings, Collections.emptyList()));
		Collections.shuffle(deciders, new Random());
		return new AllocationDeciders(settings, deciders);
	}

	/**
	 * Submits {@code clusterState} as a local cluster-state update task and
	 * blocks until it has been processed (or failed).
	 */
	public static void setState(ClusterService clusterService, ClusterState clusterState) {
		CountDownLatch latch = new CountDownLatch(1);
		clusterService.submitStateUpdateTask("test setting state", new LocalClusterUpdateTask() {
			@Override
			public ClusterTasksResult<LocalClusterUpdateTask> execute(ClusterState currentState) throws Exception {
				// Bump the version so listeners that depend on it see a change.
				return newState(ClusterState.builder(clusterState).version(currentState.version() + 1).build());
			}

			@Override
			public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
				latch.countDown();
			}

			@Override
			public void onFailure(String source, Exception e) {
				e.printStackTrace();
				// BUG FIX: release the waiter on failure too; otherwise
				// latch.await() below hangs forever when the task fails.
				latch.countDown();
			}
		});
		try {
			latch.await();
		} catch (InterruptedException e) {
			Thread.currentThread().interrupt();
			throw new ElasticsearchException("unexpected interruption", e);
		}
	}

}
