package drds.server.util.dataMigrator;

import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import drds.server.config.loader.SchemaLoader;
import drds.server.config.loader.xml.XmlSchemaLoader;
import drds.server.config.model.GroupDataSourceConfig;
import drds.server.config.model.GroupDataSourceNodeConfig;
import drds.server.config.model.MysqlInstanceInfo;
import drds.server.config.model.Schema;
import drds.server.config.model.ShardRule;
import drds.server.config.model.Table;
import drds.server.config.util.ConfigException;
import drds.server.route.shard_algorithm.AbstractShardAlgorithm;

/**
 * Loads the old (currently running) and new (target) data-migration
 * configuration files, compares them, and produces the list of tables whose
 * data must be migrated.
 */
public class ConfigComparer {

	private static final Logger LOGGER = LoggerFactory.getLogger(ConfigComparer.class);

	/*
	 * Classpath resource listing the tables to migrate per schema.
	 * Format: schema1=tb1,tb2,...  schema2=all  ...
	 * An empty value or "all" means every table of that schema.
	 */
	private final static String TABLES_FILE = "/migrateTables.properties";
	// Target (new) topology the data is migrated to.
	private final static String NEW_SCHEMA = "/newSchema.xml";
	private final static String NEW_RULE = "/newRule.xml";
	// Records which write host is currently active after a master/slave switch.
	private final static String DN_INDEX_FILE = "/dnindex.properties";

	private SchemaLoader oldLoader;
	private SchemaLoader newLoader;

	private Map<String, GroupDataSourceConfig> oldDataHosts;
	private Map<String, GroupDataSourceNodeConfig> oldDataNodes;
	private Map<String, Schema> oldSchemas;

	private Map<String, GroupDataSourceConfig> newDataHosts;
	private Map<String, GroupDataSourceNodeConfig> newDataNodes;
	private Map<String, Schema> newSchemas;

	// When true, always use the master (first) write host even after a switch.
	private boolean isAwaysUseMaster;
	private Properties dnIndexProps;

	// Final result: migration info for every table that actually needs moving.
	private List<TableMigrateInfo> migratorTables = new ArrayList<TableMigrateInfo>();

	/**
	 * Loads old and new configuration and computes the migration table list.
	 *
	 * @param isAwaysUseMaster use the master data source even after a
	 *            master/slave switch; when {@code false}, dnindex.properties
	 *            is consulted to locate the currently active write host
	 * @throws Exception if any configuration resource is missing or invalid
	 */
	public ConfigComparer(boolean isAwaysUseMaster) throws Exception {
		this.isAwaysUseMaster = isAwaysUseMaster;
		loadOldConfig();
		loadNewConfig();
		loadTablesFile();
	}

	public List<TableMigrateInfo> getMigratorTables() {
		return migratorTables;
	}

	// Loads the currently running (old) schema.xml / rule.xml.
	private void loadOldConfig() {
		try {
			oldLoader = new XmlSchemaLoader();
			oldDataHosts = oldLoader.getGroupDataSourceConfigMap();
			oldDataNodes = oldLoader.getGroupDataSourceNodeConfigMap();
			oldSchemas = oldLoader.getSchemaMap();
		} catch (Exception e) {
			throw new ConfigException(" old config for migrate read fail!please check schema.xml or  rule.xml  " + e);
		}
	}

	// Loads the target (new) newSchema.xml / newRule.xml.
	private void loadNewConfig() {
		try {
			newLoader = new XmlSchemaLoader(NEW_SCHEMA, NEW_RULE);
			newDataHosts = newLoader.getGroupDataSourceConfigMap();
			newDataNodes = newLoader.getGroupDataSourceNodeConfigMap();
			newSchemas = newLoader.getSchemaMap();
		} catch (Exception e) {
			throw new ConfigException(" new config for migrate read fail!please check newSchema.xml or  newRule.xml  " + e);
		}
	}

	/**
	 * Reads migrateTables.properties and resolves migration info for every
	 * listed schema/table pair.
	 */
	private void loadTablesFile() throws Exception {
		Properties pro = new Properties();
		if (!isAwaysUseMaster) {
			// Need the switch index to find the active write host per dataHost.
			dnIndexProps = loadDnIndexProps();
		}
		InputStream inputStream = ConfigComparer.class.getResourceAsStream(TABLES_FILE);
		if (inputStream == null) {
			// getResourceAsStream returns null for a missing resource; fail
			// with a clear message instead of an NPE inside Properties.load().
			throw new ConfigException("migrateTables.properties is not found on the classpath!");
		}
		try {
			pro.load(inputStream);
		} catch (Exception e) {
			throw new ConfigException("migrateTables.properties read fail!");
		} finally {
			try {
				inputStream.close();
			} catch (IOException e) {
				throw new ConfigException(e.getMessage());
			}
		}
		Iterator<Entry<Object, Object>> it = pro.entrySet().iterator();
		while (it.hasNext()) {
			Entry<Object, Object> entry = it.next();
			String schemaName = entry.getKey().toString();
			String tables = entry.getValue().toString();
			loadMigratorTables(schemaName, getTables(tables));
		}
	}

	/**
	 * Parses the comma-separated table list of one schema. An empty value or
	 * "all" means every table; that case is signalled by an empty array.
	 */
	private String[] getTables(String tables) {
		String value = tables.trim();
		if (value.isEmpty() || value.equalsIgnoreCase("all")) {
			return new String[] {};
		}
		String[] names = value.split(",");
		for (int i = 0; i < names.length; i++) {
			// Tolerate whitespace around table names in the properties file.
			names[i] = names[i].trim();
		}
		return names;
	}

	/*
	 * Loads migration info for one schema; an empty tables array means every
	 * table under the schema is migrated.
	 */
	private void loadMigratorTables(String schemaName, String[] tables) {
		if (!DataMigratorUtil.isKeyExistIgnoreCase(oldSchemas, schemaName)) {
			throw new ConfigException("oldSchema:" + schemaName + " is not exists!");
		}
		if (!DataMigratorUtil.isKeyExistIgnoreCase(newSchemas, schemaName)) {
			throw new ConfigException("newSchema:" + schemaName + " is not exists!");
		}
		Map<String, Table> oldTables = DataMigratorUtil.getValueIgnoreCase(oldSchemas, schemaName).getTableMap();
		Map<String, Table> newTables = DataMigratorUtil.getValueIgnoreCase(newSchemas, schemaName).getTableMap();
		if (tables.length > 0) {
			// Migrate only the explicitly listed tables of this schema.
			for (int i = 0; i < tables.length; i++) {
				Table oldTable = DataMigratorUtil.getValueIgnoreCase(oldTables, tables[i]);
				Table newTable = DataMigratorUtil.getValueIgnoreCase(newTables, tables[i]);
				loadMigratorTable(oldTable, newTable, schemaName, tables[i]);
			}
		} else {
			// Migrate every table of the schema; old and new config must
			// declare exactly the same table set.
			Set<String> oldSet = oldTables.keySet();
			Set<String> newSet = newTables.keySet();
			if (!oldSet.equals(newSet)) {
				throw new ConfigException("new & old table config is not equal!");
			}
			for (String tableName : oldSet) {
				Table oldTable = oldTables.get(tableName);
				Table newTable = newTables.get(tableName);
				loadMigratorTable(oldTable, newTable, schemaName, tableName);
			}
		}
	}

	// Builds the TableMigrateInfo for one table, skipping global tables and
	// tables whose data-node distribution did not change.
	private void loadMigratorTable(Table oldTable, Table newTable, String schemaName, String tableName) {
		// Only sharding tables may be configured for migration.
		if (oldTable == null || newTable == null) {
			throw new ConfigException("please check migrateTables.properties,make sure " + schemaName + ":" + tableName + " is sharding table ");
		}
		// Global tables are fully replicated on every node: nothing to move.
		if (oldTable.isGlobalTable() || newTable.isGlobalTable()) {
			String message = "global table: " + schemaName + ":" + tableName + " is ignore!";
			System.out.println("Warn: " + message);
			LOGGER.warn(message);
		} else {
			List<DataNode> oldDN = getDataNodes(oldTable, oldDataNodes, oldDataHosts);
			List<DataNode> newDN = getDataNodes(newTable, newDataNodes, newDataHosts);
			// Skip tables whose data-node distribution did not change.
			if (isNeedMigrate(oldDN, newDN)) {
				checkRuleConfig(oldTable.getShardRule(), newTable.getShardRule(), schemaName, tableName);
				ShardRule newRC = newTable.getShardRule();
				TableMigrateInfo tmi = new TableMigrateInfo(schemaName, tableName, oldDN, newDN, newRC.getShardAlgorithm(), newRC.getColumnName());
				migratorTables.add(tmi);
			} else {
				String message = schemaName + ":" + tableName + " is ignore,no need to migrate!";
				LOGGER.warn(message);
				System.out.println("Warn: " + message);
			}
		}
	}

	// Compares the old and new data-node distribution of a table.
	// NOTE(review): only the node COUNT is compared; a redistribution across a
	// same-sized node set would be reported as "no migration needed" — confirm
	// this is intentional.
	private boolean isNeedMigrate(List<DataNode> oldDN, List<DataNode> newDN) {
		return oldDN.size() != newDN.size();
	}

	// Resolves a table's data nodes down to concrete instance address,
	// credentials and physical database.
	private List<DataNode> getDataNodes(Table table, Map<String, GroupDataSourceNodeConfig> groupDataSourceNodeConfigMap, Map<String, GroupDataSourceConfig> groupDataSourceConfigMap) {
		List<DataNode> dataNodeList = new ArrayList<DataNode>();
		ArrayList<String> dataNodeNames = table.getDataNodeList();
		int i = 0;
		for (String name : dataNodeNames) {
			GroupDataSourceNodeConfig groupDataSourceNodeConfig = groupDataSourceNodeConfigMap.get(name);
			if (groupDataSourceNodeConfig == null) {
				throw new ConfigException("dataNode:" + name + " is not exists!");
			}
			String databaseName = groupDataSourceNodeConfig.getDatabase();
			String dataHost = groupDataSourceNodeConfig.getDataHost();
			GroupDataSourceConfig groupDataSourceConfig = groupDataSourceConfigMap.get(dataHost);
			if (groupDataSourceConfig == null) {
				throw new ConfigException("dataHost:" + dataHost + " is not exists!");
			}
			MysqlInstanceInfo[] writeHosts = groupDataSourceConfig.getWriteHosts();
			MysqlInstanceInfo currentWriteHost;
			if (isAwaysUseMaster) {
				currentWriteHost = writeHosts[0];
			} else {
				// Migration must run on the write host that is active after
				// any master/slave switch, as recorded in dnindex.properties.
				String index = dnIndexProps.getProperty(groupDataSourceConfig.getName());
				if (index == null) {
					throw new ConfigException("dnindex.properties has no entry for dataHost:" + groupDataSourceConfig.getName());
				}
				currentWriteHost = writeHosts[Integer.parseInt(index.trim())];
			}
			DataNode dataNode = new DataNode(name, currentWriteHost.getIp(), currentWriteHost.getPort(), currentWriteHost.getUserName(), currentWriteHost.getPassword(), databaseName, i++);
			dataNodeList.add(dataNode);
		}
		return dataNodeList;
	}

	// Verifies the old and new routing rules are compatible for migration.
	private void checkRuleConfig(ShardRule oldShardRule, ShardRule newShardRule, String schemaName, String tableName) {
		if (!oldShardRule.getColumnName().equalsIgnoreCase(newShardRule.getColumnName())) {
			throw new ConfigException(schemaName + ":" + tableName + " old & new partition column is not same!");
		}
		AbstractShardAlgorithm oldShardAlgorithm = oldShardRule.getShardAlgorithm();
		AbstractShardAlgorithm newShardAlgorithm = newShardRule.getShardAlgorithm();
		// NOTE(review): isAssignableFrom also accepts a SUBCLASS of the old
		// algorithm, not only the identical class — confirm that is intended.
		if (!oldShardAlgorithm.getClass().isAssignableFrom(newShardAlgorithm.getClass())) {
			throw new ConfigException(schemaName + ":" + tableName + " old & new rule Algorithm is not same!");
		}
	}

	// Loads dnindex.properties (active write-host index per dataHost).
	private Properties loadDnIndexProps() {
		Properties properties = new Properties();
		InputStream inputStream = null;
		try {
			inputStream = ConfigComparer.class.getResourceAsStream(DN_INDEX_FILE);
			if (inputStream == null) {
				// Missing resource yields null; avoid an NPE from load(null).
				throw new ConfigException("please check file \"dnindex.properties\" is on the classpath");
			}
			properties.load(inputStream);
		} catch (ConfigException e) {
			// Re-throw our own error unchanged instead of re-wrapping it.
			throw e;
		} catch (Exception e) {
			throw new ConfigException("please check file \"dnindex.properties\" " + e.getMessage());
		} finally {
			try {
				if (inputStream != null) {
					inputStream.close();
				}
			} catch (IOException e) {
				throw new ConfigException(e.getMessage());
			}
		}
		return properties;
	}
}
