package drds.server.config.loader.xml;

import java.io.IOException;
import java.io.InputStream;
import java.net.URI;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

import drds.server.config.loader.SchemaLoader;
import drds.server.config.model.GroupDataSourceConfig;
import drds.server.config.model.GroupDataSourceNodeConfig;
import drds.server.config.model.MysqlInstanceInfo;
import drds.server.config.model.Schema;
import drds.server.config.model.ShardRule;
import drds.server.config.model.Table;
import drds.server.config.model.TableRule;
import drds.server.config.util.ConfigException;
import drds.server.config.util.ConfigUtil;
import drds.server.datasource.GroupDataSource;
import drds.server.route.shard_algorithm.AbstractShardAlgorithm;
import drds.server.route.shard_algorithm.TableRuleAware;
import drds.server.util.ObjectUtil;

/**
 * Loads the sharding configuration from XML: table rules from rule.xml via
 * {@link TableRuleLoader}, then hosts, data nodes and schemas from schema.xml
 * (validated against schema.dtd), producing the in-memory routing config.
 */
@SuppressWarnings("unchecked")
public class XmlSchemaLoader implements SchemaLoader {

	private static final Logger LOGGER = LoggerFactory.getLogger(XmlSchemaLoader.class);

	private final static String DEFAULT_DTD = "/schema.dtd";
	private final static String DEFAULT_XML = "/schema.xml";

	private final Map<String, TableRule> tableRuleMap;
	private final Map<String, GroupDataSourceConfig> groupDataSourceConfigMap;
	private final Map<String, GroupDataSourceNodeConfig> groupDataSourceNodeConfigMap;
	private final Map<String, Schema> schemaMap;

	/**
	 * Loads rule.xml first (table rules), then schema.xml (hosts, data nodes,
	 * schemas).
	 *
	 * @param schemaFilePath    classpath location of schema.xml, or null for the default
	 * @param tableRuleFilePath classpath location of rule.xml, passed to TableRuleLoader
	 * @throws ConfigException if either configuration file is invalid
	 */
	public XmlSchemaLoader(String schemaFilePath, String tableRuleFilePath) {
		// Load rule.xml first; the table rules are needed both to validate the
		// schema configuration below and later for shard route calculation.
		TableRuleLoader tableRuleLoader = new TableRuleLoader(tableRuleFilePath);
		this.tableRuleMap = tableRuleLoader.getTableRuleMap();
		// (Removed the dead store "tableRuleLoader = null" — nulling a local
		// variable does not release it any earlier than scope exit.)
		this.groupDataSourceConfigMap = new HashMap<String, GroupDataSourceConfig>();
		this.groupDataSourceNodeConfigMap = new HashMap<String, GroupDataSourceNodeConfig>();
		this.schemaMap = new HashMap<String, Schema>();
		// Parse and load the schema configuration.
		this.load(DEFAULT_DTD, schemaFilePath == null ? DEFAULT_XML : schemaFilePath);
	}

	/**
	 * Creates a loader using the default classpath resources
	 * ({@code /schema.dtd} and {@code /schema.xml}).
	 */
	public XmlSchemaLoader() {
		this(null, null);
	}

	@Override
	public Map<String, TableRule> getTableRuleMap() {
		// Live (mutable) map of rules loaded from rule.xml; TableRuleAware
		// processing in loadTableMap may add/remove entries after construction.
		return tableRuleMap;
	}

	/**
	 * @return the configured data hosts by name; an immutable empty map when
	 *         none are configured, otherwise the live map
	 */
	@Override
	public Map<String, GroupDataSourceConfig> getGroupDataSourceConfigMap() {
		// Collections.emptyMap() is typed via inference against the return
		// type, so the previous unchecked cast over the ternary is unnecessary.
		if (groupDataSourceConfigMap.isEmpty()) {
			return Collections.emptyMap();
		}
		return groupDataSourceConfigMap;
	}

	/**
	 * @return the configured data nodes by name; an immutable empty map when
	 *         none are configured, otherwise the live map
	 */
	@Override
	public Map<String, GroupDataSourceNodeConfig> getGroupDataSourceNodeConfigMap() {
		// No unchecked cast needed: Collections.emptyMap() infers its type.
		if (groupDataSourceNodeConfigMap.isEmpty()) {
			return Collections.emptyMap();
		}
		return groupDataSourceNodeConfigMap;
	}

	/**
	 * @return the configured schemas by name; an immutable empty map when none
	 *         are configured, otherwise the live map
	 */
	@Override
	public Map<String, Schema> getSchemaMap() {
		// No unchecked cast needed: Collections.emptyMap() infers its type.
		if (schemaMap.isEmpty()) {
			return Collections.emptyMap();
		}
		return schemaMap;
	}

	/**
	 * Parses the schema XML (validated against the DTD) and populates hosts,
	 * data nodes and schemas — in that order, because each stage validates
	 * references against the previous one.
	 *
	 * @param dtdFile classpath location of the DTD resource
	 * @param xmlFile classpath location of the schema XML resource
	 * @throws ConfigException if the configuration is invalid or unreadable
	 */
	private void load(String dtdFile, String xmlFile) {
		InputStream dtdFileInputStream = null;
		InputStream schemaFileInputStream = null;
		try {
			dtdFileInputStream = XmlSchemaLoader.class.getResourceAsStream(dtdFile);
			schemaFileInputStream = XmlSchemaLoader.class.getResourceAsStream(xmlFile);
			Element rootElement = ConfigUtil.getDocument(dtdFileInputStream, schemaFileInputStream).getDocumentElement();
			// Hosts first: data nodes reference hosts.
			loadHosts(rootElement);
			// Then data nodes: schemas/tables reference data nodes.
			loadDataNodes(rootElement);
			// Finally schemas.
			loadSchemas(rootElement);
		} catch (ConfigException e) {
			throw e;
		} catch (Exception e) {
			throw new ConfigException(e);
		} finally {
			closeQuietly(dtdFileInputStream);
			closeQuietly(schemaFileInputStream);
		}
	}

	/**
	 * Closes a config stream, logging (rather than silently swallowing) any
	 * close failure; close errors are non-fatal after the document is parsed.
	 */
	private static void closeQuietly(InputStream in) {
		if (in != null) {
			try {
				in.close();
			} catch (IOException ignored) {
				LOGGER.debug("failed to close config input stream", ignored);
			}
		}
	}

	/**
	 * Loads every {@code <host>} element into {@link #groupDataSourceConfigMap}.
	 * Each host declares connection-pool bounds, balance/switch behaviour, a
	 * heartbeat statement and its write hosts, each optionally with read hosts.
	 *
	 * @param rootElement document root of schema.xml
	 * @throws ConfigException on duplicate host names
	 */
	private void loadHosts(Element rootElement) {
		NodeList hostNodeList = rootElement.getElementsByTagName("host");
		for (int i = 0, length = hostNodeList.getLength(); i < length; ++i) {

			Element element = (Element) hostNodeList.item(i);
			String name = element.getAttribute("name");
			// Reject duplicate host names.
			if (groupDataSourceConfigMap.containsKey(name)) {
				// BUGFIX: message previously read "dataHost name Xduplicated!"
				// (missing space before "duplicated").
				throw new ConfigException("dataHost name " + name + " duplicated!");
			}
			// Connection pool bounds.
			int minSize = Integer.parseInt(element.getAttribute("min_size"));
			int maxSize = Integer.parseInt(element.getAttribute("max_size"));

			/**
			 * <pre>
			 * Read/write load balancing:
			 * balance="0" - no read/write splitting; all reads go to the currently available writeHost.
			 * balance="1" - all readHosts and stand-by writeHosts share the select load.
			 * balance="2" - all reads are distributed randomly across writeHosts and readHosts.
			 * balance="3" - reads go randomly to the readHosts of each writeHost; writeHosts take no read load.
			 * </pre>
			 */
			int balanceType = Integer.parseInt(element.getAttribute("balance_type"));
			/**
			 * <pre>
			 * Failover switch type:
			 * -1 - never switch automatically
			 *  1 - default: automatic switch
			 *  2 - switch based on MySQL replication state (heartbeat: show slave status)
			 *  3 - switch mechanism for MySQL Galera cluster
			 * </pre>
			 */
			String switchTypeString = element.getAttribute("switch_type");
			int switchType = switchTypeString.equals("") ? -1 : Integer.parseInt(switchTypeString);
			// Replication-lag threshold for slaves; -1 when unset.
			String slaveThresholdString = element.getAttribute("slave_threshold");
			int slaveThreshold = slaveThresholdString.equals("") ? -1 : Integer.parseInt(slaveThresholdString);

			// When > 0, temporary read service stays available even if the writeHost goes down.
			String tempReadHostAvailableString = element.getAttribute("temp_read_host_available");
			boolean tempReadHostAvailable = !tempReadHostAvailableString.equals("") && Integer.parseInt(tempReadHostAvailableString) > 0;
			// Write type: only 0 is supported — all writes go to the first configured writeHost.

			String filters = element.getAttribute("filters");
			String logTimeString = element.getAttribute("log_time");
			long logTime = "".equals(logTimeString) ? GroupDataSource.LONG_TIME : Long.parseLong(logTimeString);
			// Heartbeat statement used for health checks.
			String heartbeatSql = element.getElementsByTagName("heartbeat").item(0).getTextContent();
			// Write hosts, each possibly carrying read hosts keyed by the write-host index.
			NodeList writeHostNodeList = element.getElementsByTagName("write_host");
			MysqlInstanceInfo[] writeHosts = new MysqlInstanceInfo[writeHostNodeList.getLength()];
			Map<Integer, MysqlInstanceInfo[]> readHostsMap = new HashMap<Integer, MysqlInstanceInfo[]>(2);
			for (int j = 0; j < writeHosts.length; j++) {
				Element writeHostElement = (Element) writeHostNodeList.item(j);
				writeHosts[j] = createHost(name, writeHostElement, maxSize, minSize, filters, logTime);
				NodeList readHostNodeList = writeHostElement.getElementsByTagName("read_host");
				// Collect each read host belonging to this write host.
				if (readHostNodeList.getLength() != 0) {
					MysqlInstanceInfo[] readHosts = new MysqlInstanceInfo[readHostNodeList.getLength()];
					for (int k = 0; k < readHosts.length; k++) {
						Element readHostElement = (Element) readHostNodeList.item(k);
						readHosts[k] = createHost(name, readHostElement, maxSize, minSize, filters, logTime);
					}
					readHostsMap.put(j, readHosts);
				}
			}

			GroupDataSourceConfig groupDataSourceConfig = new GroupDataSourceConfig(name, writeHosts, readHostsMap, switchType, slaveThreshold, tempReadHostAvailable);

			groupDataSourceConfig.setMaxSize(maxSize);
			groupDataSourceConfig.setMinCon(minSize);
			groupDataSourceConfig.setBalanceType(balanceType);
			groupDataSourceConfig.setHearbeatSQL(heartbeatSql);
			groupDataSourceConfig.setFilters(filters);
			groupDataSourceConfig.setLogTime(logTime);
			groupDataSourceConfigMap.put(groupDataSourceConfig.getName(), groupDataSourceConfig);
		}
	}

	/**
	 * Loads every {@code <schema>} element into {@link #schemaMap}. A schema
	 * must either declare tables or set a default {@code data_node}.
	 *
	 * @param rootElement document root of schema.xml
	 * @throws ConfigException on duplicates, unknown data nodes, or a schema
	 *                         with neither tables nor a default data node
	 */
	private void loadSchemas(Element rootElement) {
		NodeList schemaNodeList = rootElement.getElementsByTagName("schema");
		for (int i = 0, length = schemaNodeList.getLength(); i < length; i++) {
			Element schemaElement = (Element) schemaNodeList.item(i);
			// Read the schema attributes.
			String name = schemaElement.getAttribute("name");
			String dataNode = schemaElement.getAttribute("data_node");
			String checkSQLSchema = schemaElement.getAttribute("check_sql_schema");
			String sqlMaxLimitString = schemaElement.getAttribute("sql_max_limit");
			// Result-set row limit; -1 means no limit configured.
			int sqlMaxLimit = -1;
			if (sqlMaxLimitString != null && !sqlMaxLimitString.isEmpty()) {
				sqlMaxLimit = Integer.parseInt(sqlMaxLimitString);
			}

			// Validate the default data node if present; normalise "" to null.
			if (dataNode != null && !dataNode.isEmpty()) {
				List<String> dataNodeList = new ArrayList<String>(1);
				dataNodeList.add(dataNode);
				checkDataNodeExists(dataNodeList);
			} else {
				dataNode = null;
			}
			// Load every table declared under this schema.
			Map<String, Table> tableMap = loadTableMap(schemaElement);
			// Reject duplicate schema names.
			if (schemaMap.containsKey(name)) {
				throw new ConfigException("schema " + name + " duplicated!");
			}

			// A schema without tables must at least declare a default data node.
			if (dataNode == null && tableMap.size() == 0) {
				throw new ConfigException("schema " + name + " didn't config tables,so you must set dataNode property!");
			}

			Schema schema = new Schema(name, dataNode, tableMap, sqlMaxLimit, "true".equalsIgnoreCase(checkSQLSchema));

			// (Removed a dead loop over groupDataSourceNodeConfigMap whose only
			// statement was an empty if-body — it performed lookups with no
			// effect.)
			schemaMap.put(name, schema);
		}
	}

	/**
	 * Expands a dynamic date-suffixed table name into a comma-separated list
	 * of concrete table names.
	 *
	 * Supported formats (the count parameter is inclusive, so N yields N+1 names):
	 *   YYYYMM:   "yyyymm,&lt;startYear&gt;,&lt;startMonth&gt;,&lt;monthCount&gt;"             e.g. yyyymm,2015,01,60
	 *   YYYYMMDD: "yyyymmdd,&lt;startYear&gt;,&lt;startMonth&gt;,&lt;startDay&gt;,&lt;dayCount&gt;" e.g. yyyymmdd,2015,01,10,50
	 *
	 * @param tableName       base table name to suffix
	 * @param tableNameSuffix suffix specification as described above
	 * @return comma-separated list of suffixed table names
	 * @throws ConfigException if the suffix format is unsupported
	 */
	private String doTableNameSuffix(String tableName, String tableNameSuffix) {

		String[] params = tableNameSuffix.split(",");
		String suffixFormat = params[0].toUpperCase();
		if (suffixFormat.equals("YYYYMM")) {
			// Parse start year/month and the (inclusive) month count.
			int yyyy = Integer.parseInt(params[1]);
			int mm = Integer.parseInt(params[2]);
			int mmEnd = Integer.parseInt(params[3]);

			Calendar calendar = Calendar.getInstance();
			calendar.set(Calendar.YEAR, yyyy);
			calendar.set(Calendar.MONTH, mm - 1);
			// BUGFIX: this was Calendar.DATE = 0, which a lenient Calendar
			// resolves to the last day of the PREVIOUS month, so the first
			// generated suffix was one month before the configured start.
			calendar.set(Calendar.DATE, 1);
			return buildSuffixedNames(tableName, new SimpleDateFormat("yyyyMM"), calendar, Calendar.MONTH, mmEnd);

		} else if (suffixFormat.equals("YYYYMMDD")) {
			// Parse start year/month/day and the (inclusive) day count.
			int yyyy = Integer.parseInt(params[1]);
			int mm = Integer.parseInt(params[2]);
			int dd = Integer.parseInt(params[3]);
			int ddEnd = Integer.parseInt(params[4]);

			Calendar calendar = Calendar.getInstance();
			calendar.set(Calendar.YEAR, yyyy);
			calendar.set(Calendar.MONTH, mm - 1);
			calendar.set(Calendar.DATE, dd);
			return buildSuffixedNames(tableName, new SimpleDateFormat("yyyyMMdd"), calendar, Calendar.DATE, ddEnd);
		} else {
			// Was a bare RuntimeException with a non-English message; use the
			// loader's conventional exception type with a descriptive message.
			throw new ConfigException("unsupported tableNameSuffix format: " + tableNameSuffix);
		}
	}

	/**
	 * Appends {@code end + 1} date-suffixed copies of {@code tableName} to a
	 * comma-separated list, advancing {@code calendar} by one {@code stepField}
	 * unit (month or day) per name.
	 */
	private String buildSuffixedNames(String tableName, SimpleDateFormat format, Calendar calendar, int stepField, int end) {
		StringBuilder names = new StringBuilder();
		for (int index = 0; index <= end; index++) {
			if (index > 0) {
				names.append(",");
			}
			names.append(tableName).append(format.format(calendar.getTime()));
			calendar.add(stepField, 1);
		}
		return names.toString();
	}

	/**
	 * Loads every {@code <table>} element under the given schema element.
	 * Handles dynamic date-suffixed names ({@code nameSuffix}), multi-name
	 * tables ("a,b,c"), TableRuleAware shard functions (which receive a
	 * per-table copy of their rule), the {@code distribute(...)} data-node
	 * wrapper, and child (ER) tables.
	 *
	 * @param rootElement the enclosing schema element
	 * @return map of lower-cased table name to its Table config
	 */
	private Map<String, Table> loadTableMap(Element rootElement) {
		Map<String, Table> tableMap = new HashMap<String, Table>();
		NodeList tableNodeList = rootElement.getElementsByTagName("table");
		for (int i = 0; i < tableNodeList.getLength(); i++) {
			Element tableElement = (Element) tableNodeList.item(i);
			String name = tableElement.getAttribute("name").toLowerCase();
			// Dynamic date table support: nameSuffix expands the single name
			// into a comma-separated list of date-suffixed names.
			String tableNameSuffix = tableElement.getAttribute("nameSuffix").toUpperCase();
			if (!"".equals(tableNameSuffix)) {
				if (name.split(",").length > 1) {
					throw new ConfigException("nameSuffix " + tableNameSuffix + ", require name parameter cannot multiple breaks!");
				}
				// The suffix prefix selects the date format (YYYYMM / YYYYMMDD).
				name = doTableNameSuffix(name, tableNameSuffix);
			}
			// Primary key: used later for route analysis and auto-increment keys.

			String primaryKey = tableElement.hasAttribute("primaryKey") ? tableElement.getAttribute("primaryKey").toLowerCase() : null;
			// Whether the primary key auto-increments (enables the global
			// sequence handler); defaults to false.
			boolean autoIncrement = false;
			if (tableElement.hasAttribute("auto_increment")) {
				autoIncrement = Boolean.parseBoolean(tableElement.getAttribute("auto_increment"));
			}
			// Whether a result-set LIMIT should be appended; defaults to true.
			boolean needAddLimit = true;
			if (tableElement.hasAttribute("need_add_limit")) {
				needAddLimit = Boolean.parseBoolean(tableElement.getAttribute("need_add_limit"));
			}
			// Table type: "global" marks a global (replicated) table.
			String tableTypeString = tableElement.hasAttribute("type") ? tableElement.getAttribute("type") : null;
			int tableType = Table.TYPE_NOT_GLOBAL_TABLE_AND_ER_PARENT_TABLE;
			if ("global".equalsIgnoreCase(tableTypeString)) {
				tableType = Table.TYPE_GLOBAL_TABLE;
			}

			// Resolve the shard rule referenced by table_rule_id, if any.
			TableRule tableRule = null;
			if (tableElement.hasAttribute("table_rule_id")) {
				String tableRuleId = tableElement.getAttribute("table_rule_id");
				tableRule = tableRuleMap.get(tableRuleId);
				if (tableRule == null) {
					throw new ConfigException("rule " + tableRuleId + " is not found!");
				}
			}

			String[] tableNames = name.split(",");
			if (tableNames == null) {
				throw new ConfigException("table name is not found!");
			}
			// Data nodes this table is distributed across.
			String dataNode = tableElement.getAttribute("dataNode");
			// distribute(...) wrapper: re-interleaves the data nodes across hosts.
			String distPrex = "distribute(";
			boolean distTableDns = dataNode.startsWith(distPrex);
			if (distTableDns) {
				dataNode = dataNode.substring(distPrex.length(), dataNode.length() - 1);
			}
			// Sub-table (table splitting) support.
			String subTables = tableElement.getAttribute("subTables");

			for (int j = 0; j < tableNames.length; j++) {

				String tableName = tableNames[j];
				TableRule $tableRule = tableRule;
				if ($tableRule != null) {
					// TableRuleAware shard functions hold per-table state, so
					// each table gets its own deep copy of the rule.
					ShardRule shardRule = $tableRule.getShardRule();
					if (shardRule.getShardAlgorithm() instanceof TableRuleAware) {
						$tableRule = (TableRule) ObjectUtil.copyObject($tableRule);
						// NOTE(review): removes the entry under the copy's (still
						// original) name — after the first table the shared rule is
						// gone from tableRuleMap; confirm this is intended.
						tableRuleMap.remove($tableRule.getName());
						String newRuleName = $tableRule.getName() + "_" + tableName;
						$tableRule.setName(newRuleName);
						TableRuleAware tableRuleAware = (TableRuleAware) $tableRule.getShardRule().getShardAlgorithm();
						tableRuleAware.setRuleName(newRuleName);
						tableRuleAware.setTableName(tableName);
						$tableRule.getShardRule().getShardAlgorithm().init();
						tableRuleMap.put(newRuleName, $tableRule);
					}
				}

				Table table = new Table(tableName, tableType, primaryKey, autoIncrement, needAddLimit, dataNode, ($tableRule != null) ? $tableRule.getShardRule() : null, false, null, null, null, null, null, subTables);

				checkDataNodeExists(table.getDataNodeList());
				// Validate that the shard rule matches the table's data nodes.
				if (table.getShardRule() != null) {
					checkRuleSuitTable(table);
				}

				if (distTableDns) {
					distributeDataNodes(table.getDataNodeList());
				}
				// Reject duplicate table names.
				if (tableMap.containsKey(table.getName())) {
					throw new ConfigException("table " + tableName + " duplicated!");
				}
				// Register the table.
				tableMap.put(table.getName(), table);
			}
			// Child tables are only allowed when a single (comma-free) table
			// name was configured.
			if (tableNames.length == 1) {
				Table table = tableMap.get(tableNames[0]);
				// process child tables
				processChildTables(tableMap, table, dataNode, tableElement);
			}
		}
		return tableMap;
	}

	/**
	 * Re-orders the data nodes round-robin across their hosts, in place.
	 * Example: host1 owns dn1,dn2; host2 owns dn100,dn101; host3 owns
	 * dn300,dn301 — the result is dn1,dn100,dn300,dn2,dn101,dn301, so
	 * consecutive nodes land on different hosts.
	 *
	 * @param dataNodeList the node list to re-order (modified in place)
	 */
	private void distributeDataNodes(ArrayList<String> dataNodeList) {
		// Bucket the nodes by owning host, keeping per-host order, and track
		// the largest bucket so we know how many rounds are needed.
		Map<String, ArrayList<String>> nodesByHost = new HashMap<String, ArrayList<String>>(groupDataSourceConfigMap.size());
		int maxPerHost = 0;
		for (String node : dataNodeList) {
			String host = groupDataSourceNodeConfigMap.get(node).getDataHost();
			ArrayList<String> bucket = nodesByHost.get(host);
			if (bucket == null) {
				bucket = new ArrayList<String>();
				nodesByHost.put(host, bucket);
			}
			bucket.add(node);
			if (bucket.size() > maxPerHost) {
				maxPerHost = bucket.size();
			}
		}

		// Interleave: take one node from each host per round until exhausted.
		ArrayList<String> interleaved = new ArrayList<String>(dataNodeList.size());
		for (int round = 0; round < maxPerHost; round++) {
			for (ArrayList<String> bucket : nodesByHost.values()) {
				if (round < bucket.size()) {
					interleaved.add(bucket.get(round));
				}
			}
		}
		dataNodeList.clear();
		dataNodeList.addAll(interleaved);
	}

	/**
	 * Parses the {@code <child_table>} children of a table element into ER
	 * child tables, recursing for grandchildren. Child tables inherit the
	 * parent's data nodes and are linked via join_key / parent_key.
	 *
	 * @param parentTable the already-created parent Table
	 * @param dataNodes   the parent's data node spec (inherited by children)
	 * @throws ConfigException on duplicate table names
	 */
	private void processChildTables(Map<String, Table> tableMap, Table parentTable, String dataNodes, Element tableElement) {

		NodeList childTableNodeList = tableElement.getChildNodes();
		for (int j = 0; j < childTableNodeList.getLength(); j++) {
			Node childTableNode = childTableNodeList.item(j);
			if (!childTableNode.getNodeName().equals("child_table")) {
				continue;
			}
			Element childTableElement = (Element) childTableNode;
			// Read the child table attributes.
			String childTableName = childTableElement.getAttribute("name").toLowerCase();
			String primaryKey = childTableElement.hasAttribute("primary_key") ? childTableElement.getAttribute("primary_key").toLowerCase() : null;

			boolean autoIncrement = false;
			if (childTableElement.hasAttribute("auto_increment")) {
				autoIncrement = Boolean.parseBoolean(childTableElement.getAttribute("auto_increment"));
			}
			boolean needAddLimit = true;
			// BUGFIX: the guard checked hasAttribute("needAddLimit") (camelCase)
			// while the value was read from "need_add_limit" (snake_case, the
			// convention every other child attribute uses), so the setting was
			// never picked up.
			if (childTableElement.hasAttribute("need_add_limit")) {
				needAddLimit = Boolean.parseBoolean(childTableElement.getAttribute("need_add_limit"));
			}
			String subTables = childTableElement.getAttribute("sub_tables");
			// Join key of the child and the matching key on the parent; the
			// parent/child relationship is established through these.
			String joinKey = childTableElement.getAttribute("join_key").toLowerCase();
			String parentKey = childTableElement.getAttribute("parent_key").toLowerCase();
			// Direct parent is used as the root parent — no recursive root lookup.
			Table rootParentTable = parentTable;
			String rootParentTablekeyName = parentKey;
			Table table = new Table(childTableName, Table.TYPE_NOT_GLOBAL_TABLE_AND_ER_PARENT_TABLE, primaryKey, autoIncrement, needAddLimit, dataNodes, null, true, parentTable, joinKey, parentKey, rootParentTable, rootParentTablekeyName, subTables);

			if (tableMap.containsKey(table.getName())) {
				throw new ConfigException("table " + table.getName() + " duplicated!");
			}
			tableMap.put(table.getName(), table);
			// Recurse for the child's own child tables.
			processChildTables(tableMap, table, dataNodes, childTableElement);
		}
	}

	/**
	 * Verifies that every referenced data node has been declared by a
	 * {@code <data_node>} element. A null or empty collection is accepted.
	 *
	 * @throws ConfigException if a referenced node name is unknown
	 */
	private void checkDataNodeExists(Collection<String> nodes) {
		if (nodes == null || nodes.size() < 1) {
			return;
		}
		for (String node : nodes) {
			// BUGFIX: the condition was inverted — it threw "not found" for
			// nodes that DO exist and silently accepted unknown ones.
			if (!groupDataSourceNodeConfigMap.containsKey(node)) {
				throw new ConfigException("dataNode '" + node + "' is not found!");
			}
		}
	}

	/**
	 * Checks that a shard table's rule configuration matches its data nodes —
	 * mainly that the shard algorithm's partition count is compatible with the
	 * table's data node count. For example a table sharded over 2 data nodes
	 * with a mod function of count 3 is rejected (partitions &gt; nodes);
	 * partitions &lt; nodes is tolerated with a warning (redundant nodes).
	 *
	 * @throws ConfigException when the table has fewer data nodes than the
	 *                         rule's partition count
	 */
	private void checkRuleSuitTable(Table table) {
		AbstractShardAlgorithm shardAlgorithm = table.getShardRule().getShardAlgorithm();
		int suitValue = shardAlgorithm.suitableFor(table);
		switch (suitValue) {
		case -1:
			// Fewer data nodes than partitions: fatal configuration error.
			throw new ConfigException("Illegal table conf : table [ " + table.getName() + " ] rule function [ " + table.getShardRule().getShardAlgorithmId() + " ] partition size : " + table.getShardRule().getShardAlgorithm().getShardNum() + " > table datanode size : " + table.getDataNodeList().size() + ", please make sure table datanode size = function partition size");
		case 0:
			// table datanode size == rule function partition size
			break;
		case 1:
			// Some data nodes are redundant: non-fatal, warn only.
			// BUGFIX: this was logged at ERROR level although the handling
			// (and the original comment) clearly intend a warning.
			LOGGER.warn("table conf : table [ {} ] rule function [ {} ] partition size : {} < table datanode size : {} , this cause some datanode to be redundant", table.getName(), table.getShardRule().getShardAlgorithmId(), String.valueOf(table.getShardRule().getShardAlgorithm().getShardNum()), String.valueOf(table.getDataNodeList().size()));
			break;
		default:
			break;
		}
	}

	/**
	 * Loads every {@code <data_node>} element. The name, host and database
	 * attributes may each expand to several values via ',', '$' and '-'
	 * (e.g. {@code name="dn1$0-75" host="localhost$1-10" database="db$0-759"}),
	 * in which case database-count * host-count must equal the name count and
	 * every host is paired with every database.
	 *
	 * @param root document root of schema.xml
	 * @throws ConfigException on empty attributes or mismatched counts
	 */
	private void loadDataNodes(Element root) {
		NodeList dataNodeNodeList = root.getElementsByTagName("data_node");
		int count = dataNodeNodeList.getLength();
		for (int index = 0; index < count; index++) {
			Element element = (Element) dataNodeNodeList.item(index);
			String dataNodeNameString = element.getAttribute("name");
			String hostString = element.getAttribute("host");
			String databaseString = element.getAttribute("database");

			// All three attributes are mandatory.
			if (empty(dataNodeNameString) || empty(databaseString) || empty(hostString)) {
				throw new ConfigException("dataNode " + dataNodeNameString + " define error ,attribute can't be empty");
			}

			// Expand range/list notation (',', '$', '-') for all three attributes.
			String[] dataNodeNames = drds.server.util.SplitUtil.split(dataNodeNameString, ',', '$', '-');
			String[] databases = drds.server.util.SplitUtil.split(databaseString, ',', '$', '-');
			String[] hosts = drds.server.util.SplitUtil.split(hostString, ',', '$', '-');

			boolean multiple = dataNodeNames.length > 1;
			if (multiple && dataNodeNames.length != databases.length * hosts.length) {
				throw new ConfigException("dataNode " + dataNodeNameString + " define error ,dnNames.length must be=databases.length*hostStrings.length");
			}

			if (!multiple) {
				// Single node: use the raw attribute values directly.
				createDataNode(dataNodeNameString, databaseString, hostString);
			} else {
				// Pair every host with every database, then assign the pairs
				// to the node names positionally.
				List<String[]> hostDatabaseList = mergerHostDatabase(hosts, databases);
				int pairIndex = 0;
				for (String dataNodeName : dataNodeNames) {
					String[] hostDatabase = hostDatabaseList.get(pairIndex++);
					createDataNode(dataNodeName, hostDatabase[1], hostDatabase[0]);
				}
			}
		}
	}

	/**
	 * Pairs every host with every database in host-major order, so each host
	 * ends up owning every database name.
	 *
	 * @param hosts     expanded host names
	 * @param databases expanded database names
	 * @return list of {host, database} pairs, hosts.length * databases.length long
	 */
	private List<String[]> mergerHostDatabase(String[] hosts, String[] databases) {
		List<String[]> pairs = new ArrayList<String[]>();
		for (String host : hosts) {
			for (String database : databases) {
				pairs.add(new String[] { host, database });
			}
		}
		return pairs;
	}

	/**
	 * Creates one data node config and registers it, after verifying that the
	 * name is unique and the referenced host has been declared.
	 *
	 * @throws ConfigException on a duplicate node name or unknown host
	 */
	private void createDataNode(String dataNodeName, String databaseName, String hostName) {
		GroupDataSourceNodeConfig nodeConfig = new GroupDataSourceNodeConfig(dataNodeName, databaseName, hostName);
		String registeredName = nodeConfig.getName();

		// Reject duplicate data node names.
		if (groupDataSourceNodeConfigMap.containsKey(registeredName)) {
			throw new ConfigException("dataNode " + registeredName + " duplicated!");
		}
		// The referenced host must have been declared by a <host> element.
		if (!groupDataSourceConfigMap.containsKey(hostName)) {
			throw new ConfigException("dataNode " + dataNodeName + " reference dataHost:" + hostName + " not exists!");
		}
		groupDataSourceNodeConfigMap.put(registeredName, nodeConfig);
	}

	/** Returns true when the attribute value is missing or the empty string. */
	private boolean empty(String dnName) {
		return dnName == null || dnName.isEmpty();
	}

	/**
	 * Builds a MysqlInstanceInfo from a {@code <write_host>} / {@code <read_host>}
	 * element. The url attribute is expected to be a JDBC url
	 * ("jdbc:mysql://host:port/..."); host and port are extracted from the URI
	 * obtained by stripping the "jdbc:" scheme prefix.
	 *
	 * @param dataHost enclosing host name, used only in error messages
	 * @throws ConfigException on missing attributes or an unparseable url
	 */
	private MysqlInstanceInfo createHost(String dataHost, Element element, int maxSize, int minSize, String filters, long logTime) {

		String id = element.getAttribute("host_name");
		String nodeUrl = element.getAttribute("url");
		String username = element.getAttribute("user");
		String password = element.getAttribute("password");
		String weightString = element.getAttribute("weight");
		// Fall back to the default weight when the attribute is absent.
		int weight = "".equals(weightString) ? GroupDataSource.WEIGHT : Integer.parseInt(weightString);

		if (empty(id) || empty(nodeUrl) || empty(username)) {
			throw new ConfigException("dataHost " + dataHost + " define error,some attributes of this element is empty: " + id);
		}
		// Strip the "jdbc:" scheme so the remainder parses as a plain URI
		// (named constant replaces the previous magic number 5).
		final String jdbcPrefix = "jdbc:";
		URI url;
		try {
			url = new URI(nodeUrl.substring(jdbcPrefix.length()));
		} catch (Exception e) {
			throw new ConfigException("invalid jdbc url " + nodeUrl + " of " + dataHost);
		}
		MysqlInstanceInfo mii = new MysqlInstanceInfo(id, url.getHost(), url.getPort(), username, password);
		mii.setMaxSize(maxSize);
		mii.setMinSize(minSize);
		mii.setWeight(weight); // read-load weight
		return mii;
	}

}
