package junior.util.address;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.List;
import java.util.NavigableSet;
import java.util.Set;
import java.util.TreeMap;
import java.util.TreeSet;

import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.tuple.Pair;
import org.springframework.util.CollectionUtils;

import junior.util.lambda.FuncLRO;
import junior.util.lambda.Function;
import junior.util.wrap.Scored;

/**
 * Tokenizes and labels postal addresses.<br/>
 * When paired with an {@link AddressIndex}, tokenization is enhanced and
 * address matching against the index becomes possible.
 * 
 * @author ZhangXiaoye
 * @date 2016年11月19日 下午3:52:16
 */
public class AddressTokenizer {
	
	/**
	 * Maps every known level suffix (e.g. "省", "市") to its {@link AddressTokenLevel}.
	 * Sorted so {@link #_tokenize} can do prefix-style lookups via {@code headMap}.
	 */
	final TreeMap<String, AddressTokenLevel> suffixMap = new TreeMap<String, AddressTokenLevel>();
	
	/**
	 * Builds the suffix lookup table from all {@link AddressTokenLevel} definitions.
	 */
	public AddressTokenizer(){
		for(AddressTokenLevel level: AddressTokenLevel.values()){
			for(String suffix: level.suffixs){
				suffixMap.put(suffix, level);
			}
		}
	}
	
	/**
	 * Matches an address against an index.
	 * 
	 * @param address the raw address string
	 * @param index the {@link AddressIndex} to match against
	 * @return the tokenization result together with the best-scoring matches:
	 *         Pair(tokenized address, List(Pair(matched address, T)))
	 * @author ZhangXiaoye
	 * @date 2016年11月19日 下午3:55:13
	 */
	public <T> AddressIndexResult<T> match(String address, AddressIndex<T> index){
		
		Address addr = tokenize(address);
		
		if(CollectionUtils.isEmpty(addr.getTokens())){
			return new AddressIndexResult<T>(addr);
		}
		
		// Candidate documents, de-duplicated by docId: the comparator deliberately
		// ignores the score, because the similarity of a document does not depend on
		// which token led us to it.
		Set<Scored<Integer>> docIdSet = new TreeSet<Scored<Integer>>(new Comparator<Scored<Integer>>() {
			@Override
			public int compare(Scored<Integer> o1, Scored<Integer> o2) {
				return Integer.compare(o1.value, o2.value);
			}
		});
		// Documents already scored; avoids recomputing addr.similarity(...) when the
		// same docId is reachable from several tokens (pure optimization — the
		// duplicate add would have been dropped by the TreeSet anyway).
		Set<Integer> seenDocIds = new HashSet<Integer>();
		
		for(int i = addr.getTokens().size() - 1; i >= 0; i --){
			AddressToken at = addr.getTokens().get(i);
			Set<Integer> docIds = index.indexMap.get(at);
			if(docIds != null){
				for(int docId: docIds){
					if(! seenDocIds.add(docId)){
						continue;	// already scored via another token
					}
					final Pair<Address, T> pair = index.list.get(docId);
					final double simi = addr.similarity(pair.getLeft(), index);
					if(simi > 1){
						docIdSet.add(new Scored<Integer>(docId, simi));
					}
				}
			}
		}
		
		if(docIdSet.isEmpty()){
			return new AddressIndexResult<T>(addr);
		}
		
		// Keep only the candidates sharing the highest similarity score.
		double maxScore = 0;
		List<Pair<Address, T>> ret = new ArrayList<Pair<Address,T>>();
		for(Scored<Integer> scoredDocId: docIdSet){
			final int compRes = Double.compare(maxScore, scoredDocId.score);
			if(compRes < 0){
				// Strictly better score: restart the result list.
				ret.clear();
				ret.add(index.list.get(scoredDocId.value));
				maxScore = scoredDocId.score;
			}else if(compRes == 0){
				// Tied with the current best: keep both.
				ret.add(index.list.get(scoredDocId.value));
			}
		}
		
		return new AddressIndexResult<T>(addr, ret);
	}
	
	/**
	 * Tokenizes an address.
	 * 
	 * @param address the raw address string
	 * @return the best-scoring tokenization; an empty {@link Address} for blank input
	 * @author ZhangXiaoye
	 * @date 2016年11月19日 下午3:56:11
	 */
	public Address tokenize(String address){
		return tokenize(address, null);
	}
	
	/**
	 * Tokenizes an address, optionally enhanced by an index.
	 * 
	 * @param address the raw address string
	 * @param index the {@link AddressIndex} used for primary-name lookups, may be {@code null}
	 * @return the best-scoring tokenization; an empty {@link Address} for blank input
	 * @author ZhangXiaoye
	 * @date 2016年11月19日 下午3:56:28
	 */
	public Address tokenize(String address, AddressIndex<?> index){
		if(StringUtils.isBlank(address)){
			return new Address();
		}
		List<Address> addrs = tokenizeAll(address, index);
		if(CollectionUtils.isEmpty(addrs)){
			return new Address();
		}
		if(addrs.size() == 1){
			return addrs.get(0);
		}
		// Reduce to the candidate with the highest score (ties keep the later one).
		return Function.reduce(addrs, new FuncLRO<Address, Address, Address>() {
			@Override
			public Address call(Address lhs, Address rhs) throws RuntimeException {
				if(lhs.getScore() > rhs.getScore()){
					return lhs;
				}else{
					return rhs;
				}
			}
		});
	}
	
	/**
	 * Tokenizes an address, returning every possible segmentation.
	 * 
	 * @param address the raw address string
	 * @return all possible segmentations; empty list for blank input
	 * @author ZhangXiaoye
	 * @date 2016年11月19日 下午3:57:23
	 */
	public List<Address> tokenizeAll(String address){
		return tokenizeAll(address, null);
	}

	/**
	 * Tokenizes an address with optional index enhancement, returning every
	 * possible segmentation.
	 * 
	 * @param address the raw address string
	 * @param index the {@link AddressIndex} used for primary-name lookups, may be {@code null}
	 * @return all possible segmentations; empty list for blank input
	 * @author ZhangXiaoye
	 * @date 2016年11月19日 下午3:57:47
	 */
	public List<Address> tokenizeAll(String address, AddressIndex<?> index){
		if(StringUtils.isBlank(address)){
			return Collections.emptyList();
		}
		List<Address> list = new ArrayList<Address>();
		List<AddressToken> tokens = new ArrayList<AddressToken>();
		_tokenize(address.trim(), 0, tokens, list, index);
		return list;
	}
	
	/**
	 * Depth-first recursive segmentation.
	 * <p>
	 * Tries every split position of {@code remainAddressString}; at each position a
	 * token may be produced from the index's primary-name map and/or from a known
	 * level suffix. Every viable token spawns a recursive branch; complete
	 * segmentations are appended to {@code collector}. If no branch consumes the
	 * string to its end, the whole remainder is emitted as a single UNKNOW token.
	 * 
	 * @param remainAddressString the not-yet-tokenized tail of the address
	 * @param routing routing state of the token-level chain so far
	 * @param previousTokens tokens accumulated on the current branch; recursion passes
	 *        a copy downward, but the UNKNOW fallback appends to this list directly
	 * @param collector receives every complete {@link Address} segmentation
	 * @param index optional index for primary-name lookups, may be {@code null}
	 * @author ZhangXiaoye
	 * @date 2016年11月19日 下午3:58:54
	 */
	private void _tokenize(String remainAddressString, int routing, List<AddressToken> previousTokens, List<Address> collector, AddressIndex<?> index){
		if(remainAddressString.length() <= 0){
			// Nothing left to consume: the branch is complete.
			if(! previousTokens.isEmpty()){
				collector.add(new Address(previousTokens));
			}
			return;
		}
		boolean lastWordBranched = false;
		// Try every split position, starting after the first character.
		for(int i = 1; i < remainAddressString.length(); i ++){
			final String subString = remainAddressString.substring(i);
			// Suffix candidates <= subString, iterated longest-match first.
			NavigableSet<String> set = suffixMap.headMap(subString, true).descendingKeySet();
			// Find the longest known suffix that subString starts with.
			String suffix = null;
			for(String s: set){
				if(subString.startsWith(s)){
					suffix = s;
					break;
				}else if(! s.startsWith(subString.substring(0, 1))){
					// Remaining keys no longer share the first character: stop early.
					break;
				}
			}
			
			Set<AddressToken> possibleTokens = null;
			
			// Tokens obtained from the index's primary-name map.
			final Set<AddressToken> tokenSet = index != null? index.primaryMap.get(remainAddressString.substring(0, i)): null;
			if(tokenSet != null){
				for(AddressToken token: tokenSet){
					Integer tokenRouting = AddressTokenLevel.isPossibleRouting(routing, token.getLevel());
					if(tokenRouting != null){
						if(possibleTokens == null){
							possibleTokens = new HashSet<AddressToken>();
						}
						if(suffix != null && Arrays.binarySearch(token.getLevel().suffixs, suffix) >= 0){
							// The suffix belongs to this token's level: consume it too.
							possibleTokens.add(new AddressToken(token.getLevel(), token.getPrimary() + suffix, suffix, tokenRouting));
						}else if(AddressTokenLevel.isFuzzy(token.getLevel())){
							possibleTokens.add(new AddressToken(token.getLevel(), token.getPrimary(), "", tokenRouting));
						}
					}
				}
			}
			
			// Token derived from a known level suffix.
			// NOTE(review): the original called isPossibleRouting(routing, null) when no
			// suffix matched and would have thrown an NPE below had it ever returned
			// non-null; guarding on suffix != null makes that impossible while keeping
			// behavior identical under the assumed contract (null level -> null routing).
			if(suffix != null){
				final AddressTokenLevel suffixLevel = suffixMap.get(suffix);
				Integer tokenRouting = AddressTokenLevel.isPossibleRouting(routing, suffixLevel);
				if(tokenRouting != null){
					// Split here, consuming the suffix as part of the token.
					AddressToken token = new AddressToken(suffixLevel, remainAddressString.substring(0, i + suffix.length()), suffix, tokenRouting);
					if(possibleTokens == null){
						possibleTokens = new HashSet<AddressToken>();
					}
					possibleTokens.add(token);
				}
			}
			
			// Recurse on every viable split.
			if(possibleTokens != null){
				for(AddressToken token: possibleTokens){
					List<AddressToken> currentTokens = new ArrayList<AddressToken>(previousTokens.size() + 2);
					currentTokens.addAll(previousTokens);
					currentTokens.add(token);
					final String remain = remainAddressString.substring(i + token.getSuffix().length());
					if(remain.length() <= 0){
						// Some branch consumed the string completely; suppress the fallback.
						lastWordBranched = true;
					}
					_tokenize(remain, token.getRouting(), currentTokens, collector, index);
				}
			}
		}
		// Fallback: emit the whole remainder as one UNKNOW token so that the
		// segmentation is still complete even when nothing else matched.
		if(! lastWordBranched){
			AddressToken lastToken = new AddressToken(AddressTokenLevel.UNKNOW, remainAddressString);
			previousTokens.add(lastToken);
			collector.add(new Address(previousTokens));
		}
	}
	
}
