
package risk.analysis;

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import org.apache.log4j.Logger;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.kr.KoreanAnalyzer;
import org.apache.lucene.analysis.kr.KoreanTokenizer;
import org.apache.lucene.analysis.kr.morph.AnalysisOutput;
import org.apache.lucene.analysis.kr.morph.CompoundEntry;
import org.apache.lucene.analysis.kr.morph.MorphAnalyzer;
import org.apache.lucene.analysis.kr.morph.MorphException;
import org.apache.lucene.analysis.kr.morph.PatternConstants;
import org.apache.lucene.analysis.kr.morph.WordEntry;
import org.apache.lucene.analysis.kr.morph.WordSpaceAnalyzer;
import org.apache.lucene.analysis.kr.utils.DictionaryUtil;

import risk.analysis.WordInDoc;
import risk.analysis.WordNotDic;




/**
 * @author Ryu Seung Wan
 */
/**
 * Korean morphological analysis manager built on the Lucene Korean
 * (arirang) analyzer. Splits input text into words found in the
 * dictionary versus unregistered words, with optional word-spacing,
 * misspelling correction, and synonym substitution.
 *
 * @author Ryu Seung Wan
 */
public class KoreanAnalysisManager {

	// NOTE(review): these fields are static but (re)assigned in the constructor,
	// so constructing a second instance replaces the analyzers shared by all
	// instances. Kept as-is to preserve existing behavior.
	private static MorphAnalyzer morph;
	private static WordSpaceAnalyzer wordSpaceAnalyzer = new WordSpaceAnalyzer();
	private static KoreanAnalyzer analyzer;
	private static ArrayList<WordNotDic> arrWordNotDic = null;
	private static ArrayList<WordInDoc>  arrWordInDoc  = null;
	// Whether synonym (parent word) substitution is enabled for this run.
	private boolean synonum = false;

	public KoreanAnalysisManager(){
		morph = new MorphAnalyzer();
		analyzer = new KoreanAnalyzer();
	}

	/**
	 * Debug helper: morph-analyzes each whitespace-separated token of
	 * {@code input} and prints every candidate analysis, its compound-noun
	 * parts, and its score to stdout.
	 *
	 * @param input text to analyze
	 * @throws Exception kept for API compatibility (not thrown directly)
	 */
	public void morphAnalyze(String input) throws Exception{
		String[] tokens = input.split(" ");
		for(String token : tokens) {
			try {
				List<AnalysisOutput> results = morph.analyze(token);
				for(AnalysisOutput o : results) {
					System.out.print(o.toString()+"->");
					for(int i=0;i<o.getCNounList().size();i++){
						System.out.print(o.getCNounList().get(i).getWord()+"/");
					}
					System.out.println("<"+o.getScore()+">");
				}
			} catch (MorphException e) {
				// Best effort: report and continue with the next token.
				e.printStackTrace();
			}
		}
	}


	/**
	 * Morphological analysis entry point.
	 *
	 * <p>The returned map contains:
	 * <ul>
	 *   <li>{@code "0"} — successfully analyzed words ({@code ArrayList<String>})</li>
	 *   <li>{@code "1"} — words that could not be analyzed ({@code ArrayList<String>})</li>
	 * </ul>
	 *
	 * @param text       text to analyze
	 * @param addChk     true to allow duplicate words, false to remove duplicates
	 * @param misWordChk true to correct misspelled words before analysis
	 * @param wordSpace  true to run word-spacing analysis first (for input
	 *                   written without spaces)
	 * @param synChk     true to substitute the parent (synonym) word when one exists
	 * @return map with keys {@code "0"} (analyzed) and {@code "1"} (not analyzed)
	 * @throws RsnfException if the underlying analyzer fails
	 */
	public HashMap<String, ArrayList> doAnalysis(String text, boolean addChk, boolean wordSpace, boolean misWordChk, boolean synChk) throws RsnfException{

		// Korean analysis result map (reused as the per-word result holder below).
		HashMap<String, ArrayList> krAnlRstmap = new HashMap<String, ArrayList>();

		// Accumulators for per-word analysis results.
		ArrayList<WordInDoc> arrWordInDocTemp = new ArrayList<WordInDoc>();
		ArrayList<WordNotDic> arrWordNotDicTemp = new ArrayList<WordNotDic>();

		synonum = synChk;

		try {
			// Re-space the text first when requested (input may lack spaces).
			if(wordSpace){
				List<AnalysisOutput> results = wordSpaceAnalyzer.analyze(text);
				// FIX: build the re-spaced text with StringBuilder instead of
				// repeated String concatenation in a loop (was O(n^2)).
				StringBuilder spaced = new StringBuilder();
				for (AnalysisOutput output : results) {
					// NOTE(review): this lookup had an empty if-body in the
					// original; the call is kept in case getWord() has side
					// effects (e.g. cache warm-up) — TODO confirm intent.
					WordEntry entity = DictionaryUtil.getWord(output.getStem());
					if (entity != null && !entity.isBreak_word()) {
						// intentionally empty (see note above)
					}
					spaced.append(output.getSource().trim()).append(' ');
				}
				text = spaced.toString();
			}
		} catch (MorphException e) {
			// Best effort: fall back to the original (un-spaced) text.
			e.printStackTrace();
		}

		// Correct misspelled words when requested.
		if(misWordChk){
			text = DictionaryUtil.changeMisWord(text);
		}

		// Analyze each whitespace-separated word of the (possibly re-spaced) text.
		for (String word : text.split(" ")) {
			krAnlRstmap = analysis("contents", word);
			arrWordInDocTemp.addAll(krAnlRstmap.get("0"));
			arrWordNotDicTemp.addAll(krAnlRstmap.get("1"));
		}

		// Collect nouns (POS 'N'); drop duplicates unless addChk is true.
		ArrayList<String> tempArrWordInDoc = new ArrayList<String>();
		for (WordInDoc wordInDoc : arrWordInDocTemp) {
			boolean addCheck = true;
			if (!addChk) {
				for (String seen : tempArrWordInDoc) {
					if (wordInDoc.getWord().equals(seen)) {
						addCheck = false;
						break;
					}
				}
			}
			if (addCheck && String.valueOf(wordInDoc.getPos()).equals("N")) {
				tempArrWordInDoc.add(wordInDoc.getWord());
			}
		}

		// Remove duplicates from the list of unregistered words.
		ArrayList<String> tempArrWordnotDoc = new ArrayList<String>();
		wid1:for (WordNotDic wnd1 : arrWordNotDicTemp) {
			for (String seen : tempArrWordnotDoc) {
				if (wnd1.getWord().equals(seen)) {
					continue wid1;
				}
			}
			tempArrWordnotDoc.add(wnd1.getWord());
		}

		krAnlRstmap.put("0", tempArrWordInDoc);
		krAnlRstmap.put("1", tempArrWordnotDoc);

		return krAnlRstmap;
	}

	/**
	 * Morphologically analyzes {@code text} and splits the resulting tokens
	 * into dictionary words ({@code "0"}, {@code ArrayList<WordInDoc>}) and
	 * unregistered words ({@code "1"}, {@code ArrayList<WordNotDic>}).
	 *
	 * @param field logical field name; "title" maps to '0', "contents" to '1',
	 *              anything else to '2' in the stored WordInDoc
	 * @param text  text to tokenize and analyze
	 * @return map with keys {@code "0"} (found) and {@code "1"} (not found)
	 * @throws RsnfException wrapping any MorphException or IOException
	 * @author Ryu Seung Wan
	 */
	public HashMap<String, ArrayList> analysis(String field, String text) throws RsnfException{
		HashMap<String, ArrayList> map = new HashMap<String, ArrayList>();
		arrWordNotDic = new ArrayList<WordNotDic>();
		arrWordInDoc  = new ArrayList<WordInDoc>();
		// Words already accepted from the current text's tokens.
		ArrayList<WordInDoc> curTokenInWords = new ArrayList<WordInDoc>();
		try {
			Token token = new Token();
			TokenStream input = analyzer.tokenStream("", new StringReader(text));

			tokenWhile:while((token = input.next(token)) != null) {

				// Only Korean and alphanumeric tokens are analyzed.
				if(token.type().equals(KoreanTokenizer.TOKEN_TYPES[KoreanTokenizer.KOROREAN]) || token.type().equals(KoreanTokenizer.TOKEN_TYPES[KoreanTokenizer.ALPHANUM])) {
					String strInput = token.termText();
					List<AnalysisOutput> outputs = morph.analyze(strInput);

					// FIX: guard against an empty candidate list; the original
					// called outputs.get(0) unconditionally, which could throw
					// an uncaught IndexOutOfBoundsException.
					if (outputs.isEmpty()) {
						continue tokenWhile;
					}

					AnalysisOutput rstOutput = null;

					// When several candidate analyses exist, prefer score-100
					// candidates; among several, pick the longest stem (first
					// wins on ties). If none scores 100, skip the token.
					if(outputs.size() > 1){
						int maxScoreCount = 0;
						for (AnalysisOutput o : outputs) {
							if (o.getScore() == 100) {
								maxScoreCount++;
							}
						}
						if (maxScoreCount == 0) {
							continue tokenWhile;
						}
						for (AnalysisOutput o : outputs) {
							if (o.getScore() != 100) {
								continue;
							}
							if (rstOutput == null || o.getStem().length() > rstOutput.getStem().length()) {
								rstOutput = o;
							}
						}
					}else{
						rstOutput = outputs.get(0);
					}

					// Skip if this stem was already accepted, or if a longer
					// word from the current text takes precedence.
					for(WordInDoc tw : curTokenInWords){
						if(tw.getWord().equals(rstOutput.getStem())){
							continue tokenWhile;
						}else if(tw.getWord().length() > rstOutput.getStem().length()){
							continue tokenWhile;
						}
					}

					WordInDoc wid = new WordInDoc();
					wid.setWord(rstOutput.getStem());
					wid.setScore(rstOutput.getScore());
					wid.setPos(rstOutput.getPos());
					wid.setStart(token.startOffset());
					wid.setEnd(token.endOffset());

					if(wid.getScore() == 100){
						WordEntry we = DictionaryUtil.getWord(wid.getWord());
						if(we != null && we.isBreak_word()){
							// NOTE(review): indexOf of a string within itself is
							// always 0 for non-empty words, so `index` is
							// effectively constant — probably meant to search a
							// larger string. Kept as-is; TODO confirm intent.
							int index = wid.getWord().indexOf(wid.getWord());
							WordInDoc wordInDoc = new WordInDoc(
									we.getSeq(),
									// Substitute the parent (synonym) word when
									// synonym analysis is enabled.
									we.getP_word() != null && synonum ? we.getP_word() : wid.getWord() ,
									wid.getStart()+(index!=-1?index:0) == 0?1 :wid.getStart()+(index!=-1?index:0) ,
									index!=-1?wid.getEnd()+index+wid.getWord().length():wid.getEnd(),
									field.equals("title") ? '0' : field.equals("contents") ? '1' : '2',
									"ko",
									wid.getPos(),
									0,
									0
							);
							arrWordInDoc.add(wordInDoc);
							curTokenInWords.add(wordInDoc);
						}
					}else{
						// Not confidently analyzed: record as an unregistered word.
						arrWordNotDic.add(new WordNotDic(text, "ko"));
					}
				}
			}

			map.put("0", arrWordInDoc);
			map.put("1", arrWordNotDic);

		} catch (MorphException me) {
			throw new RsnfException(me.getMessage(), me);
		} catch (IOException ie) {
			throw new RsnfException(ie.getMessage(), ie);
		}
		return map;
	}


	/** Simple manual test driver: analyzes a sample sentence and prints the results. */
	public static void main(String[] args){

		KoreanAnalysisManager kam = new KoreanAnalysisManager();
		try {
			String text = "녹발생. 무한도전 아방이";

			// text, addChk, wordSpace, misWordChk, synChk
			HashMap<String, ArrayList> krAnlRstmap = kam.doAnalysis(text, false, false, false, false);

			ArrayList<String> widList = krAnlRstmap.get("0");
			System.out.println("======= 형태소 분석 성공 목록  ==========");
			for(String wid:widList){
				System.out.println(" " + wid);
			}
			ArrayList<String> wndList = krAnlRstmap.get("1");
			System.out.println("======= 형태소 분석 실패 목록  ==========");
			for(String wnd :wndList){
				System.out.println(wnd);
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
	}


}


