package com.taikang.t.helper;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

import edu.stanford.nlp.parser.lexparser.LexicalizedParser;
import edu.stanford.nlp.trees.TreebankLanguagePack;
import edu.stanford.nlp.trees.TypedDependency;
import edu.stanford.nlp.ling.CoreLabel;  
import edu.stanford.nlp.trees.*;
import edu.stanford.nlp.trees.international.pennchinese.ChineseTreebankLanguagePack;
import edu.stanford.nlp.util.StringUtils;

/**
 * Helper around the Stanford lexicalized parser for Chinese sentences.
 *
 * <p>Input sentences are expected to be pre-tokenized, with tokens separated by
 * single spaces. The class can produce collapsed typed dependencies and extract
 * a "key phrase": the subtree around the token that governs the most
 * dependencies.
 *
 * <p>NOTE(review): not thread-safe; each call to {@link #getKeys()} and
 * {@link #grammarChinese(String)} reloads the parser model, which is expensive.
 */
public class StanfordParserHelper {

	/** Classpath location of the Chinese PCFG grammar model. */
	private static final String CHINESE_PCFG_MODEL =
			"edu/stanford/nlp/models/lexparser/chinesePCFG.ser.gz";

	/** Tokens of the sentence wrapped as CoreLabels, built by the constructor. */
	private List<CoreLabel> rawWords = new ArrayList<CoreLabel>();

	/** The raw (space-tokenized) input sentence. */
	private String sentence;

	/** The sentence split on single spaces. */
	private String[] sent;

	// Short names of Stanford dependency relations, kept for reference.
	// adverbial modifier
	@SuppressWarnings("unused")
	private static final String  advmod = "advmod";
	// nominal subject
	@SuppressWarnings("unused")
	private static final String  nsubj = "nsubj";
	// unspecified dependency
	@SuppressWarnings("unused")
	private static final String  dep = "dep";
	// direct object
	@SuppressWarnings("unused")
	private static final String  dobj = "dobj";
	// numeric modifier
	@SuppressWarnings("unused")
	private static final String  nummod = "nummod";
	// modal verb modifier
	@SuppressWarnings("unused")
	private static final String  mmod = "mmod";
	// root of the sentence
	@SuppressWarnings("unused")
	private static final String  root = "root";
	// negation modifier
	@SuppressWarnings("unused")
	private static final String  neg = "neg";

	/**
	 * Builds the helper from a pre-tokenized sentence (tokens separated by
	 * single spaces).
	 *
	 * @param sentence space-separated tokens; must not be null
	 */
	public StanfordParserHelper(String sentence) {
		super();
		// BUG FIX: the field was previously never assigned, so getSentence()
		// always returned null.
		this.sentence = sentence;
		this.sent = sentence.split(" ");
		this.rawWords = toCoreLabels(this.sent);
	}

	public List<CoreLabel> getRawWords() {
		return rawWords;
	}

	public void setRawWords(List<CoreLabel> rawWords) {
		this.rawWords = rawWords;
	}

	public String getSentence() {
		return sentence;
	}

	public void setSentence(String sentence) {
		this.sentence = sentence;
	}

	/** Demo entry point: parses a sample Chinese sentence and prints results. */
	public static void main(String[] args) {
		LexicalizedParser lp = LexicalizedParser.loadModel(CHINESE_PCFG_MODEL);
		String text = "半 天 啊 站 起来 这 都 炸 烂了 功夫 不负 苦心人";//"I am a little girl";//

		grammar(lp, text);
	}

	/**
	 * Parses a pre-tokenized sentence, prints the constituency tree, the
	 * collapsed typed dependencies, and the subtree around the sample leaf
	 * "烂了" (demo/diagnostic output).
	 *
	 * @param lp       a loaded lexicalized parser
	 * @param sentence space-separated tokens
	 */
	public static void grammar(LexicalizedParser lp, String sentence) {
		// This option shows parsing a list of correctly tokenized words.
		String[] sent = sentence.split(" ");
		for (String word : sent) {
			System.out.println(word);
		}
		List<CoreLabel> rawWords = toCoreLabels(sent);

		Tree parse = lp.apply(rawWords);
		parse.pennPrint();

		TreebankLanguagePack tlp = new ChineseTreebankLanguagePack();
		GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
		GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
		// BUG FIX: separator was the literal "/n", printing everything on one line.
		System.out.println(StringUtils.join(gs.typedDependenciesCollapsed(true), "\n"));

		List<Tree> leaves = parse.getLeaves();
		int i = indexOfLeaf(leaves, "烂了");
		if (i < 0) {
			// BUG FIX: previously fell through to leaves.get(leaves.size())
			// and threw IndexOutOfBoundsException when the leaf was absent.
			return;
		}
		Tree pointNode = leaves.get(i);
		Tree mother = pointNode.ancestor(2, parse);
		System.out.println(mother);
	}

	/**
	 * Parses a pre-tokenized Chinese sentence and returns its collapsed typed
	 * dependencies. Loads the parser model on every call (expensive).
	 *
	 * @param sentence space-separated tokens
	 * @return the collapsed typed dependencies of the best parse
	 */
	public static List<TypedDependency> grammarChinese(String sentence) {
		// This option shows parsing a list of correctly tokenized words.
		LexicalizedParser lp = LexicalizedParser.loadModel(CHINESE_PCFG_MODEL);
		List<CoreLabel> rawWords = toCoreLabels(sentence.split(" "));

		Tree parse = lp.apply(rawWords);

		TreebankLanguagePack tlp = new ChineseTreebankLanguagePack();
		GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
		GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);

		return gs.typedDependenciesCollapsed(true);
	}

	/**
	 * Extracts the key phrase of this helper's sentence: the token that governs
	 * the most dependencies is located in the parse tree, and the leaves of its
	 * grandparent node are concatenated into the returned string.
	 *
	 * <p>Sentences of one or two tokens are returned directly without parsing.
	 *
	 * @return the key phrase, the bare keyword if it cannot be located in the
	 *         tree, or null when no dependencies are produced
	 */
	public String getKeys() {
		// One token: it is the key by itself.
		if (sent.length == 1) {
			return sent[0];
		}
		// Two tokens: return their concatenation.
		if (sent.length == 2) {
			return sent[0] + sent[1];
		}

		// Parse the tokenized sentence with the Chinese grammar model.
		LexicalizedParser lp = LexicalizedParser.loadModel(CHINESE_PCFG_MODEL);
		List<CoreLabel> tokens = toCoreLabels(sent);
		Tree parse = lp.apply(tokens);

		// Derive the typed-dependency list from the parse tree.
		TreebankLanguagePack tlp = new ChineseTreebankLanguagePack();
		GrammaticalStructureFactory gsf = tlp.grammaticalStructureFactory();
		GrammaticalStructure gs = gsf.newGrammaticalStructure(parse);
		List<TypedDependency> words = gs.typedDependenciesCollapsed(true);

		if (words == null) {
			return null;
		}

		// The token governing the most dependencies is the keyword root.
		String keyWord = mostFrequentGovernor(tokens, words, false);
		System.out.println("---------------------------------------");
		System.out.println(keyWord);
		System.out.println("---------------------------------------");

		// Locate the keyword leaf in the parse tree.
		List<Tree> leaves = parse.getLeaves();
		int i = indexOfLeaf(leaves, keyWord);
		if (i < 0) {
			// BUG FIX: previously fell through to leaves.get(leaves.size())
			// and threw IndexOutOfBoundsException; degrade to the bare keyword.
			return keyWord;
		}
		Tree pointNode = leaves.get(i);
		// Collect all leaves under the keyword's grandparent node: these
		// sibling words form the key phrase used as the query condition.
		Tree mother = pointNode.ancestor(2, parse);
		StringBuilder keyWords = new StringBuilder();
		for (Tree node : mother.getLeaves()) {
			keyWords.append(node.toString());
		}
		System.out.println(keyWords);
		return keyWords.toString();
	}

	/**
	 * Prints the token (by original text) of {@link #rawWords} that governs the
	 * most of the given dependencies.
	 *
	 * @param words pre-computed typed dependencies; ignored when null
	 * @return always null (contract preserved from the original implementation)
	 */
	public String[] getKeys(List<TypedDependency> words) {
		if (words != null) {
			String keyWord = mostFrequentGovernor(rawWords, words, true);
			System.out.println(keyWord);
		}
		return null;
	}

	/**
	 * Wraps each pre-tokenized word in a {@link CoreLabel}, the form the
	 * parser's {@code apply} method expects.
	 */
	private static List<CoreLabel> toCoreLabels(String[] words) {
		List<CoreLabel> labels = new ArrayList<CoreLabel>(words.length);
		for (String word : words) {
			CoreLabel l = new CoreLabel();
			l.setWord(word);
			labels.add(l);
		}
		return labels;
	}

	/**
	 * Counts, for each token, how many dependencies it governs and returns the
	 * token with the highest count (ties broken by first occurrence; "" when
	 * no token governs anything).
	 *
	 * @param tokens          the sentence tokens
	 * @param deps            the typed dependencies to scan
	 * @param useOriginalText compare by {@code originalText()} when true,
	 *                        otherwise by {@code word()}
	 */
	private static String mostFrequentGovernor(List<CoreLabel> tokens,
			List<TypedDependency> deps, boolean useOriginalText) {
		Map<String, Integer> counts = new HashMap<String, Integer>();
		for (CoreLabel cl : tokens) {
			String clText = useOriginalText ? cl.originalText() : cl.word();
			int n = 0;
			for (TypedDependency td : deps) {
				// Count how often this token appears as the governor.
				if (clText.equals(td.gov().value())) {
					n++;
				}
			}
			counts.put(clText, n);
		}
		// Scan the counts for the most frequent governor.
		String keyWord = "";
		int max = 0;
		for (Entry<String, Integer> e : counts.entrySet()) {
			if (e.getValue() > max) {
				max = e.getValue();
				keyWord = e.getKey();
			}
		}
		return keyWord;
	}

	/**
	 * Returns the index of the first leaf whose text equals {@code word}, or
	 * -1 when no such leaf exists.
	 */
	private static int indexOfLeaf(List<Tree> leaves, String word) {
		for (int i = 0; i < leaves.size(); i++) {
			if (leaves.get(i).toString().equals(word)) {
				return i;
			}
		}
		return -1;
	}
}
