package NLP;



import java.util.List;
import java.util.Properties;
import java.util.Set;

import com.thoughtworks.xstream.XStream;

import edu.stanford.nlp.ling.CoreAnnotations.LemmaAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.NamedEntityTagAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.PartOfSpeechAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TextAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TokensAnnotation;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeCoreAnnotations.TreeAnnotation;
import edu.stanford.nlp.trees.semgraph.SemanticGraph;
import edu.stanford.nlp.trees.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation;
import edu.stanford.nlp.trees.semgraph.SemanticGraphEdge;
import edu.stanford.nlp.util.CoreMap;

public class Test {

	/**
	 * Demo driver for the Stanford CoreNLP pipeline.
	 *
	 * <p>Annotates a hard-coded sample text with tokenization, sentence splitting,
	 * POS tagging, lemmatization, NER, constituency parsing and coreference
	 * resolution, then prints per-token annotations, the parse tree, and the
	 * collapsed-CC-processed dependency graph of each sentence to stdout.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {

		// Ad-hoc sanity check of substring semantics ("prep_on".substring(5) == "on");
		// kept as-is to preserve the program's original output.
		System.out.println("prep_on".length());
		System.out.println("prep_on".substring(5, "prep_on".length()));

		// Creates a StanfordCoreNLP object, with POS tagging, lemmatization, NER,
		// parsing, and coreference resolution.
		Properties props = new Properties();
		// setProperty (not the raw Hashtable put) is the documented API for
		// java.util.Properties: it enforces String keys/values so that
		// getProperty/store/propertyNames behave correctly.
		props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
		StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
		XStream xstream = new XStream();
		System.out.println(xstream.toXML(props));

		// The text to annotate.
		String text = "that rich tall woman walks to the street. I like sports";

		// Create an empty Annotation just with the given text.
		Annotation document = new Annotation(text);

		// Run all annotators on this text.
		pipeline.annotate(document);

		// These are all the sentences in this document.
		// A CoreMap is essentially a Map that uses class objects as keys and has
		// values with custom types.
		List<CoreMap> sentences = document.get(SentencesAnnotation.class);
		int count = 1;
		for (CoreMap sentence : sentences) {
			System.out.println("sentence n' " + count);
			count++;

			// The dependency graph is a per-sentence annotation: fetch it once here
			// instead of re-fetching it for every token (loop-invariant hoisted out
			// of the token loop; same object was previously looked up per token and
			// again after the loop).
			SemanticGraph dependencies = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);

			// Traversing the words in the current sentence.
			// A CoreLabel is a CoreMap with additional token-specific methods.
			for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
				// Surface form of the token.
				String word = token.get(TextAnnotation.class);
				System.out.println(word);
				System.out.println(token.get(LemmaAnnotation.class));
				// POS tag of the token.
				String pos = token.get(PartOfSpeechAnnotation.class);
				System.out.println(pos);
				// NER label of the token (printed after the dependency lookup below,
				// matching the original output order).
				String ne = token.get(NamedEntityTagAnnotation.class);
				System.out.println("index: " + token.index());
				try {
					// Some tokens (e.g. punctuation) have no node in the dependency
					// graph; getNodeByIndex then throws IllegalArgumentException,
					// which we report rather than abort on.
					IndexedWord indWord = dependencies.getNodeByIndex(token.index());
					System.out.println("out from " + token.lemma() + " " + dependencies.outgoingEdgeList(indWord));
					System.out.println("in to " + token.lemma() + " " + dependencies.incomingEdgeList(indWord));
				} catch (IllegalArgumentException e) {
					System.out.println("NOT INTO GRAPH :" + token.lemma());
				}
				System.out.println(ne);
			}

			// This is the parse tree of the current sentence.
			Tree tree = sentence.get(TreeAnnotation.class);
			System.out.println(tree);

			// This is the Stanford dependency graph of the current sentence.
			System.out.println(dependencies);
			Set<SemanticGraphEdge> edges = dependencies.getEdgeSet();
			for (SemanticGraphEdge edge : edges) {
				System.out.println(edge.getSource().lemma());
				System.out.println(edge.getSource().get(PartOfSpeechAnnotation.class));
				System.out.println("nsubj : " + edge.getRelation() + edge.getRelation().getShortName().equals("nsubj"));
				System.out.println(edge.getTarget().lemma());
				System.out.println("outgoing from " + edge.getSource().lemma() + dependencies.outgoingEdgeList((edge.getSource())));
			}
		}

		// This is where the coreference link graph (dcoref output) would be read:
		// each chain stores a set of mentions that link to each other, along with a
		// method for getting the most representative mention.
		// Both sentence and token offsets start at 1!
	}
}
