package edu.washington.cs.cmdsynth.main;

import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import plume.Pair;

import edu.stanford.nlp.dcoref.CorefChain;
import edu.stanford.nlp.dcoref.CorefCoreAnnotations.CorefChainAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.NamedEntityTagAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.PartOfSpeechAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TextAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TokensAnnotation;
import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphEdge;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeCoreAnnotations.TreeAnnotation;
import edu.stanford.nlp.util.CoreMap;
import edu.washington.cs.cmdsynth.CmdRepository;
import edu.washington.cs.cmdsynth.CommandParser;
import edu.washington.cs.cmdsynth.NLPEngine;

/**
 * Entry-point examples demonstrating two ways to analyze a natural-language
 * command string: the project's own {@code CommandParser} ({@link #main}) and
 * a raw Stanford CoreNLP pipeline ({@link #main2}). Both print their results
 * to stdout.
 */
public class Example {

	/**
	 * Parses the sample command stored in {@code CmdRepository.cmd1} and prints
	 * the extracted (verb, object) pairs.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		String text = CmdRepository.cmd1;

		CommandParser parser = new CommandParser();
		parser.setCommand(text);
		parser.parse();
		List<Pair<String, String>> pList = parser.extractVerbObjectPairs();
		System.out.println(pList);
	}

	/**
	 * Stand-alone demonstration of the Stanford CoreNLP pipeline: tokenization,
	 * sentence splitting, POS tagging, lemmatization, NER, constituency and
	 * dependency parsing, and coreference resolution over a hard-coded sentence.
	 *
	 * <p>Not invoked by {@link #main(String[])}; call it directly to inspect
	 * the raw annotator output.
	 *
	 * @param args unused
	 */
	public static void main2(String[] args) {
		// Configure the full annotator chain; order matters — each annotator
		// consumes the output of the ones listed before it.
		Properties props = new Properties();
		props.setProperty("annotators", "tokenize, ssplit, pos, lemma, ner, parse, dcoref");
		StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

		String text = "find all files starting with \"abc\" and then sort them";

		// Wrap the raw text and run every configured annotator over it.
		Annotation document = new Annotation(text);
		pipeline.annotate(document);

		// A CoreMap is a type-safe map keyed by annotation classes.
		List<CoreMap> sentences = document.get(SentencesAnnotation.class);
		System.out.println("Number of sentences: " + sentences.size());

		for (CoreMap sentence : sentences) {
			System.out.println(sentence);
			System.out.println(sentence.getClass());

			// Per-token annotations: surface form, POS tag, and NER label.
			for (CoreLabel token : sentence.get(TokensAnnotation.class)) {
				String word = token.get(TextAnnotation.class);
				String pos = token.get(PartOfSpeechAnnotation.class);
				String ne = token.get(NamedEntityTagAnnotation.class);
				System.out.println(" token: " + token + ",  its id: " + token.index());
				System.out.println(" word: " + word + ", pos: " + pos + ", NER: " + ne);
				System.out.println();
			}

			// Constituency parse tree of the current sentence.
			Tree tree = sentence.get(TreeAnnotation.class);
			System.out.println("Parse tree: ");
			System.out.println(tree);

			// Collapsed, CC-processed Stanford dependency graph of the sentence.
			SemanticGraph dependencies = sentence.get(CollapsedCCProcessedDependenciesAnnotation.class);
			System.out.println("dependence graph: ");
			System.out.println(dependencies);

			// Walk every vertex and list its outgoing dependency edges.
			Set<IndexedWord> vertices = dependencies.vertexSet();
			for (IndexedWord w : vertices) {
				System.out.println(w.value() + ",  tag: " + w.tag());
				List<SemanticGraphEdge> edges = dependencies.getOutEdgesSorted(w);
				for (SemanticGraphEdge edge : edges) {
					System.out.println("   edge: " + edge + ",  " + edge.getTarget());
				}
			}
		}

		// Coreference link graph: each chain groups mentions that co-refer and
		// can report its most representative mention. Sentence and token
		// offsets inside the chains start at 1.
		Map<Integer, CorefChain> graph = document.get(CorefChainAnnotation.class);
		System.out.println("Co-ref graph, size: " + graph.size());
		for (Map.Entry<Integer, CorefChain> entry : graph.entrySet()) {
			System.out.println(" " + entry.getKey() + ", " + entry.getValue().toString());
		}
	}

}