package ClassifierAndSummaryGenerator;

import java.io.BufferedReader;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Set;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;

import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;

public class ClassifierAndSummaryGenerator {
	
	// Yahoo BOSS query URL pieces: base endpoint, then "%20"-joined keywords,
	// then "?appid=" + yahooID, then fixed format/abstract options and the
	// site restriction suffix + domain. See the example URL below.
	private static final String yahooURL_1 = "http://boss.yahooapis.com/ysearch/web/v1/";
	private static final String yahooURL_2 = "?appid=";
	private static final String yahooURL_3 = "&format=xml&abstract=long&sites=";
	
	//http://boss.yahooapis.com/ysearch/web/v1/hospital%20medical%20?appid=5Vcli0zV34FI2uCoFMSdfZJJpSJPKdTNSKNdNT6aVgh75p2vJcHc0ni1Q40Ofr10bQ--&format=xml&abstract=long&sites=diabetes.org
	
	// Root of the category hierarchy; classification starts here.
	private Category root;
	
	// Category name -> query-word file name, loaded from hierarchy.txt.
	private HashMap<String, String>nodeNameToFileMap;
	
	// Yahoo BOSS application id (4th command-line argument).
	private String yahooID;
	
	// Site being classified, e.g. "diabetes.org" (1st command-line argument).
	private String domain;	
	
	// Specificity threshold: a child category is descended into only when its
	// share of total hits (scaled by the parent's specificity) exceeds this.
	private double sThreshold;

	// Coverage threshold: minimum absolute hit count to descend into a child.
	private int cThreshold;
	
	
	private String FormQuery(ArrayList<String> keywords)
    {
		// Builds a Yahoo BOSS query URL: base endpoint, each keyword followed
		// by a "%20" separator, then the app id and site-restriction suffix.
		StringBuilder url = new StringBuilder(yahooURL_1);
		for (String keyword : keywords) {
			url.append(keyword).append("%20");
		}
		url.append(yahooURL_2).append(yahooID).append(yahooURL_3).append(domain);
		return url.toString();
    }

	public ClassifierAndSummaryGenerator(String yahooID, double sThreshold, int cThreshold, String domain) 
	{
		this.yahooID = yahooID;
		this.domain = domain;
		this.sThreshold = sThreshold;
		this.cThreshold = cThreshold;
		this.nodeNameToFileMap = new HashMap<String, String>();
		// The hierarchy file must be parsed before the root Category is
		// created: Category's constructor looks its query file up in
		// nodeNameToFileMap.
		parseHierarchy("hierarchy.txt");
		this.root = new Category("Root");
	}
	
	/**
	 * Loads the hierarchy description into nodeNameToFileMap. Each line of
	 * the file has the form "categoryName#queryFileName"; lines without
	 * exactly one '#' separator are silently skipped.
	 * @param hierarchyFile path of the hierarchy description file
	 */
	private void parseHierarchy(String hierarchyFile)
	{
		// try-with-resources: the original never closed the reader (leak).
		try (BufferedReader reader = new BufferedReader(new FileReader(hierarchyFile))) {
			String newLine;
			while ((newLine = reader.readLine()) != null)
			{
				String[] substrings = newLine.split("#");
				if (substrings.length == 2)
				{
					nodeNameToFileMap.put(substrings[0], substrings[1]);
				}
			}
		} catch (IOException e) {
			// FileNotFoundException is an IOException; keep the original
			// best-effort behavior of logging and continuing with an empty map.
			e.printStackTrace();
		}
	}
	
	
	/**
	 * Classifies the database into the hierarchy rooted at {@code node}:
	 * probes Yahoo with each child's query words, then descends into every
	 * child whose specificity and coverage both exceed their thresholds.
	 * @param node category currently being probed
	 * @param sThreshold minimum specificity to descend into a child
	 * @param cThreshold minimum coverage (hit count) to descend into a child
	 * @param parentSpecificity specificity accumulated along the path so far
	 * @return the set of categories the database belongs to; never empty
	 *         ({@code node} itself when no child qualifies)
	 */
	private HashSet<Category> CrawlNode(Category node, double sThreshold, double cThreshold, double parentSpecificity)
	{
		HashSet<Category> result = new HashSet<Category>();

		// A leaf has no query file to probe with, so it classifies itself.
		if (node.isLeaf())
		{
			result.add(node);
			return result;
		}

		// e.g. node "Computer" has file computer.txt; each line is
		// "<childName> <queryWord> <queryWord> ...".
		// try-with-resources: the original never closed the reader (leak).
		try (BufferedReader reader = new BufferedReader(new FileReader(node.file))) {
			String newLine;
			while ((newLine = reader.readLine()) != null)
			{
				String[] substrings = newLine.split(" ");
				if (!node.childNameToChildNodeMap.containsKey(substrings[0]))
				{
					node.childNameToChildNodeMap.put(substrings[0], new Category(substrings[0]));
				}
				// Build the probe URL; each keyword is preceded by "%20".
				StringBuilder queryURL = new StringBuilder(yahooURL_1);
				for (int i = 1; i < substrings.length; i++)
				{
					queryURL.append("%20").append(substrings[i]);
				}
				queryURL.append(yahooURL_2).append(yahooID).append(yahooURL_3).append(domain);
				// getHitsAndArticlesByURL returns the hit count and, as a side
				// effect, parses the top result pages into node.articles.
				int count = getHitsAndArticlesByURL(node, queryURL.toString());
				node.childNodeToCountMap.increaseCount(node.childNameToChildNodeMap.get(substrings[0]), count);
			}
		} catch (IOException e) {
			// Best-effort: a missing or unreadable query file leaves the
			// counts empty and the node classifies itself below.
			e.printStackTrace();
		}

		// Specificity of a child = its share of the total hits, scaled by the
		// parent's specificity; coverage = its absolute hit count.
		int totalCount = 0;
		for (Category child : node.childNodeToCountMap.keySet())
		{
			totalCount += node.childNodeToCountMap.get(child);
		}

		for (Category child : node.childNodeToCountMap.keySet())
		{
			double specificity = (double) node.childNodeToCountMap.get(child) / totalCount * parentSpecificity;
			int coverage = node.childNodeToCountMap.get(child);
			System.out.println("Specificity for category: " + child.name + " is "+specificity);
			System.out.println("Coverage for category: "+ child.name + " is "+ coverage);
			if (specificity > sThreshold && coverage > cThreshold)
			{
				result.add(node);
				result.addAll(CrawlNode(child, sThreshold, cThreshold, specificity));
			}
		}
		// Terminate here: no child qualified, so the database belongs to
		// this node itself.
		if (result.isEmpty())
		{
			result.add(node);
		}
		return result;
	}
	/**
	 * 
	 * @param nodes: the set of categories that a certain website belongs to. e.g. the set "nodes" of java.sun.com 
	 * contains "root", "computer" and "programming"
	 * @param node: the category that is currently constructing summary
	 */
	
	private void extractSummary(HashSet<Category> nodes, Category node)
	{
		// A leaf like "programming" has no children to aggregate; no summary
		// file is written for it.
		if (node.isLeaf())
			return;
		// Recurse into each matched child first, then fold the child's parsed
		// pages into this node's article set so parents summarize everything
		// below them.
		for (Category child : node.childNodeToCountMap.keySet())
		{
			if (nodes.contains(child))
			{
				extractSummary(nodes, child);
				node.articles.addAll(child.articles);
			}
		}
		// wordToCountMap maps each word to its doc frequency; being
		// tree-backed it also yields the words in sorted order.
		SortedCountMap<String> wordToCountMap = new SortedCountMap<String>();
		for (Article article : node.articles)
			for (String word : article.words)
			{
				wordToCountMap.increment(word);
			}

		// Summary file name: "<CategoryName>-<domain>.txt".
		String fileToWrite = node.name + "-" + this.domain + ".txt";
		// try-with-resources: the original only closed the writer on the
		// success path, leaking it if println threw.
		try (PrintWriter pw = new PrintWriter(new FileWriter(fileToWrite))) {
			for (String word : wordToCountMap.keySet())
			{
				pw.println(word + "#" + wordToCountMap.get(word));
			}
		} catch (IOException e) {
			// Best-effort: a summary that cannot be written is logged and
			// skipped, matching the original behavior.
			e.printStackTrace();
		}
	}

	// Prints one classification path per matched category: "Root" when no
	// child of the root matched, otherwise "Root/<child>/" or
	// "Root/<child>/<grandchild>".
	private void printClassficationResult(HashSet<Category> nodes){
		ArrayList<String> paths = new ArrayList<String>();
		boolean rootHasMatchedChild = false;
		for (Category child : nodes) {
			if (root.childNodeToCountMap.keySet().contains(child)) {
				rootHasMatchedChild = true;
				String prefix = "Root/" + child.name + "/";
				boolean childHasMatchedChild = false;
				for (Category grandchild : nodes) {
					if (child.childNodeToCountMap.keySet().contains(grandchild)) {
						childHasMatchedChild = true;
						paths.add(prefix + grandchild.name);
					}
				}
				if (!childHasMatchedChild) {
					paths.add(prefix);
				}
			}
		}
		if (!rootHasMatchedChild) {
			paths.add("Root");
		}
		for (String path : paths) {
			System.out.println(path);
		}
		System.out.println();
	}

	// Full pipeline: classify the site, report the pages fetched for each
	// matched category, print the classification paths, write the summaries.
	private void run()
	{
		System.out.println("Classifying...");
		HashSet<Category> matched = this.CrawlNode(root, sThreshold, cThreshold, 1);
		for (Category category : matched)
		{
			System.out.println("Creating Content Summary for: "+category.name);
			// The pages were already fetched and parsed inside CrawlNode;
			// this loop only reports their URLs.
			for (Article article : category.articles)
			{
				System.out.println("Getting page: "+article.URL);
			}
			System.out.println();
		}
		System.out.println("Classification:");
		printClassficationResult(matched);
		System.out.println("Extracting topic content summaries...");
		extractSummary(matched, root);
	}
	
	//sh /home/gravano/6111/Html/Proj2/bin/project2.sh diabetes.org 0.6 100 5Vcli0zV34FI2uCoFMSdfZJJpSJPKdTNSKNdNT6aVgh75p2vJcHc0ni1Q40Ofr10bQ--

	
	
	
	/**
	 * get totalhits from Yahoo. This program also calls getWordsLynx.runLynx to parse the top 4 pages and stores the results
	 * @param url: the query URL generated and sent to Yahoo Search API
	 * @return: the number of hits
	 */
	private int getHitsAndArticlesByURL(Category node, String url)
	{
		DocumentBuilderFactory domfac = DocumentBuilderFactory.newInstance();
		int NumRes = 0;
		InputStream inStream = null;
		try{
			// Security: the XML arrives over the network, so forbid DOCTYPE
			// declarations to prevent XXE / entity-expansion attacks.
			domfac.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
			URL queryurl = new URL(url);
			URLConnection connection = queryurl.openConnection();
			connection.setDoInput(true);
			inStream = connection.getInputStream();
			DocumentBuilder dombuilder = domfac.newDocumentBuilder();
			Document doc = dombuilder.parse(inStream);
			Element root = doc.getDocumentElement();
			NodeList resultset_web = root.getElementsByTagName("resultset_web");
			Node resultset = resultset_web.item(0);

			// Total hit count reported by Yahoo for this probe query.
			NumRes = Integer.parseInt(resultset.getAttributes().getNamedItem("totalhits").getNodeValue());

			// Parse the top 4 result pages via Lynx and attach them to the
			// node as Articles.
			int count = 0;
			NodeList results = resultset.getChildNodes();
			if(results != null)
			{
				for(int i = 0 ; i < results.getLength(); i++){
					Node result = results.item(i);
					if(result.getNodeType()==Node.ELEMENT_NODE){
						for(Node node1 = result.getFirstChild(); node1 != null; node1 = node1.getNextSibling()){
							if(node1.getNodeType()==Node.ELEMENT_NODE
									&& node1.getNodeName().equals("url")
									&& node1.getFirstChild() != null && count < 4){
								count++;
								String pageUrl = node1.getFirstChild().getNodeValue();
								Article article = new Article();
								article.URL = pageUrl;
								article.words = getWordsLynx.runLynx(pageUrl);
								node.articles.add(article);
							}
						}
					}
				}
			}

		}catch(ParserConfigurationException e){
			e.printStackTrace();
			return -1;
		}catch(SAXException e){
			e.printStackTrace();
			return -1;
		}catch(IOException e){
			e.printStackTrace();
			return -1;
		}finally{
			// The original never closed the connection stream (leak).
			if(inStream != null){
				try{
					inStream.close();
				}catch(IOException ignored){
					// best-effort close; the result is already computed
				}
			}
		}

		return NumRes;
	}
	
	
	/**
	 * A single fetched web page: its URL plus the set of words Lynx
	 * extracted from it. Identity is (enclosing generator, URL); the word
	 * set is deliberately excluded from equals/hashCode.
	 */
	public class Article {
		protected String URL;
		Set<String>words;

		@Override
		public int hashCode() {
			// Same 31-based formula (and values) as before:
			// 31 * (31 + outer.hashCode()) + URL.hashCode().
			int hash = 31 + getOuterType().hashCode();
			return 31 * hash + (URL == null ? 0 : URL.hashCode());
		}

		@Override
		public boolean equals(Object obj) {
			if (this == obj) {
				return true;
			}
			if (obj == null || getClass() != obj.getClass()) {
				return false;
			}
			Article other = (Article) obj;
			if (!getOuterType().equals(other.getOuterType())) {
				return false;
			}
			return URL == null ? other.URL == null : URL.equals(other.URL);
		}

		private ClassifierAndSummaryGenerator getOuterType() {
			return ClassifierAndSummaryGenerator.this;
		}

	}
	

	/**
	 * @author Yang Ye
	 * Inner class used to denote a node in the category hierarchy tree
	 */
	public class Category {
		
		// Pages fetched and parsed for this category while probing Yahoo;
		// parents also accumulate their matched children's articles during
		// summary extraction.
		HashSet<Article>articles;
		
		/**
		 * Map from a child Node (Computer, Health or Sports) to its count
		 */
		protected CountMap<Category> childNodeToCountMap;
		
		// Display name, e.g. "Computer"; together with the enclosing
		// generator it is the equals/hashCode identity.
		public String name;   
		// Query-word file for this category, "NONE" for leaves, or null when
		// the name has no entry in hierarchy.txt.
		public String file;
		
		// rootNode  {"Computer : computerNode", "Health : healthNode"}
		public HashMap<String, Category> childNameToChildNodeMap;
		
				
		@Override
		public int hashCode() {
			final int prime = 31;
			int result = 1;
			result = prime * result + getOuterType().hashCode();
			result = prime * result + ((name == null) ? 0 : name.hashCode());
			return result;
		}


		@Override
		public boolean equals(Object obj) {
			if (this == obj)
				return true;
			if (obj == null)
				return false;
			if (getClass() != obj.getClass())
				return false;
			Category other = (Category) obj;
			if (!getOuterType().equals(other.getOuterType()))
				return false;
			if (name == null) {
				if (other.name != null)
					return false;
			} else if (!name.equals(other.name))
				return false;
			return true;
		}


		/**
		 * A node is a leaf when it has no query-word file to probe with.
		 * Fix: the original dereferenced {@code file} unconditionally and
		 * threw a NullPointerException for any category name missing from
		 * hierarchy.txt; such a category is now treated as a leaf.
		 */
		public boolean isLeaf()
		{
			return file == null || file.equals("NONE");
		}

		
		public Category(String name) {
			this.name = name;
			this.childNameToChildNodeMap = new HashMap<String, Category>();
			// May be null when "name" has no entry in the hierarchy file.
			this.file = nodeNameToFileMap.get(name);
			this.articles = new HashSet<Article>();
			this.childNodeToCountMap = new CountMap<Category>();
		}


		private ClassifierAndSummaryGenerator getOuterType() {
			return ClassifierAndSummaryGenerator.this;
		}
		
	}
	
	
	/**
	 * Entry point. Arguments: database site (e.g. diabetes.org), specificity
	 * threshold (double), coverage threshold (int), Yahoo BOSS app id.
	 */
	public static void main(String[] args)
	{
		if(args.length!=4)
		{
			// Fix: "thershold" typo in the usage message.
			System.out.println("Usage: ClassifierAndSummaryGenerator <database> <specificity threshold> <coverage threshold> <Yahoo ID>");
		}else{
			String database = args[0];
			double sThres;
			int cThres;
			try {
				sThres = Double.parseDouble(args[1]);
				cThres = Integer.parseInt(args[2]);
			} catch (NumberFormatException e) {
				// Fix: the original let NumberFormatException crash with a
				// stack trace on malformed numeric arguments.
				System.out.println("Thresholds must be numeric: <specificity threshold> is a double, <coverage threshold> is an int.");
				return;
			}
			String YahooID = args[3];
			ClassifierAndSummaryGenerator QProber = new ClassifierAndSummaryGenerator(YahooID, sThres, cThres, database);

			QProber.run();
		}
		
	}
	
	
}


