package main;


import java.io.BufferedInputStream;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Collections;
import java.util.LinkedList;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;

import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.SAXException;

/**
 * Asynchronous RSS reader, that downloads and structures data from the CNN RSS feed
 *
 * @author Joonas Vali
 * 
 */
public class RSSreader{

	/** Most recent batch of news items. Callers hold this same list instance via getNews(),
	 *  so it is updated in place (clear + addAll) rather than replaced. */
	private LinkedList<News> news = new LinkedList<News>();
	/** Set to true after every successful refresh; callers may reset it to acknowledge. */
	public boolean newContent = false;
	/** Feed URL built from the NewsType link; null if the link was malformed. */
	URL url;

	/**
	 * new RSSreader, which takes the data from the provided source. Data will be instantly downloaded after
	 * instancing. RSSreader is asynchronous and will be acting separately from the main thread.
	 * @param newsType the feed to follow; its link must be a well-formed URL
	 */
	public RSSreader(NewsType newsType) {
		try {
			url = new URL(newsType.getLink());
		}
		catch (MalformedURLException e) {
			// Don't swallow this: a bad feed URL means the reader can never work.
			// readData() checks for null and becomes a no-op, instead of throwing
			// a NullPointerException inside the background thread.
			System.err.println("Invalid RSS feed URL: " + newsType.getLink());
			e.printStackTrace();
		}

		Thread thread = new Thread(){
			// 10 minutes between refreshes. (The previous value 6000000 ms was
			// actually 100 minutes, contradicting its own comment.)
			public static final int SLEEP_TIME = 10 * 60 * 1000;
			public void run(){
				while(true){
					try {
						readData();
						Thread.sleep(SLEEP_TIME);
					}
					catch (InterruptedException e) {
						// Restore the interrupt flag and stop refreshing instead of
						// looping straight back into another download.
						Thread.currentThread().interrupt();
						return;
					}
				}
			}
		};

		thread.setDaemon(true);
		thread.start();
	}



	/**
	 * Reads data from the selected CNN news stream, which will be made available through getNews() method.
	 * Parses into a temporary list first, so that a failed download or parse never leaves
	 * callers staring at an empty news list until the next refresh cycle.
	 */
	private void readData(){
		if (url == null) {
			return; // constructor could not build a valid URL; nothing to fetch
		}
		Document doc;
		try{
			BufferedInputStream in = new BufferedInputStream(url.openStream());
			try {
				DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
				try {
					// The feed is untrusted remote XML: forbid doctype declarations
					// to prevent XXE / entity-expansion attacks.
					dbf.setFeature("http://apache.org/xml/features/disallow-doctype-decl", true);
				} catch (ParserConfigurationException ignored) {
					// Parser implementation doesn't know this feature; proceed with defaults.
				}
				DocumentBuilder db = dbf.newDocumentBuilder();
				doc = db.parse(in);
			} finally {
				in.close(); // close even when parse() throws (the old code leaked here)
			}
		} catch(IOException e){
			e.printStackTrace();
			return;
		} catch (ParserConfigurationException e) {
			e.printStackTrace();
			return;
		} catch (SAXException e) {
			e.printStackTrace();
			return;
		}

		doc.getDocumentElement().normalize();
		NodeList itemList = doc.getElementsByTagName("item");
		// Collect into a scratch list; the shared list is only touched on success.
		LinkedList<News> fresh = new LinkedList<News>();
		for(int itemNum = 0; itemNum < itemList.getLength(); itemNum++){
			Node item = itemList.item(itemNum);
			NodeList contents = item.getChildNodes();
			String description = null, link = null, title = null;
			for(int contentNum = 0; contentNum < contents.getLength(); contentNum++){
				Node content = contents.item(contentNum);
				if(content.getNodeType()!=Node.ELEMENT_NODE) continue; //Sometimes tabulations and white spaces are considered as a data.
				/* using if statements to recognize the data. It's important not to expect the data arriving in any certain order in case the source
				 ever changes its output order or adds new data between. Although this approach is eating more resources, we shouldn't be too
				 concerned, because the amount of data processed isn't very big.  */

				if("title".equalsIgnoreCase(content.getNodeName())){
					title = content.getTextContent();
				}
				if("description".equalsIgnoreCase(content.getNodeName())){
					description = content.getTextContent().split("<div")[0]; // Some RSS contains "feedflare" in description, which shouldn't be included here.
				}
				if("link".equalsIgnoreCase(content.getNodeName())){
					link = content.getTextContent();
				}
			}

			try {
				fresh.add(new News(title, link, description));
			} catch (URISyntaxException e) {
				System.err.println("error: "+e);
			} catch (NullPointerException e){
				System.err.println("error: "+e);
			}
		}
		Collections.shuffle(fresh);
		// Swap the new content in only after a fully successful parse. Keep the same
		// list object alive, since getNews() callers may already hold a reference to it.
		news.clear();
		news.addAll(fresh);
		newContent = true;
	}

	/**
	 * Get the downloaded news
	 * @return the (live, shared) list of news — refreshed in place every cycle
	 */
	public LinkedList<News> getNews(){
		return news;
	}

}
