package net.Stratus.ImageDumper.Sites;

import java.io.File;
import java.util.ArrayList;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import net.Stratus.ImageDumper.Job;
import net.Stratus.ImageDumper.Logger;
import net.Stratus.ImageDumper.Site;
import net.Stratus.ImageDumper.Dumper.Internet;

/**
 * A class for dumping a photo set from flickr.com.
 * 
 * Workflow: validate the set URL, page through the set's gallery pages
 * harvesting per-photo URLs, then fetch each photo's "all sizes" page and
 * download the original-size image.
 * 
 * @author Stratus
 */
public class Flickr extends Site {
	
	private static final boolean CAN_COUNT = true;
	private static final String IDENTIFIER = "flickr.com set";
	// for harvesting photo urls
	private static final String GALLERY_KEY_OFFSET = "<span class=\"photo_container pc_s\">";
	private static final String GALLERY_KEY = "href=\"";
	private static final String GALLERY_KEY_END = "\"";
	// for harvesting photos
	private static final String PHOTO_KEY_OFFSET = "<div id=\"allsizes-photo\">";
	private static final String PHOTO_KEY = "<img src=\"";
	private static final String PHOTO_KEY_END = "\">";
	// NOTE: the character class must be a-zA-Z (capital Z) and the dots in
	// ".flickr.com" must be escaped; the previous "[a-zA-z0-9.]+.flickr.com"
	// also matched punctuation ([\]^_`) and any character in place of the dots.
	private static final String PHOTO_REGEX = "http://[a-zA-Z0-9.]+\\.flickr\\.com/photos/[^/]+/[0-9]+/";
	private static final String VALIDATION_REGEX = "http://[a-zA-Z0-9.]+\\.flickr\\.com/photos/[^/]+/sets/[0-9]+/";
	// Pattern instances are immutable and thread-safe: compile once instead of
	// on every call.
	private static final Pattern PHOTO_PATTERN = Pattern.compile(PHOTO_REGEX);
	private static final Pattern VALIDATION_PATTERN = Pattern.compile(VALIDATION_REGEX);
	// example: http://www.flickr.com/photos/williamfultz/sets/72157608636289131/with/6274440809/
	private final Job job;
	// canonical set url with "?page=" appended; a page number is concatenated
	// per request. Assigned in dump() once the job url has been validated.
	private String galUrl;
	private final ArrayList<String> photoUrls;
	
	/**
	 * Creates a new flickr dumper and registers it with the given job.
	 * 
	 * @param job the job this dumper services
	 */
	public Flickr(Job job) {
		this.job = job;
		job.setSite(this, IDENTIFIER, CAN_COUNT);
		photoUrls = new ArrayList<String>();
	}
	

	@Override
	public void dump() {
		initDump(job);
		if(ioh.isLoaded()) {
			Logger.logln(job.getID(), "IO Handler loaded!");
			// extract the canonical set url from whatever the user pasted
			// (trailing "/with/<id>/" fragments etc. are dropped)
			String url = job.getUrl();
			Matcher m = VALIDATION_PATTERN.matcher(url);
			if(m.find()) {
				galUrl = m.group()+"?page=";
				Logger.logln(job.getID(), "URL of Gallery: "+galUrl);
				harvestGalleries();
				harvestPhotos();
				Logger.logln(job.getID(), "Done");
			} else {
				// url passed in did not look like a flickr set
				Logger.logln(job.getID(), "Invalid set URL, aborting");
			}
			//TODO: handle failed jobs/errors
		}
	}
	
	/**
	 * Downloads every harvested photo. For each photo url the "all sizes"
	 * page is fetched, the img src is scraped out of it, and the image is
	 * saved as "<index>.<extension>" in the job's directory.
	 */
	private void harvestPhotos() {
		for(int i = 0; i < photoUrls.size(); i++) {
			// download the photo page's html
			String resp = Internet.httpRequest(photoUrls.get(i));
			// find src of the img tag containing the full-size picture;
			// skip this photo if any marker is missing (page layout changed
			// or the request failed)
			int offset = resp.indexOf(PHOTO_KEY_OFFSET);
			if(offset == -1) continue;
			int key = resp.indexOf(PHOTO_KEY, offset);
			// check the raw index BEFORE adding the key length — otherwise a
			// "not found" (-1) result turns into a small positive offset
			if(key == -1) continue;
			int start = key + PHOTO_KEY.length();
			int end = resp.indexOf(PHOTO_KEY_END, start);
			if(end == -1) continue;
			String src = resp.substring(start, end);
			// derive the file extension from the image url; fall back to jpg
			// if the src has no dot at all
			int dot = src.lastIndexOf('.');
			String ext = (dot == -1) ? "jpg" : src.substring(dot+1);
			File file = new File(job.getDirectory()+i+"."+ext);
			if(Internet.urlToFile(src, file)) job.addDownloaded();
		}
	}
	
	/**
	 * Harvests every gallery page of the set, starting at page 1, until a
	 * page no longer looks like a gallery, then reports the total count to
	 * the job.
	 **/
	private void harvestGalleries() {
		int galID = 1;
		while(harvestGallery(galID)) {
			galID++;
		}
		job.setTotal(photoUrls.size());
	}
	
	/**
	 * Harvests all of the photo urls on one gallery page and appends them to
	 * {@link #photoUrls} (with "sizes/o/" appended to request the original
	 * size).
	 * 
	 * @param galID gallery page number to harvest (1-based)
	 * @return true if the page was a gallery page and was harvested
	 */
	private boolean harvestGallery(int galID) {
		Logger.log(job.getID(), "Harvesting galID "+galID+"... ");		
		String resp = Internet.httpRequest(galUrl+galID);
		if(!resp.contains("<div id=\"photo-display-container\"")) {
			Logger.logln(job.getID(), "Failed, ending gallery harvesting");
			return false;
		}
		int harvested = 0;
		int last = 0;
		while((last = resp.indexOf(GALLERY_KEY_OFFSET, last)) != -1) {
			last += GALLERY_KEY_OFFSET.length();
			// locate the href value; bail out of the page if the markup is
			// truncated (previously an out-of-bounds substring was silently
			// swallowed by an empty catch)
			int key = resp.indexOf(GALLERY_KEY, last);
			if(key == -1) break;
			int start = key + GALLERY_KEY.length();
			int end = resp.indexOf(GALLERY_KEY_END, start);
			if(end == -1) break;
			// build the absolute url from the href fragment
			String url = "http://www.flickr.com"+resp.substring(start, end);
			/* First make sure the url is in the right format (with regex).
			 * Then build the url for the image in the largest possible
			 * size (by adding "sizes/o/" to the end).
			 */
			Matcher m = PHOTO_PATTERN.matcher(url);
			if(m.find()) {
				photoUrls.add(m.group()+"sizes/o/");
				harvested++;
			}
		}
		Logger.logln(job.getID(), harvested+" Harvested");
		return true;
	}
	
	
	/**
	 * Uses the validation RegEx to check if a url points at a flickr set.
	 * 
	 * @param url the url to check
	 * @return url validity
	 */
	public static boolean isValidURL(String url) {
		return validateURL(VALIDATION_REGEX, url);
	}
}
