/**
 * FileFetcher.java
 * 
 * FreeZzaph is free software; you can redistribute it
 * and/or modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 3 of
 * the License, or (at your option) any later version.
 *
 * FreeZzaph is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.
 */
package freezzaph;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import java.net.URL;
import java.text.NumberFormat;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.PatternSyntaxException;

import freezzaph.exceptions.URLFetchException;
import freezzaph.plugins.URLFetcher;

/**
 * Class for fetching URLs and saving them to file.
 * 
 * @author FreeZzaph
 */
final class FileFetcher {

	/** Utility class; not instantiable. */
	private FileFetcher() {
	}

	/**
	 * Fetches URLs that match the given filter from the given URLs
	 * and stores them in the location given by the location parameter.
	 * Progress and errors are reported on {@code System.out}.
	 * 
	 * @param urls the URLs in which to look for files
	 * @param filter the kind of files to look for (a regular expression,
	 * as interpreted by {@link FileURLFetcher})
	 * @param location where to save the found files; created if it
	 * does not exist
	 * @throws PatternSyntaxException if the given filter is not a
	 * valid regular expression
	 */
	public static void fetch(URL[] urls, String filter, String location)
	throws PatternSyntaxException {
		PrintStream o = System.out;

		// If the desired storage location does not exist, create it.
		File folder = prepareFolder(location, o);
		if (folder == null) {
			return;
		}

		List<URL[]> urlList = new ArrayList<URL[]>();
		// Hosts that failed a fetch; further URLs on them are skipped.
		Set<String> blackList = new HashSet<String>();
		// Page URLs already visited, to avoid gathering twice.
		Set<String> seenPages = new HashSet<String>();
		FileURLFetcher fuf = new FileURLFetcher(filter);

		int linkCount = 0;

		// For each URL given...
		int urlCount = urls.length, currentUrl = 1;
		for (URL url : urls) {

			// Get hostname part of URL
			String host = url.getAuthority();

			// ...skip it if its host is blacklisted...
			if (blackList.contains(host)) {
				o.printf("Skipping blacklisted address. (%d/%d)\n", currentUrl, --urlCount);
				continue;
			}

			// ...or if it duplicates a previous URL
			// (Set.add returns false when already present).
			if (!seenPages.add(url.toString())) {
				o.printf("Skipping duplicate address. (%d/%d)\n", currentUrl, --urlCount);
				continue;
			}

			o.printf("Gathering URLs from %s (%d/%d)...", url.toString(), currentUrl, urlCount);
			try {
				// Get URLs that match the given filter
				URL[] fileURLs = fuf.getURLsFrom(url);
				o.printf("found %d.\n", fileURLs.length);
				linkCount += fileURLs.length;
				urlList.add(fileURLs);
				currentUrl++;
			} catch (URLFetchException urlfe) {
				o.printf("failed! Blacklisting address.\n");
				blackList.add(host);
				urlCount--;
			}
		}
		o.printf("Found a total of %d files to download.\n\n", linkCount);

		// For formatting the file numbering: zero-pad to the width
		// of the total link count, no grouping separators.
		NumberFormat nf = NumberFormat.getInstance();
		nf.setGroupingUsed(false);
		nf.setMinimumIntegerDigits(String.valueOf(linkCount).length());

		// For avoiding duplicate file downloads
		Set<String> seenFiles = new HashSet<String>();
		int fileNumber = 0;
		for (URL[] fileURLs : urlList) {
			for (URL url : fileURLs) {
				o.printf("Downloading %s (%d/%d)...", url.toString(), fileNumber + 1, linkCount);
				if (!seenFiles.add(url.toString())) {
					o.printf("duplicate! Skipping.\n");
					linkCount--;
					continue;
				}

				// Not a duplicate file: fetch the byte data of the URL.
				ByteArrayOutputStream baos;
				try {
					baos = URLFetcher.fetchBytes(url);
					o.printf("done.\n");
				} catch (URLFetchException e) {
					o.printf("failed!\n");
					linkCount--;
					continue;
				}

				// Create the filename: fzz<number>.<extension>
				String fileNum = nf.format(fileNumber++);
				File fileName = new File(folder, "fzz" + fileNum + "." + extensionOf(url.getPath()));

				// Write the bytes to file, always closing the stream.
				FileOutputStream fos = null;
				try {
					fos = new FileOutputStream(fileName);
					baos.writeTo(fos);
				} catch (IOException ioe) {
					o.printf("Error: Failed writing to file '%s'!\n", fileName);
				} catch (SecurityException se) {
					o.printf("Error: Access denied to file '%s'!\n", fileName);
				} finally {
					if (fos != null) {
						try {
							fos.close();
						} catch (IOException ignored) {
							// Best effort; the write error (if any) was already reported.
						}
					}
				}
			}
		}
	}

	/**
	 * Ensures the storage directory exists, creating it if necessary.
	 * 
	 * @param location the desired directory path
	 * @param o the stream to report errors on
	 * @return the directory as a {@link File}, or {@code null} if it
	 * could not be accessed or created (an error has been printed)
	 */
	private static File prepareFolder(String location, PrintStream o) {
		File folder = new File(location);
		try {
			// mkdirs() returns false on failure; treat that as an error
			// rather than silently continuing with a missing directory.
			if (!folder.exists() && !folder.mkdirs()) {
				// Pass the path as an argument, never as the format string:
				// a '%' in the path would otherwise throw at runtime.
				o.printf("Error: Could not access/create %s\n", folder.getAbsolutePath());
				return null;
			}
		} catch (SecurityException se) {
			o.printf("Error: Could not access/create %s\n", location);
			return null;
		}
		return folder;
	}

	/**
	 * Extracts the file extension from a URL path.
	 * 
	 * @param path the path component of a URL
	 * @return the text after the last {@code '.'} in the final path
	 * segment, or {@code "bin"} when the segment has no extension
	 */
	private static String extensionOf(String path) {
		int slash = path.lastIndexOf('/');
		int dot = path.lastIndexOf('.');
		// The dot must lie inside the final segment; otherwise there is
		// no extension (a dot in a directory name does not count).
		if (dot > slash) {
			return path.substring(dot + 1);
		}
		return "bin";
	}
}
