package core;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.io.StringReader;
import java.net.MalformedURLException;
import java.net.URL;
import java.security.NoSuchAlgorithmException;
import java.security.NoSuchProviderException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Date;
import java.util.List;
import java.util.ListIterator;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;

import org.bouncycastle.openpgp.PGPException;
import org.bouncycastle.openpgp.PGPPublicKey;
import org.bouncycastle.openpgp.PGPSecretKey;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;

import p2pmodule.Item;
import p2pmodule.P2PModule;

import com.sun.syndication.feed.synd.SyndContentImpl;
import com.sun.syndication.feed.synd.SyndEntryImpl;
import com.sun.syndication.feed.synd.SyndFeed;
import com.sun.syndication.io.FeedException;
import com.sun.syndication.io.SyndFeedInput;

import dustconf.DustConf;
import dustdispatcher.DispatchableTask;
import dustdispatcher.DustDispatcher;
import dustlog.LogController;
import events.AddedPublicKeyEvent;
import events.AddedPublicKeyListener;
import events.AddedSourceEvent;
import events.AddedSourceListener;
import events.FileDownloadedEvent;
import events.FileDownloadedListener;
import events.FileFoundEvent;
import events.FileFoundListener;
import events.UpdateCompleteEvent;
import events.UpdateCompleteListener;

/**
 * TODO: IMHO this class (the core of Dust, in fact) needs to be redesigned.
 * There are so many things that make this code ugly, hard to maintain and
 * obscure to understand that it would be much better for Dust to redo it
 * without hurry.
 */
/**
 * Class that represents a Blog.
 * 
 * @author ole
 */
public class Blog implements Serializable {
	// Class's constants.
	private static final long serialVersionUID = 1;
	public static final String BLOGEXT = ".xml";
	public static final String BLOGDUSTEXT = ".xml.dust";
	private static final long TIME2WAIT4UPDATE = 1 * 1000;
	private static final String NOAUTHOR_BLOG = "Post with no author.";
	
	// Class's attributes.
	private String title = null;
	private String owner = null;
	private List<URL> urls = null;
	
	// BEWARE!: This attribute should be always accessed in synchronized
	// blocks, many threads access and modify it so it is always created
	// wrapping it with Collections.synchronizedList() and the API specifies
	// that "It is imperative that the user manually synchronize on the
	// returned list when iterating over it". So if you plan to extend Dust
	// take specially care with this list.
	private List<Post> posts = null;
	
	// BEWARE!: This attribute should be always accessed using the synchronized
	// methods getContent() and setContent().
	private String feed_content;
	private Update_type last_update_type = Update_type.NO_UPDATE;
	private String channel = null;
	private List<String> pub_key_paths = null;
	private transient List<PGPPublicKey> pub_keys = null;
	private SyndFeed actual_feed = null;
	private int threads_updating = 0;
	private transient List<AddedPublicKeyListener> apk_listeners =
		new ArrayList<AddedPublicKeyListener>();
	private transient List<AddedSourceListener> as_listeners =
		new ArrayList<AddedSourceListener>();
	private transient List<UpdateCompleteListener> uc_listeners =
		new ArrayList<UpdateCompleteListener>();
	
	// Types of updates of a Blog (also stamped on each Post, see
	// internal_update()).
	public static enum Update_type {
		NO_UPDATE,	// The Blog has not been updated yet.
		HTTP,		// The last content came from an HTTP source (URL).
		P2P		// The last content came from the P2P network.
	}
	
	/*
	 * Private classes.
	 */
	
	/**
	 * This class are slaves created for the main thread executing update().
	 * 
	 * These slaves search a RSS where they are told to search (an url or in
	 * the P2P network) and retrieve the SyndFeed of the RSS found or if not
	 * found nothing they just finish.
	 */
	private class UpdateSlave extends DispatchableTask {
		// Class attributes.
		private URL url = null;
		private PGPPublicKey pub_key = null;
		private List<UpdateCompleteListener> callbacks;
		
		/*
		 * Private classes of UpdateSlave.
		 */
		
		/**
		 * Listener executed when an image file has been downloaded from the
		 * P2P network. It verifies that the downloaded file is correctly
		 * signed with the publisher's key, moves the signed file to the
		 * shared folder and rewrites the posts of the enclosing Blog so they
		 * reference the local copy of the image.
		 */
		private class WorkWhenImgDownloaded implements FileDownloadedListener {
			// Public key used to verify the signature of the downloaded file.
			private PGPPublicKey key;
			
			public WorkWhenImgDownloaded(PGPPublicKey key) {
				this.key = key;
			}
			
			@Override
			public void handleEvent(FileDownloadedEvent event) {
				String fname, abs_fname, img_hash, result_name;
				File fd;
				FileInputStream fis;
				int start, end;
				
				fname = Util.checkDirectoryPath(DustConf.getTempFolder()) +
						event.getFileName();
				
				// Log the action.
				LogController.log("Image " + fname + " downloaded.");
				
				try {
					fis = new FileInputStream(fname);
				} catch (FileNotFoundException e) {
					e.printStackTrace();
					return;
				}
				
				// The verified file keeps the downloaded name minus its
				// ".asc" extension.
				fd = new File(fname);
				result_name = Util.checkDirectoryPath(DustConf.getTempFolder());
				result_name += fd.getName();
				if (result_name.indexOf(".asc") > 0)
					result_name = result_name.substring(0, result_name.indexOf(".asc"));
				
				// Check that the image is correctly signed.
				try {
					if (KeyUtils.verifyFile(fis, key, result_name)) {
						// Log the action.
						LogController.log("Image " + fname + " well-signed.");
						
						// Move the dust file to the shared folder since it is
						// well-signed, so we start sharing it.
						if (!Util.moveFile(fname, DustConf.getSharedFolder()))
							System.err.println("[WARN]: Couldn't move file " +
											   fname + " to the shared " +
											   "directory.");
						
						// Get the absolute path to the image.
						fd = new File(fname);
						abs_fname = Util.checkDirectoryPath(DustConf.getTempFolder());
						abs_fname += fd.getName().replace(".asc", "");
						fd = new File(abs_fname);
						abs_fname = fd.getAbsolutePath();
						
						// Modify the posts of the Blog to make them reference
						// the image. The image hash is the part of the
						// filename between the last path separator and the
						// extension separator.
						start = abs_fname.lastIndexOf(System.getProperty("file.separator"));
						end = abs_fname.indexOf(Util.EXT_SEPARATOR);
						img_hash = abs_fname.substring(start + 1, end);
						synchronized(posts) {
							for (Post post: posts)
								post.modifyImgSrcFromAlt(img_hash, abs_fname);
						}
					}
					else
						// Log the action.
						LogController.log("Image " + fname + " BAD-SIGNED!!!");
				} catch (Exception e) {
					e.printStackTrace();
				} finally {
					// BUGFIX: the stream was never closed, leaking one file
					// descriptor per downloaded image.
					try {
						fis.close();
					} catch (IOException e) {
						// Nothing sensible left to do.
					}
				}
			}
		}
		
		/**
		 * Listener executed when an image is found in the P2P network: it
		 * triggers the download of the image unless we already have it.
		 */
		private class ImgFoundListener implements FileFoundListener {
			// Key that will be used to verify the image once downloaded.
			private PGPPublicKey key;
			
			public ImgFoundListener(PGPPublicKey key) {
				this.key = key;
			}
			
			@Override
			public void handleEvent(FileFoundEvent event) {
				// Get the item found.
				Item found = event.getItemFound();
				
				// Skip items that are already in the shared folder.
				if (fileAlreadyDownloaded(found.getName(),
										  DustConf.getSharedFolder()))
					return;
				
				// Log the action.
				LogController.log("Image " + found.getName() + " found.");
				
				// Download the image; the listener verifies it afterwards.
				P2PModule.downloadItem(found, new WorkWhenImgDownloaded(key));
			}
		}
		
		/**
		 * This class implements the callbacks needed to be called when an
		 * UpdateSlave has finished downloading a presumable feed from the P2P
		 * network. Here we will do a couple of things:
		 * 	- The file already downloaded is checked to confirm that it is
		 * 	  correctly signed.
		 *  - It is checked that the file is a valid RSS.
		 *  - The images it is referencing are searched and downloaded from the
		 *    P2P network.
		 *  - The RSS content is modified to make images be shown from the
		 *    images in disk.
		 *  - The Blog is updated with this new RSS.
		 */
		private class WorkWhenFeedDownloaded implements FileDownloadedListener {
			// Public key used to verify the signature of the downloaded feed.
			private PGPPublicKey key;
			
			public WorkWhenFeedDownloaded(PGPPublicKey key) {
				this.key = key;
			}
			
			@Override
			public void handleEvent(FileDownloadedEvent event) {
				String fname, content, result_name;
				File fd;
				FileInputStream fis;
				FileInputStream result_fis;
				Set<String> img_list;
				ImgFoundListener img_found;
				UpdateCompleteEvent update_event;
				List<Post> posts_added;
				byte byte_content[];
				int len;
				
				fname = Util.checkDirectoryPath(DustConf.getTempFolder()) +
						event.getFileName();
				
				// Log the action.
				LogController.log("File " + fname + " downloaded.");
				
				try {
					fis = new FileInputStream(fname);
				} catch (FileNotFoundException e) {
					e.printStackTrace();
					return;
				}
				
				// The verified file keeps the downloaded name minus its
				// ".asc" extension.
				fd = new File(fname);
				result_name = Util.checkDirectoryPath(DustConf.getTempFolder());
				result_name += fd.getName();
				if (result_name.indexOf(".asc") > 0)
					result_name = result_name.substring(0, result_name.indexOf(".asc"));
				
				// Check that the feed is correctly signed.
				try {
					if (KeyUtils.verifyFile(fis, key, result_name)) {
						// Log the action.
						LogController.log("File " + fname + " is well-signed.");
						
						// Move the dust file to the shared folder since it is
						// well-signed, so we start sharing it.
						if (!Util.moveFile(fname, DustConf.getSharedFolder()))
							System.err.println("[WARN]: Couldn't move file " +
											   fname + " to the shared " +
											   "directory.");
						
						fd = new File(result_name);
						result_fis = null;
						try {
							result_fis = new FileInputStream(fd);
							len = (int)fd.length();
							byte_content = Util.recoverDatafromInputStream(result_fis,
																		   len);
							content = new String(byte_content);
							posts_added = Blog.this.internal_update(content,
																	Update_type.P2P);
							
							// There are new posts: fetch their images and
							// notify the listeners.
							if (posts_added.size() > 0) {
								img_list = Util.getImagesFromFeedFile(result_name,
																	  "GNUTELLA");
								
								// Search and download all the images from the
								// P2P network.
								for (String img_name: img_list) {
									// Log the action.
									LogController.log("Searching image " +
													  img_name + ".");
									
									img_found = new ImgFoundListener(key);
									P2PModule.searchInName(img_name,
														   img_found);
								}
								
								// Raise the event since the Blog was really
								// updated. BUGFIX: the old nested
								// posts_added.size() > 0 check was redundant,
								// we are already inside that condition.
								update_event = new UpdateCompleteEvent(Blog.this);
								update_event.addNewPosts(posts_added);
								for (UpdateCompleteListener callback: UpdateSlave.this.callbacks)
									callback.handleEvent(update_event);
							}
						}
						catch (Exception e) {
							e.printStackTrace();
						} finally {
							// BUGFIX: close the stream over the verified file,
							// it was being leaked.
							if (result_fis != null)
								try {
									result_fis.close();
								} catch (IOException e) {
									// Nothing sensible left to do.
								}
						}
					}
					// The downloaded file is not well-signed.
					else
						// Log the action.
						LogController.log("File " + fname + " BAD-SIGNED!!!");
				} catch (Exception e) {
					e.printStackTrace();
				} finally {
					// BUGFIX: close the stream used for verification, it was
					// being leaked.
					try {
						fis.close();
					} catch (IOException e) {
						// Nothing sensible left to do.
					}
				}
			}
		}
		
		/**
		 * Listener executed when an UpdateSlave finds something in the P2P
		 * network: feeds matching this Blog's channel are downloaded.
		 */
		private class RSSFoundListener implements FileFoundListener {
			// Key that will be used to verify the feed once downloaded.
			private PGPPublicKey key;
			
			public RSSFoundListener(PGPPublicKey key) {
				this.key = key;
			}
	
			@Override
			public void handleEvent(FileFoundEvent event) {
				// Get the item found.
				Item found = event.getItemFound();
				
				// Not every item with the same hash is what we are looking
				// for: the same private key can sign different feeds, so we
				// only download items that carry the channel of this Blog in
				// their name. This is why, to subscribe to a Blog via P2P,
				// the public key alone is not enough: the channel name its
				// author gave to it is also needed.
				String escaped_channel = Util.cleanString(Blog.this.channel);
				if (!found.getName().contains(escaped_channel))
					return;
				
				// Skip items that are already in the shared folder.
				if (fileAlreadyDownloaded(found.getName(),
										  DustConf.getSharedFolder()))
					return;
				
				// Log the action.
				LogController.log("RSS " + found.getName() + " has " +
								  "been found.");
				
				// Download the feed; the listener processes it afterwards.
				P2PModule.downloadItem(found, new WorkWhenFeedDownloaded(key));
			}
		}
		
		/*
		 * Private methods of class UpdateSlave.
		 */
		
		/**
		 * Checks if a file with the same name already exists in the directory
		 * passed. Its purpose is to avoid downloading files we already have.
		 * 
		 * @param fname:
		 * 		The filename of the file we are checking for.
		 * @param dir:
		 * 		The directory name where we will look for the file.
		 * @return:
		 * 		True if the file exists in that directory; false otherwise, or
		 * 		when dir doesn't exist or isn't a directory.
		 */
		private boolean fileAlreadyDownloaded(String fname, String dir) {
			String base = Util.checkDirectoryPath(dir);
			File directory = new File(base);
			File candidate = new File(base + fname);
			
			// The file counts as downloaded only when the directory exists,
			// really is a directory and contains the file.
			return directory.exists() &&
				   directory.isDirectory() &&
				   candidate.exists();
		}
		
		/**
		 * Tries to update the Blog by searching the P2P network for feeds
		 * published with this slave's public key.
		 */
		private void updateViaP2P() {
			// The hash of the publisher's key is part of the name of every
			// feed it publishes, so that is what we search for. The listener
			// downloads whatever item matches.
			String hash_in_name = Util.keyHash(pub_key);
			RSSFoundListener downloadRSS = new RSSFoundListener(pub_key);
			
			// Log the action.
			LogController.log("Trying to update " + Blog.this.channel +
					  		  " via P2P, searching for " + hash_in_name + ".");
			
			// Perform the search.
			P2PModule.searchInName(hash_in_name, downloadRSS);
		}
		
		/**
		 * Tries to update the Blog by fetching the feed from this slave's
		 * URL and feeding its content to internal_update(). Registered
		 * UpdateCompleteListeners are notified when new posts were added.
		 */
		private void updateViaURL() {
			String content = null;
			
			// Log the action.
			LogController.log("Trying to update " + Blog.this.channel +
							  " via HTTP (" + url.toString() + ").");
			
			// Retrieve the content from the URL; content stays null on error.
			try {
				content = Util.getURLContent(url);
			}
			catch (Exception e) {
				e.printStackTrace();
			}
			
			// Log the result and bail out if nothing was retrieved.
			if (content == null) {
				LogController.log("Cannot retrieve content from " +
								  url.toString());
				return;
			}
			LogController.log("Content retrieved from " + url.toString());
			
			try {
				// Update the Blog with that content.
				List<Post> posts_added = internal_update(content,
														 Update_type.HTTP);
				
				// Raise the event if the Blog were really updated.
				if (!posts_added.isEmpty()) {
					UpdateCompleteEvent event = new UpdateCompleteEvent(Blog.this);
					event.addNewPosts(posts_added);
					for (UpdateCompleteListener callback: this.callbacks)
						callback.handleEvent(event);
				}
			}
			catch (Exception e) {
				e.printStackTrace();
			}
		}
		
		/*
		 * Public methods of class UpdateSlave.
		 */
		
		/**
		 * Class constructor for slaves that search in HTTP urls. The
		 * listeners to notify on completion are registered afterwards with
		 * addUpdateCompleteListener().
		 * 
		 * @param url:
		 * 		The URL where the slave must search.
		 */
		public UpdateSlave(URL url) {
			this.url = url;
			this.callbacks = new ArrayList<UpdateCompleteListener>();
			this.setName("Update slave with url");
		}
		
		/**
		 * Class constructor for slaves that search in the P2P network. The
		 * listeners to notify on completion are registered afterwards with
		 * addUpdateCompleteListener().
		 * 
		 * @param pub_key:
		 * 		The public key this slave will use to search in the P2P
		 * 		networks.
		 */
		public UpdateSlave(PGPPublicKey pub_key) {
			this.pub_key = pub_key;
			this.callbacks = new ArrayList<UpdateCompleteListener>();
			this.setName("Update slave with PublicKey");
		}
		
		/**
		 * Registers a handler to be notified when this slave completes an
		 * update of the Blog.
		 * 
		 * @param handler:
		 * 		The listener to register.
		 */
		public void addUpdateCompleteListener(UpdateCompleteListener handler) {
			this.callbacks.add(handler);
		}

		@Override
		public void run() {
			// Update depending in what we have: a non-null url means an HTTP
			// update, otherwise we search the P2P network.
			if (url != null)
				updateViaURL();
			else
				updateViaP2P();
			
			/** TODO: Debug here, setFinishUpdate() cannot be called here.
			 * Since updateViaP2P() is not blocking the thread executing this
			 * will probably execute it before threads doing all the work of
			 * a P2P update finish. **/
			// This thread has finish to do its job.
			setFinishUpdate();
			// NOTE(review): calling finalize() explicitly is a well-known
			// anti-pattern; presumably DispatchableTask relies on it for
			// cleanup — confirm and replace with an explicit dispose method.
			this.finalize();
		}
	}
	
	/*
	 * Private methods.
	 */
	
	/**
	 * Builds a SyndFeed from the content passed.
	 * 
	 * @param content:
	 * 		A string with the feed content.
	 * @return:
	 * 		The SyndFeed built from that content, or null if the content was
	 * 		not RSS compliant.
	 */
	private static SyndFeed createFeedFromContent(String content) {
		try {
			// Parse the string through ROME; build() throws when the content
			// is not a valid feed.
			return new SyndFeedInput().build(new StringReader(content));
		}
		catch (Exception e) {
			System.err.println("[WARN]: Content wasn't RSS compliant.");
			e.printStackTrace();
			return null;
		}
	}
	
	/**
	 * Creates a suitable name of the form
	 * "<channel>-<timestamp>-<hash(Key)>".
	 * 
	 * @param key
	 * 		The key needed to calculate the hash.
	 * @return
	 * 		The composed name.
	 * @throws IOException 
	 * @throws NoSuchProviderException 
	 * @throws NoSuchAlgorithmException 
	 */
	private String createName(PGPPublicKey key) throws NoSuchAlgorithmException,
	NoSuchProviderException, IOException {
		StringBuilder name = new StringBuilder(Util.cleanString(channel));
		name.append(Util.NAME_SEPARATOR).append(Util.getStrTimeStamp());
		name.append(Util.NAME_SEPARATOR).append(Util.getHashFromKey(key));
		return name.toString();
	}
	
	/**
	 * Checks whether a file whose name contains the specified hash exists in
	 * the given directory. This avoids downloading the same image twice when
	 * it is used in more than one post. We cannot look for an exact name:
	 * image names are generated from their hash plus extension, and the
	 * extension is only known after downloading, so any file containing the
	 * hash in its name is assumed to be the image (not a 100% guarantee).
	 * 
	 * @param path
	 * 		Path to look for if the image is already downloaded. 
	 * @param hashname
	 * 		Hash to look for in the name of the files inside the specified
	 * 		path. 
	 * @return
	 * 		True if a file with the given hashname is found inside the path.
	 * @throws Exception 
	 */
	private boolean imageAlreadyDownloaded(String path, String hashname)
	throws Exception {
		File dir = new File(Util.checkDirectoryPath(path));
		String[] entries = dir.list();
		
		// list() returns null when the path is not a readable directory.
		if (entries == null)
			throw new RuntimeException(path + " is not a directory.");
		
		// Any entry carrying the hash in its name is assumed to be the image.
		for (String entry: entries)
			if (entry.contains(hashname))
				return true;
		return false;
	}
	
	/**
	 * This function makes all the work to update this Blog from the content
	 * passed. It is executed by each UpdateSlave created when update() was
	 * called so it is synchronized. The content passed could not be RSS
	 * compliant, the string passed is just that, a string.
	 * 
	 * @param content:
	 * 		A string that presumably has a RSS.
	 * @param via:
	 * 		Tells from where the content had been retrieved in order to mark it
	 * 		in the Posts and the Blog.
	 * @return:
	 * 		A List with the added Posts.
	 * @throws Exception:
	 * 		If the content passed is not RSS compliant.
	 * @throws IOException:
	 * 		If there are I/O problems.
	 */
	@SuppressWarnings("unchecked")
	private synchronized List<Post> internal_update(String content,
													Update_type via)
	throws Exception {
		Date feed_date, actual_feed_date, post_date;
		List<SyndEntryImpl> post_entries;
		ListIterator<SyndEntryImpl> backwards_iter;
		SyndFeed feed;
		SyndEntryImpl post_entry;
		SyndContentImpl synd_content;
		String post_title, post_author, post_content;
		URL post_url;
		Post post;
		List<Post> posts_added;
		
		// First of all try to build a SyndFeed from that content. If we cannot
		// build it it may be caused because content is not a real RSS, finish.
		feed = createFeedFromContent(content);
		if (feed == null)
			throw new Exception("Cannot create feed from that content.");
		
		posts_added = new ArrayList<Post>();
		
		// Here a little trick, when the blog is just created or recovered
		// actual_feed is null, we assume that the "actual_feed" (which in
		// fact doesn't exists) published date was The Epoch. Don't try to play
		// with feeds published before this date :D.
		feed_date = feed.getPublishedDate();
		if (actual_feed != null)
			actual_feed_date = actual_feed.getPublishedDate();
		else
			actual_feed_date = new Date(0);
		
		// Check if this feed is newer than the actual one and if so update
		// the blog. We also check that the feed has a publication day, some
		// feeds doesn't set this attribute, if it doesn't we consider this
		// feed newer, what else can we do!? :(
		if (feed_date == null ||
			feed_date.after(actual_feed_date) ||
			feed_date.equals(actual_feed_date)) {
			
			// The title and owner weren't initialized at construction time
			// because we didn't get the feed until this update() is invoked.
			// Neither the actual_feed nor the feed_content.
			title = feed.getTitle();
			owner = feed.getAuthor() != null? feed.getAuthor(): NOAUTHOR_BLOG;
			actual_feed = feed;
			last_update_type = via;
			setContent(content);
			
			// Mark all the Posts as archived, new posts will be marked as
			// actual and if there were Posts in common between the last feed
			// and this new feed those Posts will be marked as actual again.
			setAllPostsAsArchived();
			/** TODO: Right now we change every icon and then change it again
			 * if it is in the feed. This waste a lot of CPU time. Although it
			 * isn't a big waste of time it would be much nicer if optimized.
			 * **/
			
			// Get the entries (posts) of this feed and add them to the list of
			// posts the Blog instance maintains. The entries are traversed
			// backwards to have the list ordered. This probably could be done
			// with sorted list or something like that (future improvements)...
			post_entries = feed.getEntries();
			backwards_iter = post_entries.listIterator(post_entries.size());
			while (backwards_iter.hasPrevious()) {
				post_entry = backwards_iter.previous();
				
				// Check if the post already exists in the list and don't put it
				// again but mark it as actual.
				post = postAlreadyExists(post_entry.getTitle());
				if (post != null) {
					post.setActual();
					continue;
				}
				
				post_date = post_entry.getPublishedDate();
				post_title = post_entry.getTitle();
				post_author = post_entry.getAuthor();
				if (post_entry.getLink() != null)
					post_url = new URL(post_entry.getLink());
				else
					post_url = null;
				
				// Here we have a bit of magic. Posts content are intended to be
				// retrieved with the getContents().get(0) and getDescription()
				// returns just the description (the first few lines of the post),
				// but some blogs has the content empty and the whole post content
				// is in its description. In order to make this as reliable as
				// possible we try to take the content from the
				// getContents().get(0) but if it is empty (because getContents()
				// returns null or because the list is empty), we get the
				// description and use it as content. Remember, this is a bad use
				// (although correct) of the RSS specification but you can find
				// blogs doing it.
				// There could be also another situation, the feed don't has
				// content, just description. Usually those kind of blogs use
				// to has one feed just with descriptions and another one with
				// the whole content (i.e.: Schneier on security).
				if (post_entry.getContents() != null &&
					!post_entry.getContents().isEmpty()) {
					synd_content = (SyndContentImpl)post_entry.getContents().get(0);
					post_content = synd_content.getValue();
				}
				else
					post_content = post_entry.getDescription().getValue();
				
				post = new Post(post_date, post_title, post_url,
								post_author, post_content);
				// BUGFIX: setUpdateType() was called twice here, once with
				// via and once with last_update_type, which hold the same
				// value at this point (last_update_type = via above). A
				// single call is enough.
				post.setUpdateType(via);
				post.setActual();
				posts.add(0, post);
				posts_added.add(0, post);
			}
		}
		
		return posts_added;
	}
	
	/**
	 * Loads the public keys referenced by the list of public key paths into
	 * the list of public keys.
	 * 
	 * @throws PGPException 
	 * @throws IOException 
	 */
	private void loadPubKeys() throws IOException, PGPException {
		// The list is transient, so it may need to be (re)created first.
		if (pub_keys == null)
			pub_keys = new ArrayList<PGPPublicKey>();
		
		// Recover every key from its path on disk.
		for (String path: pub_key_paths)
			pub_keys.add(KeyUtils.recoverPublicKey(path));
	}
	
	/**
	 * Checks if a post already exists inside this blog — that is, a post
	 * with the same title — and returns it.
	 * 
	 * @param post_title
	 * 		A string with the post_title to search for.
	 * @return
	 * 		The Post with the title passed or null if there is no Post with
	 * 		that title.
	 */
	private Post postAlreadyExists(String post_title) {
		// posts is a synchronized list: manual locking is mandatory while
		// iterating over it.
		synchronized(posts) {
			for (Post candidate: posts) {
				String existing_title = candidate.getTitle();
				if (existing_title != null &&
					existing_title.equals(post_title))
					return candidate;
			}
		}
		return null;
	}
	
	/**
	 * Sets all the Posts of this Blog as archived. Called from
	 * internal_update() before processing a fresh feed; posts still present
	 * in the new feed are marked actual again afterwards.
	 */
	private void setAllPostsAsArchived() {
		// posts is a synchronized list: manual locking is mandatory while
		// iterating over it.
		synchronized(posts) {
			for (Post post: posts)
				post.setArchived();
		}
	}
	
	/**
	 * Sets the feed_content attribute. This method will be called from the
	 * UpdateSlaves so it needs to be synchronized.
	 * 
	 * @param content:
	 * 		The content to be set (may be null, as the constructors do).
	 */
	private synchronized void setContent(String content) {
		feed_content = content;
	}
	
	/**
	 * This method must be called when a thread trying to update this Blog
	 * finishes its work. Counterpart of setStartUpdate().
	 */
	private synchronized void setFinishUpdate() {
		// Decrease the number of threads that are trying to update this blog.
		threads_updating--;
	}
	
	/**
	 * This method must be called when a Blog is starting to be updated.
	 * 
	 * @param n_threads:
	 * 		The number of threads that will try to update the Blog. Each one
	 * 		is expected to call setFinishUpdate() when done.
	 */
	private synchronized void setStartUpdate(int n_threads) {
		threads_updating = n_threads;
	}
	
	/**
	 * Saves the feed of the blog into a file.
	 * 
	 * @param fname
	 * 		The filename of the resulting file.
	 * @param doc
	 * 		The document to be written. In fact, this method will write to disk
	 * 		any type of document, not only XML documents.
	 * @throws TransformerException 
	 */
	private void writeFeed2File(String fname, Document doc)
	throws TransformerException {
		// An identity transform from the DOM source into the target file.
		Transformer transformer =
			TransformerFactory.newInstance().newTransformer();
		transformer.transform(new DOMSource(doc), new StreamResult(fname));
	}
	
	/*
	 * Class's methods.
	 */

	/**
	 * No-args class constructor. It is implemented in order to make possible
	 * the serialization of a Blog. To understand in depth this reasons read
	 * the documentation of the Java Serializable interface. 
	 */
	public Blog() {
		// posts must always be a synchronized list, see the field's comment.
		posts = Collections.synchronizedList(new ArrayList<Post>());
		setContent(null);
	}

	/**
	 * Creates a Blog object from a given URL.
	 * 
	 * @param url
	 * 		The URL to the feed.
	 * @param channel
	 * 		The channel name of this Blog.
	 * @throws IOException 
	 * @throws FeedException 
	 * @throws IllegalArgumentException 
	 */
	public Blog(URL url, String channel) throws IllegalArgumentException,
	FeedException, IOException {
		// posts must always be a synchronized list, see the field's comment.
		posts = Collections.synchronizedList(new ArrayList<Post>());
		urls = new ArrayList<URL>();
		pub_keys = new ArrayList<PGPPublicKey>();
		pub_key_paths = new ArrayList<String>();
		urls.add(url);
		this.channel = channel;
		setContent(null);
	}
	
	/**
	 * Create a blog from a feed contained in a file.
	 * 
	 * Just to clarify, this method expects the file passed to be a XML file,
	 * while the createBlog() method expects a serialized Blog object created
	 * with the save() method. Check its documentation.
	 * 
	 * @param feed_file
	 * 		Path to the XML feed file.
	 * @param channel
	 * 		The channel name of this Blog.
	 * @throws Exception 
	 **/
	public Blog(String feed_file, String channel) throws Exception {
		File fd;
		FileInputStream fis;
		byte [] ba_content;
		int feed_len;
		
		fd = new File(feed_file);
		if (!fd.exists())
			throw new RuntimeException("File " + feed_file +
									   " doesn't exists.");
		
		// Read the content of the file and store it.
		fis = new FileInputStream(fd);
		try {
			feed_len = (int)fd.length();
			ba_content = Util.recoverDatafromInputStream(fis, feed_len);
		} finally {
			// BUGFIX: the stream was never closed before.
			fis.close();
		}
		
		// Fill the fields.
		this.channel = channel;
		posts = Collections.synchronizedList(new ArrayList<Post>());
		urls = new ArrayList<URL>();
		pub_keys = new ArrayList<PGPPublicKey>();
		pub_key_paths = new ArrayList<String>();
		internal_update(new String(ba_content), last_update_type);
	}
	
	/**
	 * Adds a new handler for the event AddedPublisherEvent.
	 * 
	 * @param handler:
	 * 		The handler that will be executed when an AddedPublisherEvent is
	 * 		raised.
	 */
	public void addAddedPublisherListener(AddedPublicKeyListener handler) {
		// The listener list is transient, so it may need to be (re)created
		// before registering the handler.
		List<AddedPublicKeyListener> listeners = apk_listeners;
		if (listeners == null) {
			listeners = new ArrayList<AddedPublicKeyListener>();
			apk_listeners = listeners;
		}
		listeners.add(handler);
	}
	
	/**
	 * Adds a new handler for the event AddedSourceEvent.
	 * 
	 * @param handler:
	 * 		The handler that will be executed when an AddedSourceEvent is
	 * 		raised.
	 */
	public void addAddedSourceListener(AddedSourceListener handler) {
		// The listener list is created lazily on the first registration.
		if (as_listeners == null) {
			as_listeners = new ArrayList<AddedSourceListener>();
		}
		as_listeners.add(handler);
	}
	
	/**
	 * Add a new public key to the Blog.
	 * 
	 * @param key:
	 * 		The public key to be added.
	 * @param path:
	 * 		The path to the key.
	 */
	public void addPublicKey(PGPPublicKey key, String path) {
		AddedPublicKeyEvent event;
		
		// Add the key and its path to this Blog.
		pub_keys.add(key);
		pub_key_paths.add(path);
		
		// Raise the event. apk_listeners is created lazily by
		// addAddedPublisherListener(), so it is null until the first
		// listener registers; the original code threw a NullPointerException
		// in that case.
		if (apk_listeners == null)
			return;
		event = new AddedPublicKeyEvent(this, key);
		for (AddedPublicKeyListener handler: apk_listeners)
			handler.handleEvent(event);
	}
	
	/**
	 * Adds a new handler for when a blog update is completed.
	 * 
	 * @param handler:
	 * 		The handler to be executed when this event is raised.
	 */
	public void addUpdateCompleteListener(UpdateCompleteListener handler) {
		// Silently ignore null handlers; non-null ones join uc_listeners.
		if (handler != null) {
			uc_listeners.add(handler);
		}
	}
	
	/** TODO: Right now it is possible adding the same source more than
	 * once... maybe this should be checked and avoided. **/
	/**
	 * Adds a new URL to the possible sources for this Blog.
	 * 
	 * @param url:
	 * 		The URL to add.
	 */
	public void addUrl(URL url) {
		AddedSourceEvent event;
		
		// Add the new source.
		urls.add(url);
		
		// Raise the event. as_listeners is created lazily by
		// addAddedSourceListener(), so it is null until the first listener
		// registers; the original code threw a NullPointerException in that
		// case.
		if (as_listeners == null)
			return;
		event = new AddedSourceEvent(this, url);
		for (AddedSourceListener handler: as_listeners)
			handler.handleEvent(event);
	}
	
	/**
	 * Make a Blog object from a file that contains a previously saved one.
	 * 
	 * Note that this method is intended to be used with the files created by
	 * the save() method.
	 * 
	 * @param fname
	 * 		The filename of the file from where the Blog will be restored.
	 * @throws IOException 
	 * @throws ClassNotFoundException 
	 * @throws PGPException 
	 */
	public static Blog createBlog(String fname)
	throws IOException, ClassNotFoundException, PGPException {
		File fd = new File(fname);
		FileInputStream fis;
		ObjectInputStream ois;
		Blog blog_restored;

		// Check if file exists and we can read from it.
		if (!fd.exists())
			throw new RuntimeException("File " + fname + " doesn't exist.");
		
		// Check if we have read access.
		if (!fd.canRead())
			throw new RuntimeException("No read access for file " + fname);
		
		// Recover the blog from the file. Close the streams even when
		// deserialization or key loading throws (the original leaked them
		// on the error path).
		// NOTE(review): Java native deserialization of a file is unsafe if
		// the file can come from an untrusted party — confirm these files
		// are always locally produced by save().
		fis = new FileInputStream(fd);
		ois = new ObjectInputStream(fis);
		try {
			blog_restored = (Blog)ois.readObject();
			
			// Try to load the public keys referenced by the list of public
			// key paths.
			blog_restored.loadPubKeys();
		}
		finally {
			ois.close();
			fis.close();
		}
		
		return blog_restored;
	}
	
	/**
	 * Each Blog maintains two sorts of Posts, the "actual" ones and the
	 * "archived" ones. The actual Posts are those Posts that can be published
	 * because we have the feed were they are. The archived Posts are those
	 * Posts that, despite we have them, we cannot publish them because we
	 * haven't the feed where they are. That is because the Blog class
	 * maintains the feed content of the last update but also all the Posts it
	 * has contained. In the feed it just has a subset of all the Posts this
	 * Blog has.
	 * 
	 * @return:
	 * 		A list containing the Posts that are considered actuals.
	 */
	public List<Post> getActualPosts() {
		List<Post> result = new ArrayList<Post>();
		
		// Snapshot the actual posts under the list's lock so a concurrent
		// update cannot interleave with the iteration.
		synchronized(posts) {
			for (Post candidate: posts) {
				if (candidate.isActual()) {
					result.add(candidate);
				}
			}
		}
		
		return result;
	}
	
	/**
	 * This method search inside the feed content commentaries with other
	 * HTTP sources. Those commentaries must be with the format
	 * <!--httpsource="<URL>"-->, where <URL> is the url where the feed for
	 * this Blog can be also found.
	 * 
	 * @return:
	 * 		A list of strings with the URLs of the sources found.
	 */
	public List<String> getAlternativeSources() {
		// Commentaries of the form <!--httpsource="<URL>"--> embed extra
		// HTTP sources inside the feed; capture group 1 is the URL itself.
		Matcher matcher = Pattern
				.compile("<!--httpsource=\"([^\"]+)\"-->")
				.matcher(feed_content);
		List<String> sources = new ArrayList<String>();
		
		while (matcher.find())
			sources.add(matcher.group(1));
		
		return sources;
	}
	
	/**
	 * Returns a list with the Post maintained by this Blog that are considered
	 * archived as described by the documentation of the method
	 * getActualPosts().
	 * 
	 * @return:
	 * 		A list containing the Posts that are considered archived.
	 */
	public List<Post> getArchivedPosts() {
		List<Post> result = new ArrayList<Post>();
		
		// Snapshot the archived posts under the list's lock so a concurrent
		// update cannot interleave with the iteration.
		synchronized(posts) {
			for (Post candidate: posts) {
				if (candidate.isArchived()) {
					result.add(candidate);
				}
			}
		}
		
		return result;
	}
	
	/**
	 * Retrieve a list containing the authors of this blog.
	 * 
	 * @return
	 * 		A list with authors names. 
	 */
	public List<String> getAuthors() {
		// A LinkedHashSet deduplicates in O(1) per element while keeping
		// first-seen order, so the result is identical to the original
		// contains()-scan version but without its O(n^2) cost. The class is
		// fully qualified to avoid touching the file's import block.
		Set<String> seen = new java.util.LinkedHashSet<String>();
		
		synchronized(posts) {
			for (Post post: posts)
				seen.add(post.getAuthor());
		}
		
		return new ArrayList<String>(seen);
	}
	
	/**
	 * Returns the channel of the blog.
	 * 
	 * @return
	 * 		A string with the channel's name.
	 */
	public String getChannel() {
		// The channel name is assigned at construction time.
		return this.channel;
	}
	
	/**
	 * Returns the content of the feed. Since this Blog could be being
	 * updated concurrently, this method is synchronized.
	 * this method is synchronized.
	 * 
	 * @return
	 * 		The XML of the content.
	 */
	public synchronized String getContent() {
		// Synchronized so a concurrent update cannot hand back a feed that
		// is in the middle of being replaced.
		return this.feed_content;
	}
	
	/**
	 * Returns the owner of the blog. This method should be called after one
	 * of the update() methods had been called.
	 * 
	 * @return
	 * 		Owner's name.
	 */
	public String getOwner() {
		// Filled in by the update process; may be unset before any update().
		return this.owner;
	}

	/**
	 * Method that returns the nth post of the blog.
	 * 
	 *  @param n
	 *  	The nth newer post.
	 *  @return
	 *  	The nth Post object.
	 */
	public Post getPost(int n) {
		synchronized(posts) {
			// Use the standard exception type for bad indices; it is still
			// a RuntimeException, so any existing catch blocks keep working,
			// and the message now includes the offending index.
			if (n < 0 || n >= posts.size())
				throw new IndexOutOfBoundsException(
						"Post index " + n + " out of bounds (size " +
						posts.size() + ").");
			
			return posts.get(n);
		}
	}
	
	/**
	 * Returns the index inside this Blog of that post, this value is a int
	 * starting at 0. If the post is not contained inside this Blog -1 is
	 * returned.
	 * 
	 * @param post
	 * 		The post to be searched.
	 * @return
	 * 		An integer with the index of that post inside this Blog, -1 if the
	 * 		post isn't inside this blog.
	 */
	public int getPostIndex(Post post) {
		// indexOf() already yields -1 when the post is absent, which is
		// exactly the contract documented above.
		synchronized(posts) {
			return posts.indexOf(post);
		}
	}
	
	/**
	 * Method that returns a list of Posts objects representing the posts of
	 * the actual blog.
	 * 
	 * BEWARE!: It is returning the own list, don't modify it!.
	 * 
	 * @return
	 * 		A list with the posts of this blog.
	 */
	public List<Post> getPosts() {
		// Hands back the internal list itself (see the BEWARE note above);
		// callers must not modify it.
		synchronized(posts) {
			return this.posts;
		}
	}

	/**
	 * Retrieves posts published by an author.
	 * 
	 * @param author
	 * 		A string with the name of the author.
	 * @return
	 * 		A list containing the author's posts.
	 */
	public List<Post> getPostsByAuthor(String author) {
		List<Post> matches = new ArrayList<Post>();
		
		// Collect posts whose author matches, holding the list's lock so a
		// concurrent update cannot interleave with the iteration.
		synchronized(posts) {
			for (Post candidate: posts) {
				if (candidate.getAuthor().equals(author)) {
					matches.add(candidate);
				}
			}
		}
		
		return matches;
	}
	
	/**
	 * Returns the list of public keys associated to this Blog.
	 * 
	 * @return:
	 * 		A List of PublicKey instances.
	 */
	public List<PGPPublicKey> getPublicKeys() {
		// Hands back the internal list itself; callers should not modify it.
		return this.pub_keys;
	}
	
	/**
	 * Returns the title of this blog.  This method should be called after one
	 * of the update() methods had been called.
	 * 
	 * @return
	 * 		Blog's title.
	 */
	public String getTitle() {
		// Filled in by the update process; may be unset before any update().
		return this.title;
	}
	
	/**
	 * Returns the type of the last update.
	 * 
	 * @return
	 * 		One of the Update_type enum values.
	 */
	public Update_type getUpdateType() {
		// Records which kind of source (see Update_type) fed the last update.
		return this.last_update_type;
	}

	/**
	 * Returns the first URL that was specified at the creation of this blog
	 * where the feed can be found. Note that this method returns just one URL
	 * but this blog could had much more URLs as possible sources.
	 * 
	 * @return
	 * 		Blog's URL.
	 */
	public URL getUrl() {
		// The first registered source is the one given at construction time.
		return this.urls.get(0);
	}
	
	/**
	 * Returns the list of possible sources for this Blog.
	 * 
	 * @return
	 * 		A list of URLs.
	 */
	public List<URL> getUrls() {
		// Hands back the internal list itself; callers should not modify it.
		return this.urls;
	}
	
	/**
	 * Checks if this Blog is being updated.
	 * 
	 * @return:
	 * 		True if this Blog has threads trying to update it or false
	 * 		elsewhere.
	 */
	public boolean isBeingUpdate() {
		// threads_updating counts the UpdateSlaves still running for this
		// blog; any positive value means an update is in progress.
		return threads_updating > 0;
	}
	
	/**
	 * Convenience method to publish all the posts contained in this blog. It
	 * just iterate over each post calling the publishPostZipped() method.
	 * 
	 * @param path:
	 * 		The path where the zip should be put.
	 * @param key:
	 * 		The private key used to sign the posts.
	 * @return
	 * 		A list with the names of the files created.
	 * @throws InvalidKeyException
	 * @throws NoSuchAlgorithmException
	 * @throws NoSuchProviderException
	 * @throws SignatureException
	 * @throws IOException
	 * @throws BadLocationException
	 */
//	public List<String> publishAllPostsZipped(String path, PrivateKey key)
//	throws InvalidKeyException, NoSuchAlgorithmException,
//	NoSuchProviderException, SignatureException, IOException,
//	BadLocationException {
//		List<String> result = new ArrayList<String>();
//		
//		synchronized(posts) {
//			for (int i = 0; i < posts.size(); i++)
//				result.add(publishPostZipped(i, path, key));
//		}
//		
//		return result;
//	}
	
	/**
	 * Publish the Blog. This will create inside the specified path all the
	 * images of the Blog dusterized (signed) and the XML file with the feed
	 * also dusterized.
	 * 
	 * This method expects that the Blog has something, since when a Blog is
	 * created it is empty, this method expects that the method update()
	 * has been called before calling this one.
	 * 
	 * Just the Posts with state Post_state.ACTUAL will be published.
	 * 
	 * @param path:
	 * 		The directory where the files must be created.
	 * @param secr_key:
	 * 		The secret key used to sign the Blog.
	 * @param pub_key:
	 * 		The public key associated with the private key passed that will
	 * 		be used to generate the hash needed for the name of the feed
	 * 		once dusterized.
	 * @param pass:
	 * 		The password needed to access the private key inside the secret
	 * 		key.
	 * @throws Exception
	 */
	public void publishBlog(String path, PGPSecretKey secr_key,
							PGPPublicKey pub_key, String pass)
	throws Exception {
		String checked_path = Util.checkDirectoryPath(path);
		String fname, fname_image, mangled_post_content;
		DocumentBuilderFactory dbf;
		DocumentBuilder db;
		Document doc;
		NodeList item_nodes, item_children_nodes;
		Node descr_node, item_node, child_node, title_node;
		InputSource is;
		BufferedReader br;
		File fd;
		
		// Check if the content had been loaded.
		if (feed_content == null)
			throw new RuntimeException("Blog's content hasn't been loaded.");
		
		// Creates the Document object with the content and the nodes that
		// represent each post.
		// NOTE(review): this parser is built with default settings — DTD and
		// external entity processing are not disabled, so it is XXE-prone if
		// the feed content can come from an untrusted source. Confirm.
		br = new BufferedReader(new StringReader(feed_content));
		is = new InputSource(br);
		dbf = DocumentBuilderFactory.newInstance();
		db = dbf.newDocumentBuilder();
		doc = db.parse(is);
        
        // Get from the whole XML document of the feed just those "item" nodes
        // (the posts).
        item_nodes = doc.getElementsByTagName("item");
        for (int i = 0; i < item_nodes.getLength(); i++) {
        	item_node = item_nodes.item(i);
        	item_children_nodes = item_node.getChildNodes();
        	
        	// Search in the children Elements of the "item" node the
        	// title and description
        	title_node = null;
        	descr_node = null;
        	for (int j = 0; j < item_children_nodes.getLength(); j++) {
        		child_node = item_children_nodes.item(j);
        		
        		// Is the "description" node?
        		if (child_node.getNodeName().equals("description"))
        			descr_node = child_node;
        		// Is the "title" node?
        		else if (child_node.getNodeName().equals("title"))
        			title_node = child_node;
        		
        		// Once we have both items (title and description) get out
        		// of the loop.
        		if (descr_node != null && title_node != null)
        			break;
        	}
        	
        	// We have (maybe) the title and description of an actual post,
        	// we will use the title as an identifier to search for the Post
        	// related with this node and the description will be changed a
        	// bit.
        	
        	// Check that this post has title. This is restrictive, RSS
        	// specification let an "item" not to has title, so in the wild we
        	// can find valid posts with no title or description at all...
        	// Sorry.
        	if (title_node == null || descr_node == null)
        		continue;
        	
        	// Match the feed "item" to its Post object by title.
    		for (Post post: getActualPosts()) {
    			if (title_node.getTextContent().equals(post.getTitle())) {
    				// Download each image of the post and dusterize it.
    				try {
    					post.loadImages();
    				}
    				catch (RuntimeException e) {
    					// NOTE(review): only the "Read time out" case skips
    					// this post; any other RuntimeException is silently
    					// swallowed and publishing proceeds with whatever
    					// images did load. Also, e.getMessage() may be null
    					// here, which would throw NPE. Confirm intent.
    					if (e.getMessage().equals("Read time out"))
    						continue;
    				}
    				
    				for (Img image: post.getImages()) {
    					
    					// Don't download images we already have.
    					if (imageAlreadyDownloaded(checked_path,
    											   image.getHash()))
    						continue;
    					
    					// Create the name of this image: <path><hash>.<type>
    					fname_image = checked_path + image.getHash() +
    					  			  Util.EXT_SEPARATOR + image.getType();
    					
    					// Log the action.
    					LogController.log("Dusterizing image " + fname_image);
    					
    					// Save and dusterize the image.
    					image.save2File(fname_image);
    					KeyUtils.signFile(fname_image, secr_key, pass);
    					
    					// Remove the unsigned temporary image file.
    					fd = new File(fname_image);
    					fd.delete();
    				}
    				
    				// Modify this post content to make it have the alt
    				// attribute of each img tag contain the hash of the image
    				// and to clean the src of each img tag.
    				post.mangleAlts();
    				mangled_post_content = post.cleanSrcs();
    				descr_node.setTextContent(mangled_post_content);
    			}
        	}
        }

		// Create the name of the RSS file that will be dusterized.
		fname = checked_path + createName(pub_key) + Util.EXT_SEPARATOR +
				Util.XMLEXTENSION;
		
		// Log the action.
		LogController.log("Dusterizing blog to file " + fname);
		
		// Write the feed to a file and dusterize it.
		writeFeed2File(fname, doc);
		KeyUtils.signFile(fname, secr_key, pass);
		
		// Delete the file not signed.
		fd = new File(fname);
		fd.delete();
	}
	
//	/**
//	 * Publish the specified post. This will create a zip file dusterized
//	 * (signed) containing the html of the post and all the images.
//	 * 
//	 * @param post_index
//	 * 		The index of the post that will be published.
//	 * @param path
//	 * 		The path where the zip should be put.
//	 * @param pub
//	 * 		The Publisher object that will sign the zip.
//	 * @return
//	 * 		Returns a string with the complete name of the file created, the
//	 * 		path and the filename.
//	 * @throws IOException 
//	 * @throws NoSuchProviderException 
//	 * @throws NoSuchAlgorithmException 
//	 * @throws BadLocationException 
//	 * @throws SignatureException 
//	 * @throws InvalidKeyException 
//	 */
//	public String publishPostZipped(int post_index, String path,
//									PrivateKey key)
//	throws NoSuchAlgorithmException, NoSuchProviderException, IOException,
//	BadLocationException, InvalidKeyException, SignatureException {
//		String name, zipname, checked_path;
//		Post post;
//		
//		synchronized(posts) {
//			post = posts.get(post_index);
//		}
//		
//		checked_path = Util.checkDirectoryPath(path);
//		name = createName(key);
//		zipname = post.createZip(checked_path, name);
//		Util.dusterize(zipname, key);
//		return zipname + Util.DUSTEXTENSION;
//	}
	
	/**
	 * TODO: Right now a post must has an "ARCHIVED" state to be removed. This
	 * is a bit restrictive. The reason for this is that blog is maintaining
	 * a list of posts with all the posts it has ever had, and also a feed
	 * content where it has just the last feed it downloaded (P2P or HTTP).
	 * Removing an "ARCHIVED" post is easy because it is only in the list so we
	 * just remove it and work done, but removing it if it is "ACTUAL" is quite
	 * complicated because it must be removed from the feed content. Although
	 * it is possible I have no time right now to do it.
	 */
	/**
	 * Method that tries to remove a Post from this Blog. The Post will be
	 * removed if it exists inside this Blog and its state is "ARCHIVED".
	 * 
	 * @param post:
	 * 		The Post to be removed.
	 * @return:
	 * 		True if the Post was removed, false if it cannot be removed
	 * 		because any of the previous conditions is not met.
	 */
	public boolean removePost(Post post) {
		// Only archived posts may be removed (see the TODO above); for
		// anything else the call is a no-op that reports failure. The &&
		// short-circuits, so remove() only runs for archived posts.
		return post.isArchived() && posts.remove(post);
	}
	
	/** TODO: Since the addPubliKey() method permits add multiple times the
	 * same key, it could happend that we want to remove a key and remove
	 * another one. That maybe doesn't affects but at least I want to point it
	 * out (I haven't tried it).
	 */
	/**
	 * Removes the key passed from the key sources of this blog.
	 * 
	 * @param key:
	 * 		A String representing the key to be removed.
	 * @return:
	 * 		True if the PublicKey were removed, false elsewhere.
	 */
	public boolean removePublicKey(String key) {
		// Match keys by their string representation and drop the first hit.
		// Returning immediately after remove() avoids a
		// ConcurrentModificationException on the for-each iterator.
		for (PGPPublicKey candidate: pub_keys) {
			if (candidate.toString().equals(key))
				return pub_keys.remove(candidate);
		}
		
		return false;
	}
	
	/** TODO: Since the addUrl() method permits add multiple times the same
	 * URL, it could happend that we want to remove an URL and remove another
	 * one. That maybe doesn't affects but at least I want to point it out.
	 */
	/**
	 * Removes the URL passed from the url sources of this blog.
	 * 
	 * @param url:
	 * 		A String representing the URL to be removed.
	 * @return:
	 * 		True if an URL were removed, false elsewhere.
	 */
	public boolean removeUrl(String url) {
		// Scan by index for the first URL whose textual form matches the
		// string passed, and drop it.
		for (int i = 0; i < urls.size(); i++) {
			if (urls.get(i).toString().equals(url)) {
				urls.remove(i);
				return true;
			}
		}
		
		return false;
	}
	
	/**
	 * Serializes the Blog object to a file. The name of the file containing
	 * the blog is "blogtitle.serialized".
	 * 
	 * @param path
	 * 		The path where the blog must be saved.
	 * @return
	 * 		A string with the name of the file created.
	 * @throws IOException
	 */
	public String save(String path) throws IOException {
		String checked_path;
		FileOutputStream file;
		ObjectOutputStream oos;
		String name;
		
		// Create the absolute path.
		checked_path = Util.checkDirectoryPath(path);
		name = title + Util.EXT_SEPARATOR + Util.BLOG_EXT;
		
		// Write the file. The streams must be closed (closing also flushes):
		// the original never closed them, which leaks the descriptors and
		// can leave the serialized blog truncated on disk.
		file = new FileOutputStream(checked_path + name);
		oos = new ObjectOutputStream(file);
		try {
			oos.writeObject(this);
		}
		finally {
			oos.close();
			file.close();
		}
		
		return name;
	}
	
	/**
	 * Search a directory for a file with a properly name of a blog saved with
	 * the method save() and delete it.
	 * 
	 * @param path
	 * 		Path to search for the blog file.
	 * @throws Exception 
	 */
	public boolean searchAndDelete(String path) {
		// Rebuild the filename that save() would have produced inside the
		// given directory.
		String fname = Util.checkDirectoryPath(path) + title +
					   Util.EXT_SEPARATOR + Util.BLOG_EXT;
		
		// Delete it if present; report whether anything was removed.
		File fd = new File(fname);
		return fd.exists() && fd.delete();
	}
	
	/**
	 * Set the owner of this blog to the owner passed.
	 * 
	 * @param new_owner
	 * 		A string with the new owner.
	 */
	public void setOwner(String new_owner) {
		// Straight replacement; no validation is performed on the name.
		this.owner = new_owner;
	}
	
	/**
	 * Set the title of this blog to the title passed.
	 * 
	 * @param new_title
	 * 		A string with the new title.
	 */
	public void setTitle(String new_title) {
		// Straight replacement; note the title is also used by save() and
		// searchAndDelete() to build filenames.
		this.title = new_title;
	}
	
	/**
	 * TODO: Right now this method is quite stupid, instead of killing the
	 * updating threads (or ask them to stop), it just wait them to finish.
	 */
	/**
	 * Stop any update on this Blog.
	 */
	public void stopUpdate() {
		// Per the TODO above, the updating threads are not killed or asked
		// to stop; this simply blocks until they finish on their own.
		waitForUpdate();
	}
	
	/**
	 * Method that tells the blog to get update. This is an asynchronous
	 * method, it start the update proccess and exits, the Blog will be
	 * updated later.
	 * 
	 * Updating a Blog could imply that many updates are done (one for each
	 * source).
	 * 
	 * Calling this method has the same that update(null).
	 */
	public void update() throws MalformedURLException, InterruptedException {
		// Delegate to the single-callback variant with no callback at all;
		// the cast picks the UpdateCompleteListener overload.
		update((UpdateCompleteListener)null);
	}
	
	/**
	 * Method that tells the blog to get update. This is an asynchronous
	 * method, it start the update proccess and exits, the Blog will be
	 * updated later.
	 * 
	 * Updating a Blog could imply that many updates are done (one for each
	 * source).
	 * 
	 * Calling this method has the same that update().
	 * 
	 * @param callback:
	 * 		The action to be executed when the update is done.
	 */
	public void update(UpdateCompleteListener callback)
	throws MalformedURLException, InterruptedException {
		// Wrap the single callback (if any) in a list and delegate to the
		// multi-callback variant.
		List<UpdateCompleteListener> callbacks =
				new ArrayList<UpdateCompleteListener>();
		if (callback != null)
			callbacks.add(callback);
		update(callbacks);
	}

	/**
	 * Method that tells the blog to get update. This is an asynchronous
	 * method, it start the update proccess and exits, the Blog will be
	 * updated later.
	 * 
	 * Updating a Blog could imply that many updates are done (one for each
	 * source).
	 * 
	 * @param callbacks:
	 * 		An array of UpdateCompleteListeners that will be executed when
	 * 		an update is successful.
	 */
	public void update(List<UpdateCompleteListener> callbacks)
	throws InterruptedException, MalformedURLException {
		// One updating thread per HTTP source plus one per public key.
		// (Plain int instead of the original's needlessly boxed Integer.)
		int n_threads = urls.size() + pub_keys.size();
		setStartUpdate(n_threads);
		
		// Log the action.
		LogController.log(n_threads + " thread(s) will try to update " +
						  channel + ".");
		
		// Create an UpdateSlave for each HTTP source and start it.
		for (URL url: urls)
			dispatchSlave(new UpdateSlave(url), callbacks);
		
		// Create an UpdateSlave for each Publisher and start it.
		for (PGPPublicKey pub: pub_keys)
			dispatchSlave(new UpdateSlave(pub), callbacks);
	}
	
	/**
	 * Attaches the given callbacks (if any) to the slave and hands it to
	 * the dispatcher. Factored out of update(List) so the callback-wiring
	 * loop is not duplicated for both source kinds.
	 * 
	 * @param slave:
	 * 		The UpdateSlave to configure and dispatch.
	 * @param callbacks:
	 * 		The listeners to run when the slave finishes, may be null.
	 */
	private void dispatchSlave(UpdateSlave slave,
							   List<UpdateCompleteListener> callbacks) {
		if (callbacks != null)
			for (UpdateCompleteListener callback: callbacks)
				slave.addUpdateCompleteListener(callback);
		
		DustDispatcher.dispatchTask(slave);
	}
	
	/**
	 * This method blocks until the update of this Blog ends. Some times you
	 * may would need to create a Blog, update it and then continue doing
	 * whatever you want to do with it, for example testing.
	 * 
	 * Due to the asynchronous behaviour of the update() methods this could
	 * cause some obscure weird problems and debugging could not help to detect
	 * them, in fact it can help to make them even more obscure and weird.
	 * NullPointerExceptions could be common during execution but unexistant
	 * during debugging, that's a pain in the ass.
	 * 
	 * This method should be used after an update, in order to make the program
	 * waits until the Blog is in a consistent state again. Some blog
	 * attributes are not initialized until update has finished and other
	 * methods could try to access them.
	 * 
	 * My advice, if you plan write a piece of code that makes an update and
	 * immediately after that use another Blog method, use this method before
	 * it.
	 */
	public void waitForUpdate() {
		// Poll until no updating threads remain (see isBeingUpdate()).
		while (isBeingUpdate()) {
			try {
				Thread.sleep(TIME2WAIT4UPDATE);
			}
			catch (InterruptedException e) {
				// The original swallowed the interrupt, making the thread
				// uninterruptible here. Restore the interrupt status and
				// stop waiting so the caller can react to it.
				Thread.currentThread().interrupt();
				return;
			}
		}
	}
}
