package EurekaProcessor;

import java.io.BufferedInputStream;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.UnsupportedEncodingException;
import java.util.HashMap;
import java.util.Vector;
import java.util.Map.Entry;

import Config.Configure;
import Config.GlobalLog;

/**
 * The PreProcessor cleans up all the crawled files: it discards every page
 * that is not a topic ("post") page, copies the topic pages into the
 * destination directory mirroring the source tree, and merges multi-page
 * topics into the file of their first page. Pages are decoded/encoded as
 * GB2312 throughout.
 *
 * @author Xenophon
 * @version 2009-12-13
 */
public class PreProcessor {
	/** Root of the crawled files (source tree). */
	private String directory;
	/** Root of the processed output (destination tree). */
	private String postDirectory;

	/**
	 * Result of a quick scan of a crawled page: whether it is a topic
	 * ("post") page at all, and whether it has preceding/following pages
	 * inside the same multi-page topic.
	 */
	private static class PagePeekInfo {
		public boolean isPost = true;
		public boolean hasNext = false;
		public boolean hasPre = false;
		public String url = "";
	}

	/** Statistics holder. (Name kept as-is for compatibility; "Statistics" was intended.) */
	private static class Statitics {
		public HashMap<String, Integer> fileTitlePool = new HashMap<String, Integer>();
	}

	/**
	 * When we meet the first page of a multi-page topic we remember it here
	 * as a &lt;URL, fileName&gt; pair, so that when we merge the following
	 * pages we know where the main post is.
	 */
	private HashMap<String, String> tmpMainPostPool = new HashMap<String, String>();
	/** Follow-up pages seen before their main post, keyed by topic URL. */
	private HashMap<String, Vector<String>> tmpFollowingPagePool = new HashMap<String, Vector<String>>();
	// NOTE(review): currently never read or written outside its initializer.
	private Statitics stat = new Statitics();

	/** Creates a PreProcessor over the configured source and destination roots. */
	public PreProcessor() {
		this.directory = Configure.BASE_DIR;
		this.postDirectory = Configure.DEST_DIR;
	}

	/**
	 * @deprecated use {@link #PreProcessor()}, which reads the configured paths.
	 * @param directory source directory containing the crawled files
	 * @param postDirectory destination directory for the processed posts
	 */
	@Deprecated
	public PreProcessor(String directory, String postDirectory) {
		this.directory = directory;
		this.postDirectory = postDirectory;
	}

	/**
	 * Reads the whole file and decodes it as GB2312 text.
	 *
	 * @param f the file to read
	 * @return the decoded content, or {@code null} if the file could not be read
	 */
	public String GetStringFromFile(File f) {
		BufferedInputStream bis = null;
		try {
			bis = new BufferedInputStream(new FileInputStream(f));
			// Size the buffer from the file length instead of available():
			// available() only reports what is readable without blocking and
			// may be smaller than the file, silently truncating the content.
			byte[] buf = new byte[(int) f.length()];
			int off = 0;
			// A single read() is not guaranteed to fill the array; loop to EOF.
			while (off < buf.length) {
				int n = bis.read(buf, off, buf.length - off);
				if (n < 0)
					break;
				off += n;
			}
			return new String(buf, 0, off, "GB2312");
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// Close in finally so the stream cannot leak on an I/O error.
			if (bis != null) {
				try {
					bis.close();
				} catch (IOException ignored) {
					// nothing sensible to do while closing
				}
			}
		}
		return null;
	}

	/**
	 * Extracts the text between the first {@code <title>} and the following
	 * {@code </title>} tag of the page.
	 *
	 * @param fileStr the page content
	 * @return the title, or a fixed error string when no title can be found
	 */
	public String GetTitleFromString(String fileStr) {
		int start = fileStr.indexOf("<title>");
		if (start == -1) {
			GlobalLog.AddLog("Can't Get title from the file");
			return "An error may occur,I can't get the title...";
		}
		start += "<title>".length();
		int end = fileStr.indexOf("</title>", start);
		if (end == -1) {
			// A missing closing tag used to raise StringIndexOutOfBoundsException;
			// report it like the missing-title case instead.
			GlobalLog.AddLog("Can't Get title from the file");
			return "An error may occur,I can't get the title...";
		}
		return fileStr.substring(start, end);
	}

	/**
	 * Extracts the topic URL from the page. The crawled pages mark it as
	 * {@code 本文链接: <a href='...'>URL</a>}; the anchor text is returned.
	 *
	 * @param fileStr the page content
	 * @return the URL, or a fixed error string when it cannot be found
	 */
	public String GetURLFromString(String fileStr) {
		int anchor = fileStr.indexOf("本文链接: <a href='");
		if (anchor == -1)
			return "An error may occur,I can't get the URL...";
		int start = fileStr.indexOf("'>", anchor);
		if (start == -1) {
			GlobalLog.AddLog("An error may occur,I can't get the URL...");
			return "An error may occur,I can't get the URL...";
		}
		start += "'>".length();
		int end = fileStr.indexOf("</a>", start);
		if (end == -1) {
			// A missing </a> used to raise StringIndexOutOfBoundsException.
			GlobalLog.AddLog("An error may occur,I can't get the URL...");
			return "An error may occur,I can't get the URL...";
		}
		return fileStr.substring(start, end);
	}

	/**
	 * Quickly inspects a page: decides whether it is a topic ("post") page
	 * and, if so, whether it has preceding/following pages and what its URL is.
	 *
	 * @param filestr the page content
	 * @return peek information; {@code isPost} is false for non-topic pages
	 */
	public PagePeekInfo PeekPage(String filestr) {
		PagePeekInfo info = new PagePeekInfo();
		String title = GetTitleFromString(filestr);
		// Topic pages are titled "北大未名站 同主题阅读:<subject>".
		String[] segs = title.split(":");
		if (segs.length == 0 || !segs[0].equals("北大未名站 同主题阅读"))
			info.isPost = false;
		else {
			// The "下一页" (next page) / "上一页" (previous page) links tell
			// us where this page sits inside a multi-page topic.
			if (filestr.indexOf("下一页</a>") != -1)
				info.hasNext = true;
			if (filestr.indexOf("上一页</a>") != -1)
				info.hasPre = true;
			info.url = GetURLFromString(filestr);
		}
		return info;
	}

	/**
	 * Recursively walks the crawl tree. Directories are mirrored into the
	 * destination; each file is kept only if it is a topic page, appended to
	 * its main post when it is a follow-up page (or parked for
	 * {@link #PostSearch()} if the main post has not been met yet), and the
	 * processed original is deleted.
	 *
	 * @param dir current file or directory
	 * @param curPath path of {@code dir} relative to the crawl root
	 */
	public void SearchDirectory(File dir, String curPath) {
		if (dir.isDirectory()) {
			GlobalLog.AddLog("Processing Directory: " + dir.getName());
			File postDir = new File(postDirectory + curPath);
			postDir.mkdir();
			GlobalLog.AddLog("Create new Directory in " + postDirectory
					+ curPath);
			File[] files = dir.listFiles();
			// listFiles() returns null on an I/O or permission error;
			// previously this caused a NullPointerException.
			if (files == null) {
				GlobalLog.AddLog("Can't list files of Directory: "
						+ dir.getName());
				return;
			}
			for (File file : files)
				SearchDirectory(file, curPath + "\\" + file.getName());
			return;
		}

		GlobalLog.AddLog("Processing File: " + curPath);
		String filestr = GetStringFromFile(dir);
		// An unreadable file used to flow into PeekPage as null and crash.
		if (filestr == null) {
			GlobalLog.AddLog("Can't read File: " + curPath);
			return;
		}

		PagePeekInfo pInfo = PeekPage(filestr);
		if (!pInfo.isPost)
			return; // garbage page, leave it alone
		// First page of a multi-page topic: remember where it will be stored
		// so that later pages of the same topic can be appended to it.
		if (!pInfo.hasPre && pInfo.hasNext)
			tmpMainPostPool.put(pInfo.url, curPath);
		if (pInfo.hasPre) {
			if (!tmpMainPostPool.containsKey(pInfo.url)) {
				// Main post not met yet; park this page until PostSearch().
				Vector<String> pending = tmpFollowingPagePool.get(pInfo.url);
				if (pending == null)
					pending = new Vector<String>();
				pending.add(curPath);
				tmpFollowingPagePool.put(pInfo.url, pending);
				return;
			}
			GlobalLog.AddLog("Merging File :" + curPath);
		}

		// Destination: the already-created main post (append) or a fresh file
		// mirroring curPath. Note: no extra separator is inserted — curPath
		// already starts with one, and PostSearch() builds paths the same way.
		File mainPost = pInfo.hasPre ? new File(postDirectory
				+ tmpMainPostPool.get(pInfo.url)) : new File(postDirectory
				+ curPath);
		if (pInfo.hasPre && !mainPost.exists())
			GlobalLog.AddLog("Severe Error: There is no file:"
					+ postDirectory + tmpMainPostPool.get(pInfo.url));
		if (!pInfo.hasPre) {
			try {
				mainPost.createNewFile();
			} catch (IOException e) {
				e.printStackTrace();
			}
		}
		BufferedWriter newWriter = null;
		try {
			newWriter = new BufferedWriter(new OutputStreamWriter(
					new FileOutputStream(mainPost, true), "GB2312"));
			newWriter.write(filestr);
			newWriter.flush();
			// The source page has been copied/merged; drop the original.
			if (dir.delete())
				GlobalLog.AddLog("Delete File " + dir.getPath());
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// Close in finally so a failed write cannot leak the stream.
			if (newWriter != null) {
				try {
					newWriter.close();
				} catch (IOException ignored) {
					// nothing to recover while closing
				}
			}
		}
	}

	/**
	 * Second pass: appends the follow-up pages that were seen before their
	 * main post during the directory walk to the merged main-post files.
	 */
	public void PostSearch() {
		GlobalLog.AddLog("Begin Post-Search Processing now...");
		if (tmpMainPostPool.size() == 0) {
			GlobalLog.AddLog("Quit PostSearch 'couse tmpMainPostPool = 0");
			return;
		}
		if (tmpFollowingPagePool.size() == 0) {
			GlobalLog.AddLog("Quit PostSearch 'couse tmpFollowingPagePool = 0");
			return;
		}
		for (Entry<String, Vector<String>> entry : tmpFollowingPagePool
				.entrySet()) {
			String url = entry.getKey();
			if (!tmpMainPostPool.containsKey(url)) {
				GlobalLog.AddLog("Something may have been wrong," + url
						+ " can't find its main post...");
				continue;
			}
			File mainPost = new File(postDirectory + tmpMainPostPool.get(url));
			GlobalLog.AddLog("Post Search: " + tmpMainPostPool.get(url));
			if (!mainPost.exists()) {
				GlobalLog.AddLog("Something may have been wrong,"
						+ postDirectory + tmpMainPostPool.get(url)
						+ " doesn't exsit in the PostSearch processing...");
				continue;
			}
			BufferedWriter newWriter = null;
			try {
				// Opening the writer inside the try means a failed open skips
				// the append loop instead of leaving newWriter null and
				// crashing with a NullPointerException, as before.
				newWriter = new BufferedWriter(new OutputStreamWriter(
						new FileOutputStream(mainPost, true), "GB2312"));
				for (String path : entry.getValue()) {
					File page = new File(directory + path);
					if (!page.exists()) {
						GlobalLog.AddLog("Something may have been wrong,"
								+ directory + path
								+ " doesn't exsit in the PostSearch processing...");
						continue;
					}
					String filestr = GetStringFromFile(page);
					if (filestr == null) {
						GlobalLog.AddLog("Can't read File: " + path);
						continue;
					}
					newWriter.write(filestr);
					newWriter.flush();
				}
			} catch (IOException e) {
				e.printStackTrace();
			} finally {
				if (newWriter != null) {
					try {
						newWriter.close();
					} catch (IOException ignored) {
						// nothing to recover while closing
					}
				}
			}
		}
		GlobalLog.AddLog("End Post-Search Processing now...");
	}

	/**
	 * Entry point: validates the crawl root, creates the destination root and
	 * runs both processing passes.
	 */
	public void StartSearch() {
		File baseDir = new File(directory);
		if (!baseDir.isDirectory()) {
			GlobalLog.AddLog("The " + directory + " is not Directory...");
			return;
		}
		File desDir = new File(postDirectory);
		desDir.mkdir();
		// First pass: filter and merge in traversal order.
		SearchDirectory(baseDir, "");
		// Second pass: attach pages that arrived before their main post.
		PostSearch();
	}
}
