package com.exceptionhandler.service.crawl;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
import org.neocrawler.crawler.CrawlerManager;
import org.neocrawler.dao.WebContentDao;
import org.neocrawler.entity.WebContent;
import org.neocrawler.util.Configuration;

import com.exceptionhandler.entity.ExceptionInfo;
import com.exceptionhandler.entity.OpenSourceProject;
import com.exceptionhandler.service.exception.ExceptionFactory;
import com.exceptionhandler.service.exception.ExceptionRepository;
import com.exceptionhandler.util.Config;

import data.processor.ExceptionExtractor;
import data.processor.PageContentExtractor;

/**
 * Crawls an open-source project's forum pages and periodically extracts
 * exception stack traces from the downloaded pages into the exception
 * repository.
 *
 * <p>Both {@link #start()} and {@link #restart()} block forever: after
 * kicking off the crawler they poll the crawler database every
 * {@link #POLL_INTERVAL_MS} milliseconds and run the extraction pass.
 */
public class ExceptionCrawler {
	protected static Logger log = Logger.getLogger(ExceptionCrawler.class);
	public static final String BASE_STORE_PATH = "E:\\Crawler\\";
	public static final String TYPE_FORUM_THREAD = "FORUM_THREAD";
	/** reserve01 marker: the page has already been scanned for stack traces. */
	protected static final String PARSED = "PARSED";
	/** Delay between extraction passes over the crawler database. */
	private static final long POLL_INTERVAL_MS = 15000L;
	protected OpenSourceProject project;
	protected Configuration configuration;

	/**
	 * Creates a crawler for one project's forum.
	 *
	 * @param projectName    name of the open source project; stored in
	 *                       {@code reserve02} of every crawled page so
	 *                       multiple projects can share one database
	 * @param seedUrls       forum seed URLs joined by
	 *                       {@link OpenSourceProject#SPLITER}
	 * @param storePath      local directory where crawled pages are saved
	 * @param parserName     fully qualified class name of the page parser
	 * @param threadPoolSize number of crawler worker threads
	 * @param useProxyOrNot  whether to route requests through a proxy
	 * @param proxyUrl       proxy host (ignored when no proxy is used)
	 * @param proxyPort      proxy port (ignored when no proxy is used)
	 */
	public ExceptionCrawler(String projectName, String seedUrls,
			String storePath, String parserName, int threadPoolSize,
			boolean useProxyOrNot, String proxyUrl, int proxyPort) {
		// 1. Describe the open source project being crawled.
		project = new OpenSourceProject();
		project.setName(projectName);
		project.setForumUrls(seedUrls);

		// 2. Build the crawler configuration from the global DB settings.
		configuration = new Configuration(Config.getDBUrl(),
				Config.getDBUser(), Config.getDBPassword(), storePath);
		configuration.setParserClassName(parserName);
		configuration.setThreadPoolSize(threadPoolSize);
		configuration.setUseProxyOrNot(useProxyOrNot);
		configuration.setProxyPort(proxyPort);
		configuration.setProxyUrl(proxyUrl);
	}

	/**
	 * Starts a fresh crawl from the project's forum seed URLs, then loops
	 * forever extracting stack traces from newly crawled pages.
	 * This method never returns normally.
	 */
	public void start() {
		// Turn each forum URL into a seed page tagged with the project name.
		List<WebContent> seeds = new ArrayList<WebContent>();

		String[] forumUrls = project.getForumUrls().split(
				OpenSourceProject.SPLITER);
		for (int i = 0; i < forumUrls.length; i++) {
			log.info(i + ": " + forumUrls[i]);

			WebContent webContent = new WebContent();
			webContent.setUrl(forumUrls[i]);
			webContent.setType(WebContent.TYPE_SEED_PAGE);
			// reserve02 carries the project name through the crawl.
			webContent.setReserve02(project.getName());
			seeds.add(webContent);
		}

		CrawlerManager.start(configuration, seeds);

		pollAndExtractForever();
	}

	/**
	 * Resumes a previously interrupted crawl for this project by reloading
	 * visited and unvisited pages from the database, then loops forever
	 * extracting stack traces. This method never returns normally.
	 */
	public void restart() {
		// ========== Restart ========== //
		log.info(configuration.getDbUrl());
		CrawlerManager.setConfiguration(configuration);
		WebContentDao webContentDao = new WebContentDao();

		// "visited" = successfully fetched; "unvisited" = everything else.
		String sqlUnvisited = "select * from web_content where status!=? and reserve02=?";
		String sqlVisited = "select * from web_content where status=? and reserve02=?";
		Object[] params = { WebContent.STATUS_OK, project.getName() };
		List<WebContent> visited = webContentDao.find(sqlVisited, params);
		List<WebContent> unvisited = webContentDao.find(sqlUnvisited, params);
		CrawlerManager.restart(configuration, visited, unvisited);
		// =========== End of Restart ========== //

		pollAndExtractForever();
	}

	/**
	 * Shared polling loop: sleep, then run one extraction pass, forever.
	 * Re-asserts the interrupt flag if the sleep is interrupted instead of
	 * swallowing it.
	 */
	private void pollAndExtractForever() {
		while (true) {
			try {
				Thread.sleep(POLL_INTERVAL_MS);
			} catch (InterruptedException e) {
				// Preserve the interrupt status for any outer handler.
				Thread.currentThread().interrupt();
				log.warn("Poll sleep interrupted", e);
			}
			extractExceptionInfoFromCrawlerDatabase();
		}
	}

	/**
	 * Scans every successfully crawled forum-thread page of this project
	 * that has not yet been parsed, extracts exception stack traces from
	 * its stored HTML, saves each as an {@link ExceptionInfo}, and marks
	 * the page as {@link #PARSED} so it is not scanned again.
	 * All per-page and per-trace failures are logged and skipped.
	 */
	public void extractExceptionInfoFromCrawlerDatabase() {
		// Candidate pages: right type, fetched OK, this project, not yet parsed.
		WebContentDao webContentDao = new WebContentDao();
		String sql = "SELECT * FROM web_content WHERE type=? and status=? and reserve02=? and (reserve01!=? or reserve01 is null)";
		List<WebContent> webContents = webContentDao.find(sql, new Object[] {
				TYPE_FORUM_THREAD, WebContent.STATUS_OK, project.getName(),
				PARSED });
		// Defensive: the DAO may return null when nothing matches.
		if (webContents == null) {
			return;
		}
		log.info("Candidate pages to scan for stack traces: "
				+ webContents.size());

		for (int i = 0; i < webContents.size(); i++) {
			WebContent webContent = webContents.get(i);
			try {
				// Pages without a local copy cannot be parsed; skip and
				// leave them unmarked so a later crawl pass can retry.
				if (webContent.getLocalStorePath() == null) {
					continue;
				}
				log.info(" =====尝试从 " + webContent.getUrl()
						+ " 中发现异常堆栈信息！===== ");
				// NOTE(review): no charset given, so the platform default
				// is used — matches how the page was written by the crawler.
				String webPage = FileUtils.readFileToString(new File(
						webContent.getLocalStorePath()));
				List<String> stackTraceStrings = ExceptionExtractor
						.getExceptionList(PageContentExtractor
								.extractTextFromPage(webPage, 0));
				for (int j = 0; stackTraceStrings != null
						&& j < stackTraceStrings.size(); j++) {
					try {
						ExceptionInfo exceptionInfo = ExceptionFactory
								.create(stackTraceStrings.get(j));
						// Guard BEFORE dereferencing: the factory may fail
						// to parse a candidate trace and return null.
						if (exceptionInfo == null) {
							continue;
						}
						log.info("\n =====成功发现的异常堆栈信息!! ===== \n");
						exceptionInfo.setUrl(webContent.getUrl());
						exceptionInfo.setReserve01(webContent.getUuid());
						exceptionInfo.setReserve02(webContent
								.getLocalStorePath());
						exceptionInfo.setReserve03(webContent
								.getReserve02());
						ExceptionRepository.save(exceptionInfo);
					} catch (Exception e) {
						// One bad trace must not abort the rest of the page.
						log.error("Failed to save stack trace from "
								+ webContent.getUrl(), e);
					}
				}
				// Mark the page so it is not re-scanned next pass.
				webContent.setReserve01(PARSED);
				webContentDao.update(webContent);
			} catch (Exception e) {
				// One unreadable page must not abort the whole pass.
				log.error("Failed to process page " + webContent.getUrl(), e);
			}
		}
	}

}
