package com.flute.icrawler.app.processor.extractor.algo;

import java.io.File;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.flute.icrawler.app.service.CrawlService;
import com.flute.icrawler.app.util.ParameterKey;
import com.flute.icrawler.framework.framework.CrawlUrl;
import com.flute.icrawler.framework.framework.container.AdditionalUrlInformation;
import com.flute.icrawler.framework.framework.container.NumberStringValue;
import com.flute.icrawler.framework.framework.container.AdditionalUrlInformation.Additionals;
import com.flute.icrawler.framework.processor.result.AddUrlProcessorResult;
import com.flute.icrawler.framework.processor.result.RetryProcessorResult;
import com.flute.icrawler.framework.processor.result.SuccessProcessorResult;

/**
 * Extractor algorithm that treats a {@link CrawlUrl} as a local directory
 * path, lists its child files via {@link CrawlService}, and registers each
 * child as a new crawl URL inheriting (a copy of) the parent's additional
 * URL information.
 */
public class FileDirectoryExtractAlgo implements IExtractorAlgo {

	private static final Logger LOGGER = LoggerFactory
			.getLogger(FileDirectoryExtractAlgo.class);

	/**
	 * Lists the directory referenced by {@code url} and enqueues its files
	 * as new crawl URLs. If the path is not a directory the call is a no-op;
	 * if the directory yields no files the URL is marked successful.
	 *
	 * @param url the crawl URL whose value is expected to be a directory path
	 */
	@Override
	public void extract(CrawlUrl url) {
		File file = new File(url.getUrl());

		if (!file.isDirectory()) {
			LOGGER.debug("not a Directory {}", url.getUrl());
			return;
		}
		File[] files = CrawlService.getInstance().getFilesIterator(
				url.getUrl(), 100);

		// FIX: treat a null listing the same as an empty one. The original
		// condition (null != files && 0 == files.length) let a null array
		// fall through to addNewFiles, where files.length threw an NPE.
		if (null == files || 0 == files.length) {
			url.registerProcessorResult(new SuccessProcessorResult());
			LOGGER.info("Directory deal ok {}", file);
			return;
		}

		LOGGER.info("begin deal {}", file);
		addNewFiles(url, files);
		LOGGER.info("end deal {}", file);

	}

	/**
	 * Registers all discovered files as new crawl URLs derived from the
	 * current URL, then registers a retry result so the directory itself is
	 * processed again later.
	 *
	 * @param crawlUrl
	 *            the URL currently being processed
	 * @param files
	 *            files discovered under the current URL; must be non-empty
	 */
	private void addNewFiles(CrawlUrl crawlUrl, File[] files) {
		// Collect the new URLs into one AddUrlProcessorResult for the pool.
		AddUrlProcessorResult parameter = new AddUrlProcessorResult();
		parameter.setName(ParameterKey.HTTP_LINKS);

		for (int i = 0; i < files.length; i++) {

			File eachFile = files[i];
			String path = eachFile.getAbsolutePath();

			CrawlUrl newUrl = new CrawlUrl(crawlUrl.getJob(), path);

			AdditionalUrlInformation info = crawlUrl
					.getAdditionalUrlInformation().newInstance();

			// Read the parent's current crawl depth.
			NumberStringValue currentDeep = (NumberStringValue) crawlUrl
					.getAdditionalUrlInformation().getValue(Additionals.Deep);

			// NOTE(review): the child is given the SAME depth as the parent,
			// not depth + 1 — confirm whether incrementing was intended.
			info.updateValue(Additionals.Deep, new NumberStringValue(
					currentDeep.getNumber().intValue()));

			// Parse the first five characters of the file name as a numeric
			// resource id; unparsable names fall back to id 0.
			int id = 0;
			try {
				id = Integer.parseInt(eachFile.getName().substring(0, 5));
			} catch (NumberFormatException e) {
				// FIX: pass the Throwable last so SLF4J logs the stack trace
				// instead of formatting the exception into a placeholder.
				LOGGER.error("failed to parse resource id from {}", eachFile, e);
			}

			info.updateValue(Additionals.ResourceID, new NumberStringValue(id));

			newUrl.setAdditionalUrlInformation(info);
			parameter.addUrl(newUrl);

		}

		crawlUrl.registerProcessorResult(parameter);
		crawlUrl.registerProcessorResult(new RetryProcessorResult());

	}

}
