package com.sinaapp.gavinzhang.GSpider.webaddress;

import java.util.HashSet;

/**
 * @author gavin
 * @version 1.0
 * @date 2016/3/19
 * @description The crawler's built-in list of already-visited URLs.
 * Implemented with a HashSet, chosen mainly for its uniqueness guarantee.
 * Planned to be replaced as the default visited list by ConcurrentFoundWebUlr
 * from the concurrent package under plugin.
 */
public class VisitedWebUrlList extends WebUrlCollection {

	// Set of already-visited URLs; HashSet gives O(1) membership tests and
	// guarantees uniqueness.
	//
	// Thread-safety: all access is guarded by the intrinsic lock of THIS
	// object (synchronized methods). The original code locked on the field
	// itself, but the setters reassign that field, so the lock object could
	// be swapped out from under a concurrent caller — locking on "this"
	// stays valid across reassignment.
	private HashSet<WebAddress> visitedList;

	public VisitedWebUrlList() {
		visitedList = new HashSet<>();
	}

	/**
	 * Static factory; equivalent to calling the public constructor.
	 *
	 * @return a new, empty visited-URL list
	 */
	public static VisitedWebUrlList newVisitedWebUrlList() {
		return new VisitedWebUrlList();
	}

	/**
	 * Checks whether the given URL has already been visited.
	 *
	 * @param webUrl the URL to look up
	 * @return {@code true} if the URL is in the visited set
	 */
	@Override
	public synchronized boolean exist(WebAddress webUrl) {
		return visitedList.contains(webUrl);
	}

	/**
	 * Not meaningful for a visited list — there is no "next" element to
	 * hand out, so this always returns {@code null}. Callers must not
	 * dereference the result.
	 *
	 * @return always {@code null}
	 */
	@Override
	public WebAddress get() {
		return null;
	}

	/**
	 * Records a URL as visited. Adding a duplicate is not an error:
	 * success means "the URL is in the list afterwards", regardless of
	 * whether this call inserted it.
	 *
	 * @param webUrl the URL to record
	 * @return this collection, for chaining
	 */
	@Override
	public synchronized WebUrlCollection add(WebAddress webUrl) {
		visitedList.add(webUrl);
		return this;
	}

	/**
	 * Removes a URL from the visited set. Removing an absent URL is a
	 * silent no-op.
	 *
	 * @param webUrl the URL to remove
	 * @return this collection, for chaining
	 */
	@Override
	public synchronized WebUrlCollection remove(WebAddress webUrl) {
		visitedList.remove(webUrl);
		return this;
	}

	/**
	 * @return the number of distinct visited URLs
	 */
	@Override
	public synchronized int size() {
		return visitedList.size();
	}

	/**
	 * @deprecated use {@link #getVisitedList()} instead.
	 * @return the internal set (not a copy — external mutation is unsafe)
	 */
	@Deprecated
	public synchronized HashSet<WebAddress> getFoundList() {
		return visitedList;
	}

	/**
	 * @deprecated use {@link #setVisitedList(HashSet)} instead.
	 * @param foundList the set to adopt as the backing store
	 */
	@Deprecated
	public synchronized void setFoundList(HashSet<WebAddress> foundList) {
		this.visitedList = foundList;
	}

	/**
	 * Exposes the backing set directly. NOTE(review): this leaks internal
	 * mutable state; callers mutating the returned set bypass this class's
	 * locking. Kept for interface compatibility.
	 *
	 * @return the internal set of visited URLs (not a copy)
	 */
	public synchronized HashSet<WebAddress> getVisitedList() {
		return visitedList;
	}

	/**
	 * Replaces the backing set. The caller must not continue to mutate the
	 * supplied set from other threads, since it becomes this class's
	 * internal state.
	 *
	 * @param visitedList the set to adopt as the backing store
	 */
	public synchronized void setVisitedList(HashSet<WebAddress> visitedList) {
		this.visitedList = visitedList;
	}
}
