package cn.wanghaomiao.crawlers;

import java.nio.charset.StandardCharsets;
import java.util.List;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;

import cn.wanghaomiao.dao.HBaseDao;
import cn.wanghaomiao.dao.InvitationDao;
import cn.wanghaomiao.model.Invitation;
import cn.wanghaomiao.seimi.annotation.Crawler;
import cn.wanghaomiao.seimi.def.BaseSeimiCrawler;
import cn.wanghaomiao.seimi.struct.Request;
import cn.wanghaomiao.seimi.struct.Response;
import cn.wanghaomiao.xpath.exception.XpathSyntaxErrorException;
import cn.wanghaomiao.xpath.model.JXDocument;

@Crawler(name = "I")
public class InvitationCrawler extends BaseSeimiCrawler {
	private final static String PREFIX = "http://tieba.baidu.com"; // 抓取链接的前缀

	@Override
	public String[] startUrls() {
		// 创建数据库
		HBaseDao.createTable("invitation", new String[] { "inviId",
				"inviTitle", "inviContent", "inviLv", "univName", "inviNum",
				"inviDp", "inviName", "inviClient", "inviTime" });

		String[] values = new String[] {};
		// 从数据库中获取需要爬取的链接
		Cell[] cells = HBaseDao
				.getCell("university", "univ", "info", "univUrl");
		for (int i = 0; i < cells.length; i++) {
			values[i] = new String(CellUtil.cloneValue(cells[i]));
		}
		return values;
	}

	@Override
	public void start(Response response) {
		JXDocument doc = response.document();
		try {
			// 第一页的帖子的链接
			List<Object> urls = doc
					.sel("//div[@class='threadlist_title pull_left j_th_tit ']/a/@href");
			for (Object s : urls) {
				// 判断链接是否存在
				if (StringUtils.isNotBlank(s.toString())) {
					push(Request.build(PREFIX + s.toString(), "invitationBean"));
				} else {
					System.out.println("链接不存在.");
				}
			}
		} catch (XpathSyntaxErrorException e) {
			e.printStackTrace();
		}
	}

	public void invitationBean(Response response) {
		try {
			Invitation inv = response.render(Invitation.class);
			InvitationDao.insert(inv);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
