/*
 * Copyright (C) 2015 hu
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
package com.wangnian.service;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;
import cn.edu.hfut.dmic.webcollector.util.HttpUtil;
import com.wangnian.dao.SumTieZiDao;
import com.wangnian.entity.SumTieZi;
import com.wangnian.entity.SumTieZi1;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.List;

import static com.sun.tools.doclint.Entity.sub;


/**
 * Tutorial crawler built on WebCollector 2.x (version &gt;= 2.20). Notable 2.x features:
 * 1) Custom traversal strategies for complex crawling needs such as pagination and AJAX.
 * 2) Per-URL metadata (MetaData) enabling complex tasks such as depth tracking,
 *    anchor-text capture, referrer capture, POST parameter passing, and incremental updates.
 * 3) Plugin mechanism; WebCollector ships with two built-in plugin sets.
 * 4) An in-memory plugin (RamCrawler) with no file-system or database dependency,
 *    suited to one-off crawls such as live search-engine scraping.
 * 5) A Berkeley DB based plugin (BreadthCrawler) suited to long-running, large-scale
 *    tasks, with resumable crawling so crashes or shutdowns lose no data.
 * 6) Selenium integration for extracting JavaScript-generated content.
 * 7) Easily customizable HTTP requests with built-in random multi-proxy switching;
 *    simulated login can be implemented via custom requests.
 * 8) SLF4J logging facade, pluggable to multiple logging backends.
 * <p>
 * Demos can be found in the cn.edu.hfut.dmic.webcollector.example package.
 *
 * @author hu
 */
@Component
public class Tieba1Crawler extends BreadthCrawler {

    /** Class logger; {@code private static final} per SLF4J convention. */
    private static final Logger logger = LoggerFactory.getLogger(Tieba1Crawler.class);

    @Autowired
    public SumTieZiDao sumTieZiDao;

    /**
     * Creates a crawler with an explicit crawl directory.
     *
     * @param crawlPath path of the Berkeley DB directory used for crawl state
     * @param autoParse whether WebCollector should automatically parse links
     */
    public Tieba1Crawler(String crawlPath, boolean autoParse) {
        super(crawlPath, autoParse);
    }

    /** Default constructor: crawl state stored under {@code "c1"}, auto-parse enabled. */
    public Tieba1Crawler() {
        super("c1", true);
    }

    @Override
    public void visit(Page page, CrawlDatums next) throws Exception {
        loucengid(page);
    }

    /**
     * Fetches every floor (post) of one Tieba thread across all of its pages and
     * maps each floor's author, content, and pid into a {@link SumTieZi1} entity.
     *
     * @param page the visited page (currently unused; the thread URL is hard-coded)
     * @throws IOException if fetching or parsing a thread page fails
     */
    public void loucengid(Page page) throws IOException {
        // NOTE(review): the thread id is hard-coded; presumably it should be
        // derived from `page` — TODO confirm against the crawl seeds.
        Document document = Jsoup.parse(new URL("http://tieba.baidu.com/p/1304432694"), 1000);
        Elements elements2 = document.getElementsByClass("red");
        // The second "red" element holds the thread's total page count.
        String sumPage = elements2.get(1).text();
        // Use a primitive int — no need for an Integer box here.
        int sumint = Integer.parseInt(sumPage);
        for (int i = 1; i <= sumint; i++) {
            Document document2 = Jsoup.parse(new URL("http://tieba.baidu.com/p/1304432694?pn=" + i), 1000);
            Elements authors = document2.select("a[class=p_author_name j_user_card]");
            Elements contents = document2.select("div[class=d_post_content j_d_post_content  clearfix]");
            // BUGFIX: the original inner loop incremented `i` (the page counter)
            // instead of `k` and indexed both element lists with `i`, skipping
            // floors, corrupting the pagination counter, and eventually throwing
            // IndexOutOfBoundsException. Iterate with `k` and guard against the
            // two lists having different sizes.
            int floors = Math.min(authors.size(), contents.size());
            for (int k = 0; k < floors; k++) {
                String author = authors.get(k).text();
                String context = contents.get(k).text();
                String pid = contents.get(k).attr("id");
                // The element id looks like "post_content_<pid>"; keep the numeric tail.
                String pidNumber = pid.substring(pid.lastIndexOf('_') + 1);
                // BUGFIX: printf treated scraped user content as a format string —
                // any '%' in a post would throw IllegalFormatException. Use the
                // class's SLF4J logger with parameterized placeholders instead.
                logger.debug("pid={} content={}", pidNumber, context);

                SumTieZi1 sumTieZi1 = new SumTieZi1();
                sumTieZi1.setTieZiAuthor(author);
                sumTieZi1.setPn(String.valueOf(i));
                // NOTE(review): no-arg setter — presumably fills the tid
                // internally; confirm against SumTieZi1.
                sumTieZi1.setTid();
                sumTieZi1.setTieZiPid(pidNumber);
                // NOTE(review): `context` is never stored on the entity and the
                // entity is never persisted via sumTieZiDao — TODO confirm
                // whether a save call and a content setter are missing.
            }
        }
    }
}
