package com.weibo.main;

import cn.edu.hfut.dmic.webcollector.model.CrawlDatum;
import cn.edu.hfut.dmic.webcollector.model.CrawlDatums;
import cn.edu.hfut.dmic.webcollector.model.Page;
import cn.edu.hfut.dmic.webcollector.net.HttpRequest;
import cn.edu.hfut.dmic.webcollector.net.HttpResponse;
import cn.edu.hfut.dmic.webcollector.plugin.berkeley.BreadthCrawler;
import com.weibo.berkeleydb.BDBUtil;
import com.weibo.berkeleydb.Blog;
import com.weibo.berkeleydb.User;
import com.weibo.utils.WeiboCN;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import java.net.URLEncoder;

/**
 * Crawls Sina Weibo (weibo.cn) with WebCollector using a pre-fetched login
 * cookie, and extracts user and blog data into Berkeley DB stores.
 * @author zhangzhen
 */
public class WeiboCrawler extends BreadthCrawler {

    /** User store keyed by the user's profile URL. */
    BDBUtil<String,User> user_db = new BDBUtil<String, User>("user");
    /** Blog store keyed by the weibo id. */
    BDBUtil<String,Blog> blog_db = new BDBUtil<String, Blog>("blog");
    /** Sina Weibo session cookie attached to every request. */
    String cookie;

    /**
     * Creates the crawler and logs in to weibo.cn to obtain a session cookie.
     *
     * @param crawlPath directory used by the underlying crawler for its state
     * @param autoParse whether WebCollector should auto-extract links
     * @throws Exception if the crawl store cannot be opened or login fails
     */
    public WeiboCrawler(String crawlPath, boolean autoParse) throws Exception {
        super(crawlPath, autoParse);
        // Credentials travel in clear text -- use a throwaway account.
        // NOTE(review): the original author marked login as currently broken;
        // replace "xxx"/"yyyy" with your own weibo account and password.
        cookie = WeiboCN.getSinaCookie("xxx", "yyyy");
    }

    /**
     * Builds the HTTP request for a crawl datum and attaches the login cookie.
     * A datum whose "method" meta is POST is treated as an initial search
     * request: the optional "Cache" meta becomes a Cache-Control header and
     * the "keyword" meta becomes the URL-encoded form body.
     *
     * @param crawlDatum the datum to fetch
     * @return the HTTP response for the (possibly POST) request
     * @throws Exception on network or encoding failure
     */
    @Override
    public HttpResponse getResponse(CrawlDatum crawlDatum) throws Exception {
        HttpRequest request = new HttpRequest(crawlDatum);

        String method = crawlDatum.meta("method"); // request method
        if (method != null) {
            request.setMethod(method);
            // Case-insensitive check replaces equals("POST")||equals("post").
            if (method.equalsIgnoreCase("POST")) { // initial search request
                String cache = crawlDatum.meta("Cache"); // request header value
                if (cache != null) {
                    request.setHeader("Cache-Control", cache);
                }
                String key = crawlDatum.meta("keyword"); // search term
                if (key != null) {
                    request.setOutputData(("keyword=" + URLEncoder.encode(key, "utf-8")
                            + "&smblog=" + URLEncoder.encode("搜微博", "utf-8")).getBytes("utf-8"));
                }
            }
        }

        request.setCookie(cookie);
        return request.getResponse();
    }

    /**
     * Extracts users and weibo entries from a result page and stores any that
     * are not yet present in the Berkeley DB stores. Note: weibo.cn's home
     * page is partly generated by JavaScript, so not every page yields data.
     *
     * @param page the fetched page
     * @param next collector for follow-up crawl datums (unused here)
     */
    @Override
    public void visit(Page page, CrawlDatums next) {
        // Fix: guard against a missing "pageNum" meta instead of throwing NPE.
        String pageMeta = page.meta("pageNum");
        int pageNum = pageMeta == null ? 0 : Integer.parseInt(pageMeta);

        // Each weibo entry is a <div class="c" id="...">.
        Elements weibos = page.select("div[class=c][id]");
        for (Element weibo : weibos) {
            // Author link. Fix: Elements.first() returns null when the entry
            // has no such link (ads/notices) -- skip it instead of crashing.
            Element user = weibo.select("a[class=nk][href]").first();
            if (user == null) {
                continue;
            }
            String url = user.attr("href");
            String nick = user.text();

            User u = new User(nick, url); // user entity
            if (user_db.get(url) == null) {
                user_db.put(url, u, true); // store only unseen users
            }

            String wid = weibo.attr("id"); // the id attribute is the weibo id
            String text = weibo.select("span[class=ctt]").text();
            if (text.startsWith(":")) {
                text = text.substring(1); // drop the leading ":" separator
            }

            Blog blog = new Blog(wid, url, text); // weibo id, user url, content
            if (blog_db.get(wid) == null) {
                blog_db.put(wid, blog, true); // store only unseen blogs
            }
            System.out.println("第" + pageNum + "页\t" + nick + ":" + text);
        }
    }

    /**
     * Entry point: seeds the crawler with one POST search request per keyword
     * (plus GET result pages) and starts a single-level crawl. The inner and
     * outer {@code break}s limit the run to one keyword / one page for testing.
     *
     * @param args unused
     * @throws Exception if crawler setup or the crawl itself fails
     */
    public static void main(String[] args) throws Exception {
        WeiboCrawler crawler = new WeiboCrawler("weibo", false);
        String[] keys = {"学习", "小学生", "数学"}; // primary-education topics
        // Fix: setThreads was called twice (3, then 1); keep the intended
        // final value -- one thread per keyword, tune for your machine.
        crawler.setThreads(1);
        // TODO crawl users by keyword
        for (String key : keys) { // one search seed per keyword

            crawler.addSeed(new CrawlDatum("http://weibo.cn/search/")
                    .meta("method", "POST")
                    .meta("pageNum", "1")
                    .meta("Cache", "max-age=0")
                    .meta("keyword", key));

            // Result pages 2..1000 are fetched with plain GETs.
            for (int i = 2; i < 1001; i++) {
                crawler.addSeed(new CrawlDatum(
                        "http://weibo.cn/search/mblog?hideSearchFrame=&keyword="
                                + URLEncoder.encode(key, "utf-8") + "&page=" + i)
                        .meta("pageNum", i + ""));
                break; // remove for a full (non-test) crawl
            }
            break; // remove for a full (non-test) crawl
        }
        /* Crawl the first 5 pages of one user's feed:
        for (int i = 1; i <= 5; i++) {
            crawler.addSeed(new CrawlDatum("http://weibo.cn/zhouhongyi?vt=4&page=" + i).meta("pageNum", i + ""));
        }*/
        //crawler.addSeed(new CrawlDatum("http://weibo.cn/").meta("pageNum", 1+ ""));
        crawler.start(1);
    }

}