package com.nlp.visualization.crawler.pipeline;

import com.nlp.visualization.common.CONSTANTS;
import com.nlp.visualization.core.sentence.SentenceType;
import com.nlp.visualization.pojo.NLP.seg.SegmentEntity;
import com.nlp.visualization.pojo.NLP.sen.SentenceEntity;
import com.nlp.visualization.service.IDataSentenceService;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import us.codecraft.webmagic.ResultItems;
import us.codecraft.webmagic.Task;
import us.codecraft.webmagic.pipeline.Pipeline;

import java.util.LinkedList;
import java.util.List;

/**
 * @author LXM
 * @Title: SentenceTask
 * @Description: Pipeline task that runs sentence (dependency) parsing on crawled pages
 * @date 2018/3/13 下午10:18
 */
@Service
@Service
public class SentenceTask implements Pipeline {

    // Conventional SLF4J logger: one per class, static and final.
    private static final Logger logger = LoggerFactory.getLogger(SentenceTask.class);

    @Autowired
    IDataSentenceService dataSentenceService;

    // NOTE(review): this pipeline is a singleton @Service; keeping the last
    // parsed result in an instance field is not thread-safe if the crawler
    // runs with multiple threads — confirm single-threaded usage.
    SentenceEntity entity;

    public SentenceEntity getEntity() {
        return entity;
    }

    public void setEntity(SentenceEntity entity) {
        this.entity = entity;
    }

    /**
     * Receives one crawled page's extracted fields and runs sentence parsing
     * (HanLP CRF) on its {@code "content"} field; the parsed result is stored
     * on this instance via {@link #setEntity(SentenceEntity)}.
     *
     * @param resultItems extracted fields; expected to contain non-null
     *                    {@code "title"} and {@code "content"} entries
     * @param task        the owning crawler task (not used here)
     */
    @Override
    public void process(ResultItems resultItems, Task task) {
        try {
            String title = resultItems.get("title").toString();
            String content = resultItems.get("content").toString();
            logger.debug("parsing crawled page, title={}", title);
            SentenceEntity parsed = dataSentenceService.parseSingle(content, SentenceType.HANLP_CRF);
            setEntity(parsed);
        } catch (Exception e) {
            // Log with the cause attached instead of printStackTrace();
            // a missing "title"/"content" field surfaces here as an NPE.
            logger.error("爬虫抽取提取字段异常", e);
        }
    }
}
