package net.trustie.one;

import core.ModelPipeline;
import core.ModelPipeline4Update;
import core.PageModelPipeline;
import core.PageModelPipeline4Update;
import extension.RawPage;
import net.trustie.dao.RecordDao;
import net.trustie.downloader.DataBasePageErrorOutPut;
import net.trustie.downloader.PageDao;
import net.trustie.utils.Constant;
import net.trustie.utils.DateHandler;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Scope;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Created by LouAnt on 2017/2/13 0013
 * Description: Re-extracts the Homepage field for Openhub community projects.
 * Entry point that schedules {@link ReExtractThread} workers on a fixed-size pool.
 */
@Component("ReExtractor")
public class OpenhubReExtractor {
    /**
     * Extraction state per site: a value of {@code true} means the site is currently
     * being extracted and must not be submitted again.
     */
    public static Map<String, Boolean> extractState = new HashMap<String, Boolean>();
    /** Thread pool carrying the extraction workers; capped at Constant.POOL_MAX_SIZE threads. */
    private ExecutorService pool = Executors.newFixedThreadPool(Constant.POOL_MAX_SIZE);

    public static void main(String[] args) {
        ((OpenhubReExtractor) AppContext.appContext.getBean("ReExtractor")).start();
    }

    /**
     * Endless scheduling loop: submits a re-extraction worker for the site whenever
     * the site is not already being extracted.
     *
     * FIX: the original body was a tight {@code while(true)} that submitted a new
     * worker on every iteration with no pause and no duplicate check, flooding the
     * fixed pool's unbounded queue (eventual OOM). Submission is now gated on
     * {@link #extractState} and the loop sleeps between polls.
     */
    public void start() {
        final String site = "openhub";
        while (true) {
            Boolean extracting = extractState.get(site);
            if (extracting == null || !extracting) {
                // Mark busy before submitting so the next poll cannot double-submit.
                extractState.put(site, true);
                // NOTE(review): the worker clears Application.extractState when it
                // finishes (see ReExtractThread.run); if that map is distinct from
                // this one, the worker must also clear OpenhubReExtractor.extractState
                // or this guard will block forever — verify against Application.
                ReExtractThread reExtractThread =
                        (ReExtractThread) AppContext.appContext.getBean("reExtractThread");
                pool.execute(reExtractThread);
            }
            try {
                Thread.sleep(5000L); // poll interval; avoids a busy spin
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt(); // preserve interrupt status and stop scheduling
                return;
            }
        }
    }
}

/**
 * 重新抽取的主线程
 */
@Component("reExtractThread")
@Scope("prototype")
class ReExtractThread implements  Runnable{
    /** 待抽取的站点名称 */
    private String site;
    /** 待抽取结果条数限制 */
    private static final int BatchExtractSize = Constant.BATCH_EXTRACT_SIZE;
    /** 模板名称列表 */
    public List<String> modelName = new ArrayList<String>();
    /** 动态生成站点模板 */
    private Class pageModel;
    @Resource
    private PageDao pagedao;

    /** 输出错误页面 */
    @Qualifier("errorPageToDB")
    @Autowired
    private DataBasePageErrorOutPut pageErrorOutPut;

    /** records表操作接口实例化 */
    @Resource
    private RecordDao recordDao;

    public void run(){
        /**站点名称*/
        String site = "openhub";
        Thread.currentThread().setName(site + "reextractor_thread");
        this.pageErrorOutPut.setTableName(site+"_error_page");

        try {
            pageModel = Class.forName("net.trustie.model."+site+"_Model");

            modelName.add(pageModel.getCanonicalName());

            int lastId = getLastId(site);

            List<RawPage> pages = getPages(site,lastId);

            Extractor extractor = new Extractor();
            RawPage rawPage = null;

            while(pages.size() > 0){
                for(RawPage page : pages){
                    try {
                        rawPage = extractor.extract(page, pageModel);
                        saveResult(site, rawPage);
                    }catch (Exception e){
                        e.printStackTrace();
//                                                pageErrorOutPut.returnErrorPage(page, e); //错误页面
                    }
                }
                //更新抽取游标
                updateLastId(site,lastId + pages.size());
                lastId = getLastId(site);
                pages = getPages(site,lastId);
            }
            Application.extractState.put(site, false);
            System.out.println(Thread.currentThread().getName() + ": " +
                    "extract over, id=" + lastId + " @ " + DateHandler.getExtractTime());
        }  catch (ClassNotFoundException e) {
            e.printStackTrace();
        } catch(Exception e){
            System.out.println(e.getMessage());
            e.printStackTrace();
            System.out.println(Thread.currentThread().getName() + ": " +" 发生异常 ");
        }
    }

    private void saveResult(String site,RawPage rawPage){
        ModelPipeline4Update pl = new ModelPipeline4Update();
        try {
            pl.put(pageModel, (PageModelPipeline4Update) AppContext.appContext.getBean(site + "_pipeline_4update"));
        }catch (BeansException e){
            e.printStackTrace();
        }

        if(!rawPage.getPage().getResultItems().isSkip()){
            pl.processUpdate(rawPage.getPage().getResultItems(),rawPage.getUrl(),null);
        }
    }

    /**
     * 获取站点site_html_detail表中，id大于lastId的 BatchExtractSize 条记录
     * @param site
     * @param lastId
     * @return
     */
    private List<RawPage> getPages(String site, int lastId) {
        LinkedList<RawPage> pages = new LinkedList<RawPage>();
        pages = pagedao.getDetailPages(site + "_html_detail",lastId,BatchExtractSize);
        return pages;
    }

    /**
     * 获取站点site的最后id
     * @param site
     * @return
     */
    private int getLastId(String site) {
        /** 从records表中，获取最新的抽取记录的id */
        Integer lastRecord = recordDao.getLastRecord(site+"_reextractor");
        if(lastRecord == null){//最新抽取记录为空，则新增
            //从对应站点HTML详情表site_html_detail中，获取最新的id
            lastRecord = pagedao.getMinId(site + "_html_detail") - 1;
            //最新id插入records表
            recordDao.insertRecord(site+"_reextractor",lastRecord);
        }
        return lastRecord;
    }

    private void updateLastId(String site, int currentId) {
        recordDao.updateRecord(site+"_reextractor",currentId);
    }
}
