package com.seaboxdata.scheduler;

import com.seaboxdata.controller.IndicatorsController;
import com.seaboxdata.dao.HdfsDao;
import com.seaboxdata.entity.HiveDataInfo;
import com.seaboxdata.entity.TableConfig;
import com.seaboxdata.service.HiveDataInfoService;
import com.seaboxdata.service.TableConfigService;
import org.apache.commons.collections4.CollectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

@Component
@Configuration
public class HiveDataMonitorJob {

    // SLF4J logger: declared static final per convention (was non-final).
    private static final Logger log = LoggerFactory.getLogger(HiveDataMonitorJob.class);

    @Autowired
    private TableConfigService tableConfigService;

    @Autowired
    private HiveDataInfoService hiveDataInfoService;

    // NOTE(review): injected but not referenced anywhere in this class; kept so the
    // bean's property binding stays intact — confirm whether it can be removed.
    @Value("${sms.interval}")
    private String invalidIntervalSecond;

    // Configuration flag; when it equals the string "true", doCollect() skips its run.
    @Value("${isSmsMonitor}")
    private String isSmsMonitor;

    @Autowired
    private HdfsDao hdfsDao;

    /**
     * Collects statistics for monitored Hive tables. Reads the monitoring
     * configuration rows from the Postgres config table, queries Hive for each
     * configured table, enriches the results with HDFS storage sizes, and
     * batch-inserts the rows into {@code base_db.data_stat_info}.
     *
     * <p>Triggered by the cron expression in the {@code interface.hiveDataMonitor}
     * property.
     *
     * @author zyw
     */
    @Scheduled(cron = "${interface.hiveDataMonitor}")
    public void doCollect() {
        // NOTE(review): skipping when isSmsMonitor == "true" reads as inverted for a
        // flag named "is...Monitor" — confirm the intended semantics of the property.
        if ("true".equals(isSmsMonitor)) {
            return;
        }
        log.info("=======HiveDataMonitorJob start=======");
        // Load every table that should be monitored.
        List<TableConfig> tableConfigs = tableConfigService.list();
        List<HiveDataInfo> hiveDataInfos = new ArrayList<>();
        if (!CollectionUtils.isEmpty(tableConfigs)) {
            for (TableConfig config : tableConfigs) {
                HiveDataInfo hiveDataInfo = hiveDataInfoService.getDataInfo(config);
                // Tables for which no data info could be built are skipped silently.
                if (hiveDataInfo != null) {
                    hiveDataInfo.setTableId(config.getTableId());
                    hiveDataInfo.setDataDate(new Date());
                    hiveDataInfos.add(hiveDataInfo);
                }
            }
            // Enrich the collected rows with their HDFS storage sizes.
            hdfsDao.setDataInfos(tableConfigs, hiveDataInfos);
        }
        log.info("==base_db.data_stat_info插入数据=={}", hiveDataInfos.size());
        if (!CollectionUtils.isEmpty(hiveDataInfos)) {
            hiveDataInfoService.saveBatch(hiveDataInfos);
        }
        log.info("=======HiveDataMonitorJob end=======");
    }
}
