package com.dkt.blogboot.util;

import com.dkt.blogboot.entity.Article;
import com.dkt.blogboot.mapper.ArticleMapper;
import org.apache.ibatis.session.SqlSession;
import org.apache.ibatis.session.SqlSessionFactory;
import org.quartz.Job;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Component;

import javax.sql.DataSource;
import java.time.LocalDateTime;
import java.util.List;

@Component
public class BlogScanJob implements Job {
    @Autowired
    private SqlSessionFactory sqlSessionFactory;
    private DataSource dataSourceCold; // cold-database DataSource — NOTE(review): never assigned or read here; confirm it can be removed
    List<Article> articles = null; // articles selected for migration (by lastProcessTime/status)
    List<Article> articlesColdFlag = null; // articles selected for migration (by coldFlag)
    ArticleMapper articlesMapperHot = null; // mapper for the hot database

    int batchSize = 3; // number of records handled per batch
    int offset = 0; // start position of the current batch; reset on every execution

    /**
     * Scans the hot database for articles older than two months, flags them as cold,
     * then copies them batch-by-batch into the cold database ({@code article_cold}).
     *
     * @param jobExecutionContext Quartz execution context (unused)
     * @throws JobExecutionException wrapping any failure so the scheduler can react
     */
    @Override
    public void execute(JobExecutionContext jobExecutionContext) throws JobExecutionException {
        // try-with-resources: SqlSession is Closeable, so it is released even on failure.
        try (SqlSession sqlSession = sqlSessionFactory.openSession()) {
            articlesMapperHot = sqlSession.getMapper(ArticleMapper.class);
            LocalDateTime twoMonthAgo = LocalDateTime.now().minusMonths(2);
            articles = articlesMapperHot.selectArticlesToMove(twoMonthAgo);
            System.out.println("要迁移的数据量为："+articles.size()+"条");

            // updateStatus depends only on the cutoff time — run it once,
            // not once per article as before.
            articlesMapperHot.updateStatus(twoMonthAgo);
            for (Article article : articles) {
                articlesMapperHot.updateColdFlag(article.getId());
            }
            // openSession() defaults to autoCommit=false; without an explicit commit
            // the status/coldFlag updates above would be rolled back on close.
            sqlSession.commit();

            articlesColdFlag = articlesMapperHot.selectArticlesByColdFlag();

            // Build the cold-DB JdbcTemplate once per run instead of once per article.
            JdbcTemplate coldJdbcTemplate = new JdbcTemplate(ColdDataSourceConfig.dataSourceCold());

            // offset is an instance field: reset it so a reused Job instance
            // does not resume from a stale position and skip records.
            offset = 0;
            while (offset < articlesColdFlag.size()) {
                System.out.println("开启本批次的数据迁移！offset=:"+offset);
                List<Article> batchToMove = articlesColdFlag.subList(offset, Math.min(offset + batchSize, articlesColdFlag.size()));
                processBatch(coldJdbcTemplate, batchToMove);
                offset += batchSize;
            }
        } catch (Exception e) {
            // Preserve the cause so the scheduler log shows the real failure.
            throw new JobExecutionException(e);
        }
    }

    /**
     * Copies one batch of articles into the cold database.
     * Uses {@code INSERT ... ON DUPLICATE KEY UPDATE} so the operation is idempotent:
     * re-migrating an article that already exists in the cold table does not fail.
     *
     * @param coldJdbcTemplate template bound to the cold DataSource (created once per run)
     * @param batchToMove      the slice of articles to migrate in this batch
     */
    private void processBatch(JdbcTemplate coldJdbcTemplate, List<Article> batchToMove) {
        for (Article articleToMove : batchToMove) {
            System.out.println("开始迁移"+articleToMove+"数据");
            // Parameters appear twice: once for the INSERT values,
            // once for the ON DUPLICATE KEY UPDATE assignments.
            Object[] objToMove = new Object[]{articleToMove.getId(),articleToMove.getTitle(),articleToMove.getContent(),articleToMove.getDate(),articleToMove.getView(),articleToMove.getPraise(),articleToMove.getStatus(),
                    articleToMove.getTitle(),articleToMove.getContent(),articleToMove.getDate(),articleToMove.getView(),articleToMove.getPraise(),articleToMove.getStatus()};
            coldJdbcTemplate.update(
                    "INSERT INTO article_cold(id,title,content,date,view,praise,status) VALUES (?,?,?,?,?,?,?) " +
                            "ON DUPLICATE KEY UPDATE title=?,content=?,date=?,view=?,praise=?,status=?",objToMove);
        }
    }
}
