package jourre;

import cn.hutool.core.collection.CollectionUtil;
import cn.hutool.core.io.FileUtil;
import cn.hutool.db.Db;
import cn.hutool.db.Entity;
import cn.hutool.db.ds.DSFactory;
import lombok.SneakyThrows;
import org.apache.commons.lang3.StringUtils;
import utils.Utils;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.IntStream;

/**
 * @Author: gcr
 * @Date: 2022-5-30 15:46
 */
public class JourCacu {

    @SneakyThrows
    public static void main(String[] args) {

        // Simulation of the journal-recommendation score calculation.
        //
        // Five scoring dimensions, weighted below (weights sum to 1.0):
        //   1. trend of Chinese-author publications over the last two years
        //   2. Chinese-author share over the last three years
        //   3. number of articles per journal in the result set
        //   4. impact factor relative to the most relevant articles' journals
        //   5. journal inclusion relevance (share of the journal's total output)

        Db use = Db.use(DSFactory.get("server_local"));
        String db = "pubmed_vip_extend_0530";
        String dbJour = "jour_merge_supp";

        // Dimension weights.
        double trendRecentTwoYears = 0.4;
        double percentOfThreeYears = 0.2;
        double resultPercent = 0.2;
        double jourIfWeight = 0.1;
        double fuJourRelevant5 = 0.1;

        // NOTE(review): full-table scan; the original `pmid IN (...)` filter was
        // commented out, and the row order (assumed to be relevance order for
        // dimension 4) has no ORDER BY backing it — confirm upstream.
        String sql = "select pmid,med_nlm_id,ischina,pub_year from " + db;
        List<Entity> ls = use.query(sql);

        String sqlJour = "select nc_nlm_id,jcr_if from " + dbJour;
        List<Entity> lsJour = use.query(sqlJour);

        // Journal (NLM id) -> JCR impact factor.
        Map<String, Double> jourAndSocre = new ConcurrentHashMap<>();
        for (Entity entity : lsJour) {
            String ncNlmId = entity.getStr("nc_nlm_id");
            Double jcrIf = entity.getDouble("jcr_if");
            if (StringUtils.isNotEmpty(ncNlmId) && jcrIf != null) {
                jourAndSocre.put(ncNlmId, jcrIf);
            }
        }

        // "This year" is effectively treated as last year by the data.
        Integer nowYear = Utils.getNowYear();
        Integer nowyearB1 = nowYear - 1;
        Integer nowyearB2 = nowYear - 2;

        // Window for dimensions 2: the last three years.
        Set<Integer> nearThreeYears = new HashSet<>(Arrays.asList(nowYear, nowyearB1, nowyearB2));

        // Journal -> accumulated final score.
        Map<String, Double> journalFinalScore = new HashMap<>();

        // year -> journal -> pmids of Chinese-author articles (dimension 1).
        Map<Integer, Map<String, List<Integer>>> yearJourAndAidChina = new ConcurrentHashMap<>();
        // journal -> ischina flag -> article count in the 3-year window (dimension 2).
        Map<String, Map<Integer, Integer>> recentThreeYearAid = new ConcurrentHashMap<>();
        // journal -> total article count in the 3-year window (dimension 2 denominator).
        Map<String, Integer> recentThreeYearAidAllSize = new ConcurrentHashMap<>();
        // journal -> all pmids in the result set (dimensions 3 and 5).
        Map<String, List<Integer>> jourAndPmidList = new ConcurrentHashMap<>();

        // Single pass over the result set, feeding every dimension's aggregate.
        for (Entity l : ls) {
            Integer pubYear = l.getInt("pub_year");
            Integer pmid = l.getInt("pmid");
            String nlmId = l.getStr("med_nlm_id") == null ? "" : l.getStr("med_nlm_id");
            Integer isChina = l.getInt("ischina");

            // Dimension 1: Chinese-author articles bucketed by year and journal.
            // Null-safe comparison: `ischina` may be NULL in the table; the original
            // `Ischia == 1` would throw NPE on unboxing.
            if (pubYear != null && StringUtils.isNotEmpty(nlmId) && Integer.valueOf(1).equals(isChina)) {
                yearJourAndAidChina
                        .computeIfAbsent(pubYear, y -> new ConcurrentHashMap<>())
                        .computeIfAbsent(nlmId, j -> Collections.synchronizedList(new ArrayList<>()))
                        .add(pmid);
            }

            // Dimensions 3 and 5: every article counts toward its journal.
            if (StringUtils.isNotEmpty(nlmId)) {
                jourAndPmidList
                        .computeIfAbsent(nlmId, j -> Collections.synchronizedList(new ArrayList<>()))
                        .add(pmid);
            }

            // Dimension 2: per-journal counts inside the 3-year window.
            if (StringUtils.isNotEmpty(nlmId) && pubYear != null && nearThreeYears.contains(pubYear)) {
                recentThreeYearAidAllSize.merge(nlmId, 1, Integer::sum);
                // ConcurrentHashMap forbids null keys, so rows without an ischina
                // flag only count toward the total, not the per-flag breakdown.
                if (isChina != null) {
                    recentThreeYearAid
                            .computeIfAbsent(nlmId, j -> new ConcurrentHashMap<>())
                            .merge(isChina, 1, Integer::sum);
                }
            }
        }

        // --- Dimension 1: two-year trend of Chinese-author publications ---
        Map<String, List<Integer>> near1List = yearJourAndAidChina.get(nowYear);
        Map<String, List<Integer>> near2List = yearJourAndAidChina.get(nowyearB1);
        Map<String, Double> jourScore1 = getJourScore(near1List, near2List);
        Map<String, Double> trendScoreMap = JourUtils.computeMapWithMaxValue(jourScore1, trendRecentTwoYears);

        // --- Dimension 2: Chinese-author share over the last three years ---
        Map<String, Double> chinaAidPercent = new ConcurrentHashMap<>();
        recentThreeYearAid.forEach((jour, byFlag) -> {
            Integer chinaCount = byFlag.get(1);
            // Only journals with at least one Chinese-author article get a share.
            if (chinaCount != null) {
                chinaAidPercent.put(jour, JourUtils.div(chinaCount, recentThreeYearAidAllSize.get(jour)));
            }
        });
        Map<String, Double> threeYearShareScoreMap = JourUtils.computeMapWithMaxValue(chinaAidPercent, percentOfThreeYears);

        // --- Dimension 3: article count per journal in the result set ---
        Map<String, Double> jourAidSizeDouble = new ConcurrentHashMap<>();
        // Journals in the result set that have a known impact factor (reused by dimension 4).
        Map<String, Double> allJourAllDouble = new ConcurrentHashMap<>();
        jourAndPmidList.forEach((jour, pmidsOfJour) -> {
            if (jourAndSocre.containsKey(jour)) {
                allJourAllDouble.put(jour, jourAndSocre.get(jour));
            }
            jourAidSizeDouble.put(jour, (double) pmidsOfJour.size());
        });
        Map<String, Double> resultCountScoreMap = JourUtils.computeMapWithMaxValue(jourAidSizeDouble, resultPercent);

        // --- Dimension 4: impact factor of the most relevant articles ---
        // Average the IF of the journals of the top-5 rows (assumed most relevant),
        // then score every journal by its distance from that average.
        double allScore = 0.0;
        int counted = 0;
        // Guard against result sets with fewer than five rows (the original
        // indexed blindly and would throw IndexOutOfBoundsException).
        int top = Math.min(5, ls.size());
        for (int i = 0; i < top; i++) {
            Entity entity = ls.get(i);
            String nlmId = entity.getStr("med_nlm_id") == null ? "" : entity.getStr("med_nlm_id");
            if (StringUtils.isNotEmpty(nlmId)) {
                Double jcrIf = jourAndSocre.get(nlmId);
                if (jcrIf != null) {
                    counted++;
                    allScore += jcrIf;
                }
            }
        }
        // NOTE(review): `counted` may be 0 here; behaviour then depends on
        // JourUtils.div's division-by-zero handling — confirm.
        double avgIF = JourUtils.div(allScore, counted);
        Double mapMaxValue = JourUtils.getMapMaxValue(allJourAllDouble);
        Double mapMinValue = JourUtils.getMapMinValue(allJourAllDouble);
        Map<String, Double> ifScoreMap = new ConcurrentHashMap<>();
        allJourAllDouble.forEach((jour, jcrIf) -> {
            double scored = JourUtils.forceUnifyScore(mapMaxValue, mapMinValue, jourIfWeight, Math.abs(jcrIf - avgIF), 0.01);
            ifScoreMap.put(jour, scored);
        });

        // --- Dimension 5: journal inclusion relevance ---
        // Share of a journal's total output (table nlmid_count_gcr) that appears
        // in this result set; the highest share receives full marks.
        List<Entity> query = use.query("select * from nlmid_count_gcr");
        Map<String, Integer> jourAllAidSize = new ConcurrentHashMap<>();
        query.forEach(entity -> jourAllAidSize.put(entity.getStr("nlmid"), entity.getInt("all_count")));

        Map<String, Double> jourAidSizePercentDouble = new ConcurrentHashMap<>();
        jourAndPmidList.forEach((jour, pmidsOfJour) -> {
            if (jourAllAidSize.containsKey(jour)) {
                jourAidSizePercentDouble.put(jour, JourUtils.div(pmidsOfJour.size(), jourAllAidSize.get(jour)));
            }
        });
        Map<String, Double> inclusionScoreMap = JourUtils.computeMapWithMaxValue(jourAidSizePercentDouble, fuJourRelevant5);
        inclusionScoreMap.forEach((jour, score) ->
                FileUtil.appendString(jour + "___" + score + "\n", "/usr/local/gcr/期刊收录相关性结果Map.log", "UTF-8"));

        // --- Accumulate the five dimension scores per journal ---
        for (String jour : jourAndPmidList.keySet()) {
            addScore(trendScoreMap, journalFinalScore, jour);
            addScore(threeYearShareScoreMap, journalFinalScore, jour);
            addScore(resultCountScoreMap, journalFinalScore, jour);
            addScore(ifScoreMap, journalFinalScore, jour);
            addScore(inclusionScoreMap, journalFinalScore, jour);
        }
        Map<String, Double> sortedScores = JourUtils.mapSortValue(journalFinalScore);
        sortedScores.forEach((jour, score) ->
                FileUtil.appendString(jour + "___" + score + "\n", "/usr/local/gcr/journalFinalScore.log", "UTF-8"));

        System.out.println("ssss");
    }


    /**
     * Adds the score that {@code sorce} holds for {@code jour} into the
     * accumulator map {@code to}. Journals absent from {@code sorce} are left
     * untouched; journals absent from {@code to} start from 0.0.
     *
     * @param sorce per-dimension score map (journal -> score)
     * @param to    accumulator map (journal -> running total), mutated in place
     * @param jour  journal key (NLM id)
     */
    public static void addScore(Map<String, Double> sorce, Map<String, Double> to, String jour) {
        Double score = sorce.get(jour);
        if (score != null) {
            to.put(jour, JourUtils.add(to.getOrDefault(jour, 0.0), score));
        }
    }

    /**
     * Computes the two-year trend score per journal from the Chinese-author
     * article buckets of the two most recent years. For each journal the score
     * is {@code JourUtils.divIntScore(previousYearCount, recentYearCount)},
     * where a missing year contributes a count of 0.
     *
     * <p>Note: the original implementation carried unreachable branches
     * (a key outside the intersection can never exist in both maps); they have
     * been removed without changing the live behaviour.
     *
     * @param near1List journal -> pmids in the most recent year (may be null)
     * @param near2List journal -> pmids in the year before that (may be null)
     * @return journal -> trend score, insertion-ordered: intersection first,
     *         then recent-only journals, then previous-only journals
     */
    public static Map<String, Double> getJourScore(Map<String, List<Integer>> near1List, Map<String, List<Integer>> near2List) {
        Map<String, Double> res = new LinkedHashMap<>();
        Map<String, List<Integer>> recent = near1List == null ? new HashMap<>() : near1List;
        Map<String, List<Integer>> previous = near2List == null ? new HashMap<>() : near2List;

        // Journals present in both years: previous-year vs recent-year count.
        Collection<String> intersection = CollectionUtil.intersection(recent.keySet(), previous.keySet());
        for (String jour : intersection) {
            res.put(jour, JourUtils.divIntScore(previous.get(jour).size(), recent.get(jour).size()));
        }
        // Journals only present in the recent year: previous-year count is 0.
        recent.forEach((jour, pmids) -> {
            if (!intersection.contains(jour)) {
                res.put(jour, JourUtils.divIntScore(0, pmids.size()));
            }
        });
        // Journals only present in the previous year: recent-year count is 0.
        previous.forEach((jour, pmids) -> {
            if (!intersection.contains(jour)) {
                res.put(jour, JourUtils.divIntScore(pmids.size(), 0));
            }
        });

        return res;
    }
}
