package com.qf.bigdata.repository;


import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

/**
 *  QF
 *  实现HBase 表查询操作
 *  2020/1/27 精简代码实现：合并HBaseAutoConfig HBaseProperties HBaseQuery HBaseTemplate为一个代码。只需在DataSourceConfig进行HBaseTemplate的构造注入即可
 */
/**
 *  QF
 *  HBase table query helper.
 *  2020/1/27 Simplified implementation: merges HBaseAutoConfig, HBaseProperties, HBaseQuery and
 *  HBaseTemplate into a single class; only a constructor injection of HBaseTemplate is needed
 *  in DataSourceConfig.
 */
public class HBaseTemplate {
    private static final Logger LOGGER = LoggerFactory.getLogger(HBaseTemplate.class);
    private static final String HBASE_QUORUM = "hbase.zookeeper.quorum";
    private static final String HBASE_ROOTDIR = "hbase.rootdir";
    private static final String HBASE_ZNODE_PARENT = "zookeeper.znode.parent";
    private static final String FEATURE_TABLE_NAME = "recommend_bk201:union-feature";
    // Minimum number of ':'-separated fields a feature entry must have: id, ?, vector, embedding.
    private static final int MIN_FEATURE_FIELDS = 4;

    private Configuration configuration;
    // volatile: required for the double-checked locking in getConnection() — every read comes
    // from main memory and every write is flushed to main memory, not a CPU cache.
    private volatile Connection connection;

    /**
     * Builds an HBase client configuration from the given ZooKeeper quorum, HBase root
     * directory and znode parent, and stores it via {@link #setConfiguration(Configuration)}.
     *
     * @param quorum      comma-separated ZooKeeper quorum hosts
     * @param rootDir     HBase root directory, e.g. {@code hdfs://host:8020/hbase}
     * @param zNodeParent ZooKeeper znode parent path used by HBase
     */
    public HBaseTemplate(String quorum, String rootDir, String zNodeParent) {
        Configuration conf = HBaseConfiguration.create();
        conf.set(HBASE_QUORUM, quorum);
        conf.set(HBASE_ROOTDIR, rootDir);
        conf.set(HBASE_ZNODE_PARENT, zNodeParent);
        this.setConfiguration(conf);
    }

    public Configuration getConfiguration() {
        return configuration;
    }

    /**
     * Stores the configuration, forcing a 10-second client operation timeout.
     * NOTE(review): mutates the caller-supplied {@code Configuration} instance.
     *
     * @param configuration HBase client configuration to use for connections
     */
    public void setConfiguration(Configuration configuration) {
        configuration.set("hbase.client.operation.timeout", "10000");
        this.configuration = configuration;
    }

    /**
     * Lazily creates and caches a shared HBase connection using double-checked locking
     * on the volatile {@code connection} field.
     *
     * @return the shared connection, or {@code null} if creation failed (the failure is logged)
     */
    public Connection getConnection() {
        if (null == this.connection) {
            synchronized (this) {
                if (null == this.connection) {
                    try {
                        this.connection = ConnectionFactory.createConnection(configuration);
                        LOGGER.info("hbase connection: {}", this.connection);
                    } catch (IOException e) {
                        // Fix: keep the exception so the stack trace is not lost.
                        LOGGER.error("hbase connection creation failed", e);
                    }
                }
            }
        }
        return this.connection;
    }

    /**
     * Reads the feature row for the given user id from the union-feature table and
     * returns every non-empty column as qualifier -&gt; value.
     *
     * @param uid row key (user id)
     * @return map of column qualifier to value; {@code null} when the row does not exist;
     *         possibly empty when an I/O error occurred (error is logged)
     */
    public Map<String, String> parseFeatures(String uid) {
        Map<String, String> map = new HashMap<>();
        // Fix: try-with-resources — the Table handle was previously never closed (leak).
        try (Table htable = this.getConnection().getTable(TableName.valueOf(FEATURE_TABLE_NAME))) {
            // Fix: explicit UTF-8 instead of the platform default charset.
            Get get = new Get(uid.getBytes(StandardCharsets.UTF_8));
            Result rs = htable.get(get);
            if (rs.isEmpty()) {
                LOGGER.error("user not exist:{}", uid);
                return null;
            }
            for (Cell cell : rs.rawCells()) {
                String qualifier = new String(CellUtil.cloneQualifier(cell), StandardCharsets.UTF_8);
                String value = new String(CellUtil.cloneValue(cell), StandardCharsets.UTF_8);
                if (!"".equals(value)) {
                    map.put(qualifier, value);
                }
            }
        } catch (IOException e) {
            // Fix: was e.printStackTrace(); log through SLF4J with the cause attached.
            LOGGER.error("failed to read features for uid:{}", uid, e);
        }
        return map;
    }

    /**
     * Converts a user's raw feature row into candidate item id -&gt; combined feature vector
     * (user vector + item vector + item embedding, square brackets stripped, comma-joined).
     *
     * NOTE: all ALS and itemcf results are taken as-is. A real system should take top-k per
     * algorithm, sort those, exclude items already served, and control the per-algorithm
     * mixing ratio — that logic is left as an exercise.
     *
     * @param uid user id (row key)
     * @return Map&lt;aid, features&gt;, or {@code null} when the user or any of the required
     *         columns ({@code uf}, {@code als}, {@code itemcf}) is missing
     */
    public Map<String, String> transItemFeatureList(String uid) {
        // <row, col>
        Map<String, String> parseFeatures = parseFeatures(uid);
        if (parseFeatures == null) {
            return null;
        }
        String userVector = parseFeatures.get("uf");
        String als = parseFeatures.get("als");
        String itemcf = parseFeatures.get("itemcf");

        if (itemcf == null || userVector == null || als == null) {
            LOGGER.error("user info [uf,als,itemcf] some features is null");
            return null;
        }

        Map<String, String> itemFeatureMap = new HashMap<>();
        // Fix: the ALS and itemcf loops were duplicated — extracted into one helper.
        appendItemFeatures(itemFeatureMap, als, userVector);
        appendItemFeatures(itemFeatureMap, itemcf, userVector);
        return itemFeatureMap;
    }

    /**
     * Parses ';'-separated feature entries of the form "itemId:...:vector:embedding"
     * (fields split on ':'; field 0 is the item id, fields 2 and 3 the vector and
     * embedding — presumably field 1 is a score; verify against the writer job) and puts
     * itemId -&gt; "userVector,itemVector,itemEmbedding" with '[' and ']' stripped into target.
     * Fix: malformed entries are logged and skipped instead of throwing
     * ArrayIndexOutOfBoundsException.
     */
    private void appendItemFeatures(Map<String, String> target, String rawItems, String userVector) {
        for (String item : rawItems.split(";")) {
            String[] info = item.split(":");
            if (info.length < MIN_FEATURE_FIELDS) {
                LOGGER.warn("skip malformed feature entry:{}", item);
                continue;
            }
            String unionFeature = StringUtils.strip(userVector, "[]") + ","
                    + StringUtils.strip(info[2], "[]") + ","
                    + StringUtils.strip(info[3], "[]");
            target.put(info[0], unionFeature);
        }
    }


    public static void main(String[] args) {
        LRModelPredict lrModelPredict = new LRModelPredict();
        lrModelPredict.init();
        HBaseTemplate hBaseTemplate = new HBaseTemplate("hadoop00", "hdfs://hadoop00:8020/hbase", "/hbase");
        Map<String, String> itemFeatures = hBaseTemplate.transItemFeatureList("1647");
        // Fix: transItemFeatureList may return null (missing user/columns) — was an unguarded NPE.
        if (itemFeatures == null) {
            LOGGER.error("no item features for uid:{}", "1647");
            return;
        }
        itemFeatures.forEach((k, v) -> System.out.println(k + "|" + lrModelPredict.predictProbability(v)));
    }

}

