package com.shujia.datahub;

import com.aliyun.datahub.client.DatahubClient;
import com.aliyun.datahub.client.DatahubClientBuilder;
import com.aliyun.datahub.client.auth.AliyunAccount;
import com.aliyun.datahub.client.common.DatahubConfig;
import com.aliyun.datahub.client.exception.*;
import com.aliyun.datahub.client.http.HttpConfig;
import com.aliyun.datahub.client.model.*;

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DatahubComsumer {

    /**
     * Example of consuming Tuple records from a DataHub topic via a subscription,
     * committing the consumption offset back to DataHub every 1000 records and
     * once more on exit so progress since the last periodic commit is not lost.
     *
     * <p>Flow: open a subscription session, resolve the starting cursor from the
     * stored offset (falling back to the oldest retained record when the offset is
     * unset or expired), then read records in batches of {@code fetchNum}.
     */
    public static void example() {
        String projectName = "sentiment";
        String topic = "test1";
        String subId = "1617691113893TNKAB";

        // Endpoint for region cn-shanghai; adjust for your actual region.
        String endpoint = "https://dh-cn-shanghai.aliyuncs.com";
        // SECURITY NOTE: credentials should never be hard-coded in source.
        // Environment variables take precedence; the original literals remain
        // only as a backward-compatible fallback for this demo and should be
        // rotated/removed.
        String accessId = envOrDefault("DATAHUB_ACCESS_ID", "LTAIvXl3QTWkqvYc");
        String accessKey = envOrDefault("DATAHUB_ACCESS_KEY", "LUkTK7tY2meir3XuPfSa8RKmbuLQGU");

        // Build the DataHub client. Binary transport (last ctor arg = true) is
        // supported from server version 2.12; set it to false on dedicated
        // clouds if connections fail.
        DatahubClient datahubClient = DatahubClientBuilder.newBuilder()
                .setDatahubConfig(
                        new DatahubConfig(endpoint,
                                new AliyunAccount(accessId, accessKey), true))
                // HttpConfig is optional; LZ4 compression is recommended for
                // read/write traffic.
                .setHttpConfig(new HttpConfig()
                        .setCompressType(HttpConfig.CompressType.LZ4)
                        .setConnTimeout(10000))
                .build();

        // A shard is analogous to a Kafka partition; this example consumes shard "0".
        String shardId = "0";
        List<String> shardIds = Arrays.asList("0", "1", "2");

        // Open a subscription session covering all shards of interest.
        OpenSubscriptionSessionResult openSubscriptionSessionResult =
                datahubClient.openSubscriptionSession(projectName, topic, subId, shardIds);

        // Stored consumption offset for the shard we are about to read.
        SubscriptionOffset subscriptionOffset =
                openSubscriptionSessionResult.getOffsets().get(shardId);

        // Resolve the starting cursor:
        //  - sequence < 0 means nothing has been consumed yet -> start from the
        //    oldest record still within the topic's lifecycle;
        //  - otherwise resume at the record after the last committed sequence,
        //    falling back to OLDEST if that position has already expired.
        String cursor;
        if (subscriptionOffset.getSequence() < 0) {
            cursor = datahubClient.getCursor(projectName, topic, shardId, CursorType.OLDEST).getCursor();
        } else {
            long nextSequence = subscriptionOffset.getSequence() + 1;
            try {
                // getCursor by SEQUENCE throws SeekOutOfRange when the data at
                // that position has expired.
                cursor = datahubClient.getCursor(projectName, topic, shardId,
                        CursorType.SEQUENCE, nextSequence).getCursor();
            } catch (SeekOutOfRangeException e) {
                cursor = datahubClient.getCursor(projectName, topic, shardId, CursorType.OLDEST).getCursor();
            }
        }

        // Number of records consumed since the last successful offset commit.
        long recordCount = 0L;

        RecordSchema recordSchema = datahubClient.getTopic(projectName, topic).getRecordSchema();
        // Batch size per getRecords call.
        int fetchNum = 10;

        // Consume this shard until the subscription goes offline, the session is
        // invalidated, or the thread is interrupted.
        while (true) {
            try {
                GetRecordsResult getRecordsResult =
                        datahubClient.getRecords(projectName, topic, shardId, recordSchema, cursor, fetchNum);
                if (getRecordsResult.getRecordCount() <= 0) {
                    // No data available yet; back off briefly before polling again.
                    Thread.sleep(1000);
                    continue;
                }
                for (RecordEntry recordEntry : getRecordsResult.getRecords()) {
                    // Tuple topic: data is accessed by field name per the schema.
                    TupleRecordData data = (TupleRecordData) recordEntry.getRecordData();

                    // BUGFIX: original output was missing the tab before field3,
                    // running field2 and field3 together.
                    System.out.println("field1:" + data.getField("id") + "\t"
                            + "field2:" + data.getField("name") + "\t"
                            + "field3:" + data.getField("age"));

                    // TODO: persist the record (e.g. via a pooled DB connection)
                    // BEFORE advancing the offset, so a crash cannot skip data.

                    ++recordCount;
                    subscriptionOffset.setSequence(recordEntry.getSequence());
                    subscriptionOffset.setTimestamp(recordEntry.getSystemTime());

                    if (recordCount % 1000 == 0) {
                        // Periodically commit the consumed offset back to DataHub.
                        commitOffset(datahubClient, projectName, topic, subId, shardId, subscriptionOffset);
                        // Keep only the uncommitted remainder so the counter
                        // cannot grow without bound.
                        recordCount -= 1000;
                        System.out.println("commit offset successful");
                    }
                }
                cursor = getRecordsResult.getNextCursor();
            } catch (InterruptedException e) {
                // Restore the interrupt flag so callers can observe it, then stop.
                Thread.currentThread().interrupt();
                break;
            } catch (SubscriptionOfflineException | SubscriptionSessionInvalidException e) {
                // Offline: subscription was taken offline.
                // SessionInvalid: another client took over this subscription.
                break;
            } catch (SubscriptionOffsetResetException e) {
                // The offset was reset externally; re-fetch it and resume.
                // BUGFIX: a sequence value must be paired with CursorType.SEQUENCE
                // (the original passed it with SYSTEM_TIME). If the reset was done
                // by timestamp, use CursorType.SYSTEM_TIME with the timestamp instead.
                subscriptionOffset = datahubClient
                        .getSubscriptionOffset(projectName, topic, subId, shardIds)
                        .getOffsets().get(shardId);
                long nextSequence = subscriptionOffset.getSequence() + 1;
                cursor = datahubClient.getCursor(projectName, topic, shardId,
                        CursorType.SEQUENCE, nextSequence).getCursor();
            } catch (DatahubClientException e) {
                // Transient client/server error: log it and retry after a short
                // pause instead of silently spinning (original swallowed this).
                System.err.println("DataHub error, will retry: " + e.getMessage());
                try {
                    Thread.sleep(1000);
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    break;
                }
            } catch (Exception e) {
                System.err.println("Unexpected error, stopping consumer: " + e);
                break;
            }
        }

        // Best-effort final commit so up to 999 records of progress since the
        // last periodic commit are not lost on shutdown.
        if (recordCount > 0) {
            try {
                commitOffset(datahubClient, projectName, topic, subId, shardId, subscriptionOffset);
                System.out.println("final commit offset successful");
            } catch (DatahubClientException e) {
                System.err.println("final offset commit failed: " + e.getMessage());
            }
        }
    }

    /** Commits the given shard's offset back to DataHub. */
    private static void commitOffset(DatahubClient client, String project, String topic,
                                     String subId, String shardId, SubscriptionOffset offset) {
        Map<String, SubscriptionOffset> offsetMap = new HashMap<>();
        offsetMap.put(shardId, offset);
        client.commitSubscriptionOffset(project, topic, subId, offsetMap);
    }

    /** Returns the named environment variable, or {@code fallback} when unset/blank. */
    private static String envOrDefault(String name, String fallback) {
        String value = System.getenv(name);
        return (value == null || value.isEmpty()) ? fallback : value;
    }

    public static void main(String[] args) {
        DatahubComsumer.example();
    }
}
