package com.flink.hbase.elasticsearch2kafka;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.flink.api.common.functions.RuntimeContext;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;
import org.apache.http.HttpHost;
import org.apache.http.auth.AuthScope;
import org.apache.http.auth.UsernamePasswordCredentials;
import org.apache.http.client.CredentialsProvider;
import org.apache.http.impl.client.BasicCredentialsProvider;
import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
import org.elasticsearch.action.search.ClearScrollRequest;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.client.RequestOptions;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.client.indices.GetIndexRequest;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.search.slice.SliceBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Multithreaded parallel Elasticsearch source based on {@link RichParallelSourceFunction}.
 * Evenly assigns primary shards across subtasks and reads the assigned shards
 * concurrently with asynchronous scroll requests.
 */
public abstract class ElasticsearchMultithreadedSource<T> extends RichParallelSourceFunction<T> {
    private static final Logger LOG = LoggerFactory.getLogger(ElasticsearchMultithreadedSource.class);

    /** Indices to read from. */
    private final String[] indices;
    /** Raw JSON query body (fed to {@code wrapperQuery}); null or blank means match-all. */
    private final String query;
    /** Number of documents fetched per scroll page. */
    private final int scrollSize;
    /** Scroll context keep-alive, in milliseconds. */
    private final int scrollTimeoutMs;
    /** Number of reader threads each subtask uses for its assigned shards. */
    private final int readThreads;
    /** Connection settings: "hosts" (required, "h1:9200,h2:9200"), optional "scheme", "username"/"password", timeout keys. */
    private final Map<String, Object> esConfig;

    private transient RestHighLevelClient client;
    private transient ExecutorService executor;
    private transient List<ShardAssignment.ShardInfo> assignedShards;
    private transient AtomicBoolean isRunning;
    private transient AtomicLong totalDocumentsRead;
    private transient ObjectMapper objectMapper;

    /**
     * @param indices         Elasticsearch indices to read
     * @param query           raw JSON query body; null/empty reads all documents
     * @param scrollSize      scroll page size
     * @param scrollTimeoutMs scroll keep-alive in milliseconds
     * @param readThreads     reader-thread count per subtask
     * @param esConfig        Elasticsearch connection configuration (see field doc)
     */
    protected ElasticsearchMultithreadedSource(String[] indices, String query,
                                             int scrollSize, int scrollTimeoutMs,
                                             int readThreads, Map<String, Object> esConfig) {
        this.indices = indices;
        this.query = query;
        this.scrollSize = scrollSize;
        this.scrollTimeoutMs = scrollTimeoutMs;
        this.readThreads = readThreads;
        this.esConfig = esConfig;
    }

    @Override
    public void open(Configuration parameters) throws Exception {
        super.open(parameters);

        LOG.info("Opening Elasticsearch multithreaded source for indices: {}", Arrays.toString(indices));

        // Build the REST client first: shard discovery below depends on it.
        initializeElasticsearchClient();

        // ObjectMapper must exist before getShardAssignments() parses the _cat/shards response.
        objectMapper = new ObjectMapper();

        // Determine which primary shards this subtask is responsible for.
        assignedShards = getShardAssignments();

        executor = Executors.newFixedThreadPool(readThreads);
        isRunning = new AtomicBoolean(true);
        totalDocumentsRead = new AtomicLong(0);

        LOG.info("Elasticsearch source initialized with {} shards assigned to task {}",
                assignedShards.size(), getRuntimeContext().getIndexOfThisSubtask());
    }

    @Override
    public void run(SourceContext<T> ctx) throws Exception {
        if (assignedShards == null || assignedShards.isEmpty()) {
            LOG.warn("No shards assigned to task {}", getRuntimeContext().getIndexOfThisSubtask());
            return;
        }

        // One reader task per assigned shard; the fixed pool caps actual concurrency at readThreads.
        List<Future<Void>> futures = new ArrayList<>();

        for (ShardAssignment.ShardInfo shard : assignedShards) {
            Future<Void> future = executor.submit(() -> {
                try {
                    readShard(shard, ctx);
                } catch (Exception e) {
                    LOG.error("Error reading shard: {}", shard.getShardIdentifier(), e);
                    throw new RuntimeException(e);
                }
                return null;
            });
            futures.add(future);
        }

        // Block until every shard reader finishes; propagate the first failure to Flink.
        try {
            for (Future<Void> future : futures) {
                future.get();
            }
        } catch (Exception e) {
            LOG.error("Error in shard reading tasks", e);
            throw e;
        }

        LOG.info("Task {} completed reading {} shards, total documents: {}",
                getRuntimeContext().getIndexOfThisSubtask(), assignedShards.size(), totalDocumentsRead.get());
    }

    @Override
    public void cancel() {
        // Signal the scroll loops to stop at the next page boundary.
        if (isRunning != null) {
            isRunning.set(false);
        }

        // Drain the reader pool; force-stop if it does not wind down in time.
        if (executor != null && !executor.isShutdown()) {
            executor.shutdown();
            try {
                if (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
                    executor.shutdownNow();
                }
            } catch (InterruptedException e) {
                executor.shutdownNow();
                Thread.currentThread().interrupt();
            }
        }
    }

    @Override
    public void close() throws Exception {
        cancel();

        if (client != null) {
            client.close();
        }

        super.close();
    }

    /**
     * Builds the {@link RestHighLevelClient} from {@code esConfig}: host list,
     * optional basic authentication, and request timeouts.
     */
    private void initializeElasticsearchClient() {
        // Parse the comma-separated host list; "hosts" is mandatory.
        String hostsConfig = (String) esConfig.get("hosts");
        Objects.requireNonNull(hostsConfig, "esConfig entry 'hosts' is required");
        String[] hostArray = hostsConfig.split(",");
        HttpHost[] hosts = new HttpHost[hostArray.length];

        for (int i = 0; i < hostArray.length; i++) {
            String[] hostPort = hostArray[i].trim().split(":");
            String host = hostPort[0];
            int port = hostPort.length > 1 ? Integer.parseInt(hostPort[1]) : 9200;
            String scheme = (String) esConfig.getOrDefault("scheme", "http");
            hosts[i] = new HttpHost(host, port, scheme);
        }

        RestClientBuilder builder = RestClient.builder(hosts);

        // Basic auth, only when both credentials are present.
        if (esConfig.containsKey("username") && esConfig.containsKey("password")) {
            String username = (String) esConfig.get("username");
            String password = (String) esConfig.get("password");

            final CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
            credentialsProvider.setCredentials(AuthScope.ANY,
                new UsernamePasswordCredentials(username, password));

            builder.setHttpClientConfigCallback(
                httpClientBuilder -> httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider));
        }

        // Timeouts, with defaults matching typical bulk-read workloads.
        builder.setRequestConfigCallback(requestConfigBuilder -> requestConfigBuilder
            .setConnectTimeout((Integer) esConfig.getOrDefault("connect.timeout", 5000))
            .setSocketTimeout((Integer) esConfig.getOrDefault("socket.timeout", 60000))
            .setConnectionRequestTimeout((Integer) esConfig.getOrDefault("connection.request.timeout", 5000)));

        client = new RestHighLevelClient(builder);
    }

    /**
     * Computes the shard slice owned by this subtask: all started primary shards,
     * sorted deterministically, are split as evenly as possible across the
     * parallel subtasks (the first {@code remainder} tasks get one extra shard).
     *
     * @return the primary shards this subtask should read
     * @throws IOException if shard metadata cannot be fetched
     */
    private List<ShardAssignment.ShardInfo> getShardAssignments() throws IOException {
        RuntimeContext runtimeContext = getRuntimeContext();
        int taskIndex = runtimeContext.getIndexOfThisSubtask();
        int totalParallelism = runtimeContext.getNumberOfParallelSubtasks();

        List<ShardAssignment.ShardInfo> allShards = getAllShards();

        // Only started primary shards are read, so no document is consumed twice via replicas.
        List<ShardAssignment.ShardInfo> primaryShards = new ArrayList<>();
        for (ShardAssignment.ShardInfo shard : allShards) {
            if (shard.isPrimary() && "STARTED".equals(shard.getState())) {
                primaryShards.add(shard);
            }
        }

        int totalShards = primaryShards.size();
        int baseShardsPerTask = totalShards / totalParallelism;
        int remainder = totalShards % totalParallelism;

        // Each of the first `remainder` tasks takes one extra shard; this formula
        // already assigns the final task exactly up to totalShards, so no special
        // last-task handling is needed.
        int startIndex = taskIndex * baseShardsPerTask + Math.min(taskIndex, remainder);
        int endIndex = startIndex + baseShardsPerTask + (taskIndex < remainder ? 1 : 0);

        List<ShardAssignment.ShardInfo> assignedShards = new ArrayList<>();
        for (int i = startIndex; i < endIndex; i++) {
            assignedShards.add(primaryShards.get(i));
        }

        LOG.info("Task {}/{} assigned {} shards (index {}-{})",
                taskIndex, totalParallelism, assignedShards.size(), startIndex, endIndex - 1);

        return assignedShards;
    }

    /**
     * Fetches shard metadata for all configured indices via the {@code _cat/shards}
     * API and returns it sorted by index name, then shard id, so every subtask
     * sees the same ordering.
     *
     * @throws IOException if the request fails or the response cannot be parsed
     */
    private List<ShardAssignment.ShardInfo> getAllShards() throws IOException {
        List<ShardAssignment.ShardInfo> shards = new ArrayList<>();

        String endpoint = "/_cat/shards/" + String.join(",", indices) + "?format=json&h=index,shard,prirep,state,docs,node";

        try {
            // Use the low-level client: _cat APIs have no high-level wrapper.
            org.elasticsearch.client.Response response = client.getLowLevelClient()
                .performRequest(new org.elasticsearch.client.Request("GET", endpoint));

            // FIX: EntityUtils lives in Apache HttpCore, not the ES client package.
            String responseBody = org.apache.http.util.EntityUtils.toString(response.getEntity());
            JsonNode shardsArray = objectMapper.readTree(responseBody);

            for (JsonNode shardNode : shardsArray) {
                String indexName = shardNode.get("index").asText();
                int shardId = shardNode.get("shard").asInt();
                String prirepStr = shardNode.get("prirep").asText();
                boolean isPrimary = "p".equals(prirepStr);
                String state = shardNode.get("state").asText();
                // "docs" and "node" may be null for unassigned shards.
                long docs = shardNode.has("docs") && !shardNode.get("docs").isNull() ?
                    shardNode.get("docs").asLong() : 0;
                String nodeName = shardNode.has("node") && !shardNode.get("node").isNull() ?
                    shardNode.get("node").asText() : "unknown";

                shards.add(new ShardAssignment.ShardInfo(
                    indexName, shardId, nodeName, nodeName, isPrimary, docs, state
                ));
            }
        } catch (Exception e) {
            LOG.error("Error getting shard information", e);
            throw new IOException("Failed to get shard information", e);
        }

        // Deterministic order: by index name, then shard id.
        shards.sort(Comparator
            .comparing(ShardAssignment.ShardInfo::getIndexName)
            .thenComparingInt(ShardAssignment.ShardInfo::getShardId));

        return shards;
    }

    /**
     * Scroll-reads a single shard and emits every parsed document to {@code ctx}.
     * Stops early when {@link #cancel()} flips {@code isRunning}.
     *
     * @throws IOException on search/scroll failures
     */
    private void readShard(ShardAssignment.ShardInfo shard, SourceContext<T> ctx) throws IOException {
        LOG.info("Starting to read shard: {}", shard.getShardIdentifier());

        SearchRequest searchRequest = new SearchRequest(shard.getIndexName());
        SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();

        if (query != null && !query.trim().isEmpty()) {
            searchSourceBuilder.query(QueryBuilders.wrapperQuery(query));
        } else {
            searchSourceBuilder.query(QueryBuilders.matchAllQuery());
        }

        searchSourceBuilder.size(scrollSize);
        // FIX: target the shard via "_shards:" preference only. A routing value is
        // hashed to select a shard, so routing(shardId) did NOT address that shard
        // and could conflict with the preference, hitting the wrong shard set.
        searchRequest.preference("_shards:" + shard.getShardId());

        searchRequest.scroll(TimeValue.timeValueMillis(scrollTimeoutMs));
        searchRequest.source(searchSourceBuilder);

        String scrollId = null;
        long batchCount = 0;

        try {
            SearchResponse searchResponse = client.search(searchRequest, RequestOptions.DEFAULT);
            scrollId = searchResponse.getScrollId();

            // Flink source contract: emit records while holding the checkpoint lock.
            final Object checkpointLock = ctx.getCheckpointLock();

            while (isRunning.get()) {
                SearchHit[] hits = searchResponse.getHits().getHits();

                if (hits.length == 0) {
                    break;
                }

                for (SearchHit hit : hits) {
                    try {
                        T document = parseHit(hit);
                        if (document != null) {
                            synchronized (checkpointLock) {
                                ctx.collect(document);
                            }
                            totalDocumentsRead.incrementAndGet();
                            batchCount++;
                        }
                    } catch (Exception e) {
                        // Log and keep going: one bad document must not abort the shard.
                        LOG.error("Error parsing hit from shard {}: {}",
                                shard.getShardIdentifier(), hit.getId(), e);
                    }
                }

                // Coarse progress reporting (guard against logging at zero).
                if (batchCount > 0 && batchCount % 10000 == 0) {
                    LOG.info("Shard {} processed {} documents",
                            shard.getShardIdentifier(), batchCount);
                }

                SearchScrollRequest scrollRequest = new SearchScrollRequest(scrollId);
                scrollRequest.scroll(TimeValue.timeValueMillis(scrollTimeoutMs));
                searchResponse = client.scroll(scrollRequest, RequestOptions.DEFAULT);
                // FIX: the scroll id may change between pages; always use the latest one.
                scrollId = searchResponse.getScrollId();
            }

            LOG.info("Completed reading shard: {}, total documents: {}",
                    shard.getShardIdentifier(), batchCount);

        } finally {
            // Release the server-side scroll context; best-effort cleanup.
            if (scrollId != null) {
                try {
                    ClearScrollRequest clearScrollRequest = new ClearScrollRequest();
                    clearScrollRequest.addScrollId(scrollId);
                    client.clearScroll(clearScrollRequest, RequestOptions.DEFAULT);
                } catch (Exception e) {
                    LOG.warn("Error clearing scroll context for shard {}: {}",
                            shard.getShardIdentifier(), e.getMessage());
                }
            }
        }
    }

    /**
     * Parses an Elasticsearch {@link SearchHit} into the target record type.
     * Implemented by subclasses; may return {@code null} to skip a document.
     */
    protected abstract T parseHit(SearchHit hit) throws Exception;

    /**
     * @return the shared {@link ObjectMapper} for subclass use (set in {@link #open})
     */
    protected ObjectMapper getObjectMapper() {
        return objectMapper;
    }
}