package wanda.commd;


import wanda.exception.HoodieIOException;
import wanda.model.HoodieKey;
import wanda.model.HoodieRecord;
import org.apache.avro.generic.GenericRecord;
import org.apache.kafka.clients.consumer.ConsumerRecord;

import java.io.IOException;
import java.io.Serializable;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.util.*;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;

/**
 * Test-data generator producing random {@link HoodieRecord} inserts spread
 * uniformly across a fixed set of partition paths. Every key handed out is
 * remembered in an internal registry so callers can later reference it.
 *
 * <p>Not thread-safe: the key registry and counter are updated without
 * synchronization, and the shared {@link Random} is unsynchronized.
 */
public class DataGens {

  // Based on examination of a sample file, the schema produces a fixed
  // per-record size; the default partition path is today's date.

  /** Default partition path: today's local date in ISO-8601 (yyyy-MM-dd) form. */
  public static final String DEFAULT_SECOND_PARTITION_PATH = getDate();

  /** @return today's local date as an ISO-8601 string, e.g. {@code "2024-05-01"}. */
  private static String getDate() {
    // LocalDate.toString() is defined to emit ISO-8601 format.
    LocalDateTime currentTime = LocalDateTime.now();
    return currentTime.toLocalDate().toString();
  }

  /** Partition paths used when the caller supplies none. */
  public static final String[] DEFAULT_PARTITION_PATHS = {
      DEFAULT_SECOND_PARTITION_PATH
  };

  // Fixed seed keeps generated data reproducible across runs; final because
  // the reference is never reassigned.
  private static final Random rand = new Random(46474747);

  /** Registry of every key generated so far, indexed by generation order. */
  private final Map<Integer, KeyPartition> existingKeys;
  private final String[] partitionPaths;
  private int numExistingKeys;

  public DataGens(String[] partitionPaths) {
    this(partitionPaths, new HashMap<>());
  }

  public DataGens() {
    this(DEFAULT_PARTITION_PATHS);
  }

  /**
   * @param partitionPaths  partition paths to spread generated records over;
   *                        defensively copied so later caller mutation has no effect
   * @param keyPartitionMap pre-existing key registry to continue from (kept by
   *                        reference, so it reflects subsequent generation)
   */
  public DataGens(String[] partitionPaths, Map<Integer, KeyPartition> keyPartitionMap) {
    this.partitionPaths = Arrays.copyOf(partitionPaths, partitionPaths.length);
    this.existingKeys = keyPartitionMap;
  }

  /**
   * Builds a payload for the given key from the supplied Avro record.
   *
   * @param key        key (record key + partition path) to embed in the payload
   * @param tripSchema Avro schema string handed through to the payload
   * @param rec        pre-built Avro record whose string form becomes the payload body
   * @throws IOException if the payload cannot be constructed
   */
  public TestRawTripPayload generateRandomValue(HoodieKey key, String tripSchema, GenericRecord rec) throws IOException {
    return new TestRawTripPayload(rec.toString(), key.getRecordKey(), key.getPartitionPath(), tripSchema);
  }

  /**
   * Generates new inserts, uniformly across the partition paths above. It also
   * updates the registry of existing keys.
   *
   * @param commitKey schema string forwarded to the generated payloads
   * @param n         number of records to generate
   * @param rec       Avro record used as the payload body for every insert
   * @throws IOException if payload construction fails
   */
  public List<HoodieRecord> generateInserts(String commitKey, Integer n, GenericRecord rec) throws IOException {
    return generateInsertsStream(commitKey, n, rec).collect(Collectors.toList());
  }

  /**
   * Generates a lazy stream of {@code n} new inserts, uniformly across the
   * partition paths above.
   *
   * <p>NOTE(review): the key registry and {@link #getNumExistingKeys()} are
   * updated as a side effect of CONSUMING the stream; an unconsumed or
   * partially consumed stream leaves them out of sync with the records
   * actually produced.
   */
  public Stream<HoodieRecord> generateInsertsStream(String commitKey, Integer n, GenericRecord rec) {
    int currSize = getNumExistingKeys();
    // Create and register a fresh key for each generated record.
    return IntStream.range(0, n).boxed().map(i -> {
      // Pick a partition uniformly at random and mint a fresh UUID record key.
      String partitionPath = partitionPaths[rand.nextInt(partitionPaths.length)];
      HoodieKey key = new HoodieKey(UUID.randomUUID().toString(), partitionPath);
      KeyPartition kp = new KeyPartition();
      kp.key = key;
      kp.partitionPath = partitionPath;
      existingKeys.put(currSize + i, kp);
      numExistingKeys++;
      try {
        return new HoodieRecord(key, generateRandomValue(key, commitKey, rec));
      } catch (IOException e) {
        // Wrap as unchecked for the lambda, preserving the cause.
        throw new HoodieIOException(e.getMessage(), e);
      }
    });
  }

  /** @return a defensive copy so callers cannot mutate internal state. */
  public String[] getPartitionPaths() {
    return Arrays.copyOf(partitionPaths, partitionPaths.length);
  }

  /** @return number of keys generated (and consumed) so far. */
  public int getNumExistingKeys() {
    return numExistingKeys;
  }

  /** Pairing of a generated key with the partition path it was assigned to. */
  public static class KeyPartition implements Serializable {
    private static final long serialVersionUID = 1L;

    HoodieKey key;
    String partitionPath;
  }

  /** Clears the key registry; the generator may be reused afterwards. */
  public void close() {
    existingKeys.clear();
  }
}
