#!/bin/bash
# Strict mode: exit on error (-e), on unset variables (-u), and fail a
# pipeline if any stage fails (pipefail). All positional-parameter uses
# below are guarded by the $# check, so -u is safe here.
set -euo pipefail

# With an argument: rebuild the index/transform and bulk-load esdata/$1.
# Without an argument: generate the test data file esdata/aggt.json.
if [ $# -eq 1 ]; then
  echo "reset index template"

  # Keep no redundant replicas on any index (single-node setup).
  # curl -H "Content-Type: application/json" -XPUT 'http://localhost:9200/_all/_settings?preserve_existing=true' -d '{ "index.number_of_replicas" : "0" }'
  curl -XPUT 'http://localhost:9200/_settings' -H 'Content-Type: application/json' -d '{ "settings": {	 "number_of_replicas" : 0  }}'

  echo ""
  # Drop artifacts from a previous run. curl without --fail exits 0 even on
  # HTTP 404, so a missing index/transform does not trip errexit.
  curl -X DELETE "localhost:9200/aggt"
  curl -X DELETE "localhost:9200/aggt_t"
  curl -X DELETE "localhost:9200/_transform/aggt"

  echo "create index..."
  curl -XPUT "localhost:9200/aggt" -H 'Content-Type: application/json' -d'
{
  "settings": {
    "index": {
      "number_of_replicas": "0"
    }
  },
  "mappings": {
      "properties": {
        "clientip": {
          "type": "ip"
        },
        "port": {
          "type": "integer"
        },
        "sql":{
          "type": "keyword"
        },
        "tm": {
          "type": "date",
          "format": "yyyy-MM-dd HH:mm:ss||yyyy-MM-dd||epoch_second"
        }
      }
    }
}
'
  echo "load data..."
  # Quote "$1" so a filename containing spaces does not word-split.
  curl -v -H 'Content-Type: application/x-ndjson' -XPOST 'localhost:9200/aggt/_bulk?pretty' --data-binary @"esdata/$1"
# No-op here-doc used as a block comment; delimiter quoted so nothing
# inside is ever expanded.
:<<'EOF'
ES limits HTTP request bodies to 100m by default:
https://github.com/elastic/elasticsearch/issues/2902
http.max_content_length can be raised, but with a small heap (512m) the
parent circuit breaker still rejects large bulk requests:
        "reason" : "[parent] Data too large, data for [<http_request>] would be [629069976/599.9mb], which is larger than the limit of [510027366/486.3mb], real usage: [340070400/324.3mb], new bytes reserved: [288999576/275.6mb], usages [request=0/0b, fielddata=0/0b, in_flight_requests=288999576/275.6mb, accounting=127900/124.9kb]",
        "bytes_wanted" : 629069976,
        "bytes_limit" : 510027366,
        "durability" : "TRANSIENT"

Workaround: split the file and load the chunks separately:
pushd esdata; split -l 800000 aggt.json aggt; popd

curl -v -H 'Content-Type: application/x-ndjson' -XPOST 'localhost:9200/aggt/_bulk?pretty' --data-binary @esdata/aggtaa
curl -v -H 'Content-Type: application/x-ndjson' -XPOST 'localhost:9200/aggt/_bulk?pretty' --data-binary @esdata/aggtab
curl -v -H 'Content-Type: application/x-ndjson' -XPOST 'localhost:9200/aggt/_bulk?pretty' --data-binary @esdata/aggtac
EOF

  echo "transforms..."
  # Pivot aggt into aggt_t: group by client IP and port, count tm values.
  curl -X PUT "localhost:9200/_transform/aggt?pretty" -H 'Content-Type: application/json' -d'
{
  "source": {
    "index": [
      "aggt"
    ]
  },
  "dest" : {
    "index" : "aggt_t"
  },
  "pivot": {
    "group_by": {
      "clientip": {
        "terms": {
          "field": "clientip"
        }
      },
      "port": {
        "histogram": {
          "field": "port",
          "interval": "1"
        }
      }
    },
    "aggregations": {
      "tm.value_count": {
        "value_count": {
          "field": "tm"
        }
      }
    }
  }
}
'
  # curl -XPOST "localhost:9200/_transform/aggt/_start?pretty"

else
  FILENAME=./esdata/aggt.json
  echo "generate data"
  if [ -f "$FILENAME" ]; then
    echo "data ready, rm esdata/aggt.json to delete"
    exit 1
  else
    echo "generate..."
    # Under errexit the redirection below would abort the script if the
    # target directory does not exist yet.
    mkdir -p esdata
    PORT=100
    IP=100
    echo "..."
    n=1
    # Emit every record through builtin printf and a single redirection
    # over the whole loop, instead of forking `cat <<EOF >>` once per
    # record (~1,020,100 execs). Progress messages go to stderr so they
    # never end up inside the data file.
    for ((ip1=0; ip1<=IP; ip1++)); do
      for ((ip2=0; ip2<=IP; ip2++)); do
        for ((port=1; port<=PORT; port++)); do
          printf '{"index":{"_index":"aggt","_id":%d}}\n' "$n"
          printf '{"clientip":"10.10.%d.%d", "port":%d, "sql":"中国北京市海淀区", "tm": "2020-11-01 10:00:00"}\n' "$ip1" "$ip2" "$port"
          n=$((n+1))
        done
        echo "port done" >&2
      done
      # NOTE: the original printed "$ip1 $ip2" here, but $ip2 is always
      # IP+1 once the inner loop has finished; report only the outer counter.
      echo "ip: $ip1" >&2
    done > "$FILENAME"
    echo "gen data done."
  fi

  #echo "load data to index..."
  # curl -v -H 'Content-Type: application/x-ndjson' -XPOST 'localhost:9200/aggt/_bulk?pretty' --data-binary @esdata/aggt.json
fi

