# coding=utf-8
import datetime
import urllib.request as requests
import json
import csv

# Shared mutable row buffer: fields are accumulated across functions
# (main loop sets "app", get_quey_data fills the per-day/area/city stats)
# and each completed row is appended to the CSV by get_cvs().
# NOTE(review): contents persist between iterations, so stale keys can
# leak into later rows — confirm this carry-over is intended.
c = {}


def gettime():
    """Return the query time window (one minute ago, now) as formatted strings.

    Returns:
        tuple[str, str]: (start, end) timestamps in "%Y-%m-%dT%H:%M:%S"
        format, where start is exactly 60 seconds before end.
    """
    now = datetime.datetime.now()
    one_before = now - datetime.timedelta(seconds=60)
    fmt = "%Y-%m-%dT%H:%M:%S"  # renamed from `format`: avoid shadowing the builtin
    return one_before.strftime(fmt), now.strftime(fmt)


def getindex():
    """Return today's daily ngcs index name, e.g. "logstash-ngcs_2020_01_31".

    Currently unused by __main__ (the loop there builds index patterns
    for several apps instead), but kept as part of the public interface.
    """
    # fmt instead of `format`: avoid shadowing the builtin
    fmt = "%Y_%m_%d"
    return "logstash-ngcs_" + datetime.datetime.now().strftime(fmt)


def getdata(index, costtime, starttime, endtime):
    """Query Elasticsearch for requests with request_time >= costtime,
    aggregated into daily buckets, then by upstream area and city.

    Also prints each day bucket's key and doc_count as a progress trace.

    Args:
        index: Elasticsearch index name or pattern to search.
        costtime: lower bound (gte) for the `request_time` range filter.
        starttime: unused; retained for interface compatibility (was only
            consumed by a dead 502-status query that has been removed).
        endtime: unused; retained for interface compatibility.

    Returns:
        dict: the parsed Elasticsearch response body.
    """
    url = 'http://192.168.192.141:9200/' + index + '/_search'
    # Build the request body as a plain dict instead of %-formatting a JSON
    # string and round-tripping it through json.loads/json.dumps.
    query = {
        "size": 0,
        "query": {
            "bool": {
                "must": [
                    {"range": {"request_time": {"gte": costtime}}}
                ]
            }
        },
        "aggs": {
            "hour": {
                "date_histogram": {
                    "field": "@timestamp",
                    "interval": "day",
                    "time_zone": "+08:00",
                    "format": "yyyy-MM-dd",
                },
                "aggs": {
                    "area": {
                        "terms": {"field": "upst_area.keyword"},
                        "aggs": {
                            "city": {
                                "terms": {"field": "geoip.region_code.keyword"}
                            }
                        },
                    }
                },
            }
        },
    }
    # `requests` is urllib.request (aliased at the top of the file), not the
    # third-party requests library.
    request = requests.Request(url)
    request.add_header("Content-Type", "application/json")
    response = requests.urlopen(request, data=json.dumps(query).encode('utf-8'))
    body = json.loads(response.read())
    for bucket in body["aggregations"]["hour"]["buckets"]:
        print(bucket["key_as_string"], bucket["doc_count"])
    return body


def get_quey_data(data1, data3):
    """Join two aggregation responses and emit one CSV row per city.

    data1 is the response for all requests (costtime 0); data3 is the
    response for slow requests (costtime 3).  For each day bucket present
    in both responses this computes overall and per-area/per-city
    percentages, accumulating the fields in the module-level dict `c`
    and appending one row per city via get_cvs().

    NOTE(review): because `c` is a shared global, keys set on a previous
    iteration (e.g. "gcount_3", "g_per3", "c_count3") carry over when no
    matching slow-request bucket exists for the current group/city —
    confirm this is intended.  Divisions by c["count"] / c["count_3"]
    would raise ZeroDivisionError on an empty day bucket.
    """
    for a1 in data1["aggregations"]["hour"]["buckets"]:
        c1 = a1["area"]["buckets"]  # per-area buckets for "all requests"
        c["date"] = a1["key_as_string"]
        c["count"] = a1["doc_count"]  # total requests for this day
        for a3 in data3["aggregations"]["hour"]["buckets"]:
            if a3["key_as_string"] == c["date"]:  # align slow-request bucket on same day
                c3 = a3["area"]["buckets"]  # per-area buckets for "slow requests"
                c["count_3"] = a3["doc_count"]  # slow requests for this day
                per = (1.0 - c["count_3"] / c["count"]) * 100  # fast-request share
                per3 = c["count_3"] / c["count"] * 100  # slow-request share
                c["per"] = "%.3f%%" % per
                c["per3"] = "%.3f%%" % per3
                for i in c1:
                    # presumably the 2nd character of the area key encodes
                    # the group id — TODO confirm against the log format
                    c["group"] = i["key"][1:2]
                    c["gcount"] = i["doc_count"]
                    for city in i["city"]["buckets"]:
                        c["city"] = city["key"]
                        c["c_count"] = city["doc_count"]
                        g_per = c["gcount"] / c["count"] * 100
                        c["g_per"] = "%.3f%%" % g_per
                        for k in c3:
                            if c["group"] == k["key"][1:2]:
                                c["gcount_3"] = k["doc_count"]
                                g_per3 = c["gcount_3"] / c["count_3"] * 100
                                c["g_per3"] = "%.3f%%" % g_per3
                            # note: this inner loop runs for EVERY k, not
                            # only matching groups (it is outside the if)
                            for city3 in k["city"]["buckets"]:
                                if c["city"] == city3["key"]:
                                    c["c_count3"] = city3["doc_count"]
                        print(c)
                        get_cvs(c)


def cvs_header():
    """Create/truncate es/<yesterday>.csv and write the CSV header row.

    Relies on the module-level `yesterday` date (set in __main__) and on
    the `es/` directory already existing.
    """
    path = 'es/%s.csv' % yesterday
    # newline='' is required by the csv module to avoid blank rows on Windows
    with open(path, 'w', newline='') as f:
        head = ['app', 'date', 'count', 'per', 'count_3', 'per3', 'group', 'gcount', 'g_per', 'gcount_3', 'g_per3',
                'city', 'c_count', 'c_count3']
        writer = csv.DictWriter(f, head)
        writer.writeheader()


def get_cvs(data):
    """Append one row built from `data` to es/<yesterday>.csv.

    Silently does nothing when `data` is not a dict (preserving the
    original guard).  Relies on the module-level `yesterday` date and on
    the `es/` directory already existing.
    """
    if not isinstance(data, dict):  # isinstance instead of type() == dict
        return
    path = 'es/%s.csv' % yesterday
    # newline='' is required by the csv module to avoid blank rows on Windows
    with open(path, 'a', newline='') as f:
        head = ['app', 'date', 'count', 'per', 'count_3', 'per3', 'group', 'gcount', 'g_per', 'gcount_3', 'g_per3',
                'city', 'c_count', 'c_count3']
        writer = csv.DictWriter(f, head)
        writer.writerow(data)


if __name__ == '__main__':
    # Time window of the last 60 seconds (informational — getdata's query
    # filters on request_time, not on these timestamps).
    starttime, endtime = gettime()  # tuple unpack instead of index access
    # `yesterday` is read as a module-level global by cvs_header()/get_cvs()
    yesterday = datetime.date.today() - datetime.timedelta(days=1)
    print(yesterday)
    cvs_header()  # truncate the output file and write the column names
    for app in ["ngcs", "ngmtt", "ngbusi", "ngkm", "ngcct", "ngwf", "ngoc"]:
        # daily index pattern, e.g. "logstash-ngcs*2020-01-31*"
        index = "logstash-" + app + "*%s*" % yesterday
        print(app)
        c["app"] = app
        data1 = getdata(index, 0, starttime, endtime)  # all requests
        data3 = getdata(index, 3, starttime, endtime)  # requests >= 3s
        get_quey_data(data1, data3)