#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import print_function
import time
import sys
import logging
import datetime
import json
from concurrent.futures import ThreadPoolExecutor
from pymongo import MongoClient
import traceback
import requests
import boto3
import re
from botocore.client import Config
from botocore.exceptions import EndpointConnectionError
from Queue import Queue
from threading import Thread, Lock
import concurrent.futures
from collections import defaultdict

# Probe payload: a ~30 MB file that is uploaded, downloaded and deleted each round.
UPLOAD_FILE = "/data/scripts/30M_file"
handle_operation_timeout = 35 # hard execution timeout (seconds) for each put/get/del operation

DEFAULT_TIMEOUT_THRESHOLD = 35 # default alerting threshold (seconds)
# Per-endpoint or per-cluster threshold overrides; any missing key falls back
# to DEFAULT_TIMEOUT_THRESHOLD through the defaultdict factory.
TIMEOUT_THRESHOLD_DIC = defaultdict(lambda: DEFAULT_TIMEOUT_THRESHOLD)
#TIMEOUT_THRESHOLD_DIC['rg2_bjxq'] = 20
#TIMEOUT_THRESHOLD_DIC['rg1_gdgz'] = 10
#TIMEOUT_THRESHOLD_DIC['oss-hefei-a2a.openstorage.cn:443'] = 30


# Shared mutable state; worker threads access these under a common Lock.
alert_msg = {}                           # cluster name -> list of pending alert message strings
request_timeout_times = defaultdict(int) # cluster name -> consecutive-round timeout count
temp_timeout_times = defaultdict(int)    # cluster name -> 0/1 timeout flag for the current round
timeout_times_threshold = 3              # NOTE(review): unused — main() compares against the literal 3
normal_status_code = ['200','204','206','304'] # HTTP status codes treated as success


def send_alert(cluster, result):
    """Post one firing Alertmanager-style alert for *cluster* to the alert gateway.

    cluster: cluster name, embedded in the job label and alert summary.
    result:  human-readable failure description, placed in the alert
             annotations' description field.

    Raises: any ``requests`` exception (connection error, 30s timeout)
    propagates to the caller.
    """
    template = {
        "version": "4",
        "groupKey": '{}:{alertname="对象存储异常", job=".ceph_exporter.s3"}',
        "status": "firing",
        "receiver": "email",
        "groupLabels": {"alertname": "对象存储异常", "job": cluster+".ceph_exporter.s3"},
        "commonLabels": {
            "alertname": "对象存储异常",
            "instance": " host ip ",
            "job": cluster+".ceph_exporter.s3",
            "id": "207",
            "severity": "重要",
        },
        "commonAnnotations": {"description": "通知值班人", "summary": "对象存储异常"},
        "externalURL": "http://no.use",
        "alerts": [],
    }
    # Use the same ISO-8601 timestamp for start and end of the alert.
    nowstr = datetime.datetime.now().isoformat("T")
    alert_info = {
        "status": "firing",
        "labels": {
            "id": "207",
            "alertname": "对象存储大文件主动监控异常",
            "severity": "紧急",
            "instance": "host ip ",
        },
        "annotations": {
            "description": result,
            "summary": u"%s对象存储大文件主动监控异常" % cluster,
        },
        "startsAt": nowstr,
        "endsAt": nowstr,
        "generatorURL": "http://no.no",
    }
    template["alerts"] = [alert_info]
    requests.post(
        "http://xx.xx.xx.xx:xxxx/alert",
        data=json.dumps(template),
        # The body is JSON; declare it so the receiver parses it correctly.
        headers={"Content-Type": "application/json"},
        timeout=30,
    )

def _handle_operation(method, s3client, s3resource, bucket_name, filename):
    status_code = None 
    if method == "PUT":
        bucket = s3resource.Bucket(bucket_name)
        with open(UPLOAD_FILE, "r") as obj_file:
            #response = bucket.put_object(Bucket=bucket_name, Key=filename, Body=obj_file)
            #status_code = response["ResponseMetadata"]["HTTPStatusCode"]
            response = s3resource.meta.client.put_object(Bucket=bucket_name, Key=filename, Body=obj_file)
            status_code = response["ResponseMetadata"]["HTTPStatusCode"]

    elif method == "GET":
        #s3client.get_object(Bucket=bucket_name, Key=filename)["Body"].read()
        response = s3client.get_object(Bucket=bucket_name, Key=filename)
        data = response["Body"].read()
        status_code = response["ResponseMetadata"]["HTTPStatusCode"]

    elif method == "DEL":
        #s3client.delete_object(Bucket=bucket_name, Key=filename)
        response = s3client.delete_object(Bucket=bucket_name, Key=filename)
        status_code = response["ResponseMetadata"]["HTTPStatusCode"]
    return status_code 

def handle_object(name, method, s3client, s3resource, bucket_name, filename, lock):
    """Execute one S3 operation with a hard time limit, recording anomalies.

    Runs _handle_operation in a single-worker thread pool so a hung request
    cannot stall the monitor: future.result() raises TimeoutError after
    handle_operation_timeout seconds. The executor is shut down with
    wait=False — a `with ThreadPoolExecutor()` block would call
    shutdown(wait=True) on exit and block until the hung call finished,
    silently defeating the timeout.

    Abnormal status codes and timeouts are appended to alert_msg[name];
    `lock` serializes all access to the shared bookkeeping dicts.
    """
    status_code = None
    executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    try:
        future = executor.submit(
            _handle_operation, method, s3client, s3resource, bucket_name, filename
        )
        # Raises concurrent.futures.TimeoutError past the limit.
        status_code = future.result(timeout=handle_operation_timeout)
        with lock:
            if str(status_code) not in normal_status_code:
                logging.error("%s %s %s 操作返回状态码异常：%s" % (name, method, filename, status_code))
                alert_msg.setdefault(name, []).append(
                    "%s %s %s 操作返回状态码异常：%s" % (name, method, filename, status_code)
                )
    except concurrent.futures.TimeoutError:
        with lock:
            # Flag at most one timeout per cluster per round; main() turns
            # this flag into the consecutive-round counter.
            if temp_timeout_times[name] == 0:
                temp_timeout_times[name] = temp_timeout_times[name] + 1
                logging.error("%s %s %s 执行时间超限制:%ss 已中断" % (name, method, filename, handle_operation_timeout))
                alert_msg.setdefault(name, []).append("%s %s %s 执行时间超限制,已中断" % (name, method, filename))
    except Exception as e:
        with lock:
            logging.error("%s %s %s 异常: %s" % (name, method, filename, str(e)))
    finally:
        # Never wait for a possibly-hung worker thread to finish.
        executor.shutdown(wait=False)


def start_monitor(name, endpoint, key, queue, lock):
    """Run one PUT/GET/DEL probe cycle against `endpoint` for cluster `name`.

    name:     cluster name, used as the bookkeeping key and in log lines.
    endpoint: "host:port" (or domain) of the S3 gateway to probe.
    key:      credentials dict; supports both "access_key"/"secret_key"
              and "access"/"secret" layouts.
    queue:    the work queue; task_done() is called exactly once at the end.
    lock:     guards the shared bookkeeping dicts.
    """
    bucket_name = "oss_monitor"
    start = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
    filename = "%s_%s_30M" % (start, endpoint.replace(":", "_"))

    access_key = key.get("access_key") or key.get("access")
    secret_key = key.get("secret_key") or key.get("secret")

    # No retries: a slow/failing request must be observed, not papered over.
    config = Config(
        connect_timeout=10, read_timeout=10, retries={"max_attempts": 0}
    )

    session = boto3.Session(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )

    if name == 'rg3_hfa2' and bool(re.search(r'[a-zA-Z]', endpoint)):
        # rg3_hfa2 domain endpoints are served over HTTPS.
        # NOTE(review): verify=False disables TLS certificate validation.
        url = "https://%s" % endpoint
        s3client = session.client("s3", endpoint_url=url, config=config, verify=False)
        s3resource = session.resource("s3", endpoint_url=url, config=config, verify=False)
    else:
        url = "http://%s" % endpoint
        s3client = session.client("s3", endpoint_url=url, config=config)
        s3resource = session.resource("s3", endpoint_url=url, config=config)

    # Resolve the alert threshold once: a per-endpoint entry wins over a
    # per-cluster one. Use membership + .get() rather than plain indexing,
    # because indexing the defaultdict would insert the key and flip later
    # `endpoint in TIMEOUT_THRESHOLD_DIC` checks.
    if endpoint in TIMEOUT_THRESHOLD_DIC:
        threshold = TIMEOUT_THRESHOLD_DIC[endpoint]
    else:
        threshold = TIMEOUT_THRESHOLD_DIC.get(name, DEFAULT_TIMEOUT_THRESHOLD)

    for method in ["PUT", "GET", "DEL"]:
        try:
            start_time = time.time()
            handle_object(name, method, s3client, s3resource, bucket_name, filename, lock)
            duration = time.time() - start_time

            with lock:
                if duration > threshold:
                    # Flag at most one timeout per cluster per round.
                    if temp_timeout_times[name] == 0:
                        temp_timeout_times[name] = temp_timeout_times[name] + 1
                    logging.error(
                        "%15s %s %33s 超时\t: %ss > %ss"
                        % (name, method, filename, duration, threshold)
                    )
                    alert_msg[name].append(
                        "%s文件%s:%s超时:%ss>%ss"
                        % (name, filename, method, duration, threshold)
                    )
                else:
                    logging.info(
                        "%15s %s %33s : %ss" % (name, method, filename, duration)
                    )
        except Exception:
            error_msg = traceback.format_exc()
            with lock:
                logging.error(
                    "%15s %s %33s ERROR\n%s" % (name, method, filename, error_msg)
                )
                alert_msg[name].append("%s文件%s:%s异常" % (name, filename, method))
    # Exactly one task_done() per queue item handed to this function, so
    # queue.join() in main() can complete.
    queue.task_done()

def worker(queue, lock):
    """Worker-thread loop: consume (name, endpoint, key) tasks until a
    None sentinel arrives, probing each endpoint via start_monitor."""
    while True:
        task = queue.get()
        if task is None:
            # Sentinel pushed by main() after queue.join(): exit the thread.
            return
        cluster_name, endpoint, cred = task
        start_monitor(cluster_name, endpoint, cred, queue, lock)

def main():
    """Run one monitoring round: fan probe tasks out to 10 worker threads,
    wait for completion, then update the consecutive-timeout counters and
    fire alerts for clusters that timed out in >= 3 consecutive rounds.
    """
    # NOTE(review): credentials are hardcoded in the URI — consider moving
    # them to a config file or environment variable.
    uri = (
        "mongodb://autobot:7xxxxxxxxxxxxxxxxxxxxxxx@xx.xx.xx.xx/admin"
    )
    connection = MongoClient(uri)
    clusters = connection.autobot.cluster

    queue = Queue()
    lock = Lock()
    threads = []

    for _ in range(10):  # at most 10 worker threads
        t = Thread(target=worker, args=(queue, lock))
        t.start()
        threads.append(t)

    # Select ceph_s3 clusters whose name contains "hf" or "gd", excluding
    # gateway ("gw") clusters, rg6_bjsh, and test/lab clusters by cname.
    for cluster in clusters.find(
        {
            "type": "ceph_s3",
            "$and": [
                {
                   "$or": [
                        {"name": {"$regex": "hf"}},
                        {"name": {"$regex": "gd"}}
                    ]
                },
                {"name": {"$not": re.compile("gw")}},
                {"name": {"$ne": "rg6_bjsh"}},
                {"cname": {"$not": re.compile("测试")}},
                {"cname": {"$not": re.compile("实验室")}}
            ]
        }
    ):
        # Only clusters explicitly opted in with as == "1" are monitored.
        if cluster.get("as") == "1":
            try:
                logging.warning(cluster["name"]+'开始读写删除大文件')
                # Reset this cluster's pending alert messages for the round.
                alert_msg[cluster["name"]] = []
                for endpoint in cluster["monitor_ip"]:
                    queue.put((cluster["name"], endpoint, cluster["monitor_key"]))
            except Exception as e:
                logging.error("cluster %s error info:%s" % (cluster["name"],str(e)))
    connection.close()

    queue.join() # Block until every queued task is done, so the counters below see a complete round.

    # Stop the workers: one None sentinel per thread, then join them.
    for _ in range(10):
        queue.put(None)
    for t in threads:
        t.join()

    # temp_timeout_times[key] is 1 if `key` timed out this round, else 0.
    # Carry it into request_timeout_times (the consecutive-round counter);
    # a clean round resets the counter and drops the pending messages.
    for key,value in temp_timeout_times.items():
        if value == 1:
           request_timeout_times[key] = request_timeout_times[key] + value
           temp_timeout_times[key] = 0
        if value == 0: # only consecutive-round timeouts accumulate in request_timeout_times
           request_timeout_times[key] = 0
           alert_msg[key] = [] 
    # Alert once a cluster has timed out in 3+ consecutive rounds, then reset.
    for key,value in request_timeout_times.items():
        if value >= 3:
           send_alert(key, "|".join(alert_msg[key]))
           logging.error("|".join(alert_msg[key]))
           request_timeout_times[key] = 0
           alert_msg[key] = []

if __name__ == "__main__":
    # Python 2 only: reset the default codec so the Chinese log/alert
    # strings encode without UnicodeDecodeError. reload(sys) and
    # sys.setdefaultencoding do not exist in Python 3.
    reload(sys)
    sys.setdefaultencoding("utf8")

    logging.basicConfig(
        filename="/var/log/s3_monitor_bigFile.log",
        filemode="a",
        format="%(asctime)s - [line:%(lineno)d] - %(levelname)s: %(message)s",
        #level=logging.WARNING,
        level=logging.INFO,
    )
    # Run one probe round every 3 minutes, forever.
    while True:
        main()
        time.sleep(180)

