# encoding: utf-8  
'''
Created on 2018-08-29
Description: metric data collector for MongoDB monitoring
@author: szy
'''
import requests
import time
import json
import sys
sys.path.append('./../helper')
from helper import InfoMonConstants
from helper.ConnectionHelper import ConnectionHelper
from threading import Thread, Condition
import datetime
import pytz

class Collector:
    """Collects MongoDB monitoring metrics and pushes them to an
    Open-Falcon transfer endpoint.

    Typical usage: ``Collector().run()`` — collects all metrics on
    worker threads, prints them, then POSTs them as JSON.
    """

    # Class-level defaults kept for backward compatibility with code that
    # reads them off the class; instances get their own copies in __init__
    # so state is no longer shared between Collector instances.
    uploadData = []
    ts = int(time.time())

    def __init__(self):
        # Per-instance metric buffer and collection timestamp (taken at
        # instantiation time rather than module-import time).
        self.uploadData = []
        self.ts = int(time.time())

    def run(self):
        """Collect all metrics, then send them to the transfer endpoint."""
        self.collect()
        self.send()

    def _point(self, metric, value, tags):
        """Append one Open-Falcon metric point (GAUGE, 60s step) to the buffer."""
        self.uploadData.append({
            "endpoint": InfoMonConstants.ENDPOINT,
            "metric": metric,
            "timestamp": self.ts,
            "step": 60,
            "value": value,
            "counterType": "GAUGE",
            "tags": tags,
        })

    def getServerStatus(self, conn):
        """Sample serverStatus twice, one second apart, and record opcounter
        deltas, connection stats, memory usage and network throughput.

        :param conn: project connection helper exposing ``getValue(command)``
        """
        serverStatus = 'serverStatus'
        oldStatus = conn.getValue(serverStatus)
        oldOps = oldStatus['opcounters']
        time.sleep(1)
        newStatus = conn.getValue(serverStatus)
        newOps = newStatus['opcounters']
        # insert/update/delete/query/getmore/command rate over the 1s window.
        for key, value in oldOps.items():
            self._point("serverStatus_opcounter_" + key,
                        newOps[key] - value, "name=opcounters")
        oldConnections = oldStatus['connections']
        newConnections = newStatus['connections']
        # current: open connections; available: remaining capacity;
        # totalCreated: cumulative count including already-closed sessions.
        for key, value in oldConnections.items():
            if key == 'totalCreated':
                # A large diff over 1s means frequent connect/close churn.
                self._point("serverStatus_connection_" + key + "_diff",
                            newConnections[key] - value, "name=connections")
            else:
                self._point("serverStatus_connection_" + key,
                            value, "name=connections")
        mem = oldStatus['mem']
        self._point("serverStatus_memory_usage",
                    mem['resident'], "name=memory_usage_Mb")
        self._point("serverStatus_memory_swap_usage",
                    round(mem['virtual'] / 1024, 0), "name=memory_usage_Mb")
        # Network bandwidth received/sent by mongod over the 1s window.
        oldNet = oldStatus['network']
        newNet = newStatus['network']
        self._point("serverStatus_network_in",
                    round((newNet['bytesIn'] - oldNet['bytesIn']) / 1024 / 1024, 0),
                    "name=network_Mb")
        self._point("serverStatus_network_out",
                    round((newNet['bytesOut'] - oldNet['bytesOut']) / 1024 / 1024, 0),
                    "name=network_Mb")

    def getDbStats(self, conn):
        """Record db.stats() metrics for every database.

        TYPE '1'/'2' (standalone / replica set) reports the top-level stats;
        TYPE '3' (mongos) reports per-shard stats from the ``raw`` section.
        """
        command = 'dbstats'
        for dbname in conn.getDatabaseName():
            data = conn.getDbStats(dbname, command)
            if InfoMonConstants.TYPE in ('1', '2'):
                # Standalone or replica-set deployment.
                prefix = "dbstats_" + data['db']
                self._point(prefix + "_collections", data['collections'],
                            "name=dbstats_collections_counts")
                self._point(prefix + "_views", data['views'],
                            "name=dbstats_views_counts")
                self._point(prefix + "_objects", data['objects'],
                            "name=dbstats_objects_counts")
                self._point(prefix + "_avgObjSize", round(data['avgObjSize'], 0),
                            "name=dbstats_avgObjSize_bytes")
                # Logical data size on disk.
                self._point(prefix + "_dataSize",
                            round(data['dataSize'] / 1024 / 1024, 0),
                            "name=dbstats_dataSize_Mb")
                # Actual space allocated to collections.
                self._point(prefix + "_storageSize",
                            round(data['storageSize'] / 1024 / 1024, 0),
                            "name=dbstats_storageSize_Mb")
                self._point(prefix + "_indexes", data['indexes'],
                            "name=dbstats_indexes")
                self._point(prefix + "_indexSize",
                            round(data['indexSize'] / 1024 / 1024, 0),
                            "name=dbstats_indexSize_Mb")
            elif InfoMonConstants.TYPE == '3':
                # Sharded cluster: one metric set per shard replica set.
                for key, value in data['raw'].items():
                    shardingName = key.split('/')[0]
                    prefix = ("dbstats_" + value['db'] + "_sharding_"
                              + shardingName)
                    self._point(prefix + "_collections", value['collections'],
                                "name=dbstats_sharding_collections")
                    self._point(prefix + "_views", value['views'],
                                "name=dbstats_sharding_views")
                    self._point(prefix + "_objects", value['objects'],
                                "name=dbstats_sharding_objects")
                    self._point(prefix + "_avgObjSize",
                                round(value['avgObjSize'], 0),
                                "name=dbstats_sharding_avgObjSize_bytes")
                    self._point(prefix + "_dataSize",
                                round(value['dataSize'] / 1024 / 1024, 0),
                                "name=dbstats_sharding_dataSize_Mb")
                    self._point(prefix + "_storageSize",
                                round(value['storageSize'] / 1024 / 1024, 0),
                                "name=dbstats_sharding_storageSize_Mb")
                    self._point(prefix + "_indexes", value['indexes'],
                                "name=dbstats_sharding_indexes")
                    self._point(prefix + "_indexSize",
                                round(value['indexSize'] / 1024 / 1024, 0),
                                "name=dbstats_sharding_indexSize_Mb")

    def getOplogWindows(self, conn):
        """Record the oplog time-window length in hours (replica sets only;
        other deployment types report 0)."""
        if int(InfoMonConstants.TYPE) == 2:
            oplogData = conn.getDataOne('local', 'oplog.rs')
            oplogFirstTime = oplogData['ts'].as_datetime()
            # BUGFIX: the original used
            # datetime.now().replace(tzinfo=pytz.timezone('Asia/Shanghai')),
            # which attaches the zone's LMT offset (+08:06) instead of CST.
            # pytz zones must be applied via now(tz)/localize().
            currentTime = datetime.datetime.now(pytz.timezone('Asia/Shanghai'))
            delta = currentTime - oplogFirstTime
            oplogHours = delta.days * 24 + delta.seconds / 60 / 60
            self._point("OplogWindows", round(oplogHours, 2),
                        "name=OplogWindows_hours")
        else:
            self._point("OplogWindows", 0, "name=OplogWindows")

    def getReplicaCount(self, conn):
        """Record the number of healthy replica-set members (TYPE 2),
        the number of shards (TYPE 3), or 1 for a standalone instance."""
        if int(InfoMonConstants.TYPE) == 2:
            statusData = conn.getValue("replSetGetStatus")
            activeStates = ('PRIMARY', 'SECONDARY', 'ARBITER')
            replicasum = sum(1 for row in statusData["members"]
                             if row["stateStr"] in activeStates)
            self._point("status_ReplicatActiveCount", replicasum,
                        "name=ReplicatActiveCount")
        elif int(InfoMonConstants.TYPE) == 3:
            statusData = conn.getValue("dbstats")
            self._point("status_ShardingCount", len(statusData['raw']),
                        "name=ReplicatCount")
        else:
            self._point("status_ReplicatCount", 1, "name=ReplicatCount")

    def collect(self):
        """Run all metric collectors concurrently against one connection,
        print the gathered points, and close the connection."""
        connHelper = ConnectionHelper(InfoMonConstants.SID)
        collectors = (self.getServerStatus, self.getDbStats,
                      self.getOplogWindows, self.getReplicaCount)
        # list.append on the shared buffer is atomic under the GIL, so the
        # four workers can write to self.uploadData without a lock.
        threads = [Thread(target=fn, args=(connHelper,)) for fn in collectors]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        for line in self.uploadData:
            print(line)
        connHelper.conn.close()

    def send(self):
        """POST collected points as JSON to the Open-Falcon transfer API."""
        if self.uploadData:
            # Timeout keeps the agent from hanging forever on a dead transfer.
            r = requests.post(InfoMonConstants.FALCON_CLINENT,
                              data=json.dumps(self.uploadData), timeout=10)
            print(r.text)
    

