import arrow
from pymongo import MongoClient
from dateutil import tz as dateutil_tz
import csv

from conf.settings import (
    CLOUDMONITOR_MONGODB,
    CLOUDMONITOR_DB,
    CLOUDMONITOR_MONGODB_BK,
    CLOUDMONITOR_DB_BK
)


def utc_time_to_local(t, is_format=False, fmt='%Y-%m-%d %H:%M:%S'):
    """Convert a UTC timestamp to the local timezone.

    :param t: anything ``arrow.get`` accepts (datetime, ISO string, epoch).
    :param is_format: when True, return a string rendered with ``fmt``
        instead of a ``datetime`` object.
    :param fmt: ``strftime`` pattern used when ``is_format`` is True.
    :return: a timezone-local ``datetime``, or its formatted string.
    """
    local_dt = arrow.get(t).to(tz=dateutil_tz.tzlocal()).datetime
    return local_dt.strftime(fmt) if is_format else local_dt


class Statistic(object):
    """Aggregate asset/package statistics from MongoDB and export them as CSV.

    ``self.db`` holds user/package/balance collections; ``self.bk_db`` holds
    the task collection used for asset counting (see conf.settings).
    """

    def __init__(self):
        # Backup cluster (tasks) and primary cluster (users/packages/balances).
        self.bk_db = MongoClient(CLOUDMONITOR_MONGODB_BK)[CLOUDMONITOR_DB_BK]
        self.db = MongoClient(CLOUDMONITOR_MONGODB)[CLOUDMONITOR_DB]

    @staticmethod
    def save_csv(headers, path, data_list):
        """Write one header row followed by ``data_list`` rows to ``path``.

        ``newline=""`` is required by the csv module (otherwise Windows
        emits a blank line after every row), and an explicit utf-8 encoding
        keeps the Chinese headers intact regardless of the platform's
        default locale encoding.
        """
        with open(path, "w", newline="", encoding="utf-8") as f:
            csv_writer = csv.writer(f)
            csv_writer.writerow(headers)
            csv_writer.writerows(data_list)

    def user_packages(self, path=None):
        """Export each user's package name and plan-expiry status.

        :param path: output CSV path; defaults to
            ``/var/log/scripts/user_packages.csv``.
        :return: list of dicts with keys ``username``, ``expire_time``,
            ``pkg_name`` and ``is_expired``.
        """
        # Package _id -> display name; the free plan ("scanv_free") is
        # excluded here and falls through to the "免费版" default below.
        pkg_map = {p["_id"]: p["name"] for p in self.db["packages"].find({"key": {"$ne": "scanv_free"}})}
        # User id -> plan expiry timestamp ("" when absent).
        balance_map = {b["uid"]: b.get("planExpireTime", "") for b in self.db["balances"].find({})}
        data = []
        for user in self.db["users"].find({}):
            pkg_name = pkg_map.get(user.get("planId"), "免费版")
            exp_time = balance_map.get(user.get("_id"), "")
            # No recorded expiry, or an expiry in the past, counts as expired.
            is_expired = not (exp_time and arrow.get(exp_time).datetime > arrow.utcnow().datetime)
            exp_time_str = utc_time_to_local(exp_time, is_format=True) if exp_time else ""
            data.append({"username": user["username"], "expire_time": exp_time_str, "pkg_name": pkg_name, "is_expired": is_expired})
        headers = ["用户名", "过期时间", "套餐名称", "是否过期"]
        csv_data = [[d["username"], d["expire_time"], d["pkg_name"], d["is_expired"]] for d in data]
        path = path if path else "/var/log/scripts/user_packages.csv"
        self.save_csv(headers=headers, path=path, data_list=csv_data)
        return data

    def _add_job_data(self, group_by, path, query=None):
        """Count newly added assets per month, de-duplicated by ``group_by``.

        Each distinct ``group_by`` value is attributed to the month of the
        first ``startTime`` the aggregation yields for it; buckets are
        returned sorted by time and written to ``path``.
        """
        data = {}
        query = query or {}
        agg = [
            {"$match": query},
            {"$project": {"startTime": 1, group_by: 1}},
            {"$group": {"_id": f"${group_by}", "startTime": {"$first": "$startTime"}}}
        ]
        for task in self.bk_db["tasks"].aggregate(agg):
            start_time = task["startTime"]
            # Local "YYYY-MM" month bucket.
            time_str = utc_time_to_local(start_time, is_format=True)[:7]
            data.setdefault(time_str, {"time": start_time, "count": 0, "time_str": time_str})
            data[time_str]["count"] += 1
        data = sorted(data.values(), key=lambda x: x["time"])
        headers = ["时间", "新增资产数量"]
        csv_data = [[d["time_str"], d["count"]] for d in data]
        self.save_csv(headers=headers, path=path, data_list=csv_data)
        return data

    def add_job_data_by_id(self, query=None, path=None):
        """Monthly count of newly added assets, de-duplicated by job id."""
        path = path if path else "/var/log/scripts/add_job_data_by_id.csv"
        return self._add_job_data(group_by="jobId", path=path, query=query)

    def add_job_data_by_target(self, query=None, path=None):
        """Monthly count of newly added assets, de-duplicated by target address."""
        path = path if path else "/var/log/scripts/add_job_data_by_target.csv"
        return self._add_job_data(group_by="target", path=path, query=query)

    def _exist_job_data(self, group_by, path, query=None):
        """Count assets existing in each month, de-duplicated by ``group_by``.

        Groups tasks by (``group_by``, year, month) so each asset is counted
        once per month it appears in. Returns a ``{"YYYY-MM": count}`` dict
        and writes the rows, sorted chronologically, to ``path``.
        """
        data = {}
        query = query or {}
        agg = [
            {"$match": query},
            {"$project": {"year": {"$year": "$startTime"}, "month": {"$month": "$startTime"}, group_by: 1}},
            {"$group": {"_id": {group_by: f"${group_by}", "year": "$year", "month": "$month"}}}
        ]
        for task in self.bk_db["tasks"].aggregate(agg):
            month = task['_id']['month']
            # Skip groups with no startTime (null month).
            if not month:
                continue
            time_str = f"{task['_id']['year']}-{int(month):02d}"
            data.setdefault(time_str, 0)
            data[time_str] += 1
        headers = ["时间", "资产数量"]
        # "YYYY-MM" keys sort lexicographically == chronologically; keep the
        # CSV deterministic instead of aggregation order.
        csv_data = [[t, c] for t, c in sorted(data.items())]
        self.save_csv(headers=headers, path=path, data_list=csv_data)
        return data

    def exist_job_data_by_id(self, query=None, path=None):
        """Monthly count of existing assets, de-duplicated by job id."""
        path = path if path else "/var/log/scripts/exist_job_data_by_id.csv"
        return self._exist_job_data(group_by="jobId", path=path, query=query)

    def exist_job_data_by_target(self, query=None, path=None):
        """Monthly count of existing assets, de-duplicated by target address."""
        path = path if path else "/var/log/scripts/job_data_all_by_target.csv"
        return self._exist_job_data(group_by="target", path=path, query=query)

    def job_data_year(self, query=None, path=None):
        """Yearly asset statistics, de-duplicated by target address.

        :return: ``{year: count}`` dict; also writes the rows (sorted by
            year) to ``path``.
        """
        data = {}
        query = query or {}
        agg = [
            {"$match": query},
            {"$project": {"year": {"$year": "$startTime"}, "target": 1}},
            {"$group": {"_id": {"target": "$target", "year": "$year"}}}
        ]
        for task in self.bk_db["tasks"].aggregate(agg):
            year = task['_id']['year']
            data.setdefault(year, 0)
            data[year] += 1
        path = path if path else "/var/log/scripts/job_data_year_by_target.csv"
        headers = ["时间", "资产数量"]
        csv_data = [[f"{t}年", c] for t, c in sorted(data.items())]
        self.save_csv(headers=headers, path=path, data_list=csv_data)
        return data


if __name__ == "__main__":
    # Script entry point: construct the Statistic helper (connects to the
    # MongoDB instances configured in conf.settings). No export method is
    # invoked by default — call e.g. sta.user_packages() here or from a
    # REPL to generate a report.
    sta = Statistic()