from enum import Enum
from typing import Dict, List, Optional, Tuple, Union

import arrow
from bson import ObjectId
from flask import g

from app.db import Pagination
from app.db.models.jobs import Job
from app.db.models.tasks import Task
from app.db.models.users import User


class QueryType(Enum):
    """Kinds of database operations the query layer supports.

    Each member's value matches the suffix of a ``Query._<value>`` handler
    method, which ``Query.execute`` dispatches to dynamically.
    """

    FIND = "find"       # read documents matching a filter
    AGG = "aggregate"   # run an aggregation pipeline
    CREATE = "create"   # insert new documents
    UPDATE = "update"   # modify documents matching a filter
    DELETE = "delete"   # remove documents matching a filter


class Result(object):
    """Read-only container for the outcome of a database query.

    Holds the raw results, the query type string that produced them, and a
    result count.  Alternate constructors build a Result from each kind of
    raw database response.
    """

    def __init__(self, results, result_type: str, count=0):
        # A falsy count falls back to len(results); callers pass an explicit
        # count when the results object cannot be len()'d (e.g. a cursor).
        self._count: int = count or len(results)
        self._results: List = results
        self._type = result_type

    @property
    def count(self):
        """Number of results."""
        return self._count

    @property
    def results(self):
        """The raw results object."""
        return self._results

    @property
    def type(self):
        """The QueryType value string that produced this result."""
        return self._type

    def __iter__(self):
        # Iterating a Result iterates its underlying results.
        return iter(self._results)

    @classmethod
    def from_find_res(cls, res):
        """Build from a find response; unwraps Pagination to its items."""
        items = res.items if isinstance(res, Pagination) else res
        return cls(items, QueryType.FIND.value)

    @classmethod
    def from_agg_res(cls, res):
        """Build from an aggregation cursor (not len()-able, so count=1)."""
        return cls(res, QueryType.AGG.value, count=1)

    @classmethod
    def from_create_res(cls, res):
        """Build from the documents returned by an insert."""
        return cls(res, QueryType.CREATE.value)

    @classmethod
    def from_update_res(cls, res):
        """Build from an update's modified-count (0 means empty result)."""
        return cls([] if res == 0 else [res], QueryType.UPDATE.value)

    @classmethod
    def from_delete_res(cls, res):
        """Build from a delete's removed-count (0 means empty result)."""
        return cls([] if res == 0 else [res], QueryType.DELETE.value)

    @classmethod
    def from_none(cls):
        """An empty Result with no type, used when a query is refused."""
        return cls([], "")


class Query(object):
    """Builds and executes a database query described by a QueryType.

    ``execute`` dispatches to the private ``_<query_type.value>`` handler
    (``_find``, ``_aggregate``, ``_create``, ``_update``, ``_delete``).
    """

    def __init__(
        self,
        query_type: Enum,
        queries: Union[Dict, List],
        selected_fields: Optional[List] = None,
        pagination: Optional[Tuple[int, int]] = None,
        order_by: Optional[str] = "",
        col=None
    ):
        """
        :param query_type: a QueryType member; selects the handler method
        :param queries: filter dict (find/update/delete), pipeline list
            (aggregate), or document dict / list of dicts (create)
        :param selected_fields: projection fields; empty/None selects all
            model fields at execute time
        :param pagination: (page, limit), page is 1-based
        :param order_by: e.g. order_by="age" ascending; order_by="-age" descending
        :param col: default document class (model) to query
        """
        self.query_type = query_type
        self.queries = queries
        self.pagination = pagination
        # Normalize None to a fresh list; the previous `= []` default was a
        # shared mutable default argument.
        self.selected_fields = selected_fields if selected_fields is not None else []
        self.order_by = order_by
        self.col = col

    def execute(self, col=None) -> Result:
        """Run the query against ``col``, falling back to ``self.col``.

        :raises AttributeError: if the dispatched handler does not exist
            for this query type.
        """
        col = col if col else self.col
        if not self.selected_fields:
            # Fix: use the resolved ``col`` — this previously read
            # ``self.col``, which crashed when the collection was only
            # passed to execute().
            self.selected_fields = list(col._fields.keys())
        return getattr(self, f"_{self.query_type.value}")(col)

    def _find(self, col) -> Result:
        """FIND: projection + filter, with optional ordering and paging."""
        qs = col.objects.only(*self.selected_fields).find(self.queries)
        if self.order_by:
            qs = qs.order_by(self.order_by)
        if self.pagination:
            page, limit = self.pagination
            qs = qs.skip((page - 1) * limit).limit(limit)
        return Result.from_find_res(qs)

    def _aggregate(self, col) -> Result:
        """AGGREGATE: run ``queries`` as a pipeline with optional paging."""
        # Copy so repeated execute() calls do not keep appending paging and
        # $project stages to the caller's (and this instance's) pipeline.
        pipeline: List = list(self.queries)
        if self.pagination:
            page, limit = self.pagination
            # Standard aggregation paging: limit to page*limit, then skip
            # the earlier pages.
            pipeline.extend([
                {"$limit": page * limit},
                {"$skip": (page - 1) * limit},
            ])
        if self.selected_fields:
            pipeline.append(
                {"$project": {field: 1 for field in self.selected_fields}}
            )
        res = col.objects.aggregate(pipeline)
        return Result.from_agg_res(res)

    def _create(self, col) -> Result:
        """CREATE: insert one dict or a list of dicts as documents."""
        docs = self.queries if isinstance(self.queries, list) else [self.queries]
        inserts = [col.from_dict(doc) for doc in docs]
        res = col.objects.insert(inserts)
        return Result.from_create_res(res)

    def _update(self, col) -> Result:
        """UPDATE: ``queries`` is {"filters": ..., "update": ..., "upsert": ...}.

        Refuses to run without filters (guards against collection-wide updates).
        """
        filters = self.queries.get("filters", {})
        if not filters:
            return Result.from_none()
        update = self.queries.get("update", {})
        upsert = self.queries.get("upsert", False)
        res = col.objects.filter(**filters).update(upsert=upsert, **update)
        return Result.from_update_res(res)

    def _delete(self, col) -> Result:
        """DELETE: ``queries`` is {"filters": ...}; refuses unfiltered deletes.

        Fix: previously declared as ``_delete(self, query, col)``, so the
        dynamic dispatch in execute() (which passes only ``col``) raised
        TypeError for every DELETE query.
        """
        filters = self.queries.get("filters", {})
        if not filters:
            return Result.from_none()
        res = col.objects.filter(**filters).delete()
        return Result.from_delete_res(res)


def get_job_id_by_asset(asset: str, regex: bool = True) -> Result:
    """Find jobs whose targetUrl matches ``asset``.

    :param asset: the asset string to match against Job.targetUrl
    :param regex: when True match as a regex, otherwise match exactly
    :return: FIND Result of matching jobs
    """
    if regex:
        filters = {"targetUrl": {"$regex": asset}}
    else:
        filters = {"targetUrl": asset}
    return Query(query_type=QueryType.FIND, queries=filters, col=Job).execute()


def get_asset_by_job_id(job_ids: List[str]) -> Result:
    """Look up jobs by their ids, projecting id/targetUrl/sourceIp/note.

    :param job_ids: job id strings, converted to ObjectId for the query
    :return: FIND Result of the matching jobs
    """
    object_ids = [ObjectId(jid) for jid in job_ids]
    fields = [Job.id.name, Job.targetUrl.name, Job.sourceIp.name, Job.note.name]
    query = Query(
        query_type=QueryType.FIND,
        queries={"_id": {"$in": object_ids}},
        selected_fields=fields,
        col=Job,
    )
    return query.execute()


def get_job(queries: Dict, fields: Optional[List] = None) -> Result:
    """Run a FIND query against the Job collection.

    :param queries: filter document
    :param fields: projection fields; None/empty selects all model fields
    :return: FIND Result of matching jobs
    """
    # Fix: ``fields: List = []`` was a shared mutable default argument;
    # None-with-fallback keeps the same behaviour safely.
    query = Query(
        query_type=QueryType.FIND,
        queries=queries,
        selected_fields=fields if fields is not None else [],
        col=Job,
    )
    return query.execute()


def get_user_name_by_id(uid: str) -> str:
    """Return the username for ``uid``, or "" when no such user exists."""
    user = User.objects.filter(pk=uid).first()
    if user:
        return user.username
    return ""


def get_ava_results(
    task_type, limit=10, uid=None, hours=-12, ex_query=None, abnormal=False
):
    """Aggregate per-job availability statistics from completed tasks.

    Tasks inside the time window are grouped first by taskId and then by
    jobId, summing node total/abnormal counts, and joined against the jobs
    collection.  Availability = (1 - abnormal/total) * 100.

    :param task_type: task type to match, e.g. "http"
    :param limit: maximum number of rows returned
    :param uid: owner id; defaults to the current request user (g.user.id)
    :param hours: window size relative to now (negative = look back)
    :param ex_query: extra conditions merged into the $match stage
    :param abnormal: when True, only keep jobs with availability < 100
    :return: list of dicts sorted by ascending availability, truncated to limit
    """
    end = arrow.now()
    begin = end.shift(hours=hours)
    if not uid:
        uid = g.user.id
    if not ex_query:
        ex_query = {}
    pipeline = [
        {
            "$match": {
                "status": "completed",
                "taskType": f"{task_type}",
                "uid": uid,
                "endTime": {"$gt": begin.datetime, "$lt": end.datetime},
                **ex_query,
            }
        },
        {"$sort": {"endTime": -1}},
        # Collapse repeated results per task, keeping the newest metadata
        # (documents arrive sorted by endTime descending, so $first picks
        # the latest).
        {
            "$group": {
                "_id": "$taskId",
                "total": {"$sum": "$result.addition.nodeTotalCount"},
                "abnormal": {"$sum": "$result.addition.nodeAbnormalCount"},
                "jobId": {"$first": "$jobId"},
                "name": {"$first": "$name"},
                "target": {"$first": "$target"},
            }
        },
        # Collapse tasks into per-job totals.
        {
            "$group": {
                "_id": "$jobId",
                "total": {"$sum": "$total"},
                "abnormal": {"$sum": "$abnormal"},
                "jobId": {"$first": "$jobId"},
                "name": {"$first": "$name"},
                "target": {"$first": "$target"},
                # Fix: was the literal string "_id"; the leading "$" makes
                # it a field reference to the previous stage's _id (taskId).
                "taskId": {"$first": "$_id"},
            }
        },
        {
            "$lookup": {
                "from": "jobs",
                "localField": "jobId",
                "foreignField": "_id",
                "as": "job",
            }
        },
    ]
    data = Task.objects.aggregate(pipeline)

    new_data = []
    for _data in data:
        # Skip rows whose $lookup found no matching job.
        if _job := _data.get("job"):
            _job = _job[0]
            _temp_dict = {
                "job_id": str(_job["_id"]),
                "name": "unknown",
                "target": "unknown",
                "availability": 0.0,
                "total": _data.get("total", 0),
                "abnormal": _data.get("abnormal", 0),
            }
            if (_total := _data.get("total")) and _total != 0:
                _temp_dict["availability"] = round(
                    (1 - (_data.get("abnormal", 0) / _total)) * 100, 2
                )
                # NOTE(review): name/target are only filled in when total > 0,
                # leaving "unknown" otherwise — confirm this is intended.
                _temp_dict["name"] = _data.get("name")
                _temp_dict["target"] = _data.get("target")
            if abnormal and _temp_dict["availability"] < 100:
                new_data.append(_temp_dict)
            elif not abnormal:
                new_data.append(_temp_dict)
    # Worst availability first, then cap at `limit` rows.
    new_data.sort(key=lambda x: x["availability"])
    return new_data[:limit]


def get_ipv6_ava_results(uid, job_id):
    """Availability summary for a single job over the last 24 hours.

    Restricts the underlying aggregation to "http" tasks for ``job_id``
    with no sourceIp set.

    :return: the first availability dict, or {} when there is no data
    """
    results = get_ava_results(
        "http",
        limit=1,
        uid=uid,
        hours=-24,
        ex_query={"jobId": job_id, "sourceIp": None},
    )
    return results[0] if results else {}
