from loguru import logger
from kubernetes import client, config
from typing import *
from datetime import datetime, timedelta
from pytimeparse.timeparse import timeparse
import pandas as pd
import os


def get_network_delay() -> pd.DataFrame:
    """Fetch chaos-mesh NetworkChaos 'delay' experiments from the current cluster.

    Returns:
        DataFrame with columns: name, delay, service, start_time, end_time.
        `delay` is the injected latency string (e.g. '100ms'); `service` is the
        target's `app` label selector; times are naive UTC datetimes.
    """
    config.load_kube_config()
    cl = client.CustomObjectsApi()
    ret = cl.list_cluster_custom_object(
        group='chaos-mesh.org',
        version='v1alpha1',
        plural='networkchaos')

    results = []
    for item in ret['items']:
        spec = item['spec']
        # NetworkChaos covers several actions (loss, corrupt, ...); keep delays only.
        if spec['action'] != 'delay':
            continue

        # creationTimestamp is RFC3339 with a trailing 'Z' (UTC).
        start_time = datetime.strptime(item['metadata']['creationTimestamp'], '%Y-%m-%dT%H:%M:%SZ')

        results.append({
            'name': item['metadata']['name'],
            'delay': spec['delay']['latency'],
            'service': spec['selector']['labelSelectors']['app'],
            'start_time': start_time,
            'end_time': start_time + timedelta(seconds=timeparse(spec['duration'])),
        })

    # Explicit columns so an empty result still carries the expected schema
    # (pd.DataFrame([]) would otherwise have no columns at all).
    return pd.DataFrame(results, columns=['name', 'delay', 'service', 'start_time', 'end_time'])


def get_network_loss() -> pd.DataFrame:
    """Fetch chaos-mesh NetworkChaos 'loss' experiments from the current cluster.

    Returns:
        DataFrame with columns: name, loss, service, start_time, end_time.
        `loss` is the injected packet-loss value; `service` is the target's
        `app` label selector; times are naive UTC datetimes.
    """
    config.load_kube_config()
    cl = client.CustomObjectsApi()
    ret = cl.list_cluster_custom_object(
        group='chaos-mesh.org',
        version='v1alpha1',
        plural='networkchaos')

    results = []
    for item in ret['items']:
        spec = item['spec']
        # NetworkChaos covers several actions (delay, corrupt, ...); keep losses only.
        if spec['action'] != 'loss':
            continue

        # creationTimestamp is RFC3339 with a trailing 'Z' (UTC).
        start_time = datetime.strptime(item['metadata']['creationTimestamp'], '%Y-%m-%dT%H:%M:%SZ')

        results.append({
            'name': item['metadata']['name'],
            'loss': spec['loss']['loss'],
            'service': spec['selector']['labelSelectors']['app'],
            'start_time': start_time,
            'end_time': start_time + timedelta(seconds=timeparse(spec['duration'])),
        })

    # Explicit columns so an empty result still carries the expected schema.
    return pd.DataFrame(results, columns=['name', 'loss', 'service', 'start_time', 'end_time'])


def get_pod_cpu() -> pd.DataFrame:
    """Fetch chaos-mesh StressChaos CPU-stress experiments from the current cluster.

    Returns:
        DataFrame with columns: name, cpu, service, start_time, end_time.
        `cpu` is the configured stressor load; `service` is the target's
        `app` label selector; times are naive UTC datetimes.
    """
    config.load_kube_config()
    cl = client.CustomObjectsApi()
    ret = cl.list_cluster_custom_object(
        group='chaos-mesh.org',
        version='v1alpha1',
        plural='stresschaos')

    results = []
    for item in ret['items']:
        stressors = item['spec'].get('stressors')
        # StressChaos may define memory-only stressors; the original check only
        # tested for 'stressors' and would KeyError on spec['stressors']['cpu']
        # below for such items. Require an actual cpu stressor.
        if not stressors or 'cpu' not in stressors:
            continue

        # creationTimestamp is RFC3339 with a trailing 'Z' (UTC).
        start_time = datetime.strptime(item['metadata']['creationTimestamp'], '%Y-%m-%dT%H:%M:%SZ')

        results.append({
            'name': item['metadata']['name'],
            'cpu': stressors['cpu']['load'],
            'service': item['spec']['selector']['labelSelectors']['app'],
            'start_time': start_time,
            'end_time': start_time + timedelta(seconds=timeparse(item['spec']['duration'])),
        })

    # Explicit columns so an empty result still carries the expected schema.
    return pd.DataFrame(results, columns=['name', 'cpu', 'service', 'start_time', 'end_time'])


if __name__ == '__main__':
    output_dir = 'output'
    os.makedirs(output_dir, exist_ok=True)
    logger.info('Starting fetching ...')

    # Map each output CSV to the fetcher that produces it; dict order
    # preserves the original fetch sequence.
    exports = {
        'network_delay.csv': get_network_delay,
        'network_loss.csv': get_network_loss,
        'pod_cpu.csv': get_pod_cpu,
    }
    for filename, fetch in exports.items():
        fetch().to_csv(os.path.join(output_dir, filename), index=False)

    logger.info('Finished.')
