from urllib import response
import grpc
import query_pb2_grpc
import query_pb2
from datetime import datetime, timedelta
from typing import *
import pandas as pd
from google.protobuf.timestamp_pb2 import Timestamp
from google.protobuf.json_format import MessageToJson
from loguru import logger
from tqdm import tqdm
from kubernetes import client, config
from snappy import compress
import pickle
import os


def get_node_port() -> int:
    """Return the NodePort of the 'jaeger-frontend' service's port named '16685'.

    Loads the local kubeconfig, reads the service from the 'default'
    namespace, and scans its port specs for the one named '16685'.

    Raises:
        RuntimeError: if no port spec with that name exists on the service.
    """
    config.load_kube_config()
    api = client.CoreV1Api()
    # read_namespaced_service_with_http_info returns (body, status, headers);
    # only the service body is needed here.
    service = api.read_namespaced_service_with_http_info(
        name='jaeger-frontend', namespace='default')[0]

    for spec in service.spec.ports:
        if spec.name == '16685':
            return spec.node_port

    raise RuntimeError("Service nodeport not found for: jaeger-frontend in default namespace.")


def print_service_list():
    """Print the list of service names known to the Jaeger query endpoint."""
    target = f'localhost:{get_node_port()}'
    channel = grpc.insecure_channel(target, options=(('grpc.enable_http_proxy', 0),))
    stub = query_pb2_grpc.QueryServiceStub(channel)
    print(stub.GetServices(query_pb2.GetServicesRequest()))
    

def query(start_time: datetime, end_time: datetime, interval: int = 30,
          max_retries: int = 5):
    """Fetch all spans between *start_time* and *end_time* from Jaeger.

    The range is split into `interval`-second windows so no single FindTraces
    request gets too large. Spans are serialized to JSON strings and flushed
    to snappy-compressed pickle files under raw_output/ every 400k spans.

    Args:
        start_time: lower bound of the query window (naive datetime).
        end_time: upper bound of the query window.
        interval: per-request window size, in seconds.
        max_retries: attempts per window before giving up.

    Raises:
        RuntimeError: if a window still fails after max_retries attempts
            (the old code retried forever, spinning on permanent errors).
    """
    os.makedirs('raw_output', exist_ok=True)

    channel = grpc.insecure_channel(f'localhost:{get_node_port()}',
                                    options=(('grpc.enable_http_proxy', 0),))
    stub = query_pb2_grpc.QueryServiceStub(channel)

    logger.info(f"From {start_time.strftime('%Y-%m-%d %H:%M:%S')} "
                f"to {end_time.strftime('%Y-%m-%d %H:%M:%S')}")

    trace_set = set()
    spans = []
    cur_index = 0

    total_seconds = int((end_time - start_time).total_seconds())
    # Ceiling division: cover the whole range without the extra window the
    # old `// interval + 1` queried past end_time on exact multiples.
    num_windows = -(-total_seconds // interval)

    # Split if too long
    for i in range(num_windows):
        cur_start = start_time + timedelta(seconds=interval * i)
        cur_end = min(start_time + timedelta(seconds=interval * (i + 1)), end_time)

        ts1 = Timestamp()
        ts1.FromDatetime(cur_start)

        ts2 = Timestamp()
        ts2.FromDatetime(cur_end)

        for attempt in range(1, max_retries + 1):
            # Accumulate per-attempt so a mid-stream failure followed by a
            # retry does not leave duplicate spans in `spans`.
            cur_trace_set = set()
            window_spans = []
            try:
                response = stub.FindTraces(query_pb2.FindTracesRequest(
                    query=query_pb2.TraceQueryParameters(
                        # service_name='adservice',  # optional service filter
                        start_time_min=ts1,
                        start_time_max=ts2,
                        search_depth=1000
                    )
                ))

                for span_chunk in response:
                    for span in span_chunk.spans:
                        cur_trace_set.add(span.trace_id)
                        window_spans.append(MessageToJson(span))

                trace_set |= cur_trace_set
                spans.extend(window_spans)

                logger.info(f"{cur_start.strftime('%Y-%m-%d %H:%M:%S')}: {len(cur_trace_set)} traces.")

                # Flush to disk periodically so memory stays bounded.
                if len(spans) >= 400000:
                    with open(os.path.join('raw_output', f'{cur_index}.pkl'), 'wb') as f:
                        f.write(compress(pickle.dumps(spans)))
                    cur_index += 1
                    spans.clear()

                break
            except grpc.RpcError as err:
                # Only RPC errors are worth retrying; anything else is a bug
                # and should propagate.
                logger.error(str(err))
                logger.error(f"Failed when trying {cur_start.strftime('%Y-%m-%d %H:%M:%S')}, "
                             f"attempt {attempt}/{max_retries}, try again...")
        else:
            raise RuntimeError(
                f"FindTraces failed {max_retries} times for window starting "
                f"{cur_start.strftime('%Y-%m-%d %H:%M:%S')}")

    # Write whatever is left; skip creating an empty trailing file.
    if spans:
        with open(os.path.join('raw_output', f'{cur_index}.pkl'), 'wb') as f:
            f.write(compress(pickle.dumps(spans)))

    logger.info(f'{len(trace_set)} traces loaded.')


def convert_to_csv(spans, save_path: str):
    """Append Jaeger spans as rows of a CSV file, creating it (with header)
    if it does not exist yet.

    Columns: traceIdHigh, traceIdLow, spanId, parentSpanId, serviceName,
    operationName, startTime, duration (ms), nanosecond, DBhash, status.

    Args:
        spans: iterable of Jaeger span protobuf messages.
        save_path: path of the CSV file to create or append to.
    """
    if not os.path.exists(save_path):
        # Create a new file with head
        with open(save_path, 'wt') as f:
            f.write('traceIdHigh,traceIdLow,spanId,parentSpanId,serviceName,operationName,startTime,duration,nanosecond,DBhash,status\n')

    with open(save_path, 'at+') as f:
        for span in spans:
            # 128-bit trace id split into two 64-bit halves.
            trace_id = int.from_bytes(span.trace_id, 'big')
            traceIdHigh = trace_id // (1 << 64)
            traceIdLow = trace_id % (1 << 64)

            spanId = int.from_bytes(span.span_id, 'big')
            # First reference is treated as the parent (0 for root spans).
            if len(span.references) > 0:
                parentSpanId = int.from_bytes(span.references[0].span_id, 'big')
            else:
                parentSpanId = 0

            serviceName = span.process.service_name
            operationName = span.operation_name

            # Sub-second precision is carried separately in `nanosecond`.
            startTime = datetime.fromtimestamp(span.start_time.seconds).strftime('%Y-%m-%d %H:%M:%S')
            # Duration in milliseconds. protobuf Duration splits the value
            # into seconds + nanos; the old code used only `nanos`, so any
            # span longer than one second lost its whole-second part.
            duration = span.duration.seconds * 1000 + span.duration.nanos / 1_000_000
            nanosecond = span.start_time.nanos

            # gRPC status code tag (0 = OK); last matching tag wins.
            status = 0
            for tag in span.tags:
                if tag.key == 'rpc.grpc.status_code':
                    status = tag.v_int64
            DBhash = 0

            f.write(f'{traceIdHigh},{traceIdLow},{spanId},{parentSpanId},{serviceName},{operationName},{startTime},{duration},{nanosecond},{DBhash},{status}\n')


if __name__ == '__main__':
    print_service_list()

    # Alternative: query the last 15 minutes instead of a fixed window.
    # start_time = datetime.utcnow() - timedelta(seconds=900)
    # end_time = datetime.utcnow()

    # Fixed 24-hour query window.
    window_start = datetime.fromisoformat('2022-05-27 14:00:00')
    window_end = datetime.fromisoformat('2022-05-28 14:00:00')
    query(window_start, window_end)
