import pandas as pd
import numpy as np
import utils
import json
import pickle
import os


def prepare_unsatisfied_traces_ids(frag=1):
    """Fetch SLO-violating ("unsatisfied") trace batches from SLS and expand their trace IDs.

    Writes two CSVs under data/unsatisfied_traces/{frag}/:
      - unsatisfied_traces_batch.csv: the raw batch rows as returned by the query
      - unsatisfied_traces_ids.csv:   deduplicated (batch_id, trace_id) pairs

    Args:
        frag: time-range fragment index resolved via utils.get_time_range_by_frag.

    Returns:
        list of trace_id values from the deduplicated (batch_id, trace_id) pairs.
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(f'data/unsatisfied_traces/{frag}', exist_ok=True)

    # Fetch the unsatisfied-trace batches from the log store.
    query = '* | set session mode=scan; select batchID, traceIDs, service, name, fromTimestampSec, toTimestampSec, sloUs from log limit 1000000'
    start, end = utils.get_time_range_by_frag(frag)
    unsatisfied_traces = utils.exec_sls_query(query, 'unsatisfied_traces', start, end)
    unsatisfied_traces.to_csv(f'data/unsatisfied_traces/{frag}/unsatisfied_traces_batch.csv', index=False)

    # Expand each batch's JSON-encoded traceIDs column into one (batch_id, trace_id) row apiece.
    unsatisfied_traces_ids = [
        [row['batchID'], traceID]
        for _, row in unsatisfied_traces.iterrows()
        for traceID in json.loads(row['traceIDs'])
    ]

    unsatisfied_traces_ids = pd.DataFrame(unsatisfied_traces_ids, columns=['batch_id', 'trace_id']).drop_duplicates()
    unsatisfied_traces_ids.to_csv(f'data/unsatisfied_traces/{frag}/unsatisfied_traces_ids.csv', index=False)
    print(f'#unsatisfied_traces: {len(unsatisfied_traces_ids)}')

    return list(unsatisfied_traces_ids['trace_id'])


def prepare_normal_traces_ids(n, frag=1):
    """Sample up to n root-span trace IDs and keep only those not marked unsatisfied.

    Requires data/unsatisfied_traces/{frag}/unsatisfied_traces_ids.csv to exist
    (produced by prepare_unsatisfied_traces_ids). Writes the surviving IDs to
    data/normal_traces/{frag}/normal_traces_ids.csv.

    Args:
        n:    maximum number of trace IDs to sample from SLS.
        frag: time-range fragment index resolved via utils.get_time_range_by_frag.

    Returns:
        sorted list of trace IDs considered "normal".
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(f'data/normal_traces/{frag}', exist_ok=True)

    # Randomly sample trace IDs of root spans, excluding health-check traffic.
    query = f"parentSpanID:\"\" not health not 'grpc.health.v1.Health/Check' | " \
            f"SELECT traceID FROM log TABLESAMPLE BERNOULLI (10) limit {n}"
    start, end = utils.get_time_range_by_frag(frag)
    sample_traces_ids = set(utils.exec_sls_query(query, 'traces', start, end)['traceID'])

    # Remove the unsatisfied traces from the sample.
    unsatisfied_traces_ids = set(pd.read_csv(f'data/unsatisfied_traces/{frag}/unsatisfied_traces_ids.csv')['trace_id'])
    # Sort the set difference: pandas rejects a raw set as DataFrame data
    # ("Set type is unordered"), and sorting makes the output deterministic.
    normal_traces_ids = sorted(sample_traces_ids - unsatisfied_traces_ids)
    pd.DataFrame(normal_traces_ids, columns=['trace_id']).to_csv(f'data/normal_traces/{frag}/normal_traces_ids.csv', index=False)
    print('#normal_traces:', len(normal_traces_ids))

    return list(normal_traces_ids)


def fetch_and_build_trace(trace_type, traceIDs, step, frag=1):
    """Fetch spans for the given trace IDs in batches, build span trees, and persist them.

    Writes incrementally to data/{trace_type}_traces/{frag}/{trace_type}_traces_spans.csv
    (header first, then appended rows) and dumps the complete {traceID: spanMap}
    mapping to {trace_type}_traces.pkl at the end.

    Args:
        trace_type: output directory/file prefix (e.g. 'normal' or 'unsatisfied').
        traceIDs:   sequence of trace IDs to fetch.
        step:       number of trace IDs queried per SLS request.
        frag:       time-range fragment index resolved via utils.get_time_range_by_frag.
    """
    spans_path = f'data/{trace_type}_traces/{frag}/{trace_type}_traces_spans.csv'
    # Write the CSV header once; per-trace rows are appended below without headers.
    pd.DataFrame(columns=['traceID', 'spanID', 'parentSpanID', 'service', 'name',
                          'start', 'end', 'duration', 'self_duration', 'level']) \
        .to_csv(spans_path, index=False)

    # The query time range is independent of the batch; compute it once, not per iteration.
    start, end = utils.get_time_range_by_frag(frag)

    traceMap = {}
    for i in range(0, len(traceIDs), step):
        # i always advances by `step`, so log progress unconditionally
        # (the original `if i % step == 0` guard was always true).
        utils.log(f'progress --- {i}/{len(traceIDs)}')
        batch = traceIDs[i:i + step]
        # NOTE(review): trace IDs are interpolated into the query string; assumed to
        # come from our own stores, not untrusted input — confirm before widening use.
        where_phase = " or ".join(f"traceID = '{traceID}'" for traceID in batch)
        query = f"* | select traceID, spanID, parentSpanID, service, name, start, \"end\", duration from log where {where_phase} limit 1000000"
        traces = utils.exec_sls_query(query, 'traces', start, end)
        traces = traces.astype({'start': np.int64, 'end': np.int64, 'duration': np.int64})
        # Process every trace fetched in this batch.
        for traceID in batch:
            trace = traces[traces['traceID'] == traceID]
            # Skip traces whose span set is incomplete; they cannot form a valid tree.
            if not utils.check_trace(trace):
                print(f'warning: incomplete trace - {traceID}')
            else:
                spanMap = utils.build_trace(trace)
                traceMap[traceID] = spanMap
                utils.spanMap2Df(traceID, spanMap).to_csv(spans_path, mode='a', header=False, index=False)

    with open(f'data/{trace_type}_traces/{frag}/{trace_type}_traces.pkl', 'wb') as file:
        pickle.dump(traceMap, file)


def stats_normal_traces(frag=1):
    """Compute per-(service, name) duration statistics for normal traces.

    Reads data/normal_traces/{frag}/normal_traces_spans.csv, aggregates the
    'duration' and 'self_duration' columns per (service, name) group, and
    writes mean, std, quartiles, and IQR-based fences to normal_traces_stats.csv.
    """
    utils.log('stats normal traces')
    spans = pd.read_csv(f'data/normal_traces/{frag}/normal_traces_spans.csv')
    grouped = spans.groupby(['service', 'name'])[['duration', 'self_duration']]

    mean = np.round(grouped.mean(), 2)
    std = np.round(grouped.std(), 2)
    q1, q2, q3 = (np.round(grouped.quantile(q), 2) for q in (0.25, 0.50, 0.75))
    iqr = q3 - q1
    # Tukey fences; 'upper2' is an alternative upper fence anchored at q1 instead of q3.
    lower = q1 - 1.5 * iqr
    upper = q3 + 1.5 * iqr
    upper2 = q1 + 1.5 * iqr

    stats = pd.concat([mean, std, q1, q2, q3, lower, upper, upper2], axis=1)
    # Each concatenated frame contributes a (duration, self_duration) column pair,
    # so the flattened names interleave column before metric.
    metrics = ['mean', 'std', 'q1', 'q2', 'q3', 'lower', 'upper', 'upper2']
    stats.columns = [f'{col}_{metric}' for metric in metrics for col in ('duration', 'self_duration')]
    stats.to_csv(f'data/normal_traces/{frag}/normal_traces_stats.csv')
