import pandas as pd
import os
import utils


def rca(frag=1):
    """Root-cause analysis for unsatisfied traces.

    For each unsatisfied trace, score every span by how much its self duration
    exceeds the per-(service, name) upper bound learned from normal traces, and
    predict the highest-scoring span as the root cause.

    Reads:
        data/unsatisfied_traces/{frag}/unsatisfied_traces_ids.csv
        data/unsatisfied_traces/{frag}/unsatisfied_traces.pkl   # {traceID: {spanID: span}}
        data/normal_traces/{frag}/normal_traces_stats.csv       # indexed by (service, name)

    Writes:
        predictions/{frag}/predictions.csv  with columns from the ids CSV plus span_ids.

    :param frag: fragment index selecting the data/prediction sub-directory.
    """
    utils.log(f'loading unsatisfied traces')
    batchIDs_traceIDs = pd.read_csv(f'data/unsatisfied_traces/{frag}/unsatisfied_traces_ids.csv')
    traceMap = pd.read_pickle(f'data/unsatisfied_traces/{frag}/unsatisfied_traces.pkl')
    stats = pd.read_csv(f'data/normal_traces/{frag}/normal_traces_stats.csv').set_index(['service', 'name'])

    total = len(traceMap)  # hoisted out of the loop; len(dict.keys()) per iteration was wasteful
    traceIDs_spanIDs = []
    # items() avoids the redundant traceMap[traceID] lookup inside the loop.
    for i, (traceID, spanMap) in enumerate(traceMap.items()):
        if i % 250 == 0:
            utils.log(f'progress --- {i}/{total}')

        spans = list(spanMap.values())

        # Compute anomaly score: how far the span's self duration exceeds the
        # upper bound seen for this (service, name) in normal traces.
        for idx, span in enumerate(spans):
            span.idx = idx  # NOTE(review): idx looks unused downstream -- confirm before removing
            # Single .loc[(row_key), column] call instead of chained
            # stats.loc[key]['col'] indexing (one lookup, not two).
            span.self_duration_anomaly_score = (
                span.self_duration
                - stats.loc[(span.service, span.name), 'self_duration_upper']
            )

        # Rank spans by anomaly score; the most anomalous one is the prediction.
        spans.sort(key=lambda span: span.self_duration_anomaly_score, reverse=True)
        traceIDs_spanIDs.append([traceID, spans[0].spanID])

    traceIDs_spanIDs = pd.DataFrame(traceIDs_spanIDs, columns=['trace_id', 'span_ids'])
    predictions = pd.merge(batchIDs_traceIDs, traceIDs_spanIDs, on='trace_id')
    # exist_ok=True replaces the racy exists()-then-makedirs() check.
    os.makedirs(f'predictions/{frag}', exist_ok=True)
    predictions.to_csv(f'predictions/{frag}/predictions.csv', index=False)
    utils.log(f'closing unsatisfied traces')
