import pickle
import random
from datetime import datetime
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.utils import scatter
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score


def get_cmd(argvs):
    """Reconstruct the launching shell command from an argv list."""
    return f"python {' '.join(argvs)}"


def save_npy(save_path, data):
    """Persist *data* to disk in NumPy ``.npy`` format at *save_path*."""
    np.save(save_path, data)


def load_npy(save_path):
    """Load and return an array previously written with ``np.save``."""
    return np.load(save_path)


def save_pkl(save_path, data):
    """Pickle *data* to *save_path*.

    :param save_path: destination path; must end with a ``.pkl`` extension.
    :param data: any picklable object.
    :raises ValueError: if the path does not end in ``.pkl``.  This was an
        ``assert``, which is silently stripped under ``python -O`` — an
        explicit raise keeps the check in optimized runs.
    """
    if save_path.split('.')[-1] != 'pkl':
        raise ValueError(f"expected a .pkl path, got: {save_path}")
    with open(save_path, 'wb') as file:
        pickle.dump(data, file)


def load_pkl(file_path):
    """Unpickle and return the object stored at *file_path*.

    :param file_path: source path; must end with a ``.pkl`` extension.
    :return: the unpickled object.
    :raises ValueError: if the path does not end in ``.pkl``.  This was an
        ``assert``, which is silently stripped under ``python -O`` — an
        explicit raise keeps the check in optimized runs.

    NOTE(review): pickle.load on untrusted files can execute arbitrary
    code — only use this on files the project itself produced.
    """
    if file_path.split('.')[-1] != 'pkl':
        raise ValueError(f"expected a .pkl path, got: {file_path}")
    with open(file_path, 'rb') as file:
        data = pickle.load(file)
    return data


def init_dl_programe(args):
    """Seed all RNG sources and select the compute device.

    Side effects on *args*: sets ``args.cuda`` (bool) and ``args.device``
    (torch.device), and seeds ``random`` / ``numpy`` / ``torch`` with
    consecutive seeds derived from ``args.seed``.
    """
    args.cuda = torch.cuda.is_available() and not args.no_cuda

    # Consecutive seeds (seed, seed+1, seed+2) so the three RNG streams
    # are decorrelated but still fully determined by args.seed.
    base = args.seed
    random.seed(base)
    np.random.seed(base + 1)
    torch.manual_seed(base + 2)
    if args.cuda:
        torch.cuda.manual_seed(base + 2)

    args.device = torch.device('cuda:0' if args.cuda else 'cpu')


def align_trace_metric(trace_feats, mt, time_index, metric_all_data, idx_node_map, service_list, window_size):
    '''
    Join a trace's per-node features with the metric window starting at the
    trace's start time, aligned service-by-service.

    :param trace_feats: per-node trace features, shape (L, F_s) — presumably
        a torch tensor; confirm with caller
    :param mt: trace metadata dict; reads 'endpoint' and 'start_time'
        (a "%Y-%m-%d %H:%M:%S" string)
    :param time_index: dict mapping unix timestamp -> column index into
        metric_all_data
    :param metric_all_data: (#service, #timestamp, F_me)
    :param idx_node_map: dict idx -> node name; the service is the part of
        the name before the first ':'
    :param service_list: service names ordered to match axis 0 of
        metric_all_data
    :param window_size: number of consecutive timestamps to slice
    :return: concatenated features (L, F_s + w * F_me), or None when the
        trace cannot be aligned to the metric timeline
    '''

    total_len = metric_all_data.shape[1]

    # NOTE(review): endpoint is read but never used below — dead load?
    endpoint = mt['endpoint']
    start_time = int(datetime.strptime(mt['start_time'], "%Y-%m-%d %H:%M:%S").timestamp())

    # align metric window
    # 28800 s = 8 h — presumably shifting local UTC+8 timestamps onto the
    # metric timeline; TODO confirm against the data's timezone.
    target_timestamp = start_time - 28800
    # Metrics may be slightly sparse: probe up to 5 later timestamps for a
    # matching index before giving up.
    inc = 0
    while target_timestamp not in time_index and inc < 5:
        target_timestamp += 1
        inc += 1

    if inc >= 5:
        return None

    target_index = time_index[target_timestamp]
    # Drop traces whose metric window would run past the end of the series.
    if target_index + window_size - 1 >= total_len:
        return None

    metric_window = metric_all_data[:, target_index:target_index+window_size, :]
    # Flatten the time axis into the feature axis.
    metric_feats = metric_window.reshape(metric_window.shape[0], -1)   # (#service, w * F_me)

    # align services: map each trace node's service name to its row in the
    # metric matrix, so metric rows line up with trace nodes.
    metric_service_idx_map = {s:i for i,s in enumerate(service_list)}
    trace_service_list = [node.split(':')[0] for idx, node in idx_node_map.items()]
    trace_idx_list = [metric_service_idx_map[s] for s in trace_service_list]

    # NOTE(review): torch.concat requires both operands to be tensors —
    # metric_all_data is presumably already a torch tensor here; confirm.
    x = torch.concat([trace_feats, metric_feats[trace_idx_list]], dim=1)  # (L, F_s + w * F_me)
    return x


def minmax_normalize_trace(trace_feat_list):
    """Min-max normalize every tensor in *trace_feat_list* in place, using
    per-feature min/max pooled over all tensors in the list.

    :param trace_feat_list: list of 2-D tensors sharing the feature axis;
        entries are replaced with their normalized versions.

    Fix: clamp the range away from zero so constant features yield 0
    instead of NaN/inf (mirrors the 1e-8 epsilon in standard_scale_trace).
    """
    total_feat = torch.concat(trace_feat_list, dim=0)
    min_vals, _ = torch.min(total_feat, dim=0, keepdim=True)
    max_vals, _ = torch.max(total_feat, dim=0, keepdim=True)
    denom = (max_vals - min_vals).clamp_min(1e-8)
    for i in range(len(trace_feat_list)):
        trace_feat_list[i] = (trace_feat_list[i] - min_vals) / denom


def normalize(trace_feat):
    """Z-score *trace_feat* per feature (column) and return the result.

    :param trace_feat: 2-D tensor (rows = samples, cols = features).
    :return: standardized tensor of the same shape.

    Fix: add the same 1e-8 epsilon used in standard_scale_trace so that
    constant (zero-variance) columns yield 0 instead of inf/NaN.
    """
    mu = torch.mean(trace_feat, dim=0, keepdim=True)
    sigma = torch.std(trace_feat, dim=0, keepdim=True)
    return (trace_feat - mu) / (sigma + 1e-8)


def standard_scale_trace(trace_feat_list):
    """Z-score every tensor in *trace_feat_list* in place, using per-feature
    mean/std pooled over all rows of all tensors in the list."""
    pooled = torch.concat(trace_feat_list, dim=0)
    mu = pooled.mean(dim=0, keepdim=True)
    sigma = pooled.std(dim=0, keepdim=True)
    for idx, feat in enumerate(trace_feat_list):
        trace_feat_list[idx] = (feat - mu) / (sigma + 1e-8)


def minmax_normalize_metric(data):
    """Min-max scale *data* per feature, pooled over services and timestamps.

    :param data: array of shape (#service, #timestamp, F); mins/maxes are
        reduced over axes 0 and 1 (shape (F,)) and broadcast back.
    :return: scaled array, each feature mapped into [0, 1].

    Fix: clamp the range away from zero so constant features yield 0
    instead of NaN (mirrors the epsilon in standard_scale_metric).
    Reducing over axis=(0, 1) is equivalent to the old nested min/min —
    just in one pass.
    """
    mi = np.min(data, axis=(0, 1))
    ma = np.max(data, axis=(0, 1))
    return (data - mi) / np.maximum(ma - mi, 1e-8)


def standard_scale_metric(data):
    """Z-score *data* per feature, pooled over services and timestamps.

    :param data: array of shape (#service, #timestamp, F).
    :return: standardized array of the same shape.

    Fix: the original computed ``np.std(np.std(data, axis=1), axis=0)`` —
    the std of per-service stds — which is NOT the overall std of the
    values (the nested mean-of-means was only coincidentally correct
    because the groups are equal sized).  Reduce over both axes jointly.
    """
    mu = np.mean(data, axis=(0, 1))
    sigma = np.std(data, axis=(0, 1))
    return (data - mu) / (sigma + 1e-8)


def to_sparse_batch(x, batch):
    '''
    Convert a dense, padded per-graph node-feature tensor back to flat
    (sparse) form, dropping the padding rows.

    :param x: [B, max(L), F] dense features, each graph padded to max(L) nodes
    :param batch: [num_all_nodes] graph id of every real node — assumes ids
        are contiguous 0..B-1 and sorted; TODO confirm with caller
    :return: [~BL, F] one row per real node, in batch order
    '''
    # number of graphs in the batch (assumes ids start at 0)
    batch_size = int(batch.max()) + 1
    # nodes per graph: count how often each graph id occurs
    num_nodes = scatter(batch.new_ones(batch.size(0)), batch, dim=0,
                        dim_size=batch_size, reduce='sum')
    # cum_nodes[g] = flat index of graph g's first node
    cum_nodes = torch.cat([batch.new_zeros(1), num_nodes.cumsum(dim=0)])

    max_num_nodes = int(num_nodes.max())

    # tmp = each node's position within its own graph (0..L_g-1)
    tmp = torch.arange(batch.size(0), device=x.device) - cum_nodes[batch]
    # idx = each real node's row index in the dense (B * maxL) layout
    idx = tmp + (batch * max_num_nodes)

    # output shape: one row per real node, F features
    size = [batch.shape[0]] + [x.shape[-1]]

    x = x.reshape(batch_size * max_num_nodes, -1)  # (B*maxL, F)
    # NOTE(review): torch.zeros defaults to float32, so the gather below
    # casts x's rows to float32 regardless of x's dtype — confirm intended.
    out = torch.zeros(size).to(x.device)          # (~BL, F)
    out[:] = x[idx]
    return out


def score_report(y, y_pred):
    """Return (precision, recall, f1, accuracy) for the given labels and
    predictions, computed with scikit-learn's default settings."""
    metric_fns = (precision_score, recall_score, f1_score, accuracy_score)
    prec, rec, f1, acc = (fn(y, y_pred) for fn in metric_fns)
    return prec, rec, f1, acc