import pandas as pd
import json
import numpy as np
from utils.load_data import *

def delnullcol(data):
    data[data=='null'] = None
    data.dropna(how='all', axis=1, inplace=True)

transtime = lambda x: int((pd.Timestamp(x) - pd.Timestamp('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')) # -60*60*8

def getmethod(x, key):
    """Return the 'value' of the first entry in *x* whose 'key' equals *key*.

    *x* is an iterable of {'key': ..., 'value': ...} mappings (e.g. Jaeger
    span tags). Returns None when no entry matches.
    """
    return next((entry['value'] for entry in x if entry['key'] == key), None)

def read_filter_csv(path, fromt=0, tot=2749110400):
    """Read a CSV and keep only rows with fromt < timestamp < tot (strict)."""
    frame = pd.read_csv(path)
    in_window = (frame['timestamp'] > fromt) & (frame['timestamp'] < tot)
    return frame[in_window]

def read_node_infra(basepath = '/app/jupyter/aiops/aiopschallengedata2025/sample/abnormal/case1/./metric-parquet/infra',
                  date = '2025-04-28'):
    """Load every per-node infra metric parquet for one day and outer-join
    them into a single wide DataFrame keyed by (timestamp, kubernetes_node).

    Returns the merged DataFrame; it is also printed (original behavior).
    """
    indics = [
        "node_cpu_usage_rate",
        "node_disk_read_bytes_total",
        "node_disk_read_time_seconds_total",
        "node_disk_write_time_seconds_total",
        "node_disk_written_bytes_total",
        "node_filesystem_free_bytes",
        "node_filesystem_size_bytes",
        "node_filesystem_usage_rate",
        "node_memory_MemAvailable_bytes",
        "node_memory_MemTotal_bytes",
        "node_memory_usage_rate",
        "node_network_receive_bytes_total",
        "node_network_receive_packets_total",
        "node_network_transmit_bytes_total",
        "node_network_transmit_packets_total",
        "node_sockstat_TCP_inuse",
    ]

    node_indics = None
    for indic in indics:
        data = load_in_file(f'{basepath}/infra_node/infra_node_{indic}_{date}.parquet')
        delnullcol(data)
        data['timestamp'] = data['time'].apply(transtime)
        if node_indics is None:
            # First metric seeds the frame; dedupe with a groupby-max.
            data = data[['timestamp', 'kubernetes_node', 'instance', indic]]
            node_indics = data.groupby(['timestamp', 'kubernetes_node', 'instance'])[indic].max().reset_index()
        else:
            # Heuristic kept from the original: only aggregate when the new
            # frame has more rows than the accumulated one — NOTE(review):
            # duplicates in a smaller frame slip through unaggregated.
            if data.shape[0] > node_indics.shape[0]:
                data = data.groupby(['timestamp', 'kubernetes_node', 'instance'])[indic].max().reset_index()
            # Merge on (timestamp, kubernetes_node); 'instance' is intentionally
            # dropped from the right side, as in the original.
            node_indics = pd.merge(node_indics, data[['timestamp', 'kubernetes_node', indic]],
                                   on=['timestamp', 'kubernetes_node'], how='outer')
    print(node_indics)
    return node_indics

def read_tidb_infra(basepath = '/app/jupyter/aiops/aiopschallengedata2025/sample/abnormal/case1/./metric-parquet/infra',
                  date = '2025-04-28'):
    """Load every TiDB infra metric parquet for one day and outer-join them
    into one wide DataFrame keyed by timestamp; columns are prefixed 'tidb_'.

    Returns the merged DataFrame; it is also printed (original behavior).
    """
    indics = [
        "block_cache_size",
        "connection_count",
        "cpu_usage",
        "duration_95th",
        "duration_99th",
        "duration_avg",
        "failed_query_ops",
        "memory_usage",
        "qps",
        "server_is_up",
        "top_sql_cpu",
        "transaction_retry",
        #"uptime",
    ]

    node_indics = None
    for indic in indics:
        data = load_in_file(f'{basepath}/infra_tidb/infra_tidb_{indic}_{date}.parquet')
        delnullcol(data)
        data['timestamp'] = data['time'].apply(transtime)
        if node_indics is None:
            # First metric seeds the frame; rename then dedupe with groupby-max.
            data = data[['timestamp', indic]]
            data.columns = ['timestamp', 'tidb_' + indic]
            node_indics = data.groupby(['timestamp'])['tidb_' + indic].max().reset_index()
        else:
            # Heuristic kept from the original: only aggregate when the new
            # frame has more rows than the accumulated one.
            if data.shape[0] > node_indics.shape[0]:
                data = data.groupby(['timestamp'])[indic].max().reset_index()
            data = data[['timestamp', indic]]
            data.columns = ['timestamp', 'tidb_' + indic]
            node_indics = pd.merge(node_indics, data, on=['timestamp'], how='outer')
    print(node_indics)
    return node_indics

def read_tikv_infra(basepath = '/app/jupyter/aiops/aiopschallengedata2025/sample/abnormal/case1/./metric-parquet/infra',
                  date = '2025-04-28'):
    """Load every TiKV infra metric parquet for one day and outer-join them
    into one wide DataFrame keyed by timestamp; columns are prefixed 'tikv_'.

    Returns the merged DataFrame; it is also printed (original behavior).
    """
    indics = [
        "available_size",
        "capacity_size",
        "cpu_usage",
        "grpc_qps",
        "io_util",
        "memory_usage",
        "qps",
        "raft_apply_wait",
        "raft_propose_wait",
        "read_mbps",
        "region_pending",
        "rocksdb_write_stall",
        "server_is_up",
        "snapshot_apply_count",
        "store_size",
        "threadpool_readpool_cpu",
        "write_wal_mbps",
    ]

    node_indics = None
    for indic in indics:
        data = load_in_file(f'{basepath}/other/infra_tikv_{indic}_{date}.parquet')
        delnullcol(data)
        data['timestamp'] = data['time'].apply(transtime)
        if node_indics is None:
            # First metric seeds the frame; rename then dedupe with groupby-max.
            data = data[['timestamp', indic]]
            data.columns = ['timestamp', 'tikv_' + indic]
            node_indics = data.groupby(['timestamp'])['tikv_' + indic].max().reset_index()
        else:
            # Heuristic kept from the original: only aggregate when the new
            # frame has more rows than the accumulated one.
            if data.shape[0] > node_indics.shape[0]:
                data = data.groupby(['timestamp'])[indic].max().reset_index()
            data = data[['timestamp', indic]]
            data.columns = ['timestamp', 'tikv_' + indic]
            node_indics = pd.merge(node_indics, data, on=['timestamp'], how='outer')
    print(node_indics)
    return node_indics

def read_pd_infra(basepath = '/app/jupyter/aiops/aiopschallengedata2025/sample/abnormal/case1/./metric-parquet/infra',
                  date = '2025-04-28'):
    """Load every PD (placement driver) infra metric parquet for one day and
    outer-join them into one wide DataFrame keyed by timestamp; columns are
    prefixed 'pd_'.

    Returns the merged DataFrame; it is also printed (original behavior).
    """
    indics = [
        "abnormal_region_count",
        "cpu_usage",
        "leader_count",
        "leader_primary",
        "learner_count",
        "memory_usage",
        "region_count",
        "region_health",
        "storage_capacity",
        "storage_size",
        "storage_used_ratio",
        "store_down_count",
        "store_low_space_count",
        "store_slow_count",
        "store_unhealth_count",
        "store_up_count",
        "witness_count",
    ]

    node_indics = None
    for indic in indics:
        data = load_in_file(f'{basepath}/other/infra_pd_{indic}_{date}.parquet')
        delnullcol(data)
        data['timestamp'] = data['time'].apply(transtime)
        if node_indics is None:
            # First metric seeds the frame; rename then dedupe with groupby-max.
            data = data[['timestamp', indic]]
            data.columns = ['timestamp', 'pd_' + indic]
            node_indics = data.groupby(['timestamp'])['pd_' + indic].max().reset_index()
        else:
            # Heuristic kept from the original: only aggregate when the new
            # frame has more rows than the accumulated one.
            if data.shape[0] > node_indics.shape[0]:
                data = data.groupby(['timestamp'])[indic].max().reset_index()
            data = data[['timestamp', indic]]
            data.columns = ['timestamp', 'pd_' + indic]
            node_indics = pd.merge(node_indics, data, on=['timestamp'], how='outer')
    print(node_indics)
    return node_indics


#read_apm_pod(basepath='../phaseone/2025-06-06/metric-parquet/apm/',pod='ns_hipstershop',date='2025-06-06')
# read_apm_pod(basepath='../phaseone/2025-06-06/metric-parquet/apm/pod/',pod='adservice-0',date='2025-06-06')
def read_apm_pod(basepath = '/app/jupyter/aiops/phaseone/2025-06-06/metric-parquet/apm/',pod='ns_hipstershop',date='2025-06-06',fromt = 0, tot = 2749110400):
    """Load one pod-level APM parquet and keep rows with fromt < timestamp < tot.

    Returns the filtered DataFrame.
    Raises Exception (chained to the underlying error) when the file cannot be
    loaded or processed.
    """
    pod_indics = None
    fname = f'{basepath}/pod_{pod}_{date}.parquet'
    try:
        pod_indics = load_in_file(fname)
        delnullcol(pod_indics)
        pod_indics['timestamp'] = pod_indics['time'].apply(transtime)
        pod_indics = pod_indics[(pod_indics['timestamp'] > fromt) & (pod_indics['timestamp'] < tot)]
    except Exception as err:
        # Chain the root cause instead of discarding it (the original dropped err).
        raise Exception(f'Failed to load in {fname} ') from err
    return pod_indics


def read_apm_service(basepath = '/app/jupyter/aiops/phaseone/2025-06-06/metric-parquet/apm/service/',
                  date = '2025-06-06',pod='shippingservice',fromt = 0, tot = 2749110400):
    """Load one service-level APM parquet and keep rows with fromt < timestamp < tot.

    Returns the filtered DataFrame.
    Raises Exception (chained to the underlying error) when the file cannot be
    loaded or processed.
    """
    pod_indics = None
    fname = f'{basepath}/service_{pod}_{date}.parquet'
    try:
        pod_indics = load_in_file(fname)
        delnullcol(pod_indics)
        pod_indics['timestamp'] = pod_indics['time'].apply(transtime)
        pod_indics = pod_indics[(pod_indics['timestamp'] > fromt) & (pod_indics['timestamp'] < tot)]
    except Exception as err:
        # Chain the root cause instead of discarding it (the original dropped err).
        raise Exception(f'Failed to load in {fname} ') from err
    return pod_indics

    

def read_pod_infra(basepath = '/app/jupyter/aiops/aiopschallengedata2025/sample/abnormal/case1/./metric-parquet/infra',
                  date = '2025-04-28'):
    """Load every per-pod infra metric parquet for one day and outer-join them
    into one wide DataFrame keyed by (timestamp, instance, pod); a derived
    'service' column strips the pod's trailing ordinal.

    Returns the merged DataFrame; it is also printed (original behavior).
    """
    indics = [
        "pod_cpu_usage",
        "pod_fs_reads_bytes",
        "pod_fs_writes_bytes",
        "pod_memory_working_set_bytes",
        "pod_network_receive_bytes",
        "pod_network_receive_packets",
        "pod_network_transmit_bytes",
        "pod_network_transmit_packets",
        "pod_processes",
    ]

    pod_indics = None
    for indic in indics:
        data = load_in_file(f'{basepath}/infra_pod/infra_pod_{indic}_{date}.parquet')
        delnullcol(data)
        data['timestamp'] = data['time'].apply(transtime)
        if pod_indics is None:
            # First metric seeds the frame; dedupe with a groupby-max.
            data = data[['timestamp', 'instance', 'pod', indic]]
            pod_indics = data.groupby(['timestamp', 'pod', 'instance'])[indic].max().reset_index()
        else:
            # Heuristic kept from the original: only aggregate when the new
            # frame has more rows than the accumulated one.
            if data.shape[0] > pod_indics.shape[0]:
                data = data.groupby(['timestamp', 'pod', 'instance'])[indic].max().reset_index()
            pod_indics = pd.merge(pod_indics, data[['timestamp', 'instance', 'pod', indic]],
                                  on=['timestamp', 'instance', 'pod'], how='outer')
    # Service name = pod name minus its trailing dash-segment
    # (e.g. 'adservice-0' -> 'adservice').
    pod_indics['service'] = pod_indics['pod'].apply(lambda x: '-'.join(x.split('-')[0:-1]))
    print(pod_indics)
    return pod_indics
    

def pod2nodes(data_pod, data_node, topology=None):
    """Build or update a pod -> {'service', 'instance'} topology mapping.

    data_pod must have 'instance' and 'pod' columns; data_node must have
    'instance' and 'kubernetes_node' columns. When *topology* is given it is
    updated in place: new pods are added, and a pod whose instance changed is
    reported via print (the stored entry is NOT updated — kept from original).
    Returns the topology dict.
    """
    # Maps kubernetes_node -> instance (despite the name); currently unused,
    # kept for the disabled 'node' field below — TODO(review) confirm intent.
    values = data_node[['instance', 'kubernetes_node']].drop_duplicates().values
    i2n = {i[1]: i[0] for i in values}

    # pod -> instance.
    values = data_pod[['instance', 'pod']].drop_duplicates().values
    p2i = {i[1]: i[0] for i in values}

    def _entry(pod, instance):
        # One topology record; service name is the pod name's first dash-segment.
        return {
            #'node': i2n[instance],
            'service': pod.split('-')[0],
            'instance': instance,
        }

    if topology is None:
        topology = {p: _entry(p, i) for p, i in p2i.items()}
    else:
        for p, i in p2i.items():
            if p in topology:
                # Report (but do not apply) an instance change for a known pod.
                if i != topology[p]['instance']:
                    print('Changed')
                    print(topology[p])
                    print(i, p)
            else:
                topology[p] = _entry(p, i)
    return topology

class servicelog():
    """Loader for filebeat service logs.

    Reads one parquet file per entry in *datetime*, concatenates them, keeps
    rows with fromt < timestamp < tot, and splits the result into per-service
    DataFrames keyed by the first dash-segment of the pod name.
    """
    def __init__(self,basepath,datetime,fromt = 0, tot = 2749110400):
        # Accumulates the concatenation of every successfully loaded file;
        # files that fail to load are skipped with a printed notice.
        data = None
        self.basepath = basepath
        for ip,idatetime in enumerate(datetime):
            if data is None:
                try:
                    data = load_in_file(f'{basepath}/log_filebeat-server_{idatetime}.parquet' )
                except Exception as err:
                    print(f'Failed to loadding {idatetime}')
                    continue
            else:
                try:
                    data = pd.concat([data,load_in_file(f'{basepath}/log_filebeat-server_{idatetime}.parquet' )],axis = 0)
                except Exception as err:
                    print(f'Failed to loadding {idatetime}')
                    continue
        # fulllogs: service-name -> DataFrame of that service's log rows.
        self.fulllogs = {}
        if data is not None:
            delnullcol(data)
            # Unix epoch seconds derived from the '@timestamp' column.
            data['timestamp'] = data['@timestamp'].apply(lambda x: transtime(x))
            # filter data based on time span (strict inequalities)
            data = data[(data['timestamp']>fromt)&((data['timestamp']<tot))]
            # 'kps' = service name, i.e. the pod name's first dash-segment.
            data['kps'] = data['k8_pod'].apply(lambda x: x.split('-')[0])
            for kps in data['kps'].drop_duplicates().values:
                # NOTE(review): these are slices of `data`; later in-place
                # mutation (see preprocess) may trigger SettingWithCopyWarning.
                self.fulllogs[kps] = data[data['kps']==kps]

    def preprocess(self):
        """Parse the JSON 'message' column into 'severity'/'error' columns.

        On any failure for a service, falls back to an empty 'severity'
        column for that service and moves on.
        """
        for kps in self.fulllogs:
            try:
                self.fulllogs[kps].drop(columns=['kps','k8_namespace','agent_name','@timestamp'],inplace=True)
                # 'severity' from the parsed message; empty string for empty messages.
                self.fulllogs[kps]['severity'] = self.fulllogs[kps]['message'].apply(lambda x: json.loads(x)['severity'] if len(x) > 0 else '')
                # 'error' keeps only the prefix before the first ':' of the error text.
                self.fulllogs[kps]['error'] = self.fulllogs[kps]['message'].apply(lambda x: json.loads(x)['error'].split(':')[0] if 'error' in json.loads(x).keys()  else '')
                
            except Exception as err:
                #print(f'Error while processing {kps}:',err)
                self.fulllogs[kps]['severity'] = ''
                # NOTE(review): result of drop() is discarded (no inplace/assign),
                # so 'message' is NOT actually removed here — confirm intent.
                self.fulllogs[kps].drop(columns=['message'])
                continue
    def save(self):
        """Pickle the per-service log dict to <basepath>/service_logs.pkl."""
        import pickle
        # NOTE(review): file handle is not closed explicitly (no `with`).
        pickle.dump(self.fulllogs,open(f'{self.basepath}/service_logs.pkl','wb'))


class tracelog():
    """Loader and analyzer for Jaeger trace spans.

    Concatenates one parquet file per entry in *datetime*, keeps spans with
    fromt < timestamp < tot, derives tag-based columns (preprocess), and can
    summarize duration statistics around an anomaly window
    (give_duration_statis).
    """
    def __init__(self,basepath,datetime,fromt = 0, tot = 2749110400):
        # self.data accumulates all successfully loaded span frames;
        # files that fail to load are skipped with a printed notice.
        self.data = None
        self.basepath = basepath
        for ip,idatetime in enumerate(datetime):
            if self.data is None:
                try:
                    self.data = load_in_file(f'{basepath}/trace_jaeger-span_{idatetime}.parquet' )
                except Exception as err:
                    print(f'Failed to loadding {idatetime}')
                    continue
            else:
                try:
                    self.data = pd.concat([self.data,load_in_file(f'{basepath}/trace_jaeger-span_{idatetime}.parquet' )],axis = 0)
                except Exception as err:
                    print(f'Failed to loadding {idatetime}')
                    continue
        if self.data is not None:
            # filter data based on time span: epoch seconds from startTimeMillis
            self.data['timestamp'] = self.data['startTimeMillis']//1000
            self.data = self.data[(self.data['timestamp']>fromt)&((self.data['timestamp']<tot))]

    def preprocess(self):
        """Derive span-level columns from the 'tags'/'process' structures.

        getmethod returns None for missing tag keys, so any derived column
        may contain None values.
        """
        if self.data is not None:
            self.data['peer_method'] = self.data['tags'].apply(lambda x: getmethod(x,'rpc.method'))
            self.data['peer_ip'] = self.data['tags'].apply(lambda x: getmethod(x,'net.peer.ip'))
            self.data['name'] = self.data['process'].apply(lambda x: getmethod(x['tags'],'name'))
            self.data['node_name'] = self.data['process'].apply(lambda x: getmethod(x['tags'],'node_name'))
            self.data['servicename'] = self.data['process'].apply(lambda x: x['serviceName'])
            self.data['code'] = self.data['tags'].apply(lambda x: getmethod(x,'status.code'))
            self.data['message'] = self.data['tags'].apply(lambda x: getmethod(x,'status.message'))
            # Severity is inferred from keywords in the status message.
            self.data['severity'] = self.data['message'].apply(lambda x: 'error' if (x is not None) and ('error' in x or 'failed' in x) else 'info')
            # Extract the numeric status code text when the message mentions one; otherwise 0.
            self.data['codes'] = self.data['message'].apply(lambda x: x.split(':')[1].strip() if (x is not None) and ('status code' in x) else 0)
            # Keep only the message prefix before the first ':'.
            self.data['message'] = self.data['message'].apply(lambda x: x.split(':')[0] if x is not None else x)
            # Callee service guessed from the gRPC operation name, e.g.
            # '/hipstershop.CartService/GetCart' -> 'cartservice'.
            self.data['peer_service'] = self.data['operationName'].apply(lambda x: x.split('/')[0].split('.')[-1].lower())
            # Parent span id from the first reference; -1 for root spans.
            self.data['father'] = self.data.references.apply(lambda x: x[0]['spanID'] if len(x)>0 else -1)

    def give_duration_statis(self,fromt,tot):
        """Return a markdown summary comparing span-duration statistics inside
        the anomaly window [fromt, tot] against the periods before
        (timestamp < fromt-300) and after (timestamp > tot+300), per
        (servicename, peer_service) edge. Edges without a statistically
        significant difference (per delunstatiscolumns, presumably imported
        from utils.load_data — not defined in this file) are dropped.
        """
        if self.data is not None:
            des = 'Trace duration '
            abnormal_describe = self.data[ (self.data['timestamp'] >= fromt ) & (self.data['timestamp'] <= tot ) ].groupby(['servicename','peer_service']).duration.describe().T
            normal_before_describe = self.data[self.data['timestamp'] < fromt - 300 ].groupby(['servicename','peer_service']).duration.describe().T
            normal_after_describe = self.data[self.data['timestamp'] > tot + 300 ].groupby(['servicename','peer_service']).duration.describe().T
            # Drop what we dont need
            abnormal_describe.drop(index=['count','std'],inplace=True)
            normal_before_describe.drop  (index=['count','std'],inplace=True)
            normal_after_describe.drop  (index=['count','std'],inplace=True)

            # Flatten the (servicename, peer_service) column MultiIndex.
            abnormal_describe.columns =      [ f'{i[0]} -> {i[1]} duration' for i in abnormal_describe.columns]
            normal_before_describe.columns = [ f'{i[0]} -> {i[1]} duration' for i in normal_before_describe.columns]
            normal_after_describe.columns =  [ f'{i[0]} -> {i[1]} duration' for i in normal_after_describe.columns]

            # Drop what not statistically significant
            todel_before = delunstatiscolumns(normal_before_describe,abnormal_describe,threshold=0.3) 
            todel_after = delunstatiscolumns(normal_after_describe,abnormal_describe,threshold=0.3) 
            
            # Only drop edges insignificant in BOTH the before and after comparisons.
            todel = list(set(todel_before).intersection(set(todel_after)))
            #print(todel)
            
            abnormal_describe.drop(columns = todel,inplace=True)
            normal_before_describe.drop(  columns = todel,inplace=True)
            normal_after_describe.drop(  columns = todel,inplace=True)
            #print(abnormal_describe)
            #print(normal_before_describe)
            #print(normal_after_describe)

            if abnormal_describe.shape[0] ==0 or abnormal_describe.shape[1] == 0:
                return f'\n there is no statistically significant difference  for {des}'
            # Tag each row with its period before concatenating the three tables.
            abnormal_describe.index = [i+'(abnormal)' for i in abnormal_describe.index]
            normal_before_describe.index = [i+'(before)' for i in normal_before_describe.index]
            normal_after_describe.index = [i+'(after)' for i in normal_after_describe.index]
            return f'The statistic info of {des} is \n\n'+pd.concat([normal_before_describe,abnormal_describe,normal_after_describe],axis=0).dropna().to_markdown() + '\n'
        else:
            return ''
        
    def save(self):
        """Write the (preprocessed) span DataFrame to <basepath>/trace_log.csv."""
        self.data.to_csv(self.basepath+'/trace_log.csv')
        
