import pandas as pd
import numpy as np

from .preprocessing import *
from .load_data import *

# Visual separator line for joining report sections into one context string.
# NOTE(review): not referenced in this chunk — presumably used by callers elsewhere.
splitter = '\n-----------------------------------------\n'
def filter_pod_infradata(podinfradata, error_services, fromt=0, tot=2749110400):
    """Return rows of *podinfradata* whose 'service' is in *error_services*
    and whose 'timestamp' lies strictly inside (fromt, tot).

    Parameters
    ----------
    podinfradata : pandas.DataFrame with 'service' and 'timestamp' columns.
    error_services : collection of service names to keep.
    fromt, tot : exclusive timestamp bounds (defaults cover all data).
    """
    # Series.isin is the vectorized equivalent of apply(lambda x: x in error_services).
    filtereddata = podinfradata[podinfradata['service'].isin(error_services)]
    filtereddata = filtereddata[(filtereddata['timestamp'] > fromt) & (filtereddata['timestamp'] < tot)]
    return filtereddata

def filter_pod_infradata_pod(podinfradata, error_pods, fromt=0, tot=2749110400):
    """Return rows of *podinfradata* whose 'pod' is in *error_pods*
    and whose 'timestamp' lies strictly inside (fromt, tot).

    Parameters
    ----------
    podinfradata : pandas.DataFrame with 'pod' and 'timestamp' columns.
    error_pods : collection of pod names to keep.
    fromt, tot : exclusive timestamp bounds (defaults cover all data).
    """
    # Series.isin is the vectorized equivalent of apply(lambda x: x in error_pods).
    filtereddata = podinfradata[podinfradata['pod'].isin(error_pods)]
    filtereddata = filtereddata[(filtereddata['timestamp'] > fromt) & (filtereddata['timestamp'] < tot)]
    return filtereddata

def filter_node_infradata(nodeinfradata, error_nodes, fromt=0, tot=2749110400):
    """Return rows of *nodeinfradata* whose 'kubernetes_node' is in *error_nodes*
    and whose 'timestamp' lies strictly inside (fromt, tot).

    Parameters
    ----------
    nodeinfradata : pandas.DataFrame with 'kubernetes_node' and 'timestamp' columns.
    error_nodes : collection of node names to keep.
    fromt, tot : exclusive timestamp bounds (defaults cover all data).
    """
    # Series.isin is the vectorized equivalent of apply(lambda x: x in error_nodes).
    filtereddata = nodeinfradata[nodeinfradata['kubernetes_node'].isin(error_nodes)]
    filtereddata = filtereddata[(filtereddata['timestamp'] > fromt) & (filtereddata['timestamp'] < tot)]
    return filtereddata


def describeindics(nodeinfradata, error_start_time, error_end_time, des=''):
    """Build a markdown comparison of metric statistics before, during and
    after an error window.

    Splits *nodeinfradata* on 'timestamp' into the abnormal window
    [error_start_time, error_end_time] and the normal data more than 300s
    outside it, then drops columns that `delunstatiscolumns` deems not
    statistically different in BOTH normal slices.

    Returns a human-readable string (a markdown table, or a short message
    when there is no usable data / no significant difference).
    """
    frame = nodeinfradata
    rows, cols = frame.shape
    if rows == 0 or cols == 0:
        return f'\n there is no valid data for {des}'

    ts = frame['timestamp']
    abnormal_part = frame[(ts >= error_start_time) & (ts <= error_end_time)]
    # 300s guard band keeps transition samples out of the "normal" slices.
    normal_part_before = frame[ts < error_start_time - 300]
    normal_part_after = frame[ts > error_end_time + 300]

    summaries = [part.describe(percentiles=[.75])
                 for part in (normal_part_before, abnormal_part, normal_part_after)]
    for summary in summaries:
        # count/std rows and the timestamp column carry no signal here.
        summary.drop(index=['count', 'std'], inplace=True)
        summary.drop(columns=['timestamp'], inplace=True)
    normal_before_describe, abnormal_describe, normal_after_describe = summaries

    # Only drop a column when it is insignificant vs BOTH normal slices.
    todel_before = delunstatiscolumns(normal_before_describe, abnormal_describe)
    todel_after = delunstatiscolumns(normal_after_describe, abnormal_describe)
    todel = list(set(todel_before) & set(todel_after))
    for summary in summaries:
        summary.drop(columns=todel, inplace=True)

    if abnormal_describe.shape[0] == 0 or abnormal_describe.shape[1] == 0:
        return f'\n there is no statistically significant difference  for {des}'

    # Tag index rows so the three slices stay distinguishable after concat.
    abnormal_describe.index = [f'{label}(abnormal)' for label in abnormal_describe.index]
    normal_before_describe.index = [f'{label}(before)' for label in normal_before_describe.index]
    normal_after_describe.index = [f'{label}(after)' for label in normal_after_describe.index]
    table = pd.concat(
        [normal_before_describe, abnormal_describe, normal_after_describe],
        axis=0).dropna().to_markdown()
    return f'The statistic info of {des} is \n\n' + table + '\n'

# Load given logs
def ana_indics(service_nodes, error_services, podinfradata, fromt, tot, padding, basepathday, days):
    """Load APM pod metrics for each (service, node) pair and describe them.

    Parameters
    ----------
    service_nodes : iterable of (service, node) pairs; only the service part
        is used when loading metrics.
    error_services, podinfradata : unused here, kept for interface
        compatibility with existing callers.
    fromt, tot : error window; metrics are loaded with *padding* slack around
        it and described against the unpadded window.
    basepathday, days : dataset root path and list of dates (days[0] is used).

    Returns a context string accumulating per-service statistics and
    missing-log notices.
    """
    apm_podlog = {}
    context = ''
    for service, _node in service_nodes:
        try:
            apm_podlog[service] = read_apm_pod(
                f'{basepathday}/metric-parquet/apm/pod/', date=days[0], pod=service,
                fromt=fromt - padding, tot=tot + padding)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        except Exception:
            # Missing metric files are treated as a signal of abnormal shutdown.
            context += f'\n No logs for service {service}, which means the pod may be shutdown abnormally'

    for key, indics in apm_podlog.items():
        context += f'\nInformation for {key} \n :{describeindics(indics,fromt,tot,key)}'

    return context

def Loadtidb(tidbinfradata, tikvinfradata, pdinfradata, fromt, tot, padding):
    """Describe the TiDB, TiKV and PD component metrics inside (fromt, tot).

    Each component frame is filtered to the window, then summarized by
    `describeindics` against the padded inner window. Returns the three
    sections concatenated into one context string.
    """
    components = (
        ('TIDB', tidbinfradata, 'tidb'),
        ('TIKV', tikvinfradata, 'tikv'),
        ('PD', pdinfradata, 'pd'),
    )
    context = ''
    for label, frame, name in components:
        window = frame[(frame['timestamp'] > fromt) & (frame['timestamp'] < tot)]
        context += f'\n {label} info:\n {describeindics(window, fromt + padding, tot - padding, name)}'
    return context

def LoadMetrics_node(nodeinfradata, node, fromt, tot, padding):
    """Describe the metrics of a single Kubernetes *node* inside (fromt, tot).

    Falls back to a not-found message (and prints the underlying error) when
    filtering or describing the node data raises.
    """
    try:
        selected = filter_node_infradata(nodeinfradata, [node], fromt=fromt, tot=tot)
        summary = describeindics(selected, fromt + padding, tot - padding, node)
        return f'\n {node} info:\n {summary}'
    except Exception as err:
        print(err)
        return f'\n Information of {node} is not found, check input or missing logs'
    
def LoadMetrics_pod(podinfradata, pod, fromt, tot, padding):
    """Describe the metrics of a single *pod* inside (fromt, tot).

    Falls back to a not-found message when filtering or describing the pod
    data raises.
    """
    try:
        selected = filter_pod_infradata_pod(podinfradata, [pod], fromt=fromt, tot=tot)
        summary = describeindics(selected, fromt + padding, tot - padding, pod)
        return f'\n {pod} info:\n {summary}'
    except Exception:
        return f'\n Information of {pod} is not found, check input or missing logs'

def LoadMetrics_service_pod(podinfradata, error_services, fromt, tot, padding, basepathday, days):
    """Map error services to their instances and describe their metrics.

    Filters *podinfradata* to the error services inside the padded window,
    derives distinct (pod, instance) pairs, and delegates the per-service
    statistics to `ana_indics`.

    Returns (context string, array of (pod, instance) pairs).
    """
    # Keep only the error services inside the padded error window.
    filtered = filter_pod_infradata(podinfradata, error_services, fromt=fromt + padding, tot=tot - padding)
    service_nodes = filtered[['pod', 'instance']].drop_duplicates().values
    pairs = ';'.join('==>'.join([svc, inst]) for svc, inst in service_nodes)
    context = f'\n The relationship between service and instances are {pairs}\n'
    context += ana_indics(service_nodes, error_services, filtered,
                          fromt + padding, tot - padding, padding, basepathday, days)
    return context, service_nodes

def TraceAnalysis(trace, fromt, tot):
    """Summarize a trace object and extract services with error spans.

    Starts from the trace's duration statistics, then — if any rows with
    severity 'error' exist — appends their time range and a deduplicated
    table of the error records.

    Returns (context string, list of peer services seen in error traces).
    """
    tracedata = trace.data
    context = ''
    context += trace.give_duration_statis(fromt, tot)

    error_traces = [] if tracedata is None else tracedata[tracedata['severity'] == 'error']
    if len(error_traces) == 0:
        return context, []

    first_ts = error_traces['timestamp'].min()
    last_ts = error_traces['timestamp'].max()
    context += f'\n Found trace service from {first_ts} to {last_ts}:\n\n'
    context += error_traces[['servicename', 'peer_service', 'message', 'severity']].drop_duplicates().to_markdown()
    error_services = error_traces['peer_service'].drop_duplicates().values.tolist()
    return context, error_services

def LoadMetrics_service(basepath='/app/jupyter/aiops/phaseone/2025-06-06/metric-parquet/apm/service/',
                        date='2025-06-06', pod='shippingservice', fromt=0, tot=2749110400, padding=1000):
    """Load APM metrics for one service and describe them.

    Reads the service's metrics from *basepath* for *date* within
    (fromt, tot) and summarizes them against the padded inner window.
    Returns a not-found message when loading or describing fails.
    """
    try:
        serviceinfo = read_apm_service(basepath, date, pod, fromt, tot)
        return describeindics(serviceinfo, fromt + padding, tot - padding, pod)
    except Exception:
        return f'log of {pod} not found'


def LogSearch(logdata):
    """Scan per-service log frames for records above info/debug severity.

    Parameters
    ----------
    logdata : dict mapping service name -> DataFrame with 'severity' and
        'error' columns.

    Returns a context string listing the deduplicated error messages per
    service, or 'no error found !' when everything is info/debug noise.
    """
    abnormalmessage = {}
    for key, frame in logdata.items():
        # Case-insensitive filter: the original only excluded the literal
        # spellings 'INFO'/'info'/'debug', letting e.g. 'DEBUG' slip through.
        severity = frame['severity'].str.lower()
        abnormalrecord = frame[(frame['severity'] != '') & ~severity.isin(['info', 'debug'])]
        if abnormalrecord.shape[0] > 0:
            abnormalmessage[key] = abnormalrecord['error'].drop_duplicates().to_markdown()
    if abnormalmessage:  # we find the abnormal message !
        return '*checkfrontend called and We found the error log info:' + "\n".join(
            key + ":\n\n" + value for key, value in abnormalmessage.items())
    else:
        return 'no error found !'
        

def LoadMetrics_allservice(basepath='/app/jupyter/aiops/phaseone/2025-06-06/metric-parquet/apm/service/',
                           date='2025-06-06', fromt=0, allservices=(), tot=2749110400, padding=1000):
    """Load and describe APM metrics for every service in *allservices*.

    Parameters
    ----------
    basepath, date : dataset location and date partition.
    fromt, tot : load window; statistics use the padded inner window.
    allservices : iterable of service names (default changed from a mutable
        `[]` to an immutable `()` — it is only iterated, so callers see no
        difference).
    padding : slack subtracted from the described window edges.

    Returns the concatenated per-service descriptions; services whose logs
    cannot be read contribute a not-found notice instead.
    """
    context = ''
    for service in allservices:
        try:
            serviceinfo = read_apm_service(basepath, date, service, fromt, tot)
            context += describeindics(serviceinfo, fromt + padding, tot - padding, service)
        except Exception:
            context += f'log of {service} not found'
    return context

def LoadMetrics_allnode(nodeinfradata, fromt, tot, padding):
    """Describe the metrics of every distinct Kubernetes node in the frame.

    Iterates the unique 'kubernetes_node' values, filters each node's rows
    to (fromt, tot) and summarizes them; nodes that fail to filter or
    describe contribute a not-found notice instead.
    """
    context = ''
    distinct_nodes = nodeinfradata['kubernetes_node'].drop_duplicates().values
    for node in distinct_nodes:
        try:
            selected = filter_node_infradata(nodeinfradata, [node], fromt=fromt, tot=tot)
            context += f' {describeindics(selected, fromt + padding, tot - padding, node)}'
        except Exception:
            context += f'\n Information of {node} is not found, check input or missing logs'
    return context


