import pandas as pd
from utils.preprocessing import transtime,tracelog,servicelog,read_filter_csv,read_apm_pod,read_apm_service
import json
import re
import datetime
from utils.analysis import *
#from utils.prompt import *
from utils.prompt_en import *
from utils.llm import *
from tqdm import tqdm
from openai import OpenAI
from threading import Thread

import random
import sys
import os
import dbm
import argparse


def handling(inputs,basepath,start,end,processed_uuids,usedbm,root_path):
    """Worker that root-cause-analyzes the anomalies in inputs[start:end].

    For each anomaly record it loads the relevant trace/log/metric data,
    drives an interactive tool-calling loop with the LLM (``callllm``), and
    appends the reasoning trace to ``{root_path}/result/trace.jsonl`` and the
    extracted answer to ``{root_path}/result/answer.jsonl``.

    Args:
        inputs: list of anomaly dicts with at least 'Anomaly Description' and 'uuid'.
        basepath: data directory for the current phase.
        start, end: slice bounds into ``inputs``; ``end == -1`` means "to the end".
        processed_uuids: uuids already answered; these records are skipped.
        usedbm: whether the dbm-backed LLM response cache is consulted.
        root_path: root directory for outputs and the widetable metrics.

    NOTE(review): relies on module-level globals set in ``__main__``:
    ``padding``, ``callllm`` and ``callllm_result``.
    """
    full_result = []
    full_trace = []
    # -1 is a sentinel for "process to the end of the input list".
    end = len(inputs) if end==-1 else end 
    print(f'Handling {start} to {end}')
    # Service catalog passed to LoadMetrics_allservice (TiDB components excluded).
    allservices = ["adservice",
        "cartservice",
        "currencyservice",
        "productcatalogservice",
        "checkoutservice",
        "recommendationservice",
        "shippingservice",
        "emailservice",
        "paymentservice",
        #"tidb-pd",
        #"tidb-tidb",
        #"tidb-tikv"
    ]
    try:
        for ano in tqdm(inputs[start:end]):
            history = ''
            error_service_explained = []
            error_node_explained = []
            error_services = []
            error_nodes = []
            des = ano['Anomaly Description']
            uuid = ano['uuid']
            # Resume support: skip anomalies that already have an answer.
            if uuid in processed_uuids: 
                continue
            # Extract the two ISO-8601 UTC timestamps embedded in the description.
            # NOTE(review): non-raw string — '\d' is an invalid escape sequence
            # and emits a SyntaxWarning on recent Pythons; should be r'...'.
            pattern=re.compile('2025-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z')  
            result=pattern.findall(des)
            #print(result)
            # Shift to UTC+8 (the timezone the on-disk data paths are named in — TODO confirm).
            result_8 = [(datetime.datetime.strptime(i, "%Y-%m-%dT%H:%M:%SZ")+ datetime.timedelta(hours=8)).strftime("%Y-%m-%dT%H:%M:%SZ") for i in result]
            #print(result_8)
            # transtime presumably yields epoch seconds — the 600/300 arithmetic
            # below assumes second granularity; verify against utils.preprocessing.
            datetimes = [transtime(i) for i in result ] 
            # Widen windows shorter than 10 minutes by 5 minutes on each side.
            if (datetimes[1] - datetimes[0] )< 600:
                datetimes[0] -= 300
                datetimes[1] += 300
            # Hour-level directory names, e.g. '2025-01-02_03-00-00', and day keys.
            hours = [i[0:13].replace('T','_')+'-00-00' for i in result_8]
            days = [i[0:10] for i in result_8]
        
            # basepathday
            basepathday = f'{basepath}/{days[0]}'
            widetable  = f'{root_path}/widetable/{days[0]}'
            
            # Read in trace first
            # `padding` is a module-level global (seconds of extra context).
            trace = tracelog(basepath=f'{basepathday}/trace-parquet/',datetime=[hours[0]],fromt = datetimes[0]-padding,tot = datetimes[1] + padding)
            trace.preprocess()
            # read in log then
            log = servicelog (basepath=f'{basepathday}/log-parquet/',datetime=[hours[0]],fromt = datetimes[0]-padding,tot = datetimes[1] + padding)
            log.preprocess()
            logdata = log.fulllogs
            # Read in infra info
            nodeinfradata = read_filter_csv(path=f'{widetable}/node_all.csv',fromt = datetimes[0]-padding,tot = datetimes[1]+ padding)
            # Read in pod info
            podinfradata = read_filter_csv(path=f'{widetable}/pod_all.csv',fromt = datetimes[0]-padding,tot = datetimes[1]+ padding)
            # Read in tidb info
            tidbinfradata = read_filter_csv(path=f'{widetable}/tidb_all.csv',fromt = datetimes[0]-padding,tot = datetimes[1]+ padding)
            # Read in tikv info
            tikvinfradata = read_filter_csv(path=f'{widetable}/tikv_all.csv',fromt = datetimes[0]-padding,tot = datetimes[1]+ padding)
            # Read in pd info
            pdinfradata = read_filter_csv(path=f'{widetable}/pd_all.csv',fromt = datetimes[0]-padding,tot = datetimes[1]+ padding)
        
            success = False
            
            # Pod-to-node placement table, rendered as markdown for the prompt.
            topology = pod2nodes(podinfradata,nodeinfradata)
            topology = pd.DataFrame(topology).T.to_markdown()
        
            history = ''
            context = ''
            rounds = 1
            # Interactive tool-calling loop: ask the LLM for the next action,
            # execute the requested analysis function, feed the result back.
            while True:
                history,context,suffix = prompt(des,topology,history,context)
                actions = callllm('\n'.join([history,context,suffix]),usedbm = usedbm)
                #print('\n************************ROUND*********************\n')
                #print(f'\n {actions}')
                history += f'\n## 交互{rounds} \n### Thinking: \n {actions} \n### Action & Result'
                rounds += 1
                functions = []
                # Parse the LLM reply line by line for the structured markers
                # METHOD: / SERVICE: / NODE: / GOCHA:.
                for i in actions.split('\n'):
                    if 'METHOD:' in i:
                        functions.append(i.split(':')[1].strip())
                    elif 'SERVICE:' in i:
                        # NOTE(review): split(':')[1] keeps only the text between
                        # the first and second colon, so any explanation after a
                        # second colon is dropped.
                        error_service_explained.append(i.split(':')[1])
                        error_services.append(i.split(':')[1].split(':')[0].strip()) 
                    elif 'NODE:' in i:
                        error_node_explained.append(i.split(':')[1])
                        error_nodes.append(i.split(':')[1].split(':')[0].strip())
                    elif 'GOCHA:' in i:
                        # GOCHA marks that the LLM believes it found the root cause.
                        success = True
                    else:
                        continue
        
                # Stop when the LLM declared success with no further tool calls,
                # or after 20 rounds as a hard cap.
                if (success and len(functions) == 0) or rounds > 20:
                    break

             
                context = ''
                if len(functions)>0:
                    # Dispatch each requested tool; the argument (if any) is the
                    # text between the first '(' and ')' of the METHOD string.
                    for function in functions:
                        if 'LogSearch' in function:
                            result = LogSearch(logdata)
                        elif 'TraceAnalysis' in function :
                            result,error_services_tmp = TraceAnalysis(trace,fromt = datetimes[0],tot = datetimes[1] )
                            if len(error_services_tmp) >0:
                                error_services += error_services_tmp
                        elif 'LoadMetrics_full_service' in function:
                            result = LoadMetrics_service(basepath = basepathday+'/metric-parquet/apm/service/',date = days[0],pod=function.split('(')[1].split(')')[0],
                                                  fromt = datetimes[0]-padding,tot = datetimes[1]+ padding,padding = padding)
                        elif 'LoadMetrics_service_pod' in function:
                            result,service_nodes =  LoadMetrics_service_pod(podinfradata,error_services = [function.split('(')[1].split(')')[0]],
                                                                           fromt = datetimes[0]-padding,tot = datetimes[1]+ padding,padding = padding,
                                                                           basepathday = basepathday,days =  days)
                        elif 'LoadMetrics_node' in function:
                            result = LoadMetrics_node(nodeinfradata,function.split('(')[1].split(')')[0],
                                                                           fromt = datetimes[0]-padding,tot = datetimes[1]+ padding,padding = padding)
                        elif 'LoadMetrics_pod' in function:
                            result = LoadMetrics_pod(podinfradata,function.split('(')[1].split(')')[0],
                                                                           fromt = datetimes[0]-padding,tot = datetimes[1]+ padding,padding = padding)
                        elif 'Loadtidb' in function:
                            result = Loadtidb(tidbinfradata, tikvinfradata, pdinfradata, fromt = datetimes[0]-padding,tot = datetimes[1]+ padding,padding = padding)

                        elif 'LoadMetrics_allservice' in function:
                            result = LoadMetrics_allservice(basepath = basepathday+'/metric-parquet/apm/service/',date = days[0],allservices = allservices,
                                                  fromt = datetimes[0]-padding,tot = datetimes[1]+ padding,padding = padding)
                        elif 'LoadMetrics_allnode' in function:
                            result = LoadMetrics_allnode(nodeinfradata,
                                                                           fromt = datetimes[0]-padding,tot = datetimes[1]+ padding,padding = padding)
                        else: 
                            # NOTE(review): an unrecognized METHOD silently aborts
                            # the whole dispatch loop, skipping later functions.
                            break
                        history += f'\n - ACTION:called function {function} '
                        history += f'\n - RESULT OF {function}: {result} '
                else:
                    # No METHOD parsed — nudge the LLM to name the next tool call.
                    context = '\n 请给出下一步需要调用的方法！'
                
        
                #print('HISTORY:------------------------\n',history)
                #print('context:------------------------\n',context)

            # NOTE(review): `trace` is rebound here from the tracelog object to a
            # JSON string; the trace object is no longer needed at this point.
            trace = json.dumps({'uuid':uuid,'trace':history})
            with open(f'{root_path}/result/trace.jsonl','a') as fp:
                fp.write(trace+'\n')

            # Second LLM pass: condense the trajectory into a structured answer.
            # The re.sub demotes markdown headings so the trajectory nests under
            # the result-extraction prompt.
            written = False
            input_str = gen_result(re.sub(r'(^#)|(\n#)|( +#)','\n##',history),uuid)
            # Retry until the LLM returns parseable JSON with the required keys.
            # NOTE(review): no retry cap — a persistently malformed reply loops forever.
            while not written:
                result = callllm_result(input_str).strip()
                try:
                    # Strip the fenced code block: +7 assumes a '```json\n' prefix
                    # and -3 drops the closing '```' — TODO confirm against the prompt.
                    result = json.loads(result[result.index('```')+7:-3])
                    result['uuid'] = uuid 
                    trace = json.dumps({'uuid':uuid,'trace':history})
                    if 'uuid' in result.keys() and "component" in result.keys() and  "reason" in result.keys() and "reasoning_trace"  in result.keys():
                        with open(f'{root_path}/result/answer.jsonl','a') as fp:
                            fp.write(json.dumps(result)+'\n')
                        written = True
                except Exception as err:
                    print(err,'Rewritten')
                
    except Exception as err:
        # NOTE(review): broad catch — any failure aborts this worker's whole
        # slice with only a printed error; remaining anomalies are not retried.
        print(err)

    #finally:

    #finally:

if __name__ == '__main__':
    
    parser = argparse.ArgumentParser(description='Using llm to analysis root cause')
    parser.add_argument('--root_path', type=str, default='./data', help='datasourcedir')
    parser.add_argument('--phase', type=str, default='phaseone', help='phase one or two')
    parser.add_argument('--deepseek', action='store_true', help='using deepseek , or cstcloud')
    parser.add_argument('--usedbm',    action='store_true', help='using dbm cache, or not(default path is {root_path}/data/cache)')
    parser.add_argument('--numthread', type=int, default=10, help='how many threads to process the data')

    # Global parameters. NOTE: `padding`, `callllm` and `callllm_result` are
    # module-level globals read by handling() in the worker threads.
    args = parser.parse_args()
    root_path = args.root_path
    usedbm = args.usedbm
    padding = 1000  # extra context loaded around each anomaly window
    basepath = f'{root_path}/{args.phase}/'

    print(args)

    # Cache database used to memoize LLM responses.
    dbpath = f'{root_path}/cache/db'
    dbmhandler = dbm.open(dbpath,'c')
    
    if not args.deepseek:
        print ('Using CSTC')
        client = OpenAI(api_key=os.getenv('CSTCKEY'),base_url="https://uni-api.cstcloud.cn/v1")
        callllm = llm_handler(client,dbmhandler = dbmhandler)#,'deepseek-r1:671b')
        callllm_result = llm_handler(client,dbmhandler = dbmhandler,systemprompt = "Please analyze the provided historical analysis trajectory and extract the root cause analysis results in the specified format. ")
    else:
        print ('Using DeepSeek')
        client = OpenAI(api_key=os.getenv('DEEPSEEKKEY'), base_url="https://api.deepseek.com")
        callllm = llm_handler(client,'deepseek-chat',dbmhandler = dbmhandler)
        callllm_result = llm_handler(client,'deepseek-chat',dbmhandler = dbmhandler, systemprompt = "Please analyze the provided historical analysis trajectory and extract the root cause analysis results in the specified format. ")
    # Smoke-test the LLM connection before spawning workers.
    print(callllm('how are you',usedbm=usedbm))
    
    
    # Resume support: uuids that already have an answer are skipped by the
    # workers. A set gives O(1) membership tests inside handling().
    processed_uuids = set()
    try:
        with open(f'{root_path}/result/answer.jsonl','r') as fp:
            processed_uuids = {json.loads(line)['uuid'] for line in fp}
        print(f"{len(processed_uuids)} processed !")
    except (OSError, json.JSONDecodeError, KeyError):
        # Best-effort: a missing or corrupt answer file just means no records
        # are skipped on this run.
        pass

    # inputs
    with open(f'{root_path}/{args.phase}/input.json','r') as fp:
        inputs = json.load(fp)
    inputs = inputs[0:100:2]
    random.shuffle(inputs)

    # Split the inputs into contiguous chunks, one worker thread per chunk.
    # The step is clamped to >= 1 so numthread > len(inputs) cannot raise
    # ZeroDivisionError; the last chunk runs to the end of the list (end == -1).
    numthread = args.numthread 
    threads = []
    step = max(1, len(inputs)//numthread)
    start_end = list(range(0,len(inputs),step))
    for i,start in enumerate(start_end): 
        end = start_end[i+1] if i < len(start_end)-1 else -1
        threads.append(Thread(target=handling,args=(inputs,basepath,start,end,processed_uuids,usedbm,root_path)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
   
