import csv
from datetime import datetime, timedelta
import glob
import os
import shutil
import fnmatch
from concurrent.futures import ThreadPoolExecutor
import json
import paramiko


import paramiko.rsakey
import requests

from common.path import ROOT_DIR
from component.ConfigManager import config_manager
from .const import *
from .plt import plt_topology
from .exception import raise_exception, DuplicateError
from .model import *
from .mysql import mysqlconnector


def getPageTotal(count_condition: str, table_name: str, page_size: int) -> tuple:
    """Return ``(count_total, page_total)`` for *table_name*.

    count_condition: the column expression handed to ``mysqlconnector.count``
    (e.g. "id"); page_size: rows per page.

    Fixed: the old ``count // size + 1`` formula reported one page too many
    whenever the row count was an exact multiple of the page size (and one
    page for an empty table); ceiling division is used instead.
    """
    count_total = mysqlconnector.count(count_condition, table_name)
    # Ceiling division: 20 rows / 10 per page -> 2 pages; 0 rows -> 0 pages.
    page_total = -(-count_total // page_size)
    return count_total, page_total


def formatOrganizationDetails(d: ItemOrganization) -> str:
    details = f"%dC%dG" % (d.cpu_cores, d.cpu_memory)
    if d.gpu_cores > 0:
        details += f", %d卡%dGB %s" % (d.gpu_cores, d.gpu_memory, d.gpu_name)
    return details


def HandleOrganizationInfo(page_id: int, page_size: int, page_total: int):
    """Paged listing of nodes joined with their organization details.

    page_total == 0 means "unknown": count_total/page_total are then
    recomputed from the node table; otherwise count_total stays 0 and the
    caller-supplied page_total is echoed back unchanged.
    Returns a ResponseOrganizationInfo whose info items carry a formatted
    resource summary in ``.details``.
    """
    count_total = 0
    # TODO (translated) changing it this way is problematic — if page_total ... (original note left unfinished)
    if page_total == 0:
        count_total, page_total = getPageTotal(count_condition="id", table_name=TBL_NAME_NODE, page_size=page_size)
    print(f"after_args_check:page_id=%s,page_size=%d,page_total=%d" % (page_id, page_size, page_total))
    # crud action
    sql = f"""select 
        a.*, b.name, b.desc, b.domain 
        from %s a left join %s b on a.organization_id=b.id 
        order by a.id desc
        limit %d,%d
    """ % (TBL_NAME_NODE, TBL_NAME_ORGANIZATION, (page_id - 1) * page_size, page_size)

    '''
    select 
        a.name, a.desc, a.domain, b.* 
        from organization a left join node b on a.id=b.organization_id 
        order by a.id, b.id
    '''

    print(sql)
    records = mysqlconnector.query(sql)
    # response: change into BaseModel
    data = {"count_total": count_total,
            "page_id": page_id,
            "page_size": page_size,
            "page_total": page_total,
            "info": []}
    for r in records:
        print("one record: ", r)
        iteminfo = ItemOrganization(**r)
        # Precompute the "xCyG[, n卡mGB name]" display string for the UI.
        iteminfo.details = formatOrganizationDetails(iteminfo)
        data["info"].append(iteminfo)
    print("response.data:", data)
    return ResponseOrganizationInfo(**data)


def HandleOrganizationSelectInfo():
    """Return id (aliased organization_id), name and domain of every
    organization, ordered by id."""
    sql = "select id as organization_id, name, domain from %s order by id" % TBL_NAME_ORGANIZATION
    return mysqlconnector.query(sql)


def HandleOrganizationAdd(p: Organization):
    """Insert a new organization row and return its auto-generated id.

    NOTE(review): name/desc/domain are interpolated straight into the SQL
    string — a value containing a quote breaks the statement (injection
    risk); parameterize once the connector supports it.
    """
    sql = """insert into %s(`name`, `desc`, `domain`) values('%s', '%s', '%s')""" % (
        TBL_NAME_ORGANIZATION, p.name, p.desc, p.domain
    )
    print(sql)
    lastid, affectrows = mysqlconnector.execute(sql)
    print(f"sql execute insert, lastid=%d, affectrows=%d" % (lastid, affectrows))
    # TODO (translated) find a way to support returning a plain dict instead
    # return {"organization_id": lastid}
    return ResponseAddLastID(id=lastid)


def HandleOrganizationUpdate(p: Organization):
    """Update name/desc/domain of the organization identified by ``p.id``.

    If the full update trips the unique constraint on `name`
    (DuplicateError), it retries with a reduced statement that leaves the
    name untouched and only updates desc and domain.  Always returns None.
    """
    sql = """update %s set `name`='%s', `desc`='%s',`domain`='%s' where `id`=%d""" % (
        TBL_NAME_ORGANIZATION, p.name, p.desc, p.domain, p.id
    )
    try:
        _, affectrows = mysqlconnector.execute(sql, is_insert=False)
    except DuplicateError as e:
        # name_unique error, only need update domain and desc field
        sql = """update %s set `desc`='%s',`domain`='%s' where `id`=%d""" % (
            TBL_NAME_ORGANIZATION, p.desc, p.domain, p.id
        )
        _, affectrows = mysqlconnector.execute(sql, is_insert=False)
    print(f"sql execute update, affectrows=%d" % (affectrows))
    return None


def HandleNodeSelectInfo():
    """List every node with its organization name, adding an "org/ip"
    composite ``detail`` per row; ordered by organization then node id."""
    sql = """select 
        a.id, a.ip, a.organization_id, b.name 
        from %s a left join %s b on a.organization_id=b.id
        order by a.organization_id, a.id""" % (
        TBL_NAME_NODE, TBL_NAME_ORGANIZATION
    )
    print(sql)
    rows = mysqlconnector.query(sql)
    for row in rows:
        row["detail"] = "%s/%s" % (row["name"], row["ip"])
    return rows


def HandleNodeAdd(p: Node):
    """Insert a new node (ip + owning organization) and return its row id.

    NOTE(review): values are interpolated directly into the SQL string —
    parameterize once the connector supports it.
    """
    sql = """insert into %s(`ip`, `organization_id`) values('%s', %d)""" % (
        TBL_NAME_NODE, p.ip, p.organization_id
    )
    print(sql)
    new_id, affected = mysqlconnector.execute(sql)
    print("sql execute insert, lastid=%d, affectrows=%d" % (new_id, affected))
    return ResponseAddLastID(id=new_id)


def get_now_time():
    """Current local time formatted as ``YYYY-mm-dd HH:MM:SS``."""
    return f"{datetime.now():%Y-%m-%d %H:%M:%S}"

def HandleNodeRefresh(p: PostBodyNodeRefresh):
    """Refresh the hardware details of one node and persist them.

    Domain resolution order: the request value ``p.domain`` first, then the
    owning organization's configured domain; if neither is set a
    VALUE_MISS_DOMAIN_VALUE error is raised.
    Returns the refreshed Node built from the agent response.
    """
    # 1. Determine the externally reachable domain of the agent node
    domain_uri = p.domain
    if not domain_uri:
        domain_uri = queryOrganizationDomain(p.organization_id)
    if not domain_uri:
        raise_exception((ER.VALUE_MISS_DOMAIN_VALUE, "The node domain url is not set."))

    # 2. Query the agent proxy service for the node's compute resources
    node_dict = getAgentNodeResource(ip=p.ip, domain=domain_uri)
    node_dict["organization_id"] = p.organization_id
    node_dict["refresh_time"] = get_now_time()
    node = Node(**node_dict)

    # 3. Flush the returned details back into the node table
    sql = """update %s set `hostname`='%s', `os`='%s'
        , `cpu_cores`=%d, `cpu_name`='%s', `cpu_memory`=%d
        , `gpu_cores`=%d, `gpu_name`='%s', `gpu_memory`=%d
        , `refresh_time`='%s'
        where ip='%s' and organization_id=%d""" % (
        TBL_NAME_NODE,
        node.hostname, node.os,
        node.cpu_cores, node.cpu_name, node.cpu_memory,
        node.gpu_cores, node.gpu_name, node.gpu_memory,
        node_dict["refresh_time"],
        p.ip, p.organization_id
    )
    print(sql)
    _, affectrows = mysqlconnector.execute(sql, is_insert=False)
    print(f"sql execute update, affectrows=%d" % (affectrows))
    return node  # node.model_dump(exclude={"id", "create_time", "update_time"}) TODO (translated): figure out how to fix the exclude error


def queryOrganizationDomain(id: int) -> str:
    """Look up the ``domain`` column for organization *id*.

    Fixed: an unknown id used to raise IndexError on the empty result set;
    None is returned instead — the caller (HandleNodeRefresh) already treats
    any falsy domain as "not configured".
    """
    sql = "select domain from %s where id=%d" % (TBL_NAME_ORGANIZATION, id)
    print(sql)
    ret = mysqlconnector.query(sql, is_dict=False)
    print("ret: ", ret)
    return ret[0][0] if ret else None


def getAgentNodeResource(ip: str, domain: str) -> dict:
    '''Return the compute-resource details of an agent node as a dict.

    TODO: no real implementation yet — dummy data is returned for now, and
    the *domain* argument is unused until the real agent call is wired in.

    Fixed: the dummy dict used the misspelled key "cpu_momery", so the
    cpu_memory value never reached the Node model built from this dict; the
    return annotation also claimed Node although a plain dict is returned.
    '''
    dummy_data_dict = {
        "ip": ip,
        "hostname": "DummyY_%s" % ip,
        "os": "Centos7.9",
        "cpu_cores": 16,
        "cpu_name": "Intel xxx",
        "cpu_memory": 32,
        "gpu_cores": 2,
        "gpu_name": "A100",
        "gpu_memory": 48
    }
    return dummy_data_dict


def HandleDataInfo(page_id: int, page_size: int, page_total: int):
    """Paged listing of data sources joined with their organization name.

    page_total == 0 means "unknown": count_total/page_total are then
    recomputed from the data_source table; otherwise count_total stays 0 and
    the given page_total is echoed back.  Returns a ResponseDataInfo.
    """
    count_total = 0
    if page_total == 0:
        count_total, page_total = getPageTotal(count_condition="id", table_name=TBL_NAME_DATA_SOURCE,
                                               page_size=page_size)
    print(f"after_args_check:page_id=%s,page_size=%d,page_total=%d" % (page_id, page_size, page_total))
    sql = f"""select a.*, b.name as organization
        from data_source a left join organization b on a.organization_id=b.id
        order by a.id
        limit %d, %d
    """ % ((page_id - 1) * page_size, page_size)
    print(sql)

    records = mysqlconnector.query(sql)

    data = {"count_total": count_total, 
            "page_id": page_id, 
            "page_size": page_size, 
            "page_total": page_total,
            "info": []}
    for r in records:
        print("one record: ", r)
        iteminfo = ItemData(**r)
        data["info"].append(iteminfo)
    print("response.data:", data)
    return ResponseDataInfo(**data)


# TODO (translated) one organization -> many nodes; one node can be configured with multiple data_source entries
def HandleDataSelectInfo():
    """List every data source with its organization, adding an
    "organization/ip/name" composite ``detail`` per row.

    TODO(review, translated from the original note): consider returning the
    raw rows without a response model, and aliasing the SQL column as
    organization_name.
    """
    sql = f"""select a.id,a.name,a.ip,b.name as organization
        from data_source a left join organization b on a.organization_id=b.id
        order by b.id, a.ip, a.id"""
    print(sql)
    rows = mysqlconnector.query(sql)
    for row in rows:
        row["detail"] = "%s/%s/%s" % (row["organization"], row["ip"], row["name"])
    return rows


def HandleDataSearch(name: str):
    """Fuzzy-search data sources whose name contains *name*.

    NOTE(review): *name* is interpolated into the LIKE pattern unescaped —
    SQL-injection-prone; parameterize once the connector supports it.
    """
    sql = f"""select a.id, a.name, a.ip, a.organization_id, b.name as organization_name
        from data_source a left join organization b on a.organization_id=b.id
        where a.name like '%%%s%%'
        order by b.id, a.ip, a.id""" % (name)
    print(sql)
    return mysqlconnector.query(sql)


def HandleDataAdd(data: DataSource):
    """Register a new data source row and return its auto-generated id.

    TODO(review): the card_id lookup via getAgentDataResource was previously
    sketched here but is disabled; decide how card_id should be populated
    before re-enabling it.
    """
    sql = "INSERT INTO data_source (`ip`,`organization_id`,`name`,`desc`,`data_path`) " \
        "VALUES('%s',%d,'%s','%s','%s')" % (
        data.ip, data.organization_id, data.name, data.desc, data.data_path
    )
    print(sql)
    new_id, affected = mysqlconnector.execute(sql)
    print("sql execute insert, lastid=%d, affectrows=%d" % (new_id, affected))
    return ResponseAddLastID(id=new_id)


# def getAgentDataResource(path: str):
#     # 如果有图片保存到cdn/image目录下，并返回图片名，
#     files= os.listdir(path)
#     print(files)
#     for filename in files:
#         if filename.endswith(('jpg','png','jpeg','gif')):
#             print(filename)
#             shutil.copy('%s/%s'%(path,filename),'%s/cdn/image'%ROOT_DIR)
#             sql = "INSERT INTO data_card (`visual`,`create_time`,`update_time`) VALUES('%s',NOW(),NOW())"%filename
#             lastid, affectrows = mysqlconnector.execute(sql)
#             return lastid
#     else:
#         return None

def getAgentDataResource(path: str,ip: str):
    """Fetch the first image found under *path* on remote host *ip* via SFTP.

    The image is copied into the local ``cdn/image`` directory, a data_card
    row referencing it is inserted, and the new row id is returned.  Returns
    None when no image exists under *path*.

    Fixes:
    - the SSH transport / SFTP session stayed open on the early return and on
      any exception; cleanup now happens in ``finally`` (closing the
      transport also closes its SFTP channel);
    - the suffix 'jpeg' lacked its dot, so e.g. '.mpeg' files also matched.

    NOTE(review): the ssh user and key path are hard-coded — move them to
    configuration.
    """
    transport = paramiko.Transport(ip,22)
    try:
        private_key = paramiko.rsakey.RSAKey(filename='/home/zjlab/.ssh/id_rsa')
        transport.connect(username='zjlab',pkey=private_key)
        sftp = transport.open_sftp_client()
        for filename in sftp.listdir(path):
            if filename.endswith(('.jpg','.png','.jpeg','.gif')):
                print(filename)
                sftp.get('%s/%s'%(path,filename),'%s/cdn/image/%s'%(ROOT_DIR,filename))
                sql = "INSERT INTO data_card (`visual`,`create_time`,`update_time`) VALUES('%s',NOW(),NOW())"%filename
                lastid, affectrows = mysqlconnector.execute(sql)
                return lastid
        return None
    finally:
        transport.close()


def HandleDataCard(id: int):
    """Return the path of the first readme-like file in the data source's
    directory, raising VALUE_CARD_NOT_EXIST when none is found.

    TODO(review, from the original note): this should instead read the stored
    content from the data_card table directly.
    NOTE(review): the glob runs on the *local* filesystem although data_path
    belongs to a (possibly remote) node — confirm this is intentional.
    """
    sql = f"""select ip,name,data_path from data_source where id=%d""" % id
    print(sql)
    record = mysqlconnector.query(sql)
    print(record)
    filepath = record[0]['data_path']
    candidates = [entry for entry in glob.glob(pathname='%s/*' % filepath)
                  if fnmatch.fnmatch(entry, '*readme*')]
    if not candidates:
        raise_exception((ER.VALUE_CARD_NOT_EXIST, "The data does not have card."))
    return candidates[0]


# # TODO (translated) reserve this interface following the real logic instead of returning directly....
# def HandleDataViewer(id):
#     sql = f"""select ip,name,data_path from data_source where id=%d""" % id
#     print(sql)
#     record = mysqlconnector.query(sql)
#     print(record)
#     filepath = record[0]['data_path']
#     print(filepath)
#     files = glob.glob(pathname='%s/*.csv' % filepath)
#     if not files:
#        return ResponseDataViewer(**{"field": [], "value": [], "total": 0})
#     file= files[0]
#     print(file)
#     csv_reader = csv.reader(open(file))
#     csv_info = []
#     for row in csv_reader:
#         csv_info.append(row)
#     info = {"field": csv_info[0],
#             "value": csv_info[1: DEFAULT_DATA_VIEWER_COUNT+1],
#             "total": DEFAULT_DATA_VIEWER_COUNT}
#     return ResponseDataViewer(**info)


def HandleDataViewer(id):
    """Preview the first CSV file of data source *id* over SFTP.

    Returns a ResponseDataViewer holding the header row, up to
    DEFAULT_DATA_VIEWER_COUNT data rows, and the preview size; an empty
    response is returned when the remote directory has no ``.csv`` file.

    Fixed: the SSH transport/SFTP session leaked whenever an exception fired
    mid-read; all cleanup now happens in ``finally`` (closing the transport
    also closes the SFTP channel).

    NOTE(review): "total" always reports DEFAULT_DATA_VIEWER_COUNT even when
    the file has fewer rows — preserved from the original; confirm intended.
    """
    sql = f"""select ip,name,data_path from data_source where id=%d""" % id
    print(sql)
    record = mysqlconnector.query(sql)
    print(record)
    filepath = record[0]['data_path']
    print(filepath)
    transport = paramiko.Transport(record[0]['ip'],22)
    try:
        private_key = paramiko.rsakey.RSAKey(filename='/home/zjlab/.ssh/id_rsa')
        transport.connect(username='zjlab',pkey=private_key)
        sftp = transport.open_sftp_client()
        for filename in sftp.listdir(filepath):
            if filename.endswith('.csv'):
                break
        else:
            return ResponseDataViewer(**{"field": [], "value": [], "total": 0})
        with sftp.open(filepath+'/'+filename) as f:
            csv_reader = csv.reader(f)
            csv_info = []
            # Read the header plus at most DEFAULT_DATA_VIEWER_COUNT rows.
            for index,row in enumerate(csv_reader):
                print(row)
                csv_info.append(row)
                if index == DEFAULT_DATA_VIEWER_COUNT+1:
                    break
        info = {"field": csv_info[0],
                "value": csv_info[1: DEFAULT_DATA_VIEWER_COUNT+1],
                "total": DEFAULT_DATA_VIEWER_COUNT}
    finally:
        transport.close()
    return ResponseDataViewer(**info)


def HandleDataVisual(id):
    """Return the visualization image (if any) attached to data source *id*
    via its data_card row, wrapped in a ResponseDataVisual."""
    sql = f"""select b.visual as name from data_source a left join data_card b on a.card_id=b.id where a.id=%d""" % id
    print(sql)
    row = mysqlconnector.query(sql)[0]
    image_name = row['name']
    if image_name:
        data = {"is_image": True, "image":{"name":image_name, "url":"image/%s"%image_name}}
    else:
        data = {"is_image": False, "image": None}
    print(data)
    return ResponseDataVisual(**data)


def HandleTrainTaskInfo(page_id: int, page_size: int, page_total: int):
    """Paged listing of train tasks, each enriched with live status info and a
    pipe-separated "organization/ip" participant string.

    page_total == 0 means "unknown": count_total/page_total are recomputed
    from the train_task table.  Per-row status details are fetched via
    HandleTrainTaskStatus on a worker thread while the participant rows are
    resolved.  Returns a ResponseTrainTaskInfo.

    Fixed: the executor was sized with count_total, which raised ValueError
    (max_workers must be > 0) whenever the caller supplied page_total and
    count_total stayed 0, and the pool was never shut down; it is now sized
    from the fetched rows and managed with a context manager.
    """
    count_total = 0
    if page_total == 0:
        count_total, page_total = getPageTotal(count_condition="id", table_name=TBL_NAME_TRAIN_TASK,
                                               page_size=page_size)
    print(f"after_args_check:page_id=%s,page_size=%d,page_total=%d" % (page_id, page_size, page_total))
    sql = f"""select id,name,status,create_time,start_time,finish_time,update_time,client_ids from train_task
        order by id desc
        limit %d,%d

    """ % ((page_id - 1) * page_size, page_size)
    print(sql)
    records = mysqlconnector.query(sql)

    data = {"count_total": count_total, "page_id": page_id, "page_size": page_size, "page_total": page_total,
            "info": []}
    with ThreadPoolExecutor(max_workers=max(len(records), 1)) as pool:
        for r in records:
            # Fetch the (potentially slow) status while we resolve participants.
            task = pool.submit(HandleTrainTaskStatus, r['id'])
            print("one record: ", r)
            client_ids = r['client_ids']
            sql = "select b.name as organization,a.ip,a.id from node a left join organization b on a.organization_id=b.id where a.id in (%s)" % client_ids
            client_records = mysqlconnector.query(sql)
            print(client_records)
            organizations = ""
            for client_record in client_records:
                if not organizations:
                    organizations += '%s/%s' % (client_record['organization'], client_record['ip'])
                else:
                    organizations += '|%s/%s' % (client_record['organization'], client_record['ip'])
            r['organizations'] = organizations
            r.pop('client_ids')
            detail_info = task.result()
            r['status_info'] = detail_info.detail_info
            iteminfo = ItemTrainTask(**r)
            data["info"].append(iteminfo)
    print("response.data:", data)
    return ResponseTrainTaskInfo(**data)


def HandleTrainTaskQuery(id: int):
    """Full detail view of one train task.

    Joins node/organization twice (server node and label node) plus the model
    table, then expands the comma-separated client_ids / data_ids columns
    into "organization/ip" and "organization/name" display lists.  The JSON
    stored in `image` is decoded into a dict.
    Returns a ResponseTrainTaskDetail.
    """
    sql = f"""select a.id,a.name,a.status,a.arch,a.topology,a.client_ids,a.data_ids,a.create_time,a.image,
        a.start_time,a.finish_time,a.update_time,d.name as server,b.ip as server_ip,f.name as label,e.ip as 
        label_ip,c.name as model from train_task a left join node b on a.server_id=b.id  left join 
        organization d on b.organization_id=d.id left join model c on a.model_id=c.id left join 
        node e on a.label_id=e.id left join organization f on e.organization_id=f.id where a.id=%d""" % id 
    print(sql)
    records = mysqlconnector.query(sql)
    r = records[0]
    # `image` is stored as a JSON string ({"name": ..., "url": ...}); decode it.
    if r['image']:
        image_dict = json.loads(r['image'])
        r['image'] = image_dict
    print(r['image'])


    print("one record: ", r)
    client_ids = r['client_ids']
    sql = "select b.name as organization,a.ip from node a left join organization b on a.organization_id=b.id where a.id in (%s)" % client_ids
    client_records = mysqlconnector.query(sql)
    data_ids = r['data_ids']
    sql = "select a.name ,b.name as organization from data_source a left join organization b on a.organization_id =b.id where a.id in (%s)" % data_ids
    data_records = mysqlconnector.query(sql)
    clients = []
    datas = []
    for client_record in client_records:
        clients.append('%s/%s' % (client_record['organization'], client_record['ip']))
    for data_record in data_records:
        datas.append('%s/%s' % (data_record['organization'], data_record['name']))
    print(r)
    r['clients'] = clients
    r['data'] = datas
    # Collapse server/label node info into "organization/ip" display strings.
    r['server'] = '%s/%s' % (r['server'], r['server_ip'])
    r['label'] = '%s/%s' % (r['label'], r['label_ip'])
    return ResponseTrainTaskDetail(**r)


def HandleTrainTaskAdd(data: TrainTask):
    """Create a train task row and render its topology diagram.

    client_ids / data_ids are flattened into comma-separated strings for
    storage.  A PNG of the server/client topology is drawn to
    ``cdn/train_task/<name>.png`` and referenced from the task's `image`
    JSON.  Returns the new row id wrapped in a ResponseAddLastID.

    NOTE(review): plt_topology is called with clients[0] and clients[1], so
    this assumes exactly two participating clients — an IndexError will occur
    for fewer; confirm before allowing other client counts.
    """
    client_ids = ','.join([str(client_id) for client_id in data.client_ids])
    data_ids = ','.join([str(data_id) for data_id in data.data_ids])
    # Render and save the topology diagram
    sql = "select b.name as organization,a.ip from node a left join organization b on a.organization_id=b.id where a.id in (%s)" % client_ids
    client_records = mysqlconnector.query(sql)
    clients = []
    for client_record in client_records:
        clients.append('%s/%s' % (client_record['organization'], client_record['ip']))
    sql = "select b.name as server,a.ip from node a left join organization b on a.organization_id=b.id where a.id=%d" % data.server_id
    r = mysqlconnector.query(sql)[0]

    server = '%s/%s' % (r['server'], r['ip'])
    print(server,clients[0],clients[1],f'%s/cdn/train_task/%s.png'%(ROOT_DIR,data.name))
    plt_topology(server,clients[0],clients[1],f'%s/cdn/train_task/%s.png'%(ROOT_DIR,data.name))
    # The image reference is persisted as JSON in the `image` column.
    image = {'name':'%s.png'%data.name,'url':'train_task/%s.png'%data.name}
    image = json.dumps(image,ensure_ascii=False)
    print(image)
    sql = "INSERT INTO train_task (`name`,`arch`,`topology`,`server_id`,`client_ids`,`data_ids`,`label_id`,`model_id`,`status`,`create_time`,`image`) VALUES('%s',%d,%d,%d,'%s','%s',%d,%d,'%s',NOW(),'%s')" % (
        data.name, data.arch, data.topology, data.server_id, client_ids, data_ids, data.label_id, data.model_id,
        data.status,image)
    print(sql)
    lastid, affectrows = mysqlconnector.execute(sql)
    print(f"sql execute insert, lastid=%d, affectrows=%d" % (lastid, affectrows))
    return ResponseAddLastID(id=lastid)

def HandleTrainTaskUpdate(data: TrainTask):
    """Update an existing train task row from *data* (matched by ``data.id``).
    Always returns None.

    Fixes:
    - ``','.join()`` failed on the integer lists client_ids/data_ids; the ids
      are stringified first (same as HandleTrainTaskAdd does);
    - the UPDATE statement was malformed: a comma was missing after
      ``server_id``, ``client_ids`` was interpolated unquoted, ``topology``
      quoted an integer, and ``data_ids`` used %d for a string value.
    """
    client_ids = ','.join(str(client_id) for client_id in data.client_ids)
    data_ids = ','.join(str(data_id) for data_id in data.data_ids)
    sql = """update %s set `arch`=%d, `topology`=%d, `server_id`=%d, `client_ids`='%s', `data_ids`='%s', `label_id`=%d, `model_id`=%d
    where `id`=%d""" % (
        TBL_NAME_TRAIN_TASK, data.arch, data.topology, data.server_id, client_ids, data_ids, data.label_id,
        data.model_id, data.id)
    _, affectrows = mysqlconnector.execute(sql, is_insert=False)
    print(f"sql execute update, affectrows=%d" % (affectrows))
    return None
    

def HandleTrainTaskStart(id: int):
    """Kick off training for task *id* via the (demo, hard-coded) launcher
    endpoint, then mark the row as 'training'.  Always returns None.

    Fixed: raise_exception was being called with two positional arguments and
    its return value re-raised; everywhere else in this module it takes a
    single (error_code, message) tuple and raises itself (see
    HandleNodeRefresh / HandleDataCard), so the call is normalized to that.
    """
    try:
        # TODO: the launcher URL and job name are hard-coded for the demo.
        res = requests.post(url='http://192.168.88.112:8000/launch_job', data={"name": "mnist"},timeout=5).json()
        print(res)
    except Exception as e:
        print(e)
        raise_exception((ER.TRAIN_TASK_START_ERROR,
                         "Unable to access URL"))
    sql = """update train_task set `start_time` = NOW(),`status`='training' where id = %d""" % ( id)
    _, affectrows = mysqlconnector.execute(sql, is_insert=False)
    print(f"sql execute update, affectrows=%d" % (affectrows))
    return None


def HandleTrainTaskStatus(id: int):
    """Poll and persist the status of train task *id*.

    'unstart' and 'finished' rows are answered straight from the database
    with canned detail blocks.  Otherwise the training service is polled: an
    empty answer, or one whose start time falls outside the expected window
    (i.e. it belongs to a different job), marks the task finished; a live
    answer updates the finish/update timestamps accordingly.
    Returns a ResponseTrainTaskStatus.

    NOTE(review): the job endpoint/job_id are hard-coded demo values, and the
    "finsh_time" key spelling is shared with the response model — do not
    change one without the other.
    """
    sql = f"""select * from train_task where id = %d""" % id
    print(sql)
    record = mysqlconnector.query(sql)[0]
    print(record)
    if record['status'] == "unstart":
        return ResponseTrainTaskStatus(**{'status': record['status'], "detail_info": {
            "job_id": "job-mnist-lr-server-service",
            "start_time": record['start_time'],
            "finsh_time": record['finish_time'],
            "total_round": 0,
            "current_round": 0,
            "finish": True,
            "metric_url": "stdout"
        }, 'id': id})
    if record['status'] == "finished":
        return ResponseTrainTaskStatus(**{'status': record['status'], "detail_info": {
            "job_id": "job-mnist-lr-server-service",
            "start_time": record['start_time'],
            "finsh_time": record['finish_time'],
            "total_round": 1,
            "current_round": 1,
            "finish": True,
            "metric_url": "stdout"
        }, 'id': id})
    if record['status'] == 'error':
        # TODO: error handling
        pass
    start_time = record['start_time']
    try:
        res = requests.get(url='http://192.168.88.112:8000/job?job_id=job-mnist-lr-server-service',timeout=5).json()
    except Exception as e:
        # TODO: handle the unreachable-network case properly
        print(e)
        res = {}
    print("+++")
    if not res:
        print("+++")
        sql = """update train_task set `finish_time` = NOW(), `status` = 'finished',`update_time`=NOW() where id = %d
                       """ % id
        _, affectrows = mysqlconnector.execute(sql, is_insert=False)
        return ResponseTrainTaskStatus(**{'status': 'finished', "detail_info": {
            "job_id": "job-mnist-lr-server-service",
            "start_time": record['start_time'],
            "finsh_time": record['finish_time'],
            "total_round": 1,
            "current_round": 1,
            "finish": True,
            "metric_url": "stdout"
        }, 'id': id})
    # Check whether the reported start time falls in the expected window: if
    # so it is this task's job, otherwise the service is running some other
    # task.  (The original note says 60s but the code allows 100s — confirm.)
    real_start_time = datetime.strptime(res['start_time'],"%Y-%m-%d %H:%M:%S")
    print(real_start_time)
    # NOTE(review): presumably the DB driver returns start_time as a naive
    # datetime here — confirm; a str would make this comparison raise.
    database_start_time = record["start_time"]
    if not database_start_time < real_start_time < database_start_time + timedelta(seconds=100):


        sql = """update train_task set `finish_time` = NOW(), `status` = 'finished',`update_time`=NOW() where id = %d
                       """ % id
        _, affectrows = mysqlconnector.execute(sql, is_insert=False)
        return ResponseTrainTaskStatus(**{'status': 'finished', "detail_info": {
            "job_id": "job-mnist-lr-server-service",
            "start_time": record['start_time'],
            "finsh_time": record['finish_time'],
            "total_round": 1,
            "current_round": 1,
            "finish": True,
            "metric_url": "stdout"
        }, 'id': id})
    if res['finish']:
        sql = """update train_task set `finish_time` = '%s', `status` = 'finished',`update_time`=NOW() where `id` = %d
        """ % (res['end_time'],id)
        _, affectrows = mysqlconnector.execute(sql, is_insert=False)
        return ResponseTrainTaskStatus(**{'status': 'finished', "detail_info": res, 'id': id})
    sql = """update train_task set `update_time` = NOW() where `id` = %d""" % id
    _, affectrows = mysqlconnector.execute(sql, is_insert=False)
    return ResponseTrainTaskStatus(**{'status': 'training', "detail_info": res, 'id': id})
    
    
def HandleModelSelectInfo():
    """Return id/name of every trainable model (type=0), ordered by id."""
    return mysqlconnector.query(f"""select id,name from model where type=0 order by id""")


def HandleAnalysisAlgorithm():
    """Return id/name of every analysis algorithm (model type=1), ordered by id."""
    return mysqlconnector.query(f"""select id, name from model where type=1 order by id """)


# async def HandleAnalysisSearch(p: PostBodyAnalysisSearch):
#     # TODO: 该远程命令临时写死，后续可以通过web前端传递
#     remote_cli = 'cd ~/workspace/analysis-tee-scripts/ && python3 ~/workspace/analysis-tee-scripts/analysis_search.py'

#     from .ssh import ssh_execute_command
#     # 由于远程命令执行耗时，此处进行异步处理
#     result = await ssh_execute_command(
#         hostname=config_manager.get_value("ssh_tee", "hostname"),
#         username=config_manager.get_value("ssh_tee", "username"),
#         # 如果已经配置了免密登录，其实可以不需要密码
#         password=None if config_manager.get_value("ssh_tee", "password")=="None" \
#             else config_manager.get_value("ssh_tee", "password"),
#         command=remote_cli,
#     )
#     print("remote_ssh_result:", result)
#     # TODO 处理远程执行结果
#     job_id = "mysql_protein_demo_search"
#     analysis_job_task[job_id]["start_time"] = get_now_time()  # 暂时直接从内存上修改时间
#     analysis_job_task[job_id]["finished_time"] = None
#     analysis_job_task[job_id]["status"] = "unstart"
#     print(analysis_job_task[job_id])
#     return ResponseAsyncExecuteID(job_id=job_id)


# async def HandleAnalysisExecute(p: PostBodyAnalysisExecute):
#     # TODO: 该远程命令临时写死，后续可以通过web前端传递
#     remote_cli = 'cd ~/yemd/analysis-tee-scripts/ && python3 ~/yemd/analysis-tee-scripts/analysis_execute_cli.py'

#     from .ssh import ssh_execute_command
#     # 由于远程命令执行耗时，此处进行异步处理
#     result = await ssh_execute_command(
#         hostname=config_manager.get_value("ssh_tee", "hostname"),
#         username=config_manager.get_value("ssh_tee", "username"),
#         # 如果已经配置了免密登录，其实可以不需要密码
#         password=None if config_manager.get_value("ssh_tee", "password")=="None" \
#             else config_manager.get_value("ssh_tee", "password"),
#         command=remote_cli,
#     )
#     print("remote_ssh_result:", result)
#     # TODO 处理远程执行结果
#     job_id = "mybusybox-csv"
#     analysis_job_task[job_id]["start_time"] = get_now_time()  # 暂时直接从内存上修改时间
#     analysis_job_task[job_id]["finished_time"] = None
#     analysis_job_task[job_id]["status"] = "unstart"
#     print(analysis_job_task[job_id])
#     return ResponseAsyncExecuteID(job_id=job_id)

async def HandleAnalysisSearch(p: PostBodyAnalysisSearch):
    """Run the encrypted-search script on the remote TEE host and mark the
    in-memory demo job as (re)started; returns its job id.

    NOTE(review): this sets "finished_time" while the registry entry uses the
    key "finish_time" — preserved as-is; confirm which spelling is intended.
    """
    remote_cli = {
        "host": "ssh root@10.109.44.1",
        "cli": f"mysql -uroot -p; %s" % p.script
    }
    ssh_cli = """ssh %s "%s;" """ % (
        remote_cli["host"], remote_cli["cli"]
    )
    # Remote execution is slow, so it is awaited asynchronously.
    result = await remote_ssh_execute_script(ssh_cli)
    print("remote_ssh_result:", result)
    job_id = "mysql_protein_demo_search"
    # TODO act on the remote execution result
    job = analysis_job_task[job_id]
    job["start_time"] = get_now_time()  # temporary: mutate the in-memory record directly
    job["finished_time"] = None
    job["status"] = "unstart"
    print(job)
    return ResponseAsyncExecuteID(job_id=job_id)


async def HandleAnalysisExecute(p: PostBodyAnalysisExecute):
    """Launch the encrypted GWAS analysis pod on the remote TEE host and mark
    the in-memory demo job as (re)started; returns its job id.

    Fixed (consistency): the ssh host was written out twice — once in
    remote_cli["ssh"] and once hard-coded inside ssh_cli; the command is now
    assembled from remote_cli so the host lives in a single place (the
    resulting command string is unchanged).
    """
    analysis_result = {"directory": "/root/hail/gwas_results"}  # TODO temporary hard-coded value; the TEE server should supply this later, protocol TBD
    analysis_tasks = {"task": "gwas", "pod_name":"mybusybox-csv", "pod_config": "/host/template/gwas.yaml"} # TODO hard-coded for now
    remote_cli = {
        "ssh": "ssh root@10.109.44.1",
        "delete": f"kubectl delete pod %s" % analysis_tasks["pod_name"],
        "apply": f"kubectl apply -f %s" % analysis_tasks["pod_config"],
        "status": f"kubectl get pod %s" % analysis_tasks["pod_name"]
    }
    ssh_cli = """%s "%s; %s; %s" """ % (
        remote_cli["ssh"], remote_cli["delete"], remote_cli["apply"], remote_cli["status"]
    )
    # Remote ssh execution is slow, so it is awaited asynchronously to free
    # the backend request worker first.
    result = await remote_ssh_execute_script(ssh_cli)
    print("remote_ssh_result:", result)
    # TODO act on the remote execution result
    job_id = "mybusybox-csv"
    analysis_job_task[job_id]["start_time"] = get_now_time()  # temporary: mutate the in-memory record directly
    analysis_job_task[job_id]["finished_time"] = None  # NOTE(review): registry key is "finish_time" — this adds a new key; confirm intended
    analysis_job_task[job_id]["status"] = "unstart"
    print(analysis_job_task[job_id])
    return ResponseAsyncExecuteID(job_id=job_id)



async def remote_ssh_execute_script(cli: str):
    """Stub: execute shell command *cli* on a remote host over ssh.

    Currently only logs the command and returns None — the real asynchronous
    ssh execution is still TODO.
    """
    print("cli:", cli)
    return None


'''
临时写死job_dict任务列表，等后续落盘存储表
'''
# (Translated) Temporary hard-coded in-memory job registry, to be replaced by
# a persisted table later.  Keyed by job_id; mutated in place by the analysis
# handlers above (HandleAnalysisSearch/Execute/Status/Callback).
analysis_job_task = {
    "mysql_protein_demo_search": {
        "job_id": "mysql_protein_demo_search",
        "type": "search",
        "token": "zjlab/ip/protein_demo/1", # format: organization/ip/data_name/data_id
        "status": "unstart",
        "runtime": 0,     # job running time (seconds)
        "start_time": None,
        "finish_time": None
    },
    "mybusybox-csv": {
        "job_id": "mybusybox-csv",
        "type": "analysis",
        "token": "gwas",       # format: name of the encrypted-analysis algorithm
        "status": "unstart",
        "runtime": 0,  
        "start_time": None,
        "finish_time": None
    }
}

def HandleAnalysisStatus(job_id: str):
    """Poll the (in-memory) status of analysis job *job_id*.

    Fixed: the registry entry was printed before the existence check, so an
    unknown job_id raised KeyError instead of the intended
    VALUE_ANALYSIS_JOB_ID_NOT_EXIST error; the membership test now runs
    first.  Also normalized ``raise raise_exception(...)`` to a bare call,
    matching the rest of the module (raise_exception raises itself).
    """
    print(job_id)
    if job_id not in analysis_job_task:
        raise_exception((
            ER.VALUE_ANALYSIS_JOB_ID_NOT_EXIST,
            f"Input value job_id=%s is not exist." % job_id
        ))
    print(analysis_job_task[job_id])
    # TODO temporary status handling; replace once the ssh command flow is done
    start_time = analysis_job_task[job_id]["start_time"]
    if start_time is None:
        raise_exception((
            ER.JOB_NOT_START, 
            f"Input job_id=%s is not start, please start job first."%job_id
        ))
    analysis_job_task[job_id]["status"] = "running"
    delta_ts = compute_job_time_delta(start_time)
    analysis_job_task[job_id]["runtime"] = delta_ts
    # Demo heuristic: after 8s the job is declared finished and the runtime is
    # pinned to 60 — TODO replace with the real job status.
    if delta_ts >= 8:
        analysis_job_task[job_id]["status"] = "finished"
        analysis_job_task[job_id]["finish_time"] = get_now_time()
        analysis_job_task[job_id]["runtime"] = 60
    print(analysis_job_task[job_id])
    return ResponseAsyncExecuteStatus(**analysis_job_task[job_id])


def compute_job_time_delta(start_time: str) -> int:
    """Return the whole seconds elapsed since *start_time*
    (formatted "%Y-%m-%d %H:%M:%S", local time).

    Fixed: ``timedelta.seconds`` only covers the intra-day remainder
    (0..86399) and silently drops whole days; ``total_seconds()`` is used
    instead.
    """
    start_ts = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
    now_ts = datetime.now()
    delta_ts = int((now_ts - start_ts).total_seconds())
    print(f"delta_ts:%d, start_ts:%s, now_ts:%s" % (delta_ts, start_ts, now_ts))
    return delta_ts



def HandleAnalysisCallback(job_id: str):
    """Return the result payload of a finished analysis job, dispatching on
    its type (search -> CSV result, analysis -> result images).

    Fixed: the registry entry was printed before the existence check, so an
    unknown job_id raised KeyError instead of the intended error; the
    membership test now runs first.  ``raise raise_exception(...)`` was also
    normalized to a bare call, matching the rest of the module.
    """
    print(job_id)
    if job_id not in analysis_job_task:
        raise_exception((
            ER.VALUE_ANALYSIS_JOB_ID_NOT_EXIST,
            f"Input value job_id=%s is not exist." % job_id 
        ))
    print(analysis_job_task[job_id])
    if analysis_job_task[job_id]["status"] != "finished":
        raise_exception((
            ER.JOB_ERROR,
            f"Input job_id=%s cannot do job callback now. its' status is %s" % (
                job_id,
                analysis_job_task[job_id]["status"]
            )
        ))
    # Dispatch on the job type to the matching callback.
    if analysis_job_task[job_id]["type"] == ANALYSIS_ACTION_SEARCH:
        return callback_analysis_search()
    elif analysis_job_task[job_id]["type"] == ANALYSIS_ACTION_ANALYSIS:
        return callback_analysis_execute(analysis_job_task[job_id]["token"].lower())
    else:
        raise_exception((
            ER.VALUE_ANALYSIS_ACTION_NOT_EXIST,
            f"the job type=%s is not exist." % analysis_job_task[job_id]["type"]
        ))


def callback_analysis_search():
    '''Return the encrypted-search result read from the first CSV under the
    (temporarily hard-coded) protein_demo directory.

    Fixed: ``glob(...)[0]`` raised IndexError on an empty directory before the
    "file missing" check could ever run; the glob result list is now tested
    first.  The CSV file handle is also closed via a context manager instead
    of being leaked.
    '''
    filepath = "/data/data.ppml.secret/protein_demo" # cas9_data_demo.csv  temporarily hard-coded
    files = glob.glob(pathname='%s/*.csv' % filepath)
    if not files:
        raise_exception((ER.VALUE_ERROR, 'analysis_search *.csv file is not exist'))
    csv_info = []
    # TODO temporary row limit until the real execution result is wired in
    tmp_total = 5
    with open(files[0]) as fh:
        for tmp_iter, row in enumerate(csv.reader(fh)):
            if tmp_iter >= tmp_total:
                break
            csv_info.append(row)
    data = {
        "total": tmp_total,
        "field": csv_info[0],
        "value": csv_info[1:]
    }
    return ResponseAnalysisSearch(**data)


def callback_analysis_execute(token: str):
    '''Collect the result images of an encrypted analysis run.

    The analysis already ran via the analysis/execute endpoint and wrote its
    images under CDN_IMAGE_DIR/IMAGE_SUBDIR_ANALYSIS/<token>; each file is
    wrapped as an Image(name, url) and the list is returned.
    '''
    url_prefix = IMAGE_SUBDIR_ANALYSIS + token
    folder = ROOT_DIR / CDN_IMAGE_DIR / IMAGE_SUBDIR_ANALYSIS / token
    print(type(folder))
    print(folder)
    images = [Image(name=fname, url=url_prefix + "/" + fname)
              for fname in os.listdir(folder)]
    print("data_response: ", images)
    return images

def get_analysis_data_source_id(token: str):
    """Extract the trailing data_id from a token shaped like
    'organization/ip/data_name/data_id' (returns the token unchanged when it
    contains no slash)."""
    return token.rsplit('/', 1)[-1]