import pandas as pd
import tos
import os
import subprocess
import json
import time
import concurrent.futures
from datetime import datetime

# Access key / secret key: found in the user's Volcengine account console.
# SECURITY NOTE(review): credentials are hard-coded in source. Move them to
# environment variables or a secrets manager before distributing this file.
ak = "AKLTMmFmMzVmNmY5ZDEyNDNmNWEyZTU2MDM3Y2EzMDJlZTk"
sk = "WVdNeFpXRXdNekV4Tm1ZME5EZGxPRGc0TUdFNE1HWXdNV1ZqTkdNNU9HRQ=="
# endpoint/region must match the bucket's location. For example, for North
# China 2 (Beijing): endpoint tos-cn-beijing.volces.com, region cn-beijing.
endpoint = "tos-cn-beijing.ivolces.com"
region = "cn-beijing"


def get_st_100ms_path(pk_list):
    """Return the last entry of pk_list containing 'st_100ms.pkl', or '' if none."""
    matched = ""
    for candidate in pk_list:
        if "st_100ms.pkl" in candidate:
            matched = candidate
    return matched


def get_st_can_out(pk_list):
    """Return the last entry of pk_list containing 'st_can_out.pkl', or '' if none."""
    matched = ""
    for candidate in pk_list:
        if "st_can_out.pkl" in candidate:
            matched = candidate
    return matched

def uploadjson(bucket_name, object_key, jsonstr):
    """Upload the JSON string `jsonstr` to TOS as object `object_key` in `bucket_name`.

    All failures are printed and swallowed, so the upload is best-effort:
    callers get no signal on error.
    """
    try:
        client = tos.TosClientV2(ak, sk, endpoint, region)
        # To set a storage class (x-tos-storage-class) or ACL (x-tos-acl) on
        # upload, pass the corresponding parameters to put_object.
        # Custom object metadata can also be attached, e.g.:
        # result = client.put_object(bucket_name, object_key, content=content, acl=tos.ACLType.ACL_Private, storage_class=tos.StorageClassType.Storage_Class_Standard, meta={'name': '...', 'age': '20'})
        result = client.put_object(bucket_name, object_key, content=jsonstr)
        # HTTP status code of the upload response.
        print('http status code:{}'.format(result.status_code))
        # Request ID uniquely identifies this request; keep it in logs.
        print('request_id: {}'.format(result.request_id))
        # hash_crc64_ecma is the object's 64-bit CRC; usable to verify integrity.
        print('crc64: {}'.format(result.hash_crc64_ecma))
    except tos.exceptions.TosClientError as e:
        # Client-side failure: usually invalid request parameters or a network error.
        print('fail with client error, message:{}, cause: {}'.format(e.message, e.cause))
    except tos.exceptions.TosServerError as e:
        # Server-side failure: detailed diagnostics come back in the response.
        print('fail with server error, code: {}'.format(e.code))
        # The request id pinpoints the failing request; strongly recommended in logs.
        print('error with request id: {}'.format(e.request_id))
        print('error with message: {}'.format(e.message))
        print('error with http code: {}'.format(e.status_code))
        print('error with ec: {}'.format(e.ec))
        print('error with request url: {}'.format(e.request_url))
    except Exception as e:
        print('fail with unknown error: {}'.format(e))


def get_2300(targert_bucket_name, df_2300_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Find the first 100ms frame where longitudinal control reports an issue
    (ADCS12_longitudCtrlSysInfo == 1 or ADCS12_longitudDisableInfo != 0),
    then build a trigger JSON and upload it to TOS.

    vehicle_id/triggerid/daystr/hourstr/filename are pass-through metadata
    used for the JSON payload and the object key.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2300_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original fell through after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path', 'ADCS12_longitudCtrlSysInfo', 'ADCS12_longitudDisableInfo']].values.tolist()

    # Locate the first row matching the trigger condition.
    found_index = -9999
    for index, element in enumerate(df_acc_100ms_list):
        if element[2] == 1 or element[3] != 0:
            found_index = index
            break
    # Fall back to the first row when no frame matched.
    if found_index == -9999:
        start_time = df_acc_100ms_list[0][0]
        path = df_acc_100ms_list[0][1]
    else:
        start_time = df_acc_100ms_list[found_index][0]
        path = df_acc_100ms_list[found_index][1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)
    print("json_data: " + json_data)

    # "${date}"/"${hour}" are literal placeholders expected downstream -- do
    # not substitute them here.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    print("prefix2: " + prefix2)
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_2301(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Find the first 100ms frame whose ADCS12_NpilotSysInfo equals any of
    0-1 or 3-12, then build a trigger JSON and upload it to TOS.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path', 'ADCS12_NpilotSysInfo']].values.tolist()

    found_index = -9999
    for index, element in enumerate(df_acc_100ms_list):
        # Equivalent to the original chain of 13 equality comparisons.
        if element[2] in (0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12):
            found_index = index
            break
    # Fall back to the first row when no frame matched.
    if found_index == -9999:
        start_time = df_acc_100ms_list[0][0]
        path = df_acc_100ms_list[0][1]
    else:
        start_time = df_acc_100ms_list[found_index][0]
        path = df_acc_100ms_list[found_index][1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    print("prefix2: " + prefix2)
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_2302(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Find the first 100ms frame where NNP reports a state of interest
    (ADCS8_NNPSysState == 1, ADCS12_NNP_State_Reminder in {3, 7}, or
    ADCS12_P2N_State_Reminder == 7), then upload a trigger JSON to TOS.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path', 'ADCS8_NNPSysState', 'ADCS12_NNP_State_Reminder', 'ADCS12_P2N_State_Reminder']].values.tolist()

    found_index = -9999
    for index, element in enumerate(df_acc_100ms_list):
        if element[2] == 1 or element[3] == 3 or element[3] == 7 or element[4] == 7:
            found_index = index
            break
    # Fall back to the first row when no frame matched.
    if found_index == -9999:
        start_time = df_acc_100ms_list[0][0]
        path = df_acc_100ms_list[0][1]
    else:
        start_time = df_acc_100ms_list[found_index][0]
        path = df_acc_100ms_list[found_index][1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    print("prefix2: " + prefix2)
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_2303(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Find the first 100ms frame with a take-over request
    (ADCS8_longitudCtrlTakeOverReq == 1 or ADCS8_lateralCtrtakeove == 3),
    then upload a trigger JSON to TOS.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    # BUG FIX: the original kept the DataFrame (no .values.tolist()), so the
    # loop below iterated COLUMN NAMES and element[2] indexed characters of a
    # string; positional row indexing also failed. Convert to row lists, as
    # the sibling get_2300/get_2301/get_2302 functions do.
    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path', 'ADCS8_longitudCtrlTakeOverReq', 'ADCS8_lateralCtrtakeove']].values.tolist()

    found_index = -9999
    for index, element in enumerate(df_acc_100ms_list):
        if element[2] == 1 or element[3] == 3:
            found_index = index
            break
    # Fall back to the first row when no frame matched.
    if found_index == -9999:
        start_time = df_acc_100ms_list[0][0]
        path = df_acc_100ms_list[0][1]
    else:
        start_time = df_acc_100ms_list[found_index][0]
        path = df_acc_100ms_list[found_index][1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    print("prefix2: " + prefix2)
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_2304(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Find the first 100ms frame where ADCS12_NNP_State_Reminder is in
    {5, 6, 7} or ADCS12_NNP_RINO == 6, then upload a trigger JSON to TOS.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    # BUG FIX: added the missing .values.tolist() -- iterating the raw
    # DataFrame yields column names, not rows (see get_2300 for the pattern).
    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path', 'ADCS12_NNP_State_Reminder', 'ADCS12_NNP_RINO']].values.tolist()

    found_index = -9999
    for index, element in enumerate(df_acc_100ms_list):
        if element[2] == 5 or element[2] == 6 or element[2] == 7 or element[3] == 6:
            found_index = index
            break
    # Fall back to the first row when no frame matched.
    if found_index == -9999:
        start_time = df_acc_100ms_list[0][0]
        path = df_acc_100ms_list[0][1]
    else:
        start_time = df_acc_100ms_list[found_index][0]
        path = df_acc_100ms_list[found_index][1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    print("prefix2: " + prefix2)
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_2305(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Take the 100th frame (index 99) of the pickle as the trigger point and
    upload a trigger JSON to TOS.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path']]
    # NOTE(review): assumes the pickle always has at least 100 rows -- an
    # IndexError is raised otherwise; confirm against the data producer.
    df_100_list = df_acc_100ms_list.iloc[99].values.tolist()
    start_time = df_100_list[0]
    path = df_100_list[1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    print("prefix2: " + prefix2)
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_3307(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Take the 100th frame (index 99) of the pickle as the trigger point and
    upload a trigger JSON to TOS (same logic as get_2305).
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path']]
    # NOTE(review): assumes the pickle always has at least 100 rows -- an
    # IndexError is raised otherwise; confirm against the data producer.
    df_100_list = df_acc_100ms_list.iloc[99].values.tolist()
    start_time = df_100_list[0]
    path = df_100_list[1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    print("prefix2: " + prefix2)
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_3308(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Find the first 100ms frame where AEB braking fired
    (ADCS2_AEBPartialBrake == 1 or ADCS2_AEBFullBrake == 1), then upload a
    trigger JSON to TOS.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    # BUG FIX: added the missing .values.tolist() -- iterating the raw
    # DataFrame yields column names, not rows (see get_2300 for the pattern).
    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path', 'ADCS2_AEBPartialBrake', 'ADCS2_AEBFullBrake']].values.tolist()

    found_index = -9999
    for index, element in enumerate(df_acc_100ms_list):
        if element[2] == 1 or element[3] == 1:
            found_index = index
            break
    # Fall back to the first row when no frame matched.
    if found_index == -9999:
        start_time = df_acc_100ms_list[0][0]
        path = df_acc_100ms_list[0][1]
    else:
        start_time = df_acc_100ms_list[found_index][0]
        path = df_acc_100ms_list[found_index][1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    print("prefix2: " + prefix2)
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_2309(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Find the first 100ms frame where ADCS8_FCWStatus rises to 2 from 0 or 1
    (an FCW activation edge), then upload a trigger JSON to TOS.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    # BUG FIX: added the missing .values.tolist() -- iterating the raw
    # DataFrame yields column names, and df[0][2] below would raise a
    # KeyError on a DataFrame (see get_2300 for the correct pattern).
    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path', 'ADCS8_FCWStatus']].values.tolist()

    found_index = -9999
    # Track the previous frame's FCW status to detect the rising edge.
    ADCS8_FCWStatus_last = df_acc_100ms_list[0][2]
    for index, element in enumerate(df_acc_100ms_list):
        # Equivalent to: (==2 and last==0) or (==2 and last==1).
        if element[2] == 2 and ADCS8_FCWStatus_last in (0, 1):
            found_index = index
            break
        ADCS8_FCWStatus_last = element[2]
    # Fall back to the first row when no frame matched.
    if found_index == -9999:
        start_time = df_acc_100ms_list[0][0]
        path = df_acc_100ms_list[0][1]
    else:
        start_time = df_acc_100ms_list[found_index][0]
        path = df_acc_100ms_list[found_index][1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    print("prefix2: " + prefix2)
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_2311(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Find the first 100ms frame where NPilot is active (== 2) while
    ADCS8_NNPSysState has just left state 2 (falling edge), then upload a
    trigger JSON to TOS.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    # BUG FIX: added the missing .values.tolist() -- iterating the raw
    # DataFrame yields column names, and df[0][3] below would raise a
    # KeyError on a DataFrame (see get_2300 for the correct pattern).
    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path', 'ADCS8_NPilot_SysState', 'ADCS8_NNPSysState']].values.tolist()

    found_index = -9999
    # Track the previous frame's NNP state to detect it dropping out of 2.
    ADCS8_NNPSysState_last = df_acc_100ms_list[0][3]
    for index, element in enumerate(df_acc_100ms_list):
        if (element[2] == 2) and (element[3] != 2 and ADCS8_NNPSysState_last == 2):
            found_index = index
            break
        ADCS8_NNPSysState_last = element[3]
    # Fall back to the first row when no frame matched.
    if found_index == -9999:
        start_time = df_acc_100ms_list[0][0]
        path = df_acc_100ms_list[0][1]
    else:
        start_time = df_acc_100ms_list[found_index][0]
        path = df_acc_100ms_list[found_index][1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_2313(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Find the first 100ms frame where the hands-off release warning fires
    (ADAS8_LateralCtrHandOffReleasewarning == 1), then upload a trigger JSON
    to TOS.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    # BUG FIX: added the missing .values.tolist() -- iterating the raw
    # DataFrame yields column names, not rows (see get_2300 for the pattern).
    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path', 'ADAS8_LateralCtrHandOffReleasewarning']].values.tolist()

    found_index = -9999
    for index, element in enumerate(df_acc_100ms_list):
        if element[2] == 1:
            found_index = index
            break
    # Fall back to the first row when no frame matched.
    if found_index == -9999:
        start_time = df_acc_100ms_list[0][0]
        path = df_acc_100ms_list[0][1]
    else:
        start_time = df_acc_100ms_list[found_index][0]
        path = df_acc_100ms_list[found_index][1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_2314(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Find the first 100ms frame where either ACC jumps from state 2 to 6, or
    ACC is in state 2 while NPilot drops from 2 into {1, 4, 5}, then upload a
    trigger JSON to TOS.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    # BUG FIX: added the missing .values.tolist() -- iterating the raw
    # DataFrame yields column names, and df[0][2] below would raise a
    # KeyError on a DataFrame (see get_2300 for the correct pattern).
    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path', 'ADCS8_ACCState', 'ADCS8_NPilot_SysState']].values.tolist()

    found_index = -9999
    # Track previous-frame states to detect the transitions.
    ADCS8_ACCState_last = df_acc_100ms_list[0][2]
    ADCS8_NPilot_SysState_last = df_acc_100ms_list[0][3]
    for index, element in enumerate(df_acc_100ms_list):
        if (ADCS8_ACCState_last == 2 and element[2] == 6) or (element[2] == 2 and (ADCS8_NPilot_SysState_last == 2 and (element[3] == 1 or element[3] == 4 or element[3] == 5))):
            found_index = index
            break
        ADCS8_ACCState_last = element[2]
        ADCS8_NPilot_SysState_last = element[3]

    # Fall back to the first row when no frame matched.
    if found_index == -9999:
        start_time = df_acc_100ms_list[0][0]
        path = df_acc_100ms_list[0][1]
    else:
        start_time = df_acc_100ms_list[found_index][0]
        path = df_acc_100ms_list[found_index][1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    uploadjson(targert_bucket_name, prefix2, json_data)


def get_2310(targert_bucket_name, df_2301_path, vehicle_id, triggerid, daystr, hourstr, filename):
    """Find the first 100ms frame where NPilot or NNP is active (state 2)
    while the driver intervenes (brake pedal applied, or torsion bar torque
    above 3), then upload a trigger JSON to TOS.
    """
    try:
        df_acc_100ms = pd.read_pickle(df_2301_path)
        print(df_acc_100ms.columns)
    except Exception as e:
        print('data report read error, ', str(e))
        # BUG FIX: the original continued after a failed read and crashed
        # with a NameError on the undefined DataFrame; stop here instead.
        return

    # BUG FIX: added the missing .values.tolist() -- iterating the raw
    # DataFrame yields column names, not rows (see get_2300 for the pattern).
    df_acc_100ms_list = df_acc_100ms[
        ['start_time_str', 'path', 'ADCS8_NPilot_SysState', 'ADCS8_NNPSysState', 'IDB1_BrakePedalApplied', 'EPS1_TorsionBarTorque']].values.tolist()

    found_index = -9999
    for index, element in enumerate(df_acc_100ms_list):
        if (element[2] == 2 or element[3] == 2) and (element[4] == 1 or element[5] > 3):
            found_index = index
            break
    # Fall back to the first row when no frame matched.
    if found_index == -9999:
        start_time = df_acc_100ms_list[0][0]
        path = df_acc_100ms_list[0][1]
    else:
        start_time = df_acc_100ms_list[found_index][0]
        path = df_acc_100ms_list[found_index][1]

    data = {
        "vehicle_id": vehicle_id,
        "triggerid": triggerid,
        "start_time": start_time,
        "day": daystr,
        "hour": hourstr,
        "path": path,
        "driver_type": "drive",
        "plat": "TDA4"
    }
    json_data = json.dumps(data, indent=None)

    # "${date}"/"${hour}" are literal placeholders expected downstream.
    prefix2 = "EP40/TDA4/SC/" + "${date}" + "/" + "${hour}" + "/" + vehicle_id + "/" + str(filename) + ".json"
    uploadjson(targert_bucket_name, prefix2, json_data)


def read_dict(source_dir, target_path):
    """Read one JSON object per line from source_dir, format its
    Tbox_TimeYHDHMS field (YYYYMMDDhhmmss) as 'YYYY-MM-DD hh:mm:ss', and
    write one {"start_time_str": ...} JSON object per line to target_path.
    """
    with open(source_dir, 'r') as src:
        with open(target_path, 'w') as dst:
            for raw_line in src:
                record = json.loads(raw_line)
                ST_Upload_100ms_t = record["ST_Upload_100ms_t"]
                # Read (and thereby require) the "path" key, matching the
                # original behavior even though the value is unused here.
                path = record["path"]

                # Reformat the 14-digit timestamp into a readable string.
                ts = str(ST_Upload_100ms_t["Tbox_TimeYHDHMS"])
                formatted = "{}-{}-{} {}:{}:{}".format(
                    ts[0:4], ts[4:6], ts[6:8], ts[8:10], ts[10:12], ts[12:14])

                out_record = {"start_time_str": formatted}
                dst.write(json.dumps(out_record, indent=None) + '\n')


def get_tos_path(bucket_name, prefix):
    """List all object keys under `prefix` in `bucket_name`, skipping keys
    whose 5th dash-separated filename field contains "3200" or "3201".

    Returns a list of matching keys; errors are printed and an empty (or
    partial) list is returned.
    """
    pathlist = []
    try:
        # TosClientV2 handles all bucket/object operations.
        client = tos.TosClientV2(ak, sk, endpoint, region)

        # Page through all objects under the prefix.
        truncated = True
        continuation_token = ''

        while truncated:
            result = client.list_objects_type2(bucket_name, prefix=prefix, continuation_token=continuation_token)
            for iterm in result.contents:
                path = str(iterm.key)

                # NOTE(review): assumes keys have at least 7 "/" segments and
                # filenames at least 5 "-" fields -- confirm key layout.
                arr = path.split("/")
                filename = arr[6]
                filearr = filename.split("-")
                file_code = filearr[4]
                if "3200" not in file_code and "3201" not in file_code:
                    # FIX: removed dead locals (file_list/file) that were
                    # computed here but never used.
                    print("path: " + path)
                    pathlist.append(path)

            truncated = result.is_truncated
            continuation_token = result.next_continuation_token
    except tos.exceptions.TosClientError as e:
        # Client-side failure: usually invalid request parameters or a network error.
        print('fail with client error, message:{}, cause: {}'.format(e.message, e.cause))
    except tos.exceptions.TosServerError as e:
        # Server-side failure: detailed diagnostics from the response.
        print('fail with server error, code: {}'.format(e.code))
        # The request id pinpoints the failing request; keep it in logs.
        print('error with request id: {}'.format(e.request_id))
        print('error with message: {}'.format(e.message))
        print('error with http code: {}'.format(e.status_code))
    except Exception as e:
        print('fail with unknown error: {}'.format(e))

    return pathlist


def getvin(bucket_name, fold):
    """List first-level "directories" under `fold` in `bucket_name` and return
    the 4th path segment (index 3) of each common prefix -- per the caller's
    layout these appear to be vehicle VIN folders (inferred from the name;
    confirm against the bucket structure).
    """
    pathlist = []
    try:
        # TosClientV2 handles all bucket/object operations.
        client = tos.TosClientV2(ak, sk, endpoint, region)
        # Page through common prefixes (delimiter "/" groups keys into folders).
        truncated = True
        continuation_token = ''
        while truncated:
            result = client.list_objects_type2(bucket_name, prefix=fold, delimiter="/",
                                               continuation_token=continuation_token, max_keys=1000)

            for prefix in result.common_prefixes:
                prefix_dir = prefix.prefix
                # NOTE(review): assumes prefixes have at least 4 "/" segments.
                dirs = prefix_dir.split('/')
                pathlist.append(dirs[3])
            truncated = result.is_truncated
            continuation_token = result.next_continuation_token

    except Exception as e:
        print('fail with unknown error: {}'.format(e))
    return pathlist


# File decryption workflow: decrypt every .bag in file_list into a
# source_json output folder next to the zip root.
def data_jiemi(file_list, vechicle_id, daystr, hourstr):
    """For each bag file path, derive an output folder from the path portion
    before "/asw/StUpload/" plus "/source_json//<bagtype>/", then run the
    external decrypter on it.

    Example input path:
    /data/code/input/v2212963022/20231227/17/fcm5uk2bc77u0nv1lv4og-EP40_TDA4_drive_sc-20231227-171028-2302.zip/asw/StUpload/20231227-171019/20231227-171019-hz_st_upload-2302.bag
    """
    for file in file_list:
        print("file: " + file)
        # The bag type is the 3rd dash-separated field of the filename
        # (e.g. "drive"); bagid is the trailing numeric code.
        bag_file_name = os.path.basename(file)
        baglist = bag_file_name.split("-")
        bagid = baglist[len(baglist) - 1].replace(".bag", "")
        bagtype = baglist[2]

        # Example output folders:
        # /data/code/input/v2305175911/20231225/19/fcm4mhvrc77u0nv1hhbqg/
        # /data/code/input/v2305175911/20231225/19/fcm4mhvrc77u0nv1hhbqg/3308/
        # /data/code/input/v2305175911/20231225/19/fcm4mhvrc77u0nv1hhbqg/3308/aggregator/
        # NOTE(review): "/source_json/" + "/" yields a double slash in the
        # path; harmless on POSIX but looks unintended -- confirm.
        outputfile = file.split("/asw/StUpload/")[0] + "/source_json/" + "/" + bagtype + "/"
        print("outputfile: " + outputfile)
        decrypter(file, outputfile, vechicle_id, daystr, hourstr)


# File decryption: run the external parser_bag binary on one encrypted bag.
def decrypter(zipfile, outputfile, vechicle_id, daystr, hourstr):
    """Decrypt `zipfile` into `outputfile` by invoking ./parser_bag.

    vechicle_id/daystr/hourstr are currently unused here.
    SECURITY NOTE(review): the command is built by string concatenation and
    run with shell=True; a crafted file path could inject shell commands.
    Prefer subprocess.run([...], shell=False) with an env= mapping.
    """
    path_exsist(outputfile)
    commands = "export LD_LIBRARY_PATH=/data/code/parser_bag/libs/:$LD_LIBRARY_PATH; cd /data/code/parser_bag;  ./parser_bag  " + zipfile + "  " + outputfile + ";"
    print("command: " + commands)
    result2 = subprocess.run(commands, shell=True, capture_output=True, text=True)
    print(result2)


# List files: recursively collect every file path under a directory tree.
def list_files_in_directory(directory):
    """Return the full paths of all files found under `directory` (recursive)."""
    collected = []
    for root, _dirs, names in os.walk(directory):
        collected.extend(os.path.join(root, name) for name in names)
    return collected


# List files: recursively collect every .pkl file path under a directory tree.
def list_pk_100ms(directory):
    """Return the full paths of all files under `directory` whose name
    contains '.pkl' (recursive).
    """
    collected = []
    for root, _dirs, names in os.walk(directory):
        collected.extend(
            os.path.join(root, name) for name in names if ".pkl" in name)
    return collected


# Extract an archive: shell out to `unzip` in the archive's own directory.
def unzip(localpath):
    """Unzip `localpath` into its containing directory via the system unzip.

    SECURITY NOTE(review): the command is built by string concatenation and
    run with shell=True; a crafted path could inject shell commands. Prefer
    subprocess.run(["unzip", localpath], cwd=directory_path) or zipfile.
    """
    directory_path = os.path.dirname(localpath)
    print("directory_path: " + directory_path)
    commands = "cd " + directory_path + "; unzip " + localpath
    # Log the exact unzip command being run.
    print("unzip 指令为： " + commands)
    result2 = subprocess.run(commands, shell=True, capture_output=True, text=True)
    print(result2)


# Ensure a directory exists, creating it (with parents) when missing.
def path_exsist(folder_path):
    """Create `folder_path` (including parents) if it does not already exist.

    Note: the function name's typo ("exsist") is kept for caller compatibility.
    """
    if os.path.exists(folder_path):
        print("目录已经存在。")
    else:
        # Not present yet -- create the whole tree.
        os.makedirs(folder_path)
        print("创建成功！")


# Download data from TOS to a local file.
def download(bucket_name, object_key, file_name, localpath):
    """Download `object_key` from `bucket_name` into `file_name`, first
    ensuring the local directory `localpath` exists.

    Errors are printed and swallowed, so the download is best-effort.
    """
    # Ensure the destination directory exists before downloading.
    print("判断文件是否存在，不存在则创建。。")
    path_exsist(localpath)
    try:
        # TosClientV2 handles all bucket/object operations.
        client = tos.TosClientV2(ak, sk, endpoint, region)
        # If file_name is a directory, the object is saved under it using
        # the object key as the file name.
        client.get_object_to_file(bucket_name, object_key, file_name)
    except tos.exceptions.TosClientError as e:
        # Client-side failure: usually invalid request parameters or a network error.
        print('fail with client error, message:{}, cause: {}'.format(e.message, e.cause))
    except tos.exceptions.TosServerError as e:
        # Server-side failure: detailed diagnostics from the response.
        print('fail with server error, code: {}'.format(e.code))
        # The request id pinpoints the failing request; keep it in logs.
        print('error with request id: {}'.format(e.request_id))
        print('error with message: {}'.format(e.message))
        print('error with http code: {}'.format(e.status_code))
    except Exception as e:
        print('fail with unknown error: {}'.format(e))


# Upload data: push a local file to TOS.
def upload(bucket_name, object_key, file_name):
    """Upload local file `file_name` to `bucket_name` under `object_key`.

    Errors are printed and swallowed, so the upload is best-effort.
    """
    try:
        # TosClientV2 handles all bucket/object operations.
        client = tos.TosClientV2(ak, sk, endpoint, region)
        # Upload the local file into the target bucket.
        # file_name must be the full local path.
        client.put_object_from_file(bucket_name, object_key, file_name)
    except tos.exceptions.TosClientError as e:
        # Client-side failure: usually invalid request parameters or a network error.
        print('fail with client error, message:{}, cause: {}'.format(e.message, e.cause))
    except tos.exceptions.TosServerError as e:
        # Server-side failure: detailed diagnostics from the response.
        print('fail with server error, code: {}'.format(e.code))
        # The request id pinpoints the failing request; keep it in logs.
        print('error with request id: {}'.format(e.request_id))
        print('error with message: {}'.format(e.message))
        print('error with http code: {}'.format(e.status_code))
    except Exception as e:
        print('fail with unknown error: {}'.format(e))


# Unix timestamp -> local-time string.
def get_time_str(secs):
    """Format the unix timestamp `secs` as 'YYYY-MM-DD HH:MM:SS' in the
    local timezone (datetime.fromtimestamp uses local time).
    """
    return datetime.fromtimestamp(secs).strftime('%Y-%m-%d %H:%M:%S')


# Read a file of concatenated pretty-printed JSON objects as string fragments.
def readJsonFileToStr(source_dir):
    """Split `source_dir` into per-object lists of whitespace-stripped line
    fragments. A line that collapses to '}{' marks the boundary between two
    objects: it closes the current fragment list with '}' and opens the next
    with '{'. Returns a list of fragment lists (the last one included as-is).
    """
    objects = []
    current = []
    with open(source_dir, 'r') as handle:
        for raw in handle:
            # Strip all spaces and the newline, matching the original exactly.
            compact = raw.replace(" ", "").replace("\n", "")
            if compact == '}{':
                # Object boundary: finish the current object, start the next.
                current.append("}")
                objects.append(current)
                current = ["{"]
            else:
                current.append(compact)
        objects.append(current)
        return objects


# write_2_local(source_100ms_dir, upload_100ms_file, object_key)
def write_2_local(source_dir, local_dir, object_key):
    """Re-parse the concatenated JSON objects in `source_dir`, tag each with
    'path' = object_key, and write them one compact JSON object per line to
    `local_dir`.
    """
    fragment_groups = readJsonFileToStr(source_dir)
    with open(local_dir, 'w') as out:
        for fragments in fragment_groups:
            # Each group is a list of stripped line fragments of one object.
            record = json.loads("".join(fragments))
            record['path'] = object_key
            out.write(json.dumps(record, ensure_ascii=False))
            out.write('\n')


def read_can_in(upload_can_dir, dict_can_dir):
    """Extract selected CAN-in signals plus a formatted timestamp from each
    JSON line in `upload_can_dir` and write one compact JSON object per line
    to `dict_can_dir`.
    """
    with open(upload_can_dir, 'r') as file:
        with open(dict_can_dir, 'w') as output_file:
            for line in file:
                # FIX: renamed the local from `dict`, which shadowed the builtin.
                record = {}
                data = json.loads(line)
                ST_NOP_CANIn = data["ST_NOP_CANIn"]
                path = data["path"]

                # Copy the signals of interest in the original key order.
                record["ACU1_Driver_Buckle"] = ST_NOP_CANIn["ACU1_Driver_Buckle"]
                record["IDB7_HDCAtive"] = ST_NOP_CANIn["IDB7_HDCAtive"]
                record["Region_A_SwError"] = ST_NOP_CANIn["Region_A_SwError"]

                # Record when the frame occurred, from the message header.
                header_ = ST_NOP_CANIn["header_"]
                secs = header_["secs"]
                record["start_time_str"] = get_time_str(secs)
                record["path"] = path

                json_string = json.dumps(record, indent=None)
                # Write the processed record, one JSON object per line.
                output_file.write(json_string + '\n')


def read_can_out(upload_can_dir, dict_can_dir):
    """Extract the monitored CAN-out signals from a JSON-lines file.

    Reads one ``ST_NOP_CANOut`` record per line from *upload_can_dir* and
    writes a slimmed JSON-lines file to *dict_can_dir* containing only the
    monitored signals, a formatted start time and the source path.

    :param upload_can_dir: input JSON-lines file (one record per line).
    :param dict_can_dir:   output JSON-lines file (overwritten).
    :raises KeyError: if a line lacks any of the expected fields.
    """
    # Signals copied verbatim; tuple order fixes the output key order
    # (kept identical to the historical output for downstream readers).
    signals = (
        "ADCS8_NPilot_SysState",
        "ADCS2_longitudCtrlType",
        "ADCS2_ADAS_EPSLateralCtrlType",
        "ADCS12_longitudDisableInfo",
        "ADCS12_NPilotSysInfo",
        "ADCS8_NNPSysState",
        "ADCS12_NNP_State_Reminder",
        "ADCS12_P2N_State_Reminder",
        "ADCS12_NNP_RINO",
    )
    with open(upload_can_dir, 'r') as file, open(dict_can_dir, 'w') as output_file:
        for line in file:
            data = json.loads(line)
            can_out = data["ST_NOP_CANOut"]
            record = {name: can_out[name] for name in signals}
            # time at which the frame was recorded
            record["start_time_str"] = get_time_str(can_out["header_"]["secs"])
            record["path"] = data["path"]
            output_file.write(json.dumps(record, indent=None) + '\n')


def read_upload_100ms(source_dir, target_path):
    """Flatten ``ST_Upload_100ms_t`` records into a JSON-lines signal file.

    Reads one record per line from *source_dir*, pulls the monitored trigger
    and XCP signals plus the source path and a formatted timestamp, and
    writes one flat JSON object per line to *target_path* (overwritten).

    :param source_dir:  input JSON-lines file (one record per line).
    :param target_path: output JSON-lines file.
    :raises KeyError: if a line lacks any of the expected fields.
    """
    # (output key, input key) pairs, in the exact historical output order.
    # Note the single deliberate rename: the source spells the key
    # "ADCS12_NpilotSysInfo" (lower-case p) but downstream consumers expect
    # "ADCS12_NPilotSysInfo".
    trigger_fields = (
        ("ADCS8_ACCState", "ADCS8_ACCState"),
        ("ADCS12_longitudCtrlSysInfo", "ADCS12_longitudCtrlSysInfo"),
        ("ADCS12_longitudDisableInfo", "ADCS12_longitudDisableInfo"),
        ("ADCS2_AEBPartialBrake", "ADCS2_AEBPartialBrake"),
        ("ADCS2_AEBFullBrake", "ADCS2_AEBFullBrake"),
        ("IDB1_BrakePedalApplied", "IDB1_BrakePedalApplied"),
        ("IDB1_BrakePedalAppliedV", "IDB1_BrakePedalAppliedV"),
        ("ADCS12_NPilotSysInfo", "ADCS12_NpilotSysInfo"),
        ("ADCS8_NPilot_SysState", "ADCS8_NPilot_SysState"),
        ("ADCS8_NNPSysState", "ADCS8_NNPSysState"),
        ("ACU2_VehicleDynYawRate", "ACU2_VehicleDynYawRate"),
        ("EPS1_TorsionBarTorque", "EPS1_TorsionBarTorque"),
        ("EPS1_SteerAngleSpd", "EPS1_SteerAngleSpd"),
        ("IDB3_VehicleSpd", "IDB3_VehicleSpd"),
        ("ICU2_Odometer", "ICU2_Odometer"),
        ("ADCS8_lateralCtrtakeove", "ADCS8_lateralCtrtakeove"),
        ("IDB5_AEBactive", "IDB5_AEBactive"),
        ("ADCS2_AEB_DBSLevel", "ADCS2_AEB_DBSLevel"),
        ("ACU2_LongAccSensorValue", "ACU2_LongAccSensorValue"),
    )
    xcp_fields = (
        "VLCCDHypotheses_Hypothesis_0_fVrelY",
        "VLCCDHypotheses_Hypothesis_0_fVrelX",
        "VLCCDHypotheses_Hypothesis_0_eEBAObjectClass",
    )
    with open(source_dir, 'r') as file, open(target_path, 'w') as output_file:
        for line in file:
            data = json.loads(line)
            st_100ms = data["ST_Upload_100ms_t"]
            triggers = st_100ms["TriggerSignals"]
            xcp_group = st_100ms["XCP_SignalGroup_1"]

            result_dict = {out_key: triggers[src_key]
                           for out_key, src_key in trigger_fields}
            for name in xcp_fields:
                result_dict[name] = xcp_group[name]
            result_dict["path"] = data["path"]

            # Tbox_TimeYHDHMS is a compact YYYYMMDDHHMMSS value; reformat it
            # as "YYYY-MM-DD HH:MM:SS" by slicing.
            t = str(st_100ms["Tbox_TimeYHDHMS"])
            result_dict["start_time_str"] = "{}-{}-{} {}:{}:{}".format(
                t[0:4], t[4:6], t[6:8], t[8:10], t[10:12], t[12:14])

            # one processed record per output line
            output_file.write(json.dumps(result_dict, indent=None) + '\n')


def save_100ms(source_100ms_dir, upload_100ms_file, dict_100ms_file, savepath, object_key):
    """Build the 100ms signal DataFrame for one bag and pickle it.

    Ensures the intermediate directories exist, rewrites the raw JSON into
    JSON-lines form tagged with *object_key*, extracts the monitored
    signals, loads them with pandas and — when at least one row was
    produced — saves ``st_100ms.pkl`` under *savepath*.

    Returns the (possibly empty) DataFrame.
    """
    for needed_dir in (os.path.dirname(upload_100ms_file),
                       os.path.dirname(dict_100ms_file),
                       savepath):
        path_exsist(needed_dir)

    write_2_local(source_100ms_dir, upload_100ms_file, object_key)
    read_upload_100ms(upload_100ms_file, dict_100ms_file)
    frame = pd.read_json(dict_100ms_file, lines=True)

    if frame.shape[0] > 0:
        pickle_path = savepath + 'st_100ms' + '.pkl'
        print(pickle_path)
        frame.to_pickle(pickle_path)

    return frame


def save_can_in(source_can_dir, upload_can_in_file, dict_can_in_file, savepath, object_key):
    """Build the CAN-in signal DataFrame for one bag and pickle it.

    Ensures the intermediate directories exist, rewrites the raw JSON into
    JSON-lines form tagged with *object_key*, extracts the monitored
    CAN-in signals, loads them with pandas and — when at least one row was
    produced — saves ``st_can_in.pkl`` under *savepath*.

    Returns the (possibly empty) DataFrame.
    """
    upload_dir = os.path.dirname(upload_can_in_file)
    for needed_dir in (upload_dir,
                       os.path.dirname(dict_can_in_file),
                       savepath):
        path_exsist(needed_dir)

    print("upload_can_in_dir: " + upload_dir)
    write_2_local(source_can_dir, upload_can_in_file, object_key)
    read_can_in(upload_can_in_file, dict_can_in_file)
    frame = pd.read_json(dict_can_in_file, lines=True)

    if frame.shape[0] > 0:
        pickle_path = savepath + 'st_can_in' + '.pkl'
        print(pickle_path)
        frame.to_pickle(pickle_path)

    return frame


def save_can_out(source_can_dir, upload_can_out_file, dict_can_out_file, savepath, object_key):
    """Build the CAN-out signal DataFrame for one bag and pickle it.

    Ensures the intermediate directories exist, rewrites the raw JSON into
    JSON-lines form tagged with *object_key*, extracts the monitored
    CAN-out signals, loads them with pandas and — when at least one row was
    produced — saves ``st_can_out.pkl`` under *savepath*.

    Returns the (possibly empty) DataFrame.
    """
    for needed_dir in (os.path.dirname(upload_can_out_file),
                       os.path.dirname(dict_can_out_file),
                       savepath):
        path_exsist(needed_dir)

    write_2_local(source_can_dir, upload_can_out_file, object_key)
    read_can_out(upload_can_out_file, dict_can_out_file)
    frame = pd.read_json(dict_can_out_file, lines=True)

    if frame.shape[0] > 0:
        pickle_path = savepath + 'st_can_out' + '.pkl'
        print(pickle_path)
        frame.to_pickle(pickle_path)

    return frame

def task2(object_key):
    """Download, decrypt and post-process one uploaded bag archive.

    Given a TOS *object_key* of the form
    ``EP40/TDA4/SC/<vehicle>/<day>/<hour>/<name>-...-<bagid>.zip``, this:

    1. skips bags whose id contains "3200"/"3201" (nothing to process);
    2. downloads and unzips the archive, decrypts the bags under
       ``asw/StUpload`` (via ``data_jiemi`` — presumably decryption,
       TODO confirm);
    3. converts the decrypted JSON files into the st_100ms / st_can_in /
       st_can_out pickles;
    4. dispatches the produced pickles to the per-bagid ``get_XXXX``
       uploader.

    NOTE(review): relies on the module-level ``bucket_name`` global set in
    the ``__main__`` block.

    Fix vs. previous revision: for "3200"/"3201" bags the old code fell
    through to the pickle-dispatch section with ``vechicle_id`` etc. never
    bound, raising ``NameError``; we now return early instead.
    """
    # bag id is the last '-'-separated token of the key, minus the extension
    bagid = object_key.split('-')[-1].split('.')[0]
    if "3200" in bagid or "3201" in bagid:
        return

    arr = object_key.split("/")
    vechicle_id = arr[3]
    daystr = arr[4]
    hourstr = arr[5]
    filename = arr[6]

    localpath = "/data/code/input/" + vechicle_id + "/" + daystr + "/" + hourstr + "/" + filename
    file_name_dir = localpath + "/" + filename
    download(bucket_name, object_key, file_name_dir, localpath)
    unzip(file_name_dir)

    # every decrypted bag lives under asw/StUpload inside the archive
    bag_dir = localpath + "/asw/StUpload"
    file_list = list_files_in_directory(bag_dir)
    print("file_list: " + str(file_list))

    # decrypt the bag files
    data_jiemi(file_list, vechicle_id, daystr, hourstr)

    # e.g. .../<archive>/source_json/aggregator/json/NOPOutputModuleOutput.json
    json_file = localpath + "/source_json/"
    json_file_list = list_files_in_directory(json_file)
    print("json_file_list: " + str(json_file_list))

    for jsonfile in json_file_list:
        # json_name: e.g. "ST_Upload_100ms_t.json"
        # json_dir:  everything before the "/json/" component
        json_name = jsonfile.split("/json/")[1]
        json_dir = jsonfile.split("/json/")[0]
        savepath = json_dir + "/pk/"
        # NOTE: the old code used `json_name in "<literal>"` (substring
        # test), which would also match fragments like "100ms_t.json";
        # exact equality is the intended check.
        if json_name == "ST_Upload_100ms_t.json":
            print("ST_Upload_100ms_t.json")
            save_100ms(jsonfile, json_dir + "/upload/ST_Upload_100ms_t.json",
                       json_dir + "/dict/ST_Upload_100ms_t.json", savepath, object_key)
        elif json_name == "ST_NOP_CANIn.json":
            print("ST_NOP_CANIn.json")
            save_can_in(jsonfile, json_dir + "/upload/ST_NOP_CANIn.json",
                        json_dir + "/dict/ST_NOP_CANIn.json", savepath, object_key)
        elif json_name == "ST_NOP_CANOut.json":
            save_can_out(jsonfile, json_dir + "/upload/ST_NOP_CANOut.json",
                         json_dir + "/dict/ST_NOP_CANOut.json", savepath, object_key)

    pk_path = "/data/code/input/" + vechicle_id + "/" + daystr + "/" + hourstr + "/" + filename + "/source_json/"
    pk_list = list_pk_100ms(pk_path)
    if not pk_list:
        return

    pk_file = get_st_100ms_path(pk_list)
    # pk_file looks like /data/code/input/<vehicle>/<day>/<hour>/<bag>/...
    parts = pk_file.split("/")
    vechicle_id = parts[4]
    day = parts[5]
    hour = parts[6]
    bag_file = parts[7]
    bagarr = bag_file.replace(".zip/", "").split("-")
    bagid = bagarr[-1].replace(".zip", "")

    targert_bucket_name = "testcarsignal"
    base_name = filename.replace(".zip", "")

    # bagids 2302/2304 consume the st_can_out pickle; everything else gets
    # the st_100ms pickle.  2313/2314 intentionally reuse the 2311 handler.
    # 2305/3307: 还存在问题 (known issues, kept as-is).
    if bagid in ("2302", "2304"):
        st_can_out_path = get_st_can_out(pk_list)
        handler = get_2302 if bagid == "2302" else get_2304
        handler(targert_bucket_name, st_can_out_path, vechicle_id, bagid, day, hour, base_name)
    else:
        handlers = {
            "2300": get_2300,
            "2301": get_2301,
            "2303": get_2303,
            "2305": get_2305,
            "3307": get_3307,
            "3308": get_3308,
            "2309": get_2309,
            "2310": get_2310,
            "2311": get_2311,
            "2313": get_2311,
            "2314": get_2311,
        }
        if bagid in handlers:
            handlers[bagid](targert_bucket_name, pk_file, vechicle_id, bagid, day, hour, base_name)

if __name__ == '__main__':
    # Source bucket holding the raw EP40/TDA4 SC uploads.
    # (task2 reads this module-level name as a global.)
    bucket_name = "advc-data-upload-2100095994-cn-beijing"
    # One object key per uploaded archive under the SC prefix; process each.
    for object_key in getvin(bucket_name, "EP40/TDA4/SC/"):
        task2(object_key)