import time, sys
import numpy as np
import pandas as pd
import uuid
import csv
import math
import wbgapi as wb
import difflib
import ast

import warnings, logging
sys.path.append('./')
# Configure logging and pull in project helpers.
# NOTE(review): the star imports below presumably provide get_dir_files,
# connect_access, get_table_names, load_table, tqdm, Path, re, json, torch
# and the CountryTranslate_* classes used later in this file — confirm.
from utils.file_unrar import *
from utils.invoke_llm import *
from utils.load_access_data import *
from utils.logging_config import setup_logger

logger = setup_logger('File Process')

class Config:
    """Central configuration for the extraction pipeline."""
    # Directory holding the UCanAccess JDBC driver jars.
    jar_dir = "jarfiles/UCanAccess"
    # Normalized output schema shared by every country's CSV.
    target_cols = ['channelno', 'dataid', 'iesign', 'datatype', 'writeoffflag'
            , 'writeoffdataid', 'outputdate', 'origincountrycode', 'origincountry', 'countrycodeofdelivery'
            , 'countryofdelivery', 'importername', 'importeraddress', 'importercontact', 'suppliername'
            , 'supplieraddress', 'suppliercontact', 'hscode', 'hscodedescription', 'commoditydescription'
            , 'totalcifvalue', 'totalfobvalue', 'grossweight', 'netweight', 'quantity'
            , 'quantityunit', 'teu', 'importer_forwarderagent', 'supplier_forwarderagent', 'abnormaldata'
            , 'portofloading', 'portofdestination', 'loadingcountrycode', 'loadingcountry', 'transportterm'
            , 'tradeterm', 'paymentterm', 'carrier', 'containerno', 'vesselname'
            , 'brand', 'version', 'country', 'IMPORTER_ID', 'SUPPLIER_ID', 'cif_currency', 'fob_currency']
    # Rows per chunk when paging tables out of Access databases.
    chunk_size=50000
config = Config  # NOTE(review): the class object itself is used — only class attributes are read
jar_dir = config.jar_dir

# processor = CountryTranslate_OpenAI()
# LLM client used for batch country-name translation (from utils.invoke_llm star import).
processor = CountryTranslate_Requests()
# processor = CountryTranslate_Requests_ZJU()

def distinct_ie_data(root_dir, country_list):
    """Split each country's data files into import vs export path lists.

    Returns {country: {'import_paths': [...], 'export_paths': [...]}}.
    USA files are split on the substring 'out'; Cameroon and Russia-land
    have import-only files; every other country is split on 'IMP'.
    """
    rslt = dict()
    for country in country_list:
        _, file_paths = get_dir_files(root_dir / country)
        if country in ['美国']:
            imports = [p for p in file_paths if 'out' not in str(p)]
            exports = [p for p in file_paths if 'out' in str(p)]
        elif country in ['喀麦隆', '俄罗斯陆运']:
            imports = file_paths
            exports = []
        else:
            imports = [p for p in file_paths if 'IMP' in str(p)]
            exports = [p for p in file_paths if 'IMP' not in str(p)]
        rslt[country] = {'import_paths': imports, 'export_paths': exports}
    return rslt


def get_source_csv(country_list, from_dir, to_dir, country36_dict):
    """
    Extract every country's raw trade files (Access / Excel / TSV) to UTF-8 CSVs.

    For each country the import/export files are loaded, stamped with a
    generated `dataid`, the country's `src_country` code and an `iesign`
    import/export flag, then written under
    to_dir/<country>/<import_paths|export_paths>/<name>.csv.

    Parameters
    ----------
    country_list : list[str]
        Chinese country names to process.
    from_dir, to_dir : Path
        Source-data root and CSV output root.
    country36_dict : dict
        Per-country metadata; the '国别编码' key supplies src_country.

    Returns
    -------
    dict
        country -> {'import_paths': [cols], 'export_paths': [cols]} with the
        column list of the LAST file processed per direction. Countries that
        match no format group now get an empty dict (bug fix: previously
        `src_ctry_cols` could be undefined, raising NameError, or leak the
        previous country's columns).
    """
    start = time.time()

    dir_paths, file_paths = get_dir_files(from_dir)
    file_tree = distinct_ie_data(from_dir, country_list)

    # Both import and export are .accdb
    access_list = ['阿根廷', '埃塞俄比亚', '巴基斯坦', '巴拉圭', '巴拿马', '博兹瓦纳', '厄瓜多尔'
                   , '菲律宾', '哥伦比亚', '哥斯达黎加', '哈萨克斯坦', '莱索托', '孟加拉', '秘鲁', '秘鲁-海运'
                   , '秘鲁-空运', '纳米比亚', '尼日利亚', '斯里兰卡', '坦桑尼亚', '乌干达', '乌拉圭'
                   , '乌兹别克斯坦', '印度', '英国', '智利', '乌克兰']
    # Both import and export are .mdb (read the same way as .accdb)
    mdb_list = ['墨西哥']
    # Both import and export are .xlsx
    xlsx_list = ['加纳', '喀麦隆', '科特迪瓦', '肯尼亚', '马拉维', '越南']
    # USA: export is .txt (tab-separated), import is .accdb
    usa_list = ['美国']
    # Russia (land): import and export rows share one .accdb file
    rus_list = ['俄罗斯陆运']

    src_all_ctry_cols = dict()
    for ctry in tqdm(country_list):
        # Re-initialized every iteration so unmatched countries record {}.
        src_ctry_cols = dict()
        if ctry in access_list + mdb_list:
            logger.info(ctry)
            for tag, sign in (('import_paths', 'I'), ('export_paths', 'E')):
                if file_tree[ctry][tag]:
                    for file_pth in tqdm(file_tree[ctry][tag]):
                        logger.info(f'file_pth : {file_pth}')
                        df = _read_access_table(file_pth)
                        _annotate_source_df(df, ctry, country36_dict, sign)
                        _write_source_csv(df, file_pth, to_dir, ctry, tag)
                    src_ctry_cols[tag] = list(df.columns.values)
        elif ctry in xlsx_list:
            logger.info(ctry)
            for tag, sign in (('import_paths', 'I'), ('export_paths', 'E')):
                if file_tree[ctry][tag]:
                    for file_pth in tqdm(file_tree[ctry][tag]):
                        logger.info(f'file_pth : {file_pth}')
                        df = _read_first_sheet(file_pth)
                        _annotate_source_df(df, ctry, country36_dict, sign)
                        _write_source_csv(df, file_pth, to_dir, ctry, tag)
                    src_ctry_cols[tag] = list(df.columns.values)
        elif ctry in usa_list:
            logger.info(ctry)
            if file_tree[ctry]['import_paths']:
                for file_pth in tqdm(file_tree[ctry]['import_paths']):
                    logger.info(f'file_pth : {file_pth}')
                    df = _read_access_table(file_pth)
                    _annotate_source_df(df, ctry, country36_dict, 'I')
                    _write_source_csv(df, file_pth, to_dir, ctry, 'import_paths')
                src_ctry_cols['import_paths'] = list(df.columns.values)
            if file_tree[ctry]['export_paths']:
                for file_pth in tqdm(file_tree[ctry]['export_paths']):
                    logger.info(f'file_pth : {file_pth}')
                    # USA export files are tab-separated text
                    df = pd.read_csv(file_pth, sep='\t', encoding='utf-8')
                    _annotate_source_df(df, ctry, country36_dict, 'E')
                    _write_source_csv(df, file_pth, to_dir, ctry, 'export_paths')
                src_ctry_cols['export_paths'] = list(df.columns.values)
        elif ctry in rus_list:
            logger.info(ctry)
            if file_tree[ctry]['import_paths']:
                for file_pth in tqdm(file_tree[ctry]['import_paths']):
                    logger.info(f'file_pth : {file_pth}')
                    df = _read_access_table(file_pth)
                    # Import/export rows are mixed in one file; derive the flag per row
                    iesign = ['I' if x == 'Импорт' else 'E' if x == 'Экспорт' else 'NA'
                              for x in df['Type of transportation'].values]
                    _annotate_source_df(df, ctry, country36_dict, iesign)
                    _write_source_csv(df, file_pth, to_dir, ctry, 'import_paths')
                src_ctry_cols['import_paths'] = list(df.columns.values)
        src_all_ctry_cols[ctry] = src_ctry_cols
    end = time.time()
    logger.info(f'data extract to csv takes {round((end - start)/60, 2)} minutes')
    return src_all_ctry_cols


def _read_access_table(file_pth):
    """Open an Access (.accdb/.mdb) file and load its first table as a DataFrame."""
    conn, stmt = connect_access(file_pth, jar_dir)
    table_names = get_table_names(conn, stmt)
    logger.info(table_names)
    # Reconnect before reading, mirroring the original flow where the
    # table-name lookup consumed the first connection.
    conn, stmt = connect_access(file_pth, jar_dir)
    return load_table(table_names[0], conn, stmt, chunk_size=config.chunk_size)


def _read_first_sheet(file_pth):
    """Load the first worksheet of an Excel file as a DataFrame."""
    excel_file = pd.ExcelFile(file_pth)
    sheet_names = excel_file.sheet_names
    return pd.read_excel(file_pth, sheet_name=sheet_names[0])


def _annotate_source_df(df, ctry, country36_dict, iesign):
    """Stamp dataid / src_country / iesign onto df in place and log its shape."""
    # 16-hex-char random id per row
    df['dataid'] = [str(uuid.uuid4()).replace('-', '')[0:16] for _ in range(df.shape[0])]
    df['src_country'] = country36_dict[ctry].get('国别编码')
    df['iesign'] = iesign  # scalar 'I'/'E', or a per-row list for mixed files
    logger.info(f'table columns: {list(df.columns.values)}')
    logger.info(f'table shape: {df.shape}')


def _write_source_csv(df, file_pth, to_dir, ctry, ie_dir):
    """Write df to to_dir/ctry/ie_dir/<source name>.csv with all fields quoted."""
    file_name = ''.join(str(file_pth).split(os.sep)[-1].split('.')[:-1])
    to_file_pth = to_dir / ctry / ie_dir / f'{file_name}.csv'
    os.makedirs(os.sep.join(str(to_file_pth).split(os.sep)[:-1]), exist_ok=True)
    df.to_csv(to_file_pth
              , sep=','
              , index=False
              , quotechar='"'               # quote character
              , quoting=csv.QUOTE_ALL       # force-quote every field
              , lineterminator='\n', header=True, encoding='utf-8')


def save_data_to_csv(target_tab, csv_files_path, file_name, country_code, IE_tag='IMPORT'):
    """Save a DataFrame under csv_files_path/country_code/IE_tag/<name>.csv.

    The source file name keeps its stem but gets a .csv extension regardless
    of the original format (.accdb/.xlsx/.txt/.mdb).
    """
    # Make sure every level of the target directory exists.
    os.makedirs(csv_files_path, exist_ok=True)
    file_csv = file_name
    for ext in ('.accdb', '.xlsx', '.txt', '.mdb'):
        file_csv = file_csv.replace(ext, '.csv')
    country_dir = csv_files_path / country_code
    os.makedirs(country_dir, exist_ok=True)
    ie_dir = country_dir / IE_tag
    os.makedirs(ie_dir, exist_ok=True)
    csv_save_path = ie_dir / file_csv
    target_tab.to_csv(csv_save_path, sep=',', index=False, header=True, encoding='utf-8')
    logger.info(f'CSV data saved to {csv_save_path}')
    return


def flatten(arr):
    """Flatten a two-level nested iterable into a single flat list."""
    flat = []
    for inner in arr:
        flat.extend(inner)
    return flat


def is_nan(value):
    """
    Return True if value is NaN-like in any of the forms seen in this data:
    None, the string 'nan' (any case) or the empty string, or a float NaN.
    Everything else (including ints and non-empty strings) is False.

    Fix: None is now treated as NaN-like — callers use this as a guard
    before indexing country_mapping[value], which would raise KeyError on
    None otherwise.
    """
    if value is None:
        return True
    if isinstance(value, str):
        # .lower() == '' was redundant — equivalent to value == ''
        return value.lower() == 'nan' or value == ''
    if isinstance(value, (float, np.floating)):
        return math.isnan(value)
    return False


def get_col_str_type(cols):
    """
    Build a pandas dtype mapping that forces every given column to 'str'.

    Fix: the original ignored its `cols` parameter and always used
    config.target_cols; since every caller passes config.target_cols the
    behavior is unchanged for them, and the function now works for any
    column list.
    """
    return {col: 'str' for col in cols}


def write_list_to_file(str_list, filename):
    """Write the strings to a UTF-8 text file, one per line (no trailing newline)."""
    joined = '\n'.join(str_list)
    with open(filename, 'w', encoding='utf-8') as fh:
        fh.write(joined)


def read_list_from_file(filename):
    """Read a UTF-8 text file and return its lines with surrounding whitespace stripped."""
    with open(filename, 'r', encoding='utf-8') as fh:
        return [ln.strip() for ln in fh]


# # Example: write the list to a file
# output_file = 'string_list.txt'
# write_list_to_file(original_list, output_file)

# # Example: read the list back from the file
# loaded_list = read_list_from_file(output_file)


def read_and_fix_encoded_file(filename):
    """
    Read a text file with an unknown / mojibake encoding, repair common
    UTF-8-decoded-as-Latin-1 artifacts, and parse the result as a Python
    dict literal. Falls back to manual_dict_parse() if ast.literal_eval
    fails.

    NOTE(review): the replacements map mojibake to UPPER-case accented
    letters (e.g. 'Ã©' -> 'É' not 'é') — presumably the data is upper-cased
    country names; confirm against the input files.
    """
    # Read raw bytes so we can try several decodings ourselves
    with open(filename, 'rb') as f:
        raw_bytes = f.read()
    
    # Background: 'Ã\xad' is UTF-8 'í' (bytes C3 AD) that was mis-decoded
    # as Latin-1 — the bytes are the same, only the interpretation differs.
    
    # Try candidate encodings; keep the first artifact-free decoding, else
    # the one with the fewest 'Ã' artifacts.
    encodings = ['utf-8', 'latin-1', 'cp1252']
    best_content = None
    best_encoding = None
    
    for encoding in encodings:
        try:
            content = raw_bytes.decode(encoding)
            # No visible mojibake: accept this decoding outright
            if 'Ã' not in content and '\\x' not in content:
                best_content = content
                best_encoding = encoding
                break
            elif best_content is None or content.count('Ã') < best_content.count('Ã'):
                best_content = content
                best_encoding = encoding
        except UnicodeDecodeError:
            continue
    
    if best_content is None:
        # Every decoding raised — last resort: drop undecodable bytes
        best_content = raw_bytes.decode('utf-8', errors='ignore')
        best_encoding = 'utf-8 (with ignore)'
    
    logger.info(f"使用编码: {best_encoding}")
    
    # Patch the most common mojibake sequences in the decoded text
    content = best_content
    
    # Both the literal backslash-escape form and the actual character form
    content = content.replace('Ã\\xad', 'Í')
    content = content.replace('Ã\xad', 'Í')  # non-escaped variant
    
    # Other frequent UTF-8-as-Latin-1 artifacts
    content = content.replace('Ã©', 'É')
    content = content.replace('Ã¡', 'Á')
    content = content.replace('Ã³', 'Ó')
    content = content.replace('Ãº', 'Ú')
    content = content.replace('Ã±', 'Ñ')
    content = content.replace('\\xa0', ' ')
    content = content.replace('\xa0', ' ')
    
    # Safely evaluate the cleaned text as a Python dict literal
    try:
        data_dict = ast.literal_eval(content)
        return data_dict
    except (SyntaxError, ValueError) as e:
        print(f"解析错误: {e}")
        # Fall back to the regex-based best-effort parser
        return manual_dict_parse(content)

def manual_dict_parse(content):
    """
    Best-effort fallback parser for dict-literal text that ast.literal_eval
    rejected: extracts top-level "'key': {...}" pairs with a regex and
    splits each inner body on commas/colons. Inner values containing
    commas or braces are not supported.
    """
    import re
    pairs = re.findall(r"'([^']+)':\s*{([^}]+)}", content)

    result = {}
    for raw_key, body in pairs:
        # Repair mojibake in the key (escaped and literal forms)
        key = raw_key.replace('Ã\\xad', 'Í').replace('Ã\xad', 'Í')

        inner = {}
        for chunk in body.split(','):
            if ':' not in chunk:
                continue
            k, v = chunk.split(':', 1)
            inner[k.strip().strip("'\"")] = v.strip().strip("'\"")

        result[key] = inner

    return result


def batch_query_LLM(dt_country, country_code_dict, batch=20):
    """
    Translate country names through the module-level `processor` in slices
    of `batch` names and return the concatenated results.

    Fix: the original `len//batch + 1` loop issued one extra call with an
    EMPTY list whenever len(dt_country) was an exact multiple of batch
    (including the empty-input case); stepping by `batch` avoids that.
    """
    start = time.time()
    result = []
    for i in range(0, len(dt_country), batch):
        country_lst = dt_country[i:i + batch]
        rslt = processor.translate_country_lst(country_lst, country_code_dict)
        logger.info(rslt)
        result += rslt
        # time.sleep(0.2)
    end = time.time()
    logger.info(f'All query takes {round(end-start)} seconds')
    return result


def getn_ctry_ie_tab(country_list, csv_files_path, csv_normal_path):
    """
    Load and concatenate each country's import and export CSVs.

    Each country's folder under csv_files_path must contain 'import_paths'
    and 'export_paths' files (as classified by distinct_ie_data).

    Returns
    -------
    (list[pd.DataFrame], list[Path])
        Tables in [imp_0, exp_0, imp_1, exp_1, ...] order and the matching
        target paths .../<country>/IMP.csv and .../<country>/EXP.csv.

    Fix: frames are collected and concatenated ONCE per direction instead
    of pd.concat inside the loop (which copies the accumulated frame on
    every file — quadratic).
    """
    start = time.time()
    col_type = get_col_str_type(config.target_cols)
    tab_list = []
    target_path_lst = []
    for ctry_nm in country_list:
        file_tree = distinct_ie_data(csv_files_path, [ctry_nm])
        target_path_lst.append(csv_normal_path / ctry_nm / 'IMP.csv')
        target_path_lst.append(csv_normal_path / ctry_nm / 'EXP.csv')
        for tag, label in (('import_paths', 'imp'), ('export_paths', 'exp')):
            # Seed with an empty frame so the target schema survives even
            # when the country has no files for this direction.
            frames = [pd.DataFrame(columns=config.target_cols)]
            frames += [pd.read_csv(d, encoding='utf-8', header=0, dtype=col_type)
                       for d in file_tree[ctry_nm][tag]]
            tab = pd.concat(frames, axis=0, ignore_index=True)
            logger.info(f'tab_{label}_tmp shape: {tab.shape}')
            tab_list.append(tab)
    end = time.time()
    logger.info(f'load and concat csv files takes {round(end-start)} seconds')
    return tab_list, target_path_lst


def check_llm_cols(dt_country, country_code_dict, batch, key_col = '原始国名'):
    """
    Query the LLM for country translations and validate that the result
    contains the key column; retry (recursively) until it does.

    Bug fix: the recursive retry's result was NOT returned, so every retry
    path silently yielded None. The retry also now honors `key_col` instead
    of resetting it to the default.

    NOTE(review): there is still no retry limit — a persistently malformed
    LLM response would recurse until RecursionError; consider capping.
    """
    result = batch_query_LLM(dt_country, country_code_dict, batch=batch)
    llm_country = pd.DataFrame(result)
    llm_cols = list(llm_country.columns.values)
    if key_col in llm_cols:
        return llm_country
    logger.info(f'llm_cols is {llm_cols}')
    return check_llm_cols(dt_country, country_code_dict, batch, key_col=key_col)


def data_normalization_Nfiles(tab_list, path_list, batch=20):
    """
    Normalize the country columns of every table (upper-case, translate via
    LLM, attach country codes) and write each table to its target CSV.

    tab_list must contain both import and export tables; path_list gives the
    destination CSV for each table (same order and length).

    Returns
    -------
    (list[dict], dict)
        Per-table {'tab': df, 'pth': path} records and the
        raw-name -> {中文国名, 英文国名, 国别编码} mapping.

    NOTE(review): `country_code_dict` and `country_code` are module globals
    not defined in this file — presumably supplied by the star imports at
    the top; confirm before running standalone.
    """
    assert len(tab_list)==len(path_list)
    N = len(tab_list)
    start = time.time()
    ctries_cols = ['origincountry', 'countryofdelivery', 'loadingcountry', 'country']
    tab_df_countries = pd.DataFrame(columns=ctries_cols)
    # Upper-case the country columns in place and pool them for dedup
    for i in range(N):
        for col in ctries_cols:
            tab_list[i][col]=tab_list[i][col].str.upper()
        tab_df_countries = pd.concat([tab_df_countries, tab_list[i].loc[:, ctries_cols]],axis=0, ignore_index=True)
    
    # Distinct, non-NaN country names across all tables
    # src_country_list = [x.upper for x in list(set(flatten(tab_df_countries.values)))]
    dt_country = [x for x in list(set(flatten(tab_df_countries.values))) if not is_nan(x)]
    dt_country.sort()
    logger.info(f'This file include {len(dt_country)} countries')
    
    # LLM translation results, validated to contain the '原始国名' column
    llm_country = check_llm_cols(dt_country, country_code_dict, batch, key_col = '原始国名')
    
    tag_country = pd.DataFrame()
    tag_country['原始国名'] = dt_country

    merged_country = pd.merge(tag_country, llm_country, on='原始国名', how='left')
    merged_country = pd.merge(merged_country, country_code.loc[:, ['英文国名', '国别编码']], on='英文国名', how='left')
    # print(merged_country.head())
    # merged_country
    # Row layout after the merges is assumed to be
    # [原始国名, 中文国名, 英文国名, 国别编码] — depends on llm_country's
    # column order; TODO confirm.
    country_mapping = dict()
    for x in merged_country.values:
        country_mapping[x[0]] = {'中文国名':x[1] ,'英文国名':x[2] ,'国别编码':x[3]}
    # country_mapping
    
    tab_rslt = []
    for i in range(N):
        tmp = dict()
        tab_tmp = tab_list[i]
        tab_pth = path_list[i]
        tab_pth_dir = Path(os.sep.join(str(tab_pth).split(os.sep)[:-1]))
        
        os.makedirs(tab_pth_dir, exist_ok=True)
        # Codes first (they read the still-raw name columns), then the name
        # columns are replaced with English names. NaN-like cells pass through.
        tab_tmp['origincountrycode'] = tab_tmp['origincountry'].apply(lambda x: country_mapping[x].get('国别编码') if not is_nan(x) else x)
        tab_tmp['countrycodeofdelivery'] = tab_tmp['countryofdelivery'].apply(lambda x: country_mapping[x].get('国别编码') if not is_nan(x) else x)
        tab_tmp['origincountry'] = tab_tmp['origincountry'].apply(lambda x: country_mapping[x].get('英文国名') if not is_nan(x) else x)
        tab_tmp['countryofdelivery'] = tab_tmp['countryofdelivery'].apply(lambda x: country_mapping[x].get('英文国名') if not is_nan(x) else x)
        tab_tmp['loadingcountry'] = tab_tmp['loadingcountry'].apply(lambda x: country_mapping[x].get('英文国名') if not is_nan(x) else x)
        tab_tmp['country'] = tab_tmp['country'].apply(lambda x: country_mapping[x].get('英文国名') if not is_nan(x) else x)
        tab_tmp.to_csv(tab_pth, sep=',', index=False, header=True, encoding='utf-8')
        logger.info(f'tab_{i} saved to {tab_pth}')
        tmp['tab'] = tab_tmp
        tmp['pth'] = tab_pth
        tab_rslt.append(tmp)
    end = time.time()
    logger.info(f'Normal Process 5files takes {round(end-start)} seconds')
    return tab_rslt, country_mapping


def get_all_data_countries(file_paths):
    """Collect the distinct, upper-cased, stripped country names found in
    the country columns of every CSV in file_paths, sorted ascending."""
    start = time.time()
    col_type = get_col_str_type(config.target_cols)
    ctries_cols = ['origincountry', 'countryofdelivery', 'loadingcountry', 'country']
    dt_country_list = []
    # logger.info(file_paths)
    for pth in file_paths:
        logger.info(f'the path : {pth}')
        tab_tmp = pd.read_csv(pth,encoding='utf-8',header=0,dtype=col_type)
        tab_tmp = tab_tmp.loc[:, ctries_cols]
        for col in ctries_cols:
            tab_tmp[col] = tab_tmp[col].str.upper()
        # Distinct non-NaN names in this file
        dt_country = [v for v in set(flatten(tab_tmp.values)) if not is_nan(v)]
        logger.info(f'file path : {pth}; country set : {dt_country}')
        logger.info(f'country num : {len(dt_country)}')
        dt_country_list += dt_country
    # dt_country_list = list(set(dt_country_list))
    dt_country_list = list({name.strip() for name in dt_country_list})
    dt_country_list.sort()
    end = time.time()
    logger.info(f'get all distinct countries takes {round(end-start)} seconds')
    return dt_country_list


def get_llm_country_normal(all_data_country_list, batch=20, step=15):
    """
    Translate all country names via the LLM in epochs of step*batch names
    and build a raw-name -> {中文国名, 英文国名, 国别编码} mapping.

    Bug fix: the inner check_llm_cols call hard-coded batch=20, silently
    ignoring this function's `batch` parameter; it now honors it.

    NOTE(review): `country_code_dict`, `country_code` and `torch` are not
    defined in this file — presumably provided by the star imports at the
    top; confirm before running standalone.
    """
    start = time.time()
    N = len(all_data_country_list)
    epoch = N//(step*batch)+1
    country_mapping = dict()
    for i in tqdm(range(epoch)):
        country_lst = all_data_country_list[i*batch*step:(i+1)*batch*step]
        # LLM results, validated to contain the '原始国名' key column
        llm_country = check_llm_cols(country_lst, country_code_dict, batch=batch, key_col = '原始国名')
        tag_country = pd.DataFrame()
        tag_country['原始国名'] = country_lst
        
        merged_country = pd.merge(tag_country, llm_country, on='原始国名', how='left')
        merged_country = pd.merge(merged_country, country_code.loc[:, ['英文国名', '国别编码']], on='英文国名', how='left')
        merged_country = merged_country.loc[:, ['原始国名', '中文国名', '英文国名', '国别编码']]
        for x in merged_country.values:
            country_mapping[x[0]] = {'中文国名':x[1] ,'英文国名':x[2] ,'国别编码':x[3]}
        torch.cuda.empty_cache()
        # Throttle between epochs to avoid overloading the LLM service
        time.sleep(30)
        logger.info(f'time sleep 30 seconds!')
    end = time.time()
    logger.info(f'get_llm_country_normal takes {round(end-start)} seconds')
    return country_mapping

def get_llm_country_normal2(all_data_country_list, country_code_dict, batch=20, step=15):
    """
    Like get_llm_country_normal but takes country_code_dict explicitly and
    returns the raw per-epoch LLM results (a list of result lists) instead
    of a merged mapping. No merging, no GPU cache clearing, no sleeping.

    Fixes: the per-epoch log message falsely claimed a 30-second sleep that
    never happened, and the final timing log named the wrong function.
    """
    start = time.time()
    N = len(all_data_country_list)
    epoch = N//(step*batch)+1
    llm_country_describe=[]
    for i in range(epoch):
        country_lst = all_data_country_list[i*batch*step:(i+1)*batch*step]
        # Raw LLM translation results for this slice
        result = batch_query_LLM(country_lst, country_code_dict, batch=batch)
        llm_country_describe.append(result)
        logger.info(f'epoch {i+1}/{epoch} finished')
    end = time.time()
    logger.info(f'get_llm_country_normal2 takes {round(end-start)} seconds')
    return llm_country_describe


def parse(text):
    """
    Parse concatenated LLM output fragments like "[{...},{...}]\\n\\n[{...}]"
    into a list of dicts.

    Cleans whitespace and mojibake escape artifacts, normalizes key-name
    variants ('原数据' -> '原始国名', '中英文国名' -> '中文国名',
    'english国名' -> '英文国名'), then splits the record stream on both
    '},{' and '}, {' boundaries and json-loads each piece.

    Fix: removed the useless `content_single_space` alias binding.
    """
    cleaned_text = text.replace('[]','').replace(']\n\n[',', ')
    # Collapse runs of spaces so the record-boundary splits behave predictably
    cleaned_text = re.sub(r' +', ' ', cleaned_text)
    cleaned_text = cleaned_text.replace('\n','').replace('[{', '').replace('}]', '')
    # Normalize escaped soft-hyphen / accented-i artifacts to proper escapes
    cleaned_text = cleaned_text.replace('\\xad', '\\u00ad').replace('\\xed', '\\u00ed')
    cleaned_text = cleaned_text.replace('\\xa0', ' ').replace('原数据', '原始国名')
    cleaned_text = cleaned_text.replace('中英文国名', '中文国名').replace('english国名', '英文国名')
    # Split on '},{' first, then on '}, {' within each piece
    result = re.split(r'},{', cleaned_text)
    result1 = [re.split(r'}, {', x) for x in result]
    result2 = [y.strip() for x in result1 for y in x]
    # Re-wrap each fragment in braces and parse as JSON
    result3 = [json.loads('{'+x+'}') for x in result2]
    return result3


def merge_country_info(src_country, llm_country, code_country):
    """
    Join the raw country names with the LLM translations and the official
    code table, and return a raw-name -> {中文国名, 英文国名, 国别编码} dict.

    src_country : list of raw names; llm_country : list of dicts with
    原始国名/中文国名/英文国名 keys; code_country : DataFrame with
    英文国名/国别编码 columns.
    """
    tag_country = pd.DataFrame()
    tag_country['原始国名'] = src_country

    llm_df = pd.DataFrame(llm_country)
    merged = pd.merge(tag_country, llm_df, on='原始国名', how='left')
    merged = pd.merge(merged, code_country.loc[:, ['英文国名', '国别编码']],
                      on='英文国名', how='left')
    # Row layout after the merges: [原始国名, 中文国名, 英文国名, 国别编码]
    return {
        row[0]: {'中文国名': row[1], '英文国名': row[2], '国别编码': row[3]}
        for row in merged.values
    }


def data_normalization_Nfiles2(tab_list, path_list, country_mapping):
    """
    Apply a precomputed country_mapping to every table's country columns
    (codes first, then English names) and write each table to its path.

    tab_list must contain both import and export tables; path_list gives
    the destination CSV for each (same order and length).
    """
    assert len(tab_list)==len(path_list)
    start = time.time()

    def _mapped(series, field):
        # NaN-like cells pass through untouched; others are looked up.
        return series.apply(lambda v: country_mapping[v].get(field) if not is_nan(v) else v)

    for idx, (tab_tmp, tab_pth) in enumerate(zip(tab_list, path_list)):
        tab_pth_dir = Path(os.sep.join(str(tab_pth).split(os.sep)[:-1]))
        os.makedirs(tab_pth_dir, exist_ok=True)
        # Codes are derived first because they read the still-raw name columns.
        tab_tmp['origincountrycode'] = _mapped(tab_tmp['origincountry'], '国别编码')
        tab_tmp['countrycodeofdelivery'] = _mapped(tab_tmp['countryofdelivery'], '国别编码')
        tab_tmp['origincountry'] = _mapped(tab_tmp['origincountry'], '英文国名')
        tab_tmp['countryofdelivery'] = _mapped(tab_tmp['countryofdelivery'], '英文国名')
        tab_tmp['loadingcountry'] = _mapped(tab_tmp['loadingcountry'], '英文国名')
        tab_tmp['country'] = _mapped(tab_tmp['country'], '英文国名')
        tab_tmp.to_csv(tab_pth, sep=',', index=False, header=True, encoding='utf-8')
        logger.info(f'tab_{idx} saved to {tab_pth}')
    end = time.time()
    logger.info(f'Normal Process 5files takes {round(end-start)} seconds')
    return


def country_normal_mapping(country_mapping, country_list, csv_files_path, pth_change_tag='20251t3_csv_correct'):
    """
    Rewrite each country's CSVs with normalized country names/codes, saving
    to a parallel tree where the last path segment of csv_files_path is
    replaced by pth_change_tag.

    Unknown names are added to country_mapping with empty fields (mutates
    the caller's dict), so cells whose mapping is empty keep their raw value.

    Bug fix: 'loadingcountrycode' previously gated on the mapped '英文国名'
    instead of '国别编码' (copy-paste error vs. the other two code columns).

    Returns len(country_list).
    """
    start = time.time()
    pth_tag = str(csv_files_path).split(os.sep)[-1]
    # Invariants hoisted out of the loops
    col_type = get_col_str_type(config.target_cols)
    ctries_cols = ['origincountry', 'countryofdelivery', 'loadingcountry']

    def _code(x):
        # Keep the raw value when it, or its mapped code, is NaN-like/empty.
        if is_nan(x) or is_nan(country_mapping[x].get('国别编码', '')):
            return x
        return country_mapping[x].get('国别编码', '')

    def _name(x):
        # Keep the raw value when it, or its mapped English name, is NaN-like/empty.
        if is_nan(x) or is_nan(country_mapping[x].get('英文国名', '')):
            return x
        return country_mapping[x].get('英文国名', '')

    for ctry in country_list:
        _, file_paths = get_dir_files(csv_files_path / ctry)
        for file_path in file_paths:
            correct_save_path = str(file_path).replace(pth_tag, pth_change_tag)
            os.makedirs(os.sep.join(correct_save_path.split(os.sep)[:-1]), exist_ok=True)
            correct_save_path = Path(correct_save_path)
            logger.info(file_path)
            tab_tmp = pd.read_csv(file_path,encoding='utf-8',header=0,dtype=col_type)

            for col in ctries_cols:
                tab_tmp[col] = tab_tmp[col].str.upper()

            # Register any unseen names with an empty mapping entry
            dt_country = [x for x in set(flatten(tab_tmp.loc[:, ctries_cols].values)) if not is_nan(x)]
            for x in dt_country:
                if x not in country_mapping:
                    country_mapping[x]={'中文国名':'', '英文国名':'', '国别编码':''}

            # Codes first (they read the still-raw name columns), then names.
            tab_tmp['origincountrycode'] = tab_tmp['origincountry'].apply(_code)
            tab_tmp['countrycodeofdelivery'] = tab_tmp['countryofdelivery'].apply(_code)
            tab_tmp['loadingcountrycode'] = tab_tmp['loadingcountry'].apply(_code)
            tab_tmp['origincountry'] = tab_tmp['origincountry'].apply(_name)
            tab_tmp['countryofdelivery'] = tab_tmp['countryofdelivery'].apply(_name)
            tab_tmp['loadingcountry'] = tab_tmp['loadingcountry'].apply(_name)
            # tab_tmp['country'] = tab_tmp['country'].apply(lambda x: country_mapping[x].get('英文国名') if not is_nan(x) else x)
            tab_tmp.to_csv(correct_save_path, sep=',', index=False, header=True, encoding='utf-8')
            logger.info(f'correct country saved to {str(correct_save_path)}')
    end = time.time()
    logger.info(f'Correct country takes {round(end-start)} seconds')
    return len(country_list)


def _merge_version_cols(file_list, save_path, month_from_name, col_type):
    """Concatenate the ['dataid', 'version'] columns of every CSV in *file_list*
    and write the merged table to *save_path*.

    Parameters
    ----------
    file_list : list
        Paths of the monthly CSV files to merge.
    save_path : str or pathlib.Path
        Destination of the merged CSV (parent directories are created).
    month_from_name : callable
        Extracts the YYYYMM month string from a bare file name. The month is
        only printed as a progress trace (parity with the original code, which
        computed it per branch but never stored it).
    col_type : dict
        Column dtype mapping passed straight to pandas.read_csv.
    """
    ver_cols = ['dataid', 'version']
    # Collect per-file slices and concatenate once at the end instead of
    # growing the frame inside the loop (avoids quadratic re-copying).
    frames = [pd.DataFrame(columns=ver_cols)]
    for file_pth in file_list:
        logger.info(f'read data: {file_pth}')
        mth = month_from_name(str(file_pth).split('/')[-1])
        print(mth)
        tab_tmp = pd.read_csv(file_pth, encoding='utf-8', header=0, dtype=col_type)
        frames.append(tab_tmp.loc[:, ver_cols])
    ver_tab = pd.concat(frames, axis=0, ignore_index=True)
    # Always go through str() before splitting: save_path is a pathlib.Path.
    # (One branch of the original called .split() on the Path itself, which
    # raises AttributeError -- fixed here.)
    os.makedirs(os.sep.join(str(save_path).split(os.sep)[:-1]), exist_ok=True)
    ver_tab.to_csv(save_path, sep=',', index=False, header=True, encoding='utf-8')
    logger.info(f'data: {ver_tab.shape}')
    logger.info(f'new data save: {save_path}')


def get_data_month(from_dir, to_dir, country_list):
    """
    Merge every month's data per country; the output keeps only the
    ['dataid', 'version'] columns, written to one IMPORT.csv / EXPORT.csv
    per country under *to_dir*.

    Parameters
    ----------
    from_dir : pathlib.Path
        Root directory containing one sub-directory per country.
    to_dir : pathlib.Path
        Root directory for the merged per-country outputs.
    country_list : list of str
        Country names (Chinese) to process; names not in any known group
        are silently skipped.
    """
    country_list_all = ['阿根廷', '埃塞俄比亚', '巴基斯坦', '巴拉圭', '巴拿马', '博兹瓦纳', '俄罗斯陆运', '厄瓜多尔'
                        , '菲律宾', '哥伦比亚', '哥斯达黎加', '哈萨克斯坦', '加纳', '喀麦隆', '科特迪瓦', '肯尼亚'
                        , '莱索托', '马拉维', '美国', '孟加拉', '秘鲁', '秘鲁-海运', '秘鲁-空运', '墨西哥'
                        , '纳米比亚', '尼日利亚', '斯里兰卡', '坦桑尼亚', '乌干达', '乌克兰', '乌拉圭', '乌兹别克斯坦'
                        , '印度', '英国', '越南', '智利']
    # Month is at the start of the file name.
    country_list1 = ['秘鲁', '墨西哥', '孟加拉', '哥斯达黎加', '巴基斯坦', '埃塞俄比亚', '阿根廷'
                    , '乌拉圭', '厄瓜多尔', '巴拉圭', '纳米比亚', '坦桑尼亚', '斯里兰卡', '巴拿马'
                    , '英国', '博兹瓦纳', '哈萨克斯坦', '乌兹别克斯坦', '哥伦比亚', '智利']
    # Month is at the end of the file name (before the extension).
    country_list2 = ['秘鲁-空运', '秘鲁-海运', '科特迪瓦', '莱索托', '乌干达', '乌克兰'
                    , '加纳', '喀麦隆', '菲律宾', '马拉维']
    # Countries with bespoke file-name formats, handled individually below.
    country_list3 = ['越南', '尼日利亚', '美国', '印度', '俄罗斯陆运', '肯尼亚']

    # get_dir_files is kept for parity with the original code even though its
    # results are not used here; file_tree drives the actual processing.
    dir_paths, file_paths = get_dir_files(from_dir)
    file_tree = distinct_ie_data(from_dir, country_list_all)
    col_type = get_col_str_type(config.target_cols)

    # Month extractors, each taking the bare file name.
    month_head = lambda name: name[0:6]                     # YYYYMM prefix
    month_tail = lambda name: name.split('.')[0][-6:]       # YYYYMM suffix before extension
    month_field2 = lambda name: name.split('_')[2][0:6]     # third '_'-separated field
    month_field3 = lambda name: name.split('_')[3][0:6]     # fourth '_'-separated field
    # Russia land transport: fields are <..>_<part1>_<part2>_..., month = part2+part1.
    month_ru = lambda name: name.split('_')[2] + name.split('_')[1]

    for i in tqdm(range(len(country_list))):
        ctry = country_list[i]
        logger.info(ctry)
        if ctry in country_list1:
            imp_fn, exp_fn = month_head, month_head
        elif ctry in country_list2:
            imp_fn, exp_fn = month_tail, month_tail
        elif ctry in ('越南', '肯尼亚'):
            imp_fn, exp_fn = month_field2, month_field2
        elif ctry == '尼日利亚':
            # NOTE(review): the export slice [-6:] is taken from the full file
            # name *including* the extension (original behavior, preserved) --
            # confirm the intended month format for Nigeria exports.
            imp_fn, exp_fn = month_head, (lambda name: name[-6:])
        elif ctry == '美国':
            imp_fn, exp_fn = (lambda name: name[2:8]), month_head
        elif ctry == '印度':
            imp_fn, exp_fn = month_field3, month_field3
        elif ctry == '俄罗斯陆运':
            imp_fn, exp_fn = month_ru, month_ru
        else:
            # Country not in any known group: nothing to do (same as original).
            continue
        if file_tree[ctry]['import_paths']:
            _merge_version_cols(file_tree[ctry]['import_paths'],
                                to_dir / ctry / 'IMPORT.csv', imp_fn, col_type)
        if file_tree[ctry]['export_paths']:
            _merge_version_cols(file_tree[ctry]['export_paths'],
                                to_dir / ctry / 'EXPORT.csv', exp_fn, col_type)
    return


def _write_monthly_tables(file_list, out_dir, month_from_name, col_type):
    """Re-save each CSV in *file_list* under *out_dir*, adding a 'version'
    column set to the YYYYMM month parsed from its file name.

    Parameters
    ----------
    file_list : list
        Paths of the monthly CSV files to process (one output per input).
    out_dir : pathlib.Path
        Directory the re-saved files are written into (created if missing).
    month_from_name : callable
        Extracts the YYYYMM month string from a bare file name.
    col_type : dict
        Column dtype mapping passed straight to pandas.read_csv.
    """
    for file_pth in file_list:
        logger.info(f'read data: {file_pth}')
        file_name = str(file_pth).split(os.sep)[-1]
        version_pth = out_dir / file_name
        mth = month_from_name(str(file_pth).split('/')[-1])
        print(mth)
        tab_tmp = pd.read_csv(file_pth, encoding='utf-8', header=0, dtype=col_type)
        tab_tmp['version'] = mth
        os.makedirs(os.sep.join(str(version_pth).split(os.sep)[:-1]), exist_ok=True)
        tab_tmp.to_csv(version_pth, sep=',', index=False, header=True, encoding='utf-8')
        logger.info(f'data: {tab_tmp.shape}')
        logger.info(f'new data save: {version_pth}')


def get_data_month2(from_dir, to_dir, country_list, country36_cols_dict):
    """
    Per country, re-save each monthly file as a full table with a 'version'
    column added (no merging across months). Outputs mirror the input files
    under to_dir/<country>/import_paths/ and to_dir/<country>/export_paths/.

    Parameters
    ----------
    from_dir : pathlib.Path
        Root directory containing one sub-directory per country.
    to_dir : pathlib.Path
        Root directory for the per-file outputs.
    country_list : list of str
        Country names (Chinese) to process.
    country36_cols_dict : dict
        Kept for interface compatibility; the original code read it into a
        local that was never used, so it is currently ignored.
    """
    country_list_all = ['阿根廷', '埃塞俄比亚', '巴基斯坦', '巴拉圭', '巴拿马', '博兹瓦纳', '俄罗斯陆运', '厄瓜多尔'
                        , '菲律宾', '哥伦比亚', '哥斯达黎加', '哈萨克斯坦', '加纳', '喀麦隆', '科特迪瓦', '肯尼亚'
                        , '莱索托', '马拉维', '美国', '孟加拉', '秘鲁', '秘鲁-海运', '秘鲁-空运', '墨西哥'
                        , '纳米比亚', '尼日利亚', '斯里兰卡', '坦桑尼亚', '乌干达', '乌克兰', '乌拉圭', '乌兹别克斯坦'
                        , '印度', '英国', '越南', '智利']
    # Month at the start of the file name (19 countries).
    country_list1 = ['秘鲁', '墨西哥', '孟加拉', '哥斯达黎加', '巴基斯坦', '埃塞俄比亚', '阿根廷'
                    , '乌拉圭', '厄瓜多尔', '巴拉圭', '纳米比亚', '坦桑尼亚', '斯里兰卡', '巴拿马'
                    , '英国', '哈萨克斯坦', '乌兹别克斯坦', '哥伦比亚', '智利']
    # Month at the end of the file name, before the extension (11 countries).
    country_list2 = ['秘鲁-空运', '秘鲁-海运', '科特迪瓦', '莱索托', '乌干达', '乌克兰'
                    , '加纳', '喀麦隆', '菲律宾', '马拉维', '博兹瓦纳']
    # Countries with bespoke file-name formats, handled individually below (6).
    country_list3 = ['越南', '尼日利亚', '美国', '印度', '俄罗斯陆运', '肯尼亚']

    # get_dir_files is kept for parity with the original code even though its
    # results are not used here; file_tree drives the actual processing.
    dir_paths, file_paths = get_dir_files(from_dir)
    file_tree = distinct_ie_data(from_dir, country_list_all)
    col_type = get_col_str_type(config.target_cols)

    # Month extractors, each taking the bare file name.
    month_head = lambda name: name[0:6]                     # YYYYMM prefix
    month_tail = lambda name: name.split('.')[0][-6:]       # YYYYMM suffix before extension
    month_field2 = lambda name: name.split('_')[2][0:6]     # third '_'-separated field
    # Russia land transport: fields are <..>_<part1>_<part2>_..., month = part2+part1.
    month_ru = lambda name: name.split('_')[2] + name.split('_')[1]

    for i in tqdm(range(len(country_list))):
        ctry = country_list[i]
        logger.info(ctry)
        if ctry in country_list1:
            imp_fn, exp_fn = month_head, month_head
        elif ctry in country_list2:
            imp_fn, exp_fn = month_tail, month_tail
        elif ctry in ('越南', '肯尼亚'):
            imp_fn, exp_fn = month_field2, month_field2
        elif ctry == '尼日利亚':
            imp_fn, exp_fn = month_head, month_tail
        elif ctry == '美国':
            imp_fn, exp_fn = (lambda name: name[2:8]), month_head
        elif ctry == '印度':
            # NOTE(review): here India uses the third '_' field, while
            # get_data_month uses the fourth -- original behavior preserved;
            # confirm which index matches the actual file naming.
            imp_fn, exp_fn = month_field2, month_field2
        elif ctry == '俄罗斯陆运':
            imp_fn, exp_fn = month_ru, month_ru
        else:
            # Country not in any known group: nothing to do (same as original).
            continue
        if file_tree[ctry]['import_paths']:
            _write_monthly_tables(file_tree[ctry]['import_paths'],
                                  to_dir / ctry / 'import_paths', imp_fn, col_type)
        if file_tree[ctry]['export_paths']:
            _write_monthly_tables(file_tree[ctry]['export_paths'],
                                  to_dir / ctry / 'export_paths', exp_fn, col_type)
    return


def comp_stat(tab_stat, file_path):
    """Summarize one trade table into a flat dict: the file path, the row
    count, non-empty counts for the descriptive columns, and rounded totals
    for the numeric columns. Key order matches downstream expectations.
    """
    # (output key, source column) pairs -- counted as non-NaN occurrences.
    count_fields = [
        ('启运港', 'portofloading'),
        ('目的港', 'portofdestination'),
        ('运输方式', 'transportterm'),
        ('成交方式', 'tradeterm'),
        ('付款方式', 'paymentterm'),
        ('承运人名称', 'carrier'),
        ('集装箱箱号', 'containerno'),
        ('船名', 'vesselname'),
        ('品牌', 'brand'),
    ]
    # (output key, source column) pairs -- summed as floats, rounded to 2 dp.
    sum_fields = [
        ('cif总价', 'totalcifvalue'),
        ('fob总价', 'totalfobvalue'),
        ('毛重', 'grossweight'),
        ('重量', 'netweight'),
        ('数量', 'quantity'),
    ]
    stat_result = {'数据路径': str(file_path), '数据量': tab_stat.shape[0]}
    for label, col in count_fields:
        stat_result[label] = sum(1 for v in tab_stat[col].values if not is_nan(v))
    for label, col in sum_fields:
        stat_result[label] = round(tab_stat[col].astype(float).sum(), 2)
    return stat_result


def stat_result(file_paths):
    """Normalize each data file and collect per-file statistics.

    Files are processed in chunks of ``N_files`` via
    ``data_normalization_5files``; the remainder is handled one file at a
    time with ``data_normalization``. Russian land-transport files
    ('俄罗斯陆运') mix imports and exports, so they are split on the
    'iesign' column and reported twice, once per direction.

    Returns:
        (stat_lst, ctry_mapping): list of comp_stat dicts, and the
        country-mapping dicts from each chunked normalization call.
    """
    start = time.time()
    N_files = 5
    M = len(file_paths) // N_files
    stat_lst = []
    ctry_mapping = []
    for i in tqdm(range(M)):
        file_list = file_paths[(i*N_files):((i+1)*N_files)]
        tab_lst, country_mapping_dict = data_normalization_5files(file_list)
        ctry_mapping.append(country_mapping_dict)
        for j in range(N_files):
            f_path = file_list[j]
            if '俄罗斯陆运' in str(f_path):
                # BUG FIX: 'iesign' was referenced as a bare name (NameError
                # at runtime); it is the import/export flag column.
                for flag in ('I', 'E'):
                    f_tab = tab_lst[j].loc[tab_lst[j]['iesign'] == flag, :]
                    stat_dict = comp_stat(f_tab, f_path)
                    stat_dict.update({"进出口标志": flag})
                    stat_lst.append(stat_dict)
            else:
                stat_lst.append(comp_stat(tab_lst[j], f_path))

    # Remaining files that do not fill a whole chunk.
    # NOTE(review): their country_mapping_dict is discarded here, as in the
    # original -- confirm whether it should also be appended to ctry_mapping.
    if file_paths[M*N_files:len(file_paths)]:
        for f_path in tqdm(file_paths[M*N_files:len(file_paths)]):
            tab_stat, country_mapping_dict = data_normalization(f_path)
            if '俄罗斯陆运' in str(f_path):
                # BUG FIX: same bare-name 'iesign' defect as above.
                for flag in ('I', 'E'):
                    f_tab = tab_stat.loc[tab_stat['iesign'] == flag, :]
                    stat_dict = comp_stat(f_tab, f_path)
                    stat_dict.update({"进出口标志": flag})
                    stat_lst.append(stat_dict)
            else:
                stat_lst.append(comp_stat(tab_stat, f_path))

    end = time.time()
    logger.info(f'All Normal process and statistics steps takes {round((end-start)/60,2)} minutes')
    return stat_lst, ctry_mapping


def get_hs_mapping(from_dir, to_dir, country_list):
    """Collect unique (hscode, hscodedescription) pairs per country and
    write them to ``to_dir / '<country>_hs_mapping.csv'``.

    Only codes whose stripped length is at least 6 characters are kept.
    Countries not in the known country list produce an empty mapping file.
    """
    country_list_all = ['阿根廷', '埃塞俄比亚', '巴基斯坦', '巴拉圭', '巴拿马', '博兹瓦纳', '俄罗斯陆运', '厄瓜多尔'
                        , '菲律宾', '哥伦比亚', '哥斯达黎加', '哈萨克斯坦', '加纳', '喀麦隆', '科特迪瓦', '肯尼亚'
                        , '莱索托', '马拉维', '美国', '孟加拉', '秘鲁', '秘鲁-海运', '秘鲁-空运', '墨西哥'
                        , '纳米比亚', '尼日利亚', '斯里兰卡', '坦桑尼亚', '乌干达', '乌克兰', '乌拉圭', '乌兹别克斯坦'
                        , '印度', '英国', '越南', '智利']

    dir_paths, file_paths = get_dir_files(from_dir)
    file_tree = distinct_ie_data(from_dir, country_list_all)
    col_type = get_col_str_type(config.target_cols)
    hs_cols = ['hscode', 'hscodedescription']

    def _collect(file_pth):
        # Read one CSV and return its unique hs pairs with a code >= 6 chars.
        logger.info(f'data from: {file_pth}')
        tab_tmp = pd.read_csv(file_pth, encoding='utf-8', header=0, dtype=col_type)
        tab_tmp = tab_tmp.loc[:, hs_cols]
        hs_tmp = tab_tmp.drop_duplicates(subset=hs_cols)
        # BUG FIX: the length filter's result was previously discarded,
        # so short/invalid codes leaked into the mapping file.
        hs_tmp = hs_tmp.loc[hs_tmp['hscode'].str.strip().str.len() >= 6, :]
        logger.info(f'table shape: {tab_tmp.shape}')
        logger.info(f'hs unique shape: {hs_tmp.shape}')
        return hs_tmp

    for ctry in country_list:
        logger.info(ctry)
        hs_pth = to_dir / f'{ctry}_hs_mapping.csv'
        hs_unique = pd.DataFrame(columns=hs_cols)
        if ctry in country_list_all:
            # .get(...) or []: some countries (e.g. '喀麦隆') only carry an
            # 'import_paths' entry; a direct key lookup would raise KeyError.
            for side in ('import_paths', 'export_paths'):
                for file_pth in file_tree[ctry].get(side) or []:
                    hs_unique = pd.concat([hs_unique, _collect(file_pth)], axis=0, ignore_index=True)
        hs_unique.drop_duplicates(subset=hs_cols, inplace=True)
        hs_unique['country'] = ctry
        os.makedirs(os.sep.join(str(hs_pth).split(os.sep)[:-1]), exist_ok=True)
        hs_unique.loc[:, ['country']+hs_cols].to_csv(hs_pth, sep=',', index=False, header=True, encoding='utf-8')
        logger.info(f'hs unique shape: {hs_unique.shape}')
        logger.info(f'new data save: {hs_pth}')
    return


def extract_float_before_second_dot(s):
    """Parse a float from ``s``, truncating at the second '.' if present.

    With fewer than two dots, the whole string must be a plain signed
    number; otherwise NaN is returned. With two or more dots, only the
    prefix before the second dot is validated and parsed (repairing
    values like '12.34.56' -> 12.34).
    """
    s = str(s)
    if s.count('.') < 2:
        # Zero or one dot: accept only a plain signed decimal number.
        return float(s) if re.match(r'^-?\d+(\.\d+)?$', s) else np.nan

    # Locate the second dot and keep everything before it.
    first_dot = s.find('.')
    second_dot = s.find('.', first_dot + 1)
    part = s[:second_dot]

    # BUG FIX: validate the truncated prefix, not the full string -- the
    # full string always contains >=2 dots here, could never match, and
    # this branch previously returned 0.0 unconditionally.
    if re.match(r'^-?\d+(\.\d+)?$', part):
        return float(part)
    # NOTE(review): invalid multi-dot input yields 0.0 while invalid
    # single-dot input yields NaN -- asymmetry preserved; confirm intended.
    return 0.0


def get_stat_df(tab_df, stat_cols):
    """Aggregate per-'iesign' row counts and numeric totals.

    Mutates ``tab_df``: the five numeric columns are coerced to floats via
    ``extract_float_before_second_dot`` (repairing values that contain a
    stray second dot) before summing.

    Args:
        tab_df: raw table with at least 'iesign' and the numeric columns.
        stat_cols: ordered column subset to return in the stats frame.

    Returns:
        (stats, corrected): stats has one row per import/export flag,
        restricted to ``stat_cols``; corrected is ``tab_df`` limited to
        ``config.target_cols`` minus the last two (currency) columns.
    """
    # Row count per import/export flag.
    counts = tab_df.groupby(by=['iesign'], as_index=True).agg({'iesign': 'count'})
    counts.rename(columns={'iesign': '数据量'}, inplace=True)

    # (source column, Chinese label) pairs summed after float repair.
    numeric_fields = [
        ('totalcifvalue', 'cif总价'),
        ('totalfobvalue', 'fob总价'),
        ('grossweight', '毛重'),
        ('netweight', '重量'),
        ('quantity', '数量'),
    ]
    pieces = [counts]
    for col, label in numeric_fields:
        # In-place coercion is intentional: callers persist the repaired table.
        tab_df[col] = [extract_float_before_second_dot(x) for x in tab_df[col].values]
        agg = tab_df.groupby(by=['iesign'], as_index=True).agg({col: 'sum'})
        agg.rename(columns={col: label}, inplace=True)
        pieces.append(agg)

    tmp = pd.concat(pieces, axis=1)
    tmp.reset_index(level=0, inplace=True)
    tmp.rename(columns={'iesign': '进出口'}, inplace=True)
    # NOTE(review): stat_cols entries not produced here (e.g. '启运港')
    # would raise KeyError on modern pandas -- confirm callers only pass
    # the computed columns, as compute_stat4 does.
    return tmp.loc[:, stat_cols], tab_df.loc[:, config.target_cols[0:-2]]


def get_filling_rate_df(tab_df, stat_cols):
    """Per-'iesign' fill counts for the key reporting columns.

    For most columns a cell counts when it is non-NaN; the two
    forwarder-agent columns count only explicit 1 / '1' flag values.
    Mutates ``tab_df`` by adding one '<col>_tag' helper column per field.

    Args:
        tab_df: raw table with 'iesign' and the listed columns.
        stat_cols: ordered column subset to return.

    Returns:
        DataFrame with one row per import/export flag, restricted to
        ``stat_cols``.
    """
    # Row count per import/export flag.
    counts = tab_df.groupby(by=['iesign'], as_index=True).agg({'iesign': 'count'})
    counts.rename(columns={'iesign': '数据量'}, inplace=True)

    # (source column, Chinese label, counting mode) triples.
    fields = [
        ('countrycodeofdelivery', '目的国国际编码', 'filled'),
        ('origincountrycode', '原产国国际编码', 'filled'),
        ('loadingcountrycode', '启运国国际编码', 'filled'),
        ('hscode', 'hs编码', 'filled'),
        ('hscodedescription', 'hs编码描述', 'filled'),
        ('commoditydescription', '产品描述', 'filled'),
        ('totalcifvalue', 'cif总价', 'filled'),
        ('totalfobvalue', 'fob总价', 'filled'),
        ('grossweight', '毛重', 'filled'),
        ('netweight', '重量', 'filled'),
        ('teu', 'TEU', 'filled'),
        ('quantity', '数量', 'filled'),
        ('quantityunit', '数量单位', 'filled'),
        ('importername', '采购商名称', 'filled'),
        ('importeraddress', '采购商地址', 'filled'),
        ('importercontact', '采购商联系方式', 'filled'),
        ('importer_forwarderagent', '采购商货代公司标签', 'flag'),
        ('suppliername', '供应商名称', 'filled'),
        ('supplieraddress', '供应商地址', 'filled'),
        ('suppliercontact', '供应商联系方式', 'filled'),
        ('supplier_forwarderagent', '供应商货代公司标签', 'flag'),
        ('transportterm', '运输方式', 'filled'),
        ('tradeterm', '成交方式', 'filled'),
        ('paymentterm', '付款方式', 'filled'),
        ('portofloading', '启运港', 'filled'),
        ('portofdestination', '目的港', 'filled'),
        ('containerno', '集装箱箱号', 'filled'),
    ]
    pieces = [counts]
    for col, label, mode in fields:
        tag = f'{col}_tag'
        if mode == 'flag':
            # Forwarder-agent columns count explicit 1/'1' flags only.
            tab_df[tag] = [1 if (x == 1) or (x == '1') else 0 for x in tab_df[col].values]
        else:
            tab_df[tag] = [1 if not is_nan(x) else 0 for x in tab_df[col].values]
        agg = tab_df.groupby(by=['iesign'], as_index=True).agg({tag: 'sum'})
        agg.rename(columns={tag: label}, inplace=True)
        pieces.append(agg)

    tmp = pd.concat(pieces, axis=1)
    tmp.reset_index(level=0, inplace=True)
    tmp.rename(columns={'iesign': '进出口'}, inplace=True)
    return tmp.loc[:, stat_cols]


def compute_stat(country_list, csv_files_path, pth_change_tag='20251t3_csv_correct'):
    """Compute per-file statistics and write corrected CSV copies.

    For every country directory under ``csv_files_path``, each CSV is
    read, summarized by get_stat_df, and its repaired table is written to
    a mirror path where '20251t3_csv_save' is replaced by
    ``pth_change_tag``.

    Returns a DataFrame with one statistics row per file and flag.
    """
    stat_cols = ['进出口', '数据量', '启运港', '目的港', '运输方式', '成交方式', '付款方式', '承运人名称',
           '集装箱箱号', '船名', '品牌', 'cif总价', 'fob总价', '毛重', '重量', '数量', '国家', '文件路径']
    stat_rslt = pd.DataFrame(columns=stat_cols)
    for ctry in tqdm(country_list):
        tmp_path = csv_files_path / ctry
        _, file_paths = get_dir_files(tmp_path)
        col_type = get_col_str_type(config.target_cols)
        for file_path in tqdm(file_paths):
            correct_save_path = str(file_path).replace('20251t3_csv_save', pth_change_tag)
            os.makedirs(os.sep.join(correct_save_path.split(os.sep)[:-1]), exist_ok=True)
            correct_save_path = Path(correct_save_path)
            logger.info(file_path)
            tab_tmp = pd.read_csv(file_path, encoding='utf-8', header=0, dtype=col_type)
            # BUG FIX: get_stat_df requires the stat column list; it was
            # previously called with one argument (TypeError at runtime).
            # Passing stat_cols[0:-2] matches compute_stat2's convention.
            # NOTE(review): columns such as '启运港' are not produced by
            # get_stat_df -- confirm against compute_stat4's reduced list.
            stat_tmp, crct_tab = get_stat_df(tab_tmp, stat_cols[0:-2])
            stat_tmp['国家'] = ctry
            stat_tmp['文件路径'] = file_path
            stat_rslt = pd.concat([stat_rslt, stat_tmp], axis=0, ignore_index=True)
            crct_tab.to_csv(correct_save_path, sep=',', index=False, header=True, encoding='utf-8')
    return stat_rslt

def compute_stat2(country_list, correct_csv_path):
    """Collect per-file statistics from already-corrected country CSVs.

    Unlike compute_stat, this reads from ``correct_csv_path`` and writes
    nothing back; it only accumulates the statistics rows.

    Returns a DataFrame with one statistics row per file and flag.
    """
    stat_cols = ['进出口', '数据量', '启运港', '目的港', '运输方式', '成交方式', '付款方式'
                 , '承运人名称', '集装箱箱号', '船名', '品牌'
                 , 'cif总价', 'fob总价', '毛重', '重量', '数量', '国家', '文件路径']
    stat_rslt = pd.DataFrame(columns=stat_cols)
    for ctry in tqdm(country_list):
        tmp_path = correct_csv_path / ctry
        _, file_paths = get_dir_files(tmp_path)
        # Corrected files carry target_cols minus the two currency columns.
        col_type = get_col_str_type(config.target_cols[0:-2])
        for file_path in tqdm(file_paths):
            logger.info(file_path)
            tab_tmp = pd.read_csv(file_path, encoding='utf-8', header=0, dtype=col_type)
            # The corrected table returned by get_stat_df is discarded here;
            # only the statistics row is kept.
            stat_tmp, crct_tab = get_stat_df(tab_tmp, stat_cols[0:-2])
            stat_tmp['国家'] = ctry
            stat_tmp['文件路径'] = file_path
            stat_rslt = pd.concat([stat_rslt, stat_tmp], axis=0, ignore_index=True)
    return stat_rslt

def compute_stat3(country_list, correct_csv_path):
    """Collect per-file fill-rate statistics for each country's corrected CSVs.

    Returns a DataFrame with one fill-rate row per file and flag.
    """
    stat_cols = ['进出口', '数据量', '目的国国际编码', '原产国国际编码', '启运国国际编码', 'hs编码', 'hs编码描述', '产品描述'
                 , 'cif总价', 'fob总价', '毛重', '重量', 'TEU', '数量', '数量单位', '采购商名称'
                 , '采购商地址', '采购商联系方式', '采购商货代公司标签', '供应商名称', '供应商地址', '供应商联系方式', '供应商货代公司标签', '运输方式'
                 , '成交方式', '付款方式', '启运港', '目的港', '集装箱箱号']
    stat_rslt = pd.DataFrame(columns=stat_cols)
    for ctry in tqdm(country_list):
        ctry_dir = correct_csv_path / ctry
        _, ctry_files = get_dir_files(ctry_dir)
        col_type = get_col_str_type(config.target_cols[0:-2])
        tab_df = pd.DataFrame(columns=config.target_cols[0:-2])
        for file_path in tqdm(ctry_files):
            logger.info(file_path)
            frame = pd.read_csv(file_path, encoding='utf-8', header=0, dtype=col_type)
            # NOTE(review): stat_cols[0:-2] drops '目的港'/'集装箱箱号'
            # from the request -- preserved as-is; confirm intended.
            rates = get_filling_rate_df(frame, stat_cols[0:-2])
            rates['国家'] = ctry
            rates['文件路径'] = file_path
            stat_rslt = pd.concat([stat_rslt, rates], axis=0, ignore_index=True)
    return stat_rslt

def compute_stat4(country_list, from_path, country36_dict):
    """Summarize headerless per-country CSV exports keyed by country code.

    ``country36_dict`` maps a country name to metadata containing
    '国别编码' (country code); files are matched by the code prefix of
    their file name ('<code>_...').

    Returns a DataFrame with one statistics row per file and flag.
    """
    dir_paths, file_paths = get_dir_files(from_path)
    # NOTE(review): file matching splits on '/', which assumes POSIX-style
    # paths -- confirm on Windows (Path(y).name would be portable).
    file_tree = dict()
    for x in country_list:
        # Look the code up once per country instead of twice per file.
        code = country36_dict[x].get('国别编码')
        file_tree[x] = [code, [y for y in file_paths if code == str(y).split('/')[-1].split('_')[0]]]

    column_names = config.target_cols[0:-2]
    stat_cols = ['进出口', '数据量', 'cif总价', 'fob总价', '毛重', '重量', '数量', '国家', '文件路径']
    stat_rslt = pd.DataFrame(columns=stat_cols)
    col_type = get_col_str_type(column_names)  # loop-invariant: hoisted
    for ctry in country_list:
        logger.info(ctry)
        for file_path in file_tree[ctry][1]:
            logger.info(file_path)
            # Files carry no header row; names come from the target schema.
            tab_tmp = pd.read_csv(file_path, header=None, names=column_names
                                  , sep=','
                                  , quotechar='"'
                                  , lineterminator='\n'
                                  , on_bad_lines='skip'
                                  , encoding='utf-8', dtype=col_type)
            logger.info(f'tab_tmp shape: {tab_tmp.shape}')
            stat_tmp, crct_tab = get_stat_df(tab_tmp, stat_cols[0:-2])
            stat_tmp['国家'] = ctry
            stat_tmp['文件路径'] = file_path
            logger.info(f'stat_tmp shape: {stat_tmp.shape}')
            stat_rslt = pd.concat([stat_rslt, stat_tmp], axis=0, ignore_index=True)
    return stat_rslt

def compute_stat5(country_list, from_path, country36_dict):
    """Collect fill-rate statistics for headerless per-country CSV exports.

    Files are matched to countries by the '国别编码' (country code)
    prefix of their file name, as in compute_stat4.

    Returns a DataFrame with one fill-rate row per file and flag.
    """
    dir_paths, file_paths = get_dir_files(from_path)
    file_tree = dict()
    for x in country_list:
        file_tree[x] = [country36_dict[x].get('国别编码'), \
        [y for y in file_paths if country36_dict[x].get('国别编码')==str(y).split('/')[-1].split('_')[0]]]

    column_names = config.target_cols[0:-2]
    stat_cols = ['进出口', '数据量', '目的国国际编码', '原产国国际编码', '启运国国际编码', 'hs编码', 'hs编码描述', '产品描述'
                 , 'cif总价', 'fob总价', '毛重', '重量', 'TEU', '数量', '数量单位', '采购商名称'
                 , '采购商地址', '采购商联系方式', '采购商货代公司标签', '供应商名称', '供应商地址', '供应商联系方式', '供应商货代公司标签', '运输方式'
                 , '成交方式', '付款方式', '启运港', '目的港', '集装箱箱号']
    stat_rslt = pd.DataFrame(columns=stat_cols)
    for ctry in country_list:
        logger.info(ctry)
        matched_files = file_tree[ctry][1]
        col_type = get_col_str_type(column_names)
        tab_df = pd.DataFrame(columns=column_names)
        for file_path in matched_files:
            logger.info(file_path)
            # Headerless files: column names come from the target schema.
            raw = pd.read_csv(file_path, header=None, names=column_names
                              , sep=','
                              , quotechar='"'
                              , lineterminator='\n'
                              , on_bad_lines='skip'
                              , encoding='utf-8', dtype=col_type)
            rates = get_filling_rate_df(raw, stat_cols[0:-2])
            rates['国家'] = ctry
            rates['文件路径'] = file_path
            stat_rslt = pd.concat([stat_rslt, rates], axis=0, ignore_index=True)
    return stat_rslt


def get_type_enumvalue(from_dir, to_dir, country_list):
    """Collect unique transport/trade/payment-term combinations per country
    and write the per-country enumerated values to ``to_dir/type_unique.csv``.

    Returns a DataFrame with one row per country holding the distinct
    value lists for each of the three type columns.
    """
    country_list_all = ['阿根廷', '埃塞俄比亚', '巴基斯坦', '巴拉圭', '巴拿马', '博兹瓦纳', '俄罗斯陆运', '厄瓜多尔'
                        , '菲律宾', '哥伦比亚', '哥斯达黎加', '哈萨克斯坦', '加纳', '喀麦隆', '科特迪瓦', '肯尼亚'
                        , '莱索托', '马拉维', '美国', '孟加拉', '秘鲁', '秘鲁-海运', '秘鲁-空运', '墨西哥'
                        , '纳米比亚', '尼日利亚', '斯里兰卡', '坦桑尼亚', '乌干达', '乌克兰', '乌拉圭', '乌兹别克斯坦'
                        , '印度', '英国', '越南', '智利']

    dir_paths, file_paths = get_dir_files(from_dir)
    file_tree = distinct_ie_data(from_dir, country_list_all)
    col_type = get_col_str_type(config.target_cols)
    type_cols = ['transportterm', 'tradeterm', 'paymentterm']
    type_all_unique = pd.DataFrame(columns=type_cols+['country'])
    type_enumvalue_list = []
    type_unique_pth = to_dir / f'type_unique.csv'

    def _collect(file_pth):
        # Read one CSV and return its distinct type-column combinations.
        logger.info(f'data from: {file_pth}')
        tab_tmp = pd.read_csv(file_pth, encoding='utf-8', header=0, dtype=col_type)
        tab_tmp = tab_tmp.loc[:, type_cols]
        type_tmp = tab_tmp.drop_duplicates(subset=type_cols)
        logger.info(f'table shape: {tab_tmp.shape}')
        logger.info(f'hs unique shape: {type_tmp.shape}')
        return type_tmp

    for ctry in country_list:
        logger.info(ctry)
        type_unique = pd.DataFrame(columns=type_cols)
        if ctry in country_list_all:
            # .get(...) or []: some countries only have an 'import_paths' entry.
            for side in ('import_paths', 'export_paths'):
                for file_pth in file_tree[ctry].get(side) or []:
                    type_unique = pd.concat([type_unique, _collect(file_pth)], axis=0, ignore_index=True)
        type_unique.drop_duplicates(subset=type_cols, inplace=True)
        type_unique['country'] = ctry
        type_enumvalue_list.append([ctry
                                    , list(set(list(type_unique.loc[:, type_cols[0]].values)))
                                    , list(set(list(type_unique.loc[:, type_cols[1]].values)))
                                    , list(set(list(type_unique.loc[:, type_cols[2]].values)))])
        # BUG FIX: this accumulation sat outside the loop, so only the last
        # country's rows ever reached type_all_unique (and an empty
        # country_list raised NameError on the undefined type_unique).
        type_all_unique = pd.concat([type_all_unique, type_unique], axis=0, ignore_index=True)

    type_all_unique.drop_duplicates(subset=type_cols+['country'], inplace=True)

    type_enumvalue_all = pd.DataFrame(type_enumvalue_list, columns=['country']+type_cols)
    os.makedirs(os.sep.join(str(type_unique_pth).split(os.sep)[:-1]), exist_ok=True)
    type_enumvalue_all.to_csv(type_unique_pth, sep=',', index=False, header=True, encoding='utf-8')
    logger.info(f'type_all_unique shape: {type_all_unique.shape}')
    logger.info(f'new data save: {type_unique_pth}')
    return type_enumvalue_all


def get_currency_exchange_rate(start_year, end_year):
    """Fetch official exchange rates (LCU per USD) from the World Bank API.

    Queries indicator ``PA.NUS.FCRF`` for every economy known to wbgapi
    (in batches to keep request URLs small), averages the yearly rates over
    ``[start_year, end_year)`` and returns a DataFrame with columns
    ``['id', 'value', 'region', 'exchange_rate_on_usd']``, keeping only
    economies that actually have rate data.

    :param start_year: first year of the averaging window (inclusive)
    :param end_year: last year of the averaging window (exclusive)
    :return: pandas.DataFrame of per-economy average exchange rates
    """
    # 获取所有国家/地区代码（含非主权地区）
    all_economies = [econ['id'] for econ in wb.economy.list()]
    logger.info(f"共获取 {len(all_economies)} 个国家/地区代码")
    country_info_df = pd.DataFrame([x for x in wb.economy.list()])

    M = 50  # batch size per wbgapi request
    # BUG FIX: was `len(all_economies)//50+1` with a hard-coded 50 (ignoring
    # M) and an off-by-one that requested an empty final batch whenever the
    # economy count was a multiple of the batch size.
    N = math.ceil(len(all_economies) / M)
    indicator = 'PA.NUS.FCRF'  # official exchange-rate indicator
    years = range(start_year, end_year)
    y_cols = [x for x in years]

    # Fetch the rate data batch by batch and concatenate.
    frames = []
    for i in range(N):
        df_tmp = wb.data.DataFrame(
            indicator,
            economy=all_economies[(i * M):((i + 1) * M)],
            time=years,
            skipBlanks=True,     # skip empty values
            numericTimeKeys=True # year columns as numbers
        )
        logger.info(df_tmp.shape)
        df_tmp.reset_index(level=0, inplace=True)
        frames.append(df_tmp)
    df = pd.concat(frames, axis=0, ignore_index=True)
    logger.info(df.shape)

    df1 = pd.merge(country_info_df, df, left_on='id', right_on='economy', how='left')
    # Mean rate across the requested years.
    df1['exchange_rate_on_usd'] = df1.loc[:, y_cols].mean(axis=1).values

    cols = ['id', 'value', 'region', 'exchange_rate_on_usd']
    df2 = df1.loc[df1['exchange_rate_on_usd'].notna(), cols].reset_index(drop=True)
    return df2


def get_currency_list(country_list, from_dir, currency_cols = ['cif_currency', 'fob_currency']):
    """Collect the distinct, upper-cased currency codes across country files.

    Reads every CSV found under ``from_dir/<country>`` for each country in
    *country_list*, upper-cases the values of *currency_cols*, and returns a
    sorted list of unique, stripped currency codes (NaN values excluded).

    :param country_list: countries whose data directories should be scanned
    :param from_dir: root directory (``Path``) containing one dir per country
    :param currency_cols: currency columns to collect values from
    :return: sorted list of distinct currency codes
    """
    start = time.time()
    currency_list = []
    col_type = get_col_str_type(config.target_cols)
    for cntry in country_list:
        cntry_pth = from_dir / cntry
        _, file_paths = get_dir_files(cntry_pth)
        for pth in file_paths:
            logger.info(f'the path : {pth}')
            tab_tmp = pd.read_csv(pth
                         , header=0
                         , sep=','
                         , quotechar='"'
                         , lineterminator='\n'
                         , on_bad_lines='skip'
                         , encoding='utf-8'
                         , dtype=col_type)
            tab_tmp = tab_tmp.loc[:, currency_cols]
            for col in currency_cols:
                tab_tmp[col] = tab_tmp[col].str.upper()
            # Flatten both currency columns into one de-duplicated list.
            dt_currency = [x for x in list(set(flatten(tab_tmp.values))) if not is_nan(x)]
            logger.info(f'currency num : {len(dt_currency)}')
            currency_list += dt_currency
    currency_list = list(set([x.strip() for x in currency_list]))
    currency_list.sort()
    logger.info(f'currency_list num : {len(currency_list)}')
    end = time.time()
    # BUG FIX: message previously said "countries" (copy-paste from the
    # country-extraction routine); this function collects currencies.
    logger.info(f'get all distinct currencies takes {round(end-start)} seconds')
    return currency_list


def replace_currency_to_usd(country_list, currency_on_usd_dict, from_dir, to_dir):
    """Convert CIF and FOB values to USD and re-save each country's files.

    For every CSV under ``from_dir/<country>``, divides ``totalcifvalue`` /
    ``totalfobvalue`` by the rate found in *currency_on_usd_dict* for the
    row's currency code, then writes the result (fully quoted, with header)
    to the mirrored path under *to_dir*.

    :return: number of countries processed
    """
    t0 = time.time()
    col_type = get_col_str_type(config.target_cols)

    def _to_usd(raw_value, currency):
        # Only convert when both the value and its currency are present;
        # otherwise pass the original value through unchanged.
        if is_nan(raw_value) or is_nan(currency):
            return raw_value
        return float(str(raw_value).replace(',', '')) / currency_on_usd_dict[currency]

    for country in country_list:
        _, file_paths = get_dir_files(from_dir / country)
        for pth in file_paths:
            logger.info(f'the path : {pth}')
            frame = pd.read_csv(pth
                                , header=0
                                , sep=','
                                , quotechar='"'
                                , lineterminator='\n'
                                , on_bad_lines='skip'
                                , encoding='utf-8'
                                , dtype=col_type)
            frame['totalcifvalue'] = [_to_usd(val, cur) for val, cur in frame.loc[:, ['totalcifvalue', 'cif_currency']].values]
            frame['totalfobvalue'] = [_to_usd(val, cur) for val, cur in frame.loc[:, ['totalfobvalue', 'fob_currency']].values]

            file_save_path = Path(str(pth).replace(str(from_dir), str(to_dir)))
            os.makedirs(os.sep.join(str(file_save_path).split(os.sep)[:-1]), exist_ok=True)
            frame.loc[:, config.target_cols[0:-2]].to_csv(file_save_path, sep=','
                                                          , index=False
                                                          , quotechar='"'         # quote character
                                                          , quoting=csv.QUOTE_ALL # force-quote every field
                                                          , lineterminator='\n', header=True, encoding='utf-8')
            logger.info(f'save path: {file_save_path}')
            logger.info(f'currency transform done, the data shape is {frame.shape}')

    logger.info(f'replace currency to USD takes {round(time.time() - t0)} seconds')
    return len(country_list)

def replace_tab_to_norm(country_list, from_path, to_path, country36_dict):
    """Re-save raw header-less country exports as normalized, headered CSVs.

    Each file under *from_path* is matched to a country via the country-code
    prefix of its file name (``<code>_...``, code taken from
    ``country36_dict[country]['国别编码']``).  Matched files are read with the
    standard target column names, ``\\N`` placeholders are replaced with NaN,
    and the result is written fully quoted (with a header row) to *to_path*.
    """
    dir_paths, file_paths = get_dir_files(from_path)
    column_names = config.target_cols[0:-2]
    col_type = get_col_str_type(config.target_cols[0:-2])

    # country -> [country code, files whose name starts with that code]
    file_tree = dict()
    for x in country_list:
        code = country36_dict[x].get('国别编码')
        # Use os.sep (not a hard-coded '/') so path splitting is consistent
        # with the rest of this module and portable across platforms.
        file_tree[x] = [code,
                        [y for y in file_paths
                         if code == str(y).split(os.sep)[-1].split('_')[0]]]

    # Batch processing
    start = time.time()
    for ctry in country_list:
        logger.info(ctry)
        # BUG FIX: the original tested `file_tree[ctry]`, which is always a
        # truthy [code, files] pair, so the "not in dataset" branch could
        # never fire.  Test the file list itself instead.
        if file_tree[ctry][1]:
            for file_path in file_tree[ctry][1]:
                logger.info(file_path)
                imp_save_path = to_path / str(file_path).split(os.sep)[-1]
                tab_tmp = pd.read_csv(file_path, header=None, names=column_names
                                      , sep=','
                                      , quotechar='"'
                                      , lineterminator='\n'
                                      , on_bad_lines='skip'
                                      , encoding='utf-8', dtype=col_type)
                os.makedirs(os.sep.join(str(imp_save_path).split(os.sep)[:-1]), exist_ok=True)
                logger.info(f'table shape: {tab_tmp.shape}')
                # Replace the raw export's null marker with a real NaN.
                tab_tmp.replace("\\N", np.nan, inplace=True)
                logger.info(f'transformed table shape: {tab_tmp.shape}')
                tab_tmp = tab_tmp.loc[:, column_names]
                tab_tmp.to_csv(imp_save_path, sep=','
                               , index=False
                               , quotechar='"'         # quote character
                               , quoting=csv.QUOTE_ALL # force-quote every field
                               , lineterminator='\n', header=True, encoding='utf-8')
                logger.info(f'new data saved: {imp_save_path}')
        else:
            logger.info(f'{ctry} is not in dataset')
    end = time.time()
    logger.info(f'all data takes {round((end - start)/60, 2)} minutes')
    return

def get_transtype_list(country_list, from_dir, transtype_cols = ['transportterm']):
    """Collect the distinct, upper-cased transport-term values across files.

    Reads every CSV under ``from_dir/<country>`` for each country in
    *country_list*, upper-cases the values of *transtype_cols*, and returns
    a sorted list of unique, stripped transport terms (NaN values excluded).

    :param country_list: countries whose data directories should be scanned
    :param from_dir: root directory (``Path``) containing one dir per country
    :param transtype_cols: transport-term columns to collect values from
    :return: sorted list of distinct transport-term strings
    """
    start = time.time()
    transtype_list = []
    col_type = get_col_str_type(config.target_cols)
    for cntry in country_list:
        cntry_pth = from_dir / cntry
        _, file_paths = get_dir_files(cntry_pth)
        for pth in tqdm(file_paths):
            logger.info(f'the path : {pth}')
            tab_tmp = pd.read_csv(pth
                         , header=0
                         , sep=','
                         , quotechar='"'
                         , lineterminator='\n'
                         , on_bad_lines='skip'
                         , encoding='utf-8'
                         , dtype=col_type)
            tab_tmp = tab_tmp.loc[:, transtype_cols]
            for col in transtype_cols:
                tab_tmp[col] = tab_tmp[col].str.upper()
            # Flatten the selected columns into one de-duplicated list.
            dt_transtype = [x for x in list(set(flatten(tab_tmp.values))) if not is_nan(x)]
            logger.info(f'transtype num : {len(dt_transtype)}')
            transtype_list += dt_transtype
    transtype_list = list(set([x.strip() for x in transtype_list]))
    transtype_list.sort()
    logger.info(f'transtype_list num : {len(transtype_list)}')
    end = time.time()
    # BUG FIX: message previously said "countries" (copy-paste from the
    # country-extraction routine); this function collects transport terms.
    logger.info(f'get all distinct transport terms takes {round(end-start)} seconds')
    return transtype_list

def replace_transtype(country_list, transtype_mapping, from_dir, to_dir):
    """Normalize transport-term values via a mapping table and re-save files.

    For every CSV under ``from_dir/<country>``, upper-cases ``transportterm``,
    maps each term through *transtype_mapping* (terms whose mapped value is
    missing keep their original value), and writes the result fully quoted
    to the mirrored path under *to_dir*.

    :return: number of countries processed
    """
    t0 = time.time()
    col_type = get_col_str_type(config.target_cols)

    def _map_term(term):
        # Keep the original value when either the term or its mapped value
        # is missing — same guard as a lookup-then-check on the mapping.
        if is_nan(term) or is_nan(transtype_mapping[term]):
            return term
        return transtype_mapping[term]

    for country in country_list:
        _, file_paths = get_dir_files(from_dir / country)
        for pth in file_paths:
            logger.info(f'the path : {pth}')
            frame = pd.read_csv(pth
                                , header=0
                                , sep=','
                                , quotechar='"'
                                , lineterminator='\n'
                                , on_bad_lines='skip'
                                , encoding='utf-8'
                                , dtype=col_type)

            frame['transportterm'] = frame['transportterm'].str.upper()

            seen_terms = [t for t in set(flatten(frame.loc[:, ['transportterm']].values)) if not is_nan(t)]
            # NOTE: intentionally mutates the caller's mapping so unseen
            # terms get a '' default and later lookups cannot KeyError.
            for term in seen_terms:
                transtype_mapping.setdefault(term, '')

            frame['transportterm'] = frame['transportterm'].apply(_map_term)

            file_save_path = Path(str(pth).replace(str(from_dir), str(to_dir)))
            os.makedirs(os.sep.join(str(file_save_path).split(os.sep)[:-1]), exist_ok=True)
            frame.loc[:, config.target_cols[0:-2]].to_csv(file_save_path, sep=','
                                                          , index=False
                                                          , quotechar='"'         # quote character
                                                          , quoting=csv.QUOTE_ALL # force-quote every field
                                                          , lineterminator='\n', header=True, encoding='utf-8')

            logger.info(f'save path: {file_save_path}')
            logger.info(f'transtype mapping done, the data shape is {frame.shape}')

    logger.info(f'transtype mapping takes {round(time.time() - t0)} seconds')
    return len(country_list)


def mapping_forwarderagent(cmpt_nm, choices, cutoff=0.8):
    """Return 1 if *cmpt_nm* fuzzily matches any name in *choices*, else 0.

    Uses ``difflib.get_close_matches`` (ratio-based similarity); *cutoff*
    in [0, 1] is the minimum similarity required to count as a match.

    :param cmpt_nm: company name to look up
    :param choices: candidate forwarder-agent names
    :param cutoff: minimum similarity ratio for a match
    :return: 1 on match, 0 otherwise
    """
    best = difflib.get_close_matches(cmpt_nm, choices, n=1, cutoff=cutoff)
    return 1 if best else 0


def comp_forwarderagent(country_list, hd_list, from_dir, to_dir, cutoff=0.9):
    """Flag importers/suppliers that fuzzily match a forwarder-agent list.

    For every CSV under ``from_dir/<country>``, sets the binary columns
    ``importer_forwarderagent`` / ``supplier_forwarderagent`` to 1 when the
    upper-cased company name has a difflib match in *hd_list* at *cutoff*,
    then writes the file to the mirrored path under *to_dir*.

    NOTE(review): names are upper-cased before matching, so *hd_list* is
    presumably already upper-case — confirm with callers.

    :return: number of countries processed
    """
    start = time.time()
    choices = hd_list
    for cntry in country_list:
        cntry_pth = from_dir / cntry
        _, file_paths = get_dir_files(cntry_pth)
        for pth in file_paths:
            logger.info(pth)
            df_tmp = pd.read_csv(pth)
            df_tmp['importer_forwarderagent'] = df_tmp['importername'].fillna('').astype(str).str.upper().apply(lambda x: mapping_forwarderagent(x, choices, cutoff))
            df_tmp['supplier_forwarderagent'] = df_tmp['suppliername'].fillna('').astype(str).str.upper().apply(lambda x: mapping_forwarderagent(x, choices, cutoff))
            file_save_path = Path(str(pth).replace(str(from_dir),str(to_dir)))
            os.makedirs(os.sep.join(str(file_save_path).split(os.sep)[:-1]), exist_ok=True)
            df_tmp.to_csv(file_save_path, sep=','
                          , index=False
                          , quotechar='"'         # quote character
                          , quoting=csv.QUOTE_ALL # force-quote every field
                          , lineterminator='\n', header=True, encoding='utf-8')
            # BUG FIX: log the destination path; the original logged the
            # source path `pth`, which had not changed.
            logger.info(f'saved to : {file_save_path}')
    end = time.time()
    logger.info(f'country list {country_list} takes {round(end-start)} seconds')
    return len(country_list)






##


