import os
import re
import copy
import pandas as pd
from collections import Counter
import uuid
import Levenshtein
import jieba
import requests
from file_encryptor import encryptor
from bs4 import BeautifulSoup
# import hanlp
# han_tok = hanlp.load(hanlp.pretrained.tok.COARSE_ELECTRA_SMALL_ZH)



# Column layouts for the knowledge DataFrames passed around this module.
know_df_cols = ['path', 'content', 'linkage', 'summary', 'keywords', 'know_id']
all_df_cols = ['content', 'path', 'type', 'length', 'keywords', 'summary', 'know_id', 'tokens']
match_df_cols = ['path', 'content', 'type', 'length', 'keywords', 'summary', 'tokens', 'know_id', 'intention']
# NOTE(review): per its name, marks columns that may be empty; not referenced in this chunk — confirm.
optional_set = {'keywords', 'summary'}

# Separator used in '-->'-joined logical paths (see parse_fragment_path).
SPLIT_CHAR = '-->'
# Sentinel for empty directories; not referenced in this chunk.
EMPTY_DIR_PLACEHOLDER = '__$$__EMPTY_DIR_PLACEHOLDER__$$__'


def use_llm_api(call_llm, histories, paras, config):
    """Call the LLM through *call_llm* and optionally record the exchange.

    Note: *paras* is mutated — the current histories are injected under the
    'histories' key before the call. Returns (answer, histories).
    """
    paras['histories'] = histories
    answer = call_llm(paras=paras, config=config)

    # Record the turn unless history is disabled or this is the 'if-history' task.
    if paras['task'] not in ['if-history'] and paras.get('use_his'):
        histories.append((paras['query'], answer))
    return answer, histories


def check_internet(url='http://www.baidu.com'):
    """Return True if a GET to *url* succeeds within 5 seconds, else False."""
    try:
        requests.get(url, timeout=5).raise_for_status()
    except requests.RequestException as exc:
        print(f"网络连接失败: {exc}")
        return False
    return True
    

def clean_hhf(text):
    """Normalize '__HHF__' markers: collapse consecutive runs to a single
    marker and guarantee the text starts and ends with exactly one."""
    marker = '__HHF__'
    # A single collapse of any run of 2+ markers also handles redundant
    # markers at the start and end of the text.
    text = re.sub(r'(?:__HHF__){2,}', marker, text)
    if not text.startswith(marker):
        text = marker + text
    if not text.endswith(marker):
        text = text + marker
    return text


def get_node_level(tree, target_node, current_level=0):
    """Return the depth of *target_node* in a nested-dict tree, or None if absent."""
    if target_node in tree:
        return current_level
    for subtree in tree.values():
        if subtree is not None:  # only descend into real children
            found = get_node_level(subtree, target_node, current_level + 1)
            if found is not None:
                return found
    return None


def gen_str_codes(input_string):
    """Deterministically map *input_string* to a UUIDv5 string (DNS namespace)."""
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, input_string))


def postprocess_tb(df):
    """Clean a parsed table: strip newlines from headers and string cells,
    and stringify datetime columns (which can break downstream serialization)."""
    df.columns = [str(col_name).replace('\n', '') for col_name in df.columns]
    df = df.applymap(lambda cell: cell.replace('\n', '') if isinstance(cell, str) else cell)
    datetime_cols = [c for c in df.columns if pd.api.types.is_datetime64_any_dtype(df[c])]
    for c in datetime_cols:
        df[c] = df[c].astype(str)
    return df


def cal_levenshtein_dis(text, target):
    """Return a normalized Levenshtein similarity in [0, 1].

    Bug fix: two empty strings previously raised ZeroDivisionError;
    they are identical, so return 1.0.
    """
    max_len = max(len(text), len(target))
    if max_len == 0:
        return 1.0
    distance = Levenshtein.distance(text, target)
    return 1 - (distance / max_len)


def clean_file(path_, mode='remove', cols=None):
    """Remove a file, or empty it while keeping its structure.

    :param path_: file path to act on
    :param mode: 'remove' deletes the file; 'clean' truncates a .csv to an
        empty DataFrame (headers = *cols* when given, otherwise the existing
        headers); .txt files are deliberately left untouched
    :param cols: optional replacement column list for the emptied csv
    Failures are swallowed on purpose — this is best-effort cleanup.
    """
    try:
        if mode == 'remove':
            os.remove(path_)
        elif mode == 'clean':
            if '.txt' in path_:
                pass  # txt files are intentionally not truncated
            elif '.csv' in path_:
                if encryptor.encrypt:
                    exist_df = encryptor.load_from_file(path_)
                else:
                    exist_df = pd.read_csv(path_, encoding='utf-8')
                if cols is not None:
                    empty_df = pd.DataFrame(columns=cols)
                else:
                    empty_df = pd.DataFrame(columns=exist_df.columns)
                if encryptor.encrypt:
                    encryptor.save_to_file(empty_df, path_)
                else:
                    empty_df.to_csv(path_, index=False)
    except Exception:
        # Best-effort: never propagate filesystem/parse errors to callers.
        # (was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit)
        pass
    
    
def create_reply(contents, intentions):
    """Build a numbered reply string from *contents*; also logs *intentions*."""
    numbered = ['【' + str(idx + 1) + '】' + item + '  \n'
                for idx, item in enumerate(contents)]
    reply = ('为你找到如下信息：\n' + ''.join(numbered)).strip()
    print('\n用户意图：{} \n{}'.format(intentions, reply), '\n')
    return reply


def expand_summary_paths(df, node, split_char='-->', summary_term=''):
    """Collect df paths that contain *node* (excluding summary paths) and
    return the path tails starting at *node* plus the matching sub-DataFrame.

    Bug fix: the tail was re-joined with a hard-coded '-->' even when a
    custom *split_char* was passed; it now uses *split_char*.

    NOTE(review): with the default summary_term='' every path contains the
    empty string, so nothing is ever selected — callers appear to pass a real
    summary marker; confirm before relying on the default.
    """
    paths = list(df['path'].values)
    filtered_paths = []
    cut_filtered_paths = []

    for path in paths:
        nodes = path.split(split_char)
        if node in nodes and summary_term not in path:
            idx = nodes.index(node)
            filtered_paths.append(path)
            cut_filtered_paths.append(split_char.join(nodes[idx:]))

    filtered_df = df[df['path'].isin(filtered_paths)]
    cut_filtered_paths = remove_duplicates_orderkept(cut_filtered_paths)
    return cut_filtered_paths, filtered_df


def extract_nested_dic_vals(obj, values_list=None):
    """Recursively collect all non-blank string values from a nested dict,
    skipping entries whose key is 'id'. Insertion order is preserved."""
    if values_list is None:
        values_list = []
    if isinstance(obj, dict):
        for key, val in obj.items():
            if key == 'id':  # id fields are metadata, not content
                continue
            if isinstance(val, str):
                values_list.append(val)
            elif isinstance(val, dict):
                extract_nested_dic_vals(val, values_list)
            elif isinstance(val, list):
                for element in val:
                    extract_nested_dic_vals(element, values_list)
    # Drop whitespace-only strings before returning.
    return [v for v in values_list if v.strip() != '']


def extract_know(match_dfs, placeholders=['HHF']):
    """Concatenate the raw knowledge text behind each matched row and strip
    placeholder markers from the joined result.

    PTXT rows are used verbatim, TABLE rows are re-read from a CSV and
    rendered as text, IMAGE rows are skipped.

    NOTE(review): KB_PATH is not defined anywhere in this module — the TABLE
    branch would raise NameError unless KB_PATH exists as a global at call
    time; other functions here take KB_PATH as a parameter. Confirm.
    NOTE(review): `placeholders=['HHF']` is a mutable default argument; safe
    here only because it is never mutated.
    """
    raw_texts = []
    for i, row in match_dfs.iterrows():
        kg_contents_ = row['content']
        type = row['type']  # NOTE(review): shadows the builtin `type`
        
        if type=='PTXT':
            raw_texts.append(kg_contents_)
        
        elif type=='TABLE':
            # NOTE(review): uses the type value itself as the csv file name — confirm intended.
            tb_path = os.path.join(KB_PATH, 'tables', (type + '.csv'))
            tb_df = pd.read_csv(tb_path, encoding='utf-8')
            raw_texts.append(tb_df.to_string(index=False))
             
        elif type=='IMAGE':
            continue
    
    content_ = '\n'.join(raw_texts)
    # Remove every (non-empty) placeholder token from the combined text.
    pattern = r'|'.join(re.escape(p) for p in placeholders if not p=='')
    content_ = re.sub(pattern, '', content_)
    return content_, raw_texts


def extract_window(lst, index, num, return_lst=False):
    """Return up to *num* elements before and after lst[index] — as two lists
    when return_lst is True, otherwise as newline-joined strings."""
    before = lst[max(0, index - num):index]
    after = lst[index + 1:min(len(lst), index + num + 1)]
    if return_lst:
        return before, after
    return '\n'.join(before), '\n'.join(after)
    

def extract_keylevels(dic, level=0, result=None):
    """Flatten a nested dict into (key, depth) pairs in pre-order."""
    result = [] if result is None else result
    for key in dic:
        result.append((key, level))
        value = dic[key]
        if isinstance(value, dict):
            extract_keylevels(value, level + 1, result)
    return result


'''methods for file processing'''
def file_lst(origin_path):
    """Return the full paths of files directly inside *origin_path*
    (top level only — subdirectories are not descended into)."""
    res_paths = []
    for root, _, names in os.walk(origin_path):
        if root != origin_path:
            break  # os.walk yields the top directory first; stop after it
        res_paths.extend(os.path.join(origin_path, name) for name in names)
    return res_paths


def find_frequent(lst):
    """Return the most frequent element of *lst* (first-seen wins ties).

    Raises IndexError on an empty list, as before.
    """
    return Counter(lst).most_common(1)[0][0]


def find_similar_bychars(reference, strings):
    """Return the string with the smallest Levenshtein distance to
    *reference* (first minimum wins); None when *strings* is empty."""
    best, best_dist = None, float('inf')
    for candidate in strings:
        dist = Levenshtein.distance(reference, candidate)
        if dist < best_dist:
            best_dist, best = dist, candidate
    return best


def find_matches_parsing(content, path):
    """Extract TABLE_/IMAGE_ link placeholders from *content* as a one-element
    list ('NOLINK' when none); summary paths get a '__SUMMARY__' prefix form."""
    link_pattern = re.compile(r'(TABLE_.*?_TABLE|IMAGE_.*?_IMAGE)')
    found = link_pattern.findall(content)
    matches = ['\n'.join(found)] if found else ['NOLINK']

    if '__摘要总结__' in path:
        parts = ['__SUMMARY__'] + content.split('-**-')
        matches = ['-**-'.join(parts)]
    return matches


def flatten_list(nested_list):
    """Recursively flatten arbitrarily nested lists into one flat list."""
    flat = []
    for element in nested_list:
        if isinstance(element, list):
            flat += flatten_list(element)
        else:
            flat.append(element)
    return flat


def flatten_dict(d, parent_key=()):
    """Flatten a nested dict into {tuple-of-keys: leaf value} for conversion
    to a DataFrame."""
    flat = {}
    for key, val in d.items():
        key_path = parent_key + (key,)
        if isinstance(val, dict):
            flat.update(flatten_dict(val, key_path))
        else:
            flat[key_path] = val
    return flat


def flatten_dic_dfs(d, parent_key=''):
    """Depth-first, pre-order list of all keys in a nested dict, preserving
    insertion order. (*parent_key* is unused; kept for interface compatibility.)"""
    ordered = []
    for key, val in d.items():
        ordered.append(key)
        if isinstance(val, dict) and val:
            ordered += flatten_dic_dfs(val)
    return ordered


def flatten_dic2paths(d, current_path=None, result=None):
    """Convert a nested dict into '-->'-joined root-to-leaf path strings,
    skipping non-string keys."""
    result = [] if result is None else result
    prefix = [] if current_path is None else current_path

    for key, value in d.items():
        if not isinstance(key, str):
            continue
        path_so_far = prefix + [key]
        if isinstance(value, dict) and value:  # non-empty dict: keep descending
            flatten_dic2paths(value, path_so_far, result)
        else:  # leaf: emit the accumulated path
            result.append('-->'.join(path_so_far))
    return result


def get_bottom_level_titles(tree, titles=None, current_path=None, keep_path=False):
    """Collect leaf (bottom-level) titles of a nested-dict tree; with
    keep_path=True each result is the full root-to-leaf key path (a list)."""
    titles = [] if titles is None else titles
    current_path = [] if current_path is None else current_path

    for key, subtree in tree.items():
        path_here = current_path + [key]
        if subtree:
            get_bottom_level_titles(subtree, titles, path_here, keep_path)
        else:  # empty dict marks a bottom-level title
            titles.append(path_here if keep_path else key)
    return titles
    
    
def intersect_lst(list_1, list_2, mode='inter'):
    """Combine two lists.

    :param mode: 'inter' — order-preserving intersection (keeps list_1 order
        and duplicates); 'diff' — set difference list_1 - list_2 (unordered).
    Bug fix: an unknown mode previously fell through to `return res_list`
    with res_list unbound (UnboundLocalError); it now returns [].
    """
    if mode == 'inter':
        res_list = [value for value in list_1 if value in list_2]
    elif mode == 'diff':
        res_list = list(set(list_1) - set(list_2))
    else:
        res_list = []
    return res_list


def merge_df(input_df):    
    """Within each 'path' group, collapse a consecutive run of TABLE/IMAGE
    rows (of the run's first type) into one row — contents newline-joined,
    lengths summed — while other rows pass through unchanged. Group order
    and row order are preserved.

    NOTE(review): within a run, rows whose type differs from the first
    captured TABLE/IMAGE type are silently dropped — confirm intended.
    """
    dfs_by_path = list(input_df.groupby('path', sort=False))
    processed_dfs = []
    
    for key, df in dfs_by_path:
        merged_rows = []
        content_to_merge = []  # Collect content for either TABLE or IMAGE
        first_type = None
        first_path = None
        firt_knowid = None  # NOTE(review): typo for first_knowid (local only)
        first_intention = None
        total_length = 0
        
        for i, row in df.iterrows():
            if 'TABLE' in row['type'] or 'IMAGE' in row['type']:
                # Start capturing the first occurrence details
                if first_type is None:
                    first_type = row['type']
                    first_path = row['path']
                    firt_knowid = row['know_id']
                    first_intention = row['intention']
                
                # Ensure we only merge rows of the same type (TABLE or IMAGE)
                if row['type']==first_type:
                    # Collect content and sum length
                    content_to_merge.append(row['content'])
                    total_length += int(row['length'])
            else:
                # If we've captured TABLE or IMAGE data, append the merged row
                if first_type is not None:
                    merged_rows.append([ '\n'.join(content_to_merge), first_type, first_path, total_length, firt_knowid, first_intention])
                    # Reset the merging process
                    first_type = None
                    content_to_merge = []
                    total_length = 0
                merged_rows.append([row['content'], row['type'], row['path'], row['length'], row['know_id'], row['intention']])
        
        # Flush a trailing TABLE/IMAGE run at the end of the group.
        if first_type is not None:
            merged_rows.append([ '\n'.join(content_to_merge), first_type, first_path, total_length, firt_knowid, first_intention])
        
        temp_merge_df = pd.DataFrame(merged_rows, columns=['content', 'type', 'path', 'length', 'know_id', 'intention'])
        processed_dfs.append(temp_merge_df)
    final_df = pd.concat(processed_dfs, axis=0, ignore_index=True)
    return final_df


def process_texts4displaty(know_df, img_record, tb_record, KB_PATH):        
    """Render knowledge rows for display: plain text with '__HHF__' markers
    stripped, tables re-read from disk and emitted as CSV text, images as
    'IMAGE_<absolute path>' lines. Returns one newline-joined string.

    NOTE(review): function name looks like a typo for ...4display; renaming
    would break callers, so it is kept.
    """
    res_texts = []   
    for i, row in know_df.iterrows():
        kg_contents_ = row['content']
        type = row['type']  # NOTE(review): shadows the builtin `type`
        
        if type=='PTXT':
            res_texts.append(kg_contents_.replace('__HHF__', ''))

        elif 'TABLE_' in type:
            # tb_record maps the table id to a '-->'-joined relative path.
            tb_path = tb_record[type].replace('-->', os.path.sep)
            tb_path = os.path.join(KB_PATH, tb_path)
            if encryptor.encrypt:
                tb_df = encryptor.load_from_file(tb_path)
            else:
                tb_df = pd.read_csv(tb_path, encoding='utf-8')
            res_texts.append(('\n' + tb_df.to_csv(index=False) +'\n')) 
                
        elif 'IMAGE_' in type:
            # img_record maps the image id to a '-->'-joined relative path.
            img_path = img_record[type].replace('-->', os.path.sep)
            img_path = os.path.join(KB_PATH, img_path)
            res_texts.append('IMAGE_' + img_path)
    return '\n'.join(res_texts)
    
    
def min_max_normalize(value, min_val, max_val):
    """Min-max scale *value* into [0, 1]; returns 0 for a degenerate range."""
    span = max_val - min_val
    if span == 0:
        return 0  # avoid division by zero
    scaled = (value - min_val) / span
    return min(1, max(0, scaled))  # clip to [0, 1]


def path_handle(path, mode, suffixes=['.pdf', '.doc', '.xls', '.docx', '.xlsx', '.csv', '.txt', '.png', '.jpg', '.json']):
    """Multi-purpose path utility.

    :param mode:
        'split'        -> list of path components (split on os.sep)
        'extract-base' -> file name without directory or extension
        'convert'      -> strip illegal/control characters and newlines,
                          normalize '/' and '\\' to os.sep
        'clean'        -> remove any known file-type suffix from the string
        'sanitize'     -> replace characters illegal in file names with '_'
    Unknown modes return None (unchanged behavior).
    """
    illegal_pattern = re.compile('[<>"|?*\x00-\x1F\t]')

    if mode == 'split':
        return path.split(os.sep)
    elif mode == 'extract-base':
        base_name = os.path.basename(path)
        return os.path.splitext(base_name)[0]
    elif mode == 'convert':
        path = illegal_pattern.sub('', path)
        try:
            separator = os.sep
            path = path.replace('/', separator).replace('\\', separator)
            return path.replace('\n', '')
        except Exception:
            return path.replace('\n', '')
    elif mode == 'clean':
        p_item = path
        # Bug fix: replace longer suffixes first — '.doc' is a prefix of
        # '.docx' (and '.xls' of '.xlsx'), so the original order turned
        # 'file.docx' into 'filex'.
        for suffix in sorted(suffixes, key=len, reverse=True):
            p_item = p_item.replace(suffix, '')
        return p_item
    elif mode == 'sanitize':
        illegal_chars = '\t\n<>：:;；"/\\|?*'
        safe_char = '_'
        sanitized_parts = []
        for part in path.split(os.sep):
            clean_part = part
            for ch in illegal_chars:
                clean_part = clean_part.replace(ch, safe_char)
            sanitized_parts.append(clean_part)
        return os.sep.join(sanitized_parts)
    else:
        pass


def process_path_texts(path_, last=50):
    """Sanitize a path, join its components with '_', and truncate to *last* chars."""
    sanitized = path_handle(path_, mode='sanitize')
    joined = '_'.join(sanitized.split(os.sep))
    return joined[:last]


def process_dup_paths_df(df):
    """De-duplicate 'path' values in place by suffixing repeats with _2, _3, ...
    (the first occurrence keeps its original name)."""
    if df['path'].duplicated(keep=False).any():
        df['count'] = df.groupby('path').cumcount() + 1
        df['path'] = df.apply(
            lambda r: f"{r['path']}_{r['count']}" if r['count'] > 1 else r['path'],
            axis=1,
        )
        df.drop(columns=['count'], inplace=True)
    return df
    
    
def remove_duplicates_orderkept(input_list):
    """Drop duplicate (hashable) items while preserving first-occurrence order."""
    # dict preserves insertion order, so fromkeys gives a stable de-dup.
    return list(dict.fromkeys(input_list))
    

def remove_spaces(text, handle_punctuation=False):
    '''
        Normalize whitespace in mixed Chinese/English text.

        With handle_punctuation=True, all ASCII and common Chinese punctuation
        is stripped instead. Otherwise, whitespace that follows a Chinese
        character is removed (presumably to rejoin Chinese phrases — note this
        also joins a Chinese character to a following Latin word; confirm
        intended), then remaining whitespace runs collapse to single spaces
        and the ends are stripped.

        :param text: raw text
        :param handle_punctuation: strip punctuation instead of space handling
        :return: normalized text
    '''
    if handle_punctuation==True:
        punctuation = r"""!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~，。、【】《》？；：‘’“”（）…—-！"""
        res_text = re.sub(f"[{re.escape(punctuation)}]", "", text)
    else:
        # Alternative 1 matches a CJK char plus its trailing whitespace (the
        # whitespace is dropped); alternative 2 matches a CJK char preceded by
        # whitespace and re-emits it unchanged, consuming the char so it can't
        # participate in further matches.
        pattern = re.compile(r'([\u4e00-\u9fff])\s+|(?<=\s)([\u4e00-\u9fff])')
        def replacer(match):
            return match.group(1) or match.group(2)
        res_text = pattern.sub(replacer, text)
    
    # Collapse any remaining whitespace runs to a single space.
    res_text = re.sub(r'\s+', ' ', res_text)
    return res_text.strip()


def replace_terms(res_contents, current_template, repl_dic):
    """Apply the regex replacements in *repl_dic* to each paragraph of the
    template and append the results to *res_contents* (mutated and returned).

    Note: repl_dic keys are treated as regular expressions by re.sub.
    """
    templates = current_template if isinstance(current_template, list) else [current_template]
    for paragraph in templates:  # replacement happens per paragraph
        for pattern, replacement in repl_dic.items():
            paragraph = re.sub(pattern, replacement, paragraph)
        res_contents.append(paragraph)
    return res_contents


def restore_graph_by_paths(paths, split_char='-->', summary_term='###'):
    """Rebuild a nested-dict tree from '-->'-joined path strings (skipping
    paths containing *summary_term*) and return it with its text description."""
    root_dict = {}
    for path in paths:
        if summary_term in path:
            continue
        cursor = root_dict
        for part in path.split(split_char):
            cursor = cursor.setdefault(part, {})
    dic_texts = traverse_dict(root_dict)
    return root_dict, dic_texts
    

def restore_graph_by_texts(title_tuples):
    """Rebuild a nested-dict hierarchy from (title, level) pairs and wrap it
    under a synthetic 'ROOT' key. Levels are expected to start at 1 (a
    level-0 title would empty the stack)."""
    hierarchy = {}
    stack = [(0, hierarchy)]  # sentinel: depth 0 is the hierarchy root

    for title, level in title_tuples:
        # Drop stack entries at or below this title's level to find its parent.
        del stack[level:]
        container = stack[-1][1]
        container[title] = {}
        stack.append((level, container[title]))
    return {'ROOT': hierarchy}


def set_bottom_dic_val(d, path, val, split_char='-->', mode='replace'):
    """Operate on the value at a *split_char*-joined key path of a deep copy
    of *d*.

    mode='replace' sets the value, 'add' applies +=, 'extract' returns
    (comma-joined sub-path, value) without modifying anything.
    Raises ValueError when an intermediate key is missing or not a dict.
    """
    result = copy.deepcopy(d)  # never mutate the caller's dict
    keys = path.split(split_char)
    node = result
    for key in keys[:-1]:
        if key not in node or not isinstance(node[key], dict):
            raise ValueError(f"Invalid path or non-dictionary node encountered at '{key}'")
        node = node[key]

    last = keys[-1]
    if mode == 'replace':
        node[last] = val
    elif mode == 'add':
        node[last] += val
    elif mode == 'extract':
        return ','.join(keys[1:]), node[last]
    return result


def searching_files_upwards(bottom_path, KB_PATH):
    """Walk upward from KB_PATH/bottom_path, collecting the start path and
    each ancestor until a directory whose name contains '知识固化' is
    reached (inclusive).

    Bug fix: when no ancestor contained '知识固化', the original looped
    forever once it reached the filesystem root (dirname of the root is the
    root itself); the walk now also stops at the root.
    """
    current = os.path.join(KB_PATH, bottom_path)

    upper_path_lst = [current]
    while True:
        parent = os.path.abspath(os.path.dirname(current))
        upper_path_lst.append(parent)
        last_component = re.split(r'\\|/', parent)[-1]
        if '知识固化' in last_component:
            break
        if parent == current:  # reached the filesystem root without a match
            break
        current = parent
    return upper_path_lst


def traverse_dict(d, parent=None):
    """Describe each non-leaf node of a nested dict as "'key' 包括 <children>",
    in pre-order. Leaves produce no output."""
    descriptions = []
    for key, children in d.items():
        if not children:
            continue
        descriptions.append(f"'{key}' 包括 {', '.join(children.keys())}")
        descriptions.extend(traverse_dict(children, key))
    return descriptions


def post_request(url, req_body=None, files=None, timeout=60):
    """POST JSON/files to *url*; returns (payload, code) instead of raising.

    On success returns (rsp.json(), 200); on failure returns a Chinese error
    message plus a representative status code (408 timeout, 503 connection,
    actual status for HTTP errors, 500 otherwise).

    Note: the subclass handlers (Timeout/ConnectionError/HTTPError) must stay
    before the generic RequestException handler.
    """
    try:
        rsp = requests.post(url, json=req_body, files=files, timeout=timeout)
        rsp.raise_for_status()
    except requests.exceptions.Timeout:
        return '请求超时，请稍后重试', 408
    except requests.exceptions.ConnectionError:
        return '网络连接错误，请检查您的网络', 503
    except requests.exceptions.HTTPError as e:
        # raise_for_status() raised, so `rsp` is guaranteed to be bound here.
        return f'HTTP错误: {str(e)}', rsp.status_code
    except requests.exceptions.RequestException as e:
        return f'请求异常: {str(e)}', 500
    return rsp.json(), 200
    

def han_tok(contents):
    """Tokenize *contents* via the remote tokenizer service.

    Raises ConnectionError with the service's error message on failure.
    """
    payload = {'querys': contents}
    msg, status_code = post_request('http://218.17.187.47:35010/tokenizer', payload)
    if status_code != 200:
        raise ConnectionError(msg)
    return msg['tokens']


def merge_non_chinese_until_chinese(lst):
    """Merge consecutive non-Chinese fragments into one item; any fragment
    containing a CJK character stays separate. Blank fragments are dropped."""
    fragments = [item for item in lst if item.strip()]

    merged = []
    buffer = []
    for fragment in fragments:
        if re.search(r'[\u4e00-\u9fff]', fragment):
            # Flush any accumulated non-Chinese run before the Chinese item.
            if buffer:
                merged.append(''.join(buffer))
                buffer = []
            merged.append(fragment)
        else:
            buffer.append(fragment)

    # A trailing non-Chinese run still needs to be emitted.
    if buffer:
        merged.append(''.join(buffer))
    return merged


def merge_texts_by_threshold(text_list, threshold, merge_term='\n'):
    """Greedily merge texts into blocks of roughly *threshold* characters.

    A block is flushed just before adding the next text once its accumulated
    character count reaches the threshold; an undersized final block is
    folded into the previous one instead of being emitted alone.
    """
    merged_texts = []
    buffer = []
    buffered_chars = 0

    for text in text_list:
        # Flush the current block before adding more once it is full.
        if buffered_chars >= threshold:
            merged_texts.append(merge_term.join(buffer))
            buffer = []
            buffered_chars = 0
        buffer.append(text)
        buffered_chars += len(text)

    if buffer:
        tail = merge_term.join(buffer)
        if merged_texts and len(tail) < threshold:
            # Too short to stand alone: append to the previous block.
            merged_texts[-1] += merge_term + tail
        else:
            merged_texts.append(tail)
    return merged_texts


def tokenize2stw_remove(contents, stopwords=None, link_char='->'):
    """Tokenize each content string with jieba, merge non-Chinese runs,
    optionally drop stopwords and blank tokens, de-duplicate (order kept),
    and join the tokens with *link_char*.

    :param contents: iterable of strings to tokenize
    :param stopwords: optional collection of tokens to drop
    :param link_char: separator used to join the surviving tokens
    :return: one joined token string per input content
    """
    res_contents = []
    # (a remote han_tok tokenizer was used here previously; jieba is local)
    tokens = [merge_non_chinese_until_chinese(jieba.lcut(content)) for content in contents]
    for token in tokens:
        if stopwords is not None:  # was `not stopwords==None`
            filtered_tokens = [w for w in token if w not in stopwords and w.strip() != '']
        else:
            filtered_tokens = token
        filtered_tokens = remove_duplicates_orderkept(filtered_tokens)
        res_contents.append(link_char.join(filtered_tokens))
    return res_contents


def is_valid_windows_filename(filename):
    """Best-effort Windows filename validity check.

    Rejects names longer than 250 chars, reserved device names (CON, PRN,
    COM1..9, LPT1..9, ...), and names containing characters Windows forbids.
    """
    if len(filename) > 250:
        return False

    reserved_names = {
        "CON", "PRN", "AUX", "NUL",
        "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
        "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9",
    }
    if filename.upper() in reserved_names:
        return False

    # Characters Windows forbids in file names.
    return re.search(r'[<>:"/\\|?*]', filename) is None


def parse_fragment_path(user, KB_PATH, fragment_path):
    """Split a '-->'-joined fragment path into its on-disk directory part and
    the remaining logical sub-path.

    Returns (type, absolute real dir, dir part, sub path, fragment name),
    where type is 3 for image fragments (leading 'images' component) and 2
    otherwise. Raises ValueError('路径错误') when not even the first
    component exists as a directory under KB_PATH.
    Note: str.removeprefix requires Python 3.9+.
    """
    type = 2  # NOTE(review): shadows the builtin `type`
    fragment_path = fragment_path.removeprefix(user + SPLIT_CHAR)
    fragment_paths = fragment_path.split(SPLIT_CHAR)
    fragment_name = fragment_paths[-1]
    if fragment_paths[0] == 'images':
        type = 3
        fragment_paths.pop(0)

    # Walk components from the left as long as they exist as directories on
    # disk; i ends up as the count of real directory components.
    i = 0
    real_file_dir = ''
    for path in fragment_paths:
        temp_path = os.path.join(real_file_dir, path)
        if not os.path.isdir(os.path.join(KB_PATH, temp_path)):
            break
        real_file_dir = temp_path
        i += 1
    if real_file_dir == '':
        raise ValueError('路径错误')
    file_dir = f'{SPLIT_CHAR}'.join(fragment_paths[:i])
    sub_path = f'{SPLIT_CHAR}'.join(fragment_paths[i:])
    return type, os.path.join(KB_PATH, real_file_dir), file_dir, sub_path, fragment_name


def parser_context(html_text):
    """Parse HTML into an ordered list of ('image', src), ('table', DataFrame)
    and ('text', str) tuples, skipping text nodes that live inside tables
    (their content is already captured by the table DataFrame)."""
    # Parse the HTML with BeautifulSoup (lxml backend).
    soup = BeautifulSoup(html_text, 'lxml')

    # Ordered result list.
    result = []

    # Walk every node in document order.
    for element in soup.recursiveChildGenerator():
        if element.name == 'img':  # image node
            result.append(('image', element.get('src')))
        elif element.name == 'table':  # table node
            try:
                tbl_str = str(element)
                df = pd.read_html(tbl_str)[0]
                result.append(('table', df))
            except Exception as e:
                # Unparseable table: log and keep going.
                print(f'tbl_str:{tbl_str}, e:{e}')
                continue
        elif isinstance(element, str) and element.strip():  # text node
            # Check whether this text node is nested inside a <table>.
            parent = element.parent
            while parent:
                if parent.name == 'table':
                    break  # inside a table -> skip this text
                parent = parent.parent
            else:
                # while/else: only runs if no <table> ancestor was found.
                result.append(('text', element.strip()))

    return result


'''
    some temporary functions
'''
# def secure_filename(input_name):
#     safe_name = re.sub(r'[<>:"/\\|?*,]', '_', input_name)
#     safe_name = safe_name.rstrip('. ')

#     reserved_names = ["CON", "PRN", "AUX", "NUL",
#                       "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
#                       "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"]
    
#     if safe_name.upper() in reserved_names:
#         safe_name = "_" + safe_name
#     return safe_name


# def get_key_levels(d, level=1):
#     key_levels = []
#     for key, value in d.items():
#         key_levels.append((key, level))
#         if isinstance(value, dict):
#             key_levels.extend(get_key_levels(value, level + 1))
#     return key_levels
    
    
# def handle_unique_dfcols(df):
#     if df.columns.duplicated().any():
#         new_columns = []
#         for i, col in enumerate(df.columns):
#             if df.columns.duplicated()[i]:
#                 new_columns.append(f"{col}_{i}")
#             else:
#                 new_columns.append(col)
#         df.columns = new_columns
#     return df