import json  
import os  
import pandas as pd  
from bs4 import BeautifulSoup  

# Locations of the index JSONs and CSV data (relative to the working directory).
json_path = "./index/jsons"
data_path = "./data"
html_directory = './data/data_pages'


def _load_json(filename):
    """Load one JSON file from ``json_path`` and return the parsed object."""
    with open(os.path.join(json_path, filename), 'r', encoding='utf-8') as f:
        return json.load(f)


# Load all index data from index/jsons: inverted indexes and TF-IDF tables
# for page bodies and page titles.
invert_index = _load_json('invert_index.json')
invert_index_title = _load_json('invert_index_title.json')
TF_IDF = _load_json('TF_IDF.json')
TF_IDF_Title = _load_json('TF_IDF_Title.json')

# Term frequencies: TF values for every word that appeared in bodies/titles.
# Dict keys are already unique, so sorted() alone gives the sorted vocabulary
# (the original set() wrapper was redundant).
final_TF = _load_json('final_TF.json')
word_s = sorted(final_TF)

final_TF_title = _load_json('final_TF_title.json')
word_s_title = sorted(final_TF_title)

TF = _load_json('TF.json')
TF_Title = _load_json('TF_Title.json')
IDF = _load_json('IDF.json')
IDF_Title = _load_json('IDF_Title.json')

# CSV data: PageRank scores and per-page metadata, indexed by first column.
page_rank = pd.read_csv(os.path.join(data_path, "page_rank.csv"), encoding='utf-8-sig', index_col=0)
information = pd.read_csv(os.path.join(data_path, "data_with_otherinfos.csv"), encoding='utf-8', index_col=0)

def extract_file_links_from_html(html_directory):
    """
    Extract links to PDF, DOC/DOCX, and XLS/XLSX files from every HTML file
    in the given directory.

    Parameters:
    html_directory (str): The directory containing .html files.

    Returns:
    list: Dicts with 'title' (the anchor text) and 'url' (absolute http(s)
    URL, or a local path resolved against the HTML file's directory).
    """
    file_links = []
    supported_extensions = ('.pdf', '.doc', '.docx', '.xls', '.xlsx')

    for filename in os.listdir(html_directory):
        if not filename.endswith('.html'):
            continue
        file_path = os.path.join(html_directory, filename)
        with open(file_path, 'r', encoding='utf-8') as file:
            soup = BeautifulSoup(file.read(), 'html.parser')

        for link in soup.find_all('a', href=True):
            href = link['href']
            # Compare case-insensitively so links like 'report.PDF' are
            # not silently dropped (the original check was case-sensitive).
            if href.lower().endswith(supported_extensions):
                # Relative href: resolve against the HTML file's directory.
                if not href.startswith(('http://', 'https://')):
                    href = os.path.join(os.path.dirname(file_path), href)
                file_links.append({'title': link.get_text(), 'url': href})
    return file_links

def create_file_links_dataframe(file_links):
    """
    Build a DataFrame of file links, deduplicated by title.

    Parameters:
    file_links (list): Dicts with 'title' and 'url' keys.

    Returns:
    DataFrame: Columns 'title' and 'url'; the first occurrence of each
    title is kept.
    """
    # Explicit columns= fixes the empty-input case: pd.DataFrame([]) has no
    # 'title' column, so drop_duplicates(subset='title') raised KeyError.
    file_df = pd.DataFrame(file_links, columns=['title', 'url'])
    file_df.drop_duplicates(subset='title', inplace=True)
    return file_df

def save_file_links_to_csv(file_df, output_file):
    """
    Persist the file-links table as a CSV file.

    The DataFrame index is omitted so the CSV contains only the link
    columns themselves.

    Parameters:
    file_df (DataFrame): Table of file links to write out.
    output_file (str): Destination path for the CSV file.
    """
    file_df.to_csv(output_file, index=False)

if __name__ == "__main__":
    # Pipeline: harvest document links from the crawled pages, dedupe by
    # title, persist to CSV, then echo the result for inspection.
    harvested = extract_file_links_from_html(html_directory)
    links_df = create_file_links_dataframe(harvested)
    save_file_links_to_csv(links_df, os.path.join(data_path, "file_links.csv"))
    print(links_df)