import os
import pandas as pd
import datetime
import re
from parsel import Selector


# Load the title-indexed page table produced by the crawler; `description`
# (and later the full info table) is derived from it.
# NOTE(review): relative path assumes the script runs one level below the
# repo root — confirm working directory before running.
data = pd.read_csv("../data/data.csv", encoding='utf-8', index_col=0)

# Add a `description` to every HTML document in the local folder, used later
# for tokenization when building the search index.
def add_description(path="../data/data_pages"):
    """Extract each page's <meta name="description"> and store it in the
    module-level `data` DataFrame, keyed by page title ('/' mapped to '_').

    Files with no <title> are skipped with a message; a missing description
    is recorded as an empty string.  An error on one file is logged and does
    not abort the run.

    NOTE(review): the original default was "data/data_pages", inconsistent
    with every other path in this script ("../data/..."); aligned with
    get_all_info's default.
    """
    files = os.listdir(path)
    total_files = len(files)      # total number of files, for progress output
    processed_count = 0           # files successfully processed so far

    for file_name in files:
        file_path = os.path.join(path, file_name)
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                content = file.read()
                selector = Selector(content)
                title = selector.css('title::text').get()
                if not title:
                    print(f"No title found in {file_name}")
                    continue
                _title = title.replace('/', '_')
                # <meta name="description" content="..."> from the head.
                description = selector.css('meta[name="description"]::attr(content)').get()
                if description is not None:
                    # Strip layout whitespace (CR, LF, tab, full-width space).
                    # (The original chained a duplicate .replace('\n', '').)
                    description = description.replace('\r', '').replace('\n', '').replace('\t', '').replace('　', '')
                else:
                    description = ""  # default to empty string
                if _title in data.index:
                    data.loc[_title, 'description'] = description
                else:
                    print(f"Title '{_title}' not found in CSV index for file {file_name}")
                processed_count += 1
                print(f"Processed {processed_count} out of {total_files} files.")
        except Exception as e:
            print(f"Error reading {file_name}: {e}")

# Run the enrichment pass, then persist the table with the new column.
add_description()
data.to_csv("../data/data_with_description.csv")




# Accumulator DataFrame for everything extracted per page, indexed by URL.
info_df = pd.DataFrame(
    columns=['title', 'description', 'date_timestamp', 'content', 'editor'],
).rename_axis('url')


# Collect the full information for every HTML document: description,
# publish time, author (editor), and body text.
def get_all_info(html_path="../data/data_pages"):
    """Walk the HTML files under *html_path* and append one row per page to
    the module-level `info_df`, indexed by the page's URL.

    The URL is looked up in the module-level `data` DataFrame via the page
    title ('/' mapped to '_').  Pages with no <title>, no matching CSV row,
    or no URL are skipped with a message.
    """
    # The publish date is embedded in the URL path, e.g. ".../2021/07/15/".
    # Compiled once here instead of re.search on every iteration.
    date_pattern = re.compile(r'(20)\d{2}/(0?[1-9]|1[012])/(0?[1-9]|[12][0-9]|3[01])/', re.S)

    files = os.listdir(html_path)
    total_files = len(files)
    processed_count = 0

    for file_name in files:
        file_path = os.path.join(html_path, file_name)
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                page_content = file.read()
        except Exception as e:
            # Consistent with add_description: one unreadable file must not
            # abort the whole run (the original had no handler here).
            print(f"Error reading {file_name}: {e}")
            continue

        page_selector = Selector(page_content)
        page_title = page_selector.css('title::text').get()
        if not page_title:
            print(f"No title found in {file_name}, skipping.")
            continue
        clean_title = page_title.replace('/', '_')

        if clean_title in data.index:
            # .loc may yield a Series (duplicate titles in the index) or a
            # plain string; normalize to a single URL.
            url_series = data.loc[clean_title, 'url']
            if isinstance(url_series, pd.Series):
                if url_series.empty:
                    print(f"No URL found for {clean_title}")
                    continue
                url = url_series.iloc[0]
            elif isinstance(url_series, str):
                url = url_series
            else:
                print(f"Unexpected data type for URL: {url_series}")
                continue

            print(f"Processing {file_name} - {url}")

            page_description = page_selector.css('meta[name="description"]::attr(content)').get()
            if page_description:
                page_description = page_description.replace('\r', '').replace('\n', '').replace('\t', '').replace('　', '')

            # Body text is every <p> except the last, which carries the
            # editor/byline.  The join now happens only when there are
            # paragraphs (the original joined before checking).
            paragraphs = page_selector.css('p::text').getall()
            if paragraphs:
                page_content_cleaned = "".join(paragraphs[:-1])
                # NOTE(review): .replace(' ', ' ') looks like a no-op —
                # possibly a mangled NBSP ('\xa0') → ' ' replace; kept as-is.
                page_content_cleaned = page_content_cleaned.replace('\r', '').replace('\n', '').replace('\t', '').replace(' ', ' ').replace('　', '')
                editor = paragraphs[-1].replace('\n', '').replace(' ', '')
            else:
                page_content_cleaned = ""
                editor = None

            regex_result = date_pattern.search(str(url))
            if regex_result:
                # NOTE(review): .timestamp() interprets the naive datetime in
                # the local timezone — confirm whether UTC was intended.
                date_timestamp = datetime.datetime.strptime(regex_result.group(), '%Y/%m/%d/').timestamp()
            else:
                date_timestamp = None

            info_df.loc[url] = [page_title, page_description, date_timestamp, page_content_cleaned, editor]
        else:
            print(f"Title '{clean_title}' not found in CSV index for file {file_name}")
        processed_count += 1
        print(f"Processed {processed_count} out of {total_files} files.")

# Run the extraction pass and persist the per-URL info table.
get_all_info()
info_df.to_csv("../data/data_with_otherinfos.csv", index=True)