import pandas as pd
import jieba
import re
from bs4 import BeautifulSoup
import os

def load_stopwords(file_path='主题分析/stopwords.txt'):
    """Load stopwords from a UTF-8 text file, one word per line.

    Blank lines are skipped so the empty string never ends up in the set.

    Args:
        file_path: Path to the stopword list.

    Returns:
        A set of stopword strings.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        return {line.strip() for line in f if line.strip()}

def load_userdict(file_path='主题分析/userdict.txt'):
    """Register a custom user dictionary with jieba's tokenizer.

    Side effect only: subsequent calls to jieba.cut anywhere in the
    process will respect the added entries.

    Args:
        file_path: Path to a user dictionary in jieba's expected
            format (presumably one entry per line — confirm against
            the actual file).
    """
    jieba.load_userdict(file_path)

def clean_text(text):
    """Strip HTML tags, punctuation, digits, and excess whitespace.

    Args:
        text: Any value; it is coerced with str() before cleaning.

    Returns:
        The cleaned string, with runs of whitespace collapsed to a
        single space and leading/trailing whitespace removed.
    """
    text = str(text)
    # Remove HTML tags.
    text = re.sub(r'<[^>]+>', '', text)
    # Remove punctuation and special characters. Python's \w is
    # Unicode-aware, so [^\w\s] already covers full-width Chinese
    # punctuation (，。！？ etc.) — no separate character class needed.
    text = re.sub(r'[^\w\s]', '', text)
    # Remove digits.
    text = re.sub(r'\d+', '', text)
    # Collapse runs of whitespace into a single space.
    text = re.sub(r'\s+', ' ', text)
    return text.strip()

def process_data(file_path, stopwords):
    """Parse a CNKI export (HTML masquerading as .xls) into a DataFrame.

    The CNKI "xls" export is really an HTML document containing one
    <table>. Header cells look like "prefix-name"; only the part after
    the first '-' is kept as the column name.

    When a '摘要' (abstract) column exists, two derived columns are
    added:
      摘要_原文  — the raw abstract text before cleaning
      摘要_分词  — space-joined jieba tokens, with stopwords,
                   whitespace-only tokens, and single-character tokens
                   removed

    Args:
        file_path: Path to the HTML/.xls export.
        stopwords: Set of words to drop during segmentation.

    Returns:
        pandas.DataFrame with one row per record.

    Raises:
        ValueError: If the file contains no <table> element.
    """
    with open(file_path, 'r', encoding='utf-8') as f:
        html_content = f.read()

    soup = BeautifulSoup(html_content, 'html.parser')
    table = soup.find('table')
    if table is None:
        # Fail loudly instead of the opaque AttributeError that
        # table.find_all would raise on None.
        raise ValueError(f'未在文件中找到表格: {file_path}')
    rows = table.find_all('tr')

    # First row is the header; CNKI prefixes headers with "code-".
    headers = [td.text.split('-')[1] if '-' in td.text else td.text
               for td in rows[0].find_all('td')]
    data = [[td.text for td in row.find_all('td')] for row in rows[1:]]

    df = pd.DataFrame(data, columns=headers)

    if '摘要' in df.columns:
        df['摘要_原文'] = df['摘要']  # keep the untouched abstract
        df['摘要'] = df['摘要'].apply(clean_text)

        def segment_text(text):
            # Tokenize; drop stopwords, whitespace, and 1-char tokens.
            words = jieba.cut(text)
            return ' '.join(
                w for w in words
                if w.strip() and w not in stopwords and len(w) > 1
            )

        df['摘要_分词'] = df['摘要'].apply(segment_text)

    return df

def save_to_csv(df, output_file='主题分析/CNKI-processed.csv'):
    """Save a DataFrame to CSV (UTF-8 with BOM so Excel shows Chinese).

    Args:
        df: The DataFrame to write.
        output_file: Destination path; parent directories are created
            as needed.
    """
    # os.path.dirname() returns '' for a bare filename, and
    # os.makedirs('') raises FileNotFoundError — only create
    # directories when there actually is a directory component.
    out_dir = os.path.dirname(output_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    try:
        # utf-8-sig adds a BOM so Excel detects the encoding correctly.
        df.to_csv(output_file, index=False, encoding='utf-8-sig')
        print(f"已保存为: {output_file}")
    except Exception as e:
        print(f"保存文件失败: {str(e)}")

def main():
    """Run the full CNKI preprocessing pipeline end to end."""
    print("开始数据预处理...")

    # Load the stopword list and teach jieba the domain vocabulary.
    stopwords = load_stopwords()
    load_userdict()

    # Parse the export and segment the abstracts.
    df = process_data('CNKI.xls', stopwords)

    # Persist the processed table.
    save_to_csv(df)

    print("预处理完成！结果已保存到 主题分析/CNKI-processed.csv")
    print(f"处理的文档数量: {len(df)}")


if __name__ == "__main__":
    main()