import pandas as pd
import jieba
import jieba.analyse
import sqlite3
from typing import List, Dict
import logging
import os

# Configure module-wide logging: timestamped records at INFO level and above.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class DataPreprocessor:
    """Segment, de-stopword and keyword-tag cleaned Weibo comments.

    Results can be persisted both to CSV and to a SQLite database.
    Requires `baidu_stopwords.txt` next to this module for stopword filtering.
    """

    def __init__(self, db_path: str = 'processed_weibo.db'):
        """Load stopwords, warm up jieba and open the SQLite connection.

        Args:
            db_path: Path of the SQLite database file to connect to.
        """
        # Stopword set consulted by segment_text().
        self.stopwords = self._load_stopwords()
        # Build jieba's dictionary up front so the first lcut() call is fast.
        jieba.initialize()
        self.db_path = db_path
        self.conn = sqlite3.connect(db_path)
        logger.info(f"成功连接到数据库 {db_path}")

    def __del__(self):
        """Destructor: best-effort close of the database connection."""
        # Guard against a partially-run __init__ (no `conn` attribute) and
        # against errors raised while the interpreter is shutting down.
        if hasattr(self, 'conn'):
            try:
                self.conn.close()
            except Exception:
                pass

    def _load_stopwords(self) -> set:
        """Load the Baidu stopword list shipped alongside this module.

        Returns:
            Set of stopword strings; empty set if the file is missing
            (segmentation then proceeds without stopword filtering).
        """
        stopwords_path = os.path.join(os.path.dirname(__file__), 'baidu_stopwords.txt')
        try:
            with open(stopwords_path, 'r', encoding='utf-8') as f:
                # Skip blank lines so the empty string never enters the set.
                stopwords = {line.strip() for line in f if line.strip()}
            logger.info(f"成功加载 {len(stopwords)} 个百度停用词")
            return stopwords
        except FileNotFoundError:
            logger.error("未找到百度停用词文件，请确保baidu_stopwords.txt文件存在于data_processing目录下")
            return set()

    @staticmethod
    def _join_tokens(value):
        """Join a token list into a space-separated string; pass strings through.

        Idempotent: applying it to an already-joined string is a no-op, so the
        serialization helpers cannot corrupt data if called more than once.
        """
        return ' '.join(value) if isinstance(value, list) else value

    def segment_text(self, text: str) -> List[str]:
        """Cut *text* with jieba and drop stopwords / single-character tokens.

        Args:
            text: Cleaned comment text.

        Returns:
            List of kept tokens. Non-string input (e.g. NaN read from CSV)
            yields an empty list instead of crashing jieba.
        """
        if not isinstance(text, str):
            return []
        words = jieba.lcut(text)
        return [word for word in words if word not in self.stopwords and len(word) > 1]

    def extract_keywords(self, text: str, topK: int = 5) -> List[str]:
        """Extract the top-K keywords from *text* via jieba's TF-IDF tagger.

        Args:
            text: Text to analyse.
            topK: Number of keywords to return.

        Returns:
            List of keyword strings, highest-weighted first.
        """
        return jieba.analyse.extract_tags(text, topK=topK)

    def process_data(self, df: pd.DataFrame) -> pd.DataFrame:
        """Add segmentation, keyword and token-count columns to *df*.

        Note: mutates *df* in place (columns are added) and also returns it,
        matching the original API.

        Args:
            df: Frame with a 'cleaned_text' column.

        Returns:
            The same frame with 'segmented_text', 'keywords' and
            'segment_count' columns added.
        """
        df['segmented_text'] = df['cleaned_text'].apply(self.segment_text)
        df['keywords'] = df['cleaned_text'].apply(self.extract_keywords)
        df['segment_count'] = df['segmented_text'].apply(len)
        logger.info("数据预处理完成")
        return df

    def save_to_csv(self, df: pd.DataFrame, output_path: str):
        """Write the processed frame to a UTF-8 CSV file.

        List columns are serialized as space-joined strings on a copy, so the
        caller's DataFrame is no longer mutated as a side effect.

        Args:
            df: Processed frame (output of process_data).
            output_path: Destination CSV path.

        Raises:
            Exception: re-raised after logging if the write fails.
        """
        try:
            out = df.copy()
            for col in ('segmented_text', 'keywords'):
                if col in out.columns:
                    out[col] = out[col].apply(self._join_tokens)
            out.to_csv(output_path, index=False, encoding='utf-8')
            logger.info(f"预处理后的数据已保存到 {output_path}")
        except Exception as e:
            logger.error(f"保存CSV文件失败: {str(e)}")
            raise

    def save_to_db(self, df: pd.DataFrame):
        """Persist the processed frame to the `processed_comments` table.

        List columns are joined to strings locally (sqlite3 cannot bind Python
        lists), so this no longer depends on save_to_csv() having mutated the
        frame first.

        Raises:
            Exception: re-raised after logging if any statement fails.
        """
        try:
            create_table_sql = """
            CREATE TABLE IF NOT EXISTS processed_comments (
                comment_id TEXT PRIMARY KEY,
                created_at TEXT,
                user_id TEXT,
                user_name TEXT,
                user_city TEXT,
                like_counts INTEGER,
                reply_counts INTEGER,
                content TEXT,
                cleaned_text TEXT,
                segmented_text TEXT,
                keywords TEXT,
                segment_count INTEGER
            )
            """
            self.conn.execute(create_table_sql)

            # Serialize list columns so sqlite3 can bind the values.
            out = df.copy()
            for col in ('segmented_text', 'keywords'):
                if col in out.columns:
                    out[col] = out[col].apply(self._join_tokens)

            # Clear old rows and append, so the schema above (with its PRIMARY
            # KEY constraint) is actually kept. The previous
            # if_exists='replace' dropped and recreated the table, silently
            # discarding the constraint and making the DDL dead code.
            # NOTE(review): append requires the frame's columns to be a subset
            # of the schema above — confirm against the cleaned-data producer.
            self.conn.execute("DELETE FROM processed_comments")
            out.to_sql('processed_comments', self.conn, if_exists='append', index=False)

            # Secondary indexes for the common query dimensions.
            self.conn.execute("CREATE INDEX IF NOT EXISTS idx_user_id ON processed_comments(user_id)")
            self.conn.execute("CREATE INDEX IF NOT EXISTS idx_created_at ON processed_comments(created_at)")
            self.conn.execute("CREATE INDEX IF NOT EXISTS idx_user_city ON processed_comments(user_city)")

            self.conn.commit()
            logger.info("数据已成功保存到数据库")
        except Exception as e:
            logger.error(f"保存到数据库失败: {str(e)}")
            raise

def main():
    """Script entry point: read cleaned comments, preprocess, persist to CSV and SQLite."""
    preprocessor = DataPreprocessor()
    try:
        # Read the cleaned data produced by the upstream cleaning step.
        df = pd.read_csv('cleaned_comments.csv')

        # Segment, extract keywords and count tokens.
        processed_df = preprocessor.process_data(df)

        # Persist to both sinks.
        preprocessor.save_to_csv(processed_df, 'preprocessed_comments.csv')
        preprocessor.save_to_db(processed_df)

    except Exception as e:
        logger.error(f"预处理过程中发生错误: {str(e)}")
    finally:
        # Close the DB connection deterministically instead of relying on
        # __del__ firing at interpreter shutdown.
        preprocessor.conn.close()

# Run the pipeline only when executed as a script, not when imported.
if __name__ == "__main__":
    main() 