import argparse
import os
import re
import unicodedata
from datetime import datetime, timedelta

import pandas as pd

def clean_data(input_file, output_file=None):
    """Clean a stock-forum comment CSV and sort the posts by date (newest first).

    Pipeline: drop duplicate posts, discard garbled (mojibake) titles, parse
    the mixed-format publish times, sanitize user nicknames, coerce click and
    comment counts to integers, then write the surviving rows sorted by
    publish time in descending order.

    Args:
        input_file (str): Path of the raw CSV to read.
        output_file (str, optional): Path of the cleaned CSV to write.
            Defaults to "<input basename>_cleaned.csv" in the current
            working directory.

    Returns:
        str: Path of the CSV file that was written.
    """
    # Derive an output name from the input basename when none was given.
    if output_file is None:
        base_name = os.path.basename(input_file)
        name_without_ext = os.path.splitext(base_name)[0]
        output_file = f"{name_without_ext}_cleaned.csv"

    print(f"开始处理文件: {input_file}")

    df = pd.read_csv(input_file)
    print(f"原始数据行数: {len(df)}")

    # 1. Drop duplicate posts (same post_id).
    df = df.drop_duplicates(subset=["post_id"])
    print(f"去重后行数: {len(df)}")

    # Compile all patterns once, instead of on every .apply() call.
    # The "allowed" class covers word chars, whitespace, CJK ideographs and
    # common Chinese/Western punctuation. NOTE: the original source had the
    # curly quotes mangled into string-literal breaks (r'...；''""（）...'),
    # which silently dropped the apostrophes from the allowed set; they are
    # written as explicit escapes here.
    mojibake_pattern = re.compile(
        r'[^\w\s\u4e00-\u9fff,，。、；\u2018\u2019\u201c\u201d\'"（）【】《》？！:：.!?-]'
    )
    control_char_pattern = re.compile(r'[\x00-\x1F\x7F-\x9F]')
    nickname_junk_pattern = re.compile(r'[^\w\s\u4e00-\u9fff]')
    non_digit_pattern = re.compile(r'[^\d]')
    today_pattern = re.compile(r'今天\s+(\d{2}):(\d{2})')
    yesterday_pattern = re.compile(r'昨天\s+(\d{2}):(\d{2})')
    month_day_pattern = re.compile(r'\d{2}-\d{2}\s+\d{2}:\d{2}')

    # 2. Clean titles and drop rows whose title looks like mojibake.
    def clean_title(title):
        """Return a normalized title, or "" when it looks garbled/invalid."""
        if pd.isna(title):
            return ""

        # A high share of characters outside the allowed set usually means
        # the title is mojibake rather than real text.
        special_char_count = len(mojibake_pattern.findall(title))
        total_char_count = len(title)
        if total_char_count > 0 and special_char_count / total_char_count > 0.2:
            return ""

        try:
            normalized_title = unicodedata.normalize('NFKC', title)
            # Strip control and other invisible characters.
            cleaned_title = control_char_pattern.sub('', normalized_title)
            # Titles shorter than 2 visible characters carry no information.
            if len(cleaned_title.strip()) < 2:
                return ""
            return cleaned_title.strip()
        except Exception as e:
            print(f"处理标题出错 '{title}': {str(e)}")
            return ""

    df['cleaned_title'] = df['post_title'].apply(clean_title)

    # .copy() so the later column assignments operate on a real frame, not a
    # view of the filtered slice (avoids pandas SettingWithCopyWarning).
    df = df[df['cleaned_title'] != ""].copy()
    print(f"去除乱码标题后行数: {len(df)}")

    # 3. Parse the publish time, which arrives in several formats.
    def parse_date(date_str):
        """Parse a publish-time string into datetime; None when unparseable."""
        if pd.isna(date_str):
            return None

        try:
            # Relative forms: "今天 HH:MM" / "昨天 HH:MM".
            today_match = today_pattern.match(date_str)
            yesterday_match = yesterday_pattern.match(date_str)

            if today_match:
                hour, minute = today_match.groups()
                return datetime.now().replace(
                    hour=int(hour), minute=int(minute), second=0, microsecond=0)
            elif yesterday_match:
                hour, minute = yesterday_match.groups()
                base = datetime.now().replace(
                    hour=int(hour), minute=int(minute), second=0, microsecond=0)
                # timedelta handles month/year boundaries; the original
                # replace(day=day-1) raised ValueError on the 1st of a month.
                return base - timedelta(days=1)

            # "MM-DD HH:MM" lacks a year: assume the current one.
            if month_day_pattern.match(date_str):
                current_year = datetime.now().year
                return datetime.strptime(f"{current_year}-{date_str}",
                                         '%Y-%m-%d %H:%M')

            # Absolute formats, tried in order; first match wins.
            formats = [
                '%Y-%m-%d %H:%M:%S',
                '%Y-%m-%d %H:%M',
                '%Y/%m/%d %H:%M:%S',
                '%Y/%m/%d %H:%M',
                '%m-%d %H:%M',
                '%Y年%m月%d日 %H:%M'
            ]
            for fmt in formats:
                try:
                    return datetime.strptime(date_str, fmt)
                except ValueError:
                    continue

            print(f"无法解析的日期格式: {date_str}")
            return None
        except Exception as e:
            print(f"处理日期出错 '{date_str}': {str(e)}")
            return None

    df['parsed_date'] = df['post_publish_time'].apply(parse_date)

    # Drop rows whose date could not be parsed.
    df_with_dates = df[~df['parsed_date'].isna()].copy()
    print(f"有效日期的行数: {len(df_with_dates)}")

    # Force a real datetime64 dtype: Series.apply may leave an object column,
    # on which the .dt accessor used below would fail.
    df_with_dates['parsed_date'] = pd.to_datetime(df_with_dates['parsed_date'])

    # 4. Sanitize nicknames: keep word chars, whitespace and CJK only.
    def clean_nickname(nickname):
        """Return the nickname stripped of symbols; "" when missing/broken."""
        if pd.isna(nickname):
            return ""
        try:
            normalized_nickname = unicodedata.normalize('NFKC', nickname)
            return nickname_junk_pattern.sub('', normalized_nickname).strip()
        except Exception as e:
            print(f"处理昵称出错 '{nickname}': {str(e)}")
            return ""

    df_with_dates['cleaned_nickname'] = df_with_dates['user_nickname'].apply(clean_nickname)

    # 5. Coerce click/comment counts to ints (missing or junk -> 0).
    def clean_number(value):
        """Best-effort int conversion; digits are salvaged from strings.

        NOTE(review): values like "2.3万" become 23, not 23000 — confirm
        whether the source site uses 万/亿 suffixes before trusting counts.
        """
        if pd.isna(value):
            return 0
        try:
            return int(value)
        except (ValueError, TypeError):
            num_str = non_digit_pattern.sub('', str(value))
            return int(num_str) if num_str else 0

    df_with_dates['cleaned_click_count'] = df_with_dates['post_click_count'].apply(clean_number)
    df_with_dates['cleaned_comment_count'] = df_with_dates['post_comment_count'].apply(clean_number)

    # 6. Sort newest first.
    df_with_dates.sort_values(by='parsed_date', ascending=False, inplace=True)

    # 7. Render dates in one canonical format for the output file.
    df_with_dates['formatted_date'] = df_with_dates['parsed_date'].dt.strftime('%Y-%m-%d %H:%M:%S')

    # Assemble the output columns, substituting the cleaned variants.
    output_df = pd.DataFrame()
    output_df['post_id'] = df_with_dates['post_id']
    output_df['post_title'] = df_with_dates['cleaned_title']
    output_df['publish_date'] = df_with_dates['formatted_date']
    output_df['post_url'] = df_with_dates['post_url']
    output_df['stockbar_code'] = df_with_dates['stockbar_code']
    output_df['stockbar_name'] = df_with_dates['stockbar_name']
    output_df['user_id'] = df_with_dates['user_id']
    output_df['user_nickname'] = df_with_dates['cleaned_nickname']
    output_df['user_is_majia'] = df_with_dates['user_is_majia']
    output_df['post_click_count'] = df_with_dates['cleaned_click_count']
    output_df['post_comment_count'] = df_with_dates['cleaned_comment_count']

    # Quick sanity statistics for eyeballing outliers.
    print("\n数据质量统计:")
    print(f"帖子标题长度 - 最小: {output_df['post_title'].str.len().min()}, 最大: {output_df['post_title'].str.len().max()}, 平均: {output_df['post_title'].str.len().mean():.1f}")
    print(f"点击量 - 最小: {output_df['post_click_count'].min()}, 最大: {output_df['post_click_count'].max()}, 平均: {output_df['post_click_count'].mean():.1f}")
    print(f"评论数 - 最小: {output_df['post_comment_count'].min()}, 最大: {output_df['post_comment_count'].max()}, 平均: {output_df['post_comment_count'].mean():.1f}")

    # utf-8-sig (BOM) so Excel opens the Chinese text correctly.
    output_df.to_csv(output_file, index=False, encoding='utf-8-sig')

    print(f"\n清洗完成，共保存 {len(output_df)} 条数据")
    print(f"数据已保存到: {output_file}")

    return output_file

def main():
    """CLI entry point: parse arguments, run the cleaner, optionally preview rows."""
    parser = argparse.ArgumentParser(description='清洗股票评论数据并按日期排序')
    parser.add_argument('input_file', help='输入CSV文件路径')
    parser.add_argument('-o', '--output', help='输出CSV文件路径（可选）')
    parser.add_argument('-s', '--sample', type=int, help='显示样本数据数量')
    args = parser.parse_args()

    result_path = clean_data(args.input_file, args.output)

    # Without -s/--sample there is nothing more to do.
    if not args.sample:
        return

    # Show the first N cleaned rows as a quick visual check.
    try:
        preview = pd.read_csv(result_path, encoding='utf-8-sig')
        print(f"\n显示前 {args.sample} 条清洗后的数据:")
        pd.set_option('display.max_columns', None)
        pd.set_option('display.width', 1000)
        print(preview.head(args.sample))
    except Exception as e:
        print(f"显示样本数据时出错: {str(e)}")

# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()