import csv
import os
import re
from collections import defaultdict
import datetime

CSV_PATH = r'C:\Users\Devin\Desktop\python\data\comments.csv'

# =============== Manual configuration ===============
# Column-name mapping (adjust to match the actual CSV header).
COLUMN_MAPPING = {
    'username': '用户名',        # username
    'location': '位置',          # location
    'following': '关注数',       # following count
    'followers': '粉丝数',       # follower count
    'remark': '备注',            # remark
    'link': '用户链接'           # profile link
}

# Filtering rules
EXCLUDE_LOC = set()                      # locations to exclude (empty = none)
#EXCLUDE_LOC = {'上海', '广东'}           # example: exclude these locations
INCLUDE_LOC = {'湖北', '上海'}    # keep only users from these locations
INCLUDE_RMK = set()                      # remarks to require (empty = keep all)
EXCLUDE_RMK = {'你的关注', '互相关注', '作者'}  # remarks to exclude
FILTER_BY_DIFFERENCE = False            # enable the following-followers difference filter
DIFFERENCE_OPERATOR = ">"                # difference operator: >, >=, ==, <=, <
DIFFERENCE_VALUE = 1                     # value the difference is compared against

# Report settings
SAVE_DETAILED_REPORT = False             # write a detailed report file (True/False)
REPORT_FILENAME = "数据清洗报告"           # report file name (without extension)

# Output settings
ADD_TIMESTAMP = True                     # append a timestamp to output file names
TIMESTAMP_FORMAT = "%Y%m%d_%H%M%S"       # timestamp format

# -----------------------------------------
# Quick-switch examples (uncomment the relevant line to use it):
# EXCLUDE_LOC = {'上海', '广东'}  # exclude Shanghai and Guangdong
# EXCLUDE_RMK = set()         # filter nothing (keep everything)
# INCLUDE_RMK = {'作者'}      # keep only rows whose remark is "作者"
# BUG FIX: the next two example lines were previously NOT commented out and
# silently re-assigned the settings configured above; examples stay commented.
# DIFFERENCE_OPERATOR = ">"   # greater than
# DIFFERENCE_VALUE = 1        # 1
# DIFFERENCE_OPERATOR = "=="  # equal to
# DIFFERENCE_VALUE = 0        # 0
# ==========================================

def get_column_index(header, column_mapping):
    """Resolve each logical column key to its 0-based index in *header*.

    Matching is fuzzy: both the header cell and the configured name are
    lowercased and stripped of non-word characters before comparison.
    If a configured name is not found in the header, a hard-coded
    conventional position is used as a fallback.

    Args:
        header: list of column-name strings from the CSV header row.
        column_mapping: dict mapping logical keys (e.g. 'username') to
            the expected header names.

    Returns:
        dict mapping each logical key to a column index. Keys without a
        header match and without a known fallback are omitted.
    """
    # Conventional positions used when a configured name is absent.
    fallback_index = {
        'username': 0,
        'location': 1,
        'following': 2,
        'followers': 3,
        'remark': 6,
        'link': 7,
    }
    index_map = {}
    for key, expected_name in column_mapping.items():
        # Hoisted: normalize the configured name once per key instead of
        # once per header column (was recomputed in the inner loop).
        clean_name = re.sub(r'\W+', '', expected_name.strip().lower())
        for i, col in enumerate(header):
            clean_col = re.sub(r'\W+', '', col.strip().lower())
            if clean_col == clean_name:
                index_map[key] = i
                break
        else:
            # No header match: fall back to the conventional position.
            if key in fallback_index:
                index_map[key] = fallback_index[key]
    return index_map

def should_keep(row, reasons_counter, col_idx):
    """Apply the module-level filter rules to one CSV row.

    Returns (keep, reason): keep is True when the row passes every rule;
    reason is the rejection description (also tallied into
    *reasons_counter*) or None when the row is kept.
    """

    def field(key, default_idx):
        # Fetch and strip a cell by logical column key; '' when missing.
        idx = col_idx.get(key, default_idx)
        return row[idx].strip() if len(row) > idx else ''

    def reject(reason):
        # Record the rejection reason and build the return pair.
        reasons_counter[reason] += 1
        return False, reason

    location = field('location', 1)
    remark = field('remark', 6)

    # Rule order: remark exclusion, location exclusion, then inclusion rules.
    if EXCLUDE_RMK and remark in EXCLUDE_RMK:
        return reject(f"备注排除: {remark}")

    if EXCLUDE_LOC and location in EXCLUDE_LOC:
        return reject(f"地区排除: {location}")

    if INCLUDE_LOC and location not in INCLUDE_LOC:
        return reject(f"不在包含地区: {location}")

    if INCLUDE_RMK and remark not in INCLUDE_RMK:
        return reject(f"不在包含备注: {remark}")

    # Optional numeric filter on the (following - followers) difference.
    if FILTER_BY_DIFFERENCE:
        following_str = field('following', 2).replace(',', '')
        followers_str = field('followers', 3).replace(',', '')

        # Keep rows with a missing count by default.
        if not following_str or not followers_str:
            return True, None

        try:
            following = int(following_str)
            followers = int(followers_str)
        except ValueError:
            # Keep rows with malformed numbers by default.
            return True, None

        diff = following - followers
        # Evaluate every comparison and pick the configured one; an
        # unrecognized operator yields False and thus rejects the row,
        # matching the original if/elif chain's behavior.
        condition_met = {
            ">": diff > DIFFERENCE_VALUE,
            ">=": diff >= DIFFERENCE_VALUE,
            "==": diff == DIFFERENCE_VALUE,
            "<=": diff <= DIFFERENCE_VALUE,
            "<": diff < DIFFERENCE_VALUE,
        }.get(DIFFERENCE_OPERATOR, False)

        if not condition_met:
            return reject(f"关注-粉丝差值不符合条件: {following}-{followers}={diff} (要求: {DIFFERENCE_OPERATOR}{DIFFERENCE_VALUE})")

    return True, None

def get_output_path(input_path, add_timestamp=False, timestamp_format="%Y%m%d_%H%M%S"):
    """Build the cleaned-output file path next to *input_path*.

    Keeps the input's directory and extension, appending "_cleaned"
    (and, optionally, a timestamp) to the base name.
    """
    folder = os.path.dirname(input_path)
    base, ext = os.path.splitext(os.path.basename(input_path))

    stamp = ""
    if add_timestamp:
        stamp = "_" + datetime.datetime.now().strftime(timestamp_format)

    return os.path.join(folder, f"{base}{stamp}_cleaned{ext}")

def get_report_path(csv_path, report_filename, add_timestamp=False, timestamp_format="%Y%m%d_%H%M%S"):
    """Build the report .txt path in the same directory as *csv_path*."""
    folder = os.path.dirname(csv_path)

    name = report_filename
    if add_timestamp:
        name = f"{report_filename}_{datetime.datetime.now().strftime(timestamp_format)}"

    return os.path.join(folder, name + ".txt")

# Main flow: stream the CSV once, writing rows that pass the filters.
start_time = datetime.datetime.now()
output_path = get_output_path(CSV_PATH, ADD_TIMESTAMP, TIMESTAMP_FORMAT)
report_path = get_report_path(CSV_PATH, REPORT_FILENAME, ADD_TIMESTAMP, TIMESTAMP_FORMAT)

total_count = 0                      # data rows read (header excluded)
kept_count = 0                       # rows written to the cleaned file
reasons_counter = defaultdict(int)   # rejection reason -> occurrence count
removed_due_to_rules = 0             # rows rejected with an explicit reason
col_idx = {}  # logical key -> column index mapping

# utf-8-sig tolerates a BOM on input and writes one for Excel compatibility;
# newline='' is required by the csv module's writer.
with open(CSV_PATH, 'r', encoding='utf-8-sig') as infile, \
     open(output_path, 'w', encoding='utf-8-sig', newline='') as outfile:

    reader = csv.reader(infile)
    writer = csv.writer(outfile)

    # Copy the header row through unchanged.
    # NOTE(review): next(reader) raises StopIteration if the CSV is empty.
    header = next(reader)
    writer.writerow(header)

    # Resolve the configured column names to indexes.
    col_idx = get_column_index(header, COLUMN_MAPPING)

    # Print the resolved column mapping (debug aid).
    print("列映射配置:")
    for key, idx in col_idx.items():
        col_name = header[idx] if idx < len(header) else f"列{idx}"
        print(f"  {key}({COLUMN_MAPPING[key]}): 索引={idx} (实际列名: '{col_name}')")

    # Stream every data row; write the ones that pass all rules.
    for row in reader:
        total_count += 1
        keep, reason = should_keep(row, reasons_counter, col_idx)

        if keep:
            kept_count += 1
            writer.writerow(row)
        elif reason:
            removed_due_to_rules += 1

# Compute total processing time.
end_time = datetime.datetime.now()
process_duration = end_time - start_time

# Overall statistics (guard against division by zero on empty input).
filtered_total = total_count - kept_count
filtered_ratio = filtered_total / total_count * 100 if total_count > 0 else 0
kept_ratio = kept_count / total_count * 100 if total_count > 0 else 0

# Collect a human-readable description of every active filter rule.
active_rules = []
if EXCLUDE_RMK: active_rules.append(f"备注排除规则: {', '.join(EXCLUDE_RMK)}")
if EXCLUDE_LOC: active_rules.append(f"地区排除规则: {', '.join(EXCLUDE_LOC)}")
if INCLUDE_LOC: active_rules.append(f"地区包含规则: {', '.join(INCLUDE_LOC)}")
if INCLUDE_RMK: active_rules.append(f"备注包含规则: {', '.join(INCLUDE_RMK)}")
if FILTER_BY_DIFFERENCE:
    active_rules.append(f"关注-粉丝差值过滤: 保留差值{DIFFERENCE_OPERATOR}{DIFFERENCE_VALUE}的数据")

# Console summary.
border = "=" * 60
print(f"\n{border}")
print(f"{'数据清洗完成！':^60}")
print(border)
print(f"📁 源文件: {CSV_PATH}")
print(f"📄 清洗后文件: {output_path}")
# BUG FIX: this was `print(f"..." if SAVE_DETAILED_REPORT else "")`, which
# printed a stray blank line when no report was requested.
if SAVE_DETAILED_REPORT:
    print(f"📝 报告文件: {report_path}")
print(f"⏱️ 处理时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
print(f"🕒 处理耗时: {process_duration.total_seconds():.2f}秒")
print("-" * 60)

print("\n📊 整体统计:")
print(f"  原始数据行数: {total_count:,} 行")
print(f"  清洗后数据行数: {kept_count:,} 行 ({kept_ratio:.1f}%)")
print(f"  总过滤行数: {filtered_total:,} 行 ({filtered_ratio:.1f}%)")
print(f"  其中因规则过滤: {removed_due_to_rules:,} 行")

if active_rules:
    print("\n🔧 生效的过滤规则:")
    for rule in active_rules:
        print(f"  - {rule}")

if reasons_counter:
    print("\n📈 过滤原因分布 (仅显示主要原因):")
    sorted_reasons = sorted(reasons_counter.items(), key=lambda x: x[1], reverse=True)
    max_display = min(5, len(sorted_reasons))  # show at most 5 top reasons

    for reason, count in sorted_reasons[:max_display]:
        pct = count / filtered_total * 100 if filtered_total > 0 else 0
        print(f"  - {reason}: {count:,} 行 ({pct:.1f}%)")

    # Aggregate everything beyond the top reasons into one summary line.
    if len(sorted_reasons) > max_display:
        other_count = sum(count for reason, count in sorted_reasons[max_display:])
        other_pct = other_count / filtered_total * 100 if filtered_total > 0 else 0
        print(f"  - 其他原因: {other_count:,} 行 ({other_pct:.1f}%)")

# Optionally write the detailed report file.
if SAVE_DETAILED_REPORT:
    with open(report_path, 'w', encoding='utf-8') as report_file:
        report_file.write(border + "\n")
        report_file.write(f"{'数据清洗详细报告':^60}\n")
        report_file.write(border + "\n\n")

        # Run metadata: paths and timings.
        report_file.write(f"源文件路径: {CSV_PATH}\n")
        report_file.write(f"清洗后文件路径: {output_path}\n")
        report_file.write(f"报告文件路径: {report_path}\n")
        report_file.write(f"处理开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}\n")
        report_file.write(f"处理结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}\n")
        report_file.write(f"总处理时间: {process_duration.total_seconds():.2f}秒\n\n")

        # Column mapping as resolved against the actual header.
        report_file.write("[列映射配置]\n")
        for key, idx in col_idx.items():
            # NOTE(review): at module level locals() is globals(), so this
            # guard only protects against header never being assigned.
            col_name = header[idx] if 'header' in locals() and idx < len(header) else f"列{idx}"
            report_file.write(f"{key}({COLUMN_MAPPING[key]}): 索引={idx} (实际列名: '{col_name}')\n")

        # Overall row counts and ratios.
        report_file.write("\n[总体统计]\n")
        report_file.write(f"原始数据行数: {total_count:,}\n")
        report_file.write(f"清洗后数据行数: {kept_count:,} ({kept_ratio:.1f}%)\n")
        report_file.write(f"总过滤行数: {filtered_total:,} ({filtered_ratio:.1f}%)\n")
        report_file.write(f"因规则过滤: {removed_due_to_rules:,}\n")

        if active_rules:
            report_file.write("\n[生效过滤规则]\n")
            for rule in active_rules:
                report_file.write(f"{rule}\n")

        if reasons_counter:
            # Every rejection reason, most frequent first.
            report_file.write("\n[详细过滤原因分析]\n")
            sorted_reasons = sorted(reasons_counter.items(), key=lambda x: x[1], reverse=True)
            for reason, count in sorted_reasons:
                pct = count / filtered_total * 100 if filtered_total > 0 else 0
                report_file.write(f"{reason}: {count:,} ({pct:.1f}%)\n")

            report_file.write("\n[过滤原因汇总]\n")
            report_file.write(f"主过滤原因数量: {len(reasons_counter)}\n")

            # Per-rule-category totals, recovered by matching the reason
            # strings produced in should_keep().
            rm_excluded = sum(count for reason, count in reasons_counter.items() if "备注排除" in reason)
            loc_excluded = sum(count for reason, count in reasons_counter.items() if "地区排除" in reason)
            rm_not_included = sum(count for reason, count in reasons_counter.items() if "不在包含备注" in reason)
            loc_not_included = sum(count for reason, count in reasons_counter.items() if "不在包含地区" in reason)
            diff_excluded = sum(count for reason, count in reasons_counter.items() if "关注-粉丝差值" in reason)

            report_file.write("\n[分类过滤统计]\n")
            report_file.write(f"按备注排除规则过滤: {rm_excluded} 行\n")
            report_file.write(f"按地区排除规则过滤: {loc_excluded} 行\n")
            report_file.write(f"不符合备注包含规则: {rm_not_included} 行\n")
            report_file.write(f"不符合地区包含规则: {loc_not_included} 行\n")
            if FILTER_BY_DIFFERENCE:
                report_file.write(f"不符合关注-粉丝差值规则: {diff_excluded} 行\n")

    print(f"\n✅ 详细报表已保存至: {report_path}")

# Closing banner.
print("\n" + border)
print(f"{'处理完成':^60}")
print(border)