import pandas as pd
import re
import os

# File path configuration: input is the merged raw scrape ("合并后的图书信息"),
# output is the cleaned CSV ("清洗后的图书信息"). Both are absolute Windows paths.
input_csv = r"c:\Users\25089\Desktop\学习\毕设v2\爬虫\当当网爬虫\合并后的图书信息.csv"
output_csv = r"c:\Users\25089\Desktop\学习\毕设v2\爬虫\当当网爬虫\清洗后的图书信息.csv"

def clean_book_data(input_path=None, output_path=None):
    """Clean the raw scraped book CSV and write the result to a new CSV.

    Pipeline: read -> fill missing values -> strip garbage/control
    characters -> enforce column length limits -> blank invalid URLs ->
    de-duplicate on (title, author) -> save. All errors are caught and
    reported on stdout; the function never raises (CLI-tool contract).

    Args:
        input_path: Source CSV path. Falls back to the module-level
            ``input_csv`` when omitted (backward compatible with the
            original zero-argument call).
        output_path: Destination CSV path. Falls back to the module-level
            ``output_csv`` when omitted.
    """
    # Resolve defaults lazily so existing zero-argument callers keep working.
    if input_path is None:
        input_path = input_csv
    if output_path is None:
        output_path = output_csv

    try:
        # 1. Read the raw data. open() with errors='ignore' silently drops
        #    bytes that are not valid UTF-8 instead of aborting the run.
        print(f"正在读取原始CSV文件: {input_path}")
        with open(input_path, 'r', encoding='utf-8-sig', errors='ignore') as f:
            df = pd.read_csv(f, low_memory=False)
        print(f"原始数据共 {len(df)} 条记录")

        # 2. Missing values (required columns validated, optional created).
        print("处理缺失值...")
        _fill_missing(df)

        # 3. String cleanup. description keeps all punctuation, so it gets
        #    a lighter pass than the other text columns.
        print("清洗字符串特殊字符和乱码...")
        for col in ['title', 'author', 'publisher', 'cover', 'url']:
            df[col] = df[col].apply(_clean_string)
        df['description'] = df['description'].apply(_clean_description)

        # 4. Column length limits (match the target DB schema).
        print("限制字段长度...")
        length_limits = {
            'title': 255,
            'author': 255,
            'publisher': 255,
            'cover': 512,
            'url': 512,
        }
        for col, max_len in length_limits.items():
            # All values are str at this point, so vectorized slicing is safe.
            df[col] = df[col].str.slice(0, max_len)

        # 5. Blank out values that are not plausible http(s) URLs.
        print("过滤无效URL...")
        df['url'] = df['url'].apply(_filter_invalid_url)
        df['cover'] = df['cover'].apply(_filter_invalid_url)

        # 6. De-duplicate on (title, author), keeping the first occurrence.
        print("去除重复记录...")
        df.drop_duplicates(subset=['title', 'author'], keep='first', inplace=True)
        print(f"清洗后剩余 {len(df)} 条记录")

        # 7. Save. utf-8-sig writes a BOM so Excel opens Chinese text correctly.
        df.to_csv(output_path, encoding='utf-8-sig', index=False)
        print(f"清洗完成！已保存至: {output_path}")

    except Exception as e:
        # Deliberate catch-all: this is a standalone CLI tool whose contract
        # is "report the error and return", never raise to the caller.
        print(f"清洗过程出错: {str(e)}")


def _fill_missing(df):
    """Fill/normalize missing values in ``df`` in place.

    Required columns must exist (raises ValueError otherwise):
    category_id is coerced to int with NaN -> 0; title gets a placeholder.
    Optional text columns are created empty if absent, otherwise NaN -> "".
    """
    for col in ['category_id', 'title']:
        if col not in df.columns:
            raise ValueError(f"CSV文件缺少必要字段: {col}")
        if col == 'category_id':
            df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0).astype(int)
        else:
            df[col] = df[col].fillna("未知标题")

    for col in ['author', 'publisher', 'description', 'cover', 'url']:
        if col not in df.columns:
            df[col] = ""
        else:
            df[col] = df[col].fillna("")


def _clean_description(s):
    """Light cleanup for free text: keep all punctuation, remove ASCII
    control characters, double single quotes (SQL-style escaping for the
    downstream import), and collapse whitespace runs."""
    if not isinstance(s, str):
        s = str(s) if pd.notna(s) else ""
    # Remove ASCII control characters only.
    s = re.sub(r'[\x00-\x1F\x7F]', '', s)
    # Escape single quotes for the downstream SQL import.
    s = s.replace("'", "''")
    # Collapse runs of whitespace into single spaces.
    return re.sub(r'\s+', ' ', s).strip()


def _clean_string(s):
    """Strict cleanup for short text columns: first drop anything outside
    CJK ideographs, ASCII alphanumerics, whitespace and common ASCII
    punctuation (mojibake filter), then apply the shared light cleanup."""
    if not isinstance(s, str):
        s = str(s) if pd.notna(s) else ""
    # Whitelist filter: removes garbled / mojibake characters.
    s = re.sub(r'[^\u4E00-\u9FA5a-zA-Z0-9\s!"#$%&\'()*+,-./:;<=>?@\[\]^_`{|}~]', '', s)
    # Control chars, quote escaping and whitespace handling are shared.
    return _clean_description(s)


def _filter_invalid_url(url):
    """Return ``url`` unchanged if it looks like an http(s) URL, else "".

    Bug fix: the original condition (`startswith(scheme) and len(url) < 8`)
    could only ever match the literal string "http://" — every https URL is
    already >= 8 chars — so non-URL junk was KEPT. The intent ("过滤无效URL")
    is clearly the opposite: blank anything without an http(s) scheme.
    """
    if not url.startswith(('http://', 'https://')):
        return ""
    if url in ('http://', 'https://'):
        # A bare scheme with no host is also invalid.
        return ""
    return url

if __name__ == "__main__":
    print("====== 图书数据清洗工具（修正版） ======")
    clean_book_data()
    print("清洗程序结束。")