import pandas as pd
import random
import pymysql
from pymysql import OperationalError, ProgrammingError
import time

# -------------------------- Configuration (edit to match your environment) --------------------------
# Absolute path to the cleaned book-info CSV produced by the scraper.
CSV_PATH = r"c:\Users\25089\Desktop\学习\毕设v2\爬虫\当当网爬虫\output\清洗后的图书信息.csv"
# pymysql connection parameters.
DB_CONFIG = {
    "host": "192.168.200.131",
    "user": "root",
    "password": "1234",  # replace with the real password
    "db": "book_db",         # replace with the real database name
    "port": 3306,
    "charset": "utf8mb4"
}
# Fallback publisher names, picked at random for rows whose publisher field is blank.
DEFAULT_PUBLISHERS = [
    '人民文学出版社', '商务印书馆', '中华书局', '三联书店', '上海古籍出版社',
    '作家出版社', '中信出版社', '机械工业出版社', '清华大学出版社', '北京大学出版社',
    '复旦大学出版社', '上海交通大学出版社', '人民教育出版社', '外语教学与研究出版社',
    '电子工业出版社', '化学工业出版社', '中国人民大学出版社', '北京师范大学出版社',
    '高等教育出版社', '科学出版社'
]
BATCH_SIZE = 500  # rows per executemany() batch

# -------------------------- 核心函数 --------------------------
def read_and_preprocess_data(csv_path):
    """Read the scraped-book CSV and normalize it for DB insertion.

    - `category_id` is coerced to int, with blank/unparseable values -> 0.
    - All expected string columns are guaranteed to exist as `str` ('' for
      missing values); previously a CSV lacking the `publisher` column
      raised a KeyError below and the whole dataset was discarded.
    - Blank publishers are filled with a random entry from DEFAULT_PUBLISHERS.

    Returns the cleaned DataFrame, or None if reading/cleaning fails.
    """
    try:
        print(f"开始读取CSV文件：{csv_path}")
        # keep_default_na=False keeps empty cells as '' instead of NaN.
        df = pd.read_csv(csv_path, encoding='utf-8-sig', keep_default_na=False)

        # Coerce category_id to int; anything non-numeric becomes 0.
        df['category_id'] = (
            pd.to_numeric(df['category_id'], errors='coerce').fillna(0).astype(int)
        )

        # Ensure every expected string column exists and holds str values.
        str_columns = ['title', 'author', 'publisher', 'description', 'cover', 'url']
        for col in str_columns:
            if col in df.columns:
                df[col] = df[col].fillna('').astype(str)
            else:
                # Fix: create missing columns so later steps never KeyError.
                df[col] = ''

        # Vectorized replacement of blank publishers (was a per-row apply).
        blank_mask = df['publisher'].str.strip() == ''
        df.loc[blank_mask, 'publisher'] = [
            random.choice(DEFAULT_PUBLISHERS) for _ in range(int(blank_mask.sum()))
        ]

        print(f"数据预处理完成，共{len(df)}条记录")
        return df
    except Exception as e:
        print(f"读取/预处理数据失败：{str(e)}")
        return None

def recreate_book_table(conn):
    """Drop the old tb_book table (if present) and create a fresh one.

    Requires the tb_category table to exist (foreign-key target).
    Commits on success; rolls back and re-raises on any failure.
    """
    drop_sql = "DROP TABLE IF EXISTS `tb_book`;"
    create_sql = """
    CREATE TABLE `tb_book` (
      `id` INT PRIMARY KEY AUTO_INCREMENT,
      `category_id` INT NOT NULL,
      `title` VARCHAR(255) NOT NULL,
      `author` VARCHAR(255),
      `publisher` VARCHAR(255),
      `description` TEXT,
      `cover` VARCHAR(512),
      `url` VARCHAR(512),
      `create_time` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
      `update_time` DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
      FOREIGN KEY (`category_id`) REFERENCES `tb_category`(`id`),
      INDEX idx_category (`category_id`),
      INDEX idx_title (`title`),
      INDEX idx_author (`author`)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;
    """
    try:
        cursor = conn.cursor()
        try:
            # Step 1: drop any previous table.
            cursor.execute(drop_sql)
            print("旧表（如果存在）已删除")
            # Step 2: build the new schema.
            cursor.execute(create_sql)
            print("新表创建完成")
        finally:
            cursor.close()
        conn.commit()
    except ProgrammingError as e:
        # Most likely cause: the referenced tb_category table is missing.
        print(f"表操作失败（可能tb_category表不存在）：{str(e)}")
        conn.rollback()
        raise
    except Exception as e:
        print(f"表操作错误：{str(e)}")
        conn.rollback()
        raise

def batch_insert_data(conn, df):
    """Insert the DataFrame into tb_book in batches of BATCH_SIZE rows.

    Commits once after all batches succeed; rolls back and re-raises on
    any insert failure. No-op (with a message) for an empty DataFrame.
    """
    if df.empty:
        print("没有数据可插入")
        return

    insert_sql = """
    INSERT INTO `tb_book` (
        `category_id`, `title`, `author`, `publisher`, `description`, `cover`, `url`
    ) VALUES (%s, %s, %s, %s, %s, %s, %s)
    """

    total_rows = len(df)
    success_count = 0

    try:
        with conn.cursor() as cursor:
            for i in range(0, total_rows, BATCH_SIZE):
                batch_df = df.iloc[i:i+BATCH_SIZE]
                params = [
                    (
                        # Fix: cast numpy.int64 to a plain int — pymysql
                        # cannot escape numpy scalar types and raises.
                        int(row['category_id']),
                        row['title'],
                        row['author'],
                        row['publisher'],
                        row['description'],
                        row['cover'],
                        row['url']
                    ) for _, row in batch_df.iterrows()
                ]

                affected = cursor.executemany(insert_sql, params)
                # executemany may return None on some driver versions.
                success_count += affected or 0
                print(f"已插入 {min(i+BATCH_SIZE, total_rows)}/{total_rows} 条记录")

        conn.commit()
        print(f"全部插入完成，成功插入 {success_count}/{total_rows} 条记录")
    except Exception as e:
        print(f"插入数据失败：{str(e)}")
        conn.rollback()
        raise

# -------------------------- 主程序 --------------------------
def main():
    """Entry point: load the CSV, rebuild tb_book, bulk-insert all rows."""
    t0 = time.time()
    connection = None
    try:
        # Step 1: load and clean the source data.
        data = read_and_preprocess_data(CSV_PATH)
        if data is None or data.empty:
            print("没有有效数据，程序终止")
            return

        # Step 2: open the database connection.
        print("连接数据库...")
        connection = pymysql.connect(**DB_CONFIG)
        print("数据库连接成功")

        # Step 3: drop the old table and recreate the schema.
        recreate_book_table(connection)

        # Step 4: insert everything in batches.
        batch_insert_data(connection, data)

    except OperationalError as e:
        print(f"数据库连接失败（检查账号密码）：{str(e)}")
    except Exception as e:
        print(f"程序执行失败：{str(e)}")
    finally:
        if connection:
            connection.close()
            print("数据库连接已关闭")

        print(f"总耗时：{round(time.time() - t0, 2)}秒")

# Run the full import pipeline only when executed as a script.
if __name__ == "__main__":
    print("====== 图书数据批量插入程序（先删旧表） ======")
    main()
    print("====== 程序结束 ======")