# csv_to_db.py - CSV数据导入SQLite数据库模块

import os
import csv
import sqlite3
import pandas as pd
from datetime import datetime
import logging

class XHSDataImporter:
    """Imports crawled Xiaohongshu (XHS) CSV exports into a SQLite database.

    Creates the schema on demand, imports note and comment CSV files, and
    aggregates derived per-user and per-location statistics.
    """

    def __init__(self, db_path='data/xhs_data.db'):
        """Initialize the importer.

        Args:
            db_path: Path of the SQLite database file.
        """
        self.db_path = db_path
        self.setup_logging()

    def setup_logging(self):
        """Configure logging to a timestamped file plus the console."""
        log_dir = 'logs'
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)

        # basicConfig is a no-op once the root logger is configured, so the
        # first importer created in a process picks the log file name.
        logging.basicConfig(
            filename=f'logs/import_{datetime.now().strftime("%Y%m%d%H%M%S")}.log',
            level=logging.INFO,
            format='%(asctime)s - %(levelname)s - %(message)s'
        )
        self.logger = logging.getLogger('XHSDataImporter')

        # Mirror output to the console.  Fix: the original unconditionally
        # appended a new StreamHandler, duplicating every console line when
        # more than one importer was created in the same process.
        if not any(isinstance(h, logging.StreamHandler) for h in self.logger.handlers):
            console = logging.StreamHandler()
            console.setLevel(logging.INFO)
            formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            console.setFormatter(formatter)
            self.logger.addHandler(console)

    def create_database_schema(self):
        """Create all tables and indexes (idempotent).

        Returns:
            True on success, False on failure.
        """
        conn = None  # Fix: bind before try so the finally clause cannot NameError
        try:
            conn = sqlite3.connect(self.db_path)
            cursor = conn.cursor()

            # Notes table
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS notes (
                    note_id INTEGER PRIMARY KEY AUTOINCREMENT,
                    title TEXT NOT NULL,
                    author TEXT NOT NULL,
                    content TEXT,
                    edit_time TEXT,
                    likes INTEGER DEFAULT 0,
                    collects INTEGER DEFAULT 0,
                    comments_count INTEGER DEFAULT 0,
                    collect_time DATETIME,
                    url TEXT UNIQUE,
                    keyword TEXT
                )
            ''')

            # Comments table
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS comments (
                    comment_id INTEGER PRIMARY KEY AUTOINCREMENT,
                    note_id INTEGER,
                    user_id TEXT NOT NULL,
                    user_name TEXT,
                    comment_text TEXT,
                    comment_time TEXT,
                    location TEXT,
                    likes INTEGER DEFAULT 0,
                    collect_time DATETIME,
                    FOREIGN KEY (note_id) REFERENCES notes(note_id)
                )
            ''')

            # Users table
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS users (
                    user_id TEXT PRIMARY KEY,
                    user_name TEXT,
                    gender TEXT,
                    location TEXT,
                    profile_url TEXT,
                    notes_count INTEGER DEFAULT 0,
                    followers_count INTEGER DEFAULT 0,
                    following_count INTEGER DEFAULT 0,
                    first_seen_date DATETIME,
                    last_updated DATETIME
                )
            ''')

            # Locations table
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS locations (
                    location_id INTEGER PRIMARY KEY AUTOINCREMENT,
                    location_name TEXT UNIQUE,
                    province TEXT,
                    city TEXT,
                    count INTEGER DEFAULT 0,
                    last_updated DATETIME
                )
            ''')

            # Collection-task bookkeeping table
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS collection_tasks (
                    task_id INTEGER PRIMARY KEY AUTOINCREMENT,
                    keyword TEXT,
                    start_time DATETIME,
                    end_time DATETIME,
                    target_notes_count INTEGER,
                    target_comments_count INTEGER,
                    actual_notes_count INTEGER,
                    actual_comments_count INTEGER,
                    status TEXT,
                    error_message TEXT
                )
            ''')

            # Indexes for the common lookup paths.
            cursor.execute('CREATE INDEX IF NOT EXISTS idx_notes_keyword ON notes(keyword)')
            cursor.execute('CREATE INDEX IF NOT EXISTS idx_comments_note_id ON comments(note_id)')
            cursor.execute('CREATE INDEX IF NOT EXISTS idx_comments_user_id ON comments(user_id)')
            cursor.execute('CREATE INDEX IF NOT EXISTS idx_comments_location ON comments(location)')

            conn.commit()
            self.logger.info("数据库结构创建成功")
            return True

        except Exception as e:
            self.logger.error(f"创建数据库结构失败: {e}")
            return False
        finally:
            if conn:
                conn.close()

    def import_notes_csv(self, csv_file='data/notes/xhs_python_notes.csv', keyword=None):
        """Import a notes CSV file into the ``notes`` table.

        Rows whose ``url`` is already stored are skipped; failed rows are
        logged and counted as skipped.

        Args:
            csv_file: Path of the notes CSV file.
            keyword: Search keyword to attach when the CSV has no
                ``keyword`` column.

        Returns:
            Number of imported rows on success, False on failure.
        """
        if not os.path.exists(csv_file):
            self.logger.error(f"笔记CSV文件不存在: {csv_file}")
            return False

        conn = None  # Fix: bind before try; finally raised NameError if read_csv failed
        try:
            # Make sure the schema exists before inserting.
            if not os.path.exists(self.db_path) or os.path.getsize(self.db_path) == 0:
                self.create_database_schema()

            df = pd.read_csv(csv_file, encoding='utf-8-sig')
            self.logger.info(f"读取到{len(df)}条笔记数据")

            conn = sqlite3.connect(self.db_path)

            # Normalize count columns such as "1万" to integers.
            # NOTE(review): "1.5万" collapses to 1 because the decimal point
            # stops the \d+ match — pre-existing behavior, flagged for follow-up.
            for col in ['likes', 'collects', 'comments']:
                if col in df.columns:
                    # Fix: raw string for the regex (avoids invalid-escape warning).
                    digits = (df[col].astype(str)
                              .str.replace('万', '0000')
                              .str.extract(r'(\d+)')[0])
                    df[col] = pd.to_numeric(digits, errors='coerce').fillna(0).astype(int)

            # Attach the keyword when the CSV does not carry one.
            if 'keyword' not in df.columns and keyword:
                df['keyword'] = keyword

            # CSV column name -> database column name.
            column_mapping = {
                'title': 'title',
                'author': 'author',
                'content': 'content',
                'edit_time': 'edit_time',
                'likes': 'likes',
                'collects': 'collects',
                'comments': 'comments_count',
                'collect_time': 'collect_time',
                'url': 'url',
                'keyword': 'keyword'
            }

            imported = 0
            skipped = 0
            cursor = conn.cursor()

            for _, row in df.iterrows():
                try:
                    # Skip rows whose URL is already stored.
                    cursor.execute("SELECT COUNT(*) FROM notes WHERE url = ?", (row['url'],))
                    if cursor.fetchone()[0] > 0:
                        skipped += 1
                        continue

                    columns = []
                    placeholders = []
                    values = []

                    # Fix: the original unpacked items() as (db_col, csv_col) —
                    # swapped, so it looked the *database* name up in the CSV
                    # row and silently dropped the comment count
                    # ('comments' -> 'comments_count').
                    for csv_col, db_col in column_mapping.items():
                        if csv_col in row and pd.notna(row[csv_col]):
                            value = row[csv_col]
                            # Fix: unbox numpy scalars from iterrows(); sqlite3
                            # only binds native Python types.
                            if hasattr(value, 'item'):
                                value = value.item()
                            columns.append(db_col)
                            placeholders.append('?')
                            values.append(value)

                    sql = f"INSERT INTO notes ({', '.join(columns)}) VALUES ({', '.join(placeholders)})"
                    cursor.execute(sql, values)
                    conn.commit()
                    imported += 1

                except Exception as e:
                    self.logger.error(f"导入笔记行失败: {e}")
                    skipped += 1
                    conn.rollback()

            self.logger.info(f"导入完成: 成功导入{imported}条笔记，跳过{skipped}条")
            return imported

        except Exception as e:
            self.logger.error(f"导入笔记CSV失败: {e}")
            return False
        finally:
            if conn:
                conn.close()

    def import_comments_csv(self, csv_file='data/user/xhs_comments.csv'):
        """Import a comments CSV file into the ``comments`` table.

        Also accumulates first-seen rows into ``users`` and occurrence
        counts into ``locations``.

        Args:
            csv_file: Path of the comments CSV file.

        Returns:
            Number of imported rows on success, False on failure.
        """
        if not os.path.exists(csv_file):
            self.logger.error(f"评论CSV文件不存在: {csv_file}")
            return False

        conn = None  # Fix: bind before try so the finally clause cannot NameError
        try:
            # Make sure the schema exists before inserting.
            if not os.path.exists(self.db_path) or os.path.getsize(self.db_path) == 0:
                self.create_database_schema()

            df = pd.read_csv(csv_file, encoding='utf-8-sig')
            self.logger.info(f"读取到{len(df)}条评论数据")

            conn = sqlite3.connect(self.db_path)
            # Fix: create the cursor once, before the loop — the original
            # created it per row and referenced it after the loop, crashing
            # with NameError on an empty CSV.
            cursor = conn.cursor()

            def extract_location(text):
                """Best-effort extraction of a short location token trailing a
                date-like prefix, e.g. '02-14 北京' -> '北京'; None otherwise."""
                import re
                location_pattern = r'(?:\d{2}-\d{2}|[\d-]+)([^\d,]+)$'
                match = re.search(location_pattern, str(text))
                if match and match.group(1).strip():
                    location = match.group(1).strip()
                    # Keep only short tokens without punctuation ...
                    if len(location) <= 4 and not any(char in location for char in ',.。，'):
                        # ... and drop known non-location fragments.
                        if location not in ['本', '来自', '来自小红', '来自小红书']:
                            return location
                return None

            imported = 0
            skipped = 0
            user_data = {}      # user_id -> first-seen user info
            location_data = {}  # location -> occurrence count in this file

            for _, row in df.iterrows():
                try:
                    # The location may live in the comment text or the time field.
                    location = None
                    if 'comment_text' in row and pd.notna(row['comment_text']):
                        location = extract_location(row['comment_text'])
                    if not location and 'comment_time' in row and pd.notna(row['comment_time']):
                        location = extract_location(row['comment_time'])

                    # Resolve the parent note by URL, if present.
                    note_id = None
                    if 'note_url' in row and pd.notna(row['note_url']):
                        cursor.execute("SELECT note_id FROM notes WHERE url = ?", (row['note_url'],))
                        result = cursor.fetchone()
                        if result:
                            note_id = result[0]

                    # Fix: tolerate missing/non-numeric like counts instead of
                    # failing the whole row.
                    try:
                        likes = int(float(row.get('likes', 0)))
                    except (TypeError, ValueError):
                        likes = 0

                    columns = ['note_id', 'user_id', 'user_name', 'comment_text',
                               'comment_time', 'location', 'likes', 'collect_time']
                    values = [
                        note_id,
                        row.get('user_id', '未知用户'),
                        row.get('user_name', '未知用户名'),
                        row.get('comment_text', ''),
                        row.get('comment_time', ''),
                        location,
                        likes,
                        datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    ]

                    # Insert only the non-NULL columns (same result as the
                    # original index-based filtering, but readable).
                    pairs = [(c, v) for c, v in zip(columns, values) if v is not None]
                    sql = (f"INSERT INTO comments ({', '.join(c for c, _ in pairs)}) "
                           f"VALUES ({', '.join('?' for _ in pairs)})")
                    cursor.execute(sql, [v for _, v in pairs])

                    # Remember the first appearance of each user.
                    if 'user_id' in row and pd.notna(row['user_id']):
                        user_id = row['user_id']
                        if user_id not in user_data:
                            user_data[user_id] = {
                                'user_name': row.get('user_name', ''),
                                'location': location,
                                'first_seen_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                            }

                    # Count location occurrences.
                    if location:
                        location_data[location] = location_data.get(location, 0) + 1

                    conn.commit()
                    imported += 1

                except Exception as e:
                    self.logger.error(f"导入评论行失败: {e}")
                    skipped += 1
                    conn.rollback()

            # Bulk insert the collected users (existing rows are left alone).
            self.logger.info(f"开始导入{len(user_data)}条用户数据")
            for user_id, user_info in user_data.items():
                try:
                    cursor.execute(
                        "INSERT OR IGNORE INTO users (user_id, user_name, location, first_seen_date, last_updated) VALUES (?, ?, ?, ?, ?)",
                        (user_id, user_info['user_name'], user_info['location'], user_info['first_seen_date'], datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
                    )
                except Exception as e:
                    self.logger.error(f"导入用户数据失败: {e}")

            # Bulk upsert the collected location counts.
            self.logger.info(f"开始导入{len(location_data)}条地区数据")
            for location, count in location_data.items():
                try:
                    # Fix: the original inserted new rows with `count` and then
                    # unconditionally added `count` again, doubling the tally
                    # for first-seen locations.  Insert at 0, then increment.
                    cursor.execute(
                        "INSERT OR IGNORE INTO locations (location_name, count, last_updated) VALUES (?, ?, ?)",
                        (location, 0, datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
                    )
                    cursor.execute(
                        "UPDATE locations SET count = count + ?, last_updated = ? WHERE location_name = ?",
                        (count, datetime.now().strftime('%Y-%m-%d %H:%M:%S'), location)
                    )
                except Exception as e:
                    self.logger.error(f"导入地区数据失败: {e}")

            conn.commit()
            self.logger.info(f"导入完成: 成功导入{imported}条评论，跳过{skipped}条")
            return imported

        except Exception as e:
            self.logger.error(f"导入评论CSV失败: {e}")
            return False
        finally:
            if conn:
                conn.close()

    def import_all_data(self, keyword=None):
        """Import all CSV data and record the run in ``collection_tasks``.

        Args:
            keyword: Optional keyword forwarded to the notes import.

        Returns:
            True on success, False on failure.
        """
        self.logger.info("开始全量数据导入")

        conn = None
        task_id = None  # Fix: bound up front; the except clause reads it
        try:
            conn = sqlite3.connect(self.db_path)
            cursor = conn.cursor()

            # Create the schema if the database is brand new (connect() above
            # creates an empty file, so the size check catches that case).
            if not os.path.exists(self.db_path) or os.path.getsize(self.db_path) == 0:
                self.create_database_schema()

            # Record the task start.
            start_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            cursor.execute(
                "INSERT INTO collection_tasks (keyword, start_time, status) VALUES (?, ?, ?)",
                (keyword or "批量导入", start_time, "进行中")
            )
            task_id = cursor.lastrowid
            conn.commit()

            # Import notes, then comments (comments resolve note_id by URL).
            notes_count = self.import_notes_csv(keyword=keyword)
            comments_count = self.import_comments_csv()

            # Record the task outcome.
            end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            cursor.execute(
                "UPDATE collection_tasks SET end_time = ?, actual_notes_count = ?, actual_comments_count = ?, status = ? WHERE task_id = ?",
                (end_time, notes_count, comments_count, "完成", task_id)
            )
            conn.commit()

            self.logger.info(f"全量数据导入完成: 导入了{notes_count}条笔记和{comments_count}条评论")
            return True

        except Exception as e:
            # Best-effort: mark the task as failed before giving up.
            if conn and task_id:
                try:
                    conn.execute(
                        "UPDATE collection_tasks SET status = ?, error_message = ? WHERE task_id = ?",
                        ("失败", str(e), task_id)
                    )
                    conn.commit()
                except Exception:  # Fix: was a bare except
                    pass

            self.logger.error(f"全量数据导入失败: {e}")
            return False
        finally:
            if conn:
                conn.close()

def main():
    """Command-line entry point: import XHS CSV exports into SQLite."""
    import argparse

    arg_parser = argparse.ArgumentParser(description='小红书数据CSV导入SQLite数据库工具')
    arg_parser.add_argument('--db', type=str, default='data/xhs_data.db', help='数据库文件路径')
    arg_parser.add_argument('--notes', type=str, default='data/notes/xhs_python_notes.csv', help='笔记CSV文件路径')
    arg_parser.add_argument('--comments', type=str, default='data/user/xhs_comments.csv', help='评论CSV文件路径')
    arg_parser.add_argument('--keyword', type=str, help='笔记关键词（如果CSV中没有）')
    arg_parser.add_argument('--all', action='store_true', help='导入所有数据')
    opts = arg_parser.parse_args()

    importer = XHSDataImporter(db_path=opts.db)

    if opts.all:
        importer.import_all_data(keyword=opts.keyword)
        return

    # Import whichever individual CSV files are present on disk.
    if os.path.exists(opts.notes):
        importer.import_notes_csv(csv_file=opts.notes, keyword=opts.keyword)
    if os.path.exists(opts.comments):
        importer.import_comments_csv(csv_file=opts.comments)

if __name__ == "__main__":
    main()