## This module only scans the comic list pages.
## Comic list API endpoint: https://om0813.zw7gmc49.work/api/comics/base/fictionListByWorkCategory?page=1&pageSize=10&orderType=1&_t=1756282404750
from 核心.配置文件 import settings
from 核心.解密数据 import vp
from 核心.公共函数 import comm
from 核心.数据操作 import MySQLCRUD
import requests,json,time

class ScanComic:
    """Scan the site's comic-list pages and persist them to MySQL.

    Pages are fetched from the remote comic-list API, decrypted with
    ``vp``, and upserted into the ``comics_list`` table in batches.
    """

    # Column order of the comics_list table. Shared by the batch and
    # single-record UPSERT paths so the two SQL builders cannot drift apart.
    COLUMNS = [
        'comic_id', 'comics_id', 'comics_title', 'cover_img', 'back_img',
        'station_list', 'tag_list', 'class_list', 'work_category', 'author_name',
        'info', 'chapter_new_num', 'type', 'update_status', 'recommend',
        'fake_likes', 'real_likes', 'fake_watch_times', 'real_watch_times',
        'fake_favorites', 'real_favorites', 'comment_num', 'user_id', 'nick_name',
        'logo', 'status', 'station_sort', 'up_at', 'auto_shelf', 'shelf_date',
        'created_at', 'updated_at', 'domain', 'scan_time'
    ]

    # Columns that must never be overwritten by ON DUPLICATE KEY UPDATE.
    _IMMUTABLE_COLUMNS = ('id', 'comic_id', 'comics_id', 'created_at')

    def __init__(self):
        """Initialise request parameters, the DB connection and the insert buffer."""
        self.web_url = settings.BASE_URL
        self.page = 1
        self.pageSize = 100
        self.orderType = 1
        self.comm = comm

        # Database handle; stays None when init_database() fails.
        self.db = None
        self.init_database()

        # Buffer for batched inserts.
        self.batch_buffer = []
        self.batch_size = 100  # flush to the DB every 100 records

    def init_database(self):
        """Create the MySQL connection using the enhanced MySQLCRUD class.

        Uses a connection pool with automatic reconnection.

        Returns:
            bool: True when the pool is connected, False otherwise.
        """
        try:
            db_config = {
                **settings.DB_CONFIG,
                'max_retries': 3,          # maximum reconnect attempts
                'connection_timeout': 60,  # connection timeout in seconds
                'pool_size': 5             # connection pool size
            }

            self.db = MySQLCRUD(**db_config)

            if not self.db.is_connected():
                print("数据库连接失败！")
                return False
            print("数据库连接成功！")
            print(f"连接池状态: {self.db.get_pool_status()}")
            return True
        except Exception as e:
            print(f"数据库初始化失败: {e}")
            return False

    def _process_json_field(self, field_value, field_name):
        """Serialise a payload field into a string suitable for a JSON column.

        Args:
            field_value: raw value from the API payload.
            field_name: column name, kept for call-site clarity/debugging.

        Returns:
            str: the value unchanged when already a string, JSON text for
            lists/dicts, ``'[]'`` for None, ``str(value)`` for other scalars.
            (The original clobbered any falsy scalar — e.g. 0 — to '[]';
            only None gets that treatment now.)
        """
        if isinstance(field_value, str):
            return field_value
        if isinstance(field_value, (list, dict)):
            return json.dumps(field_value, ensure_ascii=False)
        if field_value is None:
            return '[]'
        # Fallback for remaining scalars (numbers, booleans, ...).
        return str(field_value)

    def _build_comic_record(self, comic, domain):
        """Map one decrypted API comic dict onto a comics_list row.

        Args:
            comic: comic dict with camelCase keys from the API payload.
            domain: source domain stored with the row (may be None).

        Returns:
            dict: one row keyed exactly by ``COLUMNS``.
        """
        return {
            'comic_id': comic.get('id'),
            'comics_id': comic.get('comicsId'),
            'comics_title': comic.get('comicsTitle', ''),
            'cover_img': comic.get('coverImg', ''),
            'back_img': comic.get('backImg', ''),
            # List-valued fields are stored as JSON text.
            'station_list': self._process_json_field(comic.get('stationList', []), 'station_list'),
            'tag_list': self._process_json_field(comic.get('tagList', []), 'tag_list'),
            'class_list': self._process_json_field(comic.get('classList', []), 'class_list'),
            'work_category': comic.get('workCategory', ''),
            'author_name': comic.get('authorName', ''),
            'info': comic.get('info', ''),
            'chapter_new_num': comic.get('chapterNewNum'),
            'type': comic.get('type'),
            'update_status': comic.get('updateStatus'),
            'recommend': comic.get('recommend', ''),
            'fake_likes': comic.get('fakeLikes'),
            'real_likes': comic.get('realLikes'),
            'fake_watch_times': comic.get('fakeWatchTimes'),
            'real_watch_times': comic.get('realWatchTimes'),
            'fake_favorites': comic.get('fakeFavorites'),
            'real_favorites': comic.get('realFavorites'),
            'comment_num': comic.get('commentNum'),
            'user_id': comic.get('userId'),
            'nick_name': comic.get('nickName', ''),
            'logo': comic.get('logo', ''),
            'status': comic.get('status'),
            'station_sort': comic.get('stationSort'),
            'up_at': comic.get('upAt', ''),
            'auto_shelf': comic.get('autoShelf', ''),
            'shelf_date': comic.get('shelfDate', ''),
            'created_at': comic.get('createdAt', ''),
            'updated_at': comic.get('updatedAt', ''),
            'domain': domain or '',
            'scan_time': time.strftime('%Y-%m-%d %H:%M:%S')
        }

    def _build_upsert_sql(self, columns):
        """Build an ``INSERT ... ON DUPLICATE KEY UPDATE`` statement.

        Args:
            columns: ordered column names for the INSERT.

        Returns:
            str: the parameterised UPSERT statement (``%s`` placeholders).
        """
        placeholders = ', '.join(['%s'] * len(columns))
        insert_part = f"INSERT INTO comics_list ({', '.join(columns)}) VALUES ({placeholders})"
        # Immutable/identity columns keep their original values on update.
        update_columns = [col for col in columns if col not in self._IMMUTABLE_COLUMNS]
        update_part = ', '.join(f"{col} = VALUES({col})" for col in update_columns)
        return f"{insert_part} ON DUPLICATE KEY UPDATE {update_part}"

    def batch_insert_comics(self, comics_data, domain=None):
        """Queue comics for insertion, flushing every ``batch_size`` records.

        Args:
            comics_data: iterable of decrypted comic dicts.
            domain: source domain stored with every row.

        Returns:
            bool: False when the DB is down or record preparation fails.
        """
        if not self.db or not self.db.is_connected():
            print("数据库未连接，无法插入数据")
            return False

        try:
            for comic in comics_data:
                self.batch_buffer.append(self._build_comic_record(comic, domain))
                # Flush once the buffer reaches the configured batch size.
                if len(self.batch_buffer) >= self.batch_size:
                    self._execute_batch_insert()
        except Exception as e:
            print(f"准备批量插入数据时出错: {e}")
            return False

        return True

    def _execute_batch_insert(self):
        """Flush the buffer to the DB with a batched UPSERT.

        Falls back to ``INSERT IGNORE`` when the UPSERT fails; the buffer
        is cleared after either path completes.
        """
        if not self.batch_buffer:
            return

        # Defined before the try so the except handler can always print it
        # (the original raised NameError when failing before SQL was built).
        query = ''
        try:
            query = self._build_upsert_sql(self.COLUMNS)
            batch_data = [[record[col] for col in self.COLUMNS]
                          for record in self.batch_buffer]

            try:
                result = self.db.execute_batch(query, batch_data)
                if result is not None:
                    print(f"成功处理 {len(self.batch_buffer)} 条漫画数据 (插入/更新: {result} 行)")
                else:
                    print("批量插入失败")

            except Exception as upsert_error:
                print(f"UPSERT失败，尝试使用普通INSERT: {upsert_error}")

                # Fall back to a plain INSERT that ignores duplicate keys.
                placeholders = ', '.join(['%s'] * len(self.COLUMNS))
                fallback_query = f"INSERT IGNORE INTO comics_list ({', '.join(self.COLUMNS)}) VALUES ({placeholders})"

                result = self.db.execute_batch(fallback_query, batch_data)
                if result is not None:
                    print(f"使用INSERT IGNORE成功处理 {len(self.batch_buffer)} 条漫画数据 (插入: {result} 行)")
                else:
                    print("INSERT IGNORE失败")

            # Clear the buffer regardless of how many rows were affected.
            self.batch_buffer.clear()

        except Exception as e:
            print(f"批量插入失败: {e}")
            print(f"SQL语句: {query}")

    def flush_remaining_data(self):
        """Force-insert whatever is still sitting in the buffer."""
        if self.batch_buffer:
            print(f"插入剩余的 {len(self.batch_buffer)} 条数据...")
            self._execute_batch_insert()

    def upsert_single_comic(self, comic_data, domain=None):
        """UPSERT a single comic row (insert or update).

        Args:
            comic_data: decrypted comic dict.
            domain: source domain stored with the row.

        Returns:
            bool: True when at least one row was inserted or updated.
        """
        if not self.db or not self.db.is_connected():
            print("数据库未连接，无法处理数据")
            return False

        try:
            comic_record = self._build_comic_record(comic_data, domain)
            columns = list(comic_record.keys())
            query = self._build_upsert_sql(columns)
            values = [comic_record[col] for col in columns]

            try:
                result = self.db.execute_update(query, values)
                return result > 0

            except Exception as upsert_error:
                print(f"UPSERT失败，尝试使用普通INSERT: {upsert_error}")
                # Fall back to a plain INSERT that ignores duplicate keys.
                placeholders = ', '.join(['%s'] * len(columns))
                fallback_query = f"INSERT IGNORE INTO comics_list ({', '.join(columns)}) VALUES ({placeholders})"
                result = self.db.execute_update(fallback_query, values)
                return result > 0

        except Exception as e:
            print(f"单条插入失败: {e}")
            return False

    def get_comics_list(self, page_num):
        """Fetch one page of the comic-list API and parse it as JSON.

        Args:
            page_num: 1-based page number to request.

        Returns:
            dict | None: parsed response, or None on request/parse failure.
        """
        try:
            # Millisecond timestamp acts as a cache-buster query parameter.
            current_timestamp = int(time.time() * 1000)
            url = f"{self.web_url}/api/comics/base/fictionListByWorkCategory?page={page_num}&pageSize={self.pageSize}&orderType={self.orderType}&_t={current_timestamp}"
            response = self.comm.get_resp(url)
            try:
                data = response.json()
                print(f"JSON解析成功: code={data.get('code')}, msg={data.get('msg')}")
                return data
            except json.JSONDecodeError as json_error:
                print(f"JSON解析失败: {json_error}")
                return None
        except requests.exceptions.RequestException as e:
            print(f"HTTP请求失败: {e}")
            return None
        except Exception as e:
            print(f"获取漫画列表失败: {e}")
            return None

    def scan_comics_list(self, start_page=1, end_page=2):
        """Scan pages ``start_page``..``end_page`` (inclusive) and store them.

        Args:
            start_page: first page to fetch.
            end_page: last page to fetch.
        """
        print(f"=== 扫描漫画列表 ===")
        print(f"扫描范围: 第 {start_page} 页到第 {end_page} 页")
        print(f"每页数量: {self.pageSize}")
        print("=" * 50)

        # Bail out early when the database is unavailable.
        if not self.db or not self.db.is_connected():
            print("数据库未连接，无法继续扫描")
            return

        for page_num in range(start_page, end_page + 1):
            print(f"\n--- 处理第 {page_num} 页 ---")
            response_data = self.get_comics_list(page_num)
            # Skip pages that failed or carry no encrypted payload
            # (the original raised KeyError on a missing 'encData').
            if not response_data or 'encData' not in response_data:
                print(f"第 {page_num} 页获取失败，跳过")
                continue

            # Decrypt the payload and validate it before touching its
            # contents (the original crashed when decryption returned None).
            decrypted_data = vp(response_data['encData'])
            if not decrypted_data or 'data' not in decrypted_data:
                print(f"第 {page_num} 页获取失败，跳过")
                continue
            print(f"第 {page_num} 页获取到 {len(decrypted_data.get('data', []))} 条漫画数据")

            domain = decrypted_data.get('domain', '')
            self.batch_insert_comics(decrypted_data['data'], domain)

            # Throttle between pages to avoid hammering the server.
            if page_num < end_page:
                print("等待2秒后继续...")
                time.sleep(2)

        # Persist any records still sitting in the buffer.
        self.flush_remaining_data()
        print("所有数据扫描完成！")

    def close_database(self):
        """Flush buffered records and shut down the connection pool."""
        if self.db:
            # Make sure buffered records are persisted before closing.
            self.flush_remaining_data()
            self.db.disconnect()
            print("数据库连接池已关闭")

if __name__ == "__main__":
    # Entry point: run a short scan over the first two pages and always
    # release the database connection pool, even when the scan fails.
    scanner = ScanComic()
    try:
        scanner.scan_comics_list(start_page=1, end_page=2)
    finally:
        scanner.close_database()
