import requests
from common import Authorization, DynamoDBKayValue
import json
import logging
import random
import time
from configuration import is_running_on_aws_lambda
import re
from bs4 import BeautifulSoup

# Module logger. A console handler is attached only when not running on
# AWS Lambda (locally); the format string matches the project convention.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
if not is_running_on_aws_lambda():
    _console = logging.StreamHandler()
    _console.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    )
    logger.addHandler(_console)

# Anti-anti-scraping configuration: a pool of real-browser User-Agent
# strings to rotate through on every request.
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/121.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.1 Safari/605.1.15',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
]

def get_random_headers():
    """Build browser-like request headers with a randomly chosen User-Agent.

    Rotating the UA across requests makes the traffic look less uniform
    to anti-bot filters on the scraped sites.
    """
    headers = {
        'Accept': 'application/json, text/plain, */*',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive',
        'Cache-Control': 'no-cache',
        'Pragma': 'no-cache',
        'Referer': 'http://f.y41566.com/',
        'Origin': 'http://f.y41566.com',
        'X-Requested-With': 'XMLHttpRequest',
    }
    headers['User-Agent'] = random.choice(USER_AGENTS)
    return headers

def make_request_with_retry(url, method='GET', json_data=None, max_retries=3, base_delay=1):
    """Issue an HTTP request with anti-bot pacing and retries.

    Args:
        url: Target URL.
        method: 'GET' issues a GET; any other value issues a POST.
        json_data: JSON body sent on POST requests.
        max_retries: Maximum number of attempts.
        base_delay: Base delay in seconds added before every attempt.

    Returns:
        The successful ``requests.Response`` (HTTP 200), or ``None`` when
        every attempt failed.
    """
    # Use the session as a context manager so its pooled connections are
    # released when we return — the original leaked the Session object.
    with requests.Session() as session:
        for attempt in range(max_retries):
            # Randomized delay before every attempt to avoid a fixed cadence.
            delay = base_delay + random.uniform(0.5, 2.0)
            time.sleep(delay)

            headers = get_random_headers()
            try:
                if method.upper() == 'GET':
                    response = session.get(url, headers=headers, timeout=10)
                else:
                    response = session.post(url, headers=headers, json=json_data, timeout=10)
            except requests.exceptions.RequestException as e:
                logger.error(f'Request exception on attempt {attempt + 1}/{max_retries}: {e}')
                if attempt < max_retries - 1:
                    time.sleep(delay * 2)
                continue

            if response.status_code == 200:
                return response
            elif response.status_code == 429:  # Too Many Requests
                logger.warning(f'Rate limited, attempt {attempt + 1}/{max_retries}')
                time.sleep(delay * 2)  # back off harder before retrying
            else:
                logger.error(f'Request failed with status {response.status_code}, attempt {attempt + 1}/{max_retries}')

    # All attempts exhausted without a 200.
    return None

def lambda_handler(event, context):
    """API Gateway entry point: authenticate, validate params, dispatch search.

    Query string parameters:
        search_type: index into the search registry (0=video, 1=music, 2=book).
        search_keyword: required, non-empty search term.
        id: optional, consumed by the per-type search functions.

    Returns an API Gateway proxy response dict (statusCode/headers/body).
    """
    search_func_mapping = {
        'video': search_video,
        'music': search_music,
        'book': search_book,
    }
    search_list = list(search_func_mapping.keys())
    search_func_list = list(search_func_mapping.values())

    is_login, user_info = Authorization.get_user_info(event)
    if not is_login:
        # On auth failure user_info already holds the error response.
        return user_info

    # BUGFIX: API Gateway sends queryStringParameters as an explicit None
    # (not a missing key) when the request has no query string, so
    # .get(..., {}) returned None and the .get() calls below crashed.
    query = event.get('queryStringParameters') or {}
    search_type = query.get('search_type', 0)
    # BUGFIX: strip before validating so a whitespace-only keyword is
    # rejected instead of triggering an empty search.
    search_keyword = query.get('search_keyword', '').strip()
    if not search_keyword:
        return {
            'statusCode': 400,
            'body': json.dumps({'error': 'search_keyword is required'})
        }
    try:
        search_type = int(search_type)
    except ValueError:
        return {
            'statusCode': 400,
            'body': json.dumps({'error': 'search_type is invalid'})
        }
    if search_type < 0 or search_type >= len(search_list):
        return {
            'statusCode': 400,
            'body': json.dumps({'error': 'search_type is invalid'})
        }
    search_type_name = search_list[search_type]
    logger.info(f'search_type: {search_type_name}, search_keyword: {search_keyword}')

    search_func = search_func_list[search_type]
    try:
        kv = DynamoDBKayValue(query, user_info)
        search_value = kv.get_search_value()
        if search_value:
            # Cache hit: return the stored result directly.
            return build_response({'data': search_value}, 200)
        response = search_func(search_keyword, query)
        if response:
            kv.set_search_value(response)
            return build_response({'data': response}, 200)
        return build_response({'error': '搜索失败，请联系管理员'}, 500)
    except Exception as e:
        logger.error(f'search_func error: {e}')
        return build_response({'error': '搜索失败，请联系管理员', 'detail': str(e)}, 500)

def build_response(body: dict, status_code: int = 200):
    """Serialize *body* into an API Gateway proxy response dict."""
    return {
        'statusCode': status_code,
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps(body),
    }

def search_video(search_keyword, query: dict):
    """Aggregate video results for *search_keyword* from the video site.

    First obtains an access token, then POSTs the keyword to every known
    listing endpoint (shuffled, with pauses between calls) and merges the
    'list' payloads from each response.

    Returns the combined list of video entries, or None when the token
    could not be obtained.
    """
    base_url = 'http://f.y41566.com'

    token_response = make_request_with_retry(f'{base_url}/v/api/getToken')
    if not token_response:
        logger.error('search_video: get token failed after retries')
        return None

    try:
        token = token_response.json().get('token')
    except Exception as e:
        logger.error(f'search_video: parse token response error: {e}')
        return None
    if not token:
        logger.error('search_video: token not found in response')
        return None

    collected = []
    endpoints = ['/v/api/getJuzi', '/v/api/getpwdcfg', '/v/api/sortWeb', '/v/api/getTop', '/v/api/getTTZJB', '/v/api/getDyfx', '/v/api/getGirls', '/v/api/getXiaoy', '/v/api/getGGang']

    # Shuffle so the site never sees the endpoints hit in a fixed order.
    random.shuffle(endpoints)

    for index, api in enumerate(endpoints):
        # Pause between endpoint calls (no pause before the first one).
        if index:
            time.sleep(random.uniform(1.0, 3.0))

        response = make_request_with_retry(
            f'{base_url}{api}',
            method='POST',
            json_data={'token': token, 'name': search_keyword}
        )
        if not response:
            logger.error(f'search_video: {api} failed after retries')
            continue

        try:
            data = response.json().get('list', [])
        except Exception as e:
            logger.error(f'search_video: parse response error: {e}, api: {api}')
            continue
        if isinstance(data, list):
            collected.extend(data)
            logger.info(f'search_video: {api} success, {len(data)} videos found')
        else:
            logger.error(f'search_video: {api} returned invalid data format')

    return collected

def search_music(search_keyword, query: dict):
    """Search music on Tencent/NetEase, or fetch one song's detail + lyrics.

    Two modes, selected by ``query['id']``:

    * No id (search mode): query both platforms for *search_keyword*,
      tag every dict hit with its platform under ``'type'``, cache each
      hit for 24h under the key ``'<id>_music_id'``, then dedupe the
      merged list on (id, platform) and return it.
    * With id (detail mode): read the cached hit to learn the platform,
      fetch the song detail plus lyrics (stored under ``'lrc'``) and
      return the detail dict, or ``None`` on any failure.
    """
    # Per-platform API endpoints: keyword search, detail by id, lyrics by id.
    source = {
        'tencent': {
            'search': 'https://api.vkeys.cn/v2/music/tencent?word=',
            'search_by_id': 'https://api.vkeys.cn/v2/music/tencent?id=',
            'search_lrc_by_id': 'https://api.vkeys.cn/v2/music/tencent/lyric?id='
        },
        'netease': {
            'search': 'https://api.vkeys.cn/v2/music/netease?word=',
            'search_by_id': 'https://api.vkeys.cn/v2/music/netease?id=',
            'search_lrc_by_id': 'https://api.vkeys.cn/v2/music/netease/lyric?id='
        }
    }

    music_id = query.get('id', '')

    if not music_id:
        # --- Search mode: call both platforms' search APIs ---
        all_music_results = []

        for platform, apis in source.items():
            try:
                # Random pause so the platform calls are not back-to-back.
                time.sleep(random.uniform(0.5, 1.5))

                search_url = f"{apis['search']}{search_keyword}"
                response = make_request_with_retry(search_url)

                if response:
                    try:
                        result = response.json()
                        if result.get('code') == 200 and result.get('data'):
                            music_list = result['data']
                            for music in music_list:
                                if isinstance(music, dict):
                                    # Tag with the platform so the detail
                                    # lookup knows which API to use later.
                                    music['type'] = platform
                                    # BUGFIX: the cache write used to sit
                                    # outside this isinstance guard (crashing
                                    # on non-dict items), and it nested single
                                    # quotes inside a single-quoted f-string —
                                    # a SyntaxError before Python 3.12.
                                    search_key = DynamoDBKayValue.get_search_key(f"{music['id']}_music_id")
                                    DynamoDBKayValue.set_value(search_key, music, 60 * 60 * 24)
                            all_music_results.extend(music_list)
                            logger.info(f'search_music: {platform} search success, found {len(music_list)} songs')
                        else:
                            logger.warning(f'search_music: {platform} search failed, response: {result}')
                    except Exception as e:
                        logger.error(f'search_music: parse {platform} response error: {e}')
                else:
                    logger.error(f'search_music: {platform} search request failed')

            except Exception as e:
                logger.error(f'search_music: {platform} search error: {e}')
                continue

        # Dedupe on the (id, platform) pair — the same numeric id can occur
        # on both platforms and must be kept once per platform.
        if all_music_results:
            seen = set()
            unique_music = []
            for music in all_music_results:
                if isinstance(music, dict):
                    music_id = music.get('id', '')
                    platform = music.get('type', '')
                    unique_key = f"{music_id}_{platform}"
                    if unique_key not in seen:
                        seen.add(unique_key)
                        unique_music.append(music)
            all_music_results = unique_music
            logger.info(f'search_music: total unique songs found: {len(all_music_results)}')

        return all_music_results
    else:
        # --- Detail mode: the song must have been cached by a prior search ---
        search_key = DynamoDBKayValue.get_search_key(f'{music_id}_music_id')
        music_by_id = DynamoDBKayValue.get_value(search_key)
        if not music_by_id:
            logger.error(f'search_music: music_by_id not found for id: {music_id}')
            return None

        platform = music_by_id.get('type', '')
        if platform not in source:
            logger.error(f'search_music: unsupported platform: {platform}')
            return None

        try:
            # Small random pause before the detail call.
            time.sleep(random.uniform(0.5, 1.0))

            detail_url = f"{source[platform]['search_by_id']}{music_id}"
            response = make_request_with_retry(detail_url)

            if response:
                try:
                    result = response.json()
                    if result.get('code') == 200 and result.get('data'):
                        music_detail = result['data']
                        if isinstance(music_detail, dict):
                            music_detail['type'] = platform

                            # Fetch lyrics; failures are non-fatal and leave
                            # 'lrc' as an empty dict.
                            try:
                                time.sleep(random.uniform(0.3, 0.8))

                                lrc_url = f"{source[platform]['search_lrc_by_id']}{music_id}"
                                lrc_response = make_request_with_retry(lrc_url)

                                if lrc_response:
                                    lrc_result = lrc_response.json()
                                    if lrc_result.get('code') == 200 and lrc_result.get('data'):
                                        music_detail['lrc'] = lrc_result['data']
                                        logger.info(f'search_music: {platform} lyric success for id: {music_id}')
                                    else:
                                        logger.warning(f'search_music: {platform} lyric failed, response: {lrc_result}')
                                        music_detail['lrc'] = {}
                                else:
                                    logger.error(f'search_music: {platform} lyric request failed for id: {music_id}')
                                    music_detail['lrc'] = {}
                            except Exception as e:
                                logger.error(f'search_music: get lyric error for {platform} id {music_id}: {e}')
                                music_detail['lrc'] = {}

                        logger.info(f'search_music: {platform} detail success for id: {music_id}')
                        return music_detail
                    else:
                        logger.warning(f'search_music: {platform} detail failed, response: {result}')
                        return None
                except Exception as e:
                    logger.error(f'search_music: parse {platform} detail response error: {e}')
                    return None
            else:
                logger.error(f'search_music: {platform} detail request failed for id: {music_id}')
                return None

        except Exception as e:
            logger.error(f'search_music: {platform} detail error: {e}')
            return None

def search_book(search_keyword, query: dict):
    """Scrape sudugu.com for books: search listing or single-book detail.

    Two modes, selected by ``query['id']``:

    * No id (search mode): fetch the search-results page for
      *search_keyword*, parse each result item (id, title, cover, author,
      status, read count, update time, first chapters), cache each book
      for 24h under '<id>_book_id', and return the list of book dicts.
    * With id (detail mode): fetch the book's detail page and return one
      dict that additionally includes 'description' and 'download_list'.

    Returns None on request failure or when the expected page structure
    is missing.

    NOTE(review): this function caches json.dumps(book_info) (a string),
    while search_music caches the raw dict — confirm the cache reader
    expects a JSON string for books.
    """
    book_id = query.get('id', '')
    search_source = "https://www.sudugu.com"
    
    if not book_id:
        # Search mode: scrape the search-results page for matching books.
        search_url = f"{search_source}/i/sor.aspx?key={search_keyword}"
        
        # Random delay to avoid hammering the site.
        time.sleep(random.uniform(1.0, 2.0))
        
        response = make_request_with_retry(search_url)
        if not response:
            logger.error(f'search_book: search request failed for keyword: {search_keyword}')
            return None
            
        try:
            # Parse the HTML with BeautifulSoup.
            soup = BeautifulSoup(response.text, 'html.parser')
            logger.info('search_book: HTML parsed with BeautifulSoup')
            
            # Locate the results container div.
            container = soup.find('div', class_='container')
            if not container:
                logger.error('search_book: 未找到class="container"的DOM')
                return None
            
            # Each search result is a div with class "item".
            items = container.find_all('div', class_='item')
            logger.info(f'search_book: found {len(items)} items with BeautifulSoup')
            
            book_list = []
            
            for i, item in enumerate(items):
                try:
                    logger.info(f'search_book: processing item {i+1}')
                    
                    # Extract the book ID from its link (href shaped like "/123/").
                    book_link = item.find('a', href=re.compile(r'^/\d+/$'))
                    if not book_link:
                        logger.warning(f'search_book: no book link found in item {i+1}')
                        continue
                    
                    book_id = book_link['href'].strip('/')
                    logger.info(f'search_book: found book ID: {book_id}')
                    
                    # Extract the title.
                    title_elem = item.find('h3')
                    title = title_elem.get_text(strip=True) if title_elem else ''
                    logger.info(f'search_book: found title: {title}')
                    
                    # Extract the cover image, absolutizing relative URLs.
                    img_elem = item.find('img')
                    cover = img_elem['src'] if img_elem and img_elem.get('src') else ''
                    if cover and not cover.startswith('http'):
                        cover = f"{search_source}{cover}"
                    logger.info(f'search_book: found cover: {cover}')
                    
                    # Extract the author from the author-tag link.
                    author_link = item.find('a', href=re.compile(r'^/zuozhe/\?tag='))
                    author = ''
                    if author_link:
                        author_text = author_link.get_text(strip=True)
                        if author_text.startswith('作者：'):
                            author = author_text[3:]  # drop the "作者：" ("Author:") prefix
                    logger.info(f'search_book: found author: {author}')
                    
                    # Extract status / read-count from the item's spans.
                    status_spans = item.find_all('span')
                    status = ''
                    read_count = ''
                    if status_spans:
                        status = status_spans[0].get_text(strip=True) if status_spans else ''
                        # The read count is the span containing "阅读" (reads).
                        for span in status_spans:
                            span_text = span.get_text(strip=True)
                            if '阅读' in span_text:
                                read_count = span_text
                                break
                    logger.info(f'search_book: found status: {status}, read_count: {read_count}')
                    
                    # Extract the update time from the first chapter row's <i> tag.
                    update_time = ''
                    chapter_items = item.find_all('li')
                    if chapter_items:
                        # Time of the most recent chapter.
                        first_chapter = chapter_items[0]
                        time_elem = first_chapter.find('i')
                        if time_elem:
                            update_time = time_elem.get_text(strip=True)
                    logger.info(f'search_book: found update_time: {update_time}')
                    
                    # Extract the recent chapter titles (links ending in .html).
                    chapter_list = []
                    chapter_links = item.find_all('a', href=re.compile(r'\.html$'))
                    for link in chapter_links[:3]:  # first 3 chapters only
                        chapter_title = link.get_text(strip=True)
                        if chapter_title and not chapter_title.startswith('作者：'):
                            chapter_list.append(chapter_title)
                    logger.info(f'search_book: found chapters: {chapter_list}')
                    
                    book_info = {
                        'id': book_id,
                        'title': title,
                        'cover': cover,
                        'author': author,
                        'status': status,
                        'read_count': read_count,
                        'update_time': update_time,
                        'chapter_list': chapter_list
                    }
                    
                    # Cache this book's info for 24h so detail lookups are cheap.
                    search_key = DynamoDBKayValue.get_search_key(f'{book_id}_book_id')
                    DynamoDBKayValue.set_value(search_key, json.dumps(book_info), 60 * 60 * 24)
                    
                    book_list.append(book_info)
                    logger.info(f'search_book: parsed book {book_id}: {title}')
                    
                except Exception as e:
                    # A malformed item must not abort the whole result list.
                    logger.error(f'search_book: parse book item error: {e}')
                    continue
            
            logger.info(f'search_book: total books found: {len(book_list)}')
            if len(book_list) == 0:
                logger.error(f'search_book: no books found for keyword: {search_keyword}')
            return book_list
            
        except Exception as e:
            logger.error(f'search_book: parse search response error: {e}')
            return None
    else:
        # Detail mode: fetch the book's detail page from the site.
        detail_url = f"{search_source}/{book_id}/txt.html#dir"
        
        # Random delay to avoid hammering the site.
        time.sleep(random.uniform(1.0, 2.0))
        
        response = make_request_with_retry(detail_url)
        if not response:
            logger.error(f'search_book: detail request failed for id: {book_id}')
            return None
            
        try:
            # Parse the HTML with BeautifulSoup.
            soup = BeautifulSoup(response.text, 'html.parser')
            logger.info(f'search_book: detail HTML parsed for id: {book_id}')
            
            # Locate the container div.
            container = soup.find('div', class_='container')
            if not container:
                logger.error(f'search_book: no container found for id: {book_id}')
                return None
            
            # The item div holds the book's basic info.
            item = container.find('div', class_='item')
            if not item:
                logger.error(f'search_book: no item found for id: {book_id}')
                return None
            
            # Collect the basic fields.
            book_info = {}
            
            # Extract the title (the <a> inside <h1>).
            title_elem = item.find('h1')
            if title_elem:
                title_link = title_elem.find('a')
                title = title_link.get_text(strip=True) if title_link else ''
            else:
                title = ''
            book_info['title'] = title
            logger.info(f'search_book: found title: {title}')
            
            # Extract the cover image, absolutizing relative URLs.
            img_elem = item.find('img')
            cover = img_elem['src'] if img_elem and img_elem.get('src') else ''
            if cover and not cover.startswith('http'):
                cover = f"{search_source}{cover}"
            book_info['cover'] = cover
            logger.info(f'search_book: found cover: {cover}')
            
            # Extract the author (detail pages use /tag/?key= links, unlike
            # the /zuozhe/?tag= links on search pages).
            author_link = item.find('a', href=re.compile(r'^/tag/\?key='))
            author = ''
            if author_link:
                author_text = author_link.get_text(strip=True)
                if author_text.startswith('作者：'):
                    author = author_text[3:]  # drop the "作者：" ("Author:") prefix
            book_info['author'] = author
            logger.info(f'search_book: found author: {author}')
            
            # Extract status / read-count from the item's spans.
            status_spans = item.find_all('span')
            status = ''
            read_count = ''
            if status_spans:
                status = status_spans[0].get_text(strip=True) if status_spans else ''
                # The read count is the span containing "阅读" (reads).
                for span in status_spans:
                    span_text = span.get_text(strip=True)
                    if '阅读' in span_text:
                        read_count = span_text
                        break
            book_info['status'] = status
            book_info['read_count'] = read_count
            logger.info(f'search_book: found status: {status}, read_count: {read_count}')
            
            # Extract the update time from the first chapter row's <i> tag.
            update_time = ''
            chapter_items = item.find_all('li')
            if chapter_items:
                # Time of the most recent chapter.
                first_chapter = chapter_items[0]
                time_elem = first_chapter.find('i')
                if time_elem:
                    update_time = time_elem.get_text(strip=True)
            book_info['update_time'] = update_time
            logger.info(f'search_book: found update_time: {update_time}')
            
            # Extract the recent chapter titles (links ending in .html).
            chapter_list = []
            chapter_links = item.find_all('a', href=re.compile(r'\.html$'))
            for link in chapter_links[:3]:  # first 3 chapters only
                chapter_title = link.get_text(strip=True)
                if chapter_title and not chapter_title.startswith('作者：'):
                    chapter_list.append(chapter_title)
            book_info['chapter_list'] = chapter_list
            logger.info(f'search_book: found chapters: {chapter_list}')
            
            # Extract the description paragraphs from the "des bb" div.
            description = ''
            des_div = container.find('div', class_='des bb')
            if des_div:
                des_paragraphs = des_div.find_all('p')
                description = '\n'.join([p.get_text(strip=True) for p in des_paragraphs if p.get_text(strip=True)])
            book_info['description'] = description
            logger.info(f'search_book: found description length: {len(description)}')
            
            # Extract TXT download links from the div with id "list".
            download_list = []
            dir_div = container.find('div', id='list')
            if dir_div:
                download_links = dir_div.find_all('a', href=re.compile(r'^/txt/\?id='))
                for link in download_links:
                    title = link.get_text(strip=True)
                    download_url = link['href']
                    if not download_url.startswith('http'):
                        download_url = f"{search_source}{download_url}"
                    download_list.append({
                        'title': title,
                        'downloadUrl': download_url
                    })
            book_info['download_list'] = download_list
            logger.info(f'search_book: found {len(download_list)} download links')
            
            # Cache the full detail for 24h.
            search_key = DynamoDBKayValue.get_search_key(f'{book_id}_book_id')
            DynamoDBKayValue.set_value(search_key, json.dumps(book_info), 60 * 60 * 24)
            
            logger.info(f'search_book: successfully parsed book detail for id: {book_id}')
            return book_info
            
        except Exception as e:
            logger.error(f'search_book: parse detail response error for id {book_id}: {e}')
            return None
        