import asyncio

import requests
import json
import re
import os
import time
from urllib.parse import unquote, urljoin
from bs4 import BeautifulSoup
import base64
import datetime

from xhs_auto_poster.__001__langgraph_more_nodes.agent_state import AgentState
from xhs_auto_poster.__002__fastapi.msg_queue import msg_queue, MSG_TYPE
from xhs_auto_poster.common.path_utils import project_path


def sanitize_title_for_filename(ext) -> str:
    """
    Build a collision-resistant, filesystem-safe filename from the current
    timestamp plus the given extension.

    The previous implementation used second-level precision
    ("%Y%m%d%H%M%S"), so two images downloaded within the same second
    overwrote each other; microseconds ("%f") are appended to avoid that.

    :param ext: file extension including the leading dot, e.g. ".jpg"
    :return: timestamp-based filename such as "20250101120000123456.jpg"
    """
    now = datetime.datetime.now()
    # %f adds microseconds so rapid successive calls yield distinct names.
    time_str = now.strftime("%Y%m%d%H%M%S%f")
    return f"{time_str}{ext}"


class BaiduImageSpider:
    """Scraper for Baidu image-search result pages.

    Fetches the search-result HTML, extracts image metadata through a chain
    of fallback strategies, and optionally downloads the images to disk.
    """

    def __init__(self):
        # One shared session gives connection pooling across all requests.
        self.session = requests.Session()
        # Browser-like headers; Baidu rejects requests without a UA/Referer.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Referer': 'https://image.baidu.com/',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        }

    def extract_image_data_from_html(self, html_content):
        """Extract image metadata dicts from a result page's HTML.

        Tries three strategies in order and returns on the first success:
        1. ``window.jsonData`` assignment embedded in a <script> tag,
        2. the ``<script id="image-search-data">`` JSON element,
        3. a raw regex scan for ``thumbURL`` values.

        :param html_content: full HTML text of the search-result page
        :return: list of image-info dicts (possibly empty)
        """
        images_data = []

        # Strategy 1: window.jsonData assignment inside a script tag.
        script_pattern = r'window\.jsonData\s*=\s*({.*?});'
        match = re.search(script_pattern, html_content, re.DOTALL)

        if match:
            try:
                json_data = json.loads(match.group(1))
                if 'data' in json_data and 'list' in json_data['data']:
                    images_data = self.parse_json_data(json_data['data']['list'])
                    return images_data
            except Exception as e:
                print(f"解析JSON数据失败: {e}")

        # Strategy 2: dedicated JSON blob in an id'd script element.
        soup = BeautifulSoup(html_content, 'html.parser')
        script_tag = soup.find('script', {'id': 'image-search-data'})
        if script_tag and script_tag.string:
            try:
                data = json.loads(script_tag.string)
                if 'data' in data and 'images' in data['data']:
                    images_data = self.parse_image_objects(data['data']['images'])
                    return images_data
            except Exception as e:
                print(f"解析image-search-data失败: {e}")

        # Strategy 3: last resort — scrape thumbURL values straight out of
        # the HTML; yields thumbnails only, with no titles or dimensions.
        thumburl_pattern = r'"thumbURL":"(https?://[^"]+)"'
        thumburls = re.findall(thumburl_pattern, html_content)
        for url in thumburls:
            images_data.append({
                'thumbURL': unquote(url),
                'title': '',
                'width': 0,
                'height': 0,
                'fromURL': ''
            })

        return images_data

    def parse_json_data(self, data_list):
        """Normalize ``window.jsonData`` list items into image-info dicts.

        :param data_list: raw list from ``jsonData['data']['list']``
        :return: list of dicts with thumbURL/objURL/title/size metadata
        """
        images = []
        for item in data_list:
            try:
                image_info = {
                    'thumbURL': item.get('thumbURL', ''),
                    'objURL': item.get('objURL', ''),
                    'fromURL': item.get('fromURL', ''),
                    'title': item.get('title', ''),
                    'width': item.get('width', 0),
                    'height': item.get('height', 0),
                    'type': item.get('type', ''),
                    'size': item.get('size', '')
                }
                images.append(image_info)
            except Exception as e:
                print(f"解析图片项失败: {e}")
        return images

    def parse_image_objects(self, images_list):
        """Normalize ``image-search-data`` objects into image-info dicts.

        Handles both lower-case (``thumburl``/``objurl``) and camel-case key
        variants, plus the ``replaceUrl`` fallback for the original URL.

        :param images_list: raw list from ``data['data']['images']``
        :return: list of normalized image-info dicts
        """
        images = []
        for img_obj in images_list:
            try:
                # Thumbnail URL: key casing differs between payload versions.
                thumburl = img_obj.get('thumburl', '')
                if not thumburl:
                    thumburl = img_obj.get('thumbURL', '')

                # Original-image URL, falling back to the first replaceUrl entry.
                objurl = img_obj.get('objurl', '')
                if not objurl:
                    objurl = img_obj.get('replaceUrl', [{}])[0].get('objurl', '') if img_obj.get('replaceUrl') else ''

                image_info = {
                    'thumbURL': thumburl,
                    'objURL': objurl,
                    'fromURL': img_obj.get('fromUrl', ''),
                    'title': img_obj.get('titleShow', ''),
                    'width': img_obj.get('width', 0),
                    'height': img_obj.get('height', 0),
                    'setList': img_obj.get('setList', []),  # image-set (album) info
                    'isImageSet': img_obj.get('isImageSet', False)
                }
                images.append(image_info)
            except Exception as e:
                print(f"解析图片对象失败: {e}")
        return images

    def get_high_quality_url(self, image_info):
        """Pick the best available URL: objURL > thumbURL > first set entry.

        :param image_info: normalized image-info dict
        :return: URL string, or '' when nothing usable is present
        """
        # Prefer the original image over the thumbnail.
        if image_info.get('objURL'):
            return image_info['objURL']

        if image_info.get('thumbURL'):
            return image_info['thumbURL']

        # Image sets (albums): fall back to the first member's URL.
        if image_info.get('setList'):
            first_img = image_info['setList'][0]
            return first_img.get('objurl', first_img.get('thumburl', ''))

        return ''

    def download_image(self, image_url, save_path, retry_count=3):
        """Download one image to *save_path*, retrying on failure.

        Supports both http(s) URLs and inline ``data:image`` base64 URIs.

        :param image_url: source URL (http(s) or data URI)
        :param save_path: destination file path
        :param retry_count: maximum number of attempts
        :return: True on success, False after all retries failed
        """
        for i in range(retry_count):
            try:
                # Inline data URIs need no network round-trip.
                if image_url.startswith('data:image'):
                    self.save_base64_image(image_url, save_path)
                    return True

                response = self.session.get(image_url, headers=self.headers, timeout=30)
                if response.status_code == 200:
                    with open(save_path, 'wb') as f:
                        f.write(response.content)
                    return True
                else:
                    print(f"下载失败，状态码: {response.status_code}, URL: {image_url}")
                    # Back off before retrying a non-200 response too; the
                    # original only slept on exceptions, hammering the server.
                    time.sleep(1)
            except Exception as e:
                print(f"下载图片出错 (尝试 {i + 1}/{retry_count}): {e}")
                time.sleep(1)

        return False

    def save_base64_image(self, base64_str, save_path):
        """Decode a ``data:image/...;base64,`` URI and write it to disk.

        :param base64_str: full data URI including the header
        :param save_path: destination file path
        :return: True on success, False on decode/IO failure
        """
        try:
            # The payload follows the first comma in a data URI.
            base64_data = base64_str.split(',')[1]
            image_data = base64.b64decode(base64_data)
            with open(save_path, 'wb') as f:
                f.write(image_data)
            return True
        except Exception as e:
            print(f"保存base64图片失败: {e}")
            return False

    def get_file_extension(self, url):
        """Guess a file extension (with leading dot) from an image URL.

        :param url: http(s) URL or ``data:image`` URI; may be empty
        :return: extension such as '.png'; defaults to '.jpg'
        """
        if not url:
            return '.jpg'

        # Data URIs: map the declared MIME type.
        if url.startswith('data:image'):
            mime_type = url.split(';')[0].split(':')[1]
            extension_map = {
                'image/jpeg': '.jpg',
                'image/png': '.png',
                'image/gif': '.gif',
                'image/webp': '.webp'
            }
            return extension_map.get(mime_type, '.jpg')

        # Regular URLs: take the path suffix (query string stripped).
        path = url.split('?')[0]
        if '.' in path:
            ext = '.' + path.split('.')[-1].lower()
            if len(ext) <= 5:  # plausible extension length
                return ext
        return '.jpg'

    def crawl(self, search_url, save_dir='baidu_images', download_images=True, max_images=50):
        """Fetch a search page, extract image metadata, optionally download.

        :param search_url: full Baidu image-search URL
        :param save_dir: directory for downloaded files (created if missing)
        :param download_images: when False, only metadata is returned
        :param max_images: cap on how many images to download
        :return: (images_data, file_path_list); both empty on request failure
        """
        print("开始爬取百度图片...")

        # exist_ok avoids the check-then-create race of the original code.
        os.makedirs(save_dir, exist_ok=True)

        # Timeout added so a stalled connection cannot hang the node forever.
        response = self.session.get(search_url, headers=self.headers, timeout=30)
        if response.status_code != 200:
            print(f"请求失败，状态码: {response.status_code}")
            # Bug fix: the original did a bare `return` (None) here, which
            # crashed every caller unpacking the (images_data, paths) tuple.
            return [], []

        html_content = response.text

        images_data = self.extract_image_data_from_html(html_content)

        print(f"找到 {len(images_data)} 张图片")

        file_path_list = []
        if download_images:
            for i, image_info in enumerate(images_data[:max_images]):
                image_url = self.get_high_quality_url(image_info)
                if not image_url:
                    print(f"第 {i + 1} 张图片无法获取URL，跳过")
                    continue

                # Timestamp-based filename with the URL-derived extension.
                ext = self.get_file_extension(image_url)
                filename = sanitize_title_for_filename(ext)
                save_path = os.path.join(save_dir, filename)

                print(f"下载第 {i + 1} 张图片...")
                if self.download_image(image_url, save_path):
                    image_info['local_path'] = filename
                    print(f"下载成功: (unknown)")
                    file_path_list.append(save_path)
                else:
                    print(f"下载失败: (unknown)")

                # Throttle to avoid hammering the server.
                time.sleep(0.5)

        return images_data, file_path_list


def main(title):
    """Crawl a handful of Baidu images for *title* and return their paths.

    :param title: search keyword; "旅游" (travel) is appended to the query
    :return: list of local file paths for successfully downloaded images
    """
    crawler = BaiduImageSpider()

    # Build the Baidu image-search URL for the travel-themed query.
    query_url = f"https://image.baidu.com/search/index?tn=baiduimage&word={title}旅游"

    # Metadata is discarded; only the downloaded file paths are needed here.
    _, downloaded_paths = crawler.crawl(
        search_url=query_url,
        save_dir=project_path.joinpath("picture"),
        download_images=True,
        max_images=5,  # cap downloads to keep request volume low
    )
    return downloaded_paths


def xiaohongshu_image_generator(site):
    """Download candidate post images for *site* and return their local paths.

    :param site: destination name used to build the query ("{site}旅游")
    :return: de-duplicated list of file paths, download order preserved
    """
    spider = BaiduImageSpider()

    search_url = f"https://image.baidu.com/search/index?tn=baiduimage&word={site}旅游"

    # images_data (metadata) is not needed by callers of this helper.
    _, file_path_list = spider.crawl(
        search_url=search_url,
        save_dir=project_path.joinpath("picture"),
        download_images=True,
        max_images=5  # cap downloads to keep request volume low
    )
    # dict.fromkeys de-duplicates while preserving download order; the
    # original list(set(...)) returned the paths in arbitrary order.
    file_path_list = list(dict.fromkeys(file_path_list))
    return file_path_list


async def image_crawl_node(state: AgentState):
    """LangGraph node: crawl candidate images for the post's site.

    Reads ``xhs_tcm_post_site`` from *state*, downloads images via
    :func:`xiaohongshu_image_generator`, and writes the resulting path list
    to ``state['xhs_tcm_post_image_path']``.  On any failure a bundled
    fallback picture is used instead so downstream nodes still have an
    image.  Progress messages are pushed onto ``msg_queue``.

    :param state: mutable agent state dict
    :return: the (mutated) state
    """
    try:
        print("开始生成小红书图片生成")
        await msg_queue.put({"type": MSG_TYPE.THINK.name, "content": "开始生成小红书图片生成"})
        # Bug fix: time.sleep(0.1) blocked the whole event loop here;
        # awaiting asyncio.sleep yields control to other coroutines instead.
        await asyncio.sleep(0.1)
        site = state.get('xhs_tcm_post_site')

        image_path = xiaohongshu_image_generator(site)

        state['xhs_tcm_post_image_path'] = image_path
        print(f"图片生成成功: {image_path}")
        state['xhs_tcm_tip'] = "图片生成成功"
        print("完成生成小红书图片生成")
        await msg_queue.put({"type": MSG_TYPE.THINK.name, "content": "完成生成小红书图片生成"})
    except Exception:
        import traceback
        traceback.print_exc()
        # Best-effort fallback: point at a bundled default picture so the
        # rest of the pipeline can proceed.
        state['xhs_tcm_post_image_path'] = project_path.joinpath("picture/20250920221340黄山归来不.png")
        state['xhs_tcm_tip'] = "图片生成失败"
        await msg_queue.put({"type": MSG_TYPE.THINK.name, "content": "小红书生成图片失败"})
    return state


if __name__ == "__main__":
    # Ad-hoc manual test.  The node reads 'xhs_tcm_post_site' (not the
    # title/content keys) to build the search query, so that key must be
    # supplied; the original dict omitted it, sending "None旅游" to Baidu.
    asyncio.run(
        image_crawl_node(
            {
                "xhs_tcm_post_site": "扬州",
                "xhs_tcm_post_title": "扬州",
                "xhs_tcm_post_content": "黄山归来不",
            }
        )
    )
