import requests
import json
import time
import random
from typing import Dict, List, Optional

import os
import re
import sys
import traceback
from datetime import datetime
import redis
from urllib.parse import urlparse, parse_qs, unquote, urlencode
from util.tools import *

import urllib3
from bs4 import BeautifulSoup, NavigableString, Tag
from loguru import logger
from setting import *
# from aa import quick_generate_cookie
import myglobal

def load_cookies_from_file(filename: str = "weibo_cookies.json") -> Dict:
    """Load a cookie dict from a JSON file.

    Args:
        filename: Path of the JSON file holding the cookies.

    Returns:
        The parsed cookie dict, or an empty dict when the file is
        missing or is not valid JSON.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        # Fix: the message previously printed a literal "(unknown)"
        # placeholder instead of the actual filename.
        print(f"错误: 找不到文件 {filename}")
        return {}
    except json.JSONDecodeError:
        print(f"错误: {filename} 文件格式不正确")
        return {}

def cookies_dict_to_string(cookies_dict: Dict) -> str:
    """Serialize a cookie dict into a "k1=v1; k2=v2" header string."""
    return "; ".join(f"{name}={value}" for name, value in cookies_dict.items())

def create_weibo_headers(cookies_dict: Dict) -> Dict:
    """Build the HTTP headers used for Weibo AJAX requests.

    The cookie dict is serialized into the ``cookie`` header. When an
    ``XSRF-TOKEN`` cookie is present, its value is mirrored into the
    ``x-xsrf-token`` header, which Weibo's endpoints expect.
    """
    # Serialize cookies into the "k=v; k=v" header form.
    cookie_string = "; ".join(f"{k}={v}" for k, v in cookies_dict.items())

    headers = {
        'authority': 'www.weibo.com',
        'accept': 'application/json, text/plain, */*',
        'accept-language': 'zh-CN,zh;q=0.9',
        'client-version': 'v2.47.132',
        'cookie': cookie_string,
        'priority': 'u=1, i',
        'referer': 'https://www.weibo.com/u/5069029750',
        'sec-ch-ua': '"Chromium";v="128", "Not;A=Brand";v="24", "Google Chrome";v="128"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
    }

    # Mirror the CSRF token into its header when present.
    if 'XSRF-TOKEN' in cookies_dict:
        headers['x-xsrf-token'] = cookies_dict['XSRF-TOKEN']

    return headers

def get_user_weibo_list(uid: str, headers: Dict, page: int = 1, feature: int = 0) -> Optional[Dict]:
    """Fetch one page of a user's Weibo post list.

    Args:
        uid: Numeric Weibo user id, as a string.
        headers: Request headers built by ``create_weibo_headers``.
        page: 1-based page number.
        feature: Weibo "feature" filter (0 = all posts).

    Returns:
        The decoded JSON payload, or ``None`` on any network error,
        non-200 status, or JSON decode failure.
    """
    url = "https://www.weibo.com/ajax/statuses/mymblog"

    params = {
        'uid': uid,
        'page': page,
        'feature': feature
    }

    try:
        response = requests.get(
            url,
            headers=headers,
            params=params,
            #proxies=PROXY,
            verify=False,
            timeout=10
        )
    except requests.exceptions.RequestException as e:
        print(f"请求异常: {e}")
        return None

    # Randomized delay to reduce the chance of rate limiting.
    time.sleep(random.uniform(1, 5))
    print(f"请求状态码: {response.status_code}")
    print(f"请求URL: {response.url}")

    if response.status_code != 200:
        print(f"请求失败，状态码: {response.status_code}")
        print(f"响应内容: {response.text[:500]}")
        return None

    try:
        return response.json()
    except ValueError as e:
        # Fix: catch ValueError instead of json.JSONDecodeError. Modern
        # requests raises requests.exceptions.JSONDecodeError, which is a
        # RequestException subclass, so the original ordering routed
        # decode failures into the generic handler and the dedicated
        # decode branch was unreachable. (requests' JSONDecodeError is
        # also a ValueError, so this catches it on all versions.)
        print(f"JSON解析错误: {e}")
        print(f"响应内容: {response.text[:500]}")
        return None

def format_publish_time(created_at: str) -> str:
    """Normalize a Weibo timestamp to 'YYYY-MM-DD HH:MM:SS'.

    Weibo emits times like 'Fri Aug 30 10:00:00 +0800 2024'. Strings
    without a '+' (no UTC offset, e.g. relative times) are returned
    unchanged. On parse failure the input is returned as-is; falsy
    input becomes "".
    """
    if not (created_at and isinstance(created_at, str)):
        return created_at or ""
    if '+' not in created_at:
        return created_at
    try:
        parsed = datetime.strptime(created_at, '%a %b %d %H:%M:%S %z %Y')
    except Exception as e:
        print(f"时间格式转换错误: {e}")
        # Parsing failed — hand back the original string.
        return created_at or ""
    return parsed.strftime('%Y-%m-%d %H:%M:%S')

def clean_text(text: str) -> str:
    """Strip invisible characters and collapse whitespace.

    Removes zero-width and bidi control characters, drops ASCII control
    characters (newline and tab survive that step but are then folded,
    together with all other whitespace runs, into single spaces), and
    trims the result. Falsy input yields "".
    """
    if not text:
        return ""

    # Zero-width and directional-formatting characters to strip:
    # ZWSP, ZWNJ, ZWJ, BOM/ZWNBSP, LRM, RLM, LRE, RLE, PDF, LRO, RLO.
    invisible = '\u200b\u200c\u200d\ufeff\u200e\u200f\u202a\u202b\u202c\u202d\u202e'
    cleaned = text.translate({ord(ch): None for ch in invisible})

    # Drop remaining ASCII control characters (keep \n and \t for now).
    cleaned = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]', '', cleaned)

    # Collapse every whitespace run (including newlines) to one space.
    return re.sub(r'\s+', ' ', cleaned).strip()

def extract_weibo_article_id(url: str) -> Optional[str]:
    """Extract the post id (mblogid) from a weibo.com status URL.

    e.g. "https://weibo.com/5069029750/AbCdEf?ref=x" -> "AbCdEf".

    Returns:
        The id string, or None when the URL does not match.

    Fix: the original listed three patterns, but ``\\w+`` already stops
    at '?' and '#', so the query-string and fragment variants were dead
    code (pattern 1 always matched first with the same group). A single
    pattern is behaviorally identical.
    """
    match = re.search(r'https?://weibo\.com/\d+/(\w+)', url)
    return match.group(1) if match else None

def get_platform(url: str) -> str:
    """Map a product URL's domain to its e-commerce platform name.

    Recognizes JD, Taobao/Tmall, Pinduoduo and Douyin; anything else
    is classified as "其他" (other).
    """
    domain = urlparse(url).netloc.lower()
    # Ordered (substring-tuple, platform) lookup table.
    platform_table = (
        (('jd.com', 'jd.hk'), "京东"),
        (('taobao.com', 'tmall.com'), "淘宝"),
        (('pinduoduo.com',), "拼多多"),
        (('douyin.com',), "抖音"),
    )
    for needles, platform in platform_table:
        if any(needle in domain for needle in needles):
            return platform
    return "其他"

def parse_url_get_info(url: str):
    """Resolve a product URL through the remote PARSE_PRODUCT service.

    Args:
        url: The link to resolve.

    Returns:
        ``(product_id, platform_name)`` on success, or ``("", "")`` when
        the request fails or the response lacks the expected structure.
    """
    headers = {
        'Content-Type': 'application/json',
    }

    json_data = {
        'content': f'{url}',
    }

    try:
        response = requests.post(PARSE_PRODUCT, headers=headers, json=json_data, verify=False)
        res_js = response.json()

        matched = res_js['data']['items'][0]['matchedPlatforms'][0]
        product_id = matched['data']['productInfo']['productId']
        platform_name = matched['platformName']
        return product_id, platform_name
    except Exception:
        # Fix: the original bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit. Exception keeps the deliberate
        # best-effort behavior (network errors, missing keys, bad JSON
        # all yield empty results) without trapping interpreter exits.
        return "", ""

def parse_weibo_content(weibo_data: Dict) -> List[Dict]:
    """
    Parse one raw Weibo post into an ordered list of typed content items.

    Item "type" values produced here:
      - "text":  a plain-text fragment
      - "extra": a weibo.com article reference (text + url)
      - "link":  a recognized product link (with product_id / platform_name)
      - "link2": any other hyperlink (platform guessed from its domain)
      - "image": an image URL found in the HTML

    When ``weibo_data['text']`` contains HTML it is walked with
    BeautifulSoup; otherwise (or if HTML parsing raises) the cleaned
    ``text_raw`` is processed as plain text.
    """
    result = []
    text_raw = weibo_data.get('text_raw', '')
    
    # Normalize the raw text (zero-width chars stripped, whitespace collapsed).
    # NOTE(review): clean_text collapses newlines into spaces, so the
    # split('\n') fallbacks below likely never yield more than one line —
    # confirm whether per-line handling is still intended.
    text_raw = clean_text(text_raw)
    
    # Prefer the HTML body when present: it carries the links and images
    # that text_raw does not.
    if 'text' in weibo_data and '<' in weibo_data['text']:
        try:
            html_content = weibo_data['text']
            soup = BeautifulSoup(html_content, 'html.parser')
            
            # Walk every node (text nodes and tags) in document order.
            for element in soup.descendants:
                if isinstance(element, NavigableString):
                    text = clean_text(str(element))
                    if text and text.strip():
                        result.append({"type": "text", "text": text.strip()})
                
                elif isinstance(element, Tag):
                    if element.name.lower() == "a":
                        link_text = clean_text(element.get_text(strip=True))
                        href = element.get("href", "")
                        
                        # Skip promotional "U先素质试用" anchors entirely.
                        if "U先素质试用" in link_text:
                            continue
                        
                        # weibo.com links become article references.
                        if "weibo.com" in href:
                            article_id = extract_weibo_article_id(href)
                            if article_id:
                                result.append({
                                    "type": "extra", 
                                    "text": f"微博文章: {article_id}",
                                    "url": href
                                })
                                continue
                        
                        # Try to resolve the link as a product via the
                        # remote parser service (network call per link).
                        product_id, platform_name = parse_url_get_info(href)
                        if product_id:
                            result.append({
                                "type": "link",
                                "text": href,
                                "product_id": product_id,
                                "platform_name": platform_name,
                                "link_text": link_text
                            })
                        else:
                            # Plain hyperlink: classify by domain only.
                            platform = get_platform(href)
                            result.append({
                                "type": "link2",
                                "text": href,
                                "platform": platform,
                                "link_text": link_text
                            })
                    
                    elif element.name.lower() in ["img", "image"]:
                        src = element.get("src", "")
                        if src:
                            result.append({
                                "type": "image",
                                "text": src
                            })
                        
        except Exception as e:
            print(f"解析HTML内容错误: {e}")
            # HTML parsing failed: fall back to plain-text handling.
            if text_raw.strip():
                lines = text_raw.split('\n')
                for line in lines:
                    line = clean_text(line)
                    if line.strip():
                        result.append({"type": "text", "text": line.strip()})
    else:
        # No HTML: process the cleaned raw text.
        if text_raw.strip():
            # Split into lines (see the NOTE above on collapsed newlines).
            lines = text_raw.split('\n')
            for line in lines:
                line = clean_text(line)
                if line.strip():
                    # Bare URLs become link/link2 items.
                    if line.startswith('http://') or line.startswith('https://'):
                        product_id, platform_name = parse_url_get_info(line)
                        if product_id:
                            result.append({
                                "type": "link",
                                "text": line,
                                "product_id": product_id,
                                "platform_name": platform_name,
                                "link_text": "网页链接"
                            })
                        else:
                            platform = get_platform(line)
                            result.append({
                                "type": "link2",
                                "text": line,
                                "platform": platform,
                                "link_text": "网页链接"
                            })
                    else:
                        result.append({"type": "text", "text": line.strip()})
    
    # Drop any text items that ended up empty after cleaning.
    result = [item for item in result if not (item['type'] == 'text' and not item['text'].strip())]
    
    return result

def parse_images_from_weibo(weibo_data: Dict) -> List[str]:
    """Collect image URLs from a raw Weibo post dict.

    First builds large-size URLs from ``pic_ids``, then appends the
    best available URL from each ``pic_infos`` entry, preferring
    'largest' over 'large' over a bare 'url'. Note the two sources can
    describe the same pictures, so the list may contain duplicates.
    """
    # Large-size URLs derived from the picture ids.
    images = [
        f"https://wx1.sinaimg.cn/large/{pic_id}.jpg"
        for pic_id in weibo_data.get('pic_ids', [])
    ]

    # Best-available URL per pic_infos entry.
    if 'pic_infos' in weibo_data:
        for pic_info in weibo_data['pic_infos'].values():
            for size_key in ('largest', 'large'):
                if size_key in pic_info:
                    images.append(pic_info[size_key]['url'])
                    break
            else:
                if 'url' in pic_info:
                    images.append(pic_info['url'])

    return images

def count_products_in_content(content: List[Dict]) -> int:
    """Count product-link items: entries of type 'link' that carry a product_id."""
    return sum(
        1
        for entry in content
        if entry.get('type') == 'link' and 'product_id' in entry
    )

def toDatabase(data):
    """Push one formatted post to the remote database endpoints.

    Returns:
        True  — the post was delivered to at least one endpoint, OR it is
                permanently unusable (recursive nesting) and should be
                deduplicated so it is never crawled again.
        False — transient failure (detail fetch produced placeholder
                markers, or every endpoint rejected the push); the
                caller should retry this post later.
    """

    ####################################################  payload filtering ######################################################################
    # Placeholder markers mean the detail-page fetch failed — treated as
    # transient, so do NOT add to the dedupe set; return False to retry.
    if "__获取内容为空__" in str(data) or "__获取内容出错__" in str(data):
        logger.info(f"❌获取文章二级跳转页面失败，不推送，待再次采集：https://m.weibo.cn/detail/{data['article_id']}==={data['article_id']},{data['content']}...")
        return False

    # Recursive retweet nesting: permanently broken, report as handled
    # (True) so it gets deduplicated and never collected again.
    if "__循环递归__" in str(data):
        logger.info(f"❌文章出现循环嵌套，加入去重不再采集：{data['article_id']},{data['content']}...")
        return True

    # (disabled) keyword blocklist filter — kept for reference.
    # for keyword in block_lis:
    #     if keyword in str(data):
    #         logger.info(f"文本中包含过滤关键词：{keyword} => {data['article_id']}  {data['content']}...")
    #         return True
    
    # Rewrite "微博正文" -> "内容正文" everywhere in the payload (nested
    # strings included) by round-tripping through JSON.
    temp_str_data = json.dumps(data, ensure_ascii=False)
    temp_str_data = temp_str_data.replace("微博正文", "内容正文")
    data = json.loads(temp_str_data)
    
    # Attach derived fields before pushing.
    if 'article_id' in data:
        article_id = data['article_id']
        data['article_detail_url'] = f"https://m.weibo.cn/detail/{article_id}"
        # Overrides the converter's default with the process-wide flag.
        data['is_weibo_chaohua'] = myglobal.IS_WEIBO_SUBOR
        
        # Persist a local log of the payload for debugging/audit.
        print('推送到远端数据库')
        print(f"文章ID: {article_id}")
        write_log(data, article_id)
        print('准备推送数据到远端数据库')
    
    ################################################################################################################################

    # POST the payload to every configured endpoint.
    urls = URLS
    success_count = 0
    
    for url in urls:
        try:
            req = requests.post(url, json=data, verify=False)
            print(f"推送请求状态码: {req.status_code}")
            
            # The endpoint signals acceptance with "success" in its body.
            if "success" in req.content.decode():
                logger.info(f"✅推送成功: ID = {data['article_id']} {str(data['content'])[:100]}...")
                success_count += 1
            else:
                logger.error(f"❌推送请求失败:ID = {data['article_id']} {req.text}...")
                
        except Exception as e:
            logger.error(f"❌推送请求异常: {e}")
    
    # Success if at least one endpoint accepted the post.
    return success_count > 0

def parse_weibo_time(time_str):
    """Convert a Weibo timestamp ('%a %b %d %H:%M:%S %z %Y') to
    'YYYY-MM-DD HH:MM:SS'.

    On failure the error is logged and the input is returned unchanged.
    (Near-duplicate of format_publish_time, but unconditional: no '+'
    pre-check and failures go to the logger instead of stdout.)
    """
    try:
        parsed = datetime.strptime(time_str, "%a %b %d %H:%M:%S %z %Y")
    except Exception as e:
        logger.error(f"❌时间解析失败：{e}，原始时间：{time_str}")
        return time_str
    return parsed.strftime('%Y-%m-%d %H:%M:%S')
def convert_to_target_format(weibo_data: Dict) -> Dict:
    """
    Convert one raw Weibo API post into the project's target schema.

    Returns a dict with avatar/user/content/images/counter fields, or an
    empty dict when any step raises. Retweeted posts are converted
    recursively into ``retweeted_content``.
    """
    try:
        user_info = weibo_data.get('user', {})
        retweeted_status = weibo_data.get('retweeted_status')
        
        # Break the post body into typed content items.
        content = parse_weibo_content(weibo_data)
        
        # Number of recognized product links among those items.
        product_num = count_products_in_content(content)
        
        formatted_data = {
            "avatar": user_info.get('avatar_hd') or user_info.get('profile_image_url', ''),
            "user_id": user_info.get('id', 0),
            "nickname": user_info.get('screen_name', ''),
            "publish_time": parse_weibo_time(weibo_data.get('created_at', '')),
            "content": content,
            "images": parse_images_from_weibo(weibo_data),
            "has_retweet": bool(retweeted_status),
            # Prefer the string id ('idstr'); fall back to the numeric id.
            "article_id": weibo_data.get('idstr', weibo_data.get('id', '')),
            "product_num": product_num,
            "article_detail_url": f"https://m.weibo.cn/detail/{weibo_data.get('id', '')}",
            # Default only — toDatabase later overrides this from myglobal.
            "is_weibo_chaohua": False
        }
        
        # Recursively convert the retweeted post, if any.
        if retweeted_status:
            formatted_data["retweet_article_id"] = retweeted_status.get('idstr', retweeted_status.get('id', ''))
            formatted_data["retweeted_content"] = convert_to_target_format(retweeted_status)
        
        return formatted_data
    except Exception as e:
        print(f"转换数据格式错误: {e}")
        return {}

def parse_weibo_data(data: Dict) -> List[Dict]:
    """
    Convert a raw mymblog API payload into a list of formatted posts.

    Malformed payloads (missing 'data' or 'data.list') yield an empty
    list; posts whose conversion raises are logged and skipped.
    """
    if not (data and 'data' in data and 'list' in data['data']):
        print("数据格式异常")
        return []

    formatted = []
    for raw_post in data['data']['list']:
        try:
            converted = convert_to_target_format(raw_post)
        except Exception as e:
            print(f"解析微博数据错误: {e}")
            print(f"原始数据ID: {raw_post.get('id', 'unknown')}")
            continue
        if converted:  # drop empty conversion results
            formatted.append(converted)

    return formatted

def save_weibo_to_file(data: List[Dict], filename: str = None):
    """Write the formatted posts to a UTF-8 JSON file.

    Args:
        data: List of formatted post dicts.
        filename: Output path; defaults to 'weibo_data_<timestamp>.json'.
    """
    if filename is None:
        timestamp = int(time.time())
        filename = f'weibo_data_{timestamp}.json'

    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)

    # Fix: the message previously printed a literal "(unknown)"
    # placeholder instead of the actual filename.
    print(f"微博数据已保存到: {filename}")

def get_multiple_pages_weibo(uid: str, headers: Dict, start_page: int = 1, end_page: int = 3, delay: float = 2.0) -> List[Dict]:
    """
    Fetch and parse pages start_page..end_page of a user's posts.

    Sleeps a randomized delay between pages (but not after the last one)
    to avoid hammering the API. Failed pages are skipped.
    """
    collected: List[Dict] = []

    for page in range(start_page, end_page + 1):
        print(f"正在获取第 {page} 页数据...")

        raw = get_user_weibo_list(uid, headers, page)
        if raw:
            page_posts = parse_weibo_data(raw)
            collected.extend(page_posts)
            print(f"第 {page} 页获取到 {len(page_posts)} 条微博")
        else:
            print(f"第 {page} 页获取失败")

        # Randomized pause between pages to avoid rate limiting.
        if page < end_page:
            pause = delay + random.uniform(0.5, 1.5)
            print(f"等待 {pause:.2f} 秒后继续...")
            time.sleep(pause)

    return collected

def display_weibo_info(weibos: List[Dict], count: int = 5):
    """
    Print a short console summary of up to ``count`` posts.
    """
    print(f"\n=== 前 {min(count, len(weibos))} 条微博 ===")
    for index, post in enumerate(weibos[:count], start=1):
        print(f"\n--- 微博 {index} ---")
        print(f"ID: {post['article_id']}")
        print(f"昵称: {post['nickname']}")
        # Build a preview from the first two content items only.
        preview_parts = []
        for item in post['content'][:2]:
            if item['type'] == 'text':
                preview_parts.append(item['text'][:50] + "...")
            elif item['type'] in ('link', 'link2'):
                preview_parts.append("[链接]")
        print(f"内容预览: {''.join(preview_parts)}")
        print(f"时间: {post['publish_time']}")
        print(f"是否有转发: {post['has_retweet']}")
        print(f"商品数量: {post['product_num']}")
        print(f"图片数量: {len(post['images'])}")

def main():
    """Script driver: load cookies, fetch posts for a hard-coded user,
    display a summary and save the results to JSON files."""
    # Load cookies from the local JSON file.
    cookies_dict = load_cookies_from_file("weibo_cookies.json")
    
    if not cookies_dict:
        print("无法加载cookies，程序退出")
        return
    
    # Build the request headers (cookie header + XSRF token).
    headers = create_weibo_headers(cookies_dict)
    
    # Target user id (matches the referer in create_weibo_headers).
    user_id = "5069029750"
    
    # Smoke-test with the first page.
    print("=== 测试获取第一页数据 ===")
    data = get_user_weibo_list(user_id, headers, page=1)
    
    if data:
        weibos = parse_weibo_data(data)
        print(f"解析到 {len(weibos)} 条微博")
        
        # Show a preview of the first few posts.
        if weibos:
            display_weibo_info(weibos, min(3, len(weibos)))
            
            # Persist page 1.
            save_weibo_to_file(weibos, f'weibo_{user_id}_page_1.json')
            
            # Fetch the multi-page batch (currently just 1 page).
            try:
                pages = 1
                all_weibos = get_multiple_pages_weibo(user_id, headers, 1, pages)
                if all_weibos:
                    save_weibo_to_file(all_weibos, f'weibo_{user_id}_pages_{pages}.json')
                    print(f"总共获取到 {len(all_weibos)} 条微博")
                    
                    # Show a summary of the batch.
                    display_weibo_info(all_weibos, min(5, len(all_weibos)))
                    
                    # Print one complete record as a format example.
                    print(f"\n=== 完整数据格式示例 ===")
                    print(json.dumps(all_weibos[0], ensure_ascii=False, indent=2))
                else:
                    print("没有获取到多页数据")
            except Exception as e:
                print(f"获取多页数据错误: {e}")
        else:
            print("解析到的微博数据为空")
    else:
        print("获取数据失败，请检查网络连接和cookies有效性")

# Run the scraper only when executed as a script (not on import).
if __name__ == "__main__":
    main()