import json
import os
import re
import sys
import time
import traceback
from datetime import datetime
from urllib.parse import urlparse, parse_qs, unquote

import redis
import requests
import urllib3
from bs4 import BeautifulSoup, NavigableString, Tag
from loguru import logger

from setting import *

# Re-open stdout line-buffered so log output appears immediately
sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', buffering=1)  # line buffering
# All requests use verify=False; silence the resulting TLS warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Redis connection used for article-ID de-duplication (set "unique_set")
R = redis.Redis(host='localhost', port=6379, db=0)

# Load the keyword blocklist (one keyword per line).
# BLOCK_KEYWORD_PATH comes from `setting` (star import above).
with open(BLOCK_KEYWORD_PATH, 'r', encoding='utf-8') as f:
    temp = f.read()
    if temp:
        block_lis = temp.split("\n")
    else:
        block_lis = []


class CaptchaError(Exception):
    """Raised when a request is answered with a captcha / verification page."""

    def __init__(self, errorInfo):
        # Pass the message (not `self`) to Exception: the original
        # `super().__init__(self)` stored the exception instance itself in
        # `args`, which makes repr() recursive and breaks pickling.
        super().__init__(errorInfo)
        self.errorInfo = errorInfo

    def __str__(self):
        return self.errorInfo


class RetryError(Exception):
    """Raised when a request still fails after exhausting all retries."""

    def __init__(self, errorInfo):
        # Pass the message (not `self`) to Exception: the original
        # `super().__init__(self)` stored the exception instance itself in
        # `args`, which makes repr() recursive and breaks pickling.
        super().__init__(errorInfo)
        self.errorInfo = errorInfo

    def __str__(self):
        return self.errorInfo



def extract_article_id(url, pageNum):
    """Extract (user_id, container_id) from a Weibo profile URL.

    The URL is expected to look like ``https://.../u/<user_id>``; any other
    path shape yields None.  The container_id is the user id prefixed with
    the profile-feed lfid "107603".  ``pageNum`` is accepted for interface
    compatibility but is not used here.
    """
    segments = urlparse(url).path.strip('/').split('/')
    if len(segments) >= 2 and segments[0] == 'u':
        uid = segments[1]
        return uid, f"107603{uid}"
    return None


def extract_article_ids_from_api_response(json_data):
    """Collect article (mblog) ids from an API response.

    Handles both layouts: the new one, where cards nest entries under
    ``card_group``, and the old one, where the card carries ``mblog``
    directly.

    Args:
        json_data: decoded JSON dict of the API response

    Returns:
        list: the article ids found, in response order
    """
    ids = []
    for card in json_data.get("data", {}).get("cards", []):
        # New format nests entries one level deeper; old format is the card itself
        entries = card.get("card_group", []) if "card_group" in card else [card]
        for entry in entries:
            blog = entry.get("mblog")
            if blog and "id" in blog:
                ids.append(blog["id"])
    return ids


def get_next_page_since_id(json_data, is_new_format=False):
    """Return the since_id of the next page, or None when there is none.

    Args:
        json_data: decoded JSON dict of the API response
        is_new_format: True when the response uses the new API layout
            (since_id under data.pageInfo) instead of the old one
            (data.cardlistInfo)

    Returns:
        str | None: the since_id for the next page request
    """
    info_key = "pageInfo" if is_new_format else "cardlistInfo"
    return json_data.get("data", {}).get(info_key, {}).get("since_id")


def extract_or_return_url(input_url):
    """Unwrap Weibo redirect URLs.

    When the URL points at one of the known Weibo redirect endpoints, the
    decoded target held in the 'u' (or 'url') query parameter is returned;
    otherwise the input is returned unchanged.

    参数:
    input_url (str): the URL string to process.

    返回:
    str: the decoded 'u'/'url' parameter value, or the original link.
    """
    redirect_hosts = ("weibo.cn", "shop.sc.weibo.com", "passport.weibo.com")
    redirect_paths = ("/sinaurl", "/h5/sclick/index", "/visitor/visitor")

    parsed = urlparse(input_url)
    if parsed.netloc not in redirect_hosts or parsed.path not in redirect_paths:
        return input_url

    params = parse_qs(parsed.query)
    # Prefer 'u', fall back to 'url'; only accept exactly one value.
    for key in ('u', 'url'):
        values = params.get(key)
        if values and len(values) == 1:
            return unquote(values[0])

    return input_url


def ownRequests(url, **args):
    """GET ``url`` with retry handling for proxy/timeout/network errors.

    A response body containing the geetest captcha marker counts as a
    failed attempt too.  Each failure is logged and retried up to
    ``max_retries`` times.

    Args:
        url: request URL
        **args: extra keyword arguments forwarded to requests.get

    Returns:
        response: the successful response object

    Raises:
        RetryError: when every attempt failed
    """
    max_retries = 5  # maximum number of attempts

    for attempt in range(1, max_retries + 1):
        try:
            response = requests.get(url, **args)
            if r"geetest?testType=2" in response.text:
                # Captcha page served instead of content: burn this attempt
                continue
            return response

        except requests.exceptions.ProxyError:
            logger.info(f"代理错误，正在重试 ({attempt}/{max_retries})")
        except requests.exceptions.SSLError:
            # Must precede RequestException: SSLError is one of its
            # subclasses, so the original ordering made this branch
            # unreachable.
            logger.info(f"SSL错误，正在重试 ({attempt}/{max_retries})")
        except requests.exceptions.ConnectTimeout:
            logger.info(f"连接超时，正在重试 ({attempt}/{max_retries})")
        except requests.exceptions.ReadTimeout:
            logger.info(f"读取超时，正在重试 ({attempt}/{max_retries})")
        except requests.exceptions.RequestException as e:
            logger.info(f"请求异常: {str(e)}，正在重试 ({attempt}/{max_retries})")
        except Exception as e:
            # Include the error detail the original log dropped
            logger.info(f"未知错误: {str(e)}，正在重试 ({attempt}/{max_retries})")

    # Every attempt failed
    raise RetryError(f"请求 {url} 失败，已重试{max_retries}次")


def to_data_base(data):
    """Push one processed article to the remote database.

    Returns True when the article should be marked as done (pushed
    successfully, or deliberately filtered out), False when it should be
    re-collected later (transient failure, push failure, or debug mode).
    """

    ####################################################  数据过滤 ######################################################################
    serialized = str(data)

    # Transient fetch failures: don't dedupe, let a later run retry
    if "__获取内容为空__" in serialized or "__获取内容出错__" in serialized:
        logger.info(
            f"获取文章二级跳转页面失败，不推送，待再次采集：{data['article_id']},{str(data['content'])[:100]}...")
        return False

    # Repost cycles are permanent: dedupe so this article is never retried
    if "__循环递归__" in serialized:
        logger.info(f"文章出现循环嵌套，加入去重不再采集：{data['article_id']},{str(data['content'])[:100]}...")
        return True

    # Drop anything containing a blocked keyword (still marked as done)
    for keyword in block_lis:
        if keyword in serialized:
            logger.info(f"文本中包含过滤关键词：{keyword} => {data['article_id']}  {str(data['content'])[:100]}...")
            return True

    # Rewrite "微博正文" to "内容正文" throughout the payload.
    # (The original called str.replace without using its return value,
    # so the substitution silently never happened.)
    temp_str_data = json.dumps(data, ensure_ascii=False)
    data = json.loads(temp_str_data.replace("微博正文", "内容正文"))
    ################################################################################################################################

    # Debug mode only simulates the push -- checked BEFORE the request,
    # which the original code still issued in debug mode.
    if DEBUG:
        logger.info(f"模拟推送=> {data['article_id']} {str(data['content'])[:100]}...")
        return False

    # POST the data to the remote database
    req = requests.post(REMOTE_DB_PUSH_URL, json=data, verify=False)

    # The endpoint signals success by including "success" in the body
    if "success" not in req.content.decode():
        logger.error(f"推送请求失败:ID = {data['article_id']} {req.text[:100]}...")
        return False
    else:
        logger.info(f"推送成功: ID = {data['article_id']} {str(data['content'])[:100]}...")
        return True


def parse_weibo_time(time_str):
    """Convert Weibo's timestamp format to 'YYYY-MM-DD HH:MM:SS'.

    On any parsing failure the error is logged and the input string is
    returned unchanged.
    """
    weibo_format = '%a %b %d %H:%M:%S %z %Y'
    try:
        parsed = datetime.strptime(time_str, weibo_format)
    except Exception as e:
        logger.error(f"时间解析失败：{str(e)}")
        return time_str
    return parsed.strftime('%Y-%m-%d %H:%M:%S')


def extract_images(mblog):
    """Return de-duplicated image URLs proxied through Baidu's downloader.

    For each entry in mblog['pics'] the high-resolution 'large' variant is
    preferred, falling back to the plain 'url'.  Order of the returned list
    is not guaranteed (a set handles de-duplication).
    """
    proxy_prefix = "https://image.baidu.com/search/down?url="
    found = set()

    for pic in mblog.get('pics') or []:
        large = pic.get('large') or {}
        # Prefer the large/high-res variant; otherwise take the small one
        target = large.get('url') or pic.get('url')
        if target:
            found.add(proxy_prefix + target)

    return list(found)


def parse_url_get_info(url):
    """Resolve a product URL via the external link-parse service.

    Returns:
        tuple: (productId, platformName), or ("", "") when the service is
        unreachable, returns non-JSON, or its response lacks the expected
        fields.
    """
    headers = {
        'Content-Type': 'application/json',
    }

    json_data = {
        'content': f'{url}',
    }

    try:
        response = requests.post('http://cgi-bin.lottefuture.com/python/link-parse.php', headers=headers,
                                 json=json_data, verify=False)
        # .json() raises on non-JSON bodies, so it must live inside the try
        # (the original decoded it before the try and could crash there).
        res_js = response.json()

        matched = res_js['data']['items'][0]['matchedPlatforms'][0]
        productId = matched['data']['productInfo']['productId']
        platform_name = matched['platformName']
    except (ValueError, KeyError, IndexError, TypeError,
            requests.exceptions.RequestException):
        # Narrowed from a bare except: anything else (e.g. KeyboardInterrupt)
        # should propagate instead of being swallowed.
        return "", ""

    return productId, platform_name


def get_platform(url):
    """Map a product URL to its e-commerce platform name.

    Checks known platform domains in order and returns the matching
    Chinese platform name, or "其他" when none match.
    """
    platform_domains = [
        ("淘宝", ("tmall.com", "taobao.com", "tb.cn", "tmall.hk")),
        ("京东", ("jd.com", "3.cn")),
        ("拼多多", ("pinduoduo.com", "yangkeduo.com")),
        ("美团", ("meituan.com", "dpurl.cn")),
        ("唯品会", ("vip.com",)),
    ]
    for name, domains in platform_domains:
        if any(domain in url for domain in domains):
            return name
    return "其他"


def extract_contents(text, filter_set):
    """
    Parse a post-body HTML fragment into structured content items.

    Walks the top-level children of the parsed fragment: text nodes become
    {"type": "text"} items; <a> tags are unwrapped and redirect-resolved,
    then classified as an embedded Weibo post ("extra", parsed recursively
    via parse_weibo_data), a product link ("link"), an image ("image"),
    any other link ("link2"), or plain text (@mentions / hashtags whose
    href is not a real URL).

    :param text: HTML string of the post body (NOTE: the name is reused
        below as a local for each text node's stripped string)
    :param filter_set: set of article IDs on the current recursion path,
        forwarded to parse_weibo_data to break repost cycles
    :return: (List[Dict] content items, number of product links found)
    """
    tag = BeautifulSoup(text, 'html.parser')
    result = []
    product_num = 0  # count of links that resolved to a product ID
    for child in tag.contents:
        if isinstance(child, NavigableString):
            text = str(child).strip()
            if text:
                result.append({"type": "text", "text": text})
        elif isinstance(child, Tag):
            if child.name.lower() == "a":
                link_text = child.get_text(strip=True)

                # Skip "U先素质试用" trial-program promo links entirely
                if "U先素质试用" in link_text : continue

                href = child.get("href", "")

                # Unwrap Weibo's redirect wrapper, then follow HTTP redirects
                # to obtain the final URL and its Content-Type
                # url,content_type =  extract_or_return_url(href).strip()

                url, content_type = get_redirect_url(extract_or_return_url(href).strip())

                # Only proceed when we got back a real http(s) URL
                if url and "http" in url:

                    """
                    # 判断是否与为此类url格式：https://weibo.com/7494417148/5168403977339214
                    # 判断是否与为此类url格式：https://weibo.com/5069029750/PqGDk3IeL#repost
                    # 判断是否与为此类url格式：https://weibo.com/2048344461/PkElO8VXZ?pagetype=profilefeed
                    """

                    # Regexes for the three observed embedded-Weibo URL shapes
                    # (examples in the bare string above).
                    # NOTE(review): the second pattern already matches
                    # digit-only and query-suffixed IDs, so pattern 1 wins for
                    # digits and pattern 3 looks unreachable — confirm intended.
                    weibo_patterns = [
                        r'https?://weibo\.com/\d+/(\d+)',  # numeric-ID form
                        r'https?://weibo\.com/\d+/([A-Za-z0-9]+)(?:#repost)?',  # alphanumeric-ID form
                        r'https?://weibo\.com/\d+/([A-Za-z0-9]+)(?:\?pagetype=profilefeed)?'  # query-suffixed form
                    ]

                    article_id = None
                    for pattern in weibo_patterns:
                        match = re.match(pattern, url)
                        if match:
                            article_id = match.group(1)
                            break

                    # Embedded Weibo post: parse it recursively (flag=False)
                    if article_id:
                        result.append(
                            {"type": "extra", "text": parse_weibo_data(article_id, False, filter_set), "url": url})
                        continue

                    productId, platform_name = parse_url_get_info(url)

                    # No product ID: coupon link, image link, or other link
                    if not productId:
                        if content_type and content_type.startswith('image/'):
                            result.append({"type": "image", "text": url})

                        else:
                            result.append(
                                {"type": "link2", "text": url, "platform": get_platform(url), "link_text": link_text})
                        continue

                    # Has a product ID: record it as a product link
                    else:
                        product_num += 1
                        result.append(
                            {"type": "link", "text": url, "product_id": productId, "platform_name": platform_name,
                            "link_text": link_text})

                # Not a real URL: likely an @mention or hashtag — keep its text
                else:
                    result.append({"type": "text", "text": link_text})

            else:
                # Other tag types: no extraction strategy yet
                pass

    return result, product_num


def get_article_data(article_id):
    """Fetch a post's detail page and extract its embedded $render_data JSON.

    Returns the first element of the render-data array, or {} when the
    page contains no match or the JSON cannot be decoded.
    """
    render_data_re = r'var \$render_data = (\[.*?\])\[0\] \|\| {};'
    res = ownRequests(f'https://m.weibo.cn/detail/{article_id}',
                      verify=False,
                      proxies=PROXY,
                      headers=HEADERS,
                      timeout=3)
    found = re.search(render_data_re, res.text, re.DOTALL)
    if not found:
        # Page did not embed render data (blocked / unexpected layout)
        return {}
    try:
        return json.loads(found.group(1))[0]
    except json.JSONDecodeError:
        return {}

def get_comments(article_id):
    """Fetch the hot-comment flow for a post and return the decoded JSON."""
    headers = {
        'sec-ch-ua-platform': '"Windows"',
        'X-XSRF-TOKEN': '3af3ce',
        'Referer': 'https://m.weibo.cn/detail/5183190165618689',
        'sec-ch-ua': '"Google Chrome";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
        'sec-ch-ua-mobile': '?0',
        'MWeibo-Pwa': '1',
        'X-Requested-With': 'XMLHttpRequest',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36',
        'Accept': 'application/json, text/plain, */*',
    }

    # Both id and mid carry the article id for this endpoint
    query = {
        'id': f'{article_id}',
        'mid': f'{article_id}',
        'max_id_type': '0',
    }

    resp = ownRequests('https://m.weibo.cn/comments/hotflow',
                       params=query,
                       headers=headers,
                       verify=False,
                       proxies=PROXY,
                       timeout=3)
    return resp.json()
    
def process_comments(comments_dic, filter_set):
    """Recursively convert raw comment dicts into the output structure.

    Args:
        comments_dic: iterable of raw comment dicts from the comments API
        filter_set: article-ID set forwarded to extract_contents so that
            embedded post links don't recurse forever

    Returns:
        list: one dict per comment, with nested replies under "comments"
    """
    result = []
    for item in comments_dic:
        comment_text, _ = extract_contents(item['text'], filter_set)

        # The API reports "no replies" as False (and sometimes null);
        # normalize every falsy value to an empty list before recursing.
        # (The original only special-cased False and crashed on null.)
        replies = item.get('comments') or []

        result.append({
            "comment_id": item['id'],
            "comment_text": comment_text,
            "comment_time": item['created_at'],
            "comment_user": item['user']['screen_name'],
            "comment_user_id": item['user']['id'],
            "comments": process_comments(replies, filter_set),
        })

    return result

def process_mblog(mblog, filter_set):
    """Build the output record for a single post (raw mblog dict).

    Parses the body HTML into content items, collects image URLs and hot
    comments, and — when the post is a repost — recursively parses the
    original post via parse_weibo_data.

    :param mblog: raw mblog dict from the detail API
    :param filter_set: article-ID set used to break repost recursion
    :return: dict with user info, content items, images, comments, etc.
    """
    
    original_text = mblog['text']
    
    content, product_num = extract_contents(original_text, filter_set)

    result = {
        # Avatar is proxied through Baidu's downloader, same as post images
        "avatar": "https://image.baidu.com/search/down?url=" + mblog['user'].get('profile_image_url', ''),
        "user_id": mblog['user'].get('id'),
        "nickname": mblog['user'].get('screen_name', ''),
        "publish_time": parse_weibo_time(mblog['created_at']),
        "content": content,
        "images": extract_images(mblog),
        "has_retweet": False,
        "article_id": mblog.get('id'),
        "product_num": product_num,
        # Hot comments sit under data.data in the comments API response
        "comments": process_comments(get_comments(mblog.get('id')).get('data',{}).get('data',[]),filter_set)
    }

    # Repost: also parse the original post (flag=False => not pushed/deduped)
    if 'retweeted_status' in mblog:
        result['has_retweet'] = True
        result['retweet_article_id'] = mblog['retweeted_status'].get('id')
        result['retweeted_content'] = parse_weibo_data(result['retweet_article_id'], False, filter_set)

    return result


def parse_weibo_data(article_id, flag=True, filter_set=None):
    """Fetch, parse and (optionally) push one article — the main entry point.

    :param article_id: Weibo post ID to process
    :param flag: True for a top-level crawl (checks the Redis dedupe set
        and pushes the result); False when called recursively for
        reposts / embedded posts
    :param filter_set: IDs already on the current recursion path, used to
        detect repost cycles; created fresh when None
    :return: the processed dict, a failure-marker dict, or None when the
        article was already crawled
    """
    # filter_set guards against recursive reposts: once an ID repeats on
    # the current path, stop recursing.
    if filter_set is None:
        filter_set = set()

    # Already on this recursion path => repost cycle, bail out with a marker
    if article_id in filter_set:
        return {"message": "__循环递归__", "article_id": article_id, "flag": "失败失败失败失败失败"}
    else:
        filter_set.add(article_id)

    try:
        # Skip articles already recorded in the Redis dedupe set
        if flag and R.sismember("unique_set", article_id):
            return
            # return {"message":"已经提取过该文章","article_id":article_id,"flag":"失败失败失败失败失败"}

        # A single worker signals a throttling requirement: wait 3 seconds
        if MAX_WORKERS == 1:
            time.sleep(3)

        data = get_article_data(article_id)

        if not data:
            # Occasionally the page comes back empty (likely anti-scraping);
            # return a marker so a later run retries this article.
            return {"message": "__获取内容为空__", "article_id": article_id, "flag": "失败失败失败失败失败"}

        mblog_data = data['status']
        processed = process_mblog(mblog_data, filter_set)

        # flag=True means top-level extraction; only then push, and only add
        # to the Redis set once the push (or deliberate filtering) succeeded.
        if flag and to_data_base(processed):
            R.sadd("unique_set", article_id)
        return processed
    except Exception as e:
        logger.error(f"处理文章ID {article_id}发生错误: ", e)
        traceback.print_exc()
        return {"message": "__获取内容出错__", "article_id": article_id, "flag": "失败失败失败失败失败"}


def get_redirect_url(url: str, max_redirects=10):
    """Follow redirects and return the final URL plus its Content-Type.

    Short URLs (< 50 chars) and URLs without a query string are assumed to
    already be direct links — the latter also skips in-text "点击查看"
    image links so the backend can display them the same way as regular
    Weibo images — and are returned untouched with a None content type.

    :param url: original request URL
    :param max_redirects: maximum number of redirects to follow (default 10)
    :return: (final_url, content_type); (None, None) on request failure
    """

    # Below this length the URL is almost certainly already a direct link
    if len(url) < 50:
        return url, None

    # No query string: skip redirect resolution (see docstring)
    if not urlparse(url).query:
        return url, None

    try:
        # requests.get() has no max_redirects parameter, so the original
        # silently ignored the argument; a Session honors it and makes the
        # TooManyRedirects handler below reachable at the configured limit.
        with requests.Session() as session:
            session.max_redirects = max_redirects
            response = session.get(
                url,
                allow_redirects=True,  # follow redirects automatically
                timeout=10,
                headers={'User-Agent': 'Mozilla/5.0'},
                verify=False,
            )
        return response.url, response.headers.get('Content-Type', '')
    except requests.exceptions.TooManyRedirects:
        logger.error(f"重定向链接：超出最大重定向次数限制,URL: {url}")
        return None, None
    except requests.exceptions.RequestException:
        logger.error(f"重定向链接：请求失败,URL: {url}")
        return None, None


# Usage example: resolve a shop redirect link to its final URL
if __name__ == "__main__":
    url = "https://shop.sc.weibo.com/h5/goods/index?iid=110020483444610003937945"
    print(get_redirect_url(url))
