import json
import os
import re
import sys
import time
import traceback
from datetime  import datetime
import time
import redis
from urllib.parse import urlparse, parse_qs, unquote,urlencode
from util.tools import *

import requests
import urllib3
from bs4 import BeautifulSoup, NavigableString, Tag
from loguru import logger
from setting import *
from util.public_utils import *
import random

import myglobal

# Load the block-keyword list once at import time: one keyword per line.
# Articles whose serialized form contains any of these keywords are skipped
# by to_data_base(). BLOCK_KEYWORD_PATH comes from setting (star import).
with open(BLOCK_KEYWORD_PATH, 'r', encoding='utf-8') as f:
    temp = f.read()
    if temp:
        # NOTE(review): a trailing newline in the file yields an empty-string
        # keyword, which matches everything — verify the file has no blank lines.
        block_lis = temp.split("\n")
    else:
        block_lis = []


class CaptchaError(Exception):
    """Raised when Weibo serves a captcha / verification page.

    NOTE(review): a second, simpler ``CaptchaError`` is declared later in
    this module and shadows this one at import time.
    """

    def __init__(self, errorInfo):
        # Pass the message to Exception so ``args`` / ``repr`` are meaningful.
        # (The original passed ``self``, putting the exception instance
        # itself into ``args``.)
        super().__init__(errorInfo)
        self.errorInfo = errorInfo

    def __str__(self):
        return self.errorInfo


class RetryError(Exception):
    """Raised after all retry attempts have been exhausted.

    NOTE(review): a second, simpler ``RetryError`` is declared later in
    this module and shadows this one at import time.
    """

    def __init__(self, errorInfo):
        # Pass the message to Exception so ``args`` / ``repr`` are meaningful.
        # (The original passed ``self``, putting the exception instance
        # itself into ``args``.)
        super().__init__(errorInfo)
        self.errorInfo = errorInfo

    def __str__(self):
        return self.errorInfo



def extract_article_id(url, pageNum):
    """Extract (user_id, container_id) from a Weibo profile URL.

    The container id is the user id prefixed with the profile-feed
    type code "107603". Returns None when the URL path is not of the
    ``/u/<user_id>`` form. ``pageNum`` is accepted for interface
    compatibility but is not used here.
    """
    segments = urlparse(url).path.strip('/').split('/')
    if len(segments) >= 2 and segments[0] == 'u':
        uid = segments[1]
        return uid, f"107603{uid}"
    return None


def extract_article_ids_from_api_response(json_data):
    """Collect mblog article ids from a profile-feed API response.

    Handles both the new layout (cards carrying a ``card_group`` list)
    and the legacy layout (``mblog`` directly on the card).

    Args:
        json_data: parsed JSON payload of the API response.

    Returns:
        list: article ids in feed order.
    """
    ids = []
    for card in json_data.get("data", {}).get("cards", []):
        # New format nests posts one level deeper inside ``card_group``;
        # otherwise treat the card itself as the single entry.
        entries = card.get("card_group", []) if "card_group" in card else [card]
        for entry in entries:
            mblog = entry.get("mblog")
            if mblog and "id" in mblog:
                ids.append(mblog["id"])
    return ids


def get_next_page_since_id(json_data, is_new_format=False):
    """Return the ``since_id`` pagination cursor, or None on the last page.

    Args:
        json_data: parsed JSON payload of the API response.
        is_new_format: True for the new API shape (``pageInfo``),
            False for the legacy shape (``cardlistInfo``).

    Returns:
        str | None: the next-page cursor if present.
    """
    info_key = "pageInfo" if is_new_format else "cardlistInfo"
    return json_data.get("data", {}).get(info_key, {}).get("since_id")


def extract_or_return_url(input_url):
    """Unwrap known Weibo redirector links.

    For URLs hosted on a Weibo redirector (weibo.cn/sinaurl and friends),
    return the decoded value of the ``u`` (or ``url``) query parameter.
    Any other URL is returned unchanged.

    Args:
        input_url (str): URL to process.

    Returns:
        str: the decoded target URL, or the original link.
    """
    parsed = urlparse(input_url)

    redirect_hosts = ("weibo.cn", "shop.sc.weibo.com", "passport.weibo.com")
    redirect_paths = ("/sinaurl", "/h5/sclick/index", "/visitor/visitor")

    # Anything that is not a recognised redirector passes through untouched.
    if parsed.netloc not in redirect_hosts or parsed.path not in redirect_paths:
        return input_url

    params = parse_qs(parsed.query)

    # ``u`` takes precedence over ``url``; only accept a single value.
    for key in ("u", "url"):
        values = params.get(key)
        if values and len(values) == 1:
            return unquote(values[0])

    return input_url

# Custom exception classes.
# NOTE(review): these re-declare CaptchaError / RetryError defined earlier in
# this module; being later, these simpler declarations are the ones in effect
# at runtime. Consider keeping only one set.
class RetryError(Exception):
    """Raised after all retry attempts have been exhausted."""
    pass

class CaptchaError(Exception):
    """Raised when a captcha / verification page is encountered."""
    pass



def ownRequests(url, **args):
    """Fetch an m.weibo.cn API URL with retries and rotating cookies.

    (The previous docstring described a different function.) Performs a
    GET request and accepts the response only when it is HTTP 200 and its
    JSON body has ``ok == 1``.

    Args:
        url: API URL to request.
        **args: extra keyword arguments forwarded to ``requests.get``
            (``params``, ``proxies``, ``verify``, ``timeout``, ...).
            BUG FIX: the original accepted but silently discarded these,
            so e.g. the query parameters passed by get_comments() were
            never sent with the request.

    Returns:
        requests.Response: the successful response, or None when every
        attempt failed.
    """
    if 'extparam' in url:
        myglobal.IS_WEIBO_SUBOR = True

    user_id = myglobal.user_id
    retry_count = 5

    # Default browser-like headers for m.weibo.cn; callers may override.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:143.0) Gecko/20100101 Firefox/143.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Host': 'm.weibo.cn',
    }

    cookies = rand_cookie()

    for attempt in range(retry_count):
        try:
            # Merge defaults with caller-supplied overrides; the cookie jar
            # always comes from the rotation so retries use fresh cookies.
            kwargs = {'headers': headers, 'timeout': 10}
            kwargs.update(args)
            kwargs['cookies'] = cookies
            response = requests.get(url, **kwargs)

            if response.status_code != 200:
                cookies = rand_cookie()
                logger.info(f"❌请求失败，状态码:https://m.weibo.cn/u/{user_id}==== {url}== {response.status_code}，尝试 {attempt+1}/{retry_count}")
                time.sleep(random.uniform(1, 2))
                continue

            # The API signals logical success with ``ok == 1``.
            try:
                data = response.json()
                if data.get('ok') == 1:
                    return response
                else:
                    cookies = rand_cookie()
                    logger.info(f"API响应失败:https://m.weibo.cn/u/{user_id}==== {url}=={data}")
            except json.JSONDecodeError:
                cookies = rand_cookie()
                logger.info(f"❌JSON解析错误，尝试 响应内容:https://m.weibo.cn/u/{user_id}==== {url}== 尝试 {attempt+1}/{retry_count}")
            cookies = rand_cookie()
            time.sleep(random.uniform(1, 2))

        except requests.exceptions.RequestException as e:
            cookies = rand_cookie()
            logger.info(f"❌请求异常:https://m.weibo.cn/u/{user_id}==== {url}== {e}，尝试 {attempt+1}/{retry_count}")
            time.sleep(random.uniform(1, 2))

    return None


# Push processed article data to the remote database.
def to_data_base(data):
    """Push one processed article to the remote database endpoints.

    Returns True when the article should be marked as done (pushed
    successfully, or deliberately skipped/filtered), False when it should
    be retried on a later crawl.
    """

    # ---------------------------- data filtering ----------------------------
    # Empty/errored content is usually a transient anti-crawler failure:
    # do NOT deduplicate it, so it gets re-collected next time.
    if "__获取内容为空__" in str(data) or "__获取内容出错__" in str(data):
        logger.info(f"❌获取文章二级跳转页面失败，不推送，待再次采集：https://m.weibo.cn/detail/{data['article_id']}==={data['article_id']},{data['content']}...")
        return False

    # Recursive repost loops are permanent: deduplicate and never retry.
    if "__循环递归__" in str(data):
        logger.info(f"❌文章出现循环嵌套，加入去重不再采集：{data['article_id']},{data['content']}...")
        return True

    # Drop articles containing any blocked keyword.
    for keyword in block_lis:
        if keyword in str(data):
            logger.info(f"文本中包含过滤关键词：{keyword} => {data['article_id']}  {data['content']}...")
            return True

    temp_str_data = json.dumps(data, ensure_ascii=False)
    # BUG FIX: str.replace returns a new string; the original discarded the
    # result, so "微博正文" was never actually rewritten.
    temp_str_data = temp_str_data.replace("微博正文", "内容正文")
    data = json.loads(temp_str_data)

    if 'article_id' in data:
        print('推送到远端数据库')
        article_id = data['article_id']
        data['article_detail_url'] = f"https://m.weibo.cn/detail/{article_id}"
        data['is_weibo_chaohua'] = myglobal.IS_WEIBO_SUBOR
        print(data)
        write_log(data, article_id)
        print('推送到远端数据库')
    ########################################################################

    # POST the payload to every configured endpoint. Only the LAST response
    # is checked below (pre-existing behaviour, kept as-is).
    req = None
    for url in URLS:
        req = requests.post(url, json=data, verify=False)

    # BUG FIX: with no endpoints configured the original raised
    # NameError on ``req``; report a failed push instead.
    if req is None:
        logger.error(f"❌推送请求失败:ID = {data['article_id']} 无可用推送地址...")
        return False

    if "success" not in req.content.decode():
        logger.error(f"❌推送请求失败:ID = {data['article_id']} {req.text}...")
        return False
    else:
        logger.info(f"✅推送成功: ID = {data['article_id']} {str(data['content'])[:100]}...")
        return True


def parse_weibo_time(time_str):
    """Convert Weibo's created_at format to ``YYYY-MM-DD HH:MM:SS``.

    Input looks like ``"Tue Sep 02 10:30:45 +0800 2025"``. On any parse
    failure the original string is returned unchanged (and the error is
    logged).
    """
    weibo_format = "%a %b %d %H:%M:%S %z %Y"
    try:
        parsed = time.strptime(time_str, weibo_format)
        return time.strftime('%Y-%m-%d %H:%M:%S', parsed)
    except Exception as e:
        logger.error(f"❌时间解析失败：{e}")
        return time_str

# Image host rewriting.
def replace_sinaimg_url_regex(url):
    """Rewrite ``wx*.sinaimg.cn/large/`` image hosts to ``lz.sinaimg.cn/oslarge/``."""
    return re.sub(r'//(wx\d?)\.sinaimg\.cn/large/', r'//lz.sinaimg.cn/oslarge/', url)
def extract_images(mblog):
    """Extract all image URLs from an mblog, preferring the HD variant.

    Each URL is rewritten to the high-resolution host and wrapped in the
    Baidu image proxy. Duplicates are removed while PRESERVING order
    (the original used ``set`` and returned a nondeterministic order, and
    also printed debug output on every call — both removed).

    Args:
        mblog: raw weibo post dict; images come from its ``pics`` list.

    Returns:
        list: deduplicated, proxied image URLs.
    """
    images = []

    for pic in mblog.get('pics') or []:
        # Prefer the large/HD image; fall back to the small one.
        if pic.get('large') and pic['large'].get('url'):
            images.append(replace_sinaimg_url_regex(pic['large']['url']))
        elif pic.get('url'):
            images.append(replace_sinaimg_url_regex(pic['url']))

    proxied = ("https://image.baidu.com/search/down?url=" + u for u in images)
    # dict.fromkeys performs an order-preserving dedup.
    return list(dict.fromkeys(proxied))

# Resolve a product id from a (possibly shortened) product link.
def parse_url_get_info(url):
    """Ask the product-parsing service to resolve a product link.

    Args:
        url: the product URL to resolve.

    Returns:
        tuple: ``(productId, platform_name)``, or ``("", "")`` when the
        service could not match a product.
    """
    headers = {
        'Content-Type': 'application/json',
    }

    json_data = {
        'content': f'{url}',
    }

    response = requests.post(PARSE_PRODUCT, headers=headers, json=json_data,
                             verify=False)
    res_js = response.json()

    try:
        matched = res_js['data']['items'][0]['matchedPlatforms'][0]
        productId = matched['data']['productInfo']['productId']
        platform_name = matched['platformName']
    except (KeyError, IndexError, TypeError):
        # A missing key or empty list means "no product matched". The
        # original bare ``except`` also swallowed unrelated bugs.
        return "", ""

    return productId, platform_name


def get_platform(url):
    """Map a product URL to its e-commerce platform name (Chinese label).

    Returns "其他" (other) when no known platform domain is present.
    """
    platform_domains = (
        ("淘宝", ("tmall.com", "taobao.com", "tb.cn", "tmall.hk")),
        ("京东", ("jd.com", "3.cn")),
        ("拼多多", ("pinduoduo.com", "yangkeduo.com")),
        ("美团", ("meituan.com", "dpurl.cn")),
        ("唯品会", ("vip.com",)),
    )
    for name, domains in platform_domains:
        if any(domain in url for domain in domains):
            return name
    return "其他"


def extract_contents(text, filter_set):
    """
    Parse the HTML of a weibo's text into structured content items.

    Walks only the direct children of the fragment: bare text nodes become
    {"type": "text"} items, and <a> tags are resolved into linked weibo
    articles ("extra"), images, product links ("link") or generic links
    ("link2"). Non-<a> tags are currently ignored.

    :param text: HTML string to parse.
    :param filter_set: set of article ids already being processed, passed
        through to parse_weibo_data() to break repost recursion.
    :return: (List[Dict] content items, int count of product links found)
    """
    tag = BeautifulSoup(text, 'html.parser')
    result = []
    product_num = 0
    for child in tag.contents:
        if isinstance(child, NavigableString):
            # Plain text between tags.
            text = str(child).strip()
            if text:
                result.append({"type": "text", "text": text})
        elif isinstance(child, Tag):
            if child.name.lower() == "a":
                link_text = child.get_text(strip=True)

                # Skip "U先素质试用" trial-campaign links entirely.
                if "U先素质试用" in link_text : continue

                href = child.get("href", "")

                # Unwrap the weibo redirector, then follow redirects to get
                # the final URL plus its Content-Type.
                # url,content_type =  extract_or_return_url(href).strip()

                url, content_type = get_redirect_url(extract_or_return_url(href).strip())

                # Only treat it as a link when a real URL came back.
                if url and "http" in url:

                    """
                    # 判断是否与为此类url格式：https://weibo.com/7494417148/5168403977339214
                    # 判断是否与为此类url格式：https://weibo.com/5069029750/PqGDk3IeL#repost
                    # 判断是否与为此类url格式：https://weibo.com/2048344461/PkElO8VXZ?pagetype=profilefeed
                    """

                    # Regexes recognising links to other weibo articles and
                    # capturing the article id.
                    # NOTE(review): pattern 2's "(?:#repost)?" is optional, so
                    # it already matches any alphanumeric id and pattern 3
                    # appears unreachable — confirm before simplifying.
                    weibo_patterns = [
                        r'https?://weibo\.com/\d+/(\d+)',  # numeric-id form
                        r'https?://weibo\.com/\d+/([A-Za-z0-9]+)(?:#repost)?',  # alphanumeric-id form
                        r'https?://weibo\.com/\d+/([A-Za-z0-9]+)(?:\?pagetype=profilefeed)?'  # id with query-string form
                    ]

                    article_id = None
                    for pattern in weibo_patterns:
                        match = re.match(pattern, url)
                        if match:
                            article_id = match.group(1)
                            break

                    # Linked weibo article: parse it recursively (flag=False
                    # so it is not pushed or deduplicated on its own).
                    if article_id:
                        result.append(
                            {
                            "type": "extra", 
                            "text": parse_weibo_data(article_id, False, filter_set), 
                            "url": url
                            }
                            )
                        continue

                    productId, platform_name = parse_url_get_info(url)

                    # No product id: a coupon link, an image, or some other
                    # non-product link.
                    if not productId:
                        # Could not be resolved into a product.
                        if content_type and content_type.startswith('image/'):
                            result.append(
                                {
                                "type": "image", 
                                "text": url
                                }
                                )

                        else:

                            result.append(
                                    {
                                    "type": "link2",
                                    "text": url,
                                    #"text": NO_PARSE_DEFAULT_URL,
                                    "platform": get_platform(url),
                                    "link_text": link_text
                                    }
                                   )
                        continue

                    # Has a product id: a genuine product link.
                    else:
                        product_num += 1
                        result.append(
                            {
                            "type": "link",
                            "text": url, 
                            "product_id": productId,
                            "platform_name": platform_name,
                            "link_text": link_text
                            }
                            )

                # Not a proper link: probably an @mention or hashtag, so
                # keep its display text.
                else:
                    result.append({"type": "text", "text": link_text})

                # (Direct-link resolution and product lookup happen above.)

            else:
                # Extension point for other tag types.
                pass

    return result, product_num

# Fetch the article detail page.
def get_weibo_detail_data(weibo_id: str, max_retries: int = 5):
    """
    Fetch and parse the m.weibo.cn detail page for one weibo.

    Args:
        weibo_id: weibo id, e.g. "5203489464782023"
        max_retries: maximum number of attempts (default 5)

    Returns:
        dict: the first element of the page's embedded ``$render_data``
        array, or None on permission/verification pages or after all
        retries are exhausted.
    """
    url = f"https://m.weibo.cn/detail/{weibo_id}"

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:143.0) Gecko/20100101 Firefox/143.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Host': 'm.weibo.cn',
    }
    cookies = rand_cookie()

    for attempt in range(max_retries):
        # Deliberate slow-down to avoid triggering anti-crawler measures.
        time.sleep(random.uniform(10, 20))

        try:
            response = requests.get(url, headers=headers, cookies=cookies, timeout=10)

            if response.status_code != 200:
                print(f"请求失败，状态码: {response.status_code}，尝试 {attempt+1}/{max_retries}")
                continue

            # Decode the body once (the original decoded it separately for
            # each substring check).
            body = response.content.decode('utf-8')
            if "权限" in body:
                print("微博没有查看权限")
                return None
            if "微博验证" in body:
                print(f"微博验证{url}")
                return None

            # The page embeds its data as: var $render_data = [...][0] || {};
            pattern = r'var \$render_data = (\[.*?\])\[0\] \|\| {};'
            match = re.search(pattern, response.text, re.DOTALL)

            if match:
                try:
                    render_data = json.loads(match.group(1))
                    if render_data and len(render_data) > 0:
                        # Mirror the page's own ``[...][0]`` indexing.
                        # (Dead code after this return was removed.)
                        return render_data[0]
                    else:
                        print("渲染数据为空或格式不正确")
                except json.JSONDecodeError as e:
                    cookies = rand_cookie()
                    print(f"JSON解析错误: {e}，尝试 {attempt+1}/{max_retries}")
            else:
                cookies = rand_cookie()
                print(f"未找到渲染数据,尝试{body} ===={attempt+1}/{max_retries}")
            time.sleep(random.uniform(10, 20))
        except requests.exceptions.RequestException as e:
            cookies = rand_cookie()
            print(f"请求异常: {e}，尝试 {attempt+1}/{max_retries}")

        # Wait a little before the next retry (skipped after the final one).
        if attempt < max_retries - 1:
            cookies = rand_cookie()
            time.sleep(2)

    return None
def get_article_data(article_id):
    """Fetch the detail render-data for one article id.

    Thin wrapper around get_weibo_detail_data() that logs and returns
    None on failure. (A large block of commented-out legacy code that
    scraped the page via ownRequests() was removed.)

    Args:
        article_id: weibo article id.

    Returns:
        dict | None: the render-data dict, or None when the fetch failed.
    """
    data = get_weibo_detail_data(article_id)
    if data:
        return data
    print(f"获取微博  https://m.weibo.cn/detail/{article_id}  数据失败")
    return None

def get_comments(article_id):
    """Fetch the hot comment flow for an article.

    Args:
        article_id: weibo article id.

    Returns:
        dict: parsed JSON of the hotflow endpoint, or {} when the request
        ultimately failed. BUG FIX: ownRequests() returns None after its
        retries are exhausted and the original then crashed on
        ``None.json()``.
    """
    headers = {
        'sec-ch-ua-platform': '"Windows"',
        'X-XSRF-TOKEN': '3af3ce',
        'Referer': 'https://m.weibo.cn/detail/5183190165618689',
        'sec-ch-ua': '"Google Chrome";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
        'sec-ch-ua-mobile': '?0',
        'MWeibo-Pwa': '1',
        'X-Requested-With': 'XMLHttpRequest',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36',
        'Accept': 'application/json, text/plain, */*',
    }

    params = {
        'id': f'{article_id}',
        'mid': f'{article_id}',
        'max_id_type': '0',
    }

    response = ownRequests('https://m.weibo.cn/comments/hotflow', params=params, headers=headers, verify=False, proxies=PROXY, timeout=3)

    if response is None:
        return {}
    return response.json()
    
def process_comments(comments_dic,filter_set):
    """
    Recursively convert raw API comments into the output structure.

    :param comments_dic: iterable of raw comment dicts from the API (the
        name is historical — it is iterated as a list).
    :param filter_set: article ids already being processed, forwarded to
        extract_contents() to break repost recursion.
    :return: list of structured comment dicts; nested replies appear
        under the "comments" key.
    """
    result = []
    for i in comments_dic:

        temp1, temp2 =extract_contents(i['text'], filter_set)
        result.append({
            "comment_id": i['id'],
            "comment_text":temp1,
            "comment_time": i['created_at'],
            "comment_user": i['user']['screen_name'],
            "comment_user_id": i['user']['id'],
            # NOTE(review): looks like the API uses ``False`` (not a missing
            # key) for "no replies", hence the explicit ``is not False``
            # check before recursing — confirm against live responses.
            "comments": process_comments(i.get('comments',{}) if i.get('comments',{}) is not  False else {},filter_set)
        })
    
    return result

def process_mblog(mblog, filter_set):
    """
    Convert one raw mblog dict into the structured output record.

    :param mblog: raw weibo post dict (the ``status`` object of the
        detail page render-data).
    :param filter_set: article ids already visited; forwarded to break
        repost recursion.
    :return: dict with user info, parsed content, images, product count
        and — for reposts — the recursively parsed original article.
    """
    
    original_text = mblog['text']
    
    content, product_num = extract_contents(original_text, filter_set)

    result = {
        # The avatar is proxied through Baidu's image downloader.
        "avatar": "https://image.baidu.com/search/down?url=" + mblog['user'].get('profile_image_url', ''),
        "user_id": mblog['user'].get('id'),
        "nickname": mblog['user'].get('screen_name', ''),
        "publish_time": parse_weibo_time(mblog['created_at']),
        "content": content,
        "images": extract_images(mblog),
        "has_retweet": False,
        "article_id": mblog.get('id'),
        "product_num": product_num,
        #"comments": process_comments(get_comments(mblog.get('id')).get('data',{}).get('data',[]),filter_set)
    }

    # Repost: recursively parse the original article (flag=False so it is
    # not pushed or deduplicated on its own).
    if 'retweeted_status' in mblog:
        result['has_retweet'] = True
        result['retweet_article_id'] = mblog['retweeted_status'].get('id')
        result['retweeted_content'] = parse_weibo_data(result['retweet_article_id'], False, filter_set)

    return result


def parse_weibo_data(article_id, flag=True, filter_set=None):
    """Fetch, parse and (optionally) push one weibo article.

    Args:
        article_id: weibo article id.
        flag: True on the top-level call (dedup against redis and push to
            the remote DB); False when invoked recursively for a repost
            or linked article.
        filter_set: ids already visited in this recursion chain, used to
            detect repost cycles.

    Returns:
        dict | None: the processed article, a failure-marker dict, or
        None when the article was already collected.
    """
    # Fresh set per top-level call — never a mutable default argument.
    if filter_set is None:
        filter_set = set()

    # Cycle detection: a repost chain returning to an id we are already
    # processing is abandoned permanently.
    if article_id in filter_set:
        return {"message": "__循环递归__", "article_id": article_id, "flag": "失败失败失败失败失败"}
    else:
        filter_set.add(article_id)

    try:
        # Skip articles already recorded in the redis dedup set.
        if flag and R.sismember("unique_set", article_id):
            print(f"已经提取过该文章：{article_id}")
            return
            # return {"message":"已经提取过该文章","article_id":article_id,"flag":"失败失败失败失败失败"}

        # A single worker signals a slow-down requirement: wait 3 seconds.
        if MAX_WORKERS == 1:
            time.sleep(3)

        data = get_article_data(article_id)

        if not data:
            # Occasionally empty due to anti-crawler measures; mark it for
            # a later retry instead of deduplicating.
            return {"message": "__获取内容为空__", "article_id": article_id, "flag": "失败失败失败失败失败"}

        mblog_data = data['status']
        processed = process_mblog(mblog_data, filter_set)

        # Only the top-level call pushes; the id enters the redis dedup set
        # only after a successful push.
        if flag and to_data_base(processed):
            R.sadd("unique_set", article_id)
        return processed
    except Exception as e:
        # BUG FIX: loguru's logger.error(msg, e) treats ``e`` as a format
        # argument for ``{}`` placeholders; with none present the exception
        # was silently dropped. Embed it in the message instead.
        logger.error(f"处理文章ID {article_id}发生错误: {e}")
        traceback.print_exc()
        return {"message": "__获取内容出错__", "article_id": article_id, "flag": "失败失败失败失败失败"}


def get_redirect_url(url: str, max_redirects=10):
    """Follow all redirects and return the final URL plus Content-Type.

    :param url: original request address.
    :param max_redirects: maximum redirects to follow (default 10).
        BUG FIX: the original accepted this parameter but never applied
        it; a Session is now used so it is actually enforced.
    :return: ``(final_url, content_type)``; ``(url, None)`` when the URL
        is short or query-less (treated as already-direct links), and
        ``(None, None)`` on failure.
    """
    # Very short URLs are almost certainly already direct links.
    if len(url) < 50:
        return url, None

    # No query string: skip processing (e.g. "click to view" image links),
    # which conveniently lets the backend render them like ordinary weibo
    # images.
    if not urlparse(url).query:
        return url, None

    try:
        with requests.Session() as session:
            session.max_redirects = max_redirects
            response = session.get(
                url,
                allow_redirects=True,  # follow redirects automatically
                timeout=10,
                headers={'User-Agent': 'Mozilla/5.0'},
                verify=False,
            )
        return response.url, response.headers.get('Content-Type', '')
    except requests.exceptions.TooManyRedirects:
        logger.error(f"重定向链接：超出最大重定向次数限制,URL: {url}")
        return None, None
    except requests.exceptions.RequestException:
        logger.error(f"重定向链接：请求失败,URL: {url}")
        return None, None


# Usage example / manual smoke test: resolve a shop redirect link.
if __name__ == "__main__":
    url = "https://shop.sc.weibo.com/h5/goods/index?iid=110020483444610003937945"
    print(get_redirect_url(url))
