import traceback
import concurrent.futures
from tools import *
import sys,os
from urllib.parse import urlparse, parse_qs, urlencode

def process_weibo_url(url):
    """Dispatch one weibo URL to the matching extractor.

    URLs containing 'extparam' are treated as the new API format;
    everything else is handled as a classic user-homepage URL.
    Each extractor pulls up to two pages per user.
    """
    pages_per_user = 2

    # NOTE(review): a per-user delay (e.g. time.sleep(3)) used to live here
    # to throttle requests; currently disabled.

    handler = process_api_url if 'extparam' in url else process_user_url
    handler(url, pages_per_user)

def process_api_url(url, pageNum):
    """Fetch up to `pageNum` pages from a new-format weibo API URL.

    For each page: request the URL, extract the article IDs from the JSON
    response and process each one, then rebuild the URL with the next
    page's `since_id`. Stops early when no `since_id` is returned.

    Errors are logged and swallowed so one bad URL does not abort the
    whole crawl loop.
    """
    try:
        logger.info(f"正在提取API链接: {url}...")
        for _ in range(pageNum):
            # Request the API URL directly as provided.
            api_response = ownRequests(
                url,
                headers=HEADERS,
                timeout=3,
                proxies=PROXY,
                verify=False)
            json_data = api_response.json()

            # Extract article IDs and process each article.
            article_ids = extract_article_ids_from_api_response(json_data)
            for article_id in article_ids:
                parse_weibo_data(article_id)

            # since_id for the next page; absent means last page reached.
            since_id = get_next_page_since_id(json_data, is_new_format=True)
            if not since_id:
                break

            # Rebuild the URL with the updated since_id query parameter.
            parsed = urlparse(url)
            query_params = parse_qs(parsed.query)
            query_params['since_id'] = [str(since_id)]
            new_query = urlencode(query_params, doseq=True)
            url = f"{parsed.scheme}://{parsed.netloc}{parsed.path}?{new_query}"

        logger.info(f"API链接提取完毕: {url}")

    except Exception as e:
        # Fix: the original logger.error(f"...", e) passed the exception as a
        # stray %-format argument with no placeholder, breaking the log record.
        logger.error(f"处理API链接时发生错误: {url} - {e}")
        traceback.print_exc()


def process_user_url(url, pageNum):
    """Fetch up to `pageNum` pages for a classic weibo user-homepage URL.

    Resolves the user ID and container ID from the URL, then pages through
    the m.weibo.cn getIndex API, processing every article ID found.
    Stops early when no `since_id` is returned (last page).

    Errors are logged and swallowed so one bad URL does not abort the
    whole crawl loop.
    """
    # Resolve user ID and container ID; bail out on malformed URLs.
    extract_result = extract_article_id(url, pageNum)
    if extract_result is None:
        logger.error(f"无效的URL格式: {url}")
        return

    user_id, container_id = extract_result
    api_url = (
        f"https://m.weibo.cn/api/container/getIndex"
        f"?type=uid&value={user_id}"
        f"&containerid={container_id}")

    try:
        logger.info(f"正在提取链接: {url}...")
        for _ in range(pageNum):
            # Query the container getIndex API for the current page.
            api_response = ownRequests(
                api_url,
                headers=HEADERS,
                timeout=3,
                proxies=PROXY,
                verify=False)
            json_data = api_response.json()

            # Extract article IDs and process each article.
            article_ids = extract_article_ids_from_api_response(json_data)
            for article_id in article_ids:
                parse_weibo_data(article_id)

            # since_id for the next page; absent means last page reached.
            since_id = get_next_page_since_id(json_data, is_new_format=False)
            if not since_id:
                break

            # Build the next page's API URL with the new since_id.
            api_url = (
                f"https://m.weibo.cn/api/container/getIndex"
                f"?type=uid&value={user_id}"
                f"&containerid={container_id}"
                f"&since_id={since_id}")

        logger.info(f"链接提取完毕: {url}")

    except Exception as e:
        # Fix: the original logger.error(f"...", e) passed the exception as a
        # stray %-format argument with no placeholder, breaking the log record.
        logger.error(f"处理链接时发生错误: {url} - {e}")
        traceback.print_exc()

def main():
    """Run one crawl round: read URLs from URLS_PATH and process them in a thread pool."""
    logger.info("【一轮开始】")

    with open(URLS_PATH, mode='r', encoding="utf-8") as f:
        # Fix: skip blank/whitespace-only lines, which the original submitted
        # to the pool and logged as invalid URLs.
        weibo_urls = [line.strip() for line in f.read().splitlines() if line.strip()]

    logger.info(f"读取到的URL: {str(weibo_urls)}")

    # Crawling is I/O-bound, so a thread pool overlaps the network waits.
    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        executor.map(process_weibo_url, weibo_urls)

    logger.info("【一轮结束】")

if __name__ == "__main__":
    """
    https://weibo.com/u/5748988380，小屁屁找
    https://weibo.com/u/2794284831，好心天天分享
    https://weibo.com/u/6439151235，败家少女的日常
    https://weibo.com/u/5371906414，佩奇线报
    """
    while True:
        main()
        time.sleep(5)