import traceback
import concurrent.futures
from util.data_utils import *
import sys,os
from urllib.parse import urlparse, parse_qs, urlencode
import time
import logging
import myglobal
from concurrent.futures import ProcessPoolExecutor
import math

from datetime import datetime

# --- Logging configuration ---
# One log file per day under error_logs/, e.g. error_logs/app_2024-01-01.log.
log_directory = "error_logs"
# exist_ok=True avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs(log_directory, exist_ok=True)

log_filename = f"{log_directory}/app_{datetime.now().strftime('%Y-%m-%d')}.log"

# File-based logging; encoding='utf-8' so Chinese text and emoji are written cleanly.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    filename=log_filename,
    encoding='utf-8'
)

# Module-level logger, named after this module.
logger = logging.getLogger(__name__)

def process_weibo_url(url, page_num=2):
    """Dispatch one Weibo URL to the matching extractor.

    URLs containing 'extparam' are super-topic (超话) API URLs and are handled
    by process_api_url; everything else is treated as a user home-page URL
    and handled by process_user_url.

    Args:
        url: The Weibo URL to process.
        page_num: Number of pages to fetch for this URL (default 2,
            matching the original hard-coded value).
    """
    logger.info(f"🚀正在提取API链接: {url}...")

    # NOTE(review): a per-user delay was considered here at some point
    # (previously a commented-out time.sleep(3)).

    # 'extparam' in the query string marks the new super-topic API URL format.
    if 'extparam' in url:
        # Use the logger rather than a bare print() for consistency.
        logger.info('运行微博超话url')
        process_api_url(url, page_num)
    else:
        process_user_url(url, page_num)

def process_api_url(url, pageNum):
    """Process the new super-topic API URL format.

    Fetches up to pageNum pages from the given API endpoint, extracts the
    article IDs from each JSON response and feeds them to parse_weibo_data,
    then follows the `since_id` pagination cursor until it runs out.

    Args:
        url: An m.weibo.cn container API URL (contains 'extparam').
        pageNum: Maximum number of pages to fetch.
    """
    logger.info(f"✅正在提取API链接: {url}...")
    try:
        for _ in range(pageNum):
            # Request the current page directly via the provided API URL.
            response = ownRequests(
                url,
                proxies=PROXY,
                verify=False)

            # A failed request yields None; skip this iteration and retry
            # the same URL on the next loop pass.
            if response is None:
                continue
            json_data = response.json()

            # Extract article IDs and process each article.
            article_ids = extract_article_ids_from_api_response(json_data)
            logger.info(f"提取到的文章id: {article_ids}")
            for article_id in article_ids:
                parse_weibo_data(article_id)

            # Pagination cursor for the next page; falsy means no more pages.
            since_id = get_next_page_since_id(json_data, is_new_format=True)
            if not since_id:
                break

            # Rebuild the URL with the new since_id query parameter,
            # preserving all other query parameters.
            parsed = urlparse(url)
            query_params = parse_qs(parsed.query)
            query_params['since_id'] = [str(since_id)]
            new_query = urlencode(query_params, doseq=True)
            url = f"{parsed.scheme}://{parsed.netloc}{parsed.path}?{new_query}"

        logger.info(f"🚀 API链接提取完毕: {url}")

    except Exception:
        # logger.exception logs the message plus the full traceback.
        # The original logger.error(f"...", e) passed e as a %-format
        # argument with no placeholder, which the logging module cannot
        # render — the error message was silently lost.
        logger.exception(f"❌处理API链接时发生错误: {url} ")
        traceback.print_exc()


def process_user_url(url, pageNum):
    """Process the original user home-page URL format.

    Resolves the user home-page URL to a container API URL, fetches up to
    pageNum pages, extracts article IDs from each JSON response and feeds
    them to parse_weibo_data, following the `since_id` cursor for paging.

    Args:
        url: A weibo.com user home-page URL.
        pageNum: Maximum number of pages to fetch.
    """
    # Resolve the user ID and container ID from the home-page URL.
    extract_result = extract_article_id(url, pageNum)

    if extract_result is None:
        logger.error(f"❌无效的URL格式: {url}")
        return

    user_id, container_id = extract_result
    api_url = (
        f"https://m.weibo.cn/api/container/getIndex"
        f"?type=uid&value={user_id}"
        f"&containerid={container_id}")
    # Publish the current user id so other modules can read it.
    myglobal.user_id = user_id

    try:
        logger.info(f"正在提取链接: {url}...")
        for _ in range(pageNum):
            # Request the container API for the current page.
            response = ownRequests(
                api_url,
                proxies=PROXY,
                verify=False)

            # Failed request — skip and retry on the next loop pass.
            if response is None:
                continue

            json_data = response.json()

            # Extract article IDs and process each article.
            article_ids = extract_article_ids_from_api_response(json_data)
            for article_id in article_ids:
                parse_weibo_data(article_id)

            # Pagination cursor; falsy means there is no next page.
            since_id = get_next_page_since_id(json_data, is_new_format=False)
            if not since_id:
                break
            # Next-page URL carries the since_id cursor.
            api_url = (
                f"https://m.weibo.cn/api/container/getIndex"
                f"?type=uid&value={user_id}"
                f"&containerid={container_id}&since_id={since_id}")

        logger.info(f"✅链接提取完毕: {url}")
        # Random pause between users to throttle requests.
        time.sleep(random.uniform(5, 10))
    except Exception:
        # logger.exception replaces the broken logger.error(msg, e) call,
        # whose extra argument had no %-placeholder and could not be rendered.
        logger.exception(f"❌处理链接时发生错误: {url} ")
        traceback.print_exc()

def main():
    """Run one scraping round: read URLs, process each, save results to JSON."""
    logger.info(" --- 【一轮开始】 --- ")
    # Record the start time for the round-duration log line.
    start_time = time.time()

    # One URL per line in the input file.
    with open(URLS_PATH, mode='r', encoding="utf-8") as f:
        weibo_urls = f.read().splitlines()
    logger.info(f"🚀 读取到的URL: {weibo_urls}")

    # A single worker keeps requests sequential (gentle on rate limits).
    MAX_WORKERS = 1

    results = {}
    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
        # Submit every URL as its own task.
        future_to_url = {executor.submit(process_weibo_url, url): url for url in weibo_urls}
        # Collect results as tasks finish.
        for future in concurrent.futures.as_completed(future_to_url):
            url = future_to_url[future]
            try:
                results[url] = future.result()
            except Exception as e:
                logger.error(f"❌处理 {url} 时发生异常: {e}")
                # Store str(e), not the Exception object: raw exceptions are
                # not JSON-serializable and would crash json.dump below.
                results[url] = {'url': url, 'error': str(e)}

    end_time = time.time()
    logger.info(f"✅ 所有任务完成，耗时 {end_time - start_time:.2f} 秒")

    # Count URLs that finished without an error entry.
    success_count = sum(1 for result in results.values()
                        if result is not None and 'error' not in result)
    logger.info(f"✅ 成功 {success_count}/{len(results)} 个URL")

    # Persist this round's results.
    with open('weibo_users_results.json', 'w', encoding='utf-8') as f:
        json.dump(results, f, ensure_ascii=False, indent=2)

    logger.info("✅结果已保存到 weibo_users_results.json")
    logger.info(" --- 【一轮结束】 --- ")

if __name__ == "__main__":
    # Example entries for the URLs input file. The first four are user
    # home pages; the last is a super-topic (超话) API URL, which contains
    # 'extparam' and therefore takes the process_api_url code path.
    """
    https://weibo.com/u/5748988380，小屁屁找
    https://weibo.com/u/2794284831，好心天天分享
    https://weibo.com/u/6439151235，败家少女的日常
    https://weibo.com/u/5371906414，佩奇线报
    微博超话 https://m.weibo.cn/p/index?extparam=U%E5%85%88%E7%B4%A0%E8%B4%A8%E8%AF%95%E7%94%A8&containerid=100808c9f05766724555f036b3383f5cb97e33&luicode=20000061&lfid=5183171777004794
    """
    # Run rounds forever, pausing a short random interval between rounds.
    while True:
        main()
        time.sleep(random.uniform(1, 5))
