from concurrent.futures import ThreadPoolExecutor
from datetime import datetime

from config import headers, download_directory
import os
from urllib.parse import urlparse
import requests
from WbMysql import insert_image_url


def getSinceId(urlPath, uid, type1):
    """Fetch one timeline page and return (since_id, image_count).

    Args:
        urlPath: full mymblog API URL for one page.
        uid: weibo user id, forwarded to parseList.
        type1: '1' means "also download images", forwarded to parseList.

    Returns:
        ("", 0) when the page holds no posts (this also terminates the
        pagination loop in down_image); otherwise the next since_id and
        the number of image URLs found on this page.
    """
    # Parse the JSON body once instead of calling resp.json() repeatedly.
    payload = requests.get(urlPath, headers=headers, timeout=30).json()
    data = payload.get("data", {})
    post_list = data.get("list", [])  # avoid shadowing the builtin `list`
    if not post_list:
        return "", 0
    since_id = data.get("since_id", "")
    num = parseList(post_list, uid, type1)
    return since_id, num


def parseList(list, uid, type1):
    """Collect the largest-size image URLs from one page of weibo posts.

    Args:
        list: list of post dicts from the mymblog API (name kept for
            backward compatibility; it shadows the builtin).
        uid: weibo user id, used only when downloading.
        type1: '1' triggers a parallel download of the collected URLs.

    Returns:
        The number of usable image URLs found on this page.
    """
    thumbnail_urls = []
    # Bug fix: formatted_date was previously unbound when `list` was empty,
    # raising NameError at the download call below.
    formatted_date = ""
    for item in list:
        date_str = item.get("created_at")
        # Weibo dates look like "Tue May 07 10:00:00 +0800 2024".
        parsed_date = datetime.strptime(date_str, "%a %b %d %H:%M:%S %z %Y")
        formatted_date = parsed_date.strftime("%Y%m%d")
        pic_infos = item.get("pic_infos", {})
        for pic_info in pic_infos.values():
            largest_url = pic_info.get("largest", {}).get("url")
            # Bug fix: skip missing URLs instead of appending None, which
            # previously made download_image call requests.get(None).
            if largest_url:
                thumbnail_urls.append(largest_url)
    print(len(thumbnail_urls))
    if type1 == '1':
        print("start download_images_in_parallel")
        uid_name = get_uid_name(uid)
        # NOTE(review): formatted_date here is the date of the LAST post on
        # the page, so every image of this page shares one date prefix —
        # confirm this is the intended naming scheme.
        download_images_in_parallel(thumbnail_urls, uid, uid_name, formatted_date, 5)
    return len(thumbnail_urls)


def download_images_in_parallel(urls, uid, uid_name, formatted_date, max_workers=5):
    """Download every URL in *urls* concurrently on a bounded thread pool.

    Blocks until the pool is drained (the `with` block waits on exit).
    """
    def _fetch(image_url):
        # Bind the shared per-batch arguments around each download call.
        download_image(image_url, uid, uid_name, formatted_date)

    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        pool.map(_fetch, urls)


def download_image(url, uid, uid_name, formatted_date):
    """Download one image into <download_directory>/<uid_name>/<uid>/.

    The file is saved as "<formatted_date>-<basename of the URL path>" so
    images from the same batch get distinct names. All failures are logged
    and swallowed (best-effort, called from worker threads).
    """
    try:
        print(url)
        # Renamed from `re` to avoid shadowing the stdlib module name;
        # timeout keeps a stuck server from hanging a worker thread forever.
        resp = requests.get(url, timeout=30)
        if resp.status_code == 200:
            parsed_url = urlparse(url)
            basename = os.path.basename(parsed_url.path)
            # Bug fix: the computed basename was previously overwritten by a
            # constant per-batch name, so every image of a batch collapsed
            # onto a single file.
            filename = f"{formatted_date}-{basename}"
            uid_download_directory = f"{download_directory}/{uid_name}/{uid}"
            name = os.path.join(uid_download_directory, filename)
            with open(name, 'wb') as file:
                file.write(resp.content)
            print(f"图片已保存到: {name}")
            # insert_image_url(url, filename, uid, uid_name)
        else:
            print(f"下载失败，URL: {url}")
    except requests.RequestException as e:
        # Network-level failures (connection errors, timeouts, ...).
        print(f"下载失败，URL: {url}，错误信息: {e}")
    except Exception as e:
        # Anything else (missing directory, disk write failure, ...).
        print(f"处理文件失败，URL: {url}，错误信息: {e}")


def down_image(uid, type1):
    """Page through a user's weibo timeline, counting (and optionally
    downloading, when type1 == '1') all image URLs.

    Returns:
        Total number of image URLs found across all pages.
    """
    uid_name = get_uid_name(uid)
    uid_download_directory = f"{download_directory}/{uid_name}/{uid}"
    # exist_ok replaces the racy exists()+makedirs() pair: a directory
    # created between the check and the call no longer raises.
    os.makedirs(uid_download_directory, exist_ok=True)
    page = 1
    since_id = "1"  # sentinel meaning "first page, no since_id parameter"
    count = 0
    # The API returns an empty since_id on the last page, ending the loop.
    while since_id:
        if since_id == "1":
            url = f"https://weibo.com/ajax/statuses/mymblog?uid={uid}&feature=0&page={page}"
        else:
            url = f"https://weibo.com/ajax/statuses/mymblog?uid={uid}&feature=0&page={page}&since_id={since_id}"
        print(url)
        since_id, num = getSinceId(url, uid, type1)
        count += num
        print(since_id)
        print(num)
        page += 1
    print(count)
    return count


def get_uid_name(uid):
    """Fetch a user's screen_name from the weibo profile API.

    Returns:
        The screen name string, or "" on any failure.
    """
    url = f"https://weibo.com/ajax/profile/info?uid={uid}"
    try:
        resp = requests.get(url, headers=headers, timeout=30)
        # Bug fix: the missing-key default was {} although screen_name is a
        # string; "" keeps the return type consistent with the error path.
        return resp.json().get("data", {}).get("user", {}).get("screen_name", "")
    except Exception as e:
        # Best-effort lookup: log and fall through to the empty default.
        print(f"get_uid_name，URL: {url}，错误信息: {e}")
    return ""
