# 爬取猫眼专业版网站的数据，获取对应的数据
import requests
import csv
import calendar
import datetime
import decoder as de
from zoneinfo import ZoneInfo
import concurrent.futures
import threading
import pandas as pd
import os


def get_root_dir():
    """Return the absolute path of the project root (parent of this file's directory)."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, ".."))


# 线程安全计数器
class ThreadSafeCounter:
    """Integer counter whose updates are protected by a lock."""

    def __init__(self):
        self.value = 0  # current count; read under self.lock for consistency
        self.lock = threading.Lock()

    def increment(self, amount=1):
        """Atomically add *amount* and return the resulting value.

        The read for the return value happens inside the lock: the original
        read ``self.value`` after releasing the lock, so a concurrent
        increment could make the returned value differ from the result of
        this call's own addition.
        """
        with self.lock:
            self.value += amount
            return self.value


def previous_year_date_range():
    """Return a daily ``DatetimeIndex`` covering the previous year.

    Spans from one year before now up to yesterday, one entry per day.
    A single ``now`` snapshot is taken for both endpoints: the original
    called ``pd.Timestamp.now()`` twice, and the two calls could straddle
    a tick (or even a day boundary), producing inconsistent endpoints.
    """
    now = pd.Timestamp.now()
    return pd.date_range(
        start=now - pd.DateOffset(years=1),
        end=now - pd.DateOffset(days=1),
        freq="D",  # one entry per day
    )


def get_year_month_days(year, month):
    """Return ``(year, month, days)`` where *days* is the number of days in that month."""
    days_in_month = calendar.monthrange(year, month)[1]
    return year, month, days_in_month


def format_timedelta(start: datetime.datetime, end: datetime.datetime) -> str:
    """Return the duration between *start* and *end* as a Chinese h/m/s string.

    Note: the original annotated the parameters as ``datetime``, which —
    given ``import datetime`` at module level — is the *module*, not the
    type; ``datetime.datetime`` is the intended annotation.
    """
    delta = end - start
    total_seconds = delta.total_seconds()
    return format_timedelta_ch(total_seconds)


def format_timedelta_ch(total_seconds) -> str:
    """Render a duration in seconds as a zero-padded Chinese h/m/s string.

    Leading units are omitted when zero (e.g. 61s -> "01分01秒").
    """
    hours, rest = divmod(total_seconds, 3600)
    minutes, seconds = divmod(rest, 60)
    h, m, s = int(hours), int(minutes), int(seconds)
    if hours > 0:
        return f"{h:02d}小时{m:02d}分{s:02d}秒"
    if minutes > 0:
        return f"{m:02d}分{s:02d}秒"
    return f"{s:02d}秒"


def format_timedelta_en(total_seconds) -> str:
    """Render a duration in seconds as a parenthesized English h/m/s string.

    Leading units are omitted when zero (e.g. 61s -> "(01m 01s)").
    """
    hours, rest = divmod(total_seconds, 3600)
    minutes, seconds = divmod(rest, 60)
    h, m, s = int(hours), int(minutes), int(seconds)
    if hours > 0:
        return f"({h:02d}h {m:02d}m {s:02d}s)"
    if minutes > 0:
        return f"({m:02d}m {s:02d}s)"
    return f"({s:02d}s)"


def get_html(url):
    """GET *url* with browser-like headers and return the parsed JSON payload.

    Returns the decoded JSON on HTTP 200; otherwise prints "ERROR" and
    returns None.  The headers (User-Agent, Referer, Cookie) mimic a real
    browser session, copied from DevTools (F12 -> Network), to avoid the
    site's anti-scraping checks.  NOTE(review): the hard-coded Cookie will
    eventually expire — refresh it from a live session if requests start
    failing.
    """
    headers = {  # copied verbatim from a real browser request
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,\
                    image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "Cache-Control": "max-age=0",
        "Connection": "keep-alive",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko)\
                        Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
        "referer": "https://piaofang.maoyan.com/dashboard",
        "Cookie": "_lxsdk_cuid=18a0220df58c8-09d9a61491c11a-7c54647e-1fa400-18a0220df58c8; \
                    uuid=18a0220df58c8-09d9a61491c11a-7c54647e-1fa400-18a0220df58c8; theme=moviepro; \
                    Hm_lvt_703e94591e87be68cc8da0da7cbd0be2=1717814735; \
                    Hm_lpvt_703e94591e87be68cc8da0da7cbd0be2=1717815923; \
                    _lxsdk=18a0220df58c8-09d9a61491c11a-7c54647e-1fa400-18a0220df58c8; \
                    _lx_utm=utm_source%3Dbing%26utm_medium%3Dorganic; \
                    _lxsdk_s=18ff5b75146-7-cb0-96e%7C%7C97",
    }

    # Without a timeout, requests.get can block a worker thread forever on a
    # stalled connection; 30s is generous for this endpoint.
    result = requests.get(url, headers=headers, timeout=30)

    if result.status_code == 200:
        return result.json()
    print("ERROR")
    return None


def download_html(
    url: str = "https://piaofang.maoyan.com/dashboard-ajax/movie",
    year: int = 2025,
    month: int = 10,
    day: int = 1,
):
    """Fetch the dashboard JSON for one calendar day.

    Builds a ``showDate=YYYYMMDD`` query parameter (month and day
    zero-padded) and delegates the request to :func:`get_html`.
    """
    show_date = f"{year}{month:02d}{day:02d}"
    return get_html(f"{url}?showDate={show_date}")


def to_csv_by_day(html, year, month, day, html_dir=f"{get_root_dir()}/爬取到的数据/最新数据/csv缓存"):
    # Write one day's movie data from *html* (the parsed dashboard JSON) to a
    # per-day CSV file under *html_dir*.
    #
    # NOTE(review): assumes *html* follows the Maoyan dashboard-ajax schema
    # (html["movieList"]["list"] -> per-movie dicts with "movieInfo" and
    # "boxSplitUnit") — verify against get_html's response if the site changes.
    # If any expected key is missing, an empty row is written and the rest of
    # the day's movies are skipped (the except aborts the whole loop).
    with open(
        f"{html_dir}/movies_{year}年{month}月{day}.csv",
        mode="w",
        newline="",
        encoding="utf-8",
    ) as file:
        writer = csv.writer(file)
        # Header row (column titles, in Chinese: date, seat occupancy, avg
        # attendance per show, box-office share, movie id/name, days since
        # release, show count/share, daily box office, daily tickets, daily shows)
        writer.writerow(
            [
                "日期",
                "上座率",
                "场均人次",
                "票房占比",
                "电影ID",
                "电影名字",
                "已上映天数",
                "排片场次",
                "排片占比",
                "当日票房",
                "当日总出票",
                "当日总场次",
            ]
        )
        # One data row per movie, columns matching the header above.
        # print(html)
        try:
            for movie in html["movieList"]["list"]:
                movie_info = movie["movieInfo"]
                num_info = movie["boxSplitUnit"]
                writer.writerow(
                    [
                        # NOTE(review): month is NOT zero-padded here while day
                        # is — e.g. 5 Jan -> "105". Downstream parsing may rely
                        # on this format, so it is preserved as-is.
                        (str(month) + str("{:02d}".format(day))),
                        movie["avgSeatView"],
                        movie["avgShowView"],
                        movie["boxRate"],
                        movie_info["movieId"],
                        movie_info["movieName"],
                        movie_info["releaseInfo"],
                        movie["showCount"],
                        movie["showCountRate"],
                        de.decode_html_entities(num_info["num"]),
                        html["movieList"]["nationBoxInfo"]["viewCountDesc"],
                        html["movieList"]["nationBoxInfo"]["showCountDesc"],
                    ]
                )  # box-office figure is decoded via the project decoder before writing
        except KeyError:
            writer.writerow("")


def download_write_by_day(input_year, input_month, current_day):
    """Fetch and persist one day's box-office data, logging a timestamped line."""
    stamp = datetime.datetime.now(ZoneInfo("Asia/Shanghai")).strftime("%Y-%m-%d %H:%M:%S")
    print(f"{stamp} - 处理日期：{input_year:04d}年{input_month:02d}月{current_day:02d}日")
    data = download_html(year=input_year, month=input_month, day=current_day)
    to_csv_by_day(data, input_year, input_month, current_day)


def main():
    """Crawl the past year's daily box-office data with a thread pool and report timing."""
    start_time = datetime.datetime.now()

    # Thread-safe tally of completed days.
    completed = ThreadSafeCounter()

    # One (year, month, day) task per day of the previous year.
    tasks = [(d.year, d.month, d.day) for d in previous_year_date_range()]

    # 4-8 workers is a reasonable balance of throughput vs. politeness.
    with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
        future_to_task = {
            executor.submit(download_write_by_day, y, m, d): (y, m, d)
            for y, m, d in tasks
        }

        # Drain results as they finish; .result() re-raises worker exceptions.
        for future in concurrent.futures.as_completed(future_to_task):
            future.result()
            completed.increment()

    # Final summary.
    stamp = datetime.datetime.now(ZoneInfo("Asia/Shanghai")).strftime("%Y-%m-%d %H:%M:%S")
    print(f"{stamp} - 爬取完成：共{completed.value}天数据")

    elapsed = format_timedelta(start_time, datetime.datetime.now())
    stamp = datetime.datetime.now(ZoneInfo("Asia/Shanghai")).strftime("%Y-%m-%d %H:%M:%S")
    print(f"{stamp} - 爬取时长：{elapsed}")


# Run the crawler only when executed as a script (not on import).
if __name__ == "__main__":
    main()
