import csv
import logging
import os
from datetime import datetime, timedelta, timezone

import redis
import requests
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.db.models import Q
from requests import HTTPError

from AchieveHandler.models import MaterialVideoData, MaterialImageData
from AchieveHandler.utils import oss_utils

logger = logging.getLogger('AchieveHandler')


def split_day(start_time, end_time):
    """Split the range [start_time, end_time) into consecutive one-day intervals.

    Fixes two defects of the previous implementation:
    - a trailing partial day (span not an exact multiple of 24h) was
      silently dropped, leaving those records unprocessed;
    - datetimes were round-tripped through local-time ``fromtimestamp``
      while the parse treated them as UTC, shifting results by the host's
      UTC offset.

    :param start_time: 'YYYY-mm-dd HH:MM:SS' string, inclusive lower bound
    :param end_time: 'YYYY-mm-dd HH:MM:SS' string, exclusive upper bound
    :return: list of [start, end] naive-datetime pairs, each at most one day long
    """
    start = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
    end = datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S")
    one_day = timedelta(days=1)
    result = []
    if end - start <= one_day:
        result.append([start, end])
    else:
        while end - start >= one_day:
            result.append([start, start + one_day])
            start += one_day
        if start < end:
            # keep the final partial day instead of dropping it
            result.append([start, end])
    return result

def get_value_from_redis(key):
    """Return the string value stored under *key* in Redis, or None if absent.

    A fresh client is created per call from ``settings.REDIS_CONFIG``;
    ``decode_responses=True`` yields ``str`` rather than ``bytes``.
    """
    conf = settings.REDIS_CONFIG
    client = redis.Redis(
        host=conf['host'],
        port=conf['port'],
        db=conf['db'],
        decode_responses=True,
    )
    return client.get(key)


def set_value_to_redis(key, value):
    """Store *value* under *key* in Redis; returns the client's SET result.

    Uses the same per-call connection pattern as ``get_value_from_redis``.
    """
    conf = settings.REDIS_CONFIG
    client = redis.Redis(
        host=conf['host'],
        port=conf['port'],
        db=conf['db'],
        decode_responses=True,
    )
    return client.set(key, value)

# redis时间戳到当前时间的所有视频素材

# video_type 6 素材视频  8 成品
def load_videos(start_time, end_time, video_types):
    """Archive and purge video materials created within [start_time, end_time).

    Exports every matching row to a per-day CSV under BACKUP_PATH, then for
    each record not yet flagged as deleted: downloads a local backup copy,
    removes the object from OSS, and finally marks the DB row deleted.
    The DB flag is set LAST (previously it was set first), so a failed
    backup no longer leaves a record flagged deleted while its file was
    never archived — the record is retried on the next run instead.

    :param start_time: inclusive datetime lower bound (also names the CSV)
    :param end_time: exclusive datetime upper bound
    :param video_types: list of type codes (6 = raw material video, 8 = finished)
    """
    video_data_list = MaterialVideoData.objects.filter(
        Q(type__in=video_types) & Q(create_time__gte=start_time) & Q(create_time__lt=end_time)
    )
    if not video_data_list:
        return
    # same file name as strftime('%Y-%m-%d %H:%M:%S')[0:10]
    day = start_time.strftime('%Y-%m-%d')
    export_to_file(settings.BACKUP_PATH + f"/info/video/{day}.csv", video_data_list, 1)
    for video_data in video_data_list:
        # any value other than 0 (including None) means "already handled"
        if video_data.deleted != 0:
            continue
        url = video_data.url
        try:
            backup_file(url)
            oss_utils.delete_from_oss(url, 'crowdpack')
            # flag only after backup + OSS delete both succeeded
            MaterialVideoData.objects.filter(id=video_data.id).update(deleted=1)
        # RequestException covers HTTPError plus connection/timeout errors,
        # which previously aborted the whole run; OSError covers disk failures
        except (requests.RequestException, OSError) as e:
            logger.error("备份文件失败", exc_info=e)

# image_type 1 原图  2 成图
def load_images(start_time, end_time, image_types):
    """Archive and purge image materials created within [start_time, end_time).

    Mirror of ``load_videos`` for images: export matching rows to a per-day
    CSV, then back up, delete from OSS, and only then mark each row deleted.
    The DB flag is set LAST (previously it was set first), so a failed
    backup no longer strands a record as deleted without an archived copy.

    :param start_time: inclusive datetime lower bound (also names the CSV)
    :param end_time: exclusive datetime upper bound
    :param image_types: list of type codes (1 = original image, 2 = finished)
    """
    image_data_list = MaterialImageData.objects.filter(
        Q(type__in=image_types) & Q(create_time__gte=start_time) & Q(create_time__lt=end_time)
    )
    if not image_data_list:
        return
    # same file name as strftime('%Y-%m-%d %H:%M:%S')[0:10]
    day = start_time.strftime('%Y-%m-%d')
    export_to_file(settings.BACKUP_PATH + f"/info/image/{day}.csv", image_data_list, 0)
    for image_data in image_data_list:
        # any value other than 0 (including None) means "already handled"
        if image_data.deleted != 0:
            continue
        url = image_data.url
        try:
            backup_file(url)
            oss_utils.delete_from_oss(url, 'crowdpack')
            # flag only after backup + OSS delete both succeeded
            MaterialImageData.objects.filter(id=image_data.id).update(deleted=1)
        # RequestException covers HTTPError plus connection/timeout errors,
        # which previously aborted the whole run; OSError covers disk failures
        except (requests.RequestException, OSError) as e:
            logger.error("备份文件失败", exc_info=e)


# 结果导出到csv中
def export_to_file(file_path, data, data_type):
    """Write material records to a CSV file at *file_path* (overwriting it).

    The CSV columns are exactly the model attribute names, so rows are
    built with ``getattr`` instead of two hand-maintained parallel lists —
    the previous duplication made column/attribute drift easy.

    :param file_path: destination path; parent directories are created
    :param data: iterable of objects exposing the listed attributes
    :param data_type: 0 = image records, any other value = video records
    """
    if data_type == 0:
        field_names = ['id', 'material_name', 'url', 'type', 'user_id', 'width', 'height', 'create_time', 'update_time', 'app_id', 'media_type', 'platform_type', 'source_type', 'lay_type', 'layout_type', 'ratio_type', 'color_style', 'position_infos', 'cover_mode', 'extra_placements', 'price_info', 'file_md5']
    else:
        field_names = ['id', 'material_name', 'url', 'type', 'user_id', 'cover', 'price_info', 'width', 'height', 'duration', 'category_id', 'source_type', 'create_time', 'update_time', 'app_id', 'platform_type', 'mix_type', 'file_md5']
    rows = [[getattr(item, name) for name in field_names] for item in data]

    directory = os.path.dirname(file_path)
    if directory:
        # exist_ok avoids the check-then-create race of the old exists() test
        os.makedirs(directory, exist_ok=True)
    # explicit UTF-8 so non-ASCII material names survive any locale default
    with open(file_path, "w", newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow(field_names)
        writer.writerows(rows)


# 获取素材路径，按路径将文件保存到本地
def backup_file(url):
    """Download the OSS object at *url* and save it under BACKUP_PATH + url.

    Streams the response to disk in 8 KiB chunks so large media files do
    not need to fit in memory.

    :param url: object path appended to both the bucket host and
        BACKUP_PATH — assumes it starts with '/' (TODO confirm with callers)
    :raises requests.HTTPError: on a non-2xx response
    :raises requests.RequestException: on connection or timeout failure
    """
    src = "https://crowdpack.oss-cn-beijing.aliyuncs.com" + url
    local_path = settings.BACKUP_PATH + url
    directory = os.path.dirname(local_path)
    if directory:
        # exist_ok avoids the check-then-create race of the old exists() test
        os.makedirs(directory, exist_ok=True)
    # (connect timeout, read timeout) in seconds
    with requests.get(src, stream=True, timeout=(60, 60)) as res:
        res.raise_for_status()
        with open(local_path, 'wb') as f:
            for chunk in res.iter_content(chunk_size=8192):
                f.write(chunk)


def datetime_to_milliseconds(dt_str: str, format_str: str = "%Y-%m-%d %H:%M:%S") -> int:
    """Parse *dt_str* with *format_str* and return milliseconds since the Unix epoch.

    The string is interpreted as UTC wall-clock time.
    """
    parsed = datetime.strptime(dt_str, format_str)
    # attach UTC explicitly; timestamp() then measures from the epoch exactly
    # as (dt - datetime(1970, 1, 1)).total_seconds() would
    as_utc = parsed.replace(tzinfo=timezone.utc)
    return int(as_utc.timestamp() * 1000)


def milliseconds_to_datetime(milliseconds: int) -> datetime:
    """Convert epoch milliseconds to a naive UTC datetime.

    Exact inverse of ``datetime_to_milliseconds``. The previous version
    used ``datetime.fromtimestamp``, which converts to LOCAL time despite
    its comment claiming UTC — shifting results by the host's UTC offset
    on any non-UTC machine.

    :param milliseconds: milliseconds since 1970-01-01 00:00:00 UTC
    :return: naive datetime expressing that instant in UTC
    """
    return datetime(1970, 1, 1) + timedelta(milliseconds=milliseconds)

def backup_material():
    """Entry point: archive and purge material files older than three months.

    Resumes from the checkpoint persisted in Redis by the previous run
    (the read had been commented out, so every run reprocessed the whole
    range from the hard-coded date even though the checkpoint was still
    being written at the end). Falls back to a fixed initial date when no
    checkpoint exists, processes the range one day at a time, then
    advances the checkpoint.
    """
    # resume from the end time persisted by the previous run
    start_date = get_value_from_redis('material_achieve:last_backup_datetime')
    if start_date is None:
        start_date = '2025-02-20 00:00:00'
    # archive everything older than three months, aligned to midnight
    end_date = (datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
                - relativedelta(months=3)).strftime('%Y-%m-%d %H:%M:%S')
    logger.info(f"start_time: {start_date}, end_time: {end_date}")
    for day_start, day_end in split_day(start_date, end_date):
        load_videos(day_start, day_end, [6, 8])
        load_images(day_start, day_end, [1, 2])
    set_value_to_redis('material_achieve:last_backup_datetime', end_date)