"""
检查德国气象局ICON模型文件夹的创建/更新时间
"""
import bz2
import os
import re
import tempfile
import time
from datetime import datetime
from urllib.parse import urljoin

import numpy as np
import pandas as pd
import requests
import xarray as xr
from bs4 import BeautifulSoup

# Local directory where downloaded / decompressed / regridded files live.
DOWNLOAD_DIR = './data/'

# Latest regridded forecast DataFrames. Written by the scheduled download
# task and read by the /get_forecast endpoint.
best_df = None
best_previous_df = None


def parse_icon_filename(filename):
    """
    Parse an ICON filename and extract its time information.

    Filename format:
    icon_global_icosahedral_single-level_2025092306_000_TOT_PREC.grib2.bz2

    Parameters:
    - filename: ICON GRIB file name (str).

    Returns:
    - base_time: model run (issue) time as a naive datetime.
    - forecast_hour: forecast lead time in hours (int).

    Raises:
    - ValueError: if the filename does not match the expected pattern.
    """

    # Capture the YYYYMMDDHH run timestamp and the 3-digit lead time,
    # e.g. the "2025092306_000" part.
    pattern = r'icon_global_icosahedral_single-level_(\d{10})_(\d{3})_'

    match = re.search(pattern, filename)
    if not match:
        # Bug fix: the original message contained the literal text
        # "(unknown)" instead of the offending filename.
        raise ValueError(f"无法解析文件名格式: {filename}")

    date_time_str = match.group(1)  # e.g. 2025092306
    forecast_hour_str = match.group(2)  # e.g. 000

    # Parse the issue time (YYYYMMDDHH) in one step instead of slicing
    # year/month/day/hour by hand.
    base_time = datetime.strptime(date_time_str, '%Y%m%d%H')

    # Lead time in hours, e.g. '000' -> 0.
    forecast_hour = int(forecast_hour_str)

    return base_time, forecast_hour


def get_file_list(url):
    """
    Fetch the HTML directory listing at *url* and return metadata for
    every ICON GRIB file linked from it.

    Returns a list of dicts with keys: 'name', 'url', 'create_utc_time',
    'forecast_hour'. Returns [] on any network error.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }
        response = requests.get(url, headers=headers, timeout=30)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')
        files = []

        # Walk every anchor in the index page.
        for link in soup.find_all('a', href=True):
            href = link['href']
            # Keep only GRIB files (plain .grib2 or bz2-compressed).
            if not (href.endswith(('.grib2', '.bz2')) and 'icon_global' in href):
                continue

            # Robustness fix: previously a single unparseable filename
            # raised ValueError and aborted the whole listing; now such
            # entries are skipped.
            try:
                base_time, forecast_hour = parse_icon_filename(href)
            except ValueError as e:
                print(f"⚠️ 跳过无法解析的文件名: {href} ({e})")
                continue

            files.append({
                'name': href,
                'url': urljoin(url, href),
                'create_utc_time': base_time,
                'forecast_hour': forecast_hour
            })

        return files

    except requests.RequestException as e:
        print(f"❌ 获取文件列表失败: {e}")
        return []


def download_file(file_url, file_name, download_dir, max_retries=3):
    """
    Download a single file into *download_dir*, retrying on failure.

    Returns True on success (or when the file already exists locally),
    False after exhausting all retries. A partially written file is
    removed when the final attempt fails.
    """
    local_path = os.path.join(download_dir, file_name)

    # Fetched on an earlier run — nothing to do.
    if os.path.exists(local_path):
        print(f"⏭️  跳过已存在文件: {file_name}")
        return True

    print(f"📥 下载中: {file_name}")

    # Request headers are identical for every attempt, so build them once.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
    }

    for attempt in range(max_retries):
        try:
            response = requests.get(file_url, headers=headers, stream=True, timeout=300)
            response.raise_for_status()

            # Content-Length may be absent; 0 then means "size unknown".
            total_size = int(response.headers.get('content-length', 0))

            downloaded = 0
            with open(local_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if not chunk:
                        continue
                    f.write(chunk)
                    downloaded += len(chunk)

                    # Progress display: percentage when the size is known,
                    # raw byte count otherwise.
                    if total_size > 0:
                        percent = (downloaded / total_size) * 100
                        print(
                            f"\r   进度: {percent:.1f}% ({downloaded / 1024:.1f}KB/{total_size / 1024:.1f}KB)",
                            end='')
                    else:
                        print(f"\r   已下载: {downloaded / 1024:.1f}KB", end='')

            print(f"\n✅ 完成: {file_name}")
            return True

        except Exception as e:
            print(f"\n❌ 下载失败 (尝试 {attempt + 1}/{max_retries}): {e}")
            if attempt < max_retries - 1:
                print("⏳ 等待5秒后重试...")
                time.sleep(5)
            else:
                print(f"💔 放弃下载: {file_name}")
                # Drop the partially-written file.
                if os.path.exists(local_path):
                    os.remove(local_path)
                return False


def decompress_bz2(file_path, keep_decompressed=True):
    """
    Decompress a .bz2 file.

    Parameters:
    - file_path: path to the (possibly) bz2-compressed file.
    - keep_decompressed: if True, write a permanent file next to the
      source (same name without '.bz2'); if False, write to a temporary
      .grib2 file instead.

    Returns the path to the decompressed file. A path that does not end
    in '.bz2' is returned unchanged. Re-raises any decompression error
    after removing the partial output.
    """
    if not str(file_path).endswith('.bz2'):
        return file_path

    print(f"解压文件: {file_path}")

    # Stream in fixed-size chunks instead of compressed.read(): GRIB
    # files can be large, and a full read() holds the whole decompressed
    # payload in memory at once.
    chunk_size = 1024 * 1024

    if keep_decompressed:
        output_path = file_path.replace('.bz2', '')

        # Skip work if a previous run already decompressed it.
        if os.path.exists(output_path):
            print(f"文件已存在: {output_path}")
            return output_path

        try:
            with bz2.open(file_path, 'rb') as compressed:
                with open(output_path, 'wb') as decompressed:
                    while True:
                        chunk = compressed.read(chunk_size)
                        if not chunk:
                            break
                        decompressed.write(chunk)

            print(f"解压完成: {output_path}")
            return output_path

        except Exception as e:
            print(f"解压失败: {e}")
            # Remove the partial output so a later retry starts clean.
            if os.path.exists(output_path):
                os.remove(output_path)
            raise
    else:
        # Temporary-file variant for throwaway use.
        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.grib2')

        try:
            with bz2.open(file_path, 'rb') as compressed:
                while True:
                    chunk = compressed.read(chunk_size)
                    if not chunk:
                        break
                    temp_file.write(chunk)
            temp_file.close()
            return temp_file.name
        except Exception as e:
            print(f"解压失败: {e}")
            # Bug fix: close the handle before unlinking so removal also
            # succeeds on Windows (open files cannot be deleted there).
            temp_file.close()
            if os.path.exists(temp_file.name):
                os.unlink(temp_file.name)
            raise


def find_best_forecast(forecast_files, min_buffer_minutes=10):
    """
    Find the forecast valid closest to the UTC time three hours from now
    and return two files with consecutive lead times (used downstream to
    difference accumulated precipitation).

    Parameters:
    - forecast_files: forecast file list pre-sorted by
      (create_utc_time DESC, forecast_hour ASC)
    - min_buffer_minutes: minimum buffer the valid time must lie ahead
      of the target time (minutes)

    Returns:
    - dict with keys 'current_file', 'previous_file' and
      'time_interval_hours', or None if no suitable pair exists.
    """

    # NOTE(review): `timedelta` is imported further down this module; it
    # is bound by the time this function is first called at runtime.
    after_three_hour_utc = datetime.utcnow() + timedelta(hours=3)
    buffer_time = timedelta(minutes=min_buffer_minutes)

    print(f"三小时后的UTC时间: {after_three_hour_utc.strftime('%Y-%m-%d %H:%M:%S')} (缓冲 {min_buffer_minutes} 分钟)")

    for i, file_info in enumerate(forecast_files):

        # Skip index 0 so forecast_files[i - 1] is always valid below.
        if i < 1:
            continue

        # Skip 000 lead-time files entirely.
        if file_info['forecast_hour'] == 0:
            print(f"⏩ 跳过000时效文件: {file_info['name']}")
            continue

        # Valid time = issue time + lead hours.
        valid_utc_time = file_info['create_utc_time'] + timedelta(hours=file_info['forecast_hour'])
        time_diff = valid_utc_time - after_three_hour_utc

        print(f"检查文件: {file_info['name']} | valid_utc_time: {valid_utc_time} | 距现在: {time_diff}")

        if time_diff >= buffer_time:
            # Found a current file satisfying the buffer requirement.
            current_file = file_info.copy()
            current_file['valid_utc_time'] = valid_utc_time
            current_file['time_diff_minutes'] = time_diff.total_seconds() / 60

            # Look for the matching predecessor in the sorted list.
            previous_file = None

            # The immediately preceding entry (index i - 1) must share the
            # same issue time and be exactly one lead hour earlier.
            if i >= 1:
                prev_candidate = forecast_files[i - 1]
                # Same issue time with a consecutive lead hour?
                if (prev_candidate['create_utc_time'] == file_info['create_utc_time'] and
                        prev_candidate['forecast_hour'] == file_info['forecast_hour'] - 1):
                    previous_file = prev_candidate.copy()
                    prev_valid_time = prev_candidate['create_utc_time'] + timedelta(
                        hours=prev_candidate['forecast_hour'])
                    previous_file['valid_utc_time'] = prev_valid_time
                    print(f"📁 找到连续时效的前一文件: {previous_file['name']}")

            # A predecessor exists — return the pair.
            if previous_file:
                time_interval_hours = (current_file['valid_utc_time'] - previous_file[
                    'valid_utc_time']).total_seconds() / 3600

                result = {
                    'current_file': current_file,
                    'previous_file': previous_file,
                    'time_interval_hours': time_interval_hours
                }

                print(f"✅ 选择当前文件: {current_file['name']} (时效{current_file['forecast_hour']:03d})")
                print(f"✅ 选择前一文件: {previous_file['name']} (时效{previous_file['forecast_hour']:03d})")
                print(f"📊 时间间隔: {time_interval_hours:.1f} 小时")

                return result
            else:
                # No consecutive predecessor for this candidate — keep
                # scanning later (older-run / longer-lead) entries.
                print(f"⚠️ 找到当前文件但无法找到对应的前一文件: {current_file['name']}")
                continue

        else:
            print(f"⏩ 跳过：不满足缓冲时间（{time_diff.total_seconds() / 60:.1f} 分钟）")

    print("⚠️ 没有找到符合条件的预报文件对")
    return None


from flask import Flask
from flask_cors import CORS
import logging
from datetime import timedelta

from flask import Blueprint, request
from flask import jsonify
from flask_apscheduler import APScheduler

# Blueprint for the forecast API; all routes are mounted under /aifs.
bp = Blueprint('aifs', __name__, url_prefix='/aifs')


def _nearest_point(df, lat, lon):
    """
    Return the row of *df* nearest to (lat, lon).

    Distance is the simple Euclidean distance in degrees; a 'distance'
    column is added to *df* as a side effect, so callers pass a copy.
    """
    df['distance'] = np.sqrt(
        (df['latitude'] - lat) ** 2 +
        (df['longitude'] - lon) ** 2
    )
    return df.loc[df['distance'].idxmin()]


@bp.post('/get_forecast')
def get_forecast():
    """
    POST /aifs/get_forecast with JSON body {"lat": ..., "lon": ...}.

    Returns the nearest grid point's precipitation, computed as the
    difference of accumulated 'tp' between two consecutive lead times,
    plus the valid time shifted to Beijing time (UTC+8).
    """
    global best_df
    global best_previous_df

    try:
        data = request.get_json()
        lat = data.get('lat')
        lon = data.get('lon')

        # Bug fix: 'if not lat or not lon' rejected the legitimate
        # coordinate 0.0 (equator / prime meridian); test for missing
        # values explicitly instead.
        if lat is None or lon is None:
            return jsonify({'error': '缺少经纬度参数'}), 400

        lat = float(lat)
        lon = float(lon)

        # Bug fix: the scheduled download may not have completed yet, in
        # which case the globals are still None; previously this crashed
        # with AttributeError inside the bare except.
        if best_df is None or best_previous_df is None:
            return jsonify({'error': '预报数据尚未就绪'}), 503

        # Nearest grid point in each of the two lead-time datasets.
        closest_point = _nearest_point(best_df.copy(), lat, lon)
        closest_previous_point = _nearest_point(best_previous_df.copy(), lat, lon)

        valid_time_utc = closest_point.get('valid_time')

        # Convert to a pandas timestamp and shift UTC -> Beijing (UTC+8).
        beijing_time = pd.Timestamp(valid_time_utc) + pd.Timedelta(hours=8)
        valid_time_str = beijing_time.strftime('%Y-%m-%d %H:%M:%S CST')

        forecast_data = {
            'status': 200,
            'requested_lat': lat,
            'requested_lon': lon,
            'actual_lat': float(closest_point['latitude']),
            'actual_lon': float(closest_point['longitude']),
            # ~111 km per degree: rough conversion of degree distance.
            'distance_km': float(closest_point['distance'] * 111),
            # Accumulated precipitation between the two lead times (mm).
            'total_precipitation': float(closest_point.get('tp', 0) - closest_previous_point.get('tp', 0)),
            'valid_time': valid_time_str
        }

        return jsonify(forecast_data)
    except Exception as e:
        # Bug fix: previously the error was printed and None returned,
        # producing an opaque Flask 500; return an explicit JSON error.
        print(e)
        return jsonify({'error': str(e)}), 500


# Background scheduler that drives the periodic forecast download task.
scheduler = APScheduler()


@scheduler.task('interval', id='download_nearest_forecast_task', seconds=10 * 60, misfire_grace_time=10 * 60)
def download_nearest_forecast_task():
    """
    Periodic job: locate, download, decompress and regrid the latest
    usable pair of ICON TOT_PREC forecast files, publish them as the
    module-level DataFrames best_df / best_previous_df, and prune local
    files older than two days. Best-effort: any failure is logged and
    the previous in-memory data is kept until the next run.
    """
    global best_df
    global best_previous_df

    print('download_nearest_forecast_task executed --------', datetime.now())

    try:
        now_utc = datetime.utcnow()

        # Assume a run becomes available ~3 hours after its nominal
        # issue time, then pick the latest synoptic run (00/06/12/18 UTC).
        available_time = now_utc - timedelta(hours=3)
        _time = (available_time.hour // 6) * 6

        target_url = f'https://opendata.dwd.de/weather/nwp/icon/grib/{_time:02d}/tot_prec/'
        print(f'下载中：{target_url}')

        file_list = get_file_list(target_url)
        # Robustness fix: empty listing (e.g. network failure) previously
        # fell through to find_best_forecast for nothing.
        if not file_list:
            return

        # Newest run first, then ascending lead time — the ordering
        # find_best_forecast expects.
        file_list = sorted(
            file_list,
            key=lambda x: (-x['create_utc_time'].timestamp(), x['forecast_hour'])
        )

        best_results = find_best_forecast(file_list)
        # Bug fix: find_best_forecast may return None; previously this
        # raised TypeError that the broad except below silently printed.
        if best_results is None:
            return

        best_file = best_results['current_file']
        best_previous_file = best_results['previous_file']

        # Download both files (skipped when already present); abort the
        # cycle if either download ultimately fails.
        if not download_file(best_file['url'], best_file['name'], DOWNLOAD_DIR):
            return
        if not download_file(best_previous_file['url'], best_previous_file['name'], DOWNLOAD_DIR):
            return

        # Decompress (skipped when the .grib2 already exists).
        decompress_bz2(os.path.join(DOWNLOAD_DIR, best_file['name']))
        decompress_bz2(os.path.join(DOWNLOAD_DIR, best_previous_file['name']))

        best_file_name = best_file['name'].replace('.bz2', '')
        best_previous_file_name = best_previous_file['name'].replace('.bz2', '')

        best_file_path = os.path.join(DOWNLOAD_DIR, best_file_name)
        best_previous_file_path = os.path.join(DOWNLOAD_DIR, best_previous_file_name)

        best_file_output_path = os.path.join(DOWNLOAD_DIR, 'cdo_' + best_file_name)
        best_previous_file_output_path = os.path.join(DOWNLOAD_DIR, 'cdo_' + best_previous_file_name)

        # CDO grid description and precomputed remap weights.
        target_grid_world_txt_path = os.path.join(DOWNLOAD_DIR, 'ICON_GLOBAL2WORLD_0125_EASY',
                                                  'target_grid_world_0125.txt')
        target_weights_nc = os.path.join(DOWNLOAD_DIR, 'ICON_GLOBAL2WORLD_0125_EASY', 'weights_icogl2world_0125.nc')

        # Remap the icosahedral grid to a regular lat/lon grid.
        _remap_with_cdo(target_grid_world_txt_path, target_weights_nc,
                        best_file_path, best_file_output_path)
        _remap_with_cdo(target_grid_world_txt_path, target_weights_nc,
                        best_previous_file_path, best_previous_file_output_path)

        # Load the regridded GRIB2 files and publish them as DataFrames.
        best_ds = xr.open_dataset(best_file_output_path, engine="cfgrib")
        best_df = best_ds.to_dataframe().reset_index()

        best_previous_ds = xr.open_dataset(best_previous_file_output_path, engine="cfgrib")
        best_previous_df = best_previous_ds.to_dataframe().reset_index()

        _cleanup_old_files(now_utc)
    except Exception as e:
        # Best-effort job: log and wait for the next scheduled run.
        print(e)


def _remap_with_cdo(grid_txt, weights_nc, src_path, dst_path):
    """Run the CDO remap step for one file; skip if output exists."""
    if not os.path.exists(dst_path):
        # NOTE(review): os.system with interpolated paths is shell-injection
        # prone in general; the paths here are locally constructed, but
        # subprocess.run([...], shell=False) would be safer.
        cdo_shell_cmd = f'cdo remap,{grid_txt},{weights_nc} {src_path} {dst_path}'
        print(cdo_shell_cmd)
        os.system(cdo_shell_cmd)


def _cleanup_old_files(now_utc):
    """Delete downloaded ICON files whose issue time is >= 2 days old."""
    hist_files = [f for f in os.listdir(DOWNLOAD_DIR)
                  if os.path.isfile(os.path.join(DOWNLOAD_DIR, f))]

    for file_name in hist_files:
        if 'icon_global_icosahedral_single' in file_name:
            base_time, _ = parse_icon_filename(file_name)
            if now_utc - base_time >= timedelta(days=2):
                file_path = os.path.join(DOWNLOAD_DIR, file_name)
                os.remove(file_path)
                print(f"文件 {file_path} 删除成功")


class BaseConfig:
    """Flask application configuration."""

    # NOTE(review): hard-coded secret key; load from an environment
    # variable in production deployments.
    SECRET_KEY = "fsfsd482afewfwhudsdkckznknajdhuwrhafnf"

    # Database configuration (currently unused)
    # SQLALCHEMY_DATABASE_URI = 'sqlite:///../data.db'

    # Default logging level
    LOG_LEVEL = logging.WARN

    # Static file cache lifetime
    SEND_FILE_MAX_AGE_DEFAULT = timedelta(hours=48)

    # SOCK_SERVER_OPTIONS = {'ping_interval': 25}

    # Expose the Flask-APScheduler REST API
    SCHEDULER_API_ENABLED = True


def init_bps(app):
    """Register every application blueprint on the given Flask app."""
    app.register_blueprint(bp)


def create_app():
    """Build and configure the Flask application instance."""
    # Use the project root (parent of this file's directory) as app root.
    root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    app = Flask(root_path)

    # Allow cross-origin requests from any origin.
    CORS(app)

    # Pull settings from the configuration object.
    app.config.from_object(BaseConfig)

    # Attach all blueprints.
    init_bps(app)

    return app


if __name__ == "__main__":
    download_nearest_forecast_task()

    app = create_app()

    scheduler.init_app(app)
    scheduler.start()

    app.run(
        host='0.0.0.0',
        port=12000,
        debug=True,
        use_reloader=False
    )
