import ast
import os
import subprocess
from datetime import date, timedelta
from multiprocessing import Pool

import requests
from bs4 import BeautifulSoup
import pygrib

from del_file import delete_old_data
from produce_nc import nc_main

try:
    from log_own import logger
    from config import *
except ImportError:
    import sys
    sys.path.append(".")
    sys.path.append("../")
    sys.path.append("../../")
    from log_own import logger
    from config import *


forecasts_url = os.path.join(main_page, forecasts).replace("\\", "/")
connect_time = 10
response_time = 500     # 下载时长，超时后重新下载（3次）


def step1():
    request_url = r"https://data.ecmwf.int/forecasts/"
    response_data = requests.get(request_url, headers=my_head)
    res_html = response_data.text

    soup = BeautifulSoup(res_html, 'html.parser')
    find_link = soup.find_all('a')
    date_list = []
    for link in find_link:
        # href = link.get('href')
        click_value = link.getText().strip("/")
        if click_value in ["home"]:
            continue
        date_list.append(click_value)
    return date_list


def step2(date_str):
    href_url = os.path.join(forecasts_url, date_str).replace("\\", "/")
    href_kw = os.path.join(forecasts, date_str).replace("\\", "/")

    response = requests.get(href_url, headers=my_head)
    response_html = response.text

    soup1 = BeautifulSoup(response_html, 'html.parser')
    find_link1 = soup1.find_all('a')
    click_value_list = []
    for link1 in find_link1:
        href = link1.get("href")
        if href_kw not in href:
            continue
        if href.strip("/") == href_kw:
            continue
        click_value = link1.getText().strip("/")
        if click_value.lower() in ["parent directory", "name", "home"]:
            continue
        click_value_list.append(click_value)
    return click_value_list


def step3(date_s, suffix):
    href_url = os.path.join(forecasts_url, date_s, suffix).replace("\\", "/") 

    response = requests.get(href_url, headers=my_head)
    response_html = response.text

    soup1 = BeautifulSoup(response_html, 'html.parser')
    find_link1 = soup1.find_all('a')

    href_kw = os.path.join(forecasts, date_s, suffix).replace("\\", "/")

    click_value_list = []
    for link1 in find_link1:
        href = link1.get("href")
        if href_kw not in href:
            continue
        if href.strip("/") == href_kw:
            continue
        click_value = link1.getText().strip("/")
        if click_value.lower() in ["parent directory", "name", "home"]:
            continue
        click_value_list.append(click_value)
    return click_value_list
    

def step4(ste1, ste2, ste3):
    url = os.path.join(forecasts_url, ste1, ste2, ste3).replace("\\", "/")
    response = requests.get(url, headers=my_head)
    response_html = response.text
   
    soup1 = BeautifulSoup(response_html, 'html.parser')
    find_link1 = soup1.find_all('a')

    href_kw = os.path.join(forecasts, ste1, ste2, ste3).replace("\\", "/")

    click_value_list = []
    for link1 in find_link1:
        href = link1.get("href")
        if href_kw not in href:
            continue
        if href.strip("/") == href_kw:
            continue
        click_value = link1.getText().strip("/")
        if click_value.lower() in ["parent directory", "name", "home"]:
            continue
        click_value_list.append(click_value)
    return click_value_list


def step5(ste1, ste2, ste3, ste4):
    url = os.path.join(forecasts_url, ste1, ste2, ste3, ste4).replace("\\", "/")
    response = requests.get(url, headers=my_head)
    response_html = response.text

    soup1 = BeautifulSoup(response_html, 'html.parser')
    find_link1 = soup1.find_all('a')

    href_kw = os.path.join(forecasts, ste1, ste2, ste3, ste4).replace("\\", "/")

    click_value_list = []
    for link1 in find_link1:
        href = link1.get("href")
        if href_kw not in href:
            continue
        if href.strip("/") == href_kw:
            continue
        click_value = link1.getText().strip("/")
        if click_value.lower() in ["parent directory", "name", "home"]:
            continue
        click_value_list.append(click_value)
    return click_value_list


def calc_divisional_range(filesize, chuck):
    step = filesize//chuck
    arr = list(range(0, filesize, step))
    result = []
    for i in range(len(arr)-1):
        s_pos, e_pos = arr[i], arr[i+1]-1
        result.append([s_pos, e_pos])
    result[-1][-1] = filesize-1
    return result


def down_load_file_and_extract(arg_l):
    def extract_deal(file_p):
        if file_p.endswith("-enfo-ef.index"):
            logger.info(f"{file_p} 开始抽取")
            extract_file = extract_index_by_cf(file_p)
            os.remove(file_p)
            logger.info(f"{file_p} 已删除, 新文件{extract_file}")

        elif file_p.endswith("-enfo-ef.grib2"):
            logger.info(f"{file_p} 开始抽取")
            new_file, code, msg  = extract_by_cf(file_p)
            if code == 1:
                logger.warning(f"{file_p}, {msg}")
            else:
                os.remove(file_p)
                logger.info(f"{file_p} 已删除, 新文件{new_file}")

    url, store_path = arg_l[0], arg_l[1]
    store_dir = os.path.dirname(store_path)
    os.makedirs(store_dir, exist_ok=True)
    try:
        down_load_cmd = f"wget -c {url} -O {store_path}"
        os.system(down_load_cmd)
        if os.path.exists(store_path):
            extract_deal(store_path)

        # requests.packages.urllib3.disable_warnings()
        # res = requests.get(url, headers=my_head, stream=True, verify=False)
        # status_code = res.status_code
        # if res.status_code != 200:
        #     logger.error(f"{url}, status_code: {status_code}, {res.text}")
        #     return
        # else:
        #     logger.info(f"{url} 开始下载")
        #
        #     with open(store_path, "wb") as fwb:
        #         for data in res.iter_content(chunk_size=1024*1024):
        #             fwb.write(data)
        #     res.close()
        #     logger.info(f"{url} 下载完毕" )
        #     extract_deal(store_path)

    except Exception as e:
        logger.error(e)
        logger.error(f"{url} 失败" )


def extract_by_cf(file):
    """
    抽取grib数据中data_type为cf的message，写入当前目录下 “同名-processed.grib2” 新文件中
    :param file: 待抽取的数据文件 绝对路径
    """
    grbs = pygrib.open(file)
    selected_grbs = []
    for grb in grbs:
        # 只保留data_type为cf的数据，即Control Forecast数据
        if grb.dataType == 'cf':
            selected_grbs.append(grb)
            # logger.info("抽取 ", grb, " data_type=", grb['dataType'])
    grbs.close()

    # 抽取后的数据文件名
    processed_file = file.split('.')[0] + "-processed." + file.split('.')[1]

    if len(selected_grbs) == 0:
        logger.warning(f"{file} 抽取失败，请查看数据情况")
        return None, 1, "抽取失败"

    # 遍历列表，把所有message追加写入新文件
    for i in range(0, len(selected_grbs)):
        with open(processed_file, "ab") as grbout:
            msg = selected_grbs[i].tostring()
            grbout.write(msg)
    logger.info("已抽取 " + os.path.basename(file) + " 中的 " + str(len(selected_grbs)) + " 条message写入 " + os.path.basename(processed_file))
    return processed_file, 2, "抽取成功"


def extract_index_by_cf(file):
    """
    抽取索引文件 .index 中的 type=cf 的记录，写入当前目录下 “同名-processed.index” 新文件中，保留了旧文件
    :param file: 待抽取的索引文件(.index) 绝对路径
    """
    cf_records = []

    with open(file, 'r') as f:
        records = f.read().splitlines()

    # 抽取type为cf的记录，存入新列表
    for rec in records:
        rec = eval(rec)
        if rec['type'] == 'cf':
            cf_records.append(rec)

    # 更新抽取后的_offset
    cf_records[0]['_offset'] = 0
    for i in range(1, len(cf_records)):
        cf_records[i]['_offset'] = cf_records[i-1]['_offset'] + cf_records[i-1]['_length']

    # 抽取后的数据文件名
    processed_file = file.split('.')[0] + "-processed." + file.split('.')[1]
    # 遍历新列表的记录写入新文件
    for cf_rec in cf_records:
        with open(processed_file, 'a') as idx_out:
            idx_out.write(str(cf_rec) + '\n')
    logger.info("抽取 " + str(len(cf_records)) + " 条记录写入 " + os.path.basename(processed_file))
    return processed_file


def main():
    yesterday = (date.today() - timedelta(days=1)).strftime("%Y%m%d")
    ste1 = yesterday

    need_down_ste4_list = ["enfo"]  # 不同的ste2， 有不同的类型，共有enfo、oper、waef、wave、scda、scwv 几种
    down_arg_list = []

    # ste1 = download_date
    ste2_list = step2(ste1)
    for ste2 in ste2_list:          # ste2: 00z、 06z、12z、18z 四种
        ste3_list = step3(ste1, ste2)
        for ste3 in ste3_list:      # 只有0p4-beta 一种
            ste4_list = step4(ste1, ste2, ste3)
            for ste4 in ste4_list:      # enfo、oper、waef、wave、scda、scwv 几种
                if ste4 not in need_down_ste4_list:
                    continue
                # 只处理各个ste2、ste3下ste4在need_down_ste4_list中的类型
                file_name_list = step5(ste1, ste2, ste3, ste4)
                store_dir = os.path.join(store_grib_dir, ste1, ste2, ste3, ste4)
                for file_name in file_name_list:
                    down_url = os.path.join(forecasts_url, ste1, ste2, ste3, ste4, file_name).replace("\\", "/")
                    file_path = os.path.join(store_dir, file_name).replace("\\", "/")
                    down_arg_list.append(
                        [down_url, file_path]
                    )

    logger.info(f"{ste1} 共计 {len(down_arg_list)} 个文件")

    pp = Pool(14)
    pp.map(down_load_file_and_extract, iterable=down_arg_list)
    pp.close()
    pp.join()

    delete_old_data()
    nc_main()


if __name__ == '__main__':
    pass
    main()


    # test_url= r"https://data.ecmwf.int/forecasts/20230708/12z/0p4-beta/enfo/20230708120000-0h-enfo-ef.grib2"
    # sp = r"C:\Users\20200050\Desktop\test.grib2"
    # down_load_file_and_extract([test_url, sp])


