import argparse
import json
import os

import requests


def get_search_after_id(search_after_file):
    """Read a stored search_after id from *search_after_file*.

    The file is resolved against the module-level ``search_after_dir``.
    Only the first line is returned, as a raw string (any trailing
    newline included); callers convert it with int() themselves.
    """
    path = f"{search_after_dir}/{search_after_file}"
    with open(path, 'r') as fh:
        return fh.readline()


def get_current_search_after_id():
    """Resolve the search_after id to resume from, as an int.

    Preference order: the checkpoint file, then the last-run file,
    then a default of 0 (start from the beginning).
    """
    if os.path.exists(f"{search_after_dir}/{check_point_search_after_file}"):
        # A checkpoint from an interrupted run wins over everything else.
        search_after_id = get_search_after_id(check_point_search_after_file)
        print(f"自动获取到断点search_after_id值:{search_after_id}")
    elif os.path.exists(f"{search_after_dir}/{last_search_after_file}"):
        search_after_id = get_search_after_id(last_search_after_file)
        print(f"自动获取历史search_after_id值:{search_after_id}")
    else:
        search_after_id = 0
        print(f"未监测到历史search_after_id,默认赋值:{search_after_id}")
    return int(search_after_id)


def save_search_after_id(search_after_file, search_after_id):
    """Persist *search_after_id* into *search_after_file* under ``search_after_dir``.

    Creates the directory on first use; the id is written as its string
    representation, overwriting any previous content.
    """
    # exist_ok=True already tolerates an existing directory, so the former
    # os.path.exists() pre-check was redundant (and a TOCTOU race).
    os.makedirs(search_after_dir, exist_ok=True)
    search_after_path = f"{search_after_dir}/{search_after_file}"
    # The with-block flushes and closes on exit; explicit flush()/close()
    # inside it were redundant.
    with open(search_after_path, 'w') as f:
        f.write(str(search_after_id))
    print(f"保存search_after_id:{search_after_id}成功.")


def save_current_search_after_id(search_after_id):
    """Persist *search_after_id* to the module-level ``current_search_after_file``."""
    save_search_after_id(current_search_after_file, search_after_id)

def save_data(data: list, out_file: str, out_dir: str) -> int:
    """Write *data* records to *out_dir*/*out_file*, one record per line.

    dict records are serialized as JSON (non-ASCII preserved); any other
    record is written via str(). The directory is created if missing and
    any existing file is overwritten.

    Returns the number of records written.
    """
    os.makedirs(out_dir, exist_ok=True)
    out_path = os.path.join(out_dir, out_file)
    print(f"保存数据行数:{len(data)}")
    # with-statement guarantees the file is closed even if serialization
    # raises, fixing the leak from the bare open()/close() pair.
    with open(out_path, "w", encoding="utf-8") as f:
        for record in data:
            # isinstance (not type(...) is dict) also accepts dict subclasses.
            if isinstance(record, dict):
                f.write(json.dumps(record, ensure_ascii=False) + "\n")
            else:
                f.write(str(record) + "\n")
    return len(data)

def fetch_data_from_es(search_after_id):
    """Fetch one batch of documents from Elasticsearch via search_after paging.

    Builds a match_all query of ``batch_size`` documents sorted on the
    module-level ``sort_field`` (order ``seq``). A *search_after_id* of 0
    means "start from the beginning"; any other value resumes after it.

    Returns ``(next_search_after_id, hits)`` where the id is the last
    hit's sort value, or ``(None, None)`` when the index is exhausted.
    Raises ``requests.HTTPError`` on a non-2xx response.
    """
    query = {
        "size": batch_size,
        "query": {"match_all": {}},
        "sort": [{sort_field: {"order": seq}}],
    }
    if search_after_id != 0:
        query['search_after'] = [search_after_id]
    search_url = f"{es_url}/{index_name}/_search"
    # json= serializes the body and sets Content-Type in one step; the
    # timeout keeps the job from hanging forever on an unresponsive node.
    response = requests.post(search_url, json=query, timeout=60)
    # Fail fast with a clear HTTP error instead of a confusing KeyError
    # when ES returns an error body without a 'hits' key.
    response.raise_for_status()
    hits = response.json()['hits']['hits']
    if hits:
        # The last hit's sort value is the cursor for the next page.
        return hits[-1]['sort'][0], hits
    return None, None


def save_amap_regions():
    """Pull all documents from ES in batches and spool them to disk.

    Resumes from the saved search_after id when ``is_full == 0``, otherwise
    starts a full export from 0. Hits are buffered in memory and flushed to
    a new file (named after the segment's starting id) every
    ``batch_size/10`` rounds; checkpoint files are written at each flush so
    an interrupted run can resume from the last flushed position.
    """
    # is_full == 1 forces a full export from id 0; otherwise resume.
    current_search_after_id = get_current_search_after_id() if is_full == 0 else 0
    last_search_after_id = current_search_after_id
    round_num = 0
    current_round_data = []
    while True:
        round_num = round_num + 1
        print('round_num: ', round_num)
        tmp_current_search_after_id, data_list = fetch_data_from_es(current_search_after_id)
        # Keep the previous cursor when the fetch produced no new one
        # (None on an empty page; the 0 check appears to be defensive).
        current_search_after_id = current_search_after_id if tmp_current_search_after_id == 0 or tmp_current_search_after_id is None else tmp_current_search_after_id
        if data_list is None or len(data_list) == 0:
            # Index exhausted: flush the remaining buffer plus checkpoints, then stop.
            save_data(current_round_data,f"{exec_type}_{last_search_after_id}.csv", data_dir)
            save_search_after_id(check_point_search_after_file, current_search_after_id)
            save_current_search_after_id(current_search_after_id)
            break
        elif round_num % (batch_size/10) == 0:
            # Periodic flush every batch_size/10 rounds.
            # NOTE(review): batch_size/10 is a float in Python 3; for
            # batch_size < 10 this condition is true every round — confirm
            # batch_size is always >= 10 and a multiple of 10.
            print("round_num % batch_size: ", round_num % batch_size)
            current_round_data.extend(data_list)
            save_data(current_round_data,f"{exec_type}_{last_search_after_id}.csv", data_dir)
            save_search_after_id(check_point_search_after_file, current_search_after_id)
            save_current_search_after_id(current_search_after_id)
            # Start a fresh buffer; the next segment's file is named after
            # the cursor we just checkpointed.
            current_round_data = []
            last_search_after_id = current_search_after_id
        else:
            # Mid-segment round: just accumulate.
            current_round_data.extend(data_list)



if __name__ == '__main__':
    # CLI entry point: parse arguments, publish them as the module-level
    # settings the helper functions above read, then dispatch on exec_type.
    parser = argparse.ArgumentParser(description="es数据拉取")
    parser.add_argument("-t", "--exec_type",
                        help="抓取数据类型 amap_regions,",
                        required=True)
    parser.add_argument("-r", "--root_dir", help="文件存储父路径", required=True)
    parser.add_argument("-u", "--es_url", help="es请求地址", required=True)
    parser.add_argument("-i", "--index_name", help="es索引", required=True)
    parser.add_argument("-c", "--sort_field", help="排序字段", required=True)
    # Let argparse handle conversion and defaulting: a non-integer value now
    # produces a clean usage error instead of a ValueError traceback, and the
    # manual "if ... is not None else" dance below goes away.
    parser.add_argument("-b", "--batch_size", help="每次请求数据量", type=int, default=1000)
    parser.add_argument("-s", "--seq", help="顺序", default='asc')
    parser.add_argument("-f", "--is_full", help="是否全量,0:增量,1:全量", type=int, default=1)
    args = parser.parse_args()
    exec_type = args.exec_type
    root_dir = args.root_dir
    es_url = args.es_url
    sort_field = args.sort_field
    index_name = args.index_name
    batch_size = args.batch_size
    seq = args.seq
    is_full = args.is_full
    print("exec_type", exec_type, "root_dir", root_dir, "is_full", is_full)
    # Checkpoint-file names and directory layout shared by the helpers above.
    current_search_after_file = "current_search_after_id"
    check_point_search_after_file = "check_point_search_after_id"
    last_search_after_file = "last_search_after_id"
    search_after_dir = f"{root_dir}/{exec_type}/search_after"
    data_dir = f"{root_dir}/{exec_type}/data"

    if exec_type == "amap_regions":
        save_amap_regions()
