import os.path
import time

import requests
import pandas as pd
from machine_lib import *
from config import *
from tqdm import tqdm

# Pandas display options for interactive inspection of crawled tables:
# print wide frames on one line and show up to 1000 rows.
pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_rows', 1000)


def fetch_datasets(s, url):
    """Fetch *url* with the authenticated session *s* and return the parsed JSON.

    Raises:
        Exception: if the response body is not valid JSON (e.g. an HTML error
            page), with the HTTP status code in the message.
    """
    response = s.get(url)
    try:
        return response.json()
    except ValueError as err:
        # response.json() raises a ValueError subclass on a non-JSON body.
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit
        # and hide the original cause; chain it instead.
        raise Exception(f"Failed to fetch data: {response.status_code}") from err


def save_to_csv(data, filename):
    """Persist *data* (any DataFrame-compatible structure) to *filename* as CSV,
    without writing the index column."""
    pd.DataFrame(data).to_csv(filename, index=False)


def crawl_all_datasets(s, region='USA', delay=1, universe="TOP3000", instrumentType="EQUITY", limit=50, offset=0):
    """Download every page of the /data-sets endpoint into a single DataFrame.

    Args:
        s: authenticated session (``requests.Session``-like, has ``.get``).
        region, delay, universe, instrumentType: simulation-setting filters.
        limit: page size requested from the API.
        offset: offset of the first page.

    Returns:
        ``pd.DataFrame`` with one row per dataset; empty DataFrame when the
        first page returns no results.
    """
    base_url = "https://api.worldquantbrain.com/data-sets"
    params = {
        "delay": delay,
        "instrumentType": instrumentType,
        "limit": limit,
        "offset": offset,
        "region": region,
        "universe": universe,
    }

    response = s.get(base_url, params=params)
    data = response.json()

    total_count = data['count']
    results_per_page = len(data['results'])
    if results_per_page == 0:
        return pd.DataFrame()

    # Ceiling division: number of pages needed to cover all records.
    total_pages = (total_count + results_per_page - 1) // results_per_page

    all_results = data['results']

    # Fetch the remaining pages. Bug fix: the offset must advance by the
    # requested page size (and honor the starting offset), not by a
    # hard-coded 50 — with limit != 50 the old code skipped or re-read rows.
    for page in range(1, total_pages):
        params['offset'] = offset + limit * page
        response = s.get(base_url, params=params)
        data = response.json()
        all_results.extend(data['results'])

    return pd.DataFrame(all_results)


def crawl_all_fields(s, instrumentType: str = 'EQUITY',
                     region: str = 'USA', delay: int = 1,
                     universe: str = 'TOP3000', dataset_id: str = '',
                     search: str = ''):
    """Crawl every data-field record matching the given filters.

    Args:
        s: authenticated session (``requests.Session``-like, has ``.get``).
        instrumentType, region, delay, universe: simulation-setting filters.
        dataset_id: restrict fields to one dataset (used when ``search`` is empty).
        search: free-text query; when non-empty the dataset filter is dropped.

    Returns:
        Flat list of data-field dicts (the ``results`` entries of every page).
    """
    if not search:
        url_template = "https://api.worldquantbrain.com/data-fields?" + \
                       f"&instrumentType={instrumentType}" + \
                       f"&region={region}&delay={delay}&universe={universe}&dataset.id={dataset_id}&limit=50" + \
                       "&offset={x}"
        # Retry until the API answers 200, sleeping between attempts —
        # the original retried in a tight loop and could hammer the server
        # when throttled.
        while True:
            res = s.get(url_template.format(x=0))
            if res.status_code == 200:
                break
            time.sleep(1)
        count = res.json()['count']
    else:
        url_template = "https://api.worldquantbrain.com/data-fields?" + \
                       f"&instrumentType={instrumentType}" + \
                       f"&region={region}&delay={delay}&universe={universe}&limit=50" + \
                       f"&search={search}" + \
                       "&offset={x}"
        # NOTE(review): for search queries the crawl is capped at 100 records
        # (two pages) instead of reading the API's 'count' — presumably the
        # search count is unreliable; behavior preserved, confirm upstream.
        count = 100

    # Page through the results 50 at a time, retrying each page until it
    # succeeds, then flatten the per-page result lists.
    datafields_list = []
    for x in range(0, count, 50):
        while True:
            datafields = s.get(url_template.format(x=x))
            if datafields.status_code == 200:
                break
            time.sleep(1)
        datafields_list.append(datafields.json()['results'])

    return [item for sublist in datafields_list for item in sublist]


if __name__ == "__main__":
    s = login()
    if_cover = True

    for region in tqdm(REGION_LIST):
        for delay in DELAY_LIST:
            for instrumentType in ['EQUITY', ]:  # INSTRUMENT_TYPE_LIST:
                for universe in UNIVERSE_DICT["instrumentType"][instrumentType]['region'][region]:
                    output_path = os.path.join(DATASETS_PATH,
                                               f"{region}_{delay}_{instrumentType}_{universe}.csv")
                    if not if_cover:
                        if os.path.exists(output_path):
                            print(f"File {output_path} already exists.")
                            continue
                    datasets = crawl_all_datasets(s, region=region, delay=delay, universe=universe,
                                                  instrumentType=instrumentType)
                    save_to_csv(datasets, output_path)
                    time.sleep(5)
                    print(f"Saved to {output_path}")

    for region in tqdm(REGION_LIST[:]):
        for delay in DELAY_LIST:
            for instrumentType in ['EQUITY', ]:  # INSTRUMENT_TYPE_LIST:
                for universe in UNIVERSE_DICT["instrumentType"][instrumentType]['region'][region]:
                    output_path = os.path.join(FIELDS_PATH, f"{region}_{delay}_{instrumentType}_{universe}.csv")
                    src_path = os.path.join(DATASETS_PATH, f"{region}_{delay}_{instrumentType}_{universe}.csv")
                    if not if_cover:
                        if os.path.exists(output_path):
                            print(f"File {output_path} already exists.")
                            continue
                    try:
                        datasets = pd.read_csv(src_path)
                    except pd.errors.EmptyDataError:
                        print(f"File {src_path} is empty.")
                        continue
                    fields = []
                    for dataset_id in datasets['id'].unique().tolist():
                        fields.extend(crawl_all_fields(s, region=region, delay=delay, universe=universe,
                                                       instrumentType=instrumentType, dataset_id=dataset_id))
                    save_to_csv(fields, output_path)
                    print(f"Saved to {output_path}")
