import requests
import json
import time
from JsonFileHandler import JsonFileHandler
from collections import defaultdict
from BaiduPanRequest import BaiduPanRequest


# Shared accumulators for the crawl: the flat result list plus two indexes
# (entries keyed by path, and entries grouped by parent path).
results = []
results_path_map = {}
results_par_path_map = defaultdict(list)

baidu_pan_request = BaiduPanRequest(initFilePath='./baidu-trans/init.ini')

# The cache file name embeds the share's root dir; '/' is swapped for '-'
# so the whole thing stays a single valid filename.
root_dir_path = baidu_pan_request.root_dir.replace('/', '-')
# Handler for reading/writing the JSON cache of crawl results.
json_file_handler = JsonFileHandler('data' + root_dir_path + '.json')

def fetch_list(url, headers, params, parent_item):
    """Recursively crawl the share listing rooted at params['dir'].

    Uses the locally cached grouping (results_par_path_map) when the dir was
    already crawled on a previous run; otherwise fetches every page via
    fetch_list_batch. Newly discovered entries are appended to the global
    `results` list and indexed in `results_path_map`; progress is persisted
    through `json_file_handler` after each level.

    Args:
        url: listing endpoint URL.
        headers: HTTP headers for the requests.
        params: query params; must contain 'dir', 'page' and 'num'.
        parent_item: dict of the parent entry, or None for the root call;
            gets 'child_size' (direct children) and 'all_child_size'
            (all descendants) recorded on it.

    Returns:
        Total number of entries found under this dir (direct + nested).
    """
    if params['dir'] in results_par_path_map:
        # Directory already present in the local cache -- skip the network.
        print(f"fetch_list-matched: {params['dir']}")
        datalist = results_par_path_map[params['dir']]
    else:
        datalist = fetch_list_batch(url, headers=headers, params=params)

    if not datalist:
        return 0

    for item in datalist:
        # Register unseen entries in both the result list and the path index.
        # BUGFIX: the original appended to `results` without updating
        # `results_path_map`, so a path fetched again later could be
        # appended a second time and later lookups missed fresh items.
        if item['path'] not in results_path_map:
            results.append(item)
            results_path_map[item['path']] = item

    # Record the number of direct children on the parent and persist.
    if parent_item is not None:
        parent_item['child_size'] = len(datalist)
        json_file_handler.write(results)

    all_child_size = 0
    # Recurse into sub-directories, accumulating the total entry count.
    for item in datalist:
        # Prefer the indexed instance for this path so child counts are
        # written onto the dict that actually lives in `results`.
        if item['path'] in results_path_map:
            item = results_path_map[item['path']]

        all_child_size += 1

        if not item.get('isdir'):
            continue

        next_params = params.copy()
        next_params['page'] = 1
        next_params['dir'] = item['path']
        all_child_size += fetch_list(url, headers, next_params, item)

    # Record the total descendant count on the parent and persist.
    if parent_item is not None:
        parent_item['all_child_size'] = all_child_size
        json_file_handler.write(results)

    return all_child_size


"""
批量获取列表数据，如果列表在本地不存在，则发送请求获取数据，并将结果保存到results中
"""    
def fetch_list_batch(url, headers, params):
    level_rets = []
    batch_size = params['num']

    # 循环获取数据，直到len(datalist) < batch_size
    while True:
        datalist = fetch_list_single(url, headers=headers, params=params)
        level_rets.extend(datalist)
        # 如果数组长度小于batch_size，则获取完成
        if len(datalist) < batch_size:
            break
        
        # 继续获取下一页
        params['page'] = params['page'] + 1

    return level_rets

def fetch_list_single(url, params, headers):
    """Fetch one page of the share listing for params['dir'].

    Returns the 'list' array of the JSON response, or [] when the payload
    carries no list (empty directory or an error response -- the raw body
    is printed for diagnosis in that case).

    NOTE(review): parameter order (url, params, headers) differs from the
    other fetch_* helpers (url, headers, params); all call sites use
    keyword arguments, so this is harmless but worth unifying.
    """
    print(f"fetch_list_single: {params['dir']}")
    
    response = requests.get(url, headers=headers, params=params)
    # Throttle: wait 1 second between requests to avoid rate limiting.
    # (An earlier comment claimed 200ms; the actual delay is 1s.)
    time.sleep(1)

    data = response.json()
    if not data.get('list'):
        print(f"fetch_list-no-list: {response.text}")
        return []
    
    return data['list']




# Load any previously crawled results from data<root>.json so the crawl
# can resume instead of re-fetching everything, and index them by path.
results = json_file_handler.read()
results_path_map = {data['path']: data for data in results}

# BUGFIX: reuse the cached root entry when one exists so the
# child_size/all_child_size fields written by fetch_list land on the dict
# that actually lives in `results` (and therefore gets persisted). The
# original always created a fresh root_item, so counts recorded on the
# root were lost on resumed runs; it also never indexed the new root.
root_dir = baidu_pan_request.root_dir
root_item = results_path_map.get(root_dir)
if root_item is None:
    root_item = {'path': root_dir, 'isdir': True}
    results.append(root_item)
    results_path_map[root_dir] = root_item

# Group the cached entries by parent path to build results_par_path_map,
# which lets fetch_list skip directories that were already crawled.
for item in results:
    path = item["path"]
    # Everything before the last '/' is the parent path; entries without
    # a '/' are grouped under themselves.
    parent_path = path.rsplit('/', 1)[0] if '/' in path else path
    results_par_path_map[parent_path].append(item)


# Base URL and request parameters for the share-list endpoint.
base_url = 'https://pan.baidu.com/share/list'
params = baidu_pan_request.getParams({'is_from_web': True, 'order': 'other', 'showempty': 0,
    'desc': 1, 'page': 1, 'num': 100, 'dir': baidu_pan_request.root_dir})
headers = baidu_pan_request.getHeaders()

fetch_list(url=base_url, headers=headers, params=params, parent_item=root_item)
