import os
from os.path import exists
import json
import requests
import logging
import re
from urllib.parse import urljoin
from base import BASE_PATH

# Module-wide logging configuration.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s: %(message)s',
)

# Site being scraped.
BASE_URL = 'https://spa1.scrape.center/'

# Total number of list pages to crawl.
TOTAL_PAGE = 10

# Output directory for the scraped JSON files.
# NOTE(review): the name keeps the original spellings "RESULIS"/"spilder"
# because other modules may already reference this constant/path.
RESULIS_DIR = os.sep.join([BASE_PATH, 'results', 'spilder_ajax_movies'])


def request_scrape(url, method='GET', data=None):
    """
    Request a JSON API endpoint relative to BASE_URL.

    :param url: path relative to BASE_URL, e.g. 'api/movie/1'
    :param method: HTTP method, defaults to 'GET'
    :param data: optional request body forwarded to requests
    :return: decoded JSON (dict) on HTTP 200, otherwise None
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36'
    }
    # Build the absolute URL once; urljoin handles slash edge cases.
    full_url = urljoin(BASE_URL, url)
    try:
        logging.info('开始请求接口: %s', full_url)
        # timeout prevents the crawler from hanging forever on a dead server.
        res = requests.request(url=full_url, method=method, data=data,
                               headers=headers, timeout=10)
        if res.status_code == 200:
            return res.json()
        logging.error('请求错误，请求状态码: %s', res.status_code)
    except requests.RequestException:
        logging.error('请求错误, %s', full_url, exc_info=True)
    # Explicit None on any failure path so callers can guard on it.
    return None




def parse_detail_data(id):
    """
    Fetch the detail endpoint for a movie and extract the fields we store.

    :param id: numeric movie id (name shadows the builtin `id`; kept
               unchanged for backward compatibility with existing callers)
    :return: dict with the selected movie fields, or None when the
             detail request failed
    """
    url = 'api/movie/' + str(id)
    # Request the detail endpoint.
    res = request_scrape(url)
    # request_scrape returns None on failure; guard before .get().
    if res is None:
        return None
    return {
        'cover': res.get('cover'),
        'categories': res.get('categories'),
        'name': res.get('name'),
        'published_at': res.get('published_at'),
        'score': res.get('score'),
        'drama': res.get('drama'),
    }


def save_files(data):
    """
    Persist one movie record as a JSON file named after the movie.

    :param data: movie dict; its 'name' field becomes the file name
    :return: None
    """
    target = f"{RESULIS_DIR}/{data.get('name')}.json"
    logging.info('存储json文件的路径: %s', target)
    with open(target, mode='w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)

def run():
    """Crawl every list page, then fetch each movie's detail and save it as JSON."""
    # Create the output directory; exist_ok avoids the check-then-create race.
    os.makedirs(RESULIS_DIR, exist_ok=True)

    # Collect the movie ids from every list page.
    detail_ids = []
    # Use the TOTAL_PAGE constant instead of a hard-coded page count.
    for page in range(1, TOTAL_PAGE + 1):
        url = 'api/movie/?limit=10&offset=' + str(10 * (page - 1))
        res = request_scrape(url)
        # request_scrape returns None on failure; skip the page instead of crashing.
        if not res:
            logging.error('列表页请求失败，跳过: %s', url)
            continue
        for item in res.get('results') or []:
            detail_ids.append(item.get('id'))

    # Fetch, log and persist each movie's detail record.
    # (movie_id instead of `id` to avoid shadowing the builtin.)
    for movie_id in detail_ids:
        data = parse_detail_data(movie_id)
        if not data:
            continue
        logging.info('获取到详情页数据 %s', data)
        logging.info('开始存储数据')
        save_files(data)
        logging.info('存储成功')




if __name__ == '__main__':
    # Script entry point: crawl and store all movie data.
    run()

