import json
import logging
import requests
import os

# Directory where scraped results are saved.
path = os.path.dirname(os.path.abspath(__file__))
# Anchor the results directory next to this script rather than the current
# working directory ('./results'), so the output location does not depend on
# where the crawler is launched from. (Previously `path` was computed but
# never used — that was the intent here.)
file_path = os.path.join(path, 'results')
os.makedirs(file_path, exist_ok=True)

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

class SpiderSpa1:
    """Scrape movie data from the spa1.scrape.center demo API.

    Walks the paginated list endpoint, collects movie ids, fetches each
    movie's detail record, extracts the fields of interest, and saves one
    JSON file per movie under the module-level ``file_path`` directory.
    """

    # Configure the list-page URL template and the maximum number of pages.
    def __init__(self, max_page=10):
        """Initialize the crawler.

        :param max_page: number of list pages to crawl (each page holds
            10 items); defaults to 10, matching the original behavior.
        """
        self.base_url = 'https://spa1.scrape.center/api/movie/?limit=10&offset={offset}'
        self.max_page = max_page

    # Request a list page and return its parsed results.
    def get_index(self, offset):
        """Fetch one list page and return its ``results`` array.

        :param offset: pagination offset passed to the API.
        :raises requests.RequestException: on network failure, timeout, or
            non-2xx HTTP status.
        """
        # A timeout keeps the crawler from hanging forever on a stalled
        # connection; raise_for_status surfaces HTTP errors instead of
        # failing later with a confusing JSON/KeyError.
        response = requests.get(self.base_url.format(offset=offset), timeout=10)
        response.raise_for_status()
        return response.json()['results']

    # Parse the list-page data and extract the key parameter: the movie id.
    def parse_index(self, results):
        """Return the list of movie ids found in a list-page ``results`` array."""
        # Comprehension instead of an append loop; also avoids shadowing
        # the builtin name ``id``.
        return [movie['id'] for movie in results]

    # Build each detail-page URL, request it, and collect the responses.
    def get_detail(self, ids):
        """Fetch the detail JSON for each movie id.

        :param ids: iterable of movie ids.
        :return: list of parsed detail-response dicts, in input order.
        :raises requests.RequestException: on network failure, timeout, or
            non-2xx HTTP status.
        """
        detail_responses = []
        for movie_id in ids:
            detail_url = f'https://spa1.scrape.center/api/movie/{movie_id}/'
            detail_response = requests.get(detail_url, timeout=10)
            detail_response.raise_for_status()
            detail_responses.append(detail_response.json())
        return detail_responses

    # Extract the fields we want from one detail record.
    def parse_detail(self, detail_response):
        """Return a dict with the selected fields of one detail record.

        :raises KeyError: if an expected field is missing (same as before).
        """
        wanted = ('name', 'categories', 'published_at', 'score', 'drama')
        return {key: detail_response[key] for key in wanted}

    # Persist one movie's data to disk.
    def save_data(self, data):
        """Write ``data`` to ``<file_path>/<name>.json`` as UTF-8 JSON."""
        name = data['name']
        with open(f'{file_path}/{name}.json', 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    # Drive the full crawl: list pages -> ids -> details -> parse -> save.
    def main(self):
        """Crawl every configured page and save each movie's data."""
        for page in range(1, self.max_page + 1):
            offset = (page - 1) * 10
            results = self.get_index(offset)
            logging.info(f'get index data: {results}')

            ids = self.parse_index(results)
            logging.info(f'get detail id: {ids}')

            detail_responses = self.get_detail(ids)
            for detail_response in detail_responses:
                logging.info(f'parse detail response: {detail_response}')
                data = self.parse_detail(detail_response)
                logging.info(f'get detail data: {data}')

                self.save_data(data)
                logging.info('saving successfully')





# Script entry point: build the crawler and run the full scrape.
if __name__ == '__main__':
    spider = SpiderSpa1()
    spider.main()