import os
from os.path import exists
import json
import requests
import logging
import re
from urllib.parse import urljoin
from base import BASE_PATH

# 定义日志
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s: %(message)s')

# 爬取的链接
BASE_URL = 'https://ssr1.scrape.center'
# 总的页数
TOTAL_PAGE = 10
# 存储目录
RESULIS_DIR = BASE_PATH + os.sep + 'results' + os.sep+'spilder_movies'

"""
 爬取详情页
 通过基础爬虫实战01_爬起列表页，我们已经拿到所有详情页 URL  那么下一步当然就是解析详情页并提取出我们想要的信息了

 内容如下:
 正则表达式提取每部电影的名称、封面、类别、上映时间、评分、剧情简介等内容
"""


def request_scrape(url, timeout=10):
    """
    Fetch a page and return its HTML source.

    :param url: URL to request
    :param timeout: seconds before the request is aborted; the original code
                    passed no timeout, so a stalled server hung forever
    :return: HTML text on HTTP 200, otherwise None (non-200 status or
             request exception, both logged)
    """
    logging.info('开始请求获取html源码,url: %s', url)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
    }
    try:
        response = requests.get(url, headers=headers, timeout=timeout)
    except requests.RequestException:
        # exc_info=True logs the full traceback alongside the message.
        logging.error('请求报错，url: %s', url, exc_info=True)
        return None
    if response.status_code == 200:
        return response.text
    logging.error('请求状态码错误, code: %s', response.status_code)
    return None


def scrape_index(page):
    """
    Build the absolute URL of one list page.

    :param page: 1-based page number
    :return: absolute list-page URL (e.g. '<BASE_URL>/page/3')
    """
    return urljoin(BASE_URL, f'/page/{page}')


def parse_index_get_detailurl(html):
    """
    Extract every detail-page URL from a list-page HTML document.

    Yields absolute detail-page URLs; yields nothing when html is falsy or
    no link matches.  (The original's `return []` inside a generator never
    reached callers -- the list rode on StopIteration.value -- and a None
    html, which request_scrape returns on failure, raised TypeError.)

    :param html: list-page HTML source, or None if the request failed
    :return: generator of absolute detail-page URLs
    """
    # Guard: re.findall raises TypeError on None; an empty generator is the
    # correct "nothing to crawl" result.
    if not html:
        return
    # Target anchors like: <a data-v-7f856186="" href="/detail/1" class="name">
    pattern = re.compile('<a.*?href="(.*?)".*?class="name">')
    for href in pattern.findall(html):
        yield urljoin(BASE_URL, href)


def parse_detail(html):
    """
    Parse a detail-page HTML document and extract the movie fields.

    :param html: detail-page HTML source text
    :return: dict with keys 'cover', 'categories', 'name', 'release_time',
             'score', 'drama'; each value is None when its pattern does not
             match (categories is None rather than [] when nothing matches)
    """
    # Raw strings avoid invalid-escape warnings (the original '\d' was not
    # raw); re.S lets '.' span newlines in the multi-line HTML.
    name_pattern = re.compile(r'<h2.*?>(.*?)</h2>', re.S)
    cover_pattern = re.compile(r'class="item.*?<img.*?src="(.*?)".*?class="cover">', re.S)
    category_pattern = re.compile(r'<button.*?category.*?".*?<span>(.*?)</span>.*?</button>', re.S)
    release_time_pattern = re.compile(r'(\d{4}-\d{2}-\d{2})\s+上映')
    score_pattern = re.compile(r'<p.*?score.*?">(.*?)</p>', re.S)
    drama_pattern = re.compile(r'<div.*?drama.*?<p.*?>(.*?)</p>', re.S)

    # Run each search exactly once and reuse the match object (the original
    # evaluated every pattern twice: once for the check, once for .group).
    cover_match = cover_pattern.search(html)
    name_match = name_pattern.search(html)
    release_time_match = release_time_pattern.search(html)
    score_match = score_pattern.search(html)
    drama_match = drama_pattern.search(html)
    # findall returns [] on no match; `or None` preserves the original's
    # None-instead-of-empty-list behavior.
    categories = category_pattern.findall(html) or None

    return {
        'cover': cover_match.group(1) if cover_match else None,
        'categories': categories,
        'name': name_match.group(1) if name_match else None,
        'release_time': release_time_match.group(1) if release_time_match else None,
        'score': score_match.group(1).strip() if score_match else None,
        'drama': drama_match.group(1).strip() if drama_match else None,
    }


def save_files(data):
    """
    Persist one movie record as <name>.json under RESULIS_DIR.

    :param data: dict returned by parse_detail; its 'name' value becomes the
                 file name (NOTE(review): a missing name yields 'None.json',
                 and a name containing a path separator would escape the
                 directory -- presumably site titles are safe; verify)
    :return: None
    """
    name = data.get('name')
    # os.path.join keeps the separator consistent with RESULIS_DIR, which is
    # built with os.sep (the original hard-coded '/').
    data_path = os.path.join(RESULIS_DIR, f'{name}.json')
    logging.info('存储json文件的路径: %s', data_path)
    with open(data_path, mode='w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=2)


def run():
    """
    Crawl every list page, then every detail page it links to, and save each
    movie record as a JSON file under RESULIS_DIR.
    """
    # exist_ok replaces the separate exists() check and closes its
    # check-then-create race window.
    os.makedirs(RESULIS_DIR, exist_ok=True)
    for page in range(1, TOTAL_PAGE + 1):
        # Build and fetch the list page.
        index_url = scrape_index(page)
        index_html = request_scrape(index_url)
        # request_scrape returns None on failure; skip the page instead of
        # crashing in the parser.
        if not index_html:
            continue
        # Walk every detail URL found on this list page.
        for detail_url in parse_index_get_detailurl(index_html):
            detail_html = request_scrape(detail_url)
            if not detail_html:
                continue
            data = parse_detail(detail_html)
            logging.info("获取到详情页数据 %s", data)
            logging.info('存储json文件')
            save_files(data)


if __name__ == '__main__':
    # Script entry point: crawl all pages and persist results.
    # (Removed commented-out ad-hoc test code; use a real test instead.)
    run()
