# Pipeline: send requests, fetch data, parse data, store data.
# HTTP request/response: request methods (GET, POST, ...), headers, body.
# URL parsing: locate the target page from its URL; a URL consists of the
#   scheme, hostname, port, path, query parameters, etc.
# Data parsing: extract the needed fields from the fetched HTML/JSON using
#   regular expressions, XPath, CSS selectors, or similar techniques.
# Data storage: files, databases, etc. — choose based on data volume,
#   data type, access frequency, and related factors.
import requests
from bs4 import BeautifulSoup
import re
import os
import json
import queue
import threading
import time

# Scrape target and output configuration.
URL = 'https://ssr1.scrape.center'   # base URL of the demo movie site
PAGE = 10                            # number of listing pages to crawl
PATH = './movies'                    # directory for per-movie JSON files

# BUG FIX: exists()+mkdir() is racy (TOCTOU) and crashes if the directory
# appears between the check and the call; makedirs(exist_ok=True) is atomic
# with respect to that race and also creates parents if PATH is made deeper.
os.makedirs(PATH, exist_ok=True)

# Shared work queue of detail-page URLs: filled by get_a(),
# drained by the write_content() worker threads.
Q = queue.Queue()


def get_a():
    """Collect the detail-page URL of every movie on the listing pages.

    Walks listing pages 1..PAGE, parses each response, and pushes the
    absolute detail URL of every <a class="name"> link onto the shared
    queue Q for the worker threads to consume.
    """
    for page_no in range(1, PAGE + 1):
        listing_url = URL + '/page/' + str(page_no)
        response = requests.get(listing_url)
        # Let requests guess the charset so non-ASCII titles decode cleanly.
        response.encoding = response.apparent_encoding
        soup = BeautifulSoup(response.text, 'html.parser')
        for link in soup.find_all('a', {'class': 'name'}):
            # href is site-relative; prefix the base URL to make it absolute.
            Q.put(URL + link['href'])


def get_content(url):
    """Fetch a movie detail page and extract its metadata.

    Parameters
    ----------
    url : str
        Absolute URL of a movie detail page.

    Returns
    -------
    dict | None
        The movie's fields ('title', 'categories', 'countries', 'time',
        'published', 'drama', 'score'), or None if fetching or parsing
        failed.
    """
    try:
        res = requests.get(url)
        res.encoding = res.apparent_encoding
        soup = BeautifulSoup(res.text, 'html.parser')

        title = soup.h2.string
        categories = [span.string
                      for span in soup.find('div', {'class': 'categories'}).findAll('span')]
        # The page carries two "info" divs: the first holds country and
        # runtime spans, the second the release date.
        info_div1, info_div2 = soup.findAll('div', {'class': 'info'})
        info_spans = info_div1.findAll('span')
        countries = info_spans[0].string
        duration = info_spans[-1].string  # renamed: `time` shadowed the time module
        published = info_div2.span.string
        # BUG FIX: the original closed find()'s parenthesis after .strip(),
        # i.e. soup.find('div', {'class': 'drama'}.p.string.strip()) — that
        # calls .p on the attrs *dict* and always raised AttributeError.
        drama = soup.find('div', {'class': 'drama'}).p.string.strip()
        score = soup.find('p', {'class': 'score'}).string.strip()

        # BUG FIX: the original built this dict (named `dict`, shadowing the
        # builtin) but never returned it, so callers always received None.
        return {
            'title': title,
            'categories': categories,
            'countries': countries,  # was extracted but dropped in the original
            'time': duration,
            'published': published,
            'drama': drama,
            'score': score,
        }
    except Exception as e:
        # Best-effort scraping: report the failure and signal it with None
        # so a single bad page doesn't kill the worker thread.
        print(e)
        return None


def write_content():
    """Worker-thread task: drain URLs from Q and persist each movie as JSON.

    Runs until the queue is exhausted.  Movies whose detail page could not
    be parsed (get_content returned None) are skipped instead of crashing
    the thread.
    """
    while True:
        try:
            # BUG FIX: Q.empty() followed by Q.get() races between worker
            # threads — another worker can drain the last item in between,
            # blocking this one forever.  get_nowait() is atomic.
            url = Q.get_nowait()
        except queue.Empty:
            break
        content_dict = get_content(url)
        if content_dict is None:
            # BUG FIX: the original indexed the result unconditionally and
            # raised TypeError whenever get_content failed.
            continue
        # NOTE(review): the title is used verbatim as the file name; a title
        # containing a path separator would fail — confirm against the site.
        file_path = os.path.join(PATH, content_dict['title'])
        with open(file_path, 'w', encoding='utf8') as f:
            f.write(json.dumps(content_dict, ensure_ascii=False))


def start_thread_thread(thread_names, thread_nums, args=tuple()):
    """Start `thread_nums` daemon threads running `thread_names(*args)` and
    block until all of them have finished.

    Parameters
    ----------
    thread_names : callable
        Target function each worker thread runs.
    thread_nums : int
        Number of worker threads to start.
    args : tuple
        Positional arguments passed to every thread's target.
    """
    threads = []
    # BUG FIX: the original nested its wait loop INSIDE this start loop, so
    # thread i was awaited to completion before thread i+1 was started —
    # the "workers" actually ran one at a time, serially.
    for _ in range(thread_nums):
        worker = threading.Thread(target=thread_names, args=args)
        # Daemon so stuck workers can't keep the process alive;
        # t.setDaemon() is deprecated since Python 3.10 in favor of .daemon.
        worker.daemon = True
        worker.start()
        threads.append(worker)
    # join() replaces the original is_alive()/sleep polling loop.
    for worker in threads:
        worker.join()


if __name__ == '__main__':
    # Fill the queue with every movie's detail-page URL, then persist the
    # details with five concurrent worker threads.
    get_a()
    start_thread_thread(write_content, 5)
    # BUG FIX: the original also called get_content()/write_content() at
    # module level (outside this guard), so merely importing the module
    # performed network I/O — and against a typo'd host
    # ('ssr1.scarpe.center').  Those stray debug calls were removed.
