import json  # encode and decode JSON data
import os  # operating-system interaction
import queue  # synchronized, thread-safe queue class
import re  # regular expressions
import threading  # basic thread and lock support
import time  # time utilities
from urllib.parse import urljoin  # resolve relative URLs against a base

import requests  # HTTP library for sending network requests
from bs4 import BeautifulSoup  # extract data from HTML documents

URL = 'https://ssr1.scrape.center/'  # base URL of the demo movie site
PAGE = 10  # number of listing pages to crawl
PATH = '.movies'  # output directory for per-movie JSON files
# makedirs(..., exist_ok=True) replaces the original exists()+mkdir()
# pair: it has no check-then-create race and tolerates nested paths.
os.makedirs(PATH, exist_ok=True)

# Thread-safe FIFO queue of detail-page URLs shared by the workers.
Q = queue.Queue()


def get_a():
    """Collect every movie detail-page URL from the listing pages into Q.

    Iterates listing pages 1..PAGE, parses each page's HTML and enqueues
    the absolute URL of every <a class="name"> movie link.
    """
    for page in range(1, PAGE + 1):
        # BUG FIX: URL already ends with '/', so the original
        # URL + '/page/' + str(page) produced '...center//page/N'.
        # urljoin resolves the relative path cleanly.
        url = urljoin(URL, f'page/{page}')
        res = requests.get(url, timeout=10)
        res.raise_for_status()  # fail loudly on HTTP errors instead of parsing an error page
        res.encoding = res.apparent_encoding  # decode with the detected charset

        soup = BeautifulSoup(res.text, 'html.parser')
        for atag in soup.find_all('a', {'class': 'name'}):
            # hrefs are site-relative like '/detail/1'; resolve them
            # against the base URL (the original concatenation also
            # produced a double slash here).
            Q.put(urljoin(URL, atag['href']))


def get_content(url):
    """Fetch one movie detail page and extract its metadata.

    params: url str  absolute URL of a movie detail page
    Returns a JSON-serializable dict with keys title, categories,
    countries, time, published, drama and score, or None when the
    request or the parsing fails.
    """
    try:
        res = requests.get(url, timeout=10)
        res.encoding = res.apparent_encoding  # decode with the detected charset
        soup = BeautifulSoup(res.text, 'html.parser')

        title = soup.h2.string
        categories = [span.string for span in
                      soup.find('div', {'class': 'categories'}).find_all('span')]
        # The page carries two <div class="info"> blocks: the first holds
        # the country and runtime spans, the second the release date span.
        info_div1, info_div2 = soup.find_all('div', {'class': 'info'})
        info_spans = info_div1.find_all('span')  # find_all: findAll is the deprecated alias
        countries = info_spans[0].string
        duration = info_spans[-1].string
        published = info_div2.span.string
        drama = soup.find('div', {'class': 'drama'}).p.string.strip()
        score = soup.find('p', {'class': 'score'}).string.strip()

        return {
            'title': title,
            'categories': categories,
            'countries': countries,  # was computed but dropped from the dict in the original
            # BUG FIX: the original wrote 'time': time, i.e. the imported
            # `time` MODULE (the local was named time1), which is not
            # JSON-serializable; store the runtime string instead.
            'time': duration,
            'published': published,
            'drama': drama,
            'score': score,
        }
    except Exception as e:
        # Best-effort boundary: report which URL failed and signal the
        # failure to the caller with None (callers must check for it).
        print(f'failed to parse {url}: {e}')
        return None


def write_content():
    """Worker task: drain URLs from Q and write each movie's JSON to PATH."""
    while True:
        try:
            # get_nowait() instead of empty()+get(): with several workers,
            # another thread can drain the queue between the emptiness
            # check and the get(), leaving this thread blocked forever.
            url = Q.get_nowait()
        except queue.Empty:
            return
        content_dic = get_content(url)
        if content_dic is None:
            # BUG FIX: get_content returns None on failure; the original
            # then crashed with TypeError when subscripting it below.
            continue
        # Build the output path from the movie title and dump the dict as
        # JSON, keeping non-ASCII characters readable (ensure_ascii=False).
        filepath = os.path.join(PATH, content_dic['title'])
        with open(filepath, 'w', encoding='utf8') as f:
            f.write(json.dumps(content_dic, ensure_ascii=False))


def start_thread(thread_names, thread_nums, args=tuple()):
    """Start thread_nums daemon threads running thread_names(*args) and
    block until all of them have finished.

    params: thread_names  callable  thread target function
    params: thread_nums   int       number of threads to start
    params: args          tuple     positional arguments for the target
    """
    threads = []
    for _ in range(thread_nums):
        t = threading.Thread(target=thread_names, args=args)
        # Daemon threads die with the main thread; the attribute form
        # replaces the deprecated setDaemon() method.
        t.daemon = True
        t.start()
        threads.append(t)
    # BUG FIX: in the original, this wait loop was indented inside the
    # for loop above, so each new thread was started only after every
    # previously started thread had died — the workers ran one at a
    # time. Start them all first, then wait; join() replaces the manual
    # is_alive()/sleep polling loop.
    for t in threads:
        t.join()


if __name__ == '__main__':
    # Fill the queue with detail-page URLs, then process it with 5 workers.
    get_a()
    # BUG FIX: the original passed write_content() — i.e. the CALL's
    # return value (None) — as the thread target, which both ran the
    # whole job synchronously in the main thread and gave the workers
    # nothing to run. Pass the function object itself.
    start_thread(write_content, 5)
    # NOTE(review): the original also invoked get_content(...) and
    # write_content() unconditionally at module level; those debug
    # leftovers ran on import and have been removed.
