import requests
from bs4 import BeautifulSoup
import json
import threading
import time
import re
import queue
import os

# Target site, number of listing pages to crawl, and output directory.
URL = 'https://ssr1.scrape.center'
PAGE = 10
PATH = './movies'
# makedirs(exist_ok=True) avoids the check-then-create race of
# os.path.exists + os.mkdir and also creates missing parent directories.
os.makedirs(PATH, exist_ok=True)

# Queue of detail-page URLs, shared between the producer (get_a) and
# the consumer worker threads (write_content).
Q = queue.Queue()

def get_a():
    """Collect every movie detail-page link from the listing pages.

    Fetches PAGE listing pages, extracts the <a class="name"> anchors and
    puts each absolute detail URL onto the shared queue Q.
    """
    for page in range(1, PAGE + 1):
        url = f'{URL}/page/{page}'
        print(url)
        # A timeout prevents one stalled request from hanging the crawl
        # forever (requests has no default timeout).
        res = requests.get(url, timeout=10)
        res.encoding = res.apparent_encoding
        soup = BeautifulSoup(res.text, 'html.parser')
        for atag in soup.find_all('a', {'class': 'name'}):
            # href is site-relative (e.g. /detail/1); prefix the site root.
            Q.put(URL + atag['href'])

def get_content(url):
    """Scrape one movie detail page and return its fields as a dict.

    Returns a dict with keys title/categories/countries/time/published/
    drama/score, or None if the request or the parsing fails — callers
    must check the result before using it.
    """
    try:
        res = requests.get(url, timeout=10)
        res.encoding = res.apparent_encoding
        soup = BeautifulSoup(res.text, 'html.parser')

        title = soup.h2.string
        categories = [span.string
                      for span in soup.find('div', {'class': 'categories'}).findAll('span')]
        info_divs1, info_divs2 = soup.findAll('div', {'class': 'info'})
        # BUG FIX: the spans must come from the first info div, not from
        # the whole document — soup.findAll('span') picked up every <span>
        # on the page, so countries/time could read unrelated elements.
        info_spans = info_divs1.findAll('span')
        countries = info_spans[0].string
        # Local name 'duration' avoids shadowing the imported time module;
        # the output key stays 'time' for backward compatibility.
        duration = info_spans[-1].string
        published = info_divs2.span.string
        drama = soup.find('div', {'class': 'drama'}).p.string.strip()
        score = soup.find('p', {'class': 'score'}).string.strip()

        return {
            'title': title,
            'categories': categories,
            'countries': countries,  # was computed but never returned
            'time': duration,
            'published': published,
            'drama': drama,
            'score': score,
        }

    except Exception as e:
        # Best-effort scraping: report which page failed, return None.
        print(f'failed to scrape {url}: {e}')
        return None


def write_content():
    """Worker: drain the URL queue, writing each movie as a JSON file.

    Loops until the queue is empty. The original version handled exactly
    one URL per thread, so with N threads only N of the queued movies
    were ever written; it also crashed with a TypeError whenever
    get_content returned None.
    """
    while True:
        try:
            url = Q.get_nowait()
        except queue.Empty:
            return
        content_dic = get_content(url)
        # get_content returns None on failure; skip instead of crashing.
        if not content_dic:
            continue
        with open(os.path.join(PATH, content_dic['title']), 'w', encoding = 'utf8') as f:
            f.write(json.dumps(content_dic, ensure_ascii = False))

def start_thread(thread_names, thread_nums, args = tuple()):
    """Run `thread_nums` daemon threads executing `thread_names(*args)`
    and block until every one of them has finished.
    """
    threads = []
    for _ in range(thread_nums):
        # daemon=True in the constructor replaces the deprecated
        # setDaemon() call (removal planned since Python 3.10).
        t = threading.Thread(target = thread_names, args = args, daemon = True)
        t.start()
        threads.append(t)

    # join() replaces the original 0.1 s busy-wait polling loop; the
    # observable effect is identical — return once all workers are done.
    for t in threads:
        t.join()

if __name__ == '__main__':
    # Producer fills the queue, then 5 consumer threads drain it.
    # The stray get_content(...) / write_content() calls that previously
    # sat below this guard ran unconditionally at import time (network
    # and file I/O as a side effect of importing); they were debug
    # leftovers and have been removed.
    get_a()
    start_thread(write_content, 5)

# res = requests.get(URL)
# res.encoding = res.apparent_encoding
# html = res.text
#
# soup = BeautifulSoup(html, 'html.parser')
# titles = soup.find_all('h2')
# for title in titles:
#     print(title.string)