import requests
from bs4 import BeautifulSoup
import json
import threading
import time
import re
import queue
import os

URL = 'https://ssr1.scrape.center'
PAGE = 2           # number of list pages to crawl
PATH = './movies'  # output directory: one JSON file per movie

# makedirs(..., exist_ok=True) is race-free, unlike the original
# exists()-then-mkdir() pattern (which could raise FileExistsError
# if the directory appeared between the two calls).
os.makedirs(PATH, exist_ok=True)

# Work queue of detail-page URLs, shared by the scraper worker threads.
Q = queue.Queue()

def get_a():
    """Collect every movie-detail URL from the list pages into the queue Q.

    Walks list pages 1..PAGE, extracts each movie card's
    ``<a class="name" href="/detail/N">`` link, and enqueues the
    absolute URL for the worker threads.
    """
    for page in range(1, PAGE + 1):
        url = URL + '/page/' + str(page)
        # timeout keeps the crawl from hanging forever on a dead server
        res = requests.get(url, timeout=10)
        res.encoding = res.apparent_encoding

        soup = BeautifulSoup(res.text, 'html.parser')
        for atag in soup.find_all('a', {'class': 'name'}):
            # href is site-relative (e.g. "/detail/1"), so prefix the base URL
            Q.put(URL + atag['href'])

def get_content(url):
    """Scrape one movie-detail page and return its fields as a dict.

    Returns a dict with keys 'title', 'categories', 'countries', 'time',
    'published', 'drama', 'score', or None when the request or parsing
    fails (the error is printed; workers are expected to skip None).
    """
    try:
        # timeout keeps a worker thread from hanging forever on a stalled request
        res = requests.get(url, timeout=10)
        res.encoding = res.apparent_encoding
        soup = BeautifulSoup(res.text, 'html.parser')

        title = soup.h2.string
        categories = [span.string for span in
                      soup.find('div', {'class': 'categories'}).find_all('span')]

        # First info block holds the country span(s) and the duration;
        # the second holds the release date (may be absent).
        info_divs = soup.find_all('div', {'class': 'info'})
        first_info = [span.string for span in info_divs[0].find_all('span')]
        countries = first_info[0]
        # Named `duration` (not `time`) so the imported `time` module is
        # not shadowed; the output key stays 'time' for compatibility.
        duration = first_info[-1]
        published = None
        if info_divs[1].find('span'):
            published = info_divs[1].span.string

        drama = soup.find('div', {'class': 'drama'}).p.string.strip()
        score = soup.find('p', {'class': 'score'}).string.strip()
        print(title, categories, countries, duration, published, score)

        return {
            'title': title,
            'categories': categories,
            'countries': countries,
            'time': duration,
            'published': published,
            'drama': drama,
            'score': score,
        }

    except Exception as e:
        # Best-effort scraping: report the failure and return None so the
        # worker threads keep processing the rest of the queue.
        print(e, '出错了')


def write_content():
    """Drain the URL queue, scraping each page and writing one JSON file
    per movie into PATH (file name = movie title).

    Runs concurrently in several worker threads; returns when the queue
    is empty.
    """
    while True:
        try:
            # get_nowait() avoids the empty()-then-get() race of the
            # original: with several workers, another thread could drain
            # the queue between the two calls and block this one forever.
            url = Q.get_nowait()
        except queue.Empty:
            break
        content_dic = get_content(url)
        if content_dic is None:
            # get_content() returns None when scraping failed; skip it
            # instead of crashing on content_dic['title'].
            continue
        with open(os.path.join(PATH, content_dic['title']), 'w', encoding='utf8') as f:
            f.write(json.dumps(content_dic, ensure_ascii=False))

def start_thread(thread_names, thread_nums, args=tuple()):
    """Run `thread_nums` daemon threads executing `thread_names(*args)`
    and block until every one of them has finished.

    :param thread_names: callable to run in each thread
    :param thread_nums: number of threads to spawn
    :param args: positional arguments passed to each thread's target
    """
    threads = []
    for _ in range(thread_nums):
        t = threading.Thread(target=thread_names, args=args)
        # t.setDaemon(True) is deprecated since Python 3.10; assign the
        # attribute directly instead.
        t.daemon = True
        t.start()
        threads.append(t)

    # join() blocks until each thread finishes — same semantics as the
    # original 0.1 s is_alive() polling loop, without the busy-wait.
    for t in threads:
        t.join()

# Guard the entry point so importing this module does not trigger a crawl
# (matches the author's intent visible in the commented-out block below).
if __name__ == '__main__':
    get_a()                         # fill the queue with detail-page URLs
    start_thread(write_content, 5)  # five worker threads drain it
# if __name__ == '__main__':
#     get_a()
#     start_thread(write_content, 5)
#
#
# get_content('https://ssr1.scrape.center/detail/9')


# res = requests.get(URL)
# res.encoding = res.apparent_encoding
# html = res.text
#
# soup = BeautifulSoup(html, 'html.parser')
# titles = soup.find_all('h2')
# for title in titles:
#     print(title.string)