from concurrent.futures import ThreadPoolExecutor
from web.models import MovieData
from web.setting import ConfigSettings
from requests import get
from lxml import etree
import web
import os
from web.tools import queue_message

config = ConfigSettings()

page_count = config.SPIDER_PAGE_COUNT  # number of list pages to scrape (25 movies per page)
thread_num = config.SPIDER_THREAD  # worker-thread count for the scraper pool
count = 0  # running total of movies processed so far (mutated by movie_info_data)
path = './web/static/img/movie_img'  # directory where downloaded poster images are saved

# Browser-like headers so Douban serves the regular HTML pages.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.3945.130 Safari/537.36'
}

# Best to supply your own cookie here (configured via SPIDER_COOKIE).
if config.SPIDER_COOKIE:
    headers['Cookie'] = config.SPIDER_COOKIE


def spider_message(message, num_count=0, schedule=0, status=400) -> None:
    """
    Push a progress/status report onto the shared message queue.

    :param message: human-readable message text
    :param num_count: number of items scraped so far
    :param schedule: scrape progress as a percentage
    :param status: scrape status code (200 means the run is finished)
    """
    payload = {
        'message': message,
        'num_count': num_count,
        'schedule': schedule,
        'status': status,
    }
    queue_message.put(payload)


def img_download(url, name, now_count, now_schedule) -> None:
    """
    Download a movie poster image into the local image directory.

    Skips the network request entirely when the file already exists
    (the original fetched first and only skipped the write).

    :param url: remote image URL; its extension is kept for the saved file
    :param name: movie title, used as the file name
    :param now_count: current scrape count, forwarded to progress messages
    :param now_schedule: current scrape progress percentage
    """
    try:
        save_path = f'{path}/{name}.{url.split(".")[-1]}'
        if os.path.exists(save_path):
            return  # already downloaded on a previous run — avoid a wasted request
        response = get(url, headers=headers)
        response.raise_for_status()
        with open(save_path, 'wb') as f:
            f.write(response.content)
    except Exception as e:
        print(e)
        spider_message(f'{name}图片下载失败,原因:{e}', now_count, now_schedule)


def movie_info_url(url) -> list:
    """
    Collect the detail-page links from one page of the Douban Top-250 list.

    :param url: pagination offset (0, 25, 50, ... — passed as the `start` query parameter)
    :return: list of movie detail-page URLs found on that page
    """
    spider_message('正在获取影视详细页链接')
    response = get(f'https://movie.douban.com/top250?start={url}', headers=headers)
    html = etree.HTML(response.text)
    # Each entry's title block (<div class="hd">) links to its detail page;
    # xpath already yields the hrefs, so no manual append loop is needed.
    return list(html.xpath("//div[@class='hd']/a/@href"))


def movie_info_data(info_list) -> None:
    """
    Scrape each movie's detail page, persist it to the database and fetch its poster.

    Runs as a done-callback on the futures submitted in main(), so
    `info_list` is a Future whose result is the list of detail-page URLs.

    :param info_list: Future resolving to a list of movie detail-page links
    """
    for u in info_list.result():
        # NOTE(review): `count += 1` is not atomic and this callback runs on
        # multiple worker threads — the counter can race; confirm whether an
        # exact count matters here.
        global count
        count += 1
        try:
            response = get(u, headers=headers)
            response.raise_for_status()
            html = etree.HTML(response.text)
            title = html.xpath("//title/text()")[0].replace(' (豆瓣)', '').strip()  # movie title
            year = html.xpath("//h1//span[@class='year']/text()")[0].replace('(', '').replace(')', '').strip()  # release year
            score = html.xpath("//strong[@class='ll rating_num']/text()")[0]  # rating score
            img_url = html.xpath("//div[@id='mainpic']//img/@src")[0]  # poster image URL
            # Image path relative to the static folder — this is what gets stored in the DB.
            img_url2 = f'{path.split("static/")[-1]}/{title}.{img_url.split(".")[-1]}'
            people_num = html.xpath("//a[@class='rating_people']/span/text()")[0]  # number of ratings
            for info in html.xpath("//div[@id='info']"):
                director = info.xpath("./span[1]//a/text()")[0]  # director
                movie_type = info.xpath("./span[@property='v:genre']//text()")  # genres
                movie_type = r'/'.join(str(d) for d in movie_type)
                film_length = info.xpath("./span[@property='v:runtime']/@content")[0]  # runtime
            # Saving needs an application context since this runs outside any request.
            with web.create_app().app_context():
                MovieData(title=title, year=year, score=score, img_url=img_url2, director=director,
                          movie_type=movie_type, film_length=film_length, people_num=people_num,
                          info_url=u).save()
            now_schedule = int(count / (page_count * 25) * 100)  # overall progress, percent
            img_download(img_url, title, count, now_schedule)
            spider_message(f'正在获取>>{title}<<影视信息并下载图片', count, now_schedule)
        except Exception as e:
            # NOTE(review): "list" appearing in the URL is treated as Douban
            # having blocked the scraper — confirm this heuristic.
            text = '已被豆瓣屏蔽'
            print(e)
            spider_message(f'影视信息获取失败,原因:{text if "list" in u else e}')


def main() -> None:
    """
    Entry point: validate the configured page count, then fan the scrape
    out across a thread pool (one task per 25-movie list page).
    """
    global count
    count = 0
    # Make sure the poster directory exists before any download starts.
    if not os.path.exists(path):
        os.makedirs(path)
    if not 0 < page_count < 11:
        print('请检查页数是否超过10页')
        return
    spider_message(f'爬虫开始运行，共开启{thread_num}个线程')
    with ThreadPoolExecutor(max_workers=thread_num, thread_name_prefix='spider-thread') as pool:
        for start in range(0, page_count * 25, 25):
            future = pool.submit(movie_info_url, start)
            # movie_info_data consumes the future's result once the page is fetched.
            future.add_done_callback(movie_info_data)
    # Leaving the `with` block performs the same waiting shutdown the original
    # invoked explicitly, so `count` is final when the message is sent.
    spider_message(f'抓取完成，总共抓取>>{count}<<条数据', count, 100, 200)
