import csv
import logging
import random
import time

import requests

from .config import HEADERS
from .CommentSpider import detail_collector

# Module docstring (Chinese). Summary: a seeded random index generator produces
# listing-page offsets from user-supplied page count and seed; each listing page
# is fetched, and every movie's details (collected via detail_collector from the
# movie page URL) are written to a CSV file immediately after each request.
__doc__ = """
根据用户输入的爬取页数和随机种子，随机序号生成器生成序号列表，然后根据序号列表进行电影信息的爬取。
根据电影页面链接，使用detail_collector收集信息，并每访问一次网页立刻将数据存入csv文件。
"""


def random_index_generator(page=10, seed=9527):  # random index generator
    """Yield `page` distinct random listing-page offsets in [0, 300].

    Rejection sampling with a fixed seed, so the same (page, seed) pair
    always reproduces the same sequence (keeps crawls repeatable).

    Args:
        page: number of distinct indices to yield. Must not exceed 301,
            the size of the [0, 300] value pool — otherwise the original
            rejection loop would spin forever once the pool is exhausted.
        seed: seed for the `random` module, for reproducibility.

    Raises:
        ValueError: if page > 301 (checked on first iteration, since this
            is a generator).
    """
    if page > 301:
        raise ValueError('page must be <= 301: only 301 distinct values exist in [0, 300]')
    index_pool = set()
    random.seed(seed)
    for _ in range(page):
        index = random.randint(0, 300)
        # Re-draw until we get a value not yielded before.
        while index in index_pool:
            index = random.randint(0, 300)
        index_pool.add(index)
        yield index

def crawl_data_as_csv(page=10, seed=9527):
    """Crawl random Douban "hot movies" listing pages and save details to CSV.

    Writes a header row, then appends one row per movie to
    ``./dataset/<page>_<seed>电影数据.csv`` immediately after each detail
    page is collected (so partial results survive a crash).

    Args:
        page: number of listing pages to fetch (each holds up to 20 movies).
        seed: random seed forwarded to random_index_generator, so the same
            arguments reproduce the same crawl.

    Returns:
        int: total number of movie entries seen, including entries whose
        detail page failed to parse.
    """
    # Column order: movie name, release year, director, country, genres, runtime, rating.
    header = ['电影名称', '上映时间', '导演', '制片国家', '剧情类型', '片长', '评分']
    filename = str(page) + '_' + str(seed) + '电影数据.csv'
    filepath = './dataset/' + filename
    # csv.writer quotes fields as needed, so values containing commas no
    # longer corrupt the row layout (plain ','.join did).
    with open(filepath, 'w', encoding='utf8', newline='') as f:
        csv.writer(f).writerow(header)
    total = 0
    # Listing API: Douban -> 选电影 -> 热门 -> sorted by time; at most 20 movies
    # per page. Hoisted out of the loop — it is loop-invariant.
    start_url = ('https://movie.douban.com/j/search_subjects?type=movie'
                 '&tag=%E7%83%AD%E9%97%A8&sort=time&page_limit=20&page_start={}')
    for index in random_index_generator(page, seed):
        response = requests.get(url=start_url.format(index), headers=HEADERS)
        movie_json = response.json()
        for item in movie_json['subjects']:
            total += 1
            try:
                # movie name, release year, director, country, genres, runtime, rating
                _, _, itemreviewed, show_year, director, country, genre_list, runtime, average, _ = detail_collector(item['url'])
            except Exception:
                # Narrowed from a bare `except` so KeyboardInterrupt/SystemExit
                # still propagate; logging.exception records the traceback
                # while keeping the crawl best-effort.
                logging.exception('failed to collect %s', item['url'])
            else:
                with open(filepath, 'a', encoding='utf8', newline='') as f:
                    csv.writer(f).writerow([itemreviewed, show_year, director, country, genre_list, runtime, average])
        time.sleep(2)  # throttle between listing pages to be polite to the server
    return total

def random_index_generator2(page=10, seed=9527):  # random index generator
    """Yield `page` distinct random listing-page offsets in [0, 300].

    This was a byte-for-byte duplicate of random_index_generator; it now
    delegates so the sampling logic lives in one place. Because the seed
    and drawing order are unchanged, the same (page, seed) pair yields
    exactly the same sequence as before.
    """
    yield from random_index_generator(page, seed)

def crawl_data_as_csv2(page=10, seed=9527):
    """Crawl random Douban "hot movies" pages and save details to CSV.

    This was a byte-for-byte duplicate of crawl_data_as_csv; it now
    delegates to it (same file name, same CSV contents, same return
    value). Kept as a thin wrapper for backward compatibility with
    existing callers.

    Args:
        page: number of listing pages to fetch (each holds up to 20 movies).
        seed: random seed, so the same arguments reproduce the same crawl.

    Returns:
        int: total number of movie entries seen.
    """
    return crawl_data_as_csv(page, seed)
