# coding: utf-8

import sys, os
from time import sleep

BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_PATH)
from bs4 import BeautifulSoup
import requests
import json
from ip_common import log
from douban_common import file
class generator(object):
    """Scraper for the Douban Movie Top 250 listing.

    Fetches the paginated listing HTML, extracts per-movie metadata
    (rank, id, name, detail URL, poster image URL) and accumulates the
    parsed records in ``self.data``.
    """

    def __init__(self):
        # Static request configuration: listing URL, paging query template,
        # page size, and browser-like headers (Douban rejects bare clients).
        self.conf = {
            "base_url": "http://movie.douban.com/top250",
            "page_ctrl": "?start={page}&filter=",
            "offset": 25,  # movies per listing page
            "headers": {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
                # 'Accept-Encoding': 'gzip, deflate, br',
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                'Cache-Control': 'max-age=0',
                'Connection': 'keep-alive',
                'Host': 'movie.douban.com',
                'sec-ch-ua': '"Chromium";v="94", "Microsoft Edge";v="94", ";Not A Brand";v="99"',
                'sec-ch-ua-mobile': '?0',
                'sec-ch-ua-platform': '"Windows"',
                'Sec-Fetch-Dest': 'document',
                'Sec-Fetch-Mode': 'navigate',
                'Sec-Fetch-Site': 'none',
                'Sec-Fetch-User': '?1',
                'Upgrade-Insecure-Requests': '1',
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36 Edg/94.0.992.47'
            },
        }
        # Parsed movie records accumulated across all requested pages.
        self.data = []

    def get_result_dict(self, mv_top, mv_url, mv_id, mv_name, mv_img_url):
        """Assemble one movie record.

        :param mv_top: rank on the Top-250 list (string as scraped)
        :param mv_url: movie detail-page URL
        :param mv_id: Douban movie id (extracted from the URL)
        :param mv_name: movie title
        :param mv_img_url: poster image URL
        :return: dict with keys id/name/top/request_url/img_url
        """
        return {
            "id": mv_id,
            "name": mv_name,
            "top": mv_top,
            "request_url": mv_url,
            "img_url": mv_img_url,
        }

    def get(self, page):
        """Fetch and parse the given listing pages.

        :param page: iterable of 1-based page numbers, e.g. [1, 2, 3]
                     requests the first three pages.
        :return: ``self.data`` with the newly parsed records appended.
        :raises requests.HTTPError: if a page request returns an error status.
        """
        conf = self.conf
        for item in page:
            # Douban pages via a 0-based "start" offset: page N starts at (N-1)*25.
            request_url = conf["base_url"] + conf["page_ctrl"].format(page=(item - 1) * conf["offset"])
            response = requests.get(url=request_url, headers=conf["headers"])
            # Fail fast on 4xx/5xx instead of silently parsing an error page.
            response.raise_for_status()
            data = self.parse(response.text)
            self.data.extend(data)
            log.info("第{page}页，解析成功".format(page=item))
            # Throttle requests to stay polite and avoid anti-bot blocking.
            sleep(3)
        return self.data

    def parse(self, content):
        """Extract movie records from one listing page's HTML.

        :param content: raw HTML of a Top-250 listing page
        :return: list of movie dicts (empty if the page layout is unexpected)
        """
        temp_list = []
        bf = BeautifulSoup(content, features="lxml")
        # A valid listing page contains exactly one <ol> holding the movies;
        # anything else means we got an unexpected page — bail out early.
        page_table = bf.find_all("ol")
        if len(page_table) != 1:
            log.error("页面没有数据表格")
            return temp_list
        for pic in bf.find_all("div", attrs={"class": "pic"}):
            mv_top = pic.find("em").string
            mv_url = pic.find("a")["href"]
            # Detail URLs look like .../subject/<id>/ — the id is the
            # second-to-last path segment.
            mv_id = mv_url.split("/")[-2]
            img = pic.find("img")
            mv_name = img["alt"]
            mv_img_url = img["src"]
            temp_list.append(self.get_result_dict(mv_top, mv_url, mv_id, mv_name, mv_img_url))
        return temp_list


if __name__ == "__main__":
    # Crawl all ten listing pages (250 movies) and persist them as JSON.
    crawler = generator()
    records = crawler.get(page=list(range(1, 11)))
    file.save_json(records, "web2topic_top250_data.json")

