# coding: utf-8

import sys, os
from time import sleep

BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASE_PATH)
from bs4 import BeautifulSoup
import requests
import json
from ip_common import log
from douban_common import file
class generator(object):
    """Crawl Douban movie detail pages and extract structured metadata.

    Each request dict is expected to carry the keys "id", "name", "top" and
    "img_url" (produced by the preceding topic-list crawler stage). Fetched
    results are accumulated in ``self.data`` and also appended to a JSON
    file after every page so progress survives a crash.
    """

    # Seconds to wait between requests so we do not hammer Douban.
    REQUEST_DELAY = 4

    def __init__(self):
        # Request template and browser-like headers. The session cookie is
        # hard-coded and will eventually expire — refresh it when requests
        # start coming back as login walls.
        self.conf = {
            "url": "http://movie.douban.com/subject/{id}/",
            "headers": {
                'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
                # 'Accept-Encoding': 'gzip, deflate, br',  # content encoding
                'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
                'Cache-Control': 'max-age=0',
                'Connection': 'keep-alive',
                'Host': 'movie.douban.com',
                'User-Agent': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)',
                'Cookie': 'll="118254"; bid=CWSQirOX9cQ; _vwo_uuid_v2=D62DA3815963313406CC297CBF7E8E337|70f72cae6621a9b79ed85d832fc3ed9d; __utmc=30149280; __utmc=223695111; dbcl2="154911806:4kyfVIxYxqY"; ck=GLHH; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1638204148%2C%22https%3A%2F%2Faccounts.douban.com%2F%22%5D; _pk_ses.100001.4cf6=*; ap_v=0,6.0; __utma=30149280.795773592.1638027936.1638177892.1638204149.10; __utmb=30149280.0.10.1638204149; __utmz=30149280.1638204149.10.3.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; __utma=223695111.615407103.1638027936.1638177892.1638204149.10; __utmb=223695111.0.10.1638204149; __utmz=223695111.1638204149.10.3.utmcsr=accounts.douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; push_noty_num=0; push_doumail_num=0; _pk_id.100001.4cf6=4c1c0520d512046e.1638027935.10.1638204154.1638177928.'
            },
        }
        # Accumulated parse results, one dict per successfully parsed movie.
        self.data = []

    def get_result_dict(self, mv_name, mv_region, mv_release,
                        mv_length, mv_score, mv_top,
                        mv_introduce, mv_image_url,
                        mv_en_name, mv_douban_id, mv_director,
                        mv_star, mv_type, mv_writer):
        """Pack the extracted fields into the output record.

        ``mv_introduce`` is accepted for interface stability but is
        deliberately NOT included in the record (it is never populated by
        ``parse`` and was excluded in the original output format).
        """
        return {
            "mv_name": mv_name,
            "mv_region": mv_region,
            "mv_release": mv_release,
            "mv_length": mv_length,
            "mv_score": mv_score,
            "mv_top": mv_top,
            "mv_image_url": mv_image_url,
            "mv_en_name": mv_en_name,
            "mv_douban_id": mv_douban_id,
            "mv_director": mv_director,
            "mv_star": mv_star,
            "mv_type": mv_type,
            "mv_writer": mv_writer,
        }

    def get(self, request_list):
        """Fetch and parse the detail page for every request dict.

        Returns the accumulated list of result dicts. Pages that fail to
        parse are skipped (previously a ``None`` was appended and persisted).
        """
        conf = self.conf
        for req_dict in request_list:
            request_url = conf["url"].format(id=req_dict["id"])
            response = requests.get(url=request_url, headers=conf["headers"], allow_redirects=False)
            # Douban 301-redirects http -> https; follow it manually once so
            # our explicit Host/Cookie headers are re-sent.
            if response.status_code == 301:
                response = requests.get(url=response.headers['Location'], headers=conf["headers"], allow_redirects=False)
            res_data = self.parse(response.text, req_dict)
            if res_data is None:
                # Unexpected page layout (login wall / error page) — skip
                # instead of recording a useless None entry.
                log.info("parse failed for douban id %s" % req_dict["id"])
                continue
            log.info(res_data)
            self.data.append(res_data)
            # NOTE: insert_data appends by rewriting the whole file each time
            # (O(n^2) overall); rebuilding from the log would be cheaper.
            file.insert_data(res_data, "topic2detail_top250_data.json")
            sleep(self.REQUEST_DELAY)
        return self.data

    def parse(self, content, req_dict):
        """Extract movie fields from a detail-page HTML string.

        Returns a result dict (see ``get_result_dict``), or ``None`` when
        the page does not contain exactly one ``div#content``.
        """
        bf = BeautifulSoup(content, features="lxml")
        content = bf.findAll("div", attrs={"id": "content"})
        if len(content) != 1:
            return None
        content = content[0]
        # Title block: <h1><span>name</span><span>(year)</span></h1>
        name_year_temp = content.findAll("h1")[0].findAll("span")
        mv_en_name = name_year_temp[0].string
        # Strip the surrounding parentheses from e.g. "(1994)".
        mv_release = int(name_year_temp[1].string[1:-1])
        score_temp = content.findAll("strong", attrs={"class": "ll rating_num"})
        mv_score = float(score_temp[0].string)
        all_info_temp = content.findAll("div", attrs={"id": "info"})[0]
        all_info_temp = all_info_temp.text.split("\n")
        mv_director = []
        mv_star = []
        mv_writer = []
        mv_type = []
        mv_region = ""
        mv_length = 0
        mv_introduce = ""
        # The #info box is "key: value" lines; values are "/"-separated lists.
        for item in all_info_temp:
            if len(item) < 2:
                continue
            kv_list = item.replace(" ", "").split(":")
            if len(kv_list) < 2:
                continue
            key, value = kv_list[0], kv_list[1]
            if key == "导演":
                # Keep at most two directors.
                mv_director.extend(value.split("/")[:2])
            elif key == "编剧":
                # Keep at most two writers.
                mv_writer.extend(value.split("/")[:2])
            elif key == "主演":
                # Keep at most four leading actors.
                mv_star.extend(value.split("/")[:4])
            elif key == "类型":
                mv_type.extend(value.split("/"))
            elif key == "制片国家/地区":
                # First listed country/region only.
                mv_region = value.split("/")[0]
            elif key == "片长":
                # e.g. "142分钟" (first variant when several cuts are listed).
                mv_length = int(value.split("/")[0].split("分钟")[0])
        # Fields carried over from the topic-list crawler stage.
        mv_name = req_dict["name"]
        mv_top = req_dict["top"]
        mv_image_url = req_dict["img_url"]
        mv_douban_id = req_dict["id"]
        return self.get_result_dict(mv_name, mv_region, mv_release,
                                    mv_length, mv_score, mv_top,
                                    mv_introduce, mv_image_url,
                                    mv_en_name, mv_douban_id, mv_director,
                                    mv_star, mv_type, mv_writer)
if __name__ == "__main__":
    # Reset the output file, then crawl a detail page for every movie in the
    # top-250 list produced by the previous pipeline stage.
    file.save_json([], "topic2detail_top250_data.json")
    request_list = file.read_json("web2topic_top250_data.json")["data"]
    crawler = generator()
    results = crawler.get(request_list)

