#! /usr/bin/env python3
# -*- coding: utf-8 -*-
from client import get, log
from bs4 import BeautifulSoup
import json
import os, sys


class DoubanDownload(object):
    """Download the Douban Top-250 list pages and save each one as HTML.

    Pages hold 25 entries each and are fetched by advancing the ``start``
    query parameter; each page is written to ``html_save_path`` as
    ``{start}-{start+25}.html``. Downloading begins immediately on
    construction.
    """

    def __init__(self, url, html_save_path):
        self.html_save_path = html_save_path  # directory prefix for output files
        self.base_url = url
        self.__item = 0        # offset of the first entry on the current page
        self.max_num = 250     # Top-250: stop once every entry is covered
        self.querys = {
            "start": self.__item,
            "filter": '',
        }
        self.download_all_html()

    def download_all_html(self):
        """Fetch pages until ``max_num`` entries are covered.

        On success advance a full page (25 entries); on failure advance by
        one so a single bad offset cannot loop forever.
        """
        while self.__item < self.max_num:
            html = self.download_html()
            if html:
                self.save_html(html)
                self.__item += 25
            else:
                log('no html from website, item ', self.__item, level='debug')
                self.__item += 1
            self.querys['start'] = self.__item

    def download_html(self):
        """Fetch one page; return its HTML, or ``''`` on a bad status.

        NOTE(review): assumes ``client.get`` returns the status code as a
        string (``'200'``/``'201'``) — confirm against client.get.
        """
        log('start download html, Url: ', self.base_url, )
        log('start download html, querys: ', self.querys, )
        s, h, html = get(self.base_url, self.querys)
        if s not in ['200', '201']:
            html = ''
        return html

    def make_html_save_path(self):
        """Return the output file path for the current page offset."""
        return '{}{}-{}.html'.format(
            self.html_save_path, self.__item, self.__item + 25)

    def save_html(self, html):
        """Write one page's HTML to its per-offset file.

        Bug fix: the explicit ``encoding='utf-8'`` is required — the
        platform default codec (e.g. cp1252/gbk on Windows) can fail on
        the Chinese text in these pages.
        """
        path = self.make_html_save_path()
        with open(path, 'w', encoding='utf-8') as f:
            f.write(html)
            log('save html succeful, file : ', path, level='debug' )

class DoubanParsed(object):
    """Parse saved Top-250 HTML pages into a movie dict and persist it.

    Result shape: ``{zh_name: {en_name, other_name, item, rating_num,
    score_people, quote, director, star, year, country, genre}}``,
    serialized to ``douban.json``.
    """

    def __init__(self, load_dir):
        self.load_dir = load_dir        # directory holding the downloaded pages
        self.movie = {}                 # zh_name -> movie attribute dict
        self.movie_item = 1             # running rank, 1-based
        self.data_path = 'douban.json'  # JSON output file

    def parsed_htmls(self):
        """Parse every saved HTML file in ``load_dir``."""
        for html in self.load_htmls():
            self.html = html
            self.parsed_html()

    def load_htmls(self):
        """Yield an open file object for each saved HTML page.

        Each file stays open only while the consumer processes it — the
        ``with`` closes it when the generator is resumed.
        """
        for path in self.path_of_htmls():
            # Pages were saved as UTF-8 (they contain CJK text); rely on an
            # explicit encoding rather than the platform default.
            with open(path, 'r', encoding='utf-8') as f:
                log('start parsed html,file name: ', path, level='debug')
                yield f

    def path_of_htmls(self):
        """Return full paths of every file found in ``load_dir``."""
        htmls = os.listdir(self.load_dir)
        log('html file names: ', htmls, level='debug')
        return [os.path.join(self.load_dir, html) for html in htmls]

    def parsed_html(self):
        """Parse ``self.html`` (one page) and record every movie on it."""
        self.init_soup()
        for movie in self.find_all_movie():
            self.set_movie(movie)

    def init_soup(self):
        # NOTE(review): 'lxml' requires the third-party lxml package;
        # 'html.parser' would drop that dependency but may parse edge
        # cases slightly differently — confirm before switching.
        self.soup = BeautifulSoup(self.html, 'lxml')

    def find_all_movie(self):
        """Return the ``<li>`` nodes of the page's movie list."""
        wrapper = self.soup.find('div', id='wrapper')
        return wrapper.find('ol', class_="grid_view").find_all('li')

    def set_movie(self, movie):
        """Extract one movie's fields and store them under its Chinese name."""
        hd = self.find_movie_hd(movie)
        zh_name, en_name, other_name = self.parsed_movie_hd(hd)

        bd = self.find_movie_bd(movie)
        other, rating_num, score_people, quote = self.parsed_movie_bd(bd)
        director, star, year, country, genre = self.parsed_movie_other(other)

        self.movie[zh_name] = {
            'en_name': en_name,
            'other_name': other_name,
            'item': self.movie_item,
            'rating_num': rating_num,
            'score_people': score_people,
            'quote': quote,
            'director': director,
            'star': star,
            'year': year,
            'country': country,
            'genre': genre,
        }
        self.movie_item += 1

    def find_movie_hd(self, movie):
        """Return the movie's title block (``<div class="hd">``)."""
        return movie.find('div', class_='hd')

    def parsed_movie_hd(self, hd):
        """Return (Chinese name, English name, other names)."""
        titles = hd.find_all('span', class_='title')
        other = hd.find('span', class_='other').text.strip()
        zh = titles[0].text.strip()
        # Some entries carry no separate English title span.
        en = titles[1].text.strip() if len(titles) > 1 else ""
        return zh, en, other

    def find_movie_bd(self, movie):
        """Return the movie's detail block (``<div class="bd">``)."""
        return movie.find('div', class_='bd')

    def parsed_movie_bd(self, bd):
        """Return (info text, rating, vote-count text, quote)."""
        other = bd.find('p', class_='').text.strip()

        star = bd.find(class_='star').find_all('span')
        rating_num, score_people = star[1].text.strip(), star[-1].text.strip()

        # Bug fix: a few Top-250 entries have no quote line at all, so
        # find() returns None and the old `.text` raised AttributeError.
        inq = bd.find(class_='inq')
        quote = inq.text.strip() if inq is not None else ''
        return other, rating_num, score_people, quote

    def parsed_movie_other(self, other):
        """Split the info text into (director, star, year, country, genre).

        Expected layout: first line ``导演: D   主演: S``, second line
        ``YEAR / COUNTRY / GENRE``.
        """
        # Robustness fix: index the first two lines instead of tuple-unpack,
        # so extra trailing lines no longer raise ValueError.
        lines = other.strip().split('\n')
        d_a, _y_c_t = lines[0], lines[1]
        a = d_a.split('主演: ')[-1]
        d = d_a.split('主演: ')[0].split('导演: ')[-1].strip()
        y_c_t = [i.strip() for i in _y_c_t.strip().rsplit('/', 2)]
        try:
            [y, c, t] = y_c_t
        except Exception as e:
            log('y c t parsed error: ', e, level='error' )
            log('y c t parsed error,y_c_t: ', y_c_t, level='error' )
            y = c = t = ''
        return d, a, y, c, t

    def save_data(self):
        """Dump the collected movies to ``data_path`` as JSON.

        ensure_ascii=False keeps Chinese titles human-readable in the file;
        json.load reads both escaped and raw forms identically.
        """
        with open(self.data_path, 'w', encoding='utf-8') as w:
            json.dump(self.movie, w, ensure_ascii=False)

    def load_data(self):
        """Load and return the movie dict from ``data_path``."""
        with open(self.data_path, 'r', encoding='utf-8') as r:
            data = json.load(r)

        return data

    def run(self):
        """Parse all saved pages, then persist the result."""
        self.parsed_htmls()
        self.save_data()


def download():
    """Ensure the output directory exists, then fetch all Top-250 pages."""
    url = r'https://movie.douban.com/top250'
    html_save_path = r'download/'
    # Bug fix: the old `try: os.mkdir(...) except Exception: pass` also
    # swallowed real failures (permissions, path exists as a file);
    # exist_ok=True only ignores "directory already exists".
    os.makedirs(html_save_path, exist_ok=True)

    DoubanDownload(url, html_save_path)


def parsed():
    """Parse the downloaded pages, persist the result, and print a summary."""
    parser = DoubanParsed(r'download/')
    parser.run()
    data = parser.load_data()
    print(data.keys())
    print(data.popitem())
    print(len(data))


# Script entry point: download every list page first, then parse them all
# into douban.json and print a short summary.
if __name__ == '__main__':
    download()
    parsed()
