#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
采用BeautifulSoup解析html并获取内容
注解
"""

__author__ = 'hubert'

from bs4 import BeautifulSoup
import requests

import logging
logging.basicConfig(level=logging.INFO)


class Movie(object):
    """Plain container for the fields scraped from a movie info page.

    Every field defaults to the empty string so a partially scraped page
    still yields a usable object.
    """

    def __init__(self):
        # Chinese title
        self.name = ''
        # foreign (original) title
        self.foreignname = ''
        # director
        self.director = ''
        # leading actors
        self.actor = ''
        # screenwriter
        self.screenwriter = ''
        # release year / production date
        self.releaseyear = ''
        # production region
        self.area = ''
        # genre
        self.type = ''
        # running time
        self.duration = ''

    def __repr__(self):
        # The script prints the object directly; show the scraped data
        # instead of the default <object at 0x...> address.
        return ('Movie(name={0.name!r}, foreignname={0.foreignname!r}, '
                'director={0.director!r}, actor={0.actor!r}, '
                'screenwriter={0.screenwriter!r}, releaseyear={0.releaseyear!r}, '
                'area={0.area!r}, type={0.type!r}, duration={0.duration!r})'
                .format(self))


def __parser_douban__(url):
    """Fetch a Douban page and return it parsed as a BeautifulSoup tree.

    :param url: full URL of the Douban page to fetch.
    :return: BeautifulSoup tree built with the lxml parser.
    """
    # Spoof a desktop browser so the site serves the normal HTML page.
    user_agent = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063')
    response = requests.get(url, headers={'User-Agent': user_agent})
    return BeautifulSoup(response.text, 'lxml')


# Baidu Baike answers with 302 redirects; using a Session (cookie
# persistence) plus browser-like headers keeps the redirect chain working.
def __parser_baike__(url):
    """Scrape the basic-info box of a Baidu Baike movie page into a Movie.

    :param url: full URL of a Baike item page (https://baike.baidu.com/item/...).
    :return: Movie instance; fields absent on the page stay ''.
    """
    headers = {'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',
               # was 'Accept - Encoding' (spaces) -- not a valid header name
               'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-Hans-CN, zh-Hans; q=0.5',
               'Connection': 'Keep-Alive',
               'Host': 'baike.baidu.com',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063',
               'X-Requested-With': 'XMLHttpRequest'}
    movie = Movie()
    s = requests.Session()
    r = s.get(url, headers=headers)
    # Force UTF-8 decoding (was misspelled 'uft-8', so it never took effect).
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'lxml')

    # Page label -> Movie attribute, left column of the basic-info box.
    # NOTE: '制片地区' was wrongly stored into movie.actor in the old code;
    # it now goes to movie.area as the attribute name intends.
    _fill_from_info_block(soup, 'basicInfo-block basicInfo-left', movie, {
        '中文名': 'name',
        '外文名': 'foreignname',
        '出品时间': 'releaseyear',
        '制片地区': 'area',
        '导演': 'director',
        '编剧': 'screenwriter',
    })
    logging.info('movie_name: %s', movie.name)

    # Right column is applied second, so for labels present in both columns
    # (导演/编剧) the right-hand value wins — same order as the old code.
    _fill_from_info_block(soup, 'basicInfo-block basicInfo-right', movie, {
        '导演': 'director',
        '编剧': 'screenwriter',
        '类型': 'type',
        '主演': 'actor',
        '片长': 'duration',
    })
    return movie


def _fill_from_info_block(soup, css_class, movie, label_to_attr):
    """Copy <dt>label / <dd>value pairs of one basic-info column onto movie.

    :param soup: parsed page tree.
    :param css_class: class of the <dl> column to read.
    :param movie: Movie instance to fill in place.
    :param label_to_attr: mapping from page label text to Movie attribute name.
    """
    block = soup.find(class_=css_class)
    if block is None:
        # Column missing on this page layout; leave fields at their defaults.
        return
    # zip() tolerates a <dd> list shorter than the <dt> list, which the old
    # code handled with bare try/except IndexError around each assignment.
    for dt, dd in zip(block.find_all('dt'), block.find_all('dd')):
        label = dt.text.replace(u'\xa0', u'').strip()  # drop &nbsp;
        attr = label_to_attr.get(label)
        if attr is not None:
            setattr(movie, attr, dd.text.strip())


def __parser_baiduvideo__(url):
    """Fetch a Baidu Video search page and return it as a BeautifulSoup tree.

    Uses a Session plus browser-like headers (same redirect workaround as
    the Baike fetcher).

    :param url: full URL of the v.baidu.com page to fetch.
    :return: BeautifulSoup tree built with the lxml parser.
    """
    headers = {'Accept': 'text/html, application/xhtml+xml, image/jxr, */*',
               # was 'Accept - Encoding' (spaces) -- not a valid header name
               'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-Hans-CN, zh-Hans; q=0.5',
               'Connection': 'Keep-Alive',
               'Host': 'v.baidu.com',
               'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/52.0.2743.116 Safari/537.36 Edge/15.15063',
               'X-Requested-With': 'XMLHttpRequest'}
    s = requests.Session()
    r = s.get(url, headers=headers)
    # Force UTF-8 decoding (was misspelled 'uft-8', so it never took effect).
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'lxml')
    return soup


def excel_insert(sheet, moviedata, row):
    """Write one scraped movie into worksheet row `row`, one field per column.

    :param sheet: worksheet exposing write(row, col, value) -- presumably an
        xlwt/xlsxwriter sheet, TODO confirm against the caller.
    :param moviedata: Movie instance whose fields are written in fixed order.
    :param row: 0-based row index to fill.
    """
    # The old version was a stub: it ignored moviedata entirely and called
    # sheet.write(row, '', '') with a non-integer column index.
    values = (moviedata.name, moviedata.foreignname, moviedata.director,
              moviedata.actor, moviedata.screenwriter, moviedata.releaseyear,
              moviedata.area, moviedata.type, moviedata.duration)
    for col, value in enumerate(values):
        sheet.write(row, col, value)


# If pip cannot install a third-party package directly, install it offline
# from a downloaded wheel instead, e.g.:
#   python -m pip install lxml-4.2.3-cp37-cp37m-win_amd64.whl
if __name__ == '__main__':
    # Other sources tried during development:
    #   https://movie.douban.com/subject/24773958/       (Douban)
    #   http://v.baidu.com/v?ie=utf-8&word=血色浪漫        (Baidu Video)
    url = 'https://baike.baidu.com/item/血色浪漫'
    movie = __parser_baike__(url)
    print(movie)