# -*- coding: utf-8 -*-

import urllib.request
import urllib

import pymysql
from requests.exceptions import RequestException
from bs4 import BeautifulSoup
import ssl
import random
import time
import requests

# Allow HTTPS fetches even when the certificate cannot be verified.
# NOTE(review): this disables TLS verification process-wide — a security
# trade-off, presumably accepted here for scraping convenience; confirm.
ssl._create_default_https_context = ssl._create_unverified_context
# Pool of real-browser User-Agent strings; ua() picks one at random per
# request so successive requests look like different clients.
uapools = [
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
    "Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
    "Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
    "Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
    "Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
    "Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
    "Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
    "Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
    "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]


def ua(uapools):
    """Install a urllib opener that sends a randomly chosen User-Agent.

    Parameters
    ----------
    uapools : list[str]
        Candidate User-Agent strings; one is picked at random per call.

    Returns
    -------
    dict
        The header mapping that was installed — handy for passing straight
        to ``requests``. (Previous versions returned ``None``; returning a
        value is backward-compatible, existing callers ignore it.)
    """
    thisua = random.choice(uapools)
    headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
               'Accept-Encoding': 'gzip, deflate',
               'Accept-Language': 'zh-CN,zh;q=0.8',
               'Cache-Control': 'no-cache',
               'Connection': 'keep-alive',
               'Host': 'blog.csdn.net',
               'Pragma': 'no-cache',
               'Referer': 'http://blog.csdn.net/a394268045/article/list/2',
               'User-Agent': thisua}
    opener = urllib.request.build_opener()
    # BUG FIX: OpenerDirector.addheaders must be a list of (name, value)
    # tuples; the old code assigned the raw dict, which makes urllib fail
    # as soon as the opener is actually used for a request.
    opener.addheaders = list(headers.items())
    urllib.request.install_opener(opener)
    return headers


def getHtml(url):
    """Download *url* and return its body decoded as UTF-8 (bad bytes ignored).

    BUG FIX: the original installed rotating headers on urllib's global
    opener via ua(), but then fetched with ``requests``, which ignores that
    opener — so the random User-Agent was never actually sent. The UA header
    is now passed to ``requests.get`` directly.
    """
    ua(uapools)  # keep installing the urllib opener (preserves old side effect)
    headers = {'User-Agent': random.choice(uapools)}
    return requests.get(url, headers=headers, timeout=30).content.decode("utf-8", "ignore")


# Global switch: set to False to silence every log() call at once.
DEBUG = True


def log(obj):
    """Print *obj* to stdout when the module-level DEBUG flag is on."""
    if not DEBUG:
        return
    print(obj)


class MovieListItem:
    """One entry scraped from the Maoyan top-100 board.

    Attributes mirror the constructor arguments; all are plain strings.
    """

    def __init__(self, movie_id="", cover_url="", movie_name="", actor="", release_time="", movie_introduce=""):
        self.movie_id = movie_id  # Maoyan movie id (digits as a string)
        self.cover_url = cover_url  # poster image URL
        self.movie_name = movie_name  # movie title
        self.actor = actor  # leading-actor line from the board page
        self.release_time = release_time  # release-date line from the board page
        self.movie_introduce = movie_introduce  # synopsis from the detail page

    def __repr__(self):
        # Added for debuggability; the default object repr hid all fields.
        return "MovieListItem(movie_id=%r, movie_name=%r)" % (self.movie_id, self.movie_name)


class CelebrityListItem:
    """One cast/crew member scraped from a movie detail page.

    All attributes are plain strings.
    """

    def __init__(self, movie_id="", celebrity_type="", cover_url="", celebrity_name="", celebrity_role=""):
        self.movie_id = movie_id  # id of the movie this person belongs to
        self.celebrity_type = celebrity_type  # group heading, e.g. director/actor
        self.cover_url = cover_url  # portrait photo URL
        self.celebrity_name = celebrity_name  # person's real name
        self.celebrity_role = celebrity_role  # character played (actors only, may be "")

    def __repr__(self):
        # Added for debuggability; the default object repr hid all fields.
        return "CelebrityListItem(movie_id=%r, celebrity_name=%r)" % (self.movie_id, self.celebrity_name)


class CommentListItem:
    """One user comment scraped from a movie detail page.

    All attributes are plain strings.
    """

    def __init__(self, movie_id="", nick_name="", cover_url="", comment_date="", comment_content=""):
        self.movie_id = movie_id  # id of the movie being commented on
        self.nick_name = nick_name  # commenter's display name
        self.cover_url = cover_url  # commenter's avatar URL
        self.comment_date = comment_date  # date string shown next to the comment
        self.comment_content = comment_content  # comment body text

    def __repr__(self):
        # Added for debuggability; the default object repr hid all fields.
        return "CommentListItem(movie_id=%r, nick_name=%r)" % (self.movie_id, self.nick_name)


# Movie records scraped from the board pages (MovieListItem instances).
movieList = []
# Cast/crew records for every movie (CelebrityListItem instances).
celebrityListInfo = []
# User comment records for every movie (CommentListItem instances).
commentListInfo = []


def get_data(url):
    """Scrape every movie entry on one Maoyan board page into movieList.

    Each <dd> under the board wrapper is one movie; the synopsis comes from
    a follow-up request to the detail page via get_movie_detail().
    """
    soup = BeautifulSoup(getHtml(url), 'lxml')
    for entry in soup.find('dl', 'board-wrapper').findAll('dd'):
        info = entry.find('div', 'movie-item-info')
        paragraphs = info.findAll('p')
        title_link = paragraphs[0].find('a')
        # The id is embedded as "{movieId:NNN}" in the data-val attribute.
        movie_id = title_link.attrs['data-val'].replace("{movieId:", "").replace("}", "")
        # Poster: second <img> under the entry anchor, lazy-loaded via data-src.
        cover_url = entry.find('a').findAll('img')[1].attrs['data-src']
        movie_name = title_link.attrs['title']
        # Drop inline spaces, then keep the last whitespace-separated token
        # (the value that follows the label text).
        actor = paragraphs[1].get_text().replace(' ', '').split()[-1]
        release_time = paragraphs[2].get_text().replace(' ', '').split()[-1]
        movie_introduce = get_movie_detail(movie_id)
        movieList.append(
            MovieListItem(movie_id, cover_url, movie_name, actor, release_time, movie_introduce))


# Fetch a movie's detail page and extract its synopsis, cast list and comments.
def get_movie_detail(movie_id):
    """Scrape the Maoyan detail page for *movie_id*.

    Side effects: appends CelebrityListItem rows to celebrityListInfo and
    CommentListItem rows to commentListInfo. Returns the synopsis text.
    """
    url = "http://maoyan.com/films/" + movie_id
    time.sleep(5)  # throttle: 5 s between requests to dodge anti-scraping
    log("准备获取" + url + "中的数据...")
    html = BeautifulSoup(getHtml(url), 'lxml')
    # Synopsis lives in a <span class="dra"> element.
    movie_introduce = html.find('span', 'dra').get_text()

    # ------------------------------ cast & crew start ------------------------------
    log("*" * 20 + "封装演职人员信息开始" + "*" * 20)
    celebrity_group = html.find('div', 'celebrity-container clearfix').findAll('div', 'celebrity-group')
    for group in celebrity_group:
        # Group heading, e.g. director / actor; whitespace-normalized.
        celebrity_type = group.find('div', 'celebrity-type').get_text().replace(' ', '').split()[-1]
        celebrity_list = group.find('ul', 'celebrity-list clearfix').findAll('li')
        for celebrity in celebrity_list:
            log("*" * 20)
            cover_url = celebrity.find('a', 'portrait').find('img', 'default-img').attrs['data-src']
            celebrity_name = celebrity.find('div', 'info').find('a', 'name').get_text().replace(' ', '').split()[-1]
            # Only actors ('演员') carry a role span, and even then it can be absent.
            if celebrity_type == '演员':
                try:
                    celebrity_role = celebrity.find('div', 'info').find('span', 'role').get_text().replace(' ', '').split()[
                        -1]
                except Exception as err:
                    celebrity_role = ""
            else:
                celebrity_role = ""
            log(celebrity_type + "--" + celebrity_name + "--" + celebrity_role + "--" + cover_url)
            item = CelebrityListItem(movie_id, celebrity_type, cover_url, celebrity_name, celebrity_role)
            celebrityListInfo.append(item)
    log("*" * 20 + "封装演职人员信息结束" + "*" * 20)
    # ------------------------------ cast & crew end ------------------------------

    # ------------------------------ comments start ------------------------------
    log("*" * 20 + "封装评论信息开始" + "*" * 20)
    # NOTE(review): the class string deliberately ends with a space
    # ('comment-container ') — presumably matching the site's markup; confirm
    # against live HTML before "fixing" it.
    comment_container = html.find('div', 'comment-list-container').findAll('li', 'comment-container ')
    for comment in comment_container:
        cover_url = comment.find('div', 'portrait-container').find('div', 'portrait').find('img').attrs['src']
        nick_name = comment.find('div', 'main').find('div', 'main-header clearfix').find('div', 'user').find('span',
                                                                                                             'name').get_text()
        comment_date = comment.find('div', 'main').find('div', 'main-header clearfix').find('div', 'time').find(
            'span').get_text()
        comment_content = comment.find('div', 'main').find('div', 'comment-content').get_text()
        log(nick_name + "--" + comment_date + "--" + comment_content + "--" + cover_url)
        item = CommentListItem(movie_id, nick_name, cover_url, comment_date, comment_content)
        commentListInfo.append(item)
    log("*" * 20 + "封装评论信息结束" + "*" * 20)
    # ------------------------------ comments end ------------------------------
    return movie_introduce


def get_datas():
    """Crawl all ten pages of the Maoyan top-100 board, then persist to MySQL.

    BUG FIX: the base URL previously already contained ``offset=0`` and the
    page offset was appended after it, producing ``offset=00``, ``offset=010``,
    ... — i.e. not the intended offsets. The base now ends at ``offset=``.
    """
    offsets = ["0", "10", "20", "30", "40", "50", "60", "70", "80", "90"]
    base_url = "http://maoyan.com/board/4?offset="
    for offset in offsets:
        log("准备获取" + base_url + offset + "的数据。。。")
        get_data(base_url + offset)
        # throttle between pages (anti-scraping)
        time.sleep(5)
    connect_db()


def connect_db():
    """(Re)create the three tables and bulk-insert all scraped data.

    BUG FIX: the connection is now closed in a ``finally`` block, so it is
    no longer leaked when any create/insert step raises.
    """
    db = pymysql.connect(host="localhost", user="root", password="123456", database="test_python", charset="utf8")
    try:
        cursor = db.cursor()  # one cursor shared by every step below

        create_movielist_table(db, cursor)
        insert_movielist_data(db, cursor)

        create_celebritylist_table(db, cursor)
        insert_celebritylist_data(db, cursor)

        create_commentlist_table(db, cursor)
        insert_commentlist_data(db, cursor)
    finally:
        db.close()


# Create the movie-info table, dropping any existing copy first.
def create_movielist_table(db, cursor):
    """Drop and recreate ``movielist_info``.

    On failure the transaction is rolled back and the actual error is
    logged (the old bare ``except`` swallowed it silently).
    """
    cursor.execute("DROP TABLE IF EXISTS movielist_info;")  # drop stale table
    createTab = """CREATE TABLE movielist_info (
        id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
        movie_id VARCHAR(10) NOT NULL,
        cover_url VARCHAR(256) NOT NULL,
        movie_name VARCHAR(40) NOT NULL,
        actor VARCHAR(100) NOT NULL,
        release_time VARCHAR(40) NOT NULL,
        movie_introduce TEXT
    );  """
    try:
        log("ready to create table..." + createTab)
        cursor.execute(createTab)
        db.commit()
        log("create table success...")
    except Exception as err:  # was a bare except that hid the real cause
        log("create table fail...")
        log(err)
        db.rollback()

# Create the cast/crew table, dropping any existing copy first.
def create_celebritylist_table(db, cursor):
    """Drop and recreate ``celegbritylist_info``.

    NOTE(review): the table name keeps the existing 'celegbritylist' typo so
    it stays consistent with insert_celebritylist_data; rename both together.
    On failure the transaction is rolled back and the actual error is logged
    (the old bare ``except`` swallowed it silently).
    """
    cursor.execute("DROP TABLE IF EXISTS celegbritylist_info;")  # drop stale table
    createTab = """CREATE TABLE celegbritylist_info (
        id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
        movie_id VARCHAR(10) NOT NULL,
        celebrity_type VARCHAR(20) NOT NULL,
        cover_url VARCHAR(256) NOT NULL,
        celebrity_name VARCHAR(50) NOT NULL,
        celebrity_role VARCHAR(50) NOT NULL
    );  """
    try:
        log("ready to create table..." + createTab)
        cursor.execute(createTab)
        db.commit()
        log("create table success...")
    except Exception as err:  # was a bare except that hid the real cause
        log("create table fail...")
        log(err)
        db.rollback()


# Create the comment table, dropping any existing copy first.
# (Original header comment wrongly said "cast table" — copy-paste slip.)
def create_commentlist_table(db, cursor):
    """Drop and recreate ``commentlist_info``.

    On failure the transaction is rolled back and the actual error is
    logged (the old bare ``except`` swallowed it silently).
    """
    cursor.execute("DROP TABLE IF EXISTS commentlist_info;")  # drop stale table
    createTab = """CREATE TABLE commentlist_info (
        id INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
        movie_id VARCHAR(10) NOT NULL,
        nick_name VARCHAR(30) NOT NULL,
        cover_url VARCHAR(256) NOT NULL,
        comment_date VARCHAR(50) NOT NULL,
        comment_content TEXT
    );  """
    try:
        log("ready to create table..." + createTab)
        cursor.execute(createTab)
        db.commit()
        log("create table success...")
    except Exception as err:  # was a bare except that hid the real cause
        log("create table fail...")
        log(err)
        db.rollback()


# Insert every scraped cast/crew row into the database.
def insert_celebritylist_data(db, cursor):
    """Insert each CelebrityListItem from celebrityListInfo.

    BUG FIX: uses a parameterized query instead of %-formatted SQL, so
    quotes/apostrophes in scraped names no longer break the statement
    (the old string interpolation was also SQL-injectable).
    """
    # NOTE(review): 'celegbritylist_info' typo kept to match the CREATE TABLE.
    sql = ("insert into celegbritylist_info "
           "(movie_id, celebrity_type, cover_url, celebrity_name, celebrity_role) "
           "values (%s, %s, %s, %s, %s)")
    for i in celebrityListInfo:
        try:
            log(sql)
            cursor.execute(sql, (i.movie_id, i.celebrity_type, i.cover_url, i.celebrity_name, i.celebrity_role))
            db.commit()
            log(i.celebrity_name + "被成功插入数据库。。。")
        except Exception as err:  # narrow from bare except; surface the cause
            log(i.celebrity_name + "插入数据库失败。。。")
            log(err)
            db.rollback()


# Insert every scraped top-100 movie row into the database.
def insert_movielist_data(db, cursor):
    """Insert each MovieListItem from movieList.

    BUG FIX: uses a parameterized query instead of %-formatted SQL, so
    quotes/apostrophes in titles or synopses no longer break the statement
    (the old string interpolation was also SQL-injectable).
    """
    sql = ("insert into movielist_info "
           "(movie_id, cover_url, movie_name, actor, release_time, movie_introduce) "
           "values (%s, %s, %s, %s, %s, %s)")
    for i in movieList:
        try:
            log(sql)
            cursor.execute(sql, (i.movie_id, i.cover_url, i.movie_name, i.actor, i.release_time, i.movie_introduce))
            db.commit()
            log(i.movie_name + "被成功插入数据库。。。")
        except Exception as err:  # narrow from bare except; surface the cause
            log(i.movie_name + "插入数据库失败。。。")
            log(err)
            db.rollback()


# Insert every scraped comment row into the database.
def insert_commentlist_data(db, cursor):
    """Insert each CommentListItem from commentListInfo.

    BUG FIX: uses a parameterized query instead of %-formatted SQL, so
    quotes/apostrophes in user comments no longer break the statement
    (the old string interpolation was also SQL-injectable).
    """
    sql = ("insert into commentlist_info "
           "(movie_id, nick_name, cover_url, comment_date, comment_content) "
           "values (%s, %s, %s, %s, %s)")
    for i in commentListInfo:
        try:
            log(sql)
            cursor.execute(sql, (i.movie_id, i.nick_name, i.cover_url, i.comment_date, i.comment_content))
            db.commit()
            log(i.nick_name + "被成功插入数据库。。。")
        except Exception as err:  # narrow from bare except; surface the cause
            log(i.nick_name + "插入数据库失败。。。")
            log(err)
            db.rollback()


# Run the crawl only when executed as a script, not when imported.
# (Dead commented-out debug code removed.)
if __name__ == "__main__":
    get_datas()
