# -*- coding: utf-8 -*-
#!/usr/bin/env python
# @ author = 'zoushunli'
# @ author_email = '409358374@qq.com'
import os

import requests
import random
import bs4
from multiprocessing import Pool
import scrapy
import re
import sqlite3
import xlwt

'''学习爬虫'''

class DouBanMovie(object):
    """Scrape the Douban Movie Top 250 list and persist it to Excel / SQLite.

    Each scraped row is a list of 8 strings:
        [link, img, title_zh, title_en, rating, judge, inq, bd]
    """

    def __init__(self):
        super(DouBanMovie, self).__init__()
        # A couple of desktop User-Agents, picked at random per instance to
        # look less bot-like.
        self.user_agent = [
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0']
        self.headers = {'User-Agent': random.choice(self.user_agent)}
        # Bind frequently used callables once (original file convention).
        self.re_compile = re.compile
        self.requests_get = requests.get
        self.bs4_BeautifulSoup = bs4.BeautifulSoup
        self.xlwt_Workbook = xlwt.Workbook
        self.sqlite3_connect = sqlite3.connect

    def get_html_text(self, url):
        """Fetch *url* and return its decoded body, or '' on any failure.

        The original evaluated ``r.status_code`` as a no-op; here
        ``raise_for_status()`` actually turns HTTP error codes (404/500/...)
        into exceptions instead of treating the error page as content.
        """
        try:
            r = self.requests_get(url, headers=self.headers, timeout=10)
            r.raise_for_status()
            # apparent_encoding sniffs the real charset from the body bytes.
            r.encoding = r.apparent_encoding
            return r.text
        except requests.RequestException as e:
            print('Exception:-->', e)
            print("产生异常")
            return ''

    def prase_html_data(self, html_text, data_list):
        """Parse one Top250 listing page and append one row per movie.

        Appends to *data_list* in place and also returns it.
        (Name ``prase`` kept — it is the public interface used by main().)
        """
        # Raw strings so backslash escapes like \d and \s reach the regex
        # engine intact (the originals were non-raw literals).
        find_link = self.re_compile(r'<a href="(.*?)">')
        find_img = self.re_compile(r'<img.*src="(.*?)"', re.S)
        find_title = self.re_compile(r'<span class="title">(.*?)</span>')
        find_rating = self.re_compile(r'<span class="rating_num" property="v:average">(.*)</span>')
        find_judge = self.re_compile(r'<span>(\d*)人评价</span>')
        find_inq = self.re_compile(r'<span class="inq">(.*)</span>')
        find_bd = self.re_compile(r'<p class="">(.*?)</p>', re.S)

        soup = self.bs4_BeautifulSoup(html_text, 'html.parser')
        for item in soup.find_all('div', class_='item'):
            item = str(item)
            link = find_link.findall(item)[0]
            titles = find_title.findall(item)
            title_zh = titles[0].strip()
            # A second <span class="title"> holds the foreign title,
            # prefixed with a '/' separator that we strip off.
            title_en = titles[1].replace('/', '').strip() if len(titles) == 2 else ''

            img = find_img.findall(item)[0]
            bd = find_bd.findall(item)[0]
            bd = re.sub(r'<br(\s+)?/>(\s+)?', '', bd)
            bd = bd.replace('/', '').strip()
            rating = find_rating.findall(item)[0]
            judge = find_judge.findall(item)[0]
            inq = find_inq.findall(item)
            # The one-line quote is optional; drop the trailing '。' when present.
            inq = inq[0].replace('。', '') if inq else ''

            data_list.append([link, img, title_zh, title_en,
                              rating, judge, inq, bd])

        return data_list

    def save_html_data_2_xlwt(self, data_list, path=None):
        """Write *data_list* to an .xls workbook.

        :param path: output file; defaults to '豆瓣电影Top250.xls'.
        """
        work_book = self.xlwt_Workbook(encoding="utf-8", style_compression=0)
        work_sheet = work_book.add_sheet('豆瓣电影Top250', cell_overwrite_ok=True)
        movie_info = ('电影链接', '图片链接', '影片中文名', '影片外国名', '评分', '评价人数', '概况', '相关人员信息')
        for col, header in enumerate(movie_info):
            work_sheet.write(0, col, header)

        # Data rows start below the header row.
        for row, data in enumerate(data_list, start=1):
            for col in range(len(movie_info)):
                work_sheet.write(row, col, data[col])
        work_book.save(path or '豆瓣电影Top250.xls')

    def create_table(self, table_name, db_path):
        """Create the movie table in the SQLite database at *db_path*.

        NOTE(review): *table_name* is interpolated into the SQL — only pass
        trusted, program-defined names, never user input.
        Raises sqlite3.OperationalError if the table already exists.
        """
        # Identifier is double-quoted so non-ASCII names are always legal.
        sql = '''
            create table "%s"
            (id INTEGER PRIMARY KEY AUTOINCREMENT,
            link text ,
            img text ,
            title_zh  varchar ,
            title_en  varchar ,
            rating numeric ,
            judge numeric ,
            inq text ,
            bd text);
            ''' % table_name
        conn = self.sqlite3_connect(db_path)
        print('Opened database successfully')
        try:
            cur = conn.cursor()
            cur.execute(sql)
            conn.commit()
        finally:
            # Close even if the CREATE fails (e.g. table already exists).
            conn.close()

    def insert_data(self, data_list, table_name, db_path):
        """Insert all rows of *data_list* into *table_name*.

        Uses parameterized SQL (``?`` placeholders) instead of the original
        string concatenation, which both prevents injection/quoting bugs and
        no longer mutates the caller's rows (the old code wrapped every field
        in literal double quotes in place).
        """
        sql = ('insert into "%s" (link, img, title_zh, title_en, rating, judge, inq, bd) '
               'values (?, ?, ?, ?, ?, ?, ?, ?)') % table_name
        conn = self.sqlite3_connect(db_path)
        print('Opened database successfully')
        try:
            cur = conn.cursor()
            cur.executemany(sql, (tuple(data) for data in data_list))
            conn.commit()
            cur.close()
        finally:
            conn.close()
        print('成功插入')

    def save_html_data_2_sqlite(self, table_name, data_list, db_path):
        """Create the table if needed, then insert *data_list* into it."""
        try:
            self.create_table(table_name, db_path)
        except sqlite3.OperationalError:
            # Table already exists — the original hid ALL errors with a bare
            # except; any other failure now surfaces.
            pass
        self.insert_data(data_list, table_name, db_path)

    def get_cookies(self):
        # Debug helper: show the (empty) cookie jar of a fresh session.
        session = requests.Session()
        print(session.cookies)

    def download_img(self, data_list):
        """Download each movie's poster into ./电影海报/<chinese title>.<ext>."""
        dir_path = './电影海报'
        os.makedirs(dir_path, exist_ok=True)
        for data in data_list:
            # Strip any stray quotes (defensive: rows that passed through the
            # legacy insert_data were quoted in place).
            img_url = data[1].replace('"', '')
            r = self.requests_get(img_url, headers=self.headers, timeout=10)
            img_ext = img_url.rsplit('/', 1)[1].split('.')[-1]
            image_name = data[2].replace('"', '')
            file_path = os.path.join(dir_path, image_name + '.' + img_ext)
            with open(file_path, 'wb') as f:
                f.write(r.content)


def main():
    """Crawl all ten Top250 listing pages, then export the rows to Excel."""
    db_path = 'study_sqlite.db'
    table_name = '豆瓣电影Top250'
    base_url = 'https://movie.douban.com/top250?start='
    spider = DouBanMovie()
    data_list = []

    # 250 movies, 25 per page -> start offsets 0, 25, ..., 225.
    for page in range(10):
        page_html = spider.get_html_text(base_url + str(page * 25))
        spider.prase_html_data(page_html, data_list)

    spider.save_html_data_2_xlwt(data_list)
    # spider.save_html_data_2_sqlite(table_name, data_list, db_path)
    # spider.download_img(data_list)


if __name__ == "__main__":
    main()
