# 创建csv文件并使用os存储
import csv
import os
# 数据库相关库
from pymysql import *
from sqlalchemy import create_engine
# 请求库
import requests
# 数据解析
from lxml import etree
import re
# json格式
import json
# 数据清洗
import pandas as pd

# selenium库
# from selenium import  webdriver
# from selenium.webdriver.chrome.service import Service
# import time

# 定义类

# Module-level database connection engine, shared by spider.save_to_sql().
# URL format: dialect+driver://user:password@host:port/database
engine = create_engine('mysql+pymysql://root:123456@localhost:3306/dbm')

class spider(object):
    """Douban Top250 movie scraper.

    Crawls the Top250 list page by page, scrapes each movie's detail page,
    appends the rows to ./tempData.csv, and mirrors the cleaned data into
    the MySQL `movies` table via the module-level SQLAlchemy engine.
    """

    def __init__(self):
        # Base URL of the Top250 list; start/filter query params are appended per page.
        self.spiderUrl = 'https://movie.douban.com/top250?'
        # Pretend to be a desktop browser so Douban serves the regular HTML.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
        }

    def init(self):
        """Prepare the output targets (idempotent): CSV file, page-progress
        file, and the MySQL `movies` table."""
        # CSV buffer for the scraped rows; column order must match _parse_movie().
        if not os.path.exists('./tempData.csv'):
            # utf-8 keeps the header consistent with save_to_csv()'s appends.
            with open('./tempData.csv', 'w', newline='', encoding='utf-8') as writer_f:
                writer = csv.writer(writer_f)
                writer.writerow(["title", "cover", "rate", "detailLink", "directors", "casts", "year", "types", "country",
                                 "lang", "time", "movieTime", "comment_len", "stars", "summary", "comments", "imgList", "movieUrl"])

        # Progress file holding the last crawled page so a restart resumes
        # instead of re-crawling from page 0.
        if not os.path.exists('./spiderPage.txt'):
            with open('./spiderPage.txt', 'w', encoding='utf-8') as w_f:
                w_f.write('0\n')

        # Best-effort table creation: ignore the failure when the table
        # already exists or the DB is unreachable -- the CSV pipeline still
        # works without it. (Was a bare `except:`; also the connection was
        # never closed.)
        try:
            conn = connect(host='localhost', user='root', password='123456', database='dbm', port=3306, charset='utf8mb4')
            try:
                sql = '''
                    create table movies(
                        id int primary key auto_increment,
                        title varchar(255),
                        cover varchar(255),
                        rate varchar(255),
                        detailLink varchar(255),
                        directors varchar(255),
                        casts varchar(255),
                        year varchar(255),
                        types varchar(255),
                        country varchar(255),
                        lang varchar(255),
                        time varchar(255),
                        movieTime varchar(255),
                        comment_len varchar(255),
                        stars varchar(255),
                        summary varchar(255),
                        comments text,
                        imgList varchar(255),
                        movieUrl varchar(255)
                    )
                  '''
                cursor = conn.cursor()
                cursor.execute(sql)
                conn.commit()
                cursor.close()
            finally:
                # Release the connection even when CREATE TABLE fails.
                conn.close()
        except Exception:
            pass

    def get_page(self):
        """Return the last crawled page number (as a string) from the progress file."""
        with open('./spiderPage.txt', 'r') as r_f:
            # The newest page number is always the last line.
            return r_f.readlines()[-1].strip()

    def set_page(self, newPage):
        """Append the freshly crawled page number to the progress file."""
        with open('./spiderPage.txt', 'a') as a_f:
            a_f.write(str(newPage) + '\n')

    def save_to_csv(self, resultList):
        """Append every scraped row in `resultList` to the temp CSV file."""
        with open('./tempData.csv', 'a', newline='', encoding='utf-8') as a_f:
            csv.writer(a_f).writerows(resultList)

    def clear_csv(self):
        """Lightly clean the temp CSV (drop NaN rows and duplicates), then
        push the cleaned frame into the database."""
        df = pd.read_csv('./tempData.csv')
        # Drop rows with any missing value.
        df.dropna(inplace=True)
        # Bug fix: drop_duplicates() without inplace (or reassignment)
        # discarded its result, so duplicates were never removed.
        df.drop_duplicates(inplace=True)
        self.save_to_sql(df)

    def save_to_sql(self, df):
        """Replace the `movies` table with the cleaned DataFrame.

        (Removed a stray `pd.read_csv(...)` whose result was discarded --
        it re-read the whole CSV on every page for nothing.)
        """
        df.to_sql('movies', con=engine, if_exists='replace')

    def _parse_movie(self, detail_url):
        """Scrape one movie's detail page and return its row of fields,
        in the same order as the CSV header written by init()."""
        resultData = []
        response_detail = requests.get(detail_url, headers=self.headers)
        detail_tree = etree.HTML(response_detail.text)

        # title -- first whitespace-separated token of the <h1> heading
        title = detail_tree.xpath('//h1/span[1]/text()')[0].split(" ")[0]
        resultData.append(title)
        # cover image URL
        cover = detail_tree.xpath('//div[@id="mainpic"]/a[@class="nbgnbg"]/img/@src')[0]
        resultData.append(cover)
        # average rating
        rate = detail_tree.xpath('//div[@id="interest_sectl"]//strong[@property="v:average"]/text()')[0]
        resultData.append(rate)
        # detail page link
        resultData.append(detail_url)
        # directors: drop blanks and "/" separators, skip the two label tokens
        directors_ = detail_tree.xpath('//div[@id="info"]/span[1]//text()')
        name = [x for x in directors_ if x.strip() and x.strip() != "/"]
        resultData.append(",".join(name[2:]))
        # casts
        casts_ = detail_tree.xpath('//div[@id="info"]/span[@class="actor"]/span[@class="attrs"]//text()')
        resultData.append(",".join(x for x in casts_ if x.strip() != "/"))
        # year -- raw string for the regex (the old '\d+' raised an
        # invalid-escape-sequence warning on modern Python)
        year = re.search(r'\d+', detail_tree.xpath('//h1/span[@class="year"]/text()')[0]).group()
        resultData.append(year)
        # genres
        types = detail_tree.xpath('//*[@id="info"]/span[@property="v:genre"]/text()')
        resultData.append(",".join(types))
        # country / language come from the bare text nodes of the info block
        # (the original loop reused `i`, shadowing the outer movie index)
        info_ = detail_tree.xpath('//*[@id="info"]/text()')
        info = [item.strip() for item in info_ if item.strip() and item.strip() != "/"]
        country = "".join(info[0].split(sep="/"))
        resultData.append(country)
        lang = "".join(info[1].split(sep="/"))
        resultData.append(lang)
        # release dates
        release_dates = detail_tree.xpath('//div[@id="info"]//span[@property="v:initialReleaseDate"]/text()')
        resultData.append(",".join(release_dates))
        # runtime
        movieTime = detail_tree.xpath('//div[@id="info"]//span[@property="v:runtime"]/text()')[0]
        resultData.append(movieTime)
        # number of short comments
        comment_len = detail_tree.xpath('//*[@id="comments-section"]//span[@class="pl"]/a/text()')
        resultData.append(re.search(r'\d+', comment_len[0]).group())
        # star-rating distribution, from 5 stars down to 1
        stars = detail_tree.xpath('//div[@class="ratings-on-weight"]/div[@class="item"]/span[@class="rating_per"]/text()')
        resultData.append(",".join(stars))
        # plot summary
        summary_ = detail_tree.xpath('//*[@id="link-report-intra"]//span[@property="v:summary"]/text()')
        resultData.append(summary_[0].strip())
        # hot comments -- (user, star, time, content) serialized as JSON
        comments = []
        hot_comm = detail_tree.xpath('//*[@id="hot-comments"]/div[@class="comment-item "]')
        for comm in hot_comm:
            user = comm.xpath('.//span[@class="comment-info"]/a/text()')[0]
            try:
                star = re.search(r'\d+', comm.xpath('.//span[@class="comment-info"]/span[2]/@class')[0]).group()
            except Exception:
                # Some comments carry no star rating; record 0.
                star = '0'
            times = comm.xpath('.//span[@class="comment-info"]/span[@class="comment-time "]/@title')[0]
            content = comm.xpath('.//p[@class=" comment-content"]/span[@class="short"]/text()')[0]
            comments.append({
                "user": user,
                "star": star,
                "times": times,
                "content": content
            })
        resultData.append(json.dumps(comments))
        # related images
        imgList = detail_tree.xpath('//div[@id="related-pic"]/ul/li//img/@src')
        resultData.append(" , ".join(imgList))
        # trailer: follow the trailer page and grab the <video> source URL
        movieLink = detail_tree.xpath('//div[@id="related-pic"]/ul/li[@class="label-trailer"]/a/@href')[0]
        response_movie = requests.get(movieLink, headers=self.headers)
        movie_tree = etree.HTML(response_movie.text)
        resultData.append(movie_tree.xpath('//video/source/@src')[0])
        return resultData

    def spiderMain(self):
        """Crawl the next pending list page, scrape each movie on it, persist
        the rows, and recurse until 10 pages (Top250) are recorded."""
        page = self.get_page()
        print("正在爬取第%d页" % (int(page) + 1))

        # List page URL: 25 movies per page.
        spiderUrl = self.spiderUrl + 'start=' + str(int(page) * 25) + '&filter='
        response = requests.get(spiderUrl, headers=self.headers)
        html = response.text

        resultList = []
        # A parse/network failure skips the remainder of this page instead of
        # aborting the crawl (was a silent bare `except:`; narrowed so
        # KeyboardInterrupt/SystemExit still propagate).
        try:
            tree = etree.HTML(html)
            # Detail-page links for every movie on this list page.
            detailLink = tree.xpath('//li//div[@class="hd"]/a/@href')
            for idx, link in enumerate(detailLink):
                print("正在爬取第%d部电影" % (idx + 1))
                resultList.append(self._parse_movie(link))
        except Exception:
            pass

        # Recursive paging: persist, advance the progress file, re-sync the
        # cleaned data into MySQL, then crawl the next page.
        if int(page) < 10:
            self.save_to_csv(resultList)
            self.set_page(int(page) + 1)
            self.clear_csv()
            self.spiderMain()

if __name__ == '__main__':
    # Build the scraper, make sure its output targets (CSV, progress file,
    # DB table) exist, then start the recursive crawl.
    douban_spider = spider()
    douban_spider.init()
    douban_spider.spiderMain()