# -*- coding:utf-8 -*-
# @Time: 2024/4/19 21:01
# @Author:miuzg
# @FileName:new test2.py
# @Software:PyCharm

import requests
from bs4 import BeautifulSoup
import pymysql


class DouBanSpider(object):
    """Scrape the Douban Top-250 movie list and store each entry in MySQL.

    Workflow: ``run`` iterates the 10 result pages, ``get_html`` fetches a
    page, ``parse_html`` builds the soup, and ``save`` extracts one row per
    movie and inserts it into the ``movies`` table.
    """

    def __init__(self):
        # start={} is the 0-based offset of the first movie on a page (25 per page).
        self.url = 'https://movie.douban.com/top250?start={}&filter='
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0'
        }
        # NOTE(review): credentials are hard-coded; consider env vars / config.
        self.db = pymysql.connect(
            host='127.0.0.1',
            port=3306,
            database='520day6',
            user='root',
            passwd='root',
            charset='utf8'
        )
        self.cur = self.db.cursor()
        # Parameterized insert — values are bound by pymysql, not string-formatted.
        self.sql = 'insert into movies(title,director,star,years,country,types,grade,info)' \
                   'values(%s,%s,%s,%s,%s,%s,%s,%s)'

    def get_html(self, url):
        """Fetch *url* and return the response body as text.

        Raises requests.HTTPError on a non-2xx status and
        requests.Timeout if the server does not answer within 10s.
        """
        res = requests.get(url, headers=self.headers, timeout=10)
        res.raise_for_status()  # fail fast instead of parsing an error page
        return res.text

    def parse_html(self, html):
        """Parse one result page and persist every movie found on it."""
        soup = BeautifulSoup(html, 'lxml')
        self.save(soup)

    def save(self, soup):
        """Extract one row per movie from *soup* and insert it into MySQL."""
        datas = soup.find_all('div', class_='info')
        for data in datas:
            title = data.find('span', class_='title').text

            # Split on newlines; strip() removes surrounding whitespace noise.
            other = data.find('div', class_='bd').p.text.strip().split('\n')
            print(other)

            # First line: "导演: X\xa0\xa0\xa0主演: Y" — the triple NBSP
            # separates director from star; slicing [4:] drops the label prefix.
            director_info = other[0].strip().strip('...\\').split('\xa0\xa0\xa0')
            director = director_info[0][4:]
            star = director_info[1][4:] if len(director_info) > 1 else 'unknown'

            # Second line: "year / country [/ country...] / type".
            # Use first element as year and last as type so entries with
            # multiple countries (e.g. "美国 / 英国") parse correctly;
            # guard short lines instead of raising IndexError.
            msg = other[1].strip().split('/') if len(other) > 1 else []
            years = msg[0].strip() if msg else 'unknown'
            country = '/'.join(msg[1:-1]).strip() if len(msg) > 2 else 'unknown'
            types = msg[-1].strip() if len(msg) > 1 else 'unknown'

            # Rating.
            grade = data.find('span', class_='rating_num').text

            # One-line blurb; some movies have none.
            info = data.find('span', class_='inq').text if data.find('span', class_='inq') else 'unknown'

            # Summary line for progress monitoring.
            print(f'电影名为{title},导演为{director},主演为{star},年份为{years},国家为{country},类型为{types},评分为{grade},'
                  f'介绍信息为{info}')

            self.cur.execute(self.sql, (title, director, star, years, country, types, grade, info))
            self.db.commit()

    def run(self):
        """Crawl all 10 result pages (offsets 0..225), then release resources."""
        try:
            for start in range(0, 226, 25):
                url = self.url.format(start)
                html = self.get_html(url)
                self.parse_html(html)
        finally:
            # Always close the cursor and connection, even if a page fails.
            self.cur.close()
            self.db.close()


if __name__ == '__main__':
    # Entry point: build the spider and crawl every page.
    DouBanSpider().run()
