# -*- coding: utf-8 -*-
# @Time    : 2025/3/15 上午9:33
# @Author  : yqk
# @File    : 豆瓣爬虫-保存数据-sqlite3.py
# @Software: PyCharm

from bs4 import BeautifulSoup  # 网页解析
import re
import requests  # 修正模块名
import xlwt   # 用于保存数据为Excel
import sqlite3

# Pre-compiled regexes applied to the stringified HTML of each movie <div class="item">.
findlink = re.compile(r'<a href="(.*?)">')  # detail-page URL (first <a href> in the item)
findImg = re.compile(r'<img.* src="(.*?)"', re.S)  # poster image URL; re.S lets .* span newlines
findtitle = re.compile(r'<span class="title">(.*?)</span>')  # movie title
findRating = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')  # score; NOTE(review): greedy (.*) — assumes one such span per line
findJudge = re.compile(r'<span>(\d*)人评价</span>')  # number of raters ("人评价" = "people rated")
findIng = re.compile(r'<span class="other">(.*?)</span>')  # alternate / other titles
findBd = re.compile(r'<div class="bd">\s*<p>(.*?)</p>', re.S)  # description paragraph


def clean_data(data):
    """Normalize one scraped HTML fragment into a single-line plain string.

    Strips HTML tags, converts non-breaking spaces and newlines to plain
    spaces, and collapses runs of whitespace into single spaces.

    :param data: raw HTML/text fragment extracted by a regex
    :return: cleaned single-line string
    """
    # Strip HTML tags first; this already removes any <br/> tag, so the
    # old ``.replace("<br/>", " ")`` that ran afterwards was dead code
    # and has been removed (behavior unchanged).
    data = re.sub(r'<.*?>', '', data)

    # \xa0 is &nbsp; after HTML decoding; newlines become plain spaces.
    data = data.replace("\xa0", " ").replace("\n", " ").strip()

    # Collapse any remaining whitespace runs into a single space.
    data = re.sub(r'\s+', ' ', data)
    return data


def getData(baseurl):
    """Scrape all 10 pages of the Douban Top 250 list.

    :param baseurl: list URL ending in ``?start=``; the page offset is appended
    :return: list of rows, each ``[link, img, title, rating, votes, other, description]``;
             fields that fail to match are filled with a Chinese placeholder string
    :raises requests.HTTPError: if a page request returns an error status
    """
    headers = {
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "zh-CN,zh;q=0.9",
        "referer": "https://movie.douban.com/top250?start=225&filter=",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36"
    }

    def first_or(matches, fallback):
        # Cleaned first regex match, or the placeholder when nothing matched.
        return clean_data(matches[0]) if matches else fallback

    datalist = []
    for page in range(10):  # 10 pages x 25 movies = the full Top 250
        url = baseurl + str(page * 25)

        # timeout so a hung request cannot block the scraper forever;
        # raise_for_status so an error page is not silently parsed as data.
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.content.decode('utf-8'), 'html.parser')

        for item in soup.find_all('div', class_='item'):
            item = str(item)
            datalist.append([
                first_or(re.findall(findlink, item), "无链接"),      # detail link
                first_or(re.findall(findImg, item), "无图片"),       # poster URL
                first_or(re.findall(findtitle, item), "无标题"),     # title
                first_or(re.findall(findRating, item), "无评分"),    # score
                first_or(re.findall(findJudge, item), "无评价"),     # vote count
                first_or(re.findall(findIng, item), "暂无信息"),     # other titles
                first_or(re.findall(findBd, item), "暂无描述"),      # description
            ])
    return datalist


def saveData(datalist, savepath):
    """Write the scraped movie rows to an .xls workbook at *savepath*."""
    # xlwt's save() needs a plain string path — bail out early otherwise.
    if not isinstance(savepath, str):
        print("错误：保存路径不是字符串类型")
        return

    workbook = xlwt.Workbook(encoding='utf-8', style_compression=0)
    sheet = workbook.add_sheet('豆瓣电影top250', cell_overwrite_ok=True)

    # Header row.
    captions = ('链接', '图片链接', '电影名称', '评分', '评价人数', '概况', '相关信息')
    for col_idx, caption in enumerate(captions):
        sheet.write(0, col_idx, caption)

    # Data rows start at row 1, directly below the header.
    for row_idx, row in enumerate(datalist, start=1):
        print(f"正在保存第 {row_idx} 条数据...")
        # Tolerate a scalar record by boxing it into a list.
        if not isinstance(row, list):
            row = [row]
        # Pad short records so every row fills all 7 columns.
        while len(row) < 7:
            row.append("暂无数据")
        for col_idx in range(7):
            sheet.write(row_idx, col_idx, row[col_idx])

    try:
        workbook.save(savepath)
        print("数据保存完成...")
    except Exception as e:
        print(f"保存数据时发生错误: {e}")

def saveDataDB(datalist, dbpath):
    """Insert the scraped movie rows into table ``movie250`` of the SQLite
    database at *dbpath*.

    Rows that do not have exactly 7 fields are reported and skipped; a row
    that fails to insert is reported and the remaining rows continue.

    :param datalist: list of 7-element rows as produced by getData()
    :param dbpath: path of the SQLite database file
    """
    init_db(dbpath)  # make sure the table exists before inserting

    # Parameterized placeholders — sqlite3 quotes the values itself.
    sql = '''
        INSERT INTO movie250 (
            info_link, pic_link, name, score, rated, introduction, info
        )
        VALUES (?, ?, ?, ?, ?, ?, ?)
    '''

    conn = sqlite3.connect(dbpath)
    try:
        cur = conn.cursor()
        for i, data in enumerate(datalist):
            # BUG FIX: the old manual quote-doubling (d.replace("'", "''"))
            # on top of parameterized queries corrupted every stored
            # apostrophe; with placeholders no escaping is needed at all.
            if len(data) != 7:
                print(f"第 {i + 1} 条数据格式不正确，跳过")
                continue

            try:
                cur.execute(sql, data)
                print(f"第 {i + 1} 条数据已插入")
            except sqlite3.Error as e:  # narrowed from bare Exception
                print(f"插入第 {i + 1} 条数据时发生错误: {e}")
                continue

        # Commit once after all inserts.
        conn.commit()
        cur.close()
    finally:
        # Close the connection even if something above raised.
        conn.close()


def init_db(dbpath):
    """Create the ``movie250`` table in *dbpath* if it does not already exist.

    Safe to call repeatedly thanks to ``CREATE TABLE IF NOT EXISTS``.

    :param dbpath: path of the SQLite database file (created if absent)
    """
    sql = '''
        CREATE TABLE IF NOT EXISTS movie250 (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            info_link TEXT,
            pic_link TEXT,
            name VARCHAR,
            score NUMERIC,
            rated NUMERIC,
            introduction TEXT,
            info TEXT
        );
    '''
    conn = sqlite3.connect(dbpath)
    try:
        # Connection.execute creates an implicit cursor for us.
        conn.execute(sql)
        conn.commit()
    finally:
        # BUG FIX: the old version leaked the connection if execute() raised.
        conn.close()

if __name__ == '__main__':
    baseurl = "https://movie.douban.com/top250?start="

    # Scrape every page of the Top 250 list.
    datalist = getData(baseurl)

    # To export to Excel instead, use:
    #   saveData(datalist, ".\\豆瓣电影Top250.xls")

    # saveDataDB() calls init_db() itself, so the separate
    # init_db('movie.db') call that used to sit here was redundant
    # and has been removed.
    saveDataDB(datalist, 'movie.db')
