import urllib.request, urllib.error
import re
from bs4 import BeautifulSoup
import pymysql


def main():
    """Scrape Douban's Top 250 movie list and persist it to MySQL."""
    baseUrl = "https://movie.douban.com/top250?start="
    dataList = getData(baseUrl)
    # Save the scraped data to the MySQL database.
    db = connectDB()
    try:
        if saveData(db, dataList):
            print("保存数据到MySQL数据库成功！")
        else:
            print("保存数据到MySQL数据库失败！")
    finally:
        # Always release the connection, even if saveData raises unexpectedly.
        db.close()


# MySQL database access
def connectDB():
    """Open and return a connection to the local ``movieTop250`` database."""
    return pymysql.connect(
        host='localhost',
        user='root',
        password='root',
        port=3306,
        database='movieTop250',
        charset='utf8',
    )


# Compiled extraction patterns, applied to the HTML of one <div class="item">.
findLink = re.compile(r'<a href="(.*?)">')  # detail-page URL
findImgSrc = re.compile(r'<img.*src="(.*?)"', re.S)  # poster image URL (re.S: attribute may follow a newline)
findTitle = re.compile(r'<span class="title">(.*)</span>')  # title; may match twice (Chinese + foreign)
findRatingNum = re.compile(r'<span class="rating_num" property="v:average">(.*)</span>')  # average rating
findJudgeNum = re.compile(r'<span>(\d*)人评价</span>')  # number of user ratings
findInq = re.compile(r'<span class="inq">(.*)</span>')  # one-line summary (may be absent)
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)  # director/cast/year description block


def getData(baseUrl):
    """Scrape all 10 pages of the Douban Top 250 list.

    baseUrl: URL prefix ending in ``start=``; the page offset
        (0, 25, ..., 225) is appended for each request.
    Returns a list of dicts, one per movie, with keys Link, ImgSrc,
    Title, oTitle, RatingNum, JudgeNum, Inq and Bd (all strings).
    """
    dataList = []
    for page in range(10):  # 10 pages x 25 movies = 250 entries
        html = askUrl(baseUrl + str(page * 25))
        soup = BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="item"):
            dataList.append(_parseItem(str(item)))
    return dataList


def _parseItem(item):
    """Extract one movie's fields from the HTML of a single list item."""
    data = {}
    data["Link"] = findLink.findall(item)[0]
    data["ImgSrc"] = findImgSrc.findall(item)[0]

    titles = findTitle.findall(item)
    if len(titles) == 2:  # both a Chinese and a foreign-language title
        data["Title"] = titles[0]  # Chinese title
        # split() with no args removes all whitespace, including \xa0 padding.
        oTitle = "".join(titles[1].split())
        data["oTitle"] = oTitle.replace("/", "")  # drop the "/" separator
    else:
        data["Title"] = titles[0]
        data["oTitle"] = ""  # placeholder: no foreign title present

    data["RatingNum"] = findRatingNum.findall(item)[0]
    data["JudgeNum"] = findJudgeNum.findall(item)[0]

    inq = findInq.findall(item)  # the one-line summary may be missing
    data["Inq"] = inq[0] if inq else ""  # empty-string placeholder

    bd = findBd.findall(item)[0]
    # Raw string: '\s' in a plain literal is an invalid escape (warns on 3.12+).
    bd = re.sub(r'<br(\s+)?/>(\s+)?', " ", bd)  # strip <br/> line breaks
    bd = bd.replace('/', " ")  # replace the "/" field separators
    data["Bd"] = "".join(bd.split())  # collapse all whitespace/tabs/newlines
    return data


def askUrl(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Sends a desktop browser User-Agent so the site does not reject the
    request. On failure the HTTP status code and/or the failure reason
    are printed and an empty string is returned.
    """
    head = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/93.0.4577.82 Safari/537.36"}
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        # Context manager closes the underlying socket deterministically
        # (the original leaked the response object).
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        # HTTPError subclasses URLError and carries a status code;
        # a plain URLError only has a reason.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)

    return html


def saveData(db, dataList):
    """Replace the contents of table ``top250`` with *dataList*.

    db: an open DB-API connection (pymysql).
    dataList: list of dicts with keys Link, ImgSrc, Title, oTitle,
        RatingNum, JudgeNum, Inq, Bd.
    Returns True on success; on any error prints the exception, rolls
    back and returns False. The cursor is always closed.
    """
    sql = 'insert into top250(Link,ImgSrc, Title,oTitle,RatingNum,JudgeNum,Inq,Bd)' \
          'values(%s, %s, %s, %s, %s, %s, %s, %s)'
    cursor = db.cursor()
    try:
        # Clear stale rows so the table mirrors the freshly scraped ranking.
        # Kept inside the try so a failure here also rolls back and
        # returns False instead of crashing the caller.
        cursor.execute("truncate table top250")
        # One batched round trip instead of 250 individual executes.
        cursor.executemany(sql, [
            (item['Link'], item['ImgSrc'], item['Title'], item['oTitle'],
             item['RatingNum'], item['JudgeNum'], item['Inq'], item['Bd'])
            for item in dataList
        ])
        db.commit()
        return True
    except Exception as e:
        print(e)
        db.rollback()
        return False
    finally:
        cursor.close()


if __name__ == '__main__':
    # Run the scraper only when executed as a script, not on import.
    main()
