# @Time: 2020/4/16 12:27
# @Author: may
# @Desc: 爬取豆瓣电影top250

import requests
import re
from bs4 import BeautifulSoup
import pymysql

base_url = 'https://movie.douban.com/top250?start='
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                  'Chrome/81.0.4044.92 Safari/537.36 Edg/81.0.416.53'
}


def get_data(url=base_url):
    """Scrape all 10 pages (250 films) of the Douban movie Top 250 list.

    :param url: base list URL; the page offset query value (start=0, 25, ...)
                is appended for each of the 10 pages.
    :return: list of per-film lists in the order
             [ranking, link, name, foreign_name, pic, info, score, comments, inq]
    """
    # Compile each pattern once; they are applied to every item on every page.
    re_ranking = re.compile(r'<em class="">(.*?)</em>')
    re_link = re.compile(r'<a href="(.*?)"')
    re_pic = re.compile(r'<img.*src="(.*?)"')
    re_title = re.compile(r'<span class="title">(.*?)</span>')
    re_info = re.compile(r'<p class="">(.*?)</p>', re.S)  # re.S: info spans multiple lines
    re_score = re.compile(r'<span class="rating_num" property="v:average">(.*?)</span>')
    re_comments = re.compile(r'<span>(.*?)人评价</span>')
    re_inq = re.compile(r'<span class="inq">(.*?)</span>')

    films = []
    for page in range(10):
        # BUG FIX: the original ignored the `url` argument and always read the
        # module-level base_url; honour the parameter instead.
        soup = get_html(url + str(page * 25))
        for item in soup.find_all('div', class_='item'):
            item = str(item)
            film = []
            film.append(int(re_ranking.findall(item)[0]))
            film.append(re_link.findall(item)[0].strip())
            titles = re_title.findall(item)
            film.append(titles[0].strip())
            if len(titles) > 1:
                # The second <span class="title"> is the foreign title,
                # prefixed with "&nbsp;/&nbsp;" (\xa0/\xa0) that must be stripped.
                film.append(titles[1].replace('\xa0/\xa0', '').strip())
            else:
                film.append('')  # no foreign title
            film.append(re_pic.findall(item)[0])
            info = re_info.findall(item)[0]
            info = re.sub(r'<br/>(\s+)?', '', info)   # drop the <br/> and its indentation
            info = re.sub(r'\s', ' ', info)           # collapse whitespace kinds to spaces
            film.append(info.strip().replace('/', ''))
            film.append(float(re_score.findall(item)[0]))
            film.append(int(re_comments.findall(item)[0]))
            inq = re_inq.findall(item)
            film.append(inq[0] if inq else '')  # one-line quote is optional
            films.append(film)
    return films


def get_html(url, timeout=10):
    """Fetch *url* and return the parsed BeautifulSoup document.

    :param url: page URL to request (sent with the module-level headers).
    :param timeout: seconds to wait before abandoning the request; the
                    original had no timeout, so a stalled server could
                    block the scraper forever.
    :return: BeautifulSoup tree of the response body, parsed with lxml.
    :raises requests.HTTPError: on a 4xx/5xx response (raise_for_status),
            instead of silently parsing an error page.
    """
    response = requests.get(url, headers=headers, timeout=timeout)
    response.raise_for_status()
    return BeautifulSoup(response.text, 'lxml')


def get_connect(host='localhost', user='root', password='123456', database='douban'):
    """Open and return a pymysql connection to the films database.

    The connection settings are now parameters (defaults match the original
    hard-coded values, so existing callers are unaffected).

    NOTE(security): the default credentials are hard-coded in source; they
    should be moved to environment variables or a config file.
    NOTE(review): charset 'utf8' in MySQL is the 3-byte subset — 'utf8mb4'
    is needed if any scraped text contains 4-byte characters; confirm the
    table's charset before changing.
    """
    return pymysql.connect(host=host, user=user, password=password,
                           database=database, charset='utf8')


def write_to_db(films):
    """Upsert scraped films into the films_top250 table, keyed on ranking.

    For each film, any existing row with the same ranking is deleted first,
    then the fresh row is inserted. Rows that fail are rolled back and
    skipped; the rest are still written.

    :param films: list of rows as produced by get_data():
                  [ranking, link, name, name_foreign, pic, info, score, comments, inq]
    """
    insert_sql = '''
            insert into films_top250(ranking, `name`, name_foreign, link, pic, info, score,comments, inq)
            values(%s, %s, %s, %s, %s, %s, %s, %s, %s);
        '''
    conn = get_connect()
    try:
        for film in films:
            try:
                with conn.cursor() as cursor:
                    # BUG FIX: cursor.execute() returns the number of result
                    # rows, which is always 1 for a COUNT(*) query, so the
                    # original `if cnt:` was unconditionally true. Read the
                    # actual count from the result row instead.
                    cursor.execute('select count(*) from films_top250 where ranking = %s',
                                   (film[0],))
                    if cursor.fetchone()[0]:
                        cursor.execute('delete from films_top250 where ranking = %s',
                                       (film[0],))
                    # get_data() order is [ranking, link, name, name_foreign, ...]
                    # while the table wants (ranking, name, name_foreign, link, ...),
                    # hence the index shuffle below.
                    cursor.execute(insert_sql,
                                   (film[0], film[2], film[3], film[1], film[4],
                                    film[5], film[6], film[7], film[8]))
                conn.commit()
            except pymysql.Error:
                conn.rollback()  # best-effort: drop this row, continue with the rest
    finally:
        # BUG FIX: the original leaked the connection if any exception other
        # than pymysql.Error escaped the loop; always close it.
        conn.close()


if __name__ == '__main__':
    # Scrape every page of the Top 250 list, then persist the rows to MySQL.
    write_to_db(get_data(base_url))
