"""
爬取猫眼电影top100的数据

第一步：分析地址规律                                                    offset 和页面的关系
    第6页： https://www.maoyan.com/board/4?offset=50                   (page - 1 ) *10
    第7页 ： https://www.maoyan.com/board/4?offset=60
    第8页  ： https://www.maoyan.com/board/4?offset=70

# 第二步：判断页面是否是静态页面
# 第三步：编写正则表达式
   <div class="movie-item-info">.*?title="超脱" .*?>(.*?)</a>.*?<p class="star">(.*?)</p><p class="releasetime">(.*?)
# 第四步： 编写代码结构
"""
import re
import time
from random import randint, random

import requests
from pymysql import Connection

class MaoyanSpider:
    """Scrape the Maoyan movie Top-100 board and store rows in MySQL.

    Workflow: run() walks the 10 board pages -> get_html() downloads one
    page -> parse_html() extracts (name, star, release-time) tuples with a
    regex -> save_html() inserts them into the ``maoyan_movie`` table.
    """

    def __init__(self):
        # URL template; page offset follows the rule (page - 1) * 10.
        self.url = 'https://www.maoyan.com/board/4?offset={}'
        # Browser-like UA plus a session cookie so Maoyan serves the real
        # board page instead of a verification page.
        self.headers = {
            'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36',
            'Cookie':'__mta=46944920.1747564742954.1747964798424.1747964822201.30; uuid_n_v=v1; uuid=1D2FCA8033D411F0AA512317FE8732D1A6B4DC4AC0154F3998F9FFEBCBDBE8FE; _ga=GA1.1.1845121898.1747564734; _lxsdk_cuid=196e2f9a500c8-09017bcdac77d4-26011f51-144000-196e2f9a500c8; ci=1%2C%E5%8C%97%E4%BA%AC; _lxsdk=1D2FCA8033D411F0AA512317FE8732D1A6B4DC4AC0154F3998F9FFEBCBDBE8FE; _csrf=9968f17108b30da28d825a9a9ad1538ec0c425c2a91890a3ad8ef1339bf7f922; Hm_lvt_e0bacf12e04a7bd88ddbd9c74ef2b533=1747564735,1747834343,1747874416,1747960486; HMACCOUNT=3BC3172826262B8A; _lx_utm=utm_source%3DBaidu%26utm_medium%3Dorganic; __mta=46944920.1747564742954.1747960486691.1747961667656.7; _ga_WN80P4PSY7=GS2.1.s1747963856$o7$g1$t1747964821$j0$l0$h0; Hm_lpvt_e0bacf12e04a7bd88ddbd9c74ef2b533=1747964822; _lxsdk_s=196fac3ca28-fc7-58e-122%7C%7C9'
        }
        # NOTE(review): DB credentials are hard-coded; consider moving them
        # to environment variables or a config file.
        self.conn = Connection(
            user="root",
            password="root",
            host="localhost",
            database="my_spider",
            port=3306,
            autocommit=True  # every INSERT is committed immediately
        )
        self.cursor = self.conn.cursor()

    def get_html(self, url):
        """Download one board page and hand the HTML to the parser.

        Network/decoding errors are reported and swallowed so one bad
        page does not abort the whole crawl.
        """
        try:
            html = requests.get(url=url, headers=self.headers, timeout=5).content.decode("UTF-8")
            # Parse immediately; no intermediate storage of raw pages.
            self.parse_html(html)
        except Exception as e:
            # BUG FIX: the original used a raw string r"...{url}", which
            # printed the literal text "{url}". An f-string interpolates
            # the actual failing URL.
            print(f"连接超时：{url}")

    def parse_html(self, html):
        """Extract (name, star, release-time) triples and save them."""
        regex = '<div class="movie-item-info">.*?<a .*?>(.*?)</a>.*?<p class="star">(.*?)</p>.*?<p class="releasetime">(.*?)</p>'
        # re.S lets '.' cross newlines — the fields span multiple lines.
        pattern = re.compile(regex, re.S)
        # r_list looks like [("我不是药神", "\n  主演：...\n", "上映时间：..."), ...]
        r_list = pattern.findall(html)
        self.save_html(r_list)
        print("保存成功")

    def save_html(self, r_list):
        """Insert the scraped tuples into the maoyan_movie table.

        Each field is whitespace-stripped before insertion; commits are
        automatic (autocommit=True on the connection).
        """
        # Parameterized query — safe against SQL injection.
        sql = "insert into maoyan_movie values(null,%s,%s,%s)"
        try:
            for name, star, release_time in r_list:
                row = [
                    name.strip(),
                    star.strip(),
                    release_time.strip()
                ]
                print(row)
                self.cursor.execute(sql, row)
        except Exception as e:
            # Report and continue: a failed insert should not kill the run.
            print(e)

    def run(self):
        """Entry point: crawl offsets 0..90 (10 pages), then close the DB."""
        for offset in range(0, 91, 10):
            page_url = self.url.format(offset)
            print(page_url)
            self.get_html(url=page_url)
            # BUG FIX: the module does `from random import random`, so the
            # original `random.randint(1, 3)` raised AttributeError; use
            # randint directly for a polite 1-3 s delay between pages.
            time.sleep(randint(1, 3))

        # Close cursor and connection once all pages are done.
        self.cursor.close()
        self.conn.close()



if __name__ == '__main__':
    # Build the spider and kick off the crawl.
    MaoyanSpider().run()


'''

'''

