import random
from urllib import request

from lxml import etree
import MySQLdb

# Connect to the MySQL database that will store the scraped movie records.
conn = MySQLdb.connect(
    host="localhost",  # IP of the machine running MySQL
    port=3306,  # MySQL port
    user="root",  # MySQL user name
    password="123456",  # MySQL password
    db="reptiles",  # database (schema) to use
    charset="utf8",  # character set for the connection
)
# DictCursor makes fetched rows dicts keyed by column name instead of plain tuples.
cursor = conn.cursor(MySQLdb.cursors.DictCursor)

# Pool of User-Agent strings. Rotating them helps avoid the site's
# rate-limiting on a single client signature. Built once, outside the loop.
USER_AGENTS = [
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)"
]

try:
    # The Top-100 board is paged 10 entries at a time: offset=0,10,...,90.
    # (The original range(1, 11) produced offsets 10..100, skipping the
    # first page and requesting a non-existent 11th one.)
    for page in range(10):
        url = "https://maoyan.com/board/4?offset={}".format(page * 10)
        print(url)

        # A randomized User-Agent header gets the request past the site's
        # simple bot check that blocks the default urllib agent.
        user_agent = random.choice(USER_AGENTS)
        print(user_agent)
        req = request.Request(url, headers={"user-agent": user_agent})
        board_html = request.urlopen(req).read().decode("utf-8")

        board = etree.HTML(board_html)
        # When a page comes back wrong (rate limit / cookie wall), dump the
        # HTML to a file and inspect it:
        # with open("3.html", "w", encoding="utf-8") as w:
        #     w.write(board_html)

        # Relative links to the detail page of each movie on this board page.
        video_urls = board.xpath("//a[@class='image-link']/@href")
        print(video_urls)
        for path in video_urls:
            video_url = "https://maoyan.com" + path
            print("这是电影地址")
            print(video_url)

            # Fetch the movie's detail page with the same user agent.
            req = request.Request(video_url, headers={"user-agent": user_agent})
            detail_html = request.urlopen(req).read().decode("utf-8")
            detail = etree.HTML(detail_html)

            # Movie title(s); multiple xpath matches are joined with commas.
            name = ",".join(detail.xpath("//h1[@class='name']/text()"))
            print(name)

            # Genre links inside the first "ellipsis" list item.
            film_category = ",".join(detail.xpath("//li[@class='ellipsis']/a/text()"))

            # Country of origin plus running time — the [2] predicate selects
            # the second matching <li>.
            film_country = ",".join(detail.xpath("//li[@class='ellipsis'][2]/text()"))

            # Premiere date (third "ellipsis" item).
            premiere = ",".join(detail.xpath("//li[@class='ellipsis'][3]/text()"))

            # Maoyan rating — served via an obfuscated web font, so the
            # extracted text may be garbled glyph codes rather than digits.
            cat_evaluate = ",".join(detail.xpath("//span[@class='stonefont']/text()"))

            # Plot synopsis.
            synopsis = ",".join(detail.xpath("//span[@class='dra']/text()"))

            # Parameterized insert — values are bound by the driver, never
            # interpolated into the SQL string.
            row = [name, film_category, film_country, premiere, cat_evaluate, synopsis]
            sql = "insert into cat_video values (%s,%s,%s,%s,%s,%s)"
            cursor.execute(sql, row)
            conn.commit()
finally:
    # Always release the cursor and connection, even if a request fails.
    cursor.close()
    conn.close()
